-rw-r--r--Documentation/ABI/testing/sysfs-fs-f2fs16
-rw-r--r--Documentation/devicetree/bindings/clock/sifive/fu740-prci.yaml4
-rw-r--r--Documentation/devicetree/bindings/clock/silabs,si5351.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/socionext,uniphier-clock.yaml6
-rw-r--r--Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml33
-rw-r--r--Documentation/devicetree/bindings/display/bridge/ps8640.yaml19
-rw-r--r--Documentation/devicetree/bindings/display/panel/panel-simple.yaml5
-rw-r--r--Documentation/devicetree/bindings/display/panel/sharp,ls060t1sx01.yaml56
-rw-r--r--Documentation/devicetree/bindings/input/cypress-sf.yaml61
-rw-r--r--Documentation/devicetree/bindings/input/microchip,cap11xx.yaml1
-rw-r--r--Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.yaml2
-rw-r--r--Documentation/devicetree/bindings/rtc/mstar,msc313-rtc.yaml49
-rw-r--r--Documentation/devicetree/bindings/rtc/nxp,pcf85063.txt9
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.yaml2
-rw-r--r--Documentation/filesystems/f2fs.rst19
-rw-r--r--Documentation/gpu/todo.rst13
-rw-r--r--Documentation/networking/ip-sysctl.rst6
-rw-r--r--Documentation/security/SCTP.rst43
-rw-r--r--Documentation/virt/kvm/api.rst14
-rw-r--r--Documentation/vm/hmm.rst2
-rw-r--r--Documentation/x86/xstate.rst9
-rw-r--r--MAINTAINERS44
-rw-r--r--arch/arm64/include/asm/esr.h1
-rw-r--r--arch/arm64/include/asm/kvm_host.h2
-rw-r--r--arch/arm64/kernel/cpufeature.c1
-rw-r--r--arch/arm64/kvm/arm.c5
-rw-r--r--arch/arm64/kvm/guest.c7
-rw-r--r--arch/arm64/kvm/hyp/hyp-entry.S2
-rw-r--r--arch/arm64/kvm/hyp/nvhe/host.S2
-rw-r--r--arch/arm64/kvm/hyp/nvhe/setup.c14
-rw-r--r--arch/arm64/kvm/hyp/nvhe/sys_regs.c2
-rw-r--r--arch/arm64/mm/kasan_init.c2
-rw-r--r--arch/m68k/Kconfig.cpu11
-rw-r--r--arch/m68k/Kconfig.machine1
-rw-r--r--arch/m68k/include/asm/bitops.h2
-rw-r--r--arch/mips/Kbuild.platforms2
-rw-r--r--arch/mips/Kconfig5
-rw-r--r--arch/mips/Makefile2
-rw-r--r--arch/mips/boot/compressed/.gitignore3
-rw-r--r--arch/mips/boot/compressed/Makefile12
-rw-r--r--arch/mips/boot/compressed/ashldi3.c2
-rw-r--r--arch/mips/boot/compressed/bswapdi.c2
-rw-r--r--arch/mips/boot/compressed/bswapsi.c2
-rw-r--r--arch/mips/boot/compressed/uart-ath79.c2
-rw-r--r--arch/mips/configs/bmips_stb_defconfig155
-rw-r--r--arch/mips/dec/setup.c6
-rw-r--r--arch/mips/include/asm/traps.h2
-rw-r--r--arch/mips/kernel/traps.c8
-rw-r--r--arch/mips/sgi-ip22/ip22-berr.c2
-rw-r--r--arch/mips/sgi-ip22/ip28-berr.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-berr.c2
-rw-r--r--arch/mips/sgi-ip32/ip32-berr.c2
-rw-r--r--arch/mips/sibyte/swarm/setup.c2
-rw-r--r--arch/mips/txx9/generic/setup_tx4927.c2
-rw-r--r--arch/mips/txx9/generic/setup_tx4938.c2
-rw-r--r--arch/mips/txx9/generic/setup_tx4939.c2
-rw-r--r--arch/mips/vdso/Makefile2
-rw-r--r--arch/powerpc/kvm/book3s_hv_uvmem.c4
-rw-r--r--arch/riscv/Kconfig1
-rw-r--r--arch/riscv/Makefile10
-rw-r--r--arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts18
-rw-r--r--arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi40
-rw-r--r--arch/riscv/boot/dts/sifive/fu540-c000.dtsi2
-rw-r--r--arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts10
-rw-r--r--arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts7
-rw-r--r--arch/riscv/configs/32-bit.config2
-rw-r--r--arch/riscv/configs/64-bit.config2
-rw-r--r--arch/riscv/configs/defconfig7
-rw-r--r--arch/riscv/include/asm/page.h2
-rw-r--r--arch/riscv/include/asm/pgtable.h6
-rw-r--r--arch/riscv/include/asm/vdso.h13
-rw-r--r--arch/riscv/include/asm/vdso/gettimeofday.h7
-rw-r--r--arch/riscv/kernel/head.S12
-rw-r--r--arch/riscv/kernel/reset.c12
-rw-r--r--arch/riscv/kernel/vdso.c250
-rw-r--r--arch/riscv/kernel/vdso/vdso.lds.S3
-rw-r--r--arch/riscv/kernel/vmlinux-xip.lds.S10
-rw-r--r--arch/riscv/mm/context.c8
-rw-r--r--arch/riscv/mm/extable.c4
-rw-r--r--arch/riscv/mm/init.c7
-rw-r--r--arch/riscv/net/bpf_jit_comp64.c2
-rw-r--r--arch/s390/include/asm/pci.h6
-rw-r--r--arch/s390/kernel/perf_cpum_cf.c4
-rw-r--r--arch/s390/pci/pci.c148
-rw-r--r--arch/s390/pci/pci_event.c230
-rw-r--r--arch/s390/pci/pci_insn.c4
-rw-r--r--arch/s390/pci/pci_irq.c9
-rw-r--r--arch/x86/events/intel/core.c4
-rw-r--r--arch/x86/events/intel/lbr.c2
-rw-r--r--arch/x86/include/asm/fpu/xcr.h12
-rw-r--r--arch/x86/include/asm/fpu/xstate.h7
-rw-r--r--arch/x86/include/asm/intel-family.h2
-rw-r--r--arch/x86/include/asm/kvm_host.h6
-rw-r--r--arch/x86/include/asm/kvm_para.h12
-rw-r--r--arch/x86/include/asm/mem_encrypt.h4
-rw-r--r--arch/x86/include/asm/paravirt.h6
-rw-r--r--arch/x86/include/asm/paravirt_types.h1
-rw-r--r--arch/x86/include/asm/processor.h5
-rw-r--r--arch/x86/include/asm/set_memory.h1
-rw-r--r--arch/x86/include/asm/smp.h1
-rw-r--r--arch/x86/include/asm/static_call.h1
-rw-r--r--arch/x86/include/uapi/asm/kvm_para.h1
-rw-r--r--arch/x86/kernel/cpu/cpuid-deps.c1
-rw-r--r--arch/x86/kernel/cpu/mce/intel.c5
-rw-r--r--arch/x86/kernel/fpu/xstate.h37
-rw-r--r--arch/x86/kernel/kvm.c109
-rw-r--r--arch/x86/kernel/paravirt.c1
-rw-r--r--arch/x86/kernel/smpboot.c18
-rw-r--r--arch/x86/kernel/static_call.c14
-rw-r--r--arch/x86/kernel/vm86_32.c2
-rw-r--r--arch/x86/kvm/cpuid.c93
-rw-r--r--arch/x86/kvm/hyperv.c4
-rw-r--r--arch/x86/kvm/lapic.c23
-rw-r--r--arch/x86/kvm/lapic.h2
-rw-r--r--arch/x86/kvm/mmu/mmu.c10
-rw-r--r--arch/x86/kvm/mmu/tdp_mmu.c2
-rw-r--r--arch/x86/kvm/pmu.c2
-rw-r--r--arch/x86/kvm/pmu.h4
-rw-r--r--arch/x86/kvm/svm/avic.c3
-rw-r--r--arch/x86/kvm/svm/pmu.c5
-rw-r--r--arch/x86/kvm/svm/sev.c299
-rw-r--r--arch/x86/kvm/svm/svm.c14
-rw-r--r--arch/x86/kvm/svm/svm.h28
-rw-r--r--arch/x86/kvm/vmx/nested.c166
-rw-r--r--arch/x86/kvm/vmx/pmu_intel.c7
-rw-r--r--arch/x86/kvm/vmx/vmx.c73
-rw-r--r--arch/x86/kvm/vmx/vmx.h33
-rw-r--r--arch/x86/kvm/x86.c147
-rw-r--r--arch/x86/mm/mem_encrypt.c72
-rw-r--r--arch/x86/mm/pat/set_memory.c6
-rw-r--r--arch/x86/xen/smp_pv.c12
-rw-r--r--block/blk-core.c4
-rw-r--r--block/blk-ia-ranges.c4
-rw-r--r--block/blk-mq-sched.c4
-rw-r--r--block/blk-mq.c47
-rw-r--r--block/blk-mq.h28
-rw-r--r--block/blk-zoned.c15
-rw-r--r--block/genhd.c6
-rw-r--r--block/ioctl.c24
-rw-r--r--crypto/algapi.c2
-rw-r--r--crypto/zstd.c28
-rw-r--r--drivers/ata/libahci.c2
-rw-r--r--drivers/ata/libata-core.c13
-rw-r--r--drivers/ata/libata-eh.c8
-rw-r--r--drivers/ata/sata_highbank.c4
-rw-r--r--drivers/base/arch_topology.c2
-rw-r--r--drivers/bus/brcmstb_gisb.c2
-rw-r--r--drivers/clk/actions/owl-factor.c1
-rw-r--r--drivers/clk/clk-ast2600.c12
-rw-r--r--drivers/clk/clk-composite.c1
-rw-r--r--drivers/clk/clk-si5351.c8
-rw-r--r--drivers/clk/clk-si5351.h2
-rw-r--r--drivers/clk/clk-versaclock5.c4
-rw-r--r--drivers/clk/imx/clk.h4
-rw-r--r--drivers/clk/ingenic/cgu.c6
-rw-r--r--drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c2
-rw-r--r--drivers/clk/qcom/gcc-msm8996.c15
-rw-r--r--drivers/clk/rockchip/Kconfig4
-rw-r--r--drivers/clk/rockchip/clk-rk3399.c6
-rw-r--r--drivers/clk/rockchip/clk-rk3568.c6
-rw-r--r--drivers/clk/uniphier/clk-uniphier-core.c17
-rw-r--r--drivers/clk/uniphier/clk-uniphier-sys.c47
-rw-r--r--drivers/clk/uniphier/clk-uniphier.h6
-rw-r--r--drivers/dma-buf/dma-buf.c81
-rw-r--r--drivers/dma-buf/dma-resv.c69
-rw-r--r--drivers/gpu/drm/Kconfig20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c24
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.c52
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c70
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c92
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c10
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c150
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c78
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h1
-rw-r--r--drivers/gpu/drm/amd/display/dmub/dmub_srv.h1
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h4
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c1
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c4
-rw-r--r--drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_ppsmc.h4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c26
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c8
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c10
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h13
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c12
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c14
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c13
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c87
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611uxc.c9
-rw-r--r--drivers/gpu/drm/bridge/lvds-codec.c76
-rw-r--r--drivers/gpu/drm/bridge/nwl-dsi.c35
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi83.c17
-rw-r--r--drivers/gpu/drm/drm_connector.c32
-rw-r--r--drivers/gpu/drm/drm_gem.c26
-rw-r--r--drivers/gpu/drm/drm_modeset_lock.c49
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c1
-rw-r--r--drivers/gpu/drm/drm_prime.c6
-rw-r--r--drivers/gpu/drm/i915/display/g4x_hdmi.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c132
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c44
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c31
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c16
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.h1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c3
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c3
-rw-r--r--drivers/gpu/drm/i915/i915_request.c34
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c2
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_kms.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c42
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c4
-rw-r--r--drivers/gpu/drm/panel/Kconfig10
-rw-r--r--drivers/gpu/drm/panel/Makefile1
-rw-r--r--drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c9
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c3
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c3
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63m0.c4
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63m0.h2
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c333
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c35
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7703.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c2
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c26
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c19
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c94
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c2
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c13
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c72
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c3
-rw-r--r--drivers/input/joystick/analog.c18
-rw-r--r--drivers/input/joystick/iforce/iforce-usb.c2
-rw-r--r--drivers/input/joystick/tmdc.c2
-rw-r--r--drivers/input/keyboard/Kconfig10
-rw-r--r--drivers/input/keyboard/Makefile1
-rw-r--r--drivers/input/keyboard/cap11xx.c43
-rw-r--r--drivers/input/keyboard/cypress-sf.c224
-rw-r--r--drivers/input/keyboard/ep93xx_keypad.c172
-rw-r--r--drivers/input/keyboard/mpr121_touchkey.c4
-rw-r--r--drivers/input/keyboard/omap-keypad.c3
-rw-r--r--drivers/input/keyboard/tm2-touchkey.c7
-rw-r--r--drivers/input/misc/adxl34x-i2c.c4
-rw-r--r--drivers/input/misc/adxl34x-spi.c4
-rw-r--r--drivers/input/misc/adxl34x.c6
-rw-r--r--drivers/input/misc/adxl34x.h2
-rw-r--r--drivers/input/misc/ariel-pwrbutton.c7
-rw-r--r--drivers/input/misc/cpcap-pwrbutton.c7
-rw-r--r--drivers/input/misc/max77693-haptic.c1
-rw-r--r--drivers/input/misc/max8925_onkey.c2
-rw-r--r--drivers/input/misc/palmas-pwrbutton.c5
-rw-r--r--drivers/input/misc/pm8941-pwrkey.c6
-rw-r--r--drivers/input/mouse/elantech.c13
-rw-r--r--drivers/input/rmi4/rmi_bus.c1
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h14
-rw-r--r--drivers/input/touchscreen/Kconfig1
-rw-r--r--drivers/input/touchscreen/Makefile3
-rw-r--r--drivers/input/touchscreen/ads7846.c200
-rw-r--r--drivers/input/touchscreen/elants_i2c.c4
-rw-r--r--drivers/input/touchscreen/goodix.c231
-rw-r--r--drivers/input/touchscreen/goodix.h117
-rw-r--r--drivers/input/touchscreen/goodix_fwupload.c427
-rw-r--r--drivers/input/touchscreen/ili210x.c559
-rw-r--r--drivers/input/touchscreen/raydium_i2c_ts.c54
-rw-r--r--drivers/input/touchscreen/st1232.c3
-rw-r--r--drivers/input/touchscreen/tsc2004.c4
-rw-r--r--drivers/input/touchscreen/tsc2005.c4
-rw-r--r--drivers/input/touchscreen/tsc200x-core.c4
-rw-r--r--drivers/input/touchscreen/tsc200x-core.h2
-rw-r--r--drivers/input/touchscreen/wacom_i2c.c22
-rw-r--r--drivers/net/Kconfig1
-rw-r--r--drivers/net/amt.c11
-rw-r--r--drivers/net/bonding/bond_sysfs_slave.c36
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c6
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_core.c6
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c27
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c5
-rw-r--r--drivers/net/dsa/ocelot/felix.c9
-rw-r--r--drivers/net/dsa/qca8k.c8
-rw-r--r--drivers/net/dsa/qca8k.h1
-rw-r--r--drivers/net/ethernet/asix/ax88796c_main.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.h2
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c2
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c20
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c106
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h8
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c77
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c32
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c10
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c141
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h5
-rw-r--r--drivers/net/ethernet/lantiq_etop.c2
-rw-r--r--drivers/net/ethernet/litex/litex_liteeth.c5
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c38
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/Kconfig1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c2
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_ethtool.c3
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_hw.c144
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_main.c6
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_pci.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c2
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c2
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c6
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c16
-rw-r--r--drivers/net/hamradio/6pack.c6
-rw-r--r--drivers/net/hamradio/mkiss.c9
-rw-r--r--drivers/net/phy/microchip_t1.c44
-rw-r--r--drivers/net/phy/phy.c7
-rw-r--r--drivers/net/sungem_phy.c2
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem_ops.c2
-rw-r--r--drivers/nfc/pn533/pn533.c6
-rw-r--r--drivers/nfc/port100.c6
-rw-r--r--drivers/pci/controller/Kconfig3
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c24
-rw-r--r--drivers/pci/iov.c24
-rw-r--r--drivers/pci/pci-driver.c37
-rw-r--r--drivers/pci/pci.c20
-rw-r--r--drivers/pci/pcie/err.c8
-rw-r--r--drivers/pwm/Kconfig4
-rw-r--r--drivers/pwm/core.c9
-rw-r--r--drivers/pwm/pwm-atmel.c1
-rw-r--r--drivers/pwm/pwm-samsung.c30
-rw-r--r--drivers/pwm/pwm-visconti.c14
-rw-r--r--drivers/pwm/pwm-vt8500.c16
-rw-r--r--drivers/rtc/Kconfig19
-rw-r--r--drivers/rtc/Makefile2
-rw-r--r--drivers/rtc/class.c20
-rw-r--r--drivers/rtc/dev.c65
-rw-r--r--drivers/rtc/interface.c15
-rw-r--r--drivers/rtc/rtc-ab-eoz9.c3
-rw-r--r--drivers/rtc/rtc-ab8500.c23
-rw-r--r--drivers/rtc/rtc-ds1302.c7
-rw-r--r--drivers/rtc/rtc-ds1390.c7
-rw-r--r--drivers/rtc/rtc-m41t80.c2
-rw-r--r--drivers/rtc/rtc-mcp795.c7
-rw-r--r--drivers/rtc/rtc-msc313.c259
-rw-r--r--drivers/rtc/rtc-omap.c1
-rw-r--r--drivers/rtc/rtc-pcf2123.c9
-rw-r--r--drivers/rtc/rtc-pcf85063.c16
-rw-r--r--drivers/rtc/rtc-pcf8523.c434
-rw-r--r--drivers/rtc/rtc-rv3028.c74
-rw-r--r--drivers/rtc/rtc-rv3032.c89
-rw-r--r--drivers/rtc/rtc-rv8803.c4
-rw-r--r--drivers/rtc/rtc-rx6110.c2
-rw-r--r--drivers/rtc/rtc-rx8025.c141
-rw-r--r--drivers/rtc/rtc-s35390a.c7
-rw-r--r--drivers/rtc/rtc-s3c.c106
-rw-r--r--drivers/rtc/rtc-s5m.c1
-rw-r--r--drivers/rtc/rtc-sun6i.c13
-rw-r--r--drivers/rtc/rtc-tps80031.c324
-rw-r--r--drivers/s390/char/tape_std.c3
-rw-r--r--drivers/s390/cio/css.c4
-rw-r--r--drivers/scsi/elx/libefc_sli/sli4.c9
-rw-r--r--drivers/scsi/hosts.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_edif.c328
-rw-r--r--drivers/scsi/qla2xxx/qla_edif.h13
-rw-r--r--drivers/scsi/qla2xxx/qla_edif_bsg.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c108
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c23
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c37
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h4
-rw-r--r--drivers/scsi/scsi_debug.c4
-rw-r--r--drivers/scsi/scsi_error.c25
-rw-r--r--drivers/scsi/scsi_ioctl.c2
-rw-r--r--drivers/scsi/scsi_lib.c3
-rw-r--r--drivers/scsi/scsi_sysfs.c12
-rw-r--r--drivers/scsi/sr.c1
-rw-r--r--drivers/scsi/ufs/ufs-debugfs.c98
-rw-r--r--drivers/scsi/ufs/ufs-exynos.c354
-rw-r--r--drivers/scsi/ufs/ufs-exynos.h27
-rw-r--r--drivers/scsi/ufs/ufshcd.c108
-rw-r--r--drivers/scsi/ufs/ufshcd.h13
-rw-r--r--drivers/scsi/ufs/ufshci.h15
-rw-r--r--drivers/scsi/ufs/ufshpb.c24
-rw-r--r--drivers/scsi/ufs/ufshpb.h1
-rw-r--r--drivers/target/target_core_tmr.c17
-rw-r--r--drivers/target/target_core_transport.c30
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_mbox.c1
-rw-r--r--drivers/usb/core/hcd.c29
-rw-r--r--drivers/usb/host/xhci-hub.c3
-rw-r--r--drivers/usb/host/xhci.c1
-rw-r--r--drivers/video/fbdev/core/bitblit.c16
-rw-r--r--drivers/video/fbdev/core/fbcon.c509
-rw-r--r--drivers/video/fbdev/core/fbcon.h59
-rw-r--r--drivers/video/fbdev/core/fbcon_ccw.c28
-rw-r--r--drivers/video/fbdev/core/fbcon_cw.c28
-rw-r--r--drivers/video/fbdev/core/fbcon_rotate.h9
-rw-r--r--drivers/video/fbdev/core/fbcon_ud.c37
-rw-r--r--drivers/video/fbdev/core/fbmem.c5
-rw-r--r--drivers/video/fbdev/core/tileblit.c16
-rw-r--r--drivers/video/fbdev/skeletonfb.c12
-rw-r--r--drivers/virtio/virtio_mem.c1
-rw-r--r--fs/9p/vfs_addr.c83
-rw-r--r--fs/9p/vfs_file.c20
-rw-r--r--fs/afs/dir.c229
-rw-r--r--fs/afs/dir_edit.c154
-rw-r--r--fs/afs/file.c70
-rw-r--r--fs/afs/internal.h46
-rw-r--r--fs/afs/write.c347
-rw-r--r--fs/btrfs/zstd.c68
-rw-r--r--fs/ceph/addr.c109
-rw-r--r--fs/ceph/cache.c23
-rw-r--r--fs/ceph/caps.c151
-rw-r--r--fs/ceph/debugfs.c167
-rw-r--r--fs/ceph/export.c12
-rw-r--r--fs/ceph/file.c103
-rw-r--r--fs/ceph/inode.c54
-rw-r--r--fs/ceph/locks.c6
-rw-r--r--fs/ceph/mds_client.c139
-rw-r--r--fs/ceph/mdsmap.c4
-rw-r--r--fs/ceph/metric.c128
-rw-r--r--fs/ceph/metric.h88
-rw-r--r--fs/ceph/super.c17
-rw-r--r--fs/ceph/super.h18
-rw-r--r--fs/cifs/cifs_debug.c7
-rw-r--r--fs/cifs/cifs_dfs_ref.c59
-rw-r--r--fs/cifs/cifs_fs_sb.h5
-rw-r--r--fs/cifs/cifsglob.h47
-rw-r--r--fs/cifs/cifsproto.h10
-rw-r--r--fs/cifs/connect.c1468
-rw-r--r--fs/cifs/dfs_cache.c46
-rw-r--r--fs/cifs/file.c35
-rw-r--r--fs/cifs/fs_context.c36
-rw-r--r--fs/cifs/fs_context.h1
-rw-r--r--fs/cifs/fscache.c8
-rw-r--r--fs/cifs/misc.c64
-rw-r--r--fs/cifs/ntlmssp.h4
-rw-r--r--fs/cifs/sess.c240
-rw-r--r--fs/cifs/smb2inode.c22
-rw-r--r--fs/cifs/smb2ops.c10
-rw-r--r--fs/cifs/smb2pdu.c52
-rw-r--r--fs/cifs/transport.c3
-rw-r--r--fs/erofs/zdata.c33
-rw-r--r--fs/erofs/zdata.h1
-rw-r--r--fs/erofs/zpvec.h13
-rw-r--r--fs/f2fs/checkpoint.c8
-rw-r--r--fs/f2fs/compress.c76
-rw-r--r--fs/f2fs/data.c95
-rw-r--r--fs/f2fs/f2fs.h54
-rw-r--r--fs/f2fs/file.c6
-rw-r--r--fs/f2fs/gc.c5
-rw-r--r--fs/f2fs/inline.c2
-rw-r--r--fs/f2fs/inode.c4
-rw-r--r--fs/f2fs/namei.c32
-rw-r--r--fs/f2fs/node.c1
-rw-r--r--fs/f2fs/node.h5
-rw-r--r--fs/f2fs/recovery.c14
-rw-r--r--fs/f2fs/segment.c83
-rw-r--r--fs/f2fs/segment.h1
-rw-r--r--fs/f2fs/super.c41
-rw-r--r--fs/f2fs/sysfs.c24
-rw-r--r--fs/f2fs/verity.c2
-rw-r--r--fs/f2fs/xattr.c2
-rw-r--r--fs/io-wq.c17
-rw-r--r--fs/ksmbd/Kconfig2
-rw-r--r--fs/ksmbd/auth.c11
-rw-r--r--fs/ksmbd/connection.c11
-rw-r--r--fs/ksmbd/ksmbd_work.c1
-rw-r--r--fs/ksmbd/ksmbd_work.h4
-rw-r--r--fs/ksmbd/oplock.c48
-rw-r--r--fs/ksmbd/oplock.h2
-rw-r--r--fs/ksmbd/server.c2
-rw-r--r--fs/ksmbd/smb2misc.c16
-rw-r--r--fs/ksmbd/smb2ops.c9
-rw-r--r--fs/ksmbd/smb2pdu.c591
-rw-r--r--fs/ksmbd/smb2pdu.h696
-rw-r--r--fs/ksmbd/smb_common.c13
-rw-r--r--fs/ksmbd/smb_common.h55
-rw-r--r--fs/ksmbd/transport_rdma.c3
-rw-r--r--fs/ksmbd/vfs.c8
-rw-r--r--fs/ksmbd/vfs.h39
-rw-r--r--fs/libfs.c29
-rw-r--r--fs/netfs/read_helper.c165
-rw-r--r--fs/pstore/platform.c2
-rw-r--r--fs/squashfs/zstd_wrapper.c16
-rw-r--r--include/drm/drm_modeset_lock.h8
-rw-r--r--include/drm/ttm/ttm_bo_api.h9
-rw-r--r--include/linux/bpf.h6
-rw-r--r--include/linux/ceph/ceph_fs.h2
-rw-r--r--include/linux/ceph/osd_client.h19
-rw-r--r--include/linux/dma-resv.h25
-rw-r--r--include/linux/dsa/ocelot.h1
-rw-r--r--include/linux/efi.h1
-rw-r--r--include/linux/ethtool_netlink.h3
-rw-r--r--include/linux/fb.h2
-rw-r--r--include/linux/fs.h2
-rw-r--r--include/linux/genhd.h6
-rw-r--r--include/linux/input/cy8ctmg110_pdata.h10
-rw-r--r--include/linux/kcsan-checks.h3
-rw-r--r--include/linux/kernel.h2
-rw-r--r--include/linux/kvm_host.h12
-rw-r--r--include/linux/libata.h3
-rw-r--r--include/linux/lsm_hook_defs.h4
-rw-r--r--include/linux/lsm_hooks.h8
-rw-r--r--include/linux/migrate.h1
-rw-r--r--include/linux/netfs.h12
-rw-r--r--include/linux/page_owner.h12
-rw-r--r--include/linux/pagemap.h33
-rw-r--r--include/linux/pci.h2
-rw-r--r--include/linux/pwm.h13
-rw-r--r--include/linux/rtc.h3
-rw-r--r--include/linux/security.h10
-rw-r--r--include/linux/skbuff.h35
-rw-r--r--include/linux/skmsg.h12
-rw-r--r--include/linux/spi/ads7846.h15
-rw-r--r--include/linux/usb/hcd.h2
-rw-r--r--include/linux/vermagic.h2
-rw-r--r--include/linux/zstd.h1252
-rw-r--r--include/linux/zstd_errors.h77
-rw-r--r--include/linux/zstd_lib.h2432
-rw-r--r--include/net/llc.h4
-rw-r--r--include/net/sctp/structs.h20
-rw-r--r--include/net/strparser.h20
-rw-r--r--include/net/tcp.h8
-rw-r--r--include/scsi/scsi_cmnd.h2
-rw-r--r--include/scsi/scsi_device.h6
-rw-r--r--include/scsi/scsi_host.h1
-rw-r--r--include/sound/memalloc.h14
-rw-r--r--include/trace/events/afs.h21
-rw-r--r--include/trace/events/f2fs.h33
-rw-r--r--include/uapi/linux/ethtool_netlink.h4
-rw-r--r--include/uapi/linux/kvm.h1
-rw-r--r--include/uapi/linux/rtc.h31
-rw-r--r--include/uapi/linux/virtio_mem.h9
-rw-r--r--init/Kconfig2
-rw-r--r--init/Makefile2
-rw-r--r--kernel/Kconfig.preempt42
-rw-r--r--kernel/bpf/core.c7
-rw-r--r--kernel/bpf/verifier.c55
-rw-r--r--kernel/events/core.c10
-rw-r--r--kernel/kcsan/core.c75
-rw-r--r--kernel/kcsan/kcsan.h8
-rw-r--r--kernel/kcsan/kcsan_test.c62
-rw-r--r--kernel/kcsan/report.c77
-rw-r--r--kernel/kcsan/selftest.c72
-rw-r--r--kernel/reboot.c1
-rw-r--r--kernel/sched/autogroup.c2
-rw-r--r--kernel/sched/core.c53
-rw-r--r--kernel/sched/fair.c4
-rw-r--r--kernel/sched/rt.c12
-rw-r--r--kernel/sched/sched.h3
-rw-r--r--kernel/trace/ftrace.c3
-rw-r--r--kernel/trace/ring_buffer.c5
-rw-r--r--kernel/trace/trace_events_hist.c12
-rw-r--r--kernel/trace/trace_osnoise.c3
-rw-r--r--lib/decompress_unzstd.c48
-rw-r--r--lib/test_hmm.c5
-rw-r--r--lib/zstd/Makefile46
-rw-r--r--lib/zstd/bitstream.h380
-rw-r--r--lib/zstd/common/bitstream.h437
-rw-r--r--lib/zstd/common/compiler.h170
-rw-r--r--lib/zstd/common/cpu.h194
-rw-r--r--lib/zstd/common/debug.c24
-rw-r--r--lib/zstd/common/debug.h101
-rw-r--r--lib/zstd/common/entropy_common.c357
-rw-r--r--lib/zstd/common/error_private.c56
-rw-r--r--lib/zstd/common/error_private.h66
-rw-r--r--lib/zstd/common/fse.h710
-rw-r--r--lib/zstd/common/fse_decompress.c390
-rw-r--r--lib/zstd/common/huf.h356
-rw-r--r--lib/zstd/common/mem.h259
-rw-r--r--lib/zstd/common/zstd_common.c83
-rw-r--r--lib/zstd/common/zstd_deps.h125
-rw-r--r--lib/zstd/common/zstd_internal.h450
-rw-r--r--lib/zstd/compress.c3485
-rw-r--r--lib/zstd/compress/fse_compress.c625
-rw-r--r--lib/zstd/compress/hist.c165
-rw-r--r--lib/zstd/compress/hist.h75
-rw-r--r--lib/zstd/compress/huf_compress.c905
-rw-r--r--lib/zstd/compress/zstd_compress.c5109
-rw-r--r--lib/zstd/compress/zstd_compress_internal.h1188
-rw-r--r--lib/zstd/compress/zstd_compress_literals.c158
-rw-r--r--lib/zstd/compress/zstd_compress_literals.h29
-rw-r--r--lib/zstd/compress/zstd_compress_sequences.c439
-rw-r--r--lib/zstd/compress/zstd_compress_sequences.h54
-rw-r--r--lib/zstd/compress/zstd_compress_superblock.c850
-rw-r--r--lib/zstd/compress/zstd_compress_superblock.h32
-rw-r--r--lib/zstd/compress/zstd_cwksp.h482
-rw-r--r--lib/zstd/compress/zstd_double_fast.c519
-rw-r--r--lib/zstd/compress/zstd_double_fast.h32
-rw-r--r--lib/zstd/compress/zstd_fast.c496
-rw-r--r--lib/zstd/compress/zstd_fast.h31
-rw-r--r--lib/zstd/compress/zstd_lazy.c1414
-rw-r--r--lib/zstd/compress/zstd_lazy.h81
-rw-r--r--lib/zstd/compress/zstd_ldm.c686
-rw-r--r--lib/zstd/compress/zstd_ldm.h110
-rw-r--r--lib/zstd/compress/zstd_ldm_geartab.h103
-rw-r--r--lib/zstd/compress/zstd_opt.c1346
-rw-r--r--lib/zstd/compress/zstd_opt.h50
-rw-r--r--lib/zstd/decompress.c2531
-rw-r--r--lib/zstd/decompress/huf_decompress.c1206
-rw-r--r--lib/zstd/decompress/zstd_ddict.c241
-rw-r--r--lib/zstd/decompress/zstd_ddict.h44
-rw-r--r--lib/zstd/decompress/zstd_decompress.c2085
-rw-r--r--lib/zstd/decompress/zstd_decompress_block.c1540
-rw-r--r--lib/zstd/decompress/zstd_decompress_block.h62
-rw-r--r--lib/zstd/decompress/zstd_decompress_internal.h202
-rw-r--r--lib/zstd/decompress_sources.h28
-rw-r--r--lib/zstd/entropy_common.c243
-rw-r--r--lib/zstd/error_private.h53
-rw-r--r--lib/zstd/fse.h575
-rw-r--r--lib/zstd/fse_compress.c795
-rw-r--r--lib/zstd/fse_decompress.c325
-rw-r--r--lib/zstd/huf.h212
-rw-r--r--lib/zstd/huf_compress.c773
-rw-r--r--lib/zstd/huf_decompress.c960
-rw-r--r--lib/zstd/mem.h151
-rw-r--r--lib/zstd/zstd_common.c75
-rw-r--r--lib/zstd/zstd_compress_module.c160
-rw-r--r--lib/zstd/zstd_decompress_module.c105
-rw-r--r--lib/zstd/zstd_internal.h273
-rw-r--r--lib/zstd/zstd_opt.h1014
-rw-r--r--mm/kasan/hw_tags.c14
-rw-r--r--mm/kasan/sw_tags.c2
-rw-r--r--mm/memcontrol.c4
-rw-r--r--mm/memory-failure.c14
-rw-r--r--mm/migrate.c151
-rw-r--r--mm/page-writeback.c2
-rw-r--r--mm/page_owner.c6
-rw-r--r--mm/shmem.c62
-rw-r--r--mm/userfaultfd.c5
-rw-r--r--net/8021q/vlan.c3
-rw-r--r--net/8021q/vlan_dev.c3
-rw-r--r--net/can/j1939/main.c7
-rw-r--r--net/can/j1939/transport.c11
-rw-r--r--net/ceph/mon_client.c3
-rw-r--r--net/ceph/osd_client.c60
-rw-r--r--net/core/datagram.c3
-rw-r--r--net/core/dev.c7
-rw-r--r--net/core/devlink.c2
-rw-r--r--net/core/filter.c64
-rw-r--r--net/core/skbuff.c17
-rw-r--r--net/core/sock.c2
-rw-r--r--net/core/sock_map.c6
-rw-r--r--net/dsa/tag_ocelot.c3
-rw-r--r--net/ethtool/pause.c3
-rw-r--r--net/ipv4/tcp.c22
-rw-r--r--net/ipv4/tcp_bpf.c48
-rw-r--r--net/ipv4/tcp_output.c27
-rw-r--r--net/ipv6/seg6.c2
-rw-r--r--net/ipv6/tcp_ipv6.c1
-rw-r--r--net/ipv6/udp.c6
-rw-r--r--net/mctp/af_mctp.c24
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c2
-rw-r--r--net/netfilter/nfnetlink_queue.c2
-rw-r--r--net/nfc/netlink.c15
-rw-r--r--net/sched/sch_taprio.c27
-rw-r--r--net/sctp/sm_statefuns.c32
-rw-r--r--net/sctp/socket.c5
-rw-r--r--net/smc/af_smc.c18
-rw-r--r--net/smc/smc_tracepoint.h2
-rw-r--r--net/strparser/strparser.c10
-rw-r--r--net/vmw_vsock/af_vsock.c2
-rw-r--r--scripts/coccinelle/misc/do_div.cocci155
-rwxr-xr-xscripts/remove-stale-files5
-rw-r--r--security/apparmor/apparmorfs.c17
-rw-r--r--security/apparmor/include/file.h2
-rw-r--r--security/apparmor/include/label.h5
-rw-r--r--security/apparmor/include/lib.h9
-rw-r--r--security/apparmor/include/policy.h6
-rw-r--r--security/apparmor/label.c7
-rw-r--r--security/apparmor/lsm.c42
-rw-r--r--security/apparmor/path.c2
-rw-r--r--security/apparmor/policy.c62
-rw-r--r--security/apparmor/policy_unpack.c2
-rw-r--r--security/apparmor/procattr.c2
-rw-r--r--security/security.c8
-rw-r--r--security/selinux/hooks.c22
-rw-r--r--security/selinux/include/netlabel.h4
-rw-r--r--security/selinux/netlabel.c18
-rw-r--r--sound/core/Makefile1
-rw-r--r--sound/core/memalloc.c105
-rw-r--r--sound/core/sgbuf.c201
-rw-r--r--sound/core/timer.c17
-rw-r--r--sound/firewire/Kconfig3
-rw-r--r--sound/firewire/fireworks/fireworks_stream.c5
-rw-r--r--sound/firewire/motu/motu-protocol-v3.c33
-rw-r--r--sound/firewire/motu/motu.c2
-rw-r--r--sound/firewire/motu/motu.h2
-rw-r--r--sound/pci/hda/hda_intel.c4
-rw-r--r--sound/pci/hda/patch_realtek.c17
-rw-r--r--sound/pci/rme9652/hdsp.c41
-rw-r--r--sound/pci/rme9652/rme9652.c41
-rw-r--r--sound/synth/emux/emux.c2
-rw-r--r--tools/Makefile16
-rw-r--r--tools/arch/x86/include/asm/msr-index.h2
-rw-r--r--tools/arch/x86/include/uapi/asm/prctl.h4
-rw-r--r--tools/bpf/bpftool/Makefile32
-rw-r--r--tools/include/uapi/asm-generic/unistd.h5
-rw-r--r--tools/include/uapi/drm/i915_drm.h242
-rw-r--r--tools/include/uapi/linux/prctl.h3
-rw-r--r--tools/include/uapi/sound/asound.h2
-rw-r--r--tools/lib/bpf/bpf.c4
-rw-r--r--tools/lib/lockdep/.gitignore2
-rw-r--r--tools/lib/lockdep/Build1
-rw-r--r--tools/lib/lockdep/Makefile162
-rw-r--r--tools/lib/lockdep/common.c29
-rw-r--r--tools/lib/lockdep/include/liblockdep/common.h54
-rw-r--r--tools/lib/lockdep/include/liblockdep/mutex.h73
-rw-r--r--tools/lib/lockdep/include/liblockdep/rwlock.h87
-rwxr-xr-xtools/lib/lockdep/lockdep3
-rw-r--r--tools/lib/lockdep/lockdep.c33
-rw-r--r--tools/lib/lockdep/lockdep_internals.h1
-rw-r--r--tools/lib/lockdep/lockdep_states.h1
-rw-r--r--tools/lib/lockdep/preload.c443
-rw-r--r--tools/lib/lockdep/rbtree.c1
-rwxr-xr-xtools/lib/lockdep/run_tests.sh47
-rw-r--r--tools/lib/lockdep/tests/AA.c14
-rw-r--r--tools/lib/lockdep/tests/AA.sh2
-rw-r--r--tools/lib/lockdep/tests/ABA.c14
-rw-r--r--tools/lib/lockdep/tests/ABA.sh2
-rw-r--r--tools/lib/lockdep/tests/ABBA.c26
-rw-r--r--tools/lib/lockdep/tests/ABBA.sh2
-rw-r--r--tools/lib/lockdep/tests/ABBA_2threads.c47
-rw-r--r--tools/lib/lockdep/tests/ABBA_2threads.sh2
-rw-r--r--tools/lib/lockdep/tests/ABBCCA.c20
-rw-r--r--tools/lib/lockdep/tests/ABBCCA.sh2
-rw-r--r--tools/lib/lockdep/tests/ABBCCDDA.c23
-rw-r--r--tools/lib/lockdep/tests/ABBCCDDA.sh2
-rw-r--r--tools/lib/lockdep/tests/ABCABC.c20
-rw-r--r--tools/lib/lockdep/tests/ABCABC.sh2
-rw-r--r--tools/lib/lockdep/tests/ABCDBCDA.c23
-rw-r--r--tools/lib/lockdep/tests/ABCDBCDA.sh2
-rw-r--r--tools/lib/lockdep/tests/ABCDBDDA.c23
-rw-r--r--tools/lib/lockdep/tests/ABCDBDDA.sh2
-rw-r--r--tools/lib/lockdep/tests/WW.c14
-rw-r--r--tools/lib/lockdep/tests/WW.sh2
-rw-r--r--tools/lib/lockdep/tests/common.h13
-rw-r--r--tools/lib/lockdep/tests/unlock_balance.c15
-rw-r--r--tools/lib/lockdep/tests/unlock_balance.sh2
-rw-r--r--tools/objtool/check.c3
-rw-r--r--tools/perf/Documentation/perf-record.txt2
-rw-r--r--tools/perf/Makefile.perf20
-rw-r--r--tools/perf/arch/arm/include/arch-tests.h2
-rw-r--r--tools/perf/arch/arm/tests/arch-tests.c16
-rw-r--r--tools/perf/arch/arm/tests/vectors-page.c5
-rw-r--r--tools/perf/arch/arm64/include/arch-tests.h2
-rw-r--r--tools/perf/arch/arm64/tests/arch-tests.c11
-rw-r--r--tools/perf/arch/arm64/util/arm-spe.c283
-rw-r--r--tools/perf/arch/powerpc/include/arch-tests.h2
-rw-r--r--tools/perf/arch/powerpc/tests/arch-tests.c12
-rw-r--r--tools/perf/arch/x86/entry/syscalls/syscall_64.tbl1
-rw-r--r--tools/perf/arch/x86/include/arch-tests.h14
-rw-r--r--tools/perf/arch/x86/tests/arch-tests.c47
-rw-r--r--tools/perf/arch/x86/tests/bp-modify.c2
-rw-r--r--tools/perf/arch/x86/tests/insn-x86.c2
-rw-r--r--tools/perf/arch/x86/tests/intel-cqm.c2
-rw-r--r--tools/perf/arch/x86/tests/intel-pt-pkt-decoder-test.c2
-rw-r--r--tools/perf/arch/x86/tests/rdpmc.c2
-rw-r--r--tools/perf/arch/x86/tests/sample-parsing.c2
-rw-r--r--tools/perf/bench/futex-lock-pi.c1
-rw-r--r--tools/perf/bench/futex-requeue.c1
-rw-r--r--tools/perf/bench/futex-wake-parallel.c1
-rw-r--r--tools/perf/bench/futex-wake.c1
-rw-r--r--tools/perf/builtin-trace.c4
-rw-r--r--tools/perf/design.txt3
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/metrics.json676
-rw-r--r--tools/perf/tests/api-io.c6
-rw-r--r--tools/perf/tests/attr.c4
-rw-r--r--tools/perf/tests/backward-ring-buffer.c4
-rw-r--r--tools/perf/tests/bitmap.c4
-rw-r--r--tools/perf/tests/bp_account.c36
-rw-r--r--tools/perf/tests/bp_signal.c34
-rw-r--r--tools/perf/tests/bp_signal_overflow.c9
-rw-r--r--tools/perf/tests/bpf.c71
-rw-r--r--tools/perf/tests/builtin-test.c578
-rw-r--r--tools/perf/tests/clang.c54
-rw-r--r--tools/perf/tests/code-reading.c4
-rw-r--r--tools/perf/tests/cpumap.c10
-rw-r--r--tools/perf/tests/demangle-java-test.c4
-rw-r--r--tools/perf/tests/demangle-ocaml-test.c4
-rw-r--r--tools/perf/tests/dlfilter-test.c4
-rw-r--r--tools/perf/tests/dso-data.c10
-rw-r--r--tools/perf/tests/dwarf-unwind.c5
-rw-r--r--tools/perf/tests/event-times.c4
-rw-r--r--tools/perf/tests/event_update.c4
-rw-r--r--tools/perf/tests/evsel-roundtrip-name.c5
-rw-r--r--tools/perf/tests/evsel-tp-sched.c5
-rw-r--r--tools/perf/tests/expand-cgroup.c6
-rw-r--r--tools/perf/tests/expr.c38
-rw-r--r--tools/perf/tests/fdarray.c7
-rw-r--r--tools/perf/tests/genelf.c6
-rw-r--r--tools/perf/tests/hists_cumulate.c4
-rw-r--r--tools/perf/tests/hists_filter.c4
-rw-r--r--tools/perf/tests/hists_link.c4
-rw-r--r--tools/perf/tests/hists_output.c4
-rw-r--r--tools/perf/tests/is_printable_array.c4
-rw-r--r--tools/perf/tests/keep-tracking.c4
-rw-r--r--tools/perf/tests/kmod-path.c4
-rw-r--r--tools/perf/tests/llvm.c74
-rw-r--r--tools/perf/tests/maps.c4
-rw-r--r--tools/perf/tests/mem.c4
-rw-r--r--tools/perf/tests/mem2node.c4
-rw-r--r--tools/perf/tests/mmap-basic.c4
-rw-r--r--tools/perf/tests/mmap-thread-lookup.c4
-rw-r--r--tools/perf/tests/openat-syscall-all-cpus.c5
-rw-r--r--tools/perf/tests/openat-syscall-tp-fields.c5
-rw-r--r--tools/perf/tests/openat-syscall.c5
-rw-r--r--tools/perf/tests/parse-events.c4
-rw-r--r--tools/perf/tests/parse-metric.c4
-rw-r--r--tools/perf/tests/parse-no-sample-id-all.c5
-rw-r--r--tools/perf/tests/pe-file-parsing.c6
-rw-r--r--tools/perf/tests/perf-hooks.c4
-rw-r--r--tools/perf/tests/perf-record.c4
-rw-r--r--tools/perf/tests/perf-time-to-tsc.c30
-rw-r--r--tools/perf/tests/pfm.c63
-rw-r--r--tools/perf/tests/pmu-events.c73
-rw-r--r--tools/perf/tests/pmu.c4
-rw-r--r--tools/perf/tests/python-use.c4
-rw-r--r--tools/perf/tests/sample-parsing.c4
-rw-r--r--tools/perf/tests/sdt.c6
-rwxr-xr-xtools/perf/tests/shell/record+zstd_comp_decomp.sh2
-rwxr-xr-xtools/perf/tests/shell/stat_all_pmu.sh4
-rwxr-xr-xtools/perf/tests/shell/stat_bpf_counters.sh2
-rwxr-xr-xtools/perf/tests/shell/test_arm_spe.sh89
-rw-r--r--tools/perf/tests/stat.c11
-rw-r--r--tools/perf/tests/sw-clock.c4
-rw-r--r--tools/perf/tests/switch-tracking.c4
-rw-r--r--tools/perf/tests/task-exit.c4
-rw-r--r--tools/perf/tests/tests.h238
-rw-r--r--tools/perf/tests/thread-map.c10
-rw-r--r--tools/perf/tests/thread-maps-share.c4
-rw-r--r--tools/perf/tests/time-utils-test.c4
-rw-r--r--tools/perf/tests/topology.c4
-rw-r--r--tools/perf/tests/unit_number__scnprintf.c4
-rw-r--r--tools/perf/tests/vmlinux-kallsyms.c5
-rw-r--r--tools/perf/tests/wp.c124
-rw-r--r--tools/perf/trace/beauty/beauty.h5
-rw-r--r--tools/perf/trace/beauty/sockaddr.c2
-rwxr-xr-xtools/perf/trace/beauty/sockaddr.sh24
-rw-r--r--tools/perf/trace/beauty/socket.c21
-rwxr-xr-xtools/perf/trace/beauty/socket.sh38
-rwxr-xr-xtools/perf/trace/beauty/socket_ipproto.sh12
-rw-r--r--tools/perf/util/annotate.c13
-rw-r--r--tools/perf/util/annotate.h3
-rw-r--r--tools/perf/util/arm-spe-decoder/arm-spe-decoder.c2
-rw-r--r--tools/perf/util/arm-spe-decoder/arm-spe-decoder.h1
-rw-r--r--tools/perf/util/arm-spe.c122
-rw-r--r--tools/perf/util/bpf-event.c33
-rw-r--r--tools/perf/util/c++/clang-c.h8
-rw-r--r--tools/perf/util/c++/clang-test.cpp6
-rw-r--r--tools/perf/util/cputopo.c78
-rw-r--r--tools/perf/util/cputopo.h33
-rw-r--r--tools/perf/util/cs-etm.c2
-rw-r--r--tools/perf/util/env.c5
-rw-r--r--tools/perf/util/env.h2
-rw-r--r--tools/perf/util/evsel.c12
-rw-r--r--tools/perf/util/evsel.h1
-rw-r--r--tools/perf/util/expr.c65
-rw-r--r--tools/perf/util/expr.h4
-rw-r--r--tools/perf/util/expr.l16
-rw-r--r--tools/perf/util/expr.y73
-rw-r--r--tools/perf/util/header.c20
-rw-r--r--tools/perf/util/stat-shadow.c7
-rw-r--r--tools/perf/util/symbol.c9
-rw-r--r--tools/perf/util/symbol.h19
-rw-r--r--tools/testing/selftests/bpf/prog_tests/netcnt.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_bpffs.c85
-rw-r--r--tools/testing/selftests/bpf/progs/for_each_array_map_elem.c12
-rwxr-xr-xtools/testing/selftests/bpf/test_xdp_redirect_multi.sh62
-rw-r--r--tools/testing/selftests/bpf/verifier/spill_fill.c17
-rw-r--r--tools/testing/selftests/bpf/xdp_redirect_multi.c4
-rw-r--r--tools/testing/selftests/kvm/Makefile3
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util.h1
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/svm_util.h2
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c24
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/svm.c13
-rw-r--r--tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c203
-rw-r--r--tools/testing/selftests/net/Makefile9
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh2
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_changes.sh2
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh13
-rw-r--r--tools/testing/selftests/net/forwarding/mirror_lib.sh3
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_vlan.sh4
-rwxr-xr-xtools/testing/selftests/net/gre_gso.sh9
-rw-r--r--tools/testing/selftests/net/reuseport_bpf_numa.c4
-rwxr-xr-xtools/testing/selftests/net/test_vxlan_under_vrf.sh2
-rw-r--r--tools/testing/selftests/net/tls.c3
-rw-r--r--tools/testing/selftests/net/udpgso_bench_rx.c11
-rw-r--r--virt/kvm/kvm_main.c10
955 files changed, 43254 insertions, 23804 deletions
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index f627e705e663b4..b268e3e18b4a35 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -512,3 +512,19 @@ Date: July 2021
Contact: "Daeho Jeong" <daehojeong@google.com>
Description: You can control the multiplier value of bdi device readahead window size
between 2 (default) and 256 for POSIX_FADV_SEQUENTIAL advise option.
+
+What: /sys/fs/f2fs/<disk>/max_fragment_chunk
+Date: August 2021
+Contact: "Daeho Jeong" <daehojeong@google.com>
+Description: With "mode=fragment:block" mount options, we can scatter block allocation.
+ f2fs will allocate 1..<max_fragment_chunk> blocks in a chunk and make a hole
+ in the length of 1..<max_fragment_hole> by turns. This value can be set
+ between 1..512 and the default value is 4.
+
+What: /sys/fs/f2fs/<disk>/max_fragment_hole
+Date: August 2021
+Contact: "Daeho Jeong" <daehojeong@google.com>
+Description: With "mode=fragment:block" mount options, we can scatter block allocation.
+ f2fs will allocate 1..<max_fragment_chunk> blocks in a chunk and make a hole
+ in the length of 1..<max_fragment_hole> by turns. This value can be set
+ between 1..512 and the default value is 4.
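
The two sysfs nodes above only take effect together with the "mode=fragment:block"
mount option described in Documentation/filesystems/f2fs.rst. A minimal shell sketch
of how they might be exercised, assuming /dev/sdb1 holds an f2fs test partition
(device name, mount point and values are illustrative, not defaults):

    mount -o mode=fragment:block /dev/sdb1 /mnt/f2fs
    echo 8 > /sys/fs/f2fs/sdb1/max_fragment_chunk   # allocate chunks of 1..8 blocks
    echo 4 > /sys/fs/f2fs/sdb1/max_fragment_hole    # leave holes of 1..4 blocks
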
diff --git a/Documentation/devicetree/bindings/clock/sifive/fu740-prci.yaml b/Documentation/devicetree/bindings/clock/sifive/fu740-prci.yaml
index e17143cac316fa..252085a0cf65e5 100644
--- a/Documentation/devicetree/bindings/clock/sifive/fu740-prci.yaml
+++ b/Documentation/devicetree/bindings/clock/sifive/fu740-prci.yaml
@@ -42,6 +42,9 @@ properties:
"#clock-cells":
const: 1
+ "#reset-cells":
+ const: 1
+
required:
- compatible
- reg
@@ -57,4 +60,5 @@ examples:
reg = <0x10000000 0x1000>;
clocks = <&hfclk>, <&rtcclk>;
#clock-cells = <1>;
+ #reset-cells = <1>;
};
diff --git a/Documentation/devicetree/bindings/clock/silabs,si5351.txt b/Documentation/devicetree/bindings/clock/silabs,si5351.txt
index 8fe6f80afadee1..bfda6af76beeb5 100644
--- a/Documentation/devicetree/bindings/clock/silabs,si5351.txt
+++ b/Documentation/devicetree/bindings/clock/silabs,si5351.txt
@@ -2,7 +2,7 @@ Binding for Silicon Labs Si5351a/b/c programmable i2c clock generator.
Reference
[1] Si5351A/B/C Data Sheet
- https://www.silabs.com/Support%20Documents/TechnicalDocs/Si5351.pdf
+ https://www.skyworksinc.com/-/media/Skyworks/SL/documents/public/data-sheets/Si5351-B.pdf
The Si5351a/b/c are programmable i2c clock generators with up to 8 output
clocks. Si5351a also has a reduced pin-count package (MSOP10) where only
diff --git a/Documentation/devicetree/bindings/clock/socionext,uniphier-clock.yaml b/Documentation/devicetree/bindings/clock/socionext,uniphier-clock.yaml
index c3930edc410f87..9a0cc73416303a 100644
--- a/Documentation/devicetree/bindings/clock/socionext,uniphier-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/socionext,uniphier-clock.yaml
@@ -23,6 +23,7 @@ properties:
- socionext,uniphier-ld11-clock
- socionext,uniphier-ld20-clock
- socionext,uniphier-pxs3-clock
+ - socionext,uniphier-nx1-clock
- description: Media I/O (MIO) clock, SD clock
enum:
- socionext,uniphier-ld4-mio-clock
@@ -33,6 +34,7 @@ properties:
- socionext,uniphier-ld11-mio-clock
- socionext,uniphier-ld20-sd-clock
- socionext,uniphier-pxs3-sd-clock
+ - socionext,uniphier-nx1-sd-clock
- description: Peripheral clock
enum:
- socionext,uniphier-ld4-peri-clock
@@ -43,6 +45,10 @@ properties:
- socionext,uniphier-ld11-peri-clock
- socionext,uniphier-ld20-peri-clock
- socionext,uniphier-pxs3-peri-clock
+ - socionext,uniphier-nx1-peri-clock
+ - description: SoC-glue clock
+ enum:
+ - socionext,uniphier-pro4-sg-clock
"#clock-cells":
const: 1
diff --git a/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml b/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml
index 304a1367faaa72..1faae3e323a462 100644
--- a/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml
@@ -49,11 +49,26 @@ properties:
properties:
port@0:
- $ref: /schemas/graph.yaml#/properties/port
+ $ref: /schemas/graph.yaml#/$defs/port-base
description: |
For LVDS encoders, port 0 is the parallel input
For LVDS decoders, port 0 is the LVDS input
+ properties:
+ endpoint:
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ data-mapping:
+ enum:
+ - jeida-18
+ - jeida-24
+ - vesa-24
+ description: |
+ The color signals mapping order. See details in
+ Documentation/devicetree/bindings/display/panel/lvds.yaml
+
port@1:
$ref: /schemas/graph.yaml#/properties/port
description: |
@@ -71,6 +86,22 @@ properties:
power-supply: true
+if:
+ not:
+ properties:
+ compatible:
+ contains:
+ const: lvds-decoder
+then:
+ properties:
+ ports:
+ properties:
+ port@0:
+ properties:
+ endpoint:
+ properties:
+ data-mapping: false
+
required:
- compatible
- ports
diff --git a/Documentation/devicetree/bindings/display/bridge/ps8640.yaml b/Documentation/devicetree/bindings/display/bridge/ps8640.yaml
index fce82b605c8bf4..cdaf7a7a8f8867 100644
--- a/Documentation/devicetree/bindings/display/bridge/ps8640.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/ps8640.yaml
@@ -40,6 +40,9 @@ properties:
vdd33-supply:
description: Regulator for 3.3V digital core power.
+ aux-bus:
+ $ref: /schemas/display/dp-aux-bus.yaml#
+
ports:
$ref: /schemas/graph.yaml#/properties/ports
@@ -98,7 +101,21 @@ examples:
reg = <1>;
ps8640_out: endpoint {
remote-endpoint = <&panel_in>;
- };
+ };
+ };
+ };
+
+ aux-bus {
+ panel {
+ compatible = "boe,nv133fhm-n62";
+ power-supply = <&pp3300_dx_edp>;
+ backlight = <&backlight>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&ps8640_out>;
+ };
+ };
};
};
};
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
index 335776c45474c5..f3c9395d23b6a4 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
@@ -166,6 +166,8 @@ properties:
- innolux,at070tn92
# Innolux G070Y2-L01 7" WVGA (800x480) TFT LCD panel
- innolux,g070y2-l01
+ # Innolux G070Y2-T02 7" WVGA (800x480) TFT LCD TTL panel
+ - innolux,g070y2-t02
# Innolux Corporation 10.1" G101ICE-L01 WXGA (1280x800) LVDS panel
- innolux,g101ice-l01
# Innolux Corporation 12.1" WXGA (1280x800) TFT LCD panel
@@ -309,6 +311,8 @@ properties:
- urt,umsh-8596md-11t
- urt,umsh-8596md-19t
- urt,umsh-8596md-20t
+ # Vivax TPC-9150 tablet 9.0" WSVGA TFT LCD panel
+ - vivax,tpc9150-panel
# VXT 800x480 color TFT LCD panel
- vxt,vl050-8048nt-c01
# Winstar Display Corporation 3.5" QVGA (320x240) TFT LCD panel
@@ -317,6 +321,7 @@ properties:
- yes-optoelectronics,ytc700tlag-05-201c
backlight: true
+ ddc-i2c-bus: true
enable-gpios: true
port: true
power-supply: true
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,ls060t1sx01.yaml b/Documentation/devicetree/bindings/display/panel/sharp,ls060t1sx01.yaml
new file mode 100644
index 00000000000000..271c097cc9a443
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/sharp,ls060t1sx01.yaml
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/sharp,ls060t1sx01.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sharp Microelectronics 6.0" FullHD TFT LCD panel
+
+maintainers:
+ - Dmitry Baryskov <dmitry.baryshkov@linaro.org>
+
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+ compatible:
+ const: sharp,ls060t1sx01
+
+ reg: true
+ backlight: true
+ reset-gpios: true
+ port: true
+
+ avdd-supply:
+ description: handle of the regulator that provides the positive supply voltage
+ avee-supply:
+ description: handle of the regulator that provides the negative supply voltage
+ vddi-supply:
+ description: handle of the regulator that provides the I/O supply voltage
+ vddh-supply:
+ description: handle of the regulator that provides the analog supply voltage
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ panel@0 {
+ compatible = "sharp,ls060t1sx01";
+ reg = <0>;
+ avdd-supply = <&pm8941_l22>;
+ backlight = <&backlight>;
+ reset-gpios = <&pm8916_gpios 25 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/input/cypress-sf.yaml b/Documentation/devicetree/bindings/input/cypress-sf.yaml
new file mode 100644
index 00000000000000..c0b05146627280
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/cypress-sf.yaml
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/cypress-sf.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cypress StreetFighter touchkey controller
+
+maintainers:
+ - Yassine Oudjana <y.oudjana@protonmail.com>
+
+allOf:
+ - $ref: input.yaml#
+
+properties:
+ compatible:
+ const: cypress,sf3155
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ avdd-supply:
+ description: Regulator for AVDD analog voltage
+
+ vdd-supply:
+ description: Regulator for VDD digital voltage
+
+ linux,keycodes:
+ minItems: 1
+ maxItems: 8
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - avdd-supply
+ - vdd-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/input/input.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ touchkey@28 {
+ compatible = "cypress,sf3155";
+ reg = <0x28>;
+ interrupt-parent = <&msmgpio>;
+ interrupts = <77 IRQ_TYPE_EDGE_FALLING>;
+ avdd-supply = <&vreg_l6a_1p8>;
+ vdd-supply = <&vdd_3v2_tp>;
+ linux,keycodes = <KEY_BACK KEY_MENU>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/input/microchip,cap11xx.yaml b/Documentation/devicetree/bindings/input/microchip,cap11xx.yaml
index fa0f37a90ac9af..d5d6bced3148ff 100644
--- a/Documentation/devicetree/bindings/input/microchip,cap11xx.yaml
+++ b/Documentation/devicetree/bindings/input/microchip,cap11xx.yaml
@@ -19,6 +19,7 @@ properties:
- microchip,cap1106
- microchip,cap1126
- microchip,cap1188
+ - microchip,cap1206
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.yaml b/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.yaml
index 81ccb2110162c3..1f5c6384182e01 100644
--- a/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.yaml
+++ b/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.yaml
@@ -35,9 +35,11 @@ properties:
- renesas,tpu-r8a7794 # R-Car E2
- renesas,tpu-r8a7795 # R-Car H3
- renesas,tpu-r8a7796 # R-Car M3-W
+ - renesas,tpu-r8a77961 # R-Car M3-W+
- renesas,tpu-r8a77965 # R-Car M3-N
- renesas,tpu-r8a77970 # R-Car V3M
- renesas,tpu-r8a77980 # R-Car V3H
+ - renesas,tpu-r8a779a0 # R-Car V3U
- const: renesas,tpu
reg:
diff --git a/Documentation/devicetree/bindings/rtc/mstar,msc313-rtc.yaml b/Documentation/devicetree/bindings/rtc/mstar,msc313-rtc.yaml
new file mode 100644
index 00000000000000..114199cf4d28f0
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/mstar,msc313-rtc.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/mstar,msc313-rtc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mstar MSC313e RTC Device Tree Bindings
+
+allOf:
+ - $ref: "rtc.yaml#"
+
+maintainers:
+ - Daniel Palmer <daniel@0x0f.com>
+ - Romain Perier <romain.perier@gmail.com>
+
+properties:
+ compatible:
+ enum:
+ - mstar,msc313-rtc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ start-year: true
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ rtc@2400 {
+ compatible = "mstar,msc313-rtc";
+ reg = <0x2400 0x40>;
+ clocks = <&xtal_div2>;
+ interrupts-extended = <&intc_irq GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/rtc/nxp,pcf85063.txt b/Documentation/devicetree/bindings/rtc/nxp,pcf85063.txt
index 627bb533eff7ab..6439682c931960 100644
--- a/Documentation/devicetree/bindings/rtc/nxp,pcf85063.txt
+++ b/Documentation/devicetree/bindings/rtc/nxp,pcf85063.txt
@@ -13,10 +13,19 @@ Optional property:
expressed in femto Farad (fF). Valid values are 7000 and 12500.
Default value (if no value is specified) is 7000fF.
+Optional child node:
+- clock: Provide this if the square wave pin is used as a boot-enabled fixed clock.
+
Example:
pcf85063: rtc@51 {
compatible = "nxp,pcf85063";
reg = <0x51>;
quartz-load-femtofarads = <12500>;
+
+ clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ };
};
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 983f5e4afbc563..66d6432fd78122 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -1286,6 +1286,8 @@ patternProperties:
description: Vitesse Semiconductor Corporation
"^vivante,.*":
description: Vivante Corporation
+ "^vivax,.*":
+ description: Vivax brand by M SAN Grupa d.o.o.
"^vocore,.*":
description: VoCore Studio
"^voipac,.*":
diff --git a/Documentation/filesystems/f2fs.rst b/Documentation/filesystems/f2fs.rst
index 6f3c6e91346df0..d7b84695f56aea 100644
--- a/Documentation/filesystems/f2fs.rst
+++ b/Documentation/filesystems/f2fs.rst
@@ -197,10 +197,29 @@ fault_type=%d Support configuring fault injection type, should be
FAULT_DISCARD 0x000002000
FAULT_WRITE_IO 0x000004000
FAULT_SLAB_ALLOC 0x000008000
+ FAULT_DQUOT_INIT 0x000010000
=================== ===========
mode=%s Control block allocation mode which supports "adaptive"
and "lfs". In "lfs" mode, there should be no random
writes towards main area.
+ "fragment:segment" and "fragment:block" are newly added here.
+ These are developer options for experiments to simulate filesystem
+ fragmentation/after-GC situation itself. The developers use these
+ modes to understand filesystem fragmentation/after-GC condition well,
+ and eventually get some insights to handle them better.
+ In "fragment:segment", f2fs allocates a new segment in ramdom
+ position. With this, we can simulate the after-GC condition.
+ In "fragment:block", we can scatter block allocation with
+ "max_fragment_chunk" and "max_fragment_hole" sysfs nodes.
+ We added some randomness to both chunk and hole size to make
+ it closer to a realistic IO pattern. So, in this mode, f2fs will allocate
+ 1..<max_fragment_chunk> blocks in a chunk and make a hole in the
+ length of 1..<max_fragment_hole> by turns. With this, the newly
+ allocated blocks will be scattered throughout the whole partition.
+ Note that "fragment:block" implicitly enables "fragment:segment"
+ option for more randomness.
+ Please use these options for your experiments; we strongly
+ recommend re-formatting the filesystem after using them.
io_bits=%u Set the bit size of write IO requests. It should be set
with "mode=lfs".
usrquota Enable plain user disk quota accounting.
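
A rough sketch of the experiment flow suggested above, assuming /dev/sdb1 is a
scratch partition (paths and the workload step are placeholders, not part of the
patch):

    mount -o mode=fragment:segment /dev/sdb1 /mnt/f2fs   # pick new segments at random positions
    # ... run the workload whose fragmentation/after-GC behaviour is being studied ...
    umount /mnt/f2fs
    mkfs.f2fs -f /dev/sdb1                               # re-format afterwards, as recommended
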
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index 6613543955e94c..60d1d7ee071973 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -314,16 +314,19 @@ Level: Advanced
Garbage collect fbdev scrolling acceleration
--------------------------------------------
-Scroll acceleration is disabled in fbcon by hard-wiring p->scrollmode =
-SCROLL_REDRAW. There's a ton of code this will allow us to remove:
+Scroll acceleration has been disabled in fbcon; it now always behaves as the
+old SCROLL_REDRAW mode did. A ton of code was removed from fbcon.c and the
+bmove hook was removed from fbcon_ops.
+Remaining tasks:
-- lots of code in fbcon.c
-
-- a bunch of the hooks in fbcon_ops, maybe the remaining hooks could be called
+- a bunch of the hooks in fbcon_ops could be removed or simplified by calling them
directly instead of the function table (with a switch on p->rotate)
- fb_copyarea is unused after this, and can be deleted from all drivers
+- after that, fb_copyarea can be deleted from fb_ops in include/linux/fb.h as
+ well as cfb_copyarea
+
Note that not all acceleration code can be deleted, since clearing and cursor
support is still accelerated, which might be good candidates for further
deletion projects.
diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
index c61cc0219f4c1c..c04431144f7ab9 100644
--- a/Documentation/networking/ip-sysctl.rst
+++ b/Documentation/networking/ip-sysctl.rst
@@ -1004,13 +1004,11 @@ udp_l3mdev_accept - BOOLEAN
udp_mem - vector of 3 INTEGERs: min, pressure, max
Number of pages allowed for queueing by all UDP sockets.
- min: Below this number of pages UDP is not bothered about its
- memory appetite. When amount of memory allocated by UDP exceeds
- this number, UDP starts to moderate memory usage.
+ min: Number of pages allowed for queueing by all UDP sockets.
pressure: This value was introduced to follow format of tcp_mem.
- max: Number of pages allowed for queueing by all UDP sockets.
+ max: This value was introduced to follow the format of tcp_mem.
Default is calculated at boot time from amount of available memory.
diff --git a/Documentation/security/SCTP.rst b/Documentation/security/SCTP.rst
index 0bcf6c1245ee9a..d5fd6ccc3dcbdc 100644
--- a/Documentation/security/SCTP.rst
+++ b/Documentation/security/SCTP.rst
@@ -26,11 +26,11 @@ described in the `SCTP SELinux Support`_ chapter.
security_sctp_assoc_request()
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Passes the ``@ep`` and ``@chunk->skb`` of the association INIT packet to the
+Passes the ``@asoc`` and ``@chunk->skb`` of the association INIT packet to the
security module. Returns 0 on success, error on failure.
::
- @ep - pointer to sctp endpoint structure.
+ @asoc - pointer to sctp association structure.
@skb - pointer to skbuff of association packet.
@@ -117,9 +117,9 @@ Called whenever a new socket is created by **accept**\(2)
calls **sctp_peeloff**\(3).
::
- @ep - pointer to current sctp endpoint structure.
+ @asoc - pointer to current sctp association structure.
@sk - pointer to current sock structure.
- @sk - pointer to new sock structure.
+ @newsk - pointer to new sock structure.
security_inet_conn_established()
@@ -151,9 +151,9 @@ establishing an association.
INIT --------------------------------------------->
sctp_sf_do_5_1B_init()
Respond to an INIT chunk.
- SCTP peer endpoint "A" is
- asking for an association. Call
- security_sctp_assoc_request()
+ SCTP peer endpoint "A" is asking
+ for a temporary association.
+ Call security_sctp_assoc_request()
to set the peer label if first
association.
If not first association, check
@@ -163,9 +163,12 @@ establishing an association.
| discard the packet.
|
COOKIE ECHO ------------------------------------------>
- |
- |
- |
+ sctp_sf_do_5_1D_ce()
+ Respond to a COOKIE ECHO chunk.
+ Confirm the cookie and create a
+ permanent association.
+ Call security_sctp_assoc_request() to
+ do the same as for the INIT chunk response.
<------------------------------------------- COOKIE ACK
| |
sctp_sf_do_5_1E_ca |
@@ -200,22 +203,22 @@ hooks with the SELinux specifics expanded below::
security_sctp_assoc_request()
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Passes the ``@ep`` and ``@chunk->skb`` of the association INIT packet to the
+Passes the ``@asoc`` and ``@chunk->skb`` of the association INIT packet to the
security module. Returns 0 on success, error on failure.
::
- @ep - pointer to sctp endpoint structure.
+ @asoc - pointer to sctp association structure.
@skb - pointer to skbuff of association packet.
The security module performs the following operations:
- IF this is the first association on ``@ep->base.sk``, then set the peer
+ IF this is the first association on ``@asoc->base.sk``, then set the peer
sid to that in ``@skb``. This will ensure there is only one peer sid
- assigned to ``@ep->base.sk`` that may support multiple associations.
+ assigned to ``@asoc->base.sk`` that may support multiple associations.
- ELSE validate the ``@ep->base.sk peer_sid`` against the ``@skb peer sid``
+ ELSE validate the ``@asoc->base.sk peer_sid`` against the ``@skb peer sid``
to determine whether the association should be allowed or denied.
- Set the sctp ``@ep sid`` to socket's sid (from ``ep->base.sk``) with
+ Set the sctp ``@asoc sid`` to socket's sid (from ``asoc->base.sk``) with
MLS portion taken from ``@skb peer sid``. This will be used by SCTP
TCP style sockets and peeled off connections as they cause a new socket
to be generated.
@@ -259,13 +262,13 @@ security_sctp_sk_clone()
Called whenever a new socket is created by **accept**\(2) (i.e. a TCP style
socket) or when a socket is 'peeled off' e.g userspace calls
**sctp_peeloff**\(3). ``security_sctp_sk_clone()`` will set the new
-sockets sid and peer sid to that contained in the ``@ep sid`` and
-``@ep peer sid`` respectively.
+socket's sid and peer sid to those contained in the ``@asoc sid`` and
+``@asoc peer sid`` respectively.
::
- @ep - pointer to current sctp endpoint structure.
+ @asoc - pointer to current sctp association structure.
@sk - pointer to current sock structure.
- @sk - pointer to new sock structure.
+ @newsk - pointer to new sock structure.
security_inet_conn_established()
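For orientation (editorial sketch, not part of the patch): a security module's implementation of the renamed hook now receives the association rather than the endpoint. A minimal skeleton, assuming the @asoc/@skb parameters documented above (module name and policy logic are placeholders):

    /* Hypothetical LSM hook body matching the @asoc/@skb parameters above. */
    static int example_sctp_assoc_request(struct sctp_association *asoc,
                                          struct sk_buff *skb)
    {
            /*
             * First association on asoc->base.sk: record the peer label found
             * in @skb.  Otherwise validate it against the already-assigned
             * peer label and deny the association on mismatch.
             */
            return 0;
    }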
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index 3b093d6dbe22c4..aeeb071c768819 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -6911,6 +6911,20 @@ MAP_SHARED mmap will result in an -EINVAL return.
When enabled the VMM may make use of the ``KVM_ARM_MTE_COPY_TAGS`` ioctl to
perform a bulk copy of tags to/from the guest.
+7.29 KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM
+-------------------------------------
+
+Architectures: x86 SEV enabled
+Type: vm
+Parameters: args[0] is the fd of the source vm
+Returns: 0 on success
+
+This capability enables userspace to migrate the encryption context from the VM
+indicated by the fd to the VM this is called on.
+
+This is intended to support intra-host migration of VMs between userspace VMMs,
+upgrading the VMM process without interrupting the guest.
+
8. Other capabilities.
======================
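For orientation (editorial sketch, not part of the patch): the new capability is enabled on the destination VM with KVM_ENABLE_CAP, passing the source VM's fd in args[0]. Names below are illustrative:

    /* Hedged sketch: intra-host migration of an SEV encryption context. */
    #include <linux/kvm.h>
    #include <string.h>
    #include <sys/ioctl.h>

    static int move_enc_context(int dst_vm_fd, int src_vm_fd)
    {
            struct kvm_enable_cap cap;

            memset(&cap, 0, sizeof(cap));
            cap.cap = KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM;
            cap.args[0] = src_vm_fd;

            /* Issued against the destination VM; it pulls the context over. */
            return ioctl(dst_vm_fd, KVM_ENABLE_CAP, &cap);
    }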
diff --git a/Documentation/vm/hmm.rst b/Documentation/vm/hmm.rst
index a14c2938e7af3c..f2a59ed82ed374 100644
--- a/Documentation/vm/hmm.rst
+++ b/Documentation/vm/hmm.rst
@@ -360,7 +360,7 @@ between device driver specific code and shared common code:
system memory page, locks the page with ``lock_page()``, and fills in the
``dst`` array entry with::
- dst[i] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+ dst[i] = migrate_pfn(page_to_pfn(dpage));
Now that the driver knows that this page is being migrated, it can
invalidate device private MMU mappings and copy device private memory
diff --git a/Documentation/x86/xstate.rst b/Documentation/x86/xstate.rst
index 65de3f054ba5fb..5cec7fb558d603 100644
--- a/Documentation/x86/xstate.rst
+++ b/Documentation/x86/xstate.rst
@@ -63,3 +63,12 @@ kernel sends SIGILL to the application. If the process has permission then
the handler allocates a larger xstate buffer for the task so the large
state can be context switched. In the unlikely cases that the allocation
fails, the kernel sends SIGSEGV.
+
+Dynamic features in signal frames
+---------------------------------
+
+Dynamically enabled features are not written to the signal frame upon signal
+entry if the feature is in its initial configuration. This differs from
+non-dynamic features which are always written regardless of their
+configuration. Signal handlers can examine the XSAVE buffer's XSTATE_BV
+field to determine whether a feature was written.
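Editorial sketch (not part of the patch): a handler can reach XSTATE_BV through the extended state area placed after the legacy FXSAVE image. This assumes the x86-64 layout where the XSAVE header, whose first 8 bytes are XSTATE_BV, starts at offset 512 of the fpstate pointed to by uc_mcontext.fpregs, and it omits the sw_reserved magic checks a robust handler would perform:

    #include <signal.h>
    #include <stdint.h>
    #include <string.h>
    #include <ucontext.h>

    static volatile uint64_t seen_xstate_bv;

    static void handler(int sig, siginfo_t *info, void *ucontext)
    {
            ucontext_t *uc = ucontext;
            const unsigned char *fpstate = (const unsigned char *)uc->uc_mcontext.fpregs;
            uint64_t xstate_bv;

            /* XSAVE header follows the 512-byte legacy area (assumed layout). */
            memcpy(&xstate_bv, fpstate + 512, sizeof(xstate_bv));
            seen_xstate_bv = xstate_bv;
    }

    int main(void)
    {
            struct sigaction sa = { 0 };

            sa.sa_sigaction = handler;
            sa.sa_flags = SA_SIGINFO;
            sigaction(SIGUSR1, &sa, NULL);
            raise(SIGUSR1);
            return 0;
    }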
diff --git a/MAINTAINERS b/MAINTAINERS
index 5b7a13f706fa3e..7a2345ce85213c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -872,9 +872,10 @@ F: Documentation/devicetree/bindings/thermal/amazon,al-thermal.txt
F: drivers/thermal/thermal_mmio.c
AMAZON ETHERNET DRIVERS
-M: Netanel Belgazal <netanel@amazon.com>
+M: Shay Agroskin <shayagr@amazon.com>
M: Arthur Kiyanovski <akiyano@amazon.com>
-R: Guy Tzalik <gtzalik@amazon.com>
+R: David Arinzon <darinzon@amazon.com>
+R: Noam Dagan <ndagan@amazon.com>
R: Saeed Bishara <saeedb@amazon.com>
L: netdev@vger.kernel.org
S: Supported
@@ -2282,6 +2283,7 @@ F: arch/arm/boot/dts/mstar-*
F: arch/arm/mach-mstar/
F: drivers/clk/mstar/
F: drivers/gpio/gpio-msc313.c
+F: drivers/rtc/rtc-msc313.c
F: drivers/watchdog/msc313e_wdt.c
F: include/dt-bindings/clock/mstar-*
F: include/dt-bindings/gpio/msc313-gpio.h
@@ -4457,7 +4459,7 @@ CHIPONE ICN8318 I2C TOUCHSCREEN DRIVER
M: Hans de Goede <hdegoede@redhat.com>
L: linux-input@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/input/touchscreen/chipone_icn8318.txt
+F: Documentation/devicetree/bindings/input/touchscreen/chipone,icn8318.yaml
F: drivers/input/touchscreen/chipone_icn8318.c
CHIPONE ICN8505 I2C TOUCHSCREEN DRIVER
@@ -4674,11 +4676,10 @@ COCCINELLE/Semantic Patches (SmPL)
M: Julia Lawall <Julia.Lawall@inria.fr>
M: Gilles Muller <Gilles.Muller@inria.fr>
M: Nicolas Palix <nicolas.palix@imag.fr>
-M: Michal Marek <michal.lkml@markovi.net>
-L: cocci@systeme.lip6.fr (moderated for non-subscribers)
+L: cocci@inria.fr (moderated for non-subscribers)
S: Supported
-W: http://coccinelle.lip6.fr/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git misc
+W: https://coccinelle.gitlabpages.inria.fr/website/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/jlawall/linux.git
F: Documentation/dev-tools/coccinelle.rst
F: scripts/coccicheck
F: scripts/coccinelle/
@@ -5203,6 +5204,13 @@ L: linux-input@vger.kernel.org
S: Maintained
F: drivers/input/touchscreen/cy8ctma140.c
+CYPRESS STREETFIGHTER TOUCHKEYS DRIVER
+M: Yassine Oudjana <y.oudjana@protonmail.com>
+L: linux-input@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/input/cypress-sf.yaml
+F: drivers/input/keyboard/cypress-sf.c
+
CYTTSP TOUCHSCREEN DRIVER
M: Linus Walleij <linus.walleij@linaro.org>
L: linux-input@vger.kernel.org
@@ -8061,9 +8069,10 @@ F: drivers/media/usb/go7007/
GOODIX TOUCHSCREEN
M: Bastien Nocera <hadess@hadess.net>
+M: Hans de Goede <hdegoede@redhat.com>
L: linux-input@vger.kernel.org
S: Maintained
-F: drivers/input/touchscreen/goodix.c
+F: drivers/input/touchscreen/goodix*
GOOGLE ETHERNET DRIVERS
M: Jeroen de Borst <jeroendb@google.com>
@@ -10798,11 +10807,6 @@ F: drivers/ata/
F: include/linux/ata.h
F: include/linux/libata.h
-LIBLOCKDEP
-M: Sasha Levin <alexander.levin@microsoft.com>
-S: Maintained
-F: tools/lib/lockdep/
-
LIBNVDIMM BLK: MMIO-APERTURE DRIVER
M: Dan Williams <dan.j.williams@intel.com>
M: Vishal Verma <vishal.l.verma@intel.com>
@@ -14408,7 +14412,9 @@ M: Juergen Gross <jgross@suse.com>
M: Deep Shah <sdeep@vmware.com>
M: "VMware, Inc." <pv-drivers@vmware.com>
L: virtualization@lists.linux-foundation.org
+L: x86@kernel.org
S: Supported
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
F: Documentation/virt/paravirt_ops.rst
F: arch/*/include/asm/paravirt*.h
F: arch/*/kernel/paravirt*
@@ -21074,6 +21080,18 @@ F: Documentation/vm/zsmalloc.rst
F: include/linux/zsmalloc.h
F: mm/zsmalloc.c
+ZSTD
+M: Nick Terrell <terrelln@fb.com>
+S: Maintained
+B: https://github.com/facebook/zstd/issues
+T: git git://github.com/terrelln/linux.git
+F: include/linux/zstd*
+F: lib/zstd/
+F: lib/decompress_unzstd.c
+F: crypto/zstd.c
+N: zstd
+K: zstd
+
ZSWAP COMPRESSED SWAP CACHING
M: Seth Jennings <sjenning@redhat.com>
M: Dan Streetman <ddstreet@ieee.org>
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index a305ce256090b2..d52a0b269ee80e 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -68,6 +68,7 @@
#define ESR_ELx_EC_MAX (0x3F)
#define ESR_ELx_EC_SHIFT (26)
+#define ESR_ELx_EC_WIDTH (6)
#define ESR_ELx_EC_MASK (UL(0x3F) << ESR_ELx_EC_SHIFT)
#define ESR_ELx_EC(esr) (((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)
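Editorial note: ESR_ELx_EC_WIDTH makes the width of the exception-class field explicit, so callers no longer rely on EC being the register's top bits. A C sketch of the extraction, equivalent to the existing ESR_ELx_EC() helper and to the ubfx change in the hyp entry paths further below (macro name is illustrative):

    /* Illustrative only: extract the EC field from an ESR_ELx value. */
    #define EXAMPLE_ESR_EC(esr) \
            (((esr) >> ESR_ELx_EC_SHIFT) & ((1UL << ESR_ELx_EC_WIDTH) - 1))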
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 4be8486042a7d8..2a5f7f38006ff9 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -584,7 +584,7 @@ struct kvm_vcpu_stat {
u64 exits;
};
-int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
+void kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 7de96196ae14c9..6f3e677d88f158 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -2868,6 +2868,7 @@ bool this_cpu_has_cap(unsigned int n)
return false;
}
+EXPORT_SYMBOL_GPL(this_cpu_has_cap);
/*
* This helper function is used in a narrow window when,
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index f5490afe1ebfb3..2f03cbfefe6764 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1389,12 +1389,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
}
case KVM_ARM_PREFERRED_TARGET: {
- int err;
struct kvm_vcpu_init init;
- err = kvm_vcpu_preferred_target(&init);
- if (err)
- return err;
+ kvm_vcpu_preferred_target(&init);
if (copy_to_user(argp, &init, sizeof(init)))
return -EFAULT;
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 5ce26bedf23c07..e116c77677309e 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -869,13 +869,10 @@ u32 __attribute_const__ kvm_target_cpu(void)
return KVM_ARM_TARGET_GENERIC_V8;
}
-int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
+void kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
{
u32 target = kvm_target_cpu();
- if (target < 0)
- return -ENODEV;
-
memset(init, 0, sizeof(*init));
/*
@@ -885,8 +882,6 @@ int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
* target type.
*/
init->target = (__u32)target;
-
- return 0;
}
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
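Editorial sketch (not part of the patch): with kvm_vcpu_preferred_target() no longer able to fail, the ioctl only fails on the copy to userspace; the userspace flow is unchanged. Assuming vm_fd and vcpu_fd come from KVM_CREATE_VM/KVM_CREATE_VCPU:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    static int init_vcpu_with_preferred_target(int vm_fd, int vcpu_fd)
    {
            struct kvm_vcpu_init init;

            /* Ask the kernel for the preferred vCPU target on this host. */
            if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init) < 0)
                    return -1;

            return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
    }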
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 9aa9b73475c95d..b6b6801d96d5a9 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -44,7 +44,7 @@
el1_sync: // Guest trapped into EL2
mrs x0, esr_el2
- lsr x0, x0, #ESR_ELx_EC_SHIFT
+ ubfx x0, x0, #ESR_ELx_EC_SHIFT, #ESR_ELx_EC_WIDTH
cmp x0, #ESR_ELx_EC_HVC64
ccmp x0, #ESR_ELx_EC_HVC32, #4, ne
b.ne el1_trap
diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
index 0c6116d34e18f6..3d613e721a75d0 100644
--- a/arch/arm64/kvm/hyp/nvhe/host.S
+++ b/arch/arm64/kvm/hyp/nvhe/host.S
@@ -141,7 +141,7 @@ SYM_FUNC_END(__host_hvc)
.L__vect_start\@:
stp x0, x1, [sp, #-16]!
mrs x0, esr_el2
- lsr x0, x0, #ESR_ELx_EC_SHIFT
+ ubfx x0, x0, #ESR_ELx_EC_SHIFT, #ESR_ELx_EC_WIDTH
cmp x0, #ESR_ELx_EC_HVC64
b.eq __host_hvc
b __host_exit
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
index 862c7b514e20d9..578f71798c2e0b 100644
--- a/arch/arm64/kvm/hyp/nvhe/setup.c
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -178,7 +178,7 @@ static int finalize_host_mappings_walker(u64 addr, u64 end, u32 level,
phys = kvm_pte_to_phys(pte);
if (!addr_is_memory(phys))
- return 0;
+ return -EINVAL;
/*
* Adjust the host stage-2 mappings to match the ownership attributes
@@ -207,8 +207,18 @@ static int finalize_host_mappings(void)
.cb = finalize_host_mappings_walker,
.flags = KVM_PGTABLE_WALK_LEAF,
};
+ int i, ret;
+
+ for (i = 0; i < hyp_memblock_nr; i++) {
+ struct memblock_region *reg = &hyp_memory[i];
+ u64 start = (u64)hyp_phys_to_virt(reg->base);
+
+ ret = kvm_pgtable_walk(&pkvm_pgtable, start, reg->size, &walker);
+ if (ret)
+ return ret;
+ }
- return kvm_pgtable_walk(&pkvm_pgtable, 0, BIT(pkvm_pgtable.ia_bits), &walker);
+ return 0;
}
void __noreturn __pkvm_init_finalise(void)
diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
index 3787ee6fb1a205..792cf6e6ac9205 100644
--- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c
+++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
@@ -474,7 +474,7 @@ bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
return true;
}
-/**
+/*
* Handler for protected VM restricted exceptions.
*
* Inject an undefined exception into the guest and return true to indicate that
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index ec276f75fa05b4..c12cd700598f50 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -310,7 +310,7 @@ void __init kasan_init(void)
kasan_init_depth();
#if defined(CONFIG_KASAN_GENERIC)
/* CONFIG_KASAN_SW_TAGS also requires kasan_init_sw_tags(). */
- pr_info("KernelAddressSanitizer initialized\n");
+ pr_info("KernelAddressSanitizer initialized (generic)\n");
#endif
}
diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
index 277d61a094637c..0d00ef5117dcee 100644
--- a/arch/m68k/Kconfig.cpu
+++ b/arch/m68k/Kconfig.cpu
@@ -53,17 +53,6 @@ config M68000
System-On-Chip devices (eg 68328, 68302, etc). It does not contain
a paging MMU.
-config MCPU32
- bool
- select CPU_HAS_NO_BITFIELDS
- select CPU_HAS_NO_CAS
- select CPU_HAS_NO_UNALIGNED
- select CPU_NO_EFFICIENT_FFS
- help
- The Freescale (was then Motorola) CPU32 is a CPU core that is
- based on the 68020 processor. For the most part it is used in
- System-On-Chip parts, and does not contain a paging MMU.
-
config M68020
bool "68020 support"
depends on MMU
diff --git a/arch/m68k/Kconfig.machine b/arch/m68k/Kconfig.machine
index 36fa0c3ef12967..eeab4f3e6c197d 100644
--- a/arch/m68k/Kconfig.machine
+++ b/arch/m68k/Kconfig.machine
@@ -203,6 +203,7 @@ config INIT_LCD
config MEMORY_RESERVE
int "Memory reservation (MiB)"
depends on (UCSIMM || UCDIMM)
+ default 0
help
Reserve certain memory regions on 68x328 based boards.
diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h
index 7b414099e5fc20..7b93e1fd8ffa90 100644
--- a/arch/m68k/include/asm/bitops.h
+++ b/arch/m68k/include/asm/bitops.h
@@ -451,7 +451,7 @@ static inline unsigned long ffz(unsigned long word)
* generic functions for those.
*/
#if (defined(__mcfisaaplus__) || defined(__mcfisac__)) && \
- !defined(CONFIG_M68000) && !defined(CONFIG_MCPU32)
+ !defined(CONFIG_M68000)
static inline unsigned long __ffs(unsigned long x)
{
__asm__ __volatile__ ("bitrev %0; ff1 %0"
diff --git a/arch/mips/Kbuild.platforms b/arch/mips/Kbuild.platforms
index 2c57994b5217d4..30193bcf9caa87 100644
--- a/arch/mips/Kbuild.platforms
+++ b/arch/mips/Kbuild.platforms
@@ -37,4 +37,4 @@ platform-$(CONFIG_MACH_TX49XX) += txx9/
platform-$(CONFIG_MACH_VR41XX) += vr41xx/
# include the platform specific files
-include $(patsubst %, $(srctree)/arch/mips/%/Platform, $(platform-y))
+include $(patsubst %/, $(srctree)/arch/mips/%/Platform, $(platform-y))
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 86510741d49d42..de60ad19005764 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -292,6 +292,8 @@ config BMIPS_GENERIC
select USB_OHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN
select USB_OHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
select HARDIRQS_SW_RESEND
+ select HAVE_PCI
+ select PCI_DRIVERS_GENERIC
help
Build a generic DT-based kernel image that boots on select
BCM33xx cable modem chips, BCM63xx DSL chips, and BCM7xxx set-top
@@ -333,6 +335,9 @@ config BCM63XX
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_HAS_EARLY_PRINTK
+ select SYS_HAS_CPU_BMIPS32_3300
+ select SYS_HAS_CPU_BMIPS4350
+ select SYS_HAS_CPU_BMIPS4380
select SWAP_IO_SPACE
select GPIOLIB
select MIPS_L1_CACHE_SHIFT_4
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index e036fc025cccb2..ace7f033de07c7 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -253,7 +253,9 @@ endif
#
# Board-dependent options and extra files
#
+ifdef need-compiler
include $(srctree)/arch/mips/Kbuild.platforms
+endif
ifdef CONFIG_PHYSICAL_START
load-y = $(CONFIG_PHYSICAL_START)
diff --git a/arch/mips/boot/compressed/.gitignore b/arch/mips/boot/compressed/.gitignore
deleted file mode 100644
index d358395614c9f3..00000000000000
--- a/arch/mips/boot/compressed/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-ashldi3.c
-bswapsi.c
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
index 3548b3b452699c..2861a05c2e0c04 100644
--- a/arch/mips/boot/compressed/Makefile
+++ b/arch/mips/boot/compressed/Makefile
@@ -50,19 +50,9 @@ vmlinuzobjs-$(CONFIG_MIPS_ALCHEMY) += $(obj)/uart-alchemy.o
vmlinuzobjs-$(CONFIG_ATH79) += $(obj)/uart-ath79.o
endif
-extra-y += uart-ath79.c
-$(obj)/uart-ath79.c: $(srctree)/arch/mips/ath79/early_printk.c
- $(call cmd,shipped)
-
vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o
-extra-y += ashldi3.c
-$(obj)/ashldi3.c: $(obj)/%.c: $(srctree)/lib/%.c FORCE
- $(call if_changed,shipped)
-
-extra-y += bswapsi.c
-$(obj)/bswapsi.c: $(obj)/%.c: $(srctree)/arch/mips/lib/%.c FORCE
- $(call if_changed,shipped)
+vmlinuzobjs-$(CONFIG_KERNEL_ZSTD) += $(obj)/bswapdi.o
targets := $(notdir $(vmlinuzobjs-y))
diff --git a/arch/mips/boot/compressed/ashldi3.c b/arch/mips/boot/compressed/ashldi3.c
new file mode 100644
index 00000000000000..f7bf6a7aae3153
--- /dev/null
+++ b/arch/mips/boot/compressed/ashldi3.c
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include "../../../../lib/ashldi3.c"
diff --git a/arch/mips/boot/compressed/bswapdi.c b/arch/mips/boot/compressed/bswapdi.c
new file mode 100644
index 00000000000000..acb28aebb025ab
--- /dev/null
+++ b/arch/mips/boot/compressed/bswapdi.c
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include "../../lib/bswapdi.c"
diff --git a/arch/mips/boot/compressed/bswapsi.c b/arch/mips/boot/compressed/bswapsi.c
new file mode 100644
index 00000000000000..fdb9c64769045f
--- /dev/null
+++ b/arch/mips/boot/compressed/bswapsi.c
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include "../../lib/bswapsi.c"
diff --git a/arch/mips/boot/compressed/uart-ath79.c b/arch/mips/boot/compressed/uart-ath79.c
new file mode 100644
index 00000000000000..d686820921be38
--- /dev/null
+++ b/arch/mips/boot/compressed/uart-ath79.c
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include "../../ath79/early_printk.c"
diff --git a/arch/mips/configs/bmips_stb_defconfig b/arch/mips/configs/bmips_stb_defconfig
index 625bd2d7e6854e..5956fb95c19fa6 100644
--- a/arch/mips/configs/bmips_stb_defconfig
+++ b/arch/mips/configs/bmips_stb_defconfig
@@ -1,6 +1,7 @@
# CONFIG_LOCALVERSION_AUTO is not set
# CONFIG_SWAP is not set
CONFIG_NO_HZ=y
+CONFIG_HZ=1000
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
# CONFIG_VM_EVENT_COUNTERS is not set
@@ -8,17 +9,34 @@ CONFIG_EXPERT=y
CONFIG_BMIPS_GENERIC=y
CONFIG_CPU_LITTLE_ENDIAN=y
CONFIG_HIGHMEM=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_SMP=y
CONFIG_NR_CPUS=4
+CONFIG_CC_STACKPROTECTOR_STRONG=y
# CONFIG_SECCOMP is not set
CONFIG_MIPS_O32_FP64_SUPPORT=y
+# CONFIG_RD_GZIP is not set
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+CONFIG_RD_XZ=y
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_PCI=y
+CONFIG_PCI_MSI=y
+CONFIG_PCIEASPM_POWERSAVE=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCIE_BRCMSTB=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_STAT_DETAILS=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
-CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
CONFIG_BMIPS_CPUFREQ=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_NET=y
@@ -32,32 +50,99 @@ CONFIG_INET=y
# CONFIG_INET_DIAG is not set
CONFIG_CFG80211=y
CONFIG_NL80211_TESTMODE=y
+CONFIG_WIRELESS=y
CONFIG_MAC80211=y
+CONFIG_NL80211=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_BRCMSTB_GISB_ARB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+CONFIG_INET_UDP_DIAG=y
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_BIC=y
+# CONFIG_TCP_CONG_WESTWOOD is not set
+# CONFIG_TCP_CONG_HTCP is not set
+# CONFIG_IPV6 is not set
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_XTABLES=y
+CONFIG_BRIDGE=y
+CONFIG_BRIDGE_NETFILTER=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_NET_DSA=y
+CONFIG_NET_SWITCHDEV=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_ALIGNMENT=12
+CONFIG_SPI=y
+CONFIG_SPI_BRCMSTB=y
CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_ROM=y
+CONFIG_MTD_ABSENT=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_BRCMNAND=y
+CONFIG_MTD_SPI_NOR=y
+# CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is not set
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_GLUEBI=y
+CONFIG_PROC_DEVICETREE=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
# CONFIG_BLK_DEV is not set
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_MULTI_LUN=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_NETDEVICES=y
+CONFIG_VLAN_8021Q=y
+CONFIG_MACVLAN=y
CONFIG_BCMGENET=y
CONFIG_USB_USBNET=y
-# CONFIG_INPUT is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
# CONFIG_SERIO is not set
-# CONFIG_VT is not set
+CONFIG_VT=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_BRCMSTB=y
CONFIG_POWER_RESET_SYSCON=y
CONFIG_POWER_SUPPLY=y
# CONFIG_HWMON is not set
@@ -69,22 +154,76 @@ CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_USB_STORAGE=y
CONFIG_SOC_BRCMSTB=y
+CONFIG_MMC=y
+CONFIG_MMC_BLOCK_MINORS=16
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
# CONFIG_DNOTIFY is not set
+CONFIG_PROC_KCORE=y
+CONFIG_CIFS=y
+CONFIG_JBD2_DEBUG=y
CONFIG_FUSE_FS=y
+CONFIG_FHANDLE=y
+CONFIG_CGROUPS=y
+CONFIG_CUSE=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=y
+CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
-CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_UBIFS_FS=y
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
CONFIG_NFS_FS=y
-CONFIG_CIFS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
-# CONFIG_CRYPTO_HW is not set
CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_INFO_REDUCED is not set
CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_DEBUG_USER=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="earlycon"
+# CONFIG_MIPS_CMDLINE_FROM_DTB is not set
+CONFIG_MIPS_CMDLINE_DTB_EXTEND=y
+# CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER is not set
+# CONFIG_CRYPTO_HW is not set
+CONFIG_DT_BCM974XX=y
+CONFIG_FW_CFE=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AHCI_BRCMSTB=y
+CONFIG_GENERIC_PHY=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_PHY_BRCM_USB=y
+CONFIG_PHY_BRCM_SATA=y
+CONFIG_PM_RUNTIME=y
+CONFIG_PM_DEBUG=y
+CONFIG_SYSVIPC=y
+CONFIG_FUNCTION_GRAPH_TRACER=y
+CONFIG_DYNAMIC_FTRACE=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_IRQSOFF_TRACER=y
+CONFIG_SCHED_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_TRACER_SNAPSHOT=y
+CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
+CONFIG_STACK_TRACER=y
diff --git a/arch/mips/dec/setup.c b/arch/mips/dec/setup.c
index eaad0ed4b523bb..a8a30bb1dee8c1 100644
--- a/arch/mips/dec/setup.c
+++ b/arch/mips/dec/setup.c
@@ -117,21 +117,21 @@ static void __init dec_be_init(void)
{
switch (mips_machtype) {
case MACH_DS23100: /* DS2100/DS3100 Pmin/Pmax */
- board_be_handler = dec_kn01_be_handler;
+ mips_set_be_handler(dec_kn01_be_handler);
busirq_handler = dec_kn01_be_interrupt;
busirq_flags |= IRQF_SHARED;
dec_kn01_be_init();
break;
case MACH_DS5000_1XX: /* DS5000/1xx 3min */
case MACH_DS5000_XX: /* DS5000/xx Maxine */
- board_be_handler = dec_kn02xa_be_handler;
+ mips_set_be_handler(dec_kn02xa_be_handler);
busirq_handler = dec_kn02xa_be_interrupt;
dec_kn02xa_be_init();
break;
case MACH_DS5000_200: /* DS5000/200 3max */
case MACH_DS5000_2X0: /* DS5000/240 3max+ */
case MACH_DS5900: /* DS5900 bigmax */
- board_be_handler = dec_ecc_be_handler;
+ mips_set_be_handler(dec_ecc_be_handler);
busirq_handler = dec_ecc_be_interrupt;
dec_ecc_be_init();
break;
diff --git a/arch/mips/include/asm/traps.h b/arch/mips/include/asm/traps.h
index b710e76c9c658f..15cde638b4070c 100644
--- a/arch/mips/include/asm/traps.h
+++ b/arch/mips/include/asm/traps.h
@@ -15,7 +15,7 @@
#define MIPS_BE_FATAL 2 /* treat as an unrecoverable error */
extern void (*board_be_init)(void);
-extern int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
+void mips_set_be_handler(int (*handler)(struct pt_regs *reg, int is_fixup));
extern void (*board_nmi_handler_setup)(void);
extern void (*board_ejtag_handler_setup)(void);
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 6f07362de5cec2..d26b0fb8ea067e 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -103,13 +103,19 @@ extern asmlinkage void handle_reserved(void);
extern void tlb_do_page_fault_0(void);
void (*board_be_init)(void);
-int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
+static int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
void (*board_ebase_setup)(void);
void(*board_cache_error_setup)(void);
+void mips_set_be_handler(int (*handler)(struct pt_regs *regs, int is_fixup))
+{
+ board_be_handler = handler;
+}
+EXPORT_SYMBOL_GPL(mips_set_be_handler);
+
static void show_raw_backtrace(unsigned long reg29, const char *loglvl,
bool user)
{
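Editorial sketch (not part of the patch): with board_be_handler now static, platform code registers its bus-error handler through the new accessor, as in the conversions below. A hypothetical board file would look like:

    /* Hypothetical platform code using the new accessor. */
    #include <linux/init.h>
    #include <asm/traps.h>

    static int example_be_handler(struct pt_regs *regs, int is_fixup)
    {
            /* Decide whether the bus error is recoverable. */
            return is_fixup ? MIPS_BE_FIXUP : MIPS_BE_FATAL;
    }

    void __init example_be_init(void)
    {
            mips_set_be_handler(example_be_handler);
    }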
diff --git a/arch/mips/sgi-ip22/ip22-berr.c b/arch/mips/sgi-ip22/ip22-berr.c
index dc0110a607a5b4..afe8a61078e40f 100644
--- a/arch/mips/sgi-ip22/ip22-berr.c
+++ b/arch/mips/sgi-ip22/ip22-berr.c
@@ -112,5 +112,5 @@ static int ip22_be_handler(struct pt_regs *regs, int is_fixup)
void __init ip22_be_init(void)
{
- board_be_handler = ip22_be_handler;
+ mips_set_be_handler(ip22_be_handler);
}
diff --git a/arch/mips/sgi-ip22/ip28-berr.c b/arch/mips/sgi-ip22/ip28-berr.c
index c61362d9ea95de..16ca470deb8096 100644
--- a/arch/mips/sgi-ip22/ip28-berr.c
+++ b/arch/mips/sgi-ip22/ip28-berr.c
@@ -468,7 +468,7 @@ static int ip28_be_handler(struct pt_regs *regs, int is_fixup)
void __init ip22_be_init(void)
{
- board_be_handler = ip28_be_handler;
+ mips_set_be_handler(ip28_be_handler);
}
int ip28_show_be_info(struct seq_file *m)
diff --git a/arch/mips/sgi-ip27/ip27-berr.c b/arch/mips/sgi-ip27/ip27-berr.c
index 5a38ae6bdfa91a..923a63a51cda39 100644
--- a/arch/mips/sgi-ip27/ip27-berr.c
+++ b/arch/mips/sgi-ip27/ip27-berr.c
@@ -85,7 +85,7 @@ void __init ip27_be_init(void)
int cpu = LOCAL_HUB_L(PI_CPU_NUM);
int cpuoff = cpu << 8;
- board_be_handler = ip27_be_handler;
+ mips_set_be_handler(ip27_be_handler);
LOCAL_HUB_S(PI_ERR_INT_PEND,
cpu ? PI_ERR_CLEAR_ALL_B : PI_ERR_CLEAR_ALL_A);
diff --git a/arch/mips/sgi-ip32/ip32-berr.c b/arch/mips/sgi-ip32/ip32-berr.c
index c860f95ab7edd4..478b63b4c808f3 100644
--- a/arch/mips/sgi-ip32/ip32-berr.c
+++ b/arch/mips/sgi-ip32/ip32-berr.c
@@ -34,5 +34,5 @@ static int ip32_be_handler(struct pt_regs *regs, int is_fixup)
void __init ip32_be_init(void)
{
- board_be_handler = ip32_be_handler;
+ mips_set_be_handler(ip32_be_handler);
}
diff --git a/arch/mips/sibyte/swarm/setup.c b/arch/mips/sibyte/swarm/setup.c
index f07b15dd1c1a31..72a31eeeebbaba 100644
--- a/arch/mips/sibyte/swarm/setup.c
+++ b/arch/mips/sibyte/swarm/setup.c
@@ -122,7 +122,7 @@ void __init plat_mem_setup(void)
#error invalid SiByte board configuration
#endif
- board_be_handler = swarm_be_handler;
+ mips_set_be_handler(swarm_be_handler);
if (xicor_probe())
swarm_rtc_type = RTC_XICOR;
diff --git a/arch/mips/txx9/generic/setup_tx4927.c b/arch/mips/txx9/generic/setup_tx4927.c
index 46e9c41013862c..63f9725b2eb08a 100644
--- a/arch/mips/txx9/generic/setup_tx4927.c
+++ b/arch/mips/txx9/generic/setup_tx4927.c
@@ -80,7 +80,7 @@ static int tx4927_be_handler(struct pt_regs *regs, int is_fixup)
}
static void __init tx4927_be_init(void)
{
- board_be_handler = tx4927_be_handler;
+ mips_set_be_handler(tx4927_be_handler);
}
static struct resource tx4927_sdram_resource[4];
diff --git a/arch/mips/txx9/generic/setup_tx4938.c b/arch/mips/txx9/generic/setup_tx4938.c
index 17395d5d15caf9..ba646548c5f694 100644
--- a/arch/mips/txx9/generic/setup_tx4938.c
+++ b/arch/mips/txx9/generic/setup_tx4938.c
@@ -82,7 +82,7 @@ static int tx4938_be_handler(struct pt_regs *regs, int is_fixup)
}
static void __init tx4938_be_init(void)
{
- board_be_handler = tx4938_be_handler;
+ mips_set_be_handler(tx4938_be_handler);
}
static struct resource tx4938_sdram_resource[4];
diff --git a/arch/mips/txx9/generic/setup_tx4939.c b/arch/mips/txx9/generic/setup_tx4939.c
index bf8a3cdababf75..f5f59b7401a3d3 100644
--- a/arch/mips/txx9/generic/setup_tx4939.c
+++ b/arch/mips/txx9/generic/setup_tx4939.c
@@ -86,7 +86,7 @@ static int tx4939_be_handler(struct pt_regs *regs, int is_fixup)
}
static void __init tx4939_be_init(void)
{
- board_be_handler = tx4939_be_handler;
+ mips_set_be_handler(tx4939_be_handler);
}
static struct resource tx4939_sdram_resource[4];
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
index 1b2ea34c3d3bb1..d65f55f67e19bc 100644
--- a/arch/mips/vdso/Makefile
+++ b/arch/mips/vdso/Makefile
@@ -57,7 +57,7 @@ endif
# VDSO linker flags.
ldflags-y := -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \
- $(filter -E%,$(KBUILD_CFLAGS)) -nostdlib -shared \
+ $(filter -E%,$(KBUILD_CFLAGS)) -shared \
-G 0 --eh-frame-hdr --hash-style=sysv --build-id=sha1 -T
CFLAGS_REMOVE_vdso.o = $(CC_FLAGS_FTRACE)
diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index a7061ee3b1577d..28c436df99355d 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -560,7 +560,7 @@ static int __kvmppc_svm_page_out(struct vm_area_struct *vma,
gpa, 0, page_shift);
if (ret == U_SUCCESS)
- *mig.dst = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;
+ *mig.dst = migrate_pfn(pfn);
else {
unlock_page(dpage);
__free_page(dpage);
@@ -774,7 +774,7 @@ static int kvmppc_svm_page_in(struct vm_area_struct *vma,
}
}
- *mig.dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+ *mig.dst = migrate_pfn(page_to_pfn(dpage));
migrate_vma_pages(&mig);
out_finalize:
migrate_vma_finalize(&mig);
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index a34c531be4e7af..821252b65f8906 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -62,6 +62,7 @@ config RISCV
select GENERIC_SCHED_CLOCK
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL if MMU && 64BIT
+ select GENERIC_VDSO_TIME_NS if HAVE_GENERIC_VDSO
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE if !XIP_KERNEL
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index 7f19b784e649a5..5927c94302b87b 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -136,3 +136,13 @@ zinstall: install-image = Image.gz
install zinstall:
$(CONFIG_SHELL) $(srctree)/$(boot)/install.sh $(KERNELRELEASE) \
$(boot)/$(install-image) System.map "$(INSTALL_PATH)"
+
+PHONY += rv32_randconfig
+rv32_randconfig:
+ $(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/riscv/configs/32-bit.config \
+ -f $(srctree)/Makefile randconfig
+
+PHONY += rv64_randconfig
+rv64_randconfig:
+ $(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/riscv/configs/64-bit.config \
+ -f $(srctree)/Makefile randconfig
diff --git a/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts b/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts
index b254c60589a1cc..fc1e5869df1b9f 100644
--- a/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts
+++ b/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts
@@ -9,10 +9,8 @@
#define RTCCLK_FREQ 1000000
/ {
- #address-cells = <2>;
- #size-cells = <2>;
model = "Microchip PolarFire-SoC Icicle Kit";
- compatible = "microchip,mpfs-icicle-kit";
+ compatible = "microchip,mpfs-icicle-kit", "microchip,mpfs";
aliases {
ethernet0 = &emac1;
@@ -35,9 +33,6 @@
reg = <0x0 0x80000000 0x0 0x40000000>;
clocks = <&clkcfg 26>;
};
-
- soc {
- };
};
&serial0 {
@@ -56,8 +51,17 @@
status = "okay";
};
-&sdcard {
+&mmc {
status = "okay";
+
+ bus-width = <4>;
+ disable-wp;
+ cap-sd-highspeed;
+ card-detect-delay = <200>;
+ sd-uhs-sdr12;
+ sd-uhs-sdr25;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
};
&emac0 {
diff --git a/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi b/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi
index 9d2fbbc1f7778f..c9f6d205d2ba1a 100644
--- a/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi
+++ b/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi
@@ -6,8 +6,8 @@
/ {
#address-cells = <2>;
#size-cells = <2>;
- model = "Microchip MPFS Icicle Kit";
- compatible = "microchip,mpfs-icicle-kit";
+ model = "Microchip PolarFire SoC";
+ compatible = "microchip,mpfs";
chosen {
};
@@ -161,7 +161,7 @@
};
clint@2000000 {
- compatible = "sifive,clint0";
+ compatible = "sifive,fu540-c000-clint", "sifive,clint0";
reg = <0x0 0x2000000 0x0 0xC000>;
interrupts-extended = <&cpu0_intc 3 &cpu0_intc 7
&cpu1_intc 3 &cpu1_intc 7
@@ -172,7 +172,7 @@
plic: interrupt-controller@c000000 {
#interrupt-cells = <1>;
- compatible = "sifive,plic-1.0.0";
+ compatible = "sifive,fu540-c000-plic", "sifive,plic-1.0.0";
reg = <0x0 0xc000000 0x0 0x4000000>;
riscv,ndev = <186>;
interrupt-controller;
@@ -262,39 +262,13 @@
status = "disabled";
};
- emmc: mmc@20008000 {
- compatible = "cdns,sd4hc";
+ /* Common node entry for emmc/sd */
+ mmc: mmc@20008000 {
+ compatible = "microchip,mpfs-sd4hc", "cdns,sd4hc";
reg = <0x0 0x20008000 0x0 0x1000>;
interrupt-parent = <&plic>;
interrupts = <88 89>;
- pinctrl-names = "default";
clocks = <&clkcfg 6>;
- bus-width = <4>;
- cap-mmc-highspeed;
- mmc-ddr-3_3v;
- max-frequency = <200000000>;
- non-removable;
- no-sd;
- no-sdio;
- voltage-ranges = <3300 3300>;
- status = "disabled";
- };
-
- sdcard: sdhc@20008000 {
- compatible = "cdns,sd4hc";
- reg = <0x0 0x20008000 0x0 0x1000>;
- interrupt-parent = <&plic>;
- interrupts = <88>;
- pinctrl-names = "default";
- clocks = <&clkcfg 6>;
- bus-width = <4>;
- disable-wp;
- cap-sd-highspeed;
- card-detect-delay = <200>;
- sd-uhs-sdr12;
- sd-uhs-sdr25;
- sd-uhs-sdr50;
- sd-uhs-sdr104;
max-frequency = <200000000>;
status = "disabled";
};
diff --git a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
index 7db8610534834e..0655b5c4201d9f 100644
--- a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
+++ b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
@@ -141,7 +141,7 @@
ranges;
plic0: interrupt-controller@c000000 {
#interrupt-cells = <1>;
- compatible = "sifive,plic-1.0.0";
+ compatible = "sifive,fu540-c000-plic", "sifive,plic-1.0.0";
reg = <0x0 0xc000000 0x0 0x4000000>;
riscv,ndev = <53>;
interrupt-controller;
diff --git a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
index 60846e88ae4b1c..ba304d4c455c2a 100644
--- a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
+++ b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
@@ -8,10 +8,9 @@
#define RTCCLK_FREQ 1000000
/ {
- #address-cells = <2>;
- #size-cells = <2>;
model = "SiFive HiFive Unleashed A00";
- compatible = "sifive,hifive-unleashed-a00", "sifive,fu540-c000";
+ compatible = "sifive,hifive-unleashed-a00", "sifive,fu540-c000",
+ "sifive,fu540";
chosen {
stdout-path = "serial0";
@@ -26,9 +25,6 @@
reg = <0x0 0x80000000 0x2 0x00000000>;
};
- soc {
- };
-
hfclk: hfclk {
#clock-cells = <0>;
compatible = "fixed-clock";
@@ -63,7 +59,7 @@
&qspi0 {
status = "okay";
flash@0 {
- compatible = "issi,is25wp256", "jedec,spi-nor";
+ compatible = "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <50000000>;
m25p,fast-read;
diff --git a/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
index 2e4ea84f27e77a..4f66919215f6eb 100644
--- a/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
+++ b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
@@ -8,8 +8,6 @@
#define RTCCLK_FREQ 1000000
/ {
- #address-cells = <2>;
- #size-cells = <2>;
model = "SiFive HiFive Unmatched A00";
compatible = "sifive,hifive-unmatched-a00", "sifive,fu740-c000",
"sifive,fu740";
@@ -27,9 +25,6 @@
reg = <0x0 0x80000000 0x4 0x00000000>;
};
- soc {
- };
-
hfclk: hfclk {
#clock-cells = <0>;
compatible = "fixed-clock";
@@ -211,7 +206,7 @@
&qspi0 {
status = "okay";
flash@0 {
- compatible = "issi,is25wp256", "jedec,spi-nor";
+ compatible = "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <50000000>;
m25p,fast-read;
diff --git a/arch/riscv/configs/32-bit.config b/arch/riscv/configs/32-bit.config
new file mode 100644
index 00000000000000..43f41323b67e85
--- /dev/null
+++ b/arch/riscv/configs/32-bit.config
@@ -0,0 +1,2 @@
+CONFIG_ARCH_RV32I=y
+CONFIG_32BIT=y
diff --git a/arch/riscv/configs/64-bit.config b/arch/riscv/configs/64-bit.config
new file mode 100644
index 00000000000000..313edc554d84cc
--- /dev/null
+++ b/arch/riscv/configs/64-bit.config
@@ -0,0 +1,2 @@
+CONFIG_ARCH_RV64I=y
+CONFIG_64BIT=y
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index 4ebc80315f0135..c252fd5706d20d 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -72,9 +72,10 @@ CONFIG_GPIOLIB=y
CONFIG_GPIO_SIFIVE=y
# CONFIG_PTP_1588_CLOCK is not set
CONFIG_POWER_RESET=y
-CONFIG_DRM=y
-CONFIG_DRM_RADEON=y
-CONFIG_DRM_VIRTIO_GPU=y
+CONFIG_DRM=m
+CONFIG_DRM_RADEON=m
+CONFIG_DRM_NOUVEAU=m
+CONFIG_DRM_VIRTIO_GPU=m
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_USB=y
CONFIG_USB_XHCI_HCD=y
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index 109c97e991a639..b3e5ff0125fe48 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -157,6 +157,8 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x);
#define page_to_bus(page) (page_to_phys(page))
#define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr)))
+#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x))
+
#ifdef CONFIG_FLATMEM
#define pfn_valid(pfn) \
(((pfn) >= ARCH_PFN_OFFSET) && (((pfn) - ARCH_PFN_OFFSET) < max_mapnr))
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 39b550310ec645..bf204e7c1f7425 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -75,7 +75,8 @@
#endif
#ifdef CONFIG_XIP_KERNEL
-#define XIP_OFFSET SZ_8M
+#define XIP_OFFSET SZ_32M
+#define XIP_OFFSET_MASK (SZ_32M - 1)
#else
#define XIP_OFFSET 0
#endif
@@ -97,7 +98,8 @@
#ifdef CONFIG_XIP_KERNEL
#define XIP_FIXUP(addr) ({ \
uintptr_t __a = (uintptr_t)(addr); \
- (__a >= CONFIG_XIP_PHYS_ADDR && __a < CONFIG_XIP_PHYS_ADDR + SZ_16M) ? \
+ (__a >= CONFIG_XIP_PHYS_ADDR && \
+ __a < CONFIG_XIP_PHYS_ADDR + XIP_OFFSET * 2) ? \
__a - CONFIG_XIP_PHYS_ADDR + CONFIG_PHYS_RAM_BASE - XIP_OFFSET :\
__a; \
})
diff --git a/arch/riscv/include/asm/vdso.h b/arch/riscv/include/asm/vdso.h
index 208e31bc5d1c28..bc6f75f3a199fb 100644
--- a/arch/riscv/include/asm/vdso.h
+++ b/arch/riscv/include/asm/vdso.h
@@ -8,30 +8,19 @@
#ifndef _ASM_RISCV_VDSO_H
#define _ASM_RISCV_VDSO_H
-
-/*
- * All systems with an MMU have a VDSO, but systems without an MMU don't
- * support shared libraries and therefor don't have one.
- */
-#ifdef CONFIG_MMU
-
-#include <linux/types.h>
/*
* All systems with an MMU have a VDSO, but systems without an MMU don't
* support shared libraries and therefor don't have one.
*/
#ifdef CONFIG_MMU
-#define __VVAR_PAGES 1
+#define __VVAR_PAGES 2
#ifndef __ASSEMBLY__
#include <generated/vdso-offsets.h>
#define VDSO_SYMBOL(base, name) \
(void __user *)((unsigned long)(base) + __vdso_##name##_offset)
-
-#endif /* CONFIG_MMU */
-
#endif /* !__ASSEMBLY__ */
#endif /* CONFIG_MMU */
diff --git a/arch/riscv/include/asm/vdso/gettimeofday.h b/arch/riscv/include/asm/vdso/gettimeofday.h
index f839f16e0d2a94..77d9c2f721c464 100644
--- a/arch/riscv/include/asm/vdso/gettimeofday.h
+++ b/arch/riscv/include/asm/vdso/gettimeofday.h
@@ -76,6 +76,13 @@ static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
return _vdso_data;
}
+#ifdef CONFIG_TIME_NS
+static __always_inline
+const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
+{
+ return _timens_data;
+}
+#endif
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index 25ec505739573c..f52f01ecbeea04 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -20,10 +20,20 @@
REG_L t0, _xip_fixup
add \reg, \reg, t0
.endm
+.macro XIP_FIXUP_FLASH_OFFSET reg
+ la t1, __data_loc
+ li t0, XIP_OFFSET_MASK
+ and t1, t1, t0
+ li t1, XIP_OFFSET
+ sub t0, t0, t1
+ sub \reg, \reg, t0
+.endm
_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
#else
.macro XIP_FIXUP_OFFSET reg
.endm
+.macro XIP_FIXUP_FLASH_OFFSET reg
+.endm
#endif /* CONFIG_XIP_KERNEL */
__HEAD
@@ -267,6 +277,7 @@ pmp_done:
la a3, hart_lottery
mv a2, a3
XIP_FIXUP_OFFSET a2
+ XIP_FIXUP_FLASH_OFFSET a3
lw t1, (a3)
amoswap.w t0, t1, (a2)
/* first time here if hart_lottery in RAM is not set */
@@ -305,6 +316,7 @@ clear_bss_done:
XIP_FIXUP_OFFSET sp
#ifdef CONFIG_BUILTIN_DTB
la a0, __dtb_start
+ XIP_FIXUP_OFFSET a0
#else
mv a0, s1
#endif /* CONFIG_BUILTIN_DTB */
diff --git a/arch/riscv/kernel/reset.c b/arch/riscv/kernel/reset.c
index ee5878d968cc11..9c842c41684acd 100644
--- a/arch/riscv/kernel/reset.c
+++ b/arch/riscv/kernel/reset.c
@@ -12,7 +12,7 @@ static void default_power_off(void)
wait_for_interrupt();
}
-void (*pm_power_off)(void) = default_power_off;
+void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);
void machine_restart(char *cmd)
@@ -23,10 +23,16 @@ void machine_restart(char *cmd)
void machine_halt(void)
{
- pm_power_off();
+ if (pm_power_off != NULL)
+ pm_power_off();
+ else
+ default_power_off();
}
void machine_power_off(void)
{
- pm_power_off();
+ if (pm_power_off != NULL)
+ pm_power_off();
+ else
+ default_power_off();
}
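Editorial sketch (not part of the patch): since pm_power_off now defaults to NULL on riscv, a power-off driver claims it explicitly and default_power_off() only runs when nothing has registered. Driver and controller details below are placeholders:

    /* Hypothetical power-off driver claiming pm_power_off. */
    #include <linux/init.h>
    #include <linux/pm.h>

    static void example_power_off(void)
    {
            /* Tell the board's power controller to cut power here. */
    }

    static int __init example_power_off_init(void)
    {
            pm_power_off = example_power_off;
            return 0;
    }
    device_initcall(example_power_off_init);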
diff --git a/arch/riscv/kernel/vdso.c b/arch/riscv/kernel/vdso.c
index b70956d8040812..a9436a65161a4f 100644
--- a/arch/riscv/kernel/vdso.c
+++ b/arch/riscv/kernel/vdso.c
@@ -13,6 +13,7 @@
#include <linux/err.h>
#include <asm/page.h>
#include <asm/vdso.h>
+#include <linux/time_namespace.h>
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
#include <vdso/datapage.h>
@@ -25,14 +26,12 @@ extern char vdso_start[], vdso_end[];
enum vvar_pages {
VVAR_DATA_PAGE_OFFSET,
+ VVAR_TIMENS_PAGE_OFFSET,
VVAR_NR_PAGES,
};
#define VVAR_SIZE (VVAR_NR_PAGES << PAGE_SHIFT)
-static unsigned int vdso_pages __ro_after_init;
-static struct page **vdso_pagelist __ro_after_init;
-
/*
* The vDSO data page.
*/
@@ -42,83 +41,228 @@ static union {
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;
-static int __init vdso_init(void)
+struct __vdso_info {
+ const char *name;
+ const char *vdso_code_start;
+ const char *vdso_code_end;
+ unsigned long vdso_pages;
+ /* Data Mapping */
+ struct vm_special_mapping *dm;
+ /* Code Mapping */
+ struct vm_special_mapping *cm;
+};
+
+static struct __vdso_info vdso_info __ro_after_init = {
+ .name = "vdso",
+ .vdso_code_start = vdso_start,
+ .vdso_code_end = vdso_end,
+};
+
+static int vdso_mremap(const struct vm_special_mapping *sm,
+ struct vm_area_struct *new_vma)
+{
+ current->mm->context.vdso = (void *)new_vma->vm_start;
+
+ return 0;
+}
+
+static int __init __vdso_init(void)
{
unsigned int i;
+ struct page **vdso_pagelist;
+ unsigned long pfn;
- vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
- vdso_pagelist =
- kcalloc(vdso_pages + VVAR_NR_PAGES, sizeof(struct page *), GFP_KERNEL);
- if (unlikely(vdso_pagelist == NULL)) {
- pr_err("vdso: pagelist allocation failed\n");
- return -ENOMEM;
+ if (memcmp(vdso_info.vdso_code_start, "\177ELF", 4)) {
+ pr_err("vDSO is not a valid ELF object!\n");
+ return -EINVAL;
}
- for (i = 0; i < vdso_pages; i++) {
- struct page *pg;
+ vdso_info.vdso_pages = (
+ vdso_info.vdso_code_end -
+ vdso_info.vdso_code_start) >>
+ PAGE_SHIFT;
+
+ vdso_pagelist = kcalloc(vdso_info.vdso_pages,
+ sizeof(struct page *),
+ GFP_KERNEL);
+ if (vdso_pagelist == NULL)
+ return -ENOMEM;
+
+ /* Grab the vDSO code pages. */
+ pfn = sym_to_pfn(vdso_info.vdso_code_start);
+
+ for (i = 0; i < vdso_info.vdso_pages; i++)
+ vdso_pagelist[i] = pfn_to_page(pfn + i);
+
+ vdso_info.cm->pages = vdso_pagelist;
+
+ return 0;
+}
+
+#ifdef CONFIG_TIME_NS
+struct vdso_data *arch_get_vdso_data(void *vvar_page)
+{
+ return (struct vdso_data *)(vvar_page);
+}
+
+/*
+ * The vvar mapping contains data for a specific time namespace, so when a task
+ * changes namespace we must unmap its vvar data for the old namespace.
+ * Subsequent faults will map in data for the new namespace.
+ *
+ * For more details see timens_setup_vdso_data().
+ */
+int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
+{
+ struct mm_struct *mm = task->mm;
+ struct vm_area_struct *vma;
+
+ mmap_read_lock(mm);
- pg = virt_to_page(vdso_start + (i << PAGE_SHIFT));
- vdso_pagelist[i] = pg;
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ unsigned long size = vma->vm_end - vma->vm_start;
+
+ if (vma_is_special_mapping(vma, vdso_info.dm))
+ zap_page_range(vma, vma->vm_start, size);
}
- vdso_pagelist[i] = virt_to_page(vdso_data);
+ mmap_read_unlock(mm);
return 0;
}
+
+static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
+{
+ if (likely(vma->vm_mm == current->mm))
+ return current->nsproxy->time_ns->vvar_page;
+
+ /*
+ * VM_PFNMAP | VM_IO protect .fault() handler from being called
+ * through interfaces like /proc/$pid/mem or
+ * process_vm_{readv,writev}() as long as there's no .access()
+ * in special_mapping_vmops.
+ * For more details check_vma_flags() and __access_remote_vm()
+ */
+ WARN(1, "vvar_page accessed remotely");
+
+ return NULL;
+}
+#else
+static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
+{
+ return NULL;
+}
+#endif
+
+static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
+ struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct page *timens_page = find_timens_vvar_page(vma);
+ unsigned long pfn;
+
+ switch (vmf->pgoff) {
+ case VVAR_DATA_PAGE_OFFSET:
+ if (timens_page)
+ pfn = page_to_pfn(timens_page);
+ else
+ pfn = sym_to_pfn(vdso_data);
+ break;
+#ifdef CONFIG_TIME_NS
+ case VVAR_TIMENS_PAGE_OFFSET:
+ /*
+ * If a task belongs to a time namespace then a namespace
+ * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
+ * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
+ * offset.
+ * See also the comment near timens_setup_vdso_data().
+ */
+ if (!timens_page)
+ return VM_FAULT_SIGBUS;
+ pfn = sym_to_pfn(vdso_data);
+ break;
+#endif /* CONFIG_TIME_NS */
+ default:
+ return VM_FAULT_SIGBUS;
+ }
+
+ return vmf_insert_pfn(vma, vmf->address, pfn);
+}
+
+enum rv_vdso_map {
+ RV_VDSO_MAP_VVAR,
+ RV_VDSO_MAP_VDSO,
+};
+
+static struct vm_special_mapping rv_vdso_maps[] __ro_after_init = {
+ [RV_VDSO_MAP_VVAR] = {
+ .name = "[vvar]",
+ .fault = vvar_fault,
+ },
+ [RV_VDSO_MAP_VDSO] = {
+ .name = "[vdso]",
+ .mremap = vdso_mremap,
+ },
+};
+
+static int __init vdso_init(void)
+{
+ vdso_info.dm = &rv_vdso_maps[RV_VDSO_MAP_VVAR];
+ vdso_info.cm = &rv_vdso_maps[RV_VDSO_MAP_VDSO];
+
+ return __vdso_init();
+}
arch_initcall(vdso_init);
-int arch_setup_additional_pages(struct linux_binprm *bprm,
- int uses_interp)
+static int __setup_additional_pages(struct mm_struct *mm,
+ struct linux_binprm *bprm,
+ int uses_interp)
{
- struct mm_struct *mm = current->mm;
- unsigned long vdso_base, vdso_len;
- int ret;
+ unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
+ void *ret;
BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);
- vdso_len = (vdso_pages + VVAR_NR_PAGES) << PAGE_SHIFT;
+ vdso_text_len = vdso_info.vdso_pages << PAGE_SHIFT;
+ /* Be sure to map the data page */
+ vdso_mapping_len = vdso_text_len + VVAR_SIZE;
- if (mmap_write_lock_killable(mm))
- return -EINTR;
-
- vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
+ vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
if (IS_ERR_VALUE(vdso_base)) {
- ret = vdso_base;
- goto end;
+ ret = ERR_PTR(vdso_base);
+ goto up_fail;
}
- mm->context.vdso = NULL;
- ret = install_special_mapping(mm, vdso_base, VVAR_SIZE,
- (VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]);
- if (unlikely(ret))
- goto end;
+ ret = _install_special_mapping(mm, vdso_base, VVAR_SIZE,
+ (VM_READ | VM_MAYREAD | VM_PFNMAP), vdso_info.dm);
+ if (IS_ERR(ret))
+ goto up_fail;
+ vdso_base += VVAR_SIZE;
+ mm->context.vdso = (void *)vdso_base;
ret =
- install_special_mapping(mm, vdso_base + VVAR_SIZE,
- vdso_pages << PAGE_SHIFT,
+ _install_special_mapping(mm, vdso_base, vdso_text_len,
(VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
- vdso_pagelist);
+ vdso_info.cm);
- if (unlikely(ret))
- goto end;
+ if (IS_ERR(ret))
+ goto up_fail;
- /*
- * Put vDSO base into mm struct. We need to do this before calling
- * install_special_mapping or the perf counter mmap tracking code
- * will fail to recognise it as a vDSO (since arch_vma_name fails).
- */
- mm->context.vdso = (void *)vdso_base + VVAR_SIZE;
+ return 0;
-end:
- mmap_write_unlock(mm);
- return ret;
+up_fail:
+ mm->context.vdso = NULL;
+ return PTR_ERR(ret);
}
-const char *arch_vma_name(struct vm_area_struct *vma)
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
- if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))
- return "[vdso]";
- if (vma->vm_mm && (vma->vm_start ==
- (long)vma->vm_mm->context.vdso - VVAR_SIZE))
- return "[vdso_data]";
- return NULL;
+ struct mm_struct *mm = current->mm;
+ int ret;
+
+ if (mmap_write_lock_killable(mm))
+ return -EINTR;
+
+ ret = __setup_additional_pages(mm, bprm, uses_interp);
+ mmap_write_unlock(mm);
+
+ return ret;
}
diff --git a/arch/riscv/kernel/vdso/vdso.lds.S b/arch/riscv/kernel/vdso/vdso.lds.S
index e9111f700af080..01d94aae5bf513 100644
--- a/arch/riscv/kernel/vdso/vdso.lds.S
+++ b/arch/riscv/kernel/vdso/vdso.lds.S
@@ -10,6 +10,9 @@ OUTPUT_ARCH(riscv)
SECTIONS
{
PROVIDE(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE);
+#ifdef CONFIG_TIME_NS
+ PROVIDE(_timens_data = _vdso_data + PAGE_SIZE);
+#endif
. = SIZEOF_HEADERS;
.hash : { *(.hash) } :text
diff --git a/arch/riscv/kernel/vmlinux-xip.lds.S b/arch/riscv/kernel/vmlinux-xip.lds.S
index 9c9f35091ef04b..f5ed08262139f3 100644
--- a/arch/riscv/kernel/vmlinux-xip.lds.S
+++ b/arch/riscv/kernel/vmlinux-xip.lds.S
@@ -64,8 +64,11 @@ SECTIONS
/*
* From this point, stuff is considered writable and will be copied to RAM
*/
- __data_loc = ALIGN(16); /* location in file */
- . = LOAD_OFFSET + XIP_OFFSET; /* location in memory */
+ __data_loc = ALIGN(PAGE_SIZE); /* location in file */
+ . = KERNEL_LINK_ADDR + XIP_OFFSET; /* location in memory */
+
+#undef LOAD_OFFSET
+#define LOAD_OFFSET (KERNEL_LINK_ADDR + XIP_OFFSET - (__data_loc & XIP_OFFSET_MASK))
_sdata = .; /* Start of data section */
_data = .;
@@ -96,7 +99,6 @@ SECTIONS
KEEP(*(__soc_builtin_dtb_table))
__soc_builtin_dtb_table_end = .;
}
- PERCPU_SECTION(L1_CACHE_BYTES)
. = ALIGN(8);
.alternative : {
@@ -122,6 +124,8 @@ SECTIONS
BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0)
+ PERCPU_SECTION(L1_CACHE_BYTES)
+
.rel.dyn : AT(ADDR(.rel.dyn) - LOAD_OFFSET) {
*(.rel.dyn*)
}
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index ee3459cb6750b4..ea54cc0c91068e 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -233,8 +233,10 @@ static int __init asids_init(void)
local_flush_tlb_all();
/* Pre-compute ASID details */
- num_asids = 1 << asid_bits;
- asid_mask = num_asids - 1;
+ if (asid_bits) {
+ num_asids = 1 << asid_bits;
+ asid_mask = num_asids - 1;
+ }
/*
* Use ASID allocator only if number of HW ASIDs are
@@ -255,7 +257,7 @@ static int __init asids_init(void)
pr_info("ASID allocator using %lu bits (%lu entries)\n",
asid_bits, num_asids);
} else {
- pr_info("ASID allocator disabled\n");
+ pr_info("ASID allocator disabled (%lu bits)\n", asid_bits);
}
return 0;
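
As a hedged aside (not part of the patch), the guarded computation above simply derives the ASID count and mask from the probed bit width, leaving both at zero when no hardware ASID bits were detected. A tiny standalone sketch of that arithmetic with an example bit width:

#include <stdio.h>

int main(void)
{
	unsigned long asid_bits = 9;        /* example value; probed from hardware */
	unsigned long num_asids = 0, asid_mask = 0;

	if (asid_bits) {                    /* mirrors the guarded computation */
		num_asids = 1UL << asid_bits;
		asid_mask = num_asids - 1;
	}
	printf("asid_bits=%lu num_asids=%lu asid_mask=0x%lx\n",
	       asid_bits, num_asids, asid_mask);
	return 0;
}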
diff --git a/arch/riscv/mm/extable.c b/arch/riscv/mm/extable.c
index 18bf338303b677..ddb7d3b99e891d 100644
--- a/arch/riscv/mm/extable.c
+++ b/arch/riscv/mm/extable.c
@@ -11,7 +11,7 @@
#include <linux/module.h>
#include <linux/uaccess.h>
-#ifdef CONFIG_BPF_JIT
+#if defined(CONFIG_BPF_JIT) && defined(CONFIG_ARCH_RV64I)
int rv_bpf_fixup_exception(const struct exception_table_entry *ex, struct pt_regs *regs);
#endif
@@ -23,7 +23,7 @@ int fixup_exception(struct pt_regs *regs)
if (!fixup)
return 0;
-#ifdef CONFIG_BPF_JIT
+#if defined(CONFIG_BPF_JIT) && defined(CONFIG_ARCH_RV64I)
if (regs->epc >= BPF_JIT_REGION_START && regs->epc < BPF_JIT_REGION_END)
return rv_bpf_fixup_exception(fixup, regs);
#endif
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index c0cddf0fc22db6..24b2b80446020f 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -41,7 +41,7 @@ phys_addr_t phys_ram_base __ro_after_init;
EXPORT_SYMBOL(phys_ram_base);
#ifdef CONFIG_XIP_KERNEL
-extern char _xiprom[], _exiprom[];
+extern char _xiprom[], _exiprom[], __data_loc;
#endif
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
@@ -454,10 +454,9 @@ static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
/* called from head.S with MMU off */
asmlinkage void __init __copy_data(void)
{
- void *from = (void *)(&_sdata);
- void *end = (void *)(&_end);
+ void *from = (void *)(&__data_loc);
void *to = (void *)CONFIG_PHYS_RAM_BASE;
- size_t sz = (size_t)(end - from + 1);
+ size_t sz = (size_t)((uintptr_t)(&_end) - (uintptr_t)(&_sdata));
memcpy(to, from, sz);
}
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index 2ca345c7b0bf32..f2a779c7e225de 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -460,6 +460,8 @@ static int emit_call(bool fixed, u64 addr, struct rv_jit_context *ctx)
#define BPF_FIXUP_REG_MASK GENMASK(31, 27)
int rv_bpf_fixup_exception(const struct exception_table_entry *ex,
+ struct pt_regs *regs);
+int rv_bpf_fixup_exception(const struct exception_table_entry *ex,
struct pt_regs *regs)
{
off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 6b3c366af78eb5..90824be5ce9a89 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -210,9 +210,11 @@ int zpci_deconfigure_device(struct zpci_dev *zdev);
void zpci_device_reserved(struct zpci_dev *zdev);
bool zpci_is_device_configured(struct zpci_dev *zdev);
+int zpci_hot_reset_device(struct zpci_dev *zdev);
int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64);
int zpci_unregister_ioat(struct zpci_dev *, u8);
void zpci_remove_reserved_devices(void);
+void zpci_update_fh(struct zpci_dev *zdev, u32 fh);
/* CLP */
int clp_setup_writeback_mio(void);
@@ -294,8 +296,10 @@ void zpci_debug_exit(void);
void zpci_debug_init_device(struct zpci_dev *, const char *);
void zpci_debug_exit_device(struct zpci_dev *);
-/* Error reporting */
+/* Error handling */
int zpci_report_error(struct pci_dev *, struct zpci_report_error_header *);
+int zpci_clear_error_state(struct zpci_dev *zdev);
+int zpci_reset_load_store_blocked(struct zpci_dev *zdev);
#ifdef CONFIG_NUMA
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 6f431fa9e4d7b8..ee8707abdb6a01 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -687,8 +687,10 @@ static void cpumf_pmu_stop(struct perf_event *event, int flags)
false);
if (cfdiag_diffctr(cpuhw, event->hw.config_base))
cfdiag_push_sample(event, cpuhw);
- } else
+ } else if (cpuhw->flags & PMU_F_RESERVED) {
+ /* Only update when PMU not hotplugged off */
hw_perf_event_update(event);
+ }
hwc->state |= PERF_HES_UPTODATE;
}
}
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 872d772b73d200..2f9b78fa82a542 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -481,6 +481,34 @@ static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
spin_unlock(&zpci_iomap_lock);
}
+static void zpci_do_update_iomap_fh(struct zpci_dev *zdev, u32 fh)
+{
+ int bar, idx;
+
+ spin_lock(&zpci_iomap_lock);
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
+ if (!zdev->bars[bar].size)
+ continue;
+ idx = zdev->bars[bar].map_idx;
+ if (!zpci_iomap_start[idx].count)
+ continue;
+ WRITE_ONCE(zpci_iomap_start[idx].fh, zdev->fh);
+ }
+ spin_unlock(&zpci_iomap_lock);
+}
+
+void zpci_update_fh(struct zpci_dev *zdev, u32 fh)
+{
+ if (!fh || zdev->fh == fh)
+ return;
+
+ zdev->fh = fh;
+ if (zpci_use_mio(zdev))
+ return;
+ if (zdev->has_resources && zdev_enabled(zdev))
+ zpci_do_update_iomap_fh(zdev, fh);
+}
+
static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
unsigned long size, unsigned long flags)
{
@@ -668,7 +696,7 @@ int zpci_enable_device(struct zpci_dev *zdev)
if (clp_enable_fh(zdev, &fh, ZPCI_NR_DMA_SPACES))
rc = -EIO;
else
- zdev->fh = fh;
+ zpci_update_fh(zdev, fh);
return rc;
}
@@ -679,14 +707,14 @@ int zpci_disable_device(struct zpci_dev *zdev)
cc = clp_disable_fh(zdev, &fh);
if (!cc) {
- zdev->fh = fh;
+ zpci_update_fh(zdev, fh);
} else if (cc == CLP_RC_SETPCIFN_ALRDY) {
pr_info("Disabling PCI function %08x had no effect as it was already disabled\n",
zdev->fid);
/* Function is already disabled - update handle */
rc = clp_refresh_fh(zdev->fid, &fh);
if (!rc) {
- zdev->fh = fh;
+ zpci_update_fh(zdev, fh);
rc = -EINVAL;
}
} else {
@@ -696,6 +724,65 @@ int zpci_disable_device(struct zpci_dev *zdev)
}
/**
+ * zpci_hot_reset_device - perform a reset of the given zPCI function
+ * @zdev: the slot which should be reset
+ *
+ * Performs a low level reset of the zPCI function. The reset is low level in
+ * the sense that the zPCI function can be reset without detaching it from the
+ * common PCI subsystem. The reset may be performed while under control of
+ * either DMA or IOMMU APIs, in which case the existing DMA/IOMMU translation
+ * table is reinstated at the end of the reset.
+ *
+ * After the reset the function's internal state is reset to an initial state
+ * equivalent to its state during boot when first probing a driver.
+ * Consequently after reset the PCI function requires re-initialization via the
+ * common PCI code including re-enabling IRQs via pci_alloc_irq_vectors()
+ * and enabling the function via e.g. pci_enable_device_flags(). The caller
+ * must guard against concurrent reset attempts.
+ *
+ * In most cases this function should not be called directly but through
+ * pci_reset_function() or pci_reset_bus() which handle the save/restore and
+ * locking.
+ *
+ * Return: 0 on success and an error value otherwise
+ */
+int zpci_hot_reset_device(struct zpci_dev *zdev)
+{
+ int rc;
+
+ zpci_dbg(3, "rst fid:%x, fh:%x\n", zdev->fid, zdev->fh);
+ if (zdev_enabled(zdev)) {
+ /* Disables device access, DMAs and IRQs (reset state) */
+ rc = zpci_disable_device(zdev);
+ /*
+ * Due to a z/VM vs LPAR inconsistency in the error state the
+ * FH may indicate an enabled device but disable says the
+ * device is already disabled; don't treat it as an error here.
+ */
+ if (rc == -EINVAL)
+ rc = 0;
+ if (rc)
+ return rc;
+ }
+
+ rc = zpci_enable_device(zdev);
+ if (rc)
+ return rc;
+
+ if (zdev->dma_table)
+ rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
+ (u64)zdev->dma_table);
+ else
+ rc = zpci_dma_init_device(zdev);
+ if (rc) {
+ zpci_disable_device(zdev);
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
* zpci_create_device() - Create a new zpci_dev and add it to the zbus
* @fid: Function ID of the device to be created
* @fh: Current Function Handle of the device to be created
@@ -776,7 +863,7 @@ int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh)
{
int rc;
- zdev->fh = fh;
+ zpci_update_fh(zdev, fh);
/* the PCI function will be scanned once function 0 appears */
if (!zdev->zbus->bus)
return 0;
@@ -903,6 +990,59 @@ int zpci_report_error(struct pci_dev *pdev,
}
EXPORT_SYMBOL(zpci_report_error);
+/**
+ * zpci_clear_error_state() - Clears the zPCI error state of the device
+ * @zdev: The zdev for which the zPCI error state should be reset
+ *
+ * Clear the zPCI error state of the device. If clearing the zPCI error state
+ * fails, the device is left in the error state. In this case it may make sense
+ * to call zpci_io_perm_failure() on the associated pdev if it exists.
+ *
+ * Returns: 0 on success, -EIO otherwise
+ */
+int zpci_clear_error_state(struct zpci_dev *zdev)
+{
+ u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_RESET_ERROR);
+ struct zpci_fib fib = {0};
+ u8 status;
+ int cc;
+
+ cc = zpci_mod_fc(req, &fib, &status);
+ if (cc) {
+ zpci_dbg(3, "ces fid:%x, cc:%d, status:%x\n", zdev->fid, cc, status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * zpci_reset_load_store_blocked() - Re-enables L/S from error state
+ * @zdev: The zdev for which to unblock load/store access
+ *
+ * Re-enables load/store access for a PCI function in the error state while
+ * keeping DMA blocked. In this state drivers can poke MMIO space to determine
+ * if error recovery is possible while catching any rogue DMA access from the
+ * device.
+ *
+ * Returns: 0 on success, -EIO otherwise
+ */
+int zpci_reset_load_store_blocked(struct zpci_dev *zdev)
+{
+ u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_RESET_BLOCK);
+ struct zpci_fib fib = {0};
+ u8 status;
+ int cc;
+
+ cc = zpci_mod_fc(req, &fib, &status);
+ if (cc) {
+ zpci_dbg(3, "rls fid:%x, cc:%d, status:%x\n", zdev->fid, cc, status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
static int zpci_mem_init(void)
{
BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index 6a5bfa9dc1f2e7..2e3e5b2789257e 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -47,18 +47,223 @@ struct zpci_ccdf_avail {
u16 pec; /* PCI event code */
} __packed;
+static inline bool ers_result_indicates_abort(pci_ers_result_t ers_res)
+{
+ switch (ers_res) {
+ case PCI_ERS_RESULT_CAN_RECOVER:
+ case PCI_ERS_RESULT_RECOVERED:
+ case PCI_ERS_RESULT_NEED_RESET:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static bool is_passed_through(struct zpci_dev *zdev)
+{
+ return zdev->s390_domain;
+}
+
+static bool is_driver_supported(struct pci_driver *driver)
+{
+ if (!driver || !driver->err_handler)
+ return false;
+ if (!driver->err_handler->error_detected)
+ return false;
+ if (!driver->err_handler->slot_reset)
+ return false;
+ if (!driver->err_handler->resume)
+ return false;
+ return true;
+}
+
+static pci_ers_result_t zpci_event_notify_error_detected(struct pci_dev *pdev,
+ struct pci_driver *driver)
+{
+ pci_ers_result_t ers_res = PCI_ERS_RESULT_DISCONNECT;
+
+ ers_res = driver->err_handler->error_detected(pdev, pdev->error_state);
+ if (ers_result_indicates_abort(ers_res))
+ pr_info("%s: Automatic recovery failed after initial reporting\n", pci_name(pdev));
+ else if (ers_res == PCI_ERS_RESULT_NEED_RESET)
+ pr_debug("%s: Driver needs reset to recover\n", pci_name(pdev));
+
+ return ers_res;
+}
+
+static pci_ers_result_t zpci_event_do_error_state_clear(struct pci_dev *pdev,
+ struct pci_driver *driver)
+{
+ pci_ers_result_t ers_res = PCI_ERS_RESULT_DISCONNECT;
+ struct zpci_dev *zdev = to_zpci(pdev);
+ int rc;
+
+ pr_info("%s: Unblocking device access for examination\n", pci_name(pdev));
+ rc = zpci_reset_load_store_blocked(zdev);
+ if (rc) {
+ pr_err("%s: Unblocking device access failed\n", pci_name(pdev));
+ /* Let's try a full reset instead */
+ return PCI_ERS_RESULT_NEED_RESET;
+ }
+
+ if (driver->err_handler->mmio_enabled) {
+ ers_res = driver->err_handler->mmio_enabled(pdev);
+ if (ers_result_indicates_abort(ers_res)) {
+ pr_info("%s: Automatic recovery failed after MMIO re-enable\n",
+ pci_name(pdev));
+ return ers_res;
+ } else if (ers_res == PCI_ERS_RESULT_NEED_RESET) {
+ pr_debug("%s: Driver needs reset to recover\n", pci_name(pdev));
+ return ers_res;
+ }
+ }
+
+ pr_debug("%s: Unblocking DMA\n", pci_name(pdev));
+ rc = zpci_clear_error_state(zdev);
+ if (!rc) {
+ pdev->error_state = pci_channel_io_normal;
+ } else {
+ pr_err("%s: Unblocking DMA failed\n", pci_name(pdev));
+ /* Let's try a full reset instead */
+ return PCI_ERS_RESULT_NEED_RESET;
+ }
+
+ return ers_res;
+}
+
+static pci_ers_result_t zpci_event_do_reset(struct pci_dev *pdev,
+ struct pci_driver *driver)
+{
+ pci_ers_result_t ers_res = PCI_ERS_RESULT_DISCONNECT;
+
+ pr_info("%s: Initiating reset\n", pci_name(pdev));
+ if (zpci_hot_reset_device(to_zpci(pdev))) {
+ pr_err("%s: The reset request failed\n", pci_name(pdev));
+ return ers_res;
+ }
+ pdev->error_state = pci_channel_io_normal;
+ ers_res = driver->err_handler->slot_reset(pdev);
+ if (ers_result_indicates_abort(ers_res)) {
+ pr_info("%s: Automatic recovery failed after slot reset\n", pci_name(pdev));
+ return ers_res;
+ }
+
+ return ers_res;
+}
+
+/* zpci_event_attempt_error_recovery - Try to recover the given PCI function
+ * @pdev: PCI function to recover currently in the error state
+ *
+ * We follow the scheme outlined in Documentation/PCI/pci-error-recovery.rst,
+ * with the simplification that recovery always happens per function
+ * and that the platform determines which functions are affected for
+ * multi-function devices.
+ */
+static pci_ers_result_t zpci_event_attempt_error_recovery(struct pci_dev *pdev)
+{
+ pci_ers_result_t ers_res = PCI_ERS_RESULT_DISCONNECT;
+ struct pci_driver *driver;
+
+ /*
+ * Ensure that the PCI function is not removed concurrently, no driver
+ * is unbound or probed and that userspace can't access its
+ * configuration space while we perform recovery.
+ */
+ pci_dev_lock(pdev);
+ if (pdev->error_state == pci_channel_io_perm_failure) {
+ ers_res = PCI_ERS_RESULT_DISCONNECT;
+ goto out_unlock;
+ }
+ pdev->error_state = pci_channel_io_frozen;
+
+ if (is_passed_through(to_zpci(pdev))) {
+ pr_info("%s: Cannot be recovered in the host because it is a pass-through device\n",
+ pci_name(pdev));
+ goto out_unlock;
+ }
+
+ driver = to_pci_driver(pdev->dev.driver);
+ if (!is_driver_supported(driver)) {
+ if (!driver)
+ pr_info("%s: Cannot be recovered because no driver is bound to the device\n",
+ pci_name(pdev));
+ else
+ pr_info("%s: The %s driver bound to the device does not support error recovery\n",
+ pci_name(pdev),
+ driver->name);
+ goto out_unlock;
+ }
+
+ ers_res = zpci_event_notify_error_detected(pdev, driver);
+ if (ers_result_indicates_abort(ers_res))
+ goto out_unlock;
+
+ if (ers_res == PCI_ERS_RESULT_CAN_RECOVER) {
+ ers_res = zpci_event_do_error_state_clear(pdev, driver);
+ if (ers_result_indicates_abort(ers_res))
+ goto out_unlock;
+ }
+
+ if (ers_res == PCI_ERS_RESULT_NEED_RESET)
+ ers_res = zpci_event_do_reset(pdev, driver);
+
+ if (ers_res != PCI_ERS_RESULT_RECOVERED) {
+ pr_err("%s: Automatic recovery failed; operator intervention is required\n",
+ pci_name(pdev));
+ goto out_unlock;
+ }
+
+ pr_info("%s: The device is ready to resume operations\n", pci_name(pdev));
+ if (driver->err_handler->resume)
+ driver->err_handler->resume(pdev);
+out_unlock:
+ pci_dev_unlock(pdev);
+
+ return ers_res;
+}
+
+/* zpci_event_io_failure - Report PCI channel failure state to driver
+ * @pdev: PCI function for which to report
+ * @es: PCI channel failure state to report
+ */
+static void zpci_event_io_failure(struct pci_dev *pdev, pci_channel_state_t es)
+{
+ struct pci_driver *driver;
+
+ pci_dev_lock(pdev);
+ pdev->error_state = es;
+ /**
+ * While vfio-pci's error_detected callback notifies user-space, QEMU
+ * reacts to this by freezing the guest. In an s390 environment PCI
+ * errors are rarely fatal, so this is overkill. Instead, in the future
+ * we will inject the error event and let the guest recover the device
+ * itself.
+ */
+ if (is_passed_through(to_zpci(pdev)))
+ goto out;
+ driver = to_pci_driver(pdev->dev.driver);
+ if (driver && driver->err_handler && driver->err_handler->error_detected)
+ driver->err_handler->error_detected(pdev, pdev->error_state);
+out:
+ pci_dev_unlock(pdev);
+}
+
static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
{
struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
struct pci_dev *pdev = NULL;
+ pci_ers_result_t ers_res;
zpci_dbg(3, "err fid:%x, fh:%x, pec:%x\n",
ccdf->fid, ccdf->fh, ccdf->pec);
zpci_err("error CCDF:\n");
zpci_err_hex(ccdf, sizeof(*ccdf));
- if (zdev)
- pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
+ if (zdev) {
+ zpci_update_fh(zdev, ccdf->fh);
+ if (zdev->zbus->bus)
+ pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
+ }
pr_err("%s: Event 0x%x reports an error for PCI function 0x%x\n",
pdev ? pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid);
@@ -66,7 +271,20 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
if (!pdev)
return;
- pdev->error_state = pci_channel_io_perm_failure;
+ switch (ccdf->pec) {
+ case 0x003a: /* Service Action or Error Recovery Successful */
+ ers_res = zpci_event_attempt_error_recovery(pdev);
+ if (ers_res != PCI_ERS_RESULT_RECOVERED)
+ zpci_event_io_failure(pdev, pci_channel_io_perm_failure);
+ break;
+ default:
+ /*
+ * Mark as frozen, not permanently failed, because the device
+ * could be subsequently recovered by the platform.
+ */
+ zpci_event_io_failure(pdev, pci_channel_io_frozen);
+ break;
+ }
pci_dev_put(pdev);
}
@@ -78,7 +296,7 @@ void zpci_event_error(void *data)
static void zpci_event_hard_deconfigured(struct zpci_dev *zdev, u32 fh)
{
- zdev->fh = fh;
+ zpci_update_fh(zdev, fh);
/* Give the driver a hint that the function is
* already unusable.
*/
@@ -121,7 +339,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
if (!zdev)
zpci_create_device(ccdf->fid, ccdf->fh, ZPCI_FN_STATE_STANDBY);
else
- zdev->fh = ccdf->fh;
+ zpci_update_fh(zdev, ccdf->fh);
break;
case 0x0303: /* Deconfiguration requested */
if (zdev) {
@@ -130,7 +348,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
*/
if (zdev->state != ZPCI_FN_STATE_CONFIGURED)
break;
- zdev->fh = ccdf->fh;
+ zpci_update_fh(zdev, ccdf->fh);
zpci_deconfigure_device(zdev);
}
break;
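
As an illustrative aside (not part of the patch), zpci_event_attempt_error_recovery() above follows the usual PCI error-recovery sequence: error_detected, optionally mmio_enabled after load/store is unblocked, slot_reset after a hot reset, and finally resume, aborting whenever a step reports disconnect. A minimal userspace sketch of that flow with stub driver callbacks (the enum values and callbacks are stand-ins, not the real pci_ers_result_t API):

#include <stdio.h>
#include <stdbool.h>

/* Simplified stand-ins for pci_ers_result_t values. */
enum ers { ERS_CAN_RECOVER, ERS_RECOVERED, ERS_NEED_RESET, ERS_DISCONNECT };

static bool indicates_abort(enum ers r)
{
	return r != ERS_CAN_RECOVER && r != ERS_RECOVERED && r != ERS_NEED_RESET;
}

/* Hypothetical driver callbacks standing in for the err_handler ops. */
static enum ers drv_error_detected(void) { return ERS_CAN_RECOVER; }
static enum ers drv_mmio_enabled(void)   { return ERS_NEED_RESET;  }
static enum ers drv_slot_reset(void)     { return ERS_RECOVERED;   }
static void     drv_resume(void)         { puts("resume");         }

int main(void)
{
	enum ers r = drv_error_detected();

	if (indicates_abort(r))
		return 1;
	if (r == ERS_CAN_RECOVER)   /* unblock load/store, then let the driver poke MMIO */
		r = drv_mmio_enabled();
	if (r == ERS_NEED_RESET)    /* hot reset, then let the driver redo its setup */
		r = drv_slot_reset();
	if (r != ERS_RECOVERED)
		return 1;
	drv_resume();
	return 0;
}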
diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c
index 2e43996159f058..28d863aaafea53 100644
--- a/arch/s390/pci/pci_insn.c
+++ b/arch/s390/pci/pci_insn.c
@@ -163,7 +163,7 @@ static inline int zpci_load_fh(u64 *data, const volatile void __iomem *addr,
unsigned long len)
{
struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)];
- u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, len);
+ u64 req = ZPCI_CREATE_REQ(READ_ONCE(entry->fh), entry->bar, len);
return __zpci_load(data, req, ZPCI_OFFSET(addr));
}
@@ -244,7 +244,7 @@ static inline int zpci_store_fh(const volatile void __iomem *addr, u64 data,
unsigned long len)
{
struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)];
- u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, len);
+ u64 req = ZPCI_CREATE_REQ(READ_ONCE(entry->fh), entry->bar, len);
return __zpci_store(data, req, ZPCI_OFFSET(addr));
}
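
As a hedged aside (not part of the patch), the function handle in the iomap table is now published with WRITE_ONCE() under the iomap lock and sampled with READ_ONCE() on the lockless load/store path, so each request sees one consistent handle. A rough userspace analogue of that single-writer, lock-free-reader pattern using C11 atomics (the kernel uses READ_ONCE/WRITE_ONCE rather than <stdatomic.h>, and the request encoding below is made up):

#include <stdio.h>
#include <stdatomic.h>

static _Atomic unsigned int iomap_fh;

/* Writer: in the kernel this runs under the iomap lock. */
static void update_fh(unsigned int new_fh)
{
	atomic_store_explicit(&iomap_fh, new_fh, memory_order_relaxed);
}

/* Reader: snapshot the handle exactly once per request, no lock taken. */
static unsigned int build_req(unsigned int bar, unsigned int len)
{
	unsigned int fh = atomic_load_explicit(&iomap_fh, memory_order_relaxed);

	return (fh << 16) | (bar << 8) | len;   /* made-up encoding */
}

int main(void)
{
	update_fh(0x1234);
	printf("req=0x%x\n", build_req(1, 4));
	return 0;
}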
diff --git a/arch/s390/pci/pci_irq.c b/arch/s390/pci/pci_irq.c
index 3823e159bf7491..954bb7a8312413 100644
--- a/arch/s390/pci/pci_irq.c
+++ b/arch/s390/pci/pci_irq.c
@@ -387,6 +387,15 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
airq_iv_free(zpci_ibv[0], zdev->msi_first_bit, zdev->msi_nr_irqs);
}
+void arch_restore_msi_irqs(struct pci_dev *pdev)
+{
+ struct zpci_dev *zdev = to_zpci(pdev);
+
+ if (!zdev->irqs_registered)
+ zpci_set_irq(zdev);
+ default_restore_msi_irqs(pdev);
+}
+
static struct airq_struct zpci_airq = {
.handler = zpci_floating_irq_handler,
.isc = PCI_ISC,
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 603964408d2d7e..42cf01ecdd1317 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3048,8 +3048,10 @@ intel_vlbr_constraints(struct perf_event *event)
{
struct event_constraint *c = &vlbr_constraint;
- if (unlikely(constraint_match(c, event->hw.config)))
+ if (unlikely(constraint_match(c, event->hw.config))) {
+ event->hw.flags |= c->flags;
return c;
+ }
return NULL;
}
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 6b72e9b55c69cf..8043213b75a52f 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -265,6 +265,8 @@ void intel_pmu_lbr_reset(void)
cpuc->last_task_ctx = NULL;
cpuc->last_log_id = 0;
+ if (!static_cpu_has(X86_FEATURE_ARCH_LBR) && cpuc->lbr_select)
+ wrmsrl(MSR_LBR_SELECT, 0);
}
/*
diff --git a/arch/x86/include/asm/fpu/xcr.h b/arch/x86/include/asm/fpu/xcr.h
index 79f95d3787e219..9656a5bc6feae4 100644
--- a/arch/x86/include/asm/fpu/xcr.h
+++ b/arch/x86/include/asm/fpu/xcr.h
@@ -3,6 +3,7 @@
#define _ASM_X86_FPU_XCR_H
#define XCR_XFEATURE_ENABLED_MASK 0x00000000
+#define XCR_XFEATURE_IN_USE_MASK 0x00000001
static inline u64 xgetbv(u32 index)
{
@@ -20,4 +21,15 @@ static inline void xsetbv(u32 index, u64 value)
asm volatile("xsetbv" :: "a" (eax), "d" (edx), "c" (index));
}
+/*
+ * Return a mask of xfeatures which are currently being tracked
+ * by the processor as being in the initial configuration.
+ *
+ * Callers should check X86_FEATURE_XGETBV1.
+ */
+static inline u64 xfeatures_in_use(void)
+{
+ return xgetbv(XCR_XFEATURE_IN_USE_MASK);
+}
+
#endif /* _ASM_X86_FPU_XCR_H */
diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/xstate.h
index 0f8b90ab18c9b1..cd3dd170e23a6d 100644
--- a/arch/x86/include/asm/fpu/xstate.h
+++ b/arch/x86/include/asm/fpu/xstate.h
@@ -92,6 +92,13 @@
#define XFEATURE_MASK_FPSTATE (XFEATURE_MASK_USER_RESTORE | \
XFEATURE_MASK_SUPERVISOR_SUPPORTED)
+/*
+ * Features in this mask have space allocated in the signal frame, but may not
+ * have that space initialized when the feature is in its init state.
+ */
+#define XFEATURE_MASK_SIGFRAME_INITOPT (XFEATURE_MASK_XTILE | \
+ XFEATURE_MASK_USER_DYNAMIC)
+
extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
extern void __init update_regset_xstate_info(unsigned int size,
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 27158436f322dc..5a0bcf8b78d7c2 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -108,6 +108,8 @@
#define INTEL_FAM6_ALDERLAKE 0x97 /* Golden Cove / Gracemont */
#define INTEL_FAM6_ALDERLAKE_L 0x9A /* Golden Cove / Gracemont */
+#define INTEL_FAM6_RAPTOR_LAKE 0xB7
+
/* "Small Core" Processors (Atom) */
#define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 2acf37cc1991a5..e5d8700319cc08 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -38,7 +38,6 @@
#define __KVM_HAVE_ARCH_VCPU_DEBUGFS
#define KVM_MAX_VCPUS 1024
-#define KVM_SOFT_MAX_VCPUS 710
/*
* In x86, the VCPU ID corresponds to the APIC ID, and APIC IDs
@@ -725,6 +724,7 @@ struct kvm_vcpu_arch {
int cpuid_nent;
struct kvm_cpuid_entry2 *cpuid_entries;
+ u32 kvm_cpuid_base;
u64 reserved_gpa_bits;
int maxphyaddr;
@@ -748,7 +748,7 @@ struct kvm_vcpu_arch {
u8 preempted;
u64 msr_val;
u64 last_steal;
- struct gfn_to_pfn_cache cache;
+ struct gfn_to_hva_cache cache;
} st;
u64 l1_tsc_offset;
@@ -1034,6 +1034,7 @@ struct kvm_x86_msr_filter {
#define APICV_INHIBIT_REASON_IRQWIN 3
#define APICV_INHIBIT_REASON_PIT_REINJ 4
#define APICV_INHIBIT_REASON_X2APIC 5
+#define APICV_INHIBIT_REASON_BLOCKIRQ 6
struct kvm_arch {
unsigned long n_used_mmu_pages;
@@ -1476,6 +1477,7 @@ struct kvm_x86_ops {
int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
int (*vm_copy_enc_context_from)(struct kvm *kvm, unsigned int source_fd);
+ int (*vm_move_enc_context_from)(struct kvm *kvm, unsigned int source_fd);
int (*get_msr_feature)(struct kvm_msr_entry *entry);
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 69299878b200a2..56935ebb1dfe1a 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -83,6 +83,18 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
return ret;
}
+static inline long kvm_sev_hypercall3(unsigned int nr, unsigned long p1,
+ unsigned long p2, unsigned long p3)
+{
+ long ret;
+
+ asm volatile("vmmcall"
+ : "=a"(ret)
+ : "a"(nr), "b"(p1), "c"(p2), "d"(p3)
+ : "memory");
+ return ret;
+}
+
#ifdef CONFIG_KVM_GUEST
void kvmclock_init(void);
void kvmclock_disable(void);
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 2d4f5c17d79cc5..e2c6f433ed100b 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -44,6 +44,8 @@ void __init sme_enable(struct boot_params *bp);
int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size);
int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
+void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages,
+ bool enc);
void __init mem_encrypt_free_decrypted_mem(void);
@@ -78,6 +80,8 @@ static inline int __init
early_set_memory_decrypted(unsigned long vaddr, unsigned long size) { return 0; }
static inline int __init
early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; }
+static inline void __init
+early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc) {}
static inline void mem_encrypt_free_decrypted_mem(void) { }
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index cebec95a7124fd..21c4a694ca114e 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -97,6 +97,12 @@ static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
PVOP_VCALL1(mmu.exit_mmap, mm);
}
+static inline void notify_page_enc_status_changed(unsigned long pfn,
+ int npages, bool enc)
+{
+ PVOP_VCALL3(mmu.notify_page_enc_status_changed, pfn, npages, enc);
+}
+
#ifdef CONFIG_PARAVIRT_XXL
static inline void load_sp0(unsigned long sp0)
{
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index fc1151e77569c0..a69012e1903f1d 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -168,6 +168,7 @@ struct pv_mmu_ops {
/* Hook for intercepting the destruction of an mm_struct. */
void (*exit_mmap)(struct mm_struct *mm);
+ void (*notify_page_enc_status_changed)(unsigned long pfn, int npages, bool enc);
#ifdef CONFIG_PARAVIRT_XXL
struct paravirt_callee_save read_cr2;
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 191878a65c61a9..355d38c0cf6060 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -806,11 +806,14 @@ static inline u32 amd_get_nodes_per_socket(void) { return 0; }
static inline u32 amd_get_highest_perf(void) { return 0; }
#endif
+#define for_each_possible_hypervisor_cpuid_base(function) \
+ for (function = 0x40000000; function < 0x40010000; function += 0x100)
+
static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
{
uint32_t base, eax, signature[3];
- for (base = 0x40000000; base < 0x40010000; base += 0x100) {
+ for_each_possible_hypervisor_cpuid_base(base) {
cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);
if (!memcmp(sig, signature, 12) &&
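
As an illustrative aside (not part of the patch), the new for_each_possible_hypervisor_cpuid_base() helper just walks the 0x40000000..0x4000ff00 leaves in steps of 0x100 and compares the twelve signature bytes returned in EBX/ECX/EDX. A standalone sketch of that scan with a stubbed CPUID (the stub and the chosen base are invented for the example):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Stub standing in for the CPUID instruction. */
static void fake_cpuid(uint32_t leaf, uint32_t *eax, uint32_t *sig)
{
	if (leaf == 0x40000100) {               /* pretend the hypervisor sits at the 2nd slot */
		memcpy(sig, "KVMKVMKVM\0\0\0", 12);
		*eax = leaf | 0x40;             /* made-up max supported leaf */
	} else {
		memset(sig, 0, 12);
		*eax = 0;
	}
}

static uint32_t find_base(const char *wanted)
{
	uint32_t base, eax, sig[3];

	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
		fake_cpuid(base, &eax, sig);
		if (!memcmp(wanted, sig, 12))
			return base;
	}
	return 0;
}

int main(void)
{
	printf("hypervisor base: 0x%x\n", find_base("KVMKVMKVM\0\0\0"));
	return 0;
}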
diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h
index 43fa081a1adb23..872617542bbc00 100644
--- a/arch/x86/include/asm/set_memory.h
+++ b/arch/x86/include/asm/set_memory.h
@@ -83,6 +83,7 @@ int set_pages_rw(struct page *page, int numpages);
int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);
bool kernel_page_present(struct page *page);
+void notify_range_enc_status_changed(unsigned long vaddr, int npages, bool enc);
extern int kernel_set_to_readonly;
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 08b0e90623ad4f..81a0211a372d3d 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -126,6 +126,7 @@ static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
void cpu_disable_common(void);
void native_smp_prepare_boot_cpu(void);
+void smp_prepare_cpus_common(void);
void native_smp_prepare_cpus(unsigned int max_cpus);
void calculate_max_logical_packages(void);
void native_smp_cpus_done(unsigned int max_cpus);
diff --git a/arch/x86/include/asm/static_call.h b/arch/x86/include/asm/static_call.h
index cbb67b6030f978..39ebe05118691d 100644
--- a/arch/x86/include/asm/static_call.h
+++ b/arch/x86/include/asm/static_call.h
@@ -27,6 +27,7 @@
".globl " STATIC_CALL_TRAMP_STR(name) " \n" \
STATIC_CALL_TRAMP_STR(name) ": \n" \
insns " \n" \
+ ".byte 0x53, 0x43, 0x54 \n" \
".type " STATIC_CALL_TRAMP_STR(name) ", @function \n" \
".size " STATIC_CALL_TRAMP_STR(name) ", . - " STATIC_CALL_TRAMP_STR(name) " \n" \
".popsection \n")
diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h
index 5146bbab84d4ca..6e64b27b2c1ee0 100644
--- a/arch/x86/include/uapi/asm/kvm_para.h
+++ b/arch/x86/include/uapi/asm/kvm_para.h
@@ -8,6 +8,7 @@
* should be used to determine that a VM is running under KVM.
*/
#define KVM_CPUID_SIGNATURE 0x40000000
+#define KVM_SIGNATURE "KVMKVMKVM\0\0\0"
/* This CPUID returns two feature bitmaps in eax, edx. Before enabling
* a particular paravirtualization, the appropriate feature bit should
diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c
index cb2fdd130aaee3..c881bcafba7d70 100644
--- a/arch/x86/kernel/cpu/cpuid-deps.c
+++ b/arch/x86/kernel/cpu/cpuid-deps.c
@@ -76,6 +76,7 @@ static const struct cpuid_dep cpuid_deps[] = {
{ X86_FEATURE_SGX1, X86_FEATURE_SGX },
{ X86_FEATURE_SGX2, X86_FEATURE_SGX1 },
{ X86_FEATURE_XFD, X86_FEATURE_XSAVES },
+ { X86_FEATURE_XFD, X86_FEATURE_XGETBV1 },
{ X86_FEATURE_AMX_TILE, X86_FEATURE_XFD },
{}
};
diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c
index acfd5d9f93c68d..bb9a46a804bf21 100644
--- a/arch/x86/kernel/cpu/mce/intel.c
+++ b/arch/x86/kernel/cpu/mce/intel.c
@@ -547,12 +547,13 @@ bool intel_filter_mce(struct mce *m)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
- /* MCE errata HSD131, HSM142, HSW131, BDM48, and HSM142 */
+ /* MCE errata HSD131, HSM142, HSW131, BDM48, HSM142 and SKX37 */
if ((c->x86 == 6) &&
((c->x86_model == INTEL_FAM6_HASWELL) ||
(c->x86_model == INTEL_FAM6_HASWELL_L) ||
(c->x86_model == INTEL_FAM6_BROADWELL) ||
- (c->x86_model == INTEL_FAM6_HASWELL_G)) &&
+ (c->x86_model == INTEL_FAM6_HASWELL_G) ||
+ (c->x86_model == INTEL_FAM6_SKYLAKE_X)) &&
(m->bank == 0) &&
((m->status & 0xa0000000ffffffff) == 0x80000000000f0005))
return true;
diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h
index e18210dff88cd0..86ea7c0fa2f65e 100644
--- a/arch/x86/kernel/fpu/xstate.h
+++ b/arch/x86/kernel/fpu/xstate.h
@@ -4,6 +4,7 @@
#include <asm/cpufeature.h>
#include <asm/fpu/xstate.h>
+#include <asm/fpu/xcr.h>
#ifdef CONFIG_X86_64
DECLARE_PER_CPU(u64, xfd_state);
@@ -199,6 +200,32 @@ static inline void os_xrstor_supervisor(struct fpstate *fpstate)
}
/*
+ * XSAVE itself always writes all requested xfeatures. Removing features
+ * from the request bitmap reduces the features which are written.
+ * Generate a mask of features which must be written to a sigframe. The
+ * unset features can be optimized away and not written.
+ *
+ * This optimization is user-visible. Only use for states where
+ * uninitialized sigframe contents are tolerable, like dynamic features.
+ *
+ * Users of buffers produced with this optimization must check XSTATE_BV
+ * to determine which features have been optimized out.
+ */
+static inline u64 xfeatures_need_sigframe_write(void)
+{
+ u64 xfeaures_to_write;
+
+ /* In-use features must be written: */
+ xfeaures_to_write = xfeatures_in_use();
+
+ /* Also write all non-optimizable sigframe features: */
+ xfeaures_to_write |= XFEATURE_MASK_USER_SUPPORTED &
+ ~XFEATURE_MASK_SIGFRAME_INITOPT;
+
+ return xfeaures_to_write;
+}
+
+/*
* Save xstate to user space xsave area.
*
* We don't use modified optimization because xrstor/xrstors might track
@@ -220,10 +247,16 @@ static inline int xsave_to_user_sigframe(struct xregs_state __user *buf)
*/
struct fpstate *fpstate = current->thread.fpu.fpstate;
u64 mask = fpstate->user_xfeatures;
- u32 lmask = mask;
- u32 hmask = mask >> 32;
+ u32 lmask;
+ u32 hmask;
int err;
+ /* Optimize away writing unnecessary xfeatures: */
+ if (fpu_state_size_dynamic())
+ mask &= xfeatures_need_sigframe_write();
+
+ lmask = mask;
+ hmask = mask >> 32;
xfd_validate_state(fpstate, mask, false);
stac();
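
As a hedged aside (not part of the patch), xfeatures_need_sigframe_write() above combines the features the CPU currently tracks as in use (XGETBV(1)) with every user feature that is not init-optimizable, and that combined mask is what xsave_to_user_sigframe() requests when dynamic state is enabled. A small sketch of the mask arithmetic with invented bit assignments (these are not the real XFEATURE numbers):

#include <stdio.h>
#include <stdint.h>

/* Made-up example bits, not the real XFEATURE layout. */
#define F_FP      (1ULL << 0)
#define F_SSE     (1ULL << 1)
#define F_AVX     (1ULL << 2)
#define F_XTILE   (1ULL << 3)

#define USER_SUPPORTED   (F_FP | F_SSE | F_AVX | F_XTILE)
#define SIGFRAME_INITOPT (F_XTILE)        /* may stay uninitialized in the frame */

static uint64_t xfeatures_in_use_stub(void)
{
	return F_FP | F_SSE;              /* pretend XGETBV(1) result */
}

int main(void)
{
	uint64_t to_write = xfeatures_in_use_stub();

	/* Non-optimizable user features are always written to the sigframe. */
	to_write |= USER_SUPPORTED & ~SIGFRAME_INITOPT;

	printf("sigframe write mask: 0x%llx\n", (unsigned long long)to_write);
	return 0;
}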
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 8863d1941f1bed..59abbdad7729c8 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -28,6 +28,7 @@
#include <linux/swait.h>
#include <linux/syscore_ops.h>
#include <linux/cc_platform.h>
+#include <linux/efi.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
@@ -41,6 +42,7 @@
#include <asm/ptrace.h>
#include <asm/reboot.h>
#include <asm/svm.h>
+#include <asm/e820/api.h>
DEFINE_STATIC_KEY_FALSE(kvm_async_pf_enabled);
@@ -434,6 +436,8 @@ static void kvm_guest_cpu_offline(bool shutdown)
kvm_disable_steal_time();
if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
wrmsrl(MSR_KVM_PV_EOI_EN, 0);
+ if (kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL))
+ wrmsrl(MSR_KVM_MIGRATION_CONTROL, 0);
kvm_pv_disable_apf();
if (!shutdown)
apf_task_wake_all();
@@ -548,6 +552,55 @@ static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
__send_ipi_mask(local_mask, vector);
}
+static int __init setup_efi_kvm_sev_migration(void)
+{
+ efi_char16_t efi_sev_live_migration_enabled[] = L"SevLiveMigrationEnabled";
+ efi_guid_t efi_variable_guid = AMD_SEV_MEM_ENCRYPT_GUID;
+ efi_status_t status;
+ unsigned long size;
+ bool enabled;
+
+ if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) ||
+ !kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL))
+ return 0;
+
+ if (!efi_enabled(EFI_BOOT))
+ return 0;
+
+ if (!efi_enabled(EFI_RUNTIME_SERVICES)) {
+ pr_info("%s : EFI runtime services are not enabled\n", __func__);
+ return 0;
+ }
+
+ size = sizeof(enabled);
+
+ /* Get variable contents into buffer */
+ status = efi.get_variable(efi_sev_live_migration_enabled,
+ &efi_variable_guid, NULL, &size, &enabled);
+
+ if (status == EFI_NOT_FOUND) {
+ pr_info("%s : EFI live migration variable not found\n", __func__);
+ return 0;
+ }
+
+ if (status != EFI_SUCCESS) {
+ pr_info("%s : EFI variable retrieval failed\n", __func__);
+ return 0;
+ }
+
+ if (enabled == 0) {
+ pr_info("%s: live migration disabled in EFI\n", __func__);
+ return 0;
+ }
+
+ pr_info("%s : live migration enabled in EFI\n", __func__);
+ wrmsrl(MSR_KVM_MIGRATION_CONTROL, KVM_MIGRATION_READY);
+
+ return 1;
+}
+
+late_initcall(setup_efi_kvm_sev_migration);
+
/*
* Set the IPI entry points
*/
@@ -756,7 +809,7 @@ static noinline uint32_t __kvm_cpuid_base(void)
return 0; /* So we don't blow up on old processors */
if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
- return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);
+ return hypervisor_cpuid_base(KVM_SIGNATURE, 0);
return 0;
}
@@ -806,8 +859,62 @@ static bool __init kvm_msi_ext_dest_id(void)
return kvm_para_has_feature(KVM_FEATURE_MSI_EXT_DEST_ID);
}
+static void kvm_sev_hc_page_enc_status(unsigned long pfn, int npages, bool enc)
+{
+ kvm_sev_hypercall3(KVM_HC_MAP_GPA_RANGE, pfn << PAGE_SHIFT, npages,
+ KVM_MAP_GPA_RANGE_ENC_STAT(enc) | KVM_MAP_GPA_RANGE_PAGE_SZ_4K);
+}
+
static void __init kvm_init_platform(void)
{
+ if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) &&
+ kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL)) {
+ unsigned long nr_pages;
+ int i;
+
+ pv_ops.mmu.notify_page_enc_status_changed =
+ kvm_sev_hc_page_enc_status;
+
+ /*
+ * Reset the host's shared pages list related to kernel
+ * specific page encryption status settings before we load a
+ * new kernel by kexec. Reset the page encryption status
+ * during early boot instead of just before kexec to avoid SMP
+ * races during kvm_pv_guest_cpu_reboot().
+ * NOTE: We cannot reset the complete shared pages list
+ * here as we need to retain the UEFI/OVMF firmware
+ * specific settings.
+ */
+
+ for (i = 0; i < e820_table->nr_entries; i++) {
+ struct e820_entry *entry = &e820_table->entries[i];
+
+ if (entry->type != E820_TYPE_RAM)
+ continue;
+
+ nr_pages = DIV_ROUND_UP(entry->size, PAGE_SIZE);
+
+ kvm_sev_hypercall3(KVM_HC_MAP_GPA_RANGE, entry->addr,
+ nr_pages,
+ KVM_MAP_GPA_RANGE_ENCRYPTED | KVM_MAP_GPA_RANGE_PAGE_SZ_4K);
+ }
+
+ /*
+ * Ensure that _bss_decrypted section is marked as decrypted in the
+ * shared pages list.
+ */
+ nr_pages = DIV_ROUND_UP(__end_bss_decrypted - __start_bss_decrypted,
+ PAGE_SIZE);
+ early_set_mem_enc_dec_hypercall((unsigned long)__start_bss_decrypted,
+ nr_pages, 0);
+
+ /*
+ * If not booted using EFI, enable live migration support.
+ */
+ if (!efi_enabled(EFI_BOOT))
+ wrmsrl(MSR_KVM_MIGRATION_CONTROL,
+ KVM_MIGRATION_READY);
+ }
kvmclock_init();
x86_platform.apic_post_init = kvm_apic_init;
}
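
As an illustrative aside (not part of the patch), the kvm_init_platform() loop above re-marks every E820 RAM range as encrypted by converting each range size to a page count with DIV_ROUND_UP and issuing KVM_HC_MAP_GPA_RANGE for it. A tiny sketch of that page-count computation over hypothetical ranges (the addresses and sizes are invented):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

struct range { unsigned long addr, size; };

int main(void)
{
	/* Hypothetical RAM ranges standing in for E820_TYPE_RAM entries. */
	struct range ram[] = { { 0x100000, 0x3ff00000 }, { 0xc0000000, 0x1000 } };
	unsigned long i;

	for (i = 0; i < sizeof(ram) / sizeof(ram[0]); i++) {
		unsigned long nr_pages = DIV_ROUND_UP(ram[i].size, PAGE_SIZE);

		/* The kernel would issue KVM_HC_MAP_GPA_RANGE for each range here. */
		printf("range 0x%lx: %lu pages marked encrypted\n",
		       ram[i].addr, nr_pages);
	}
	return 0;
}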
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 7157c2df3bc2ab..7f7636aac62091 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -337,6 +337,7 @@ struct paravirt_patch_template pv_ops = {
(void (*)(struct mmu_gather *, void *))tlb_remove_page,
.mmu.exit_mmap = paravirt_nop,
+ .mmu.notify_page_enc_status_changed = paravirt_nop,
#ifdef CONFIG_PARAVIRT_XXL
.mmu.read_cr2 = __PV_IS_CALLEE_SAVE(pv_native_read_cr2),
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 8241927addfff1..ac2909f0cab347 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1350,12 +1350,7 @@ static void __init smp_get_logical_apicid(void)
cpu0_logical_apicid = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
}
-/*
- * Prepare for SMP bootup.
- * @max_cpus: configured maximum number of CPUs, It is a legacy parameter
- * for common interface support.
- */
-void __init native_smp_prepare_cpus(unsigned int max_cpus)
+void __init smp_prepare_cpus_common(void)
{
unsigned int i;
@@ -1386,6 +1381,17 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
set_sched_topology(x86_topology);
set_cpu_sibling_map(0);
+}
+
+/*
+ * Prepare for SMP bootup.
+ * @max_cpus: configured maximum number of CPUs. It is a legacy parameter
+ * for common interface support.
+ */
+void __init native_smp_prepare_cpus(unsigned int max_cpus)
+{
+ smp_prepare_cpus_common();
+
init_freq_invariance(false, false);
smp_sanity_check();
diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c
index ea028e736831a3..9c407a33a77413 100644
--- a/arch/x86/kernel/static_call.c
+++ b/arch/x86/kernel/static_call.c
@@ -56,10 +56,15 @@ static void __ref __static_call_transform(void *insn, enum insn_type type, void
text_poke_bp(insn, code, size, emulate);
}
-static void __static_call_validate(void *insn, bool tail)
+static void __static_call_validate(void *insn, bool tail, bool tramp)
{
u8 opcode = *(u8 *)insn;
+ if (tramp && memcmp(insn+5, "SCT", 3)) {
+ pr_err("trampoline signature fail");
+ BUG();
+ }
+
if (tail) {
if (opcode == JMP32_INSN_OPCODE ||
opcode == RET_INSN_OPCODE)
@@ -74,7 +79,8 @@ static void __static_call_validate(void *insn, bool tail)
/*
* If we ever trigger this, our text is corrupt, we'll probably not live long.
*/
- WARN_ONCE(1, "unexpected static_call insn opcode 0x%x at %pS\n", opcode, insn);
+ pr_err("unexpected static_call insn opcode 0x%x at %pS\n", opcode, insn);
+ BUG();
}
static inline enum insn_type __sc_insn(bool null, bool tail)
@@ -97,12 +103,12 @@ void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
mutex_lock(&text_mutex);
if (tramp) {
- __static_call_validate(tramp, true);
+ __static_call_validate(tramp, true, true);
__static_call_transform(tramp, __sc_insn(!func, true), func);
}
if (IS_ENABLED(CONFIG_HAVE_STATIC_CALL_INLINE) && site) {
- __static_call_validate(site, tail);
+ __static_call_validate(site, tail, false);
__static_call_transform(site, __sc_insn(!func, tail), func);
}
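
As a hedged aside (not part of the patch), each static-call trampoline now carries the three bytes 0x53 0x43 0x54 ("SCT") right after its 5-byte jump, and __static_call_validate() refuses to patch a trampoline that lacks them. A minimal sketch of that signature check over a fake trampoline buffer:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Fake trampoline: 5-byte jmp rel32 followed by the 'S','C','T' tag. */
	unsigned char tramp[8] = { 0xe9, 0x00, 0x00, 0x00, 0x00, 0x53, 0x43, 0x54 };

	if (memcmp(tramp + 5, "SCT", 3) == 0)
		puts("trampoline signature ok");
	else
		puts("trampoline signature fail");
	return 0;
}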
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index f14f69d7aa3c2d..cce1c89cb7dfd4 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -106,7 +106,7 @@ void save_v86_state(struct kernel_vm86_regs *regs, int retval)
*/
local_irq_enable();
- BUG_ON(!vm86 || !vm86->user_vm86);
+ BUG_ON(!vm86);
set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->veflags_mask);
user = vm86->user_vm86;
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 2d70edb0f3233b..e19dabf1848b44 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -99,11 +99,45 @@ static int kvm_check_cpuid(struct kvm_cpuid_entry2 *entries, int nent)
return 0;
}
-void kvm_update_pv_runtime(struct kvm_vcpu *vcpu)
+static void kvm_update_kvm_cpuid_base(struct kvm_vcpu *vcpu)
{
- struct kvm_cpuid_entry2 *best;
+ u32 function;
+ struct kvm_cpuid_entry2 *entry;
+
+ vcpu->arch.kvm_cpuid_base = 0;
+
+ for_each_possible_hypervisor_cpuid_base(function) {
+ entry = kvm_find_cpuid_entry(vcpu, function, 0);
- best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0);
+ if (entry) {
+ u32 signature[3];
+
+ signature[0] = entry->ebx;
+ signature[1] = entry->ecx;
+ signature[2] = entry->edx;
+
+ BUILD_BUG_ON(sizeof(signature) > sizeof(KVM_SIGNATURE));
+ if (!memcmp(signature, KVM_SIGNATURE, sizeof(signature))) {
+ vcpu->arch.kvm_cpuid_base = function;
+ break;
+ }
+ }
+ }
+}
+
+struct kvm_cpuid_entry2 *kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu)
+{
+ u32 base = vcpu->arch.kvm_cpuid_base;
+
+ if (!base)
+ return NULL;
+
+ return kvm_find_cpuid_entry(vcpu, base | KVM_CPUID_FEATURES, 0);
+}
+
+void kvm_update_pv_runtime(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best = kvm_find_kvm_cpuid_features(vcpu);
/*
* save the feature bitmap to avoid cpuid lookup for every PV
@@ -142,7 +176,7 @@ void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
- best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0);
+ best = kvm_find_kvm_cpuid_features(vcpu);
if (kvm_hlt_in_guest(vcpu->kvm) && best &&
(best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
@@ -239,6 +273,26 @@ u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu)
return rsvd_bits(cpuid_maxphyaddr(vcpu), 63);
}
+static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
+ int nent)
+{
+ int r;
+
+ r = kvm_check_cpuid(e2, nent);
+ if (r)
+ return r;
+
+ kvfree(vcpu->arch.cpuid_entries);
+ vcpu->arch.cpuid_entries = e2;
+ vcpu->arch.cpuid_nent = nent;
+
+ kvm_update_kvm_cpuid_base(vcpu);
+ kvm_update_cpuid_runtime(vcpu);
+ kvm_vcpu_after_set_cpuid(vcpu);
+
+ return 0;
+}
+
/* when an old userspace process fills a new kernel module */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
struct kvm_cpuid *cpuid,
@@ -275,18 +329,9 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
e2[i].padding[2] = 0;
}
- r = kvm_check_cpuid(e2, cpuid->nent);
- if (r) {
+ r = kvm_set_cpuid(vcpu, e2, cpuid->nent);
+ if (r)
kvfree(e2);
- goto out_free_cpuid;
- }
-
- kvfree(vcpu->arch.cpuid_entries);
- vcpu->arch.cpuid_entries = e2;
- vcpu->arch.cpuid_nent = cpuid->nent;
-
- kvm_update_cpuid_runtime(vcpu);
- kvm_vcpu_after_set_cpuid(vcpu);
out_free_cpuid:
kvfree(e);
@@ -310,20 +355,11 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
return PTR_ERR(e2);
}
- r = kvm_check_cpuid(e2, cpuid->nent);
- if (r) {
+ r = kvm_set_cpuid(vcpu, e2, cpuid->nent);
+ if (r)
kvfree(e2);
- return r;
- }
- kvfree(vcpu->arch.cpuid_entries);
- vcpu->arch.cpuid_entries = e2;
- vcpu->arch.cpuid_nent = cpuid->nent;
-
- kvm_update_cpuid_runtime(vcpu);
- kvm_vcpu_after_set_cpuid(vcpu);
-
- return 0;
+ return r;
}
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
@@ -871,8 +907,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
}
break;
case KVM_CPUID_SIGNATURE: {
- static const char signature[12] = "KVMKVMKVM\0\0";
- const u32 *sigptr = (const u32 *)signature;
+ const u32 *sigptr = (const u32 *)KVM_SIGNATURE;
entry->eax = KVM_CPUID_FEATURES;
entry->ebx = sigptr[0];
entry->ecx = sigptr[1];
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 4f15c0165c05da..4a555f32885a8c 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1472,7 +1472,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) {
hv_vcpu->hv_vapic = data;
- if (kvm_lapic_enable_pv_eoi(vcpu, 0, 0))
+ if (kvm_lapic_set_pv_eoi(vcpu, 0, 0))
return 1;
break;
}
@@ -1490,7 +1490,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
return 1;
hv_vcpu->hv_vapic = data;
kvm_vcpu_mark_page_dirty(vcpu, gfn);
- if (kvm_lapic_enable_pv_eoi(vcpu,
+ if (kvm_lapic_set_pv_eoi(vcpu,
gfn_to_gpa(gfn) | KVM_MSR_ENABLED,
sizeof(struct hv_vp_assist_page)))
return 1;
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index d6ac32f3f650c0..759952dd122284 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -2856,25 +2856,30 @@ int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
return 0;
}
-int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
+int kvm_lapic_set_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
{
u64 addr = data & ~KVM_MSR_ENABLED;
struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data;
unsigned long new_len;
+ int ret;
if (!IS_ALIGNED(addr, 4))
return 1;
- vcpu->arch.pv_eoi.msr_val = data;
- if (!pv_eoi_enabled(vcpu))
- return 0;
+ if (data & KVM_MSR_ENABLED) {
+ if (addr == ghc->gpa && len <= ghc->len)
+ new_len = ghc->len;
+ else
+ new_len = len;
- if (addr == ghc->gpa && len <= ghc->len)
- new_len = ghc->len;
- else
- new_len = len;
+ ret = kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
+ if (ret)
+ return ret;
+ }
+
+ vcpu->arch.pv_eoi.msr_val = data;
- return kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
+ return 0;
}
int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index d7c25d0c135498..2b44e533fc8df3 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -127,7 +127,7 @@ int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
-int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len);
+int kvm_lapic_set_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len);
void kvm_lapic_exit(void);
#define VEC_POS(v) ((v) & (32 - 1))
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 323b5057d08f72..33794379949e01 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3191,17 +3191,17 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
new_spte |= PT_WRITABLE_MASK;
/*
- * Do not fix write-permission on the large spte. Since
- * we only dirty the first page into the dirty-bitmap in
+ * Do not fix write-permission on the large spte when
+ * dirty logging is enabled. Since we only dirty the
+ * first page into the dirty-bitmap in
* fast_pf_fix_direct_spte(), other pages are missed
* if its slot has dirty logging enabled.
*
* Instead, we let the slow page fault path create a
* normal spte to fix the access.
- *
- * See the comments in kvm_arch_commit_memory_region().
*/
- if (sp->role.level > PG_LEVEL_4K)
+ if (sp->role.level > PG_LEVEL_4K &&
+ kvm_slot_dirty_track_enabled(fault->slot))
break;
}
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 7c5dd83e52dec9..a54c3491af42c9 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -897,7 +897,7 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
struct kvm_page_fault *fault,
struct tdp_iter *iter)
{
- struct kvm_mmu_page *sp = sptep_to_sp(iter->sptep);
+ struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(iter->sptep));
u64 new_spte;
int ret = RET_PF_FIXED;
bool wrprot = false;
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 0772bad9165c55..09873f6488f7c0 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -319,7 +319,7 @@ void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
}
/* check if idx is a valid index to access PMU */
-int kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
+bool kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
return kvm_x86_ops.pmu_ops->is_valid_rdpmc_ecx(vcpu, idx);
}
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index 0e4f2b1fa9fbdc..59d6b76203d5b5 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -32,7 +32,7 @@ struct kvm_pmu_ops {
struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
unsigned int idx, u64 *mask);
struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
- int (*is_valid_rdpmc_ecx)(struct kvm_vcpu *vcpu, unsigned int idx);
+ bool (*is_valid_rdpmc_ecx)(struct kvm_vcpu *vcpu, unsigned int idx);
bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
@@ -149,7 +149,7 @@ void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);
void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
-int kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx);
+bool kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx);
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index 8052d92069e01f..affc0ea98d3022 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -904,7 +904,8 @@ bool svm_check_apicv_inhibit_reasons(ulong bit)
BIT(APICV_INHIBIT_REASON_NESTED) |
BIT(APICV_INHIBIT_REASON_IRQWIN) |
BIT(APICV_INHIBIT_REASON_PIT_REINJ) |
- BIT(APICV_INHIBIT_REASON_X2APIC);
+ BIT(APICV_INHIBIT_REASON_X2APIC) |
+ BIT(APICV_INHIBIT_REASON_BLOCKIRQ);
return supported & BIT(bit);
}
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index fdf587f19c5fb2..871c426ec389a9 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -181,14 +181,13 @@ static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
return get_gp_pmc_amd(pmu, base + pmc_idx, PMU_TYPE_COUNTER);
}
-/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
-static int amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
+static bool amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
idx &= ~(3u << 30);
- return (idx >= pmu->nr_arch_gp_counters);
+ return idx < pmu->nr_arch_gp_counters;
}
/* idx is the ECX register of RDPMC instruction */
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 1964b9a174beb4..902c52a8dd0c91 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -120,16 +120,26 @@ static bool __sev_recycle_asids(int min_asid, int max_asid)
return true;
}
+static int sev_misc_cg_try_charge(struct kvm_sev_info *sev)
+{
+ enum misc_res_type type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
+ return misc_cg_try_charge(type, sev->misc_cg, 1);
+}
+
+static void sev_misc_cg_uncharge(struct kvm_sev_info *sev)
+{
+ enum misc_res_type type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
+ misc_cg_uncharge(type, sev->misc_cg, 1);
+}
+
static int sev_asid_new(struct kvm_sev_info *sev)
{
int asid, min_asid, max_asid, ret;
bool retry = true;
- enum misc_res_type type;
- type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
WARN_ON(sev->misc_cg);
sev->misc_cg = get_current_misc_cg();
- ret = misc_cg_try_charge(type, sev->misc_cg, 1);
+ ret = sev_misc_cg_try_charge(sev);
if (ret) {
put_misc_cg(sev->misc_cg);
sev->misc_cg = NULL;
@@ -162,7 +172,7 @@ again:
return asid;
e_uncharge:
- misc_cg_uncharge(type, sev->misc_cg, 1);
+ sev_misc_cg_uncharge(sev);
put_misc_cg(sev->misc_cg);
sev->misc_cg = NULL;
return ret;
@@ -179,7 +189,6 @@ static void sev_asid_free(struct kvm_sev_info *sev)
{
struct svm_cpu_data *sd;
int cpu;
- enum misc_res_type type;
mutex_lock(&sev_bitmap_lock);
@@ -192,8 +201,7 @@ static void sev_asid_free(struct kvm_sev_info *sev)
mutex_unlock(&sev_bitmap_lock);
- type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
- misc_cg_uncharge(type, sev->misc_cg, 1);
+ sev_misc_cg_uncharge(sev);
put_misc_cg(sev->misc_cg);
sev->misc_cg = NULL;
}
@@ -590,7 +598,7 @@ static int sev_es_sync_vmsa(struct vcpu_svm *svm)
* traditional VMSA as it has been built so far (in prep
* for LAUNCH_UPDATE_VMSA) to be the initial SEV-ES state.
*/
- memcpy(svm->vmsa, save, sizeof(*save));
+ memcpy(svm->sev_es.vmsa, save, sizeof(*save));
return 0;
}
@@ -612,11 +620,11 @@ static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
* the VMSA memory content (i.e it will write the same memory region
* with the guest's key), so invalidate it first.
*/
- clflush_cache_range(svm->vmsa, PAGE_SIZE);
+ clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE);
vmsa.reserved = 0;
vmsa.handle = to_kvm_svm(kvm)->sev_info.handle;
- vmsa.address = __sme_pa(svm->vmsa);
+ vmsa.address = __sme_pa(svm->sev_es.vmsa);
vmsa.len = PAGE_SIZE;
ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
if (ret)
@@ -1536,6 +1544,201 @@ static bool cmd_allowed_from_miror(u32 cmd_id)
return false;
}
+static int sev_lock_for_migration(struct kvm *kvm)
+{
+ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+
+ /*
+ * Bail if this VM is already involved in a migration to avoid deadlock
+ * between two VMs trying to migrate to/from each other.
+ */
+ if (atomic_cmpxchg_acquire(&sev->migration_in_progress, 0, 1))
+ return -EBUSY;
+
+ mutex_lock(&kvm->lock);
+
+ return 0;
+}
+
+static void sev_unlock_after_migration(struct kvm *kvm)
+{
+ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+
+ mutex_unlock(&kvm->lock);
+ atomic_set_release(&sev->migration_in_progress, 0);
+}
+
+
+static int sev_lock_vcpus_for_migration(struct kvm *kvm)
+{
+ struct kvm_vcpu *vcpu;
+ int i, j;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (mutex_lock_killable(&vcpu->mutex))
+ goto out_unlock;
+ }
+
+ return 0;
+
+out_unlock:
+ kvm_for_each_vcpu(j, vcpu, kvm) {
+ if (i == j)
+ break;
+
+ mutex_unlock(&vcpu->mutex);
+ }
+ return -EINTR;
+}
+
+static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
+{
+ struct kvm_vcpu *vcpu;
+ int i;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ mutex_unlock(&vcpu->mutex);
+ }
+}
+
+static void sev_migrate_from(struct kvm_sev_info *dst,
+ struct kvm_sev_info *src)
+{
+ dst->active = true;
+ dst->asid = src->asid;
+ dst->handle = src->handle;
+ dst->pages_locked = src->pages_locked;
+
+ src->asid = 0;
+ src->active = false;
+ src->handle = 0;
+ src->pages_locked = 0;
+
+ INIT_LIST_HEAD(&dst->regions_list);
+ list_replace_init(&src->regions_list, &dst->regions_list);
+}
+
+static int sev_es_migrate_from(struct kvm *dst, struct kvm *src)
+{
+ int i;
+ struct kvm_vcpu *dst_vcpu, *src_vcpu;
+ struct vcpu_svm *dst_svm, *src_svm;
+
+ if (atomic_read(&src->online_vcpus) != atomic_read(&dst->online_vcpus))
+ return -EINVAL;
+
+ kvm_for_each_vcpu(i, src_vcpu, src) {
+ if (!src_vcpu->arch.guest_state_protected)
+ return -EINVAL;
+ }
+
+ kvm_for_each_vcpu(i, src_vcpu, src) {
+ src_svm = to_svm(src_vcpu);
+ dst_vcpu = kvm_get_vcpu(dst, i);
+ dst_svm = to_svm(dst_vcpu);
+
+ /*
+ * Transfer VMSA and GHCB state to the destination. Nullify and
+ * clear source fields as appropriate, the state now belongs to
+ * the destination.
+ */
+ memcpy(&dst_svm->sev_es, &src_svm->sev_es, sizeof(src_svm->sev_es));
+ dst_svm->vmcb->control.ghcb_gpa = src_svm->vmcb->control.ghcb_gpa;
+ dst_svm->vmcb->control.vmsa_pa = src_svm->vmcb->control.vmsa_pa;
+ dst_vcpu->arch.guest_state_protected = true;
+
+ memset(&src_svm->sev_es, 0, sizeof(src_svm->sev_es));
+ src_svm->vmcb->control.ghcb_gpa = INVALID_PAGE;
+ src_svm->vmcb->control.vmsa_pa = INVALID_PAGE;
+ src_vcpu->arch.guest_state_protected = false;
+ }
+ to_kvm_svm(src)->sev_info.es_active = false;
+ to_kvm_svm(dst)->sev_info.es_active = true;
+
+ return 0;
+}
+
+int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd)
+{
+ struct kvm_sev_info *dst_sev = &to_kvm_svm(kvm)->sev_info;
+ struct kvm_sev_info *src_sev, *cg_cleanup_sev;
+ struct file *source_kvm_file;
+ struct kvm *source_kvm;
+ bool charged = false;
+ int ret;
+
+ ret = sev_lock_for_migration(kvm);
+ if (ret)
+ return ret;
+
+ if (sev_guest(kvm)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ source_kvm_file = fget(source_fd);
+ if (!file_is_kvm(source_kvm_file)) {
+ ret = -EBADF;
+ goto out_fput;
+ }
+
+ source_kvm = source_kvm_file->private_data;
+ ret = sev_lock_for_migration(source_kvm);
+ if (ret)
+ goto out_fput;
+
+ if (!sev_guest(source_kvm)) {
+ ret = -EINVAL;
+ goto out_source;
+ }
+
+ src_sev = &to_kvm_svm(source_kvm)->sev_info;
+ dst_sev->misc_cg = get_current_misc_cg();
+ cg_cleanup_sev = dst_sev;
+ if (dst_sev->misc_cg != src_sev->misc_cg) {
+ ret = sev_misc_cg_try_charge(dst_sev);
+ if (ret)
+ goto out_dst_cgroup;
+ charged = true;
+ }
+
+ ret = sev_lock_vcpus_for_migration(kvm);
+ if (ret)
+ goto out_dst_cgroup;
+ ret = sev_lock_vcpus_for_migration(source_kvm);
+ if (ret)
+ goto out_dst_vcpu;
+
+ if (sev_es_guest(source_kvm)) {
+ ret = sev_es_migrate_from(kvm, source_kvm);
+ if (ret)
+ goto out_source_vcpu;
+ }
+ sev_migrate_from(dst_sev, src_sev);
+ kvm_vm_dead(source_kvm);
+ cg_cleanup_sev = src_sev;
+ ret = 0;
+
+out_source_vcpu:
+ sev_unlock_vcpus_for_migration(source_kvm);
+out_dst_vcpu:
+ sev_unlock_vcpus_for_migration(kvm);
+out_dst_cgroup:
+ /* Operates on the source on success, on the destination on failure. */
+ if (charged)
+ sev_misc_cg_uncharge(cg_cleanup_sev);
+ put_misc_cg(cg_cleanup_sev->misc_cg);
+ cg_cleanup_sev->misc_cg = NULL;
+out_source:
+ sev_unlock_after_migration(source_kvm);
+out_fput:
+ if (source_kvm_file)
+ fput(source_kvm_file);
+out_unlock:
+ sev_unlock_after_migration(kvm);
+ return ret;
+}
+
int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
{
struct kvm_sev_cmd sev_cmd;
@@ -2038,16 +2241,16 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu)
svm = to_svm(vcpu);
if (vcpu->arch.guest_state_protected)
- sev_flush_guest_memory(svm, svm->vmsa, PAGE_SIZE);
- __free_page(virt_to_page(svm->vmsa));
+ sev_flush_guest_memory(svm, svm->sev_es.vmsa, PAGE_SIZE);
+ __free_page(virt_to_page(svm->sev_es.vmsa));
- if (svm->ghcb_sa_free)
- kfree(svm->ghcb_sa);
+ if (svm->sev_es.ghcb_sa_free)
+ kfree(svm->sev_es.ghcb_sa);
}
static void dump_ghcb(struct vcpu_svm *svm)
{
- struct ghcb *ghcb = svm->ghcb;
+ struct ghcb *ghcb = svm->sev_es.ghcb;
unsigned int nbits;
/* Re-use the dump_invalid_vmcb module parameter */
@@ -2073,7 +2276,7 @@ static void dump_ghcb(struct vcpu_svm *svm)
static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
{
struct kvm_vcpu *vcpu = &svm->vcpu;
- struct ghcb *ghcb = svm->ghcb;
+ struct ghcb *ghcb = svm->sev_es.ghcb;
/*
* The GHCB protocol so far allows for the following data
@@ -2093,7 +2296,7 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
{
struct vmcb_control_area *control = &svm->vmcb->control;
struct kvm_vcpu *vcpu = &svm->vcpu;
- struct ghcb *ghcb = svm->ghcb;
+ struct ghcb *ghcb = svm->sev_es.ghcb;
u64 exit_code;
/*
@@ -2140,7 +2343,7 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
struct ghcb *ghcb;
u64 exit_code = 0;
- ghcb = svm->ghcb;
+ ghcb = svm->sev_es.ghcb;
/* Only GHCB Usage code 0 is supported */
if (ghcb->ghcb_usage)
@@ -2258,33 +2461,34 @@ vmgexit_err:
void sev_es_unmap_ghcb(struct vcpu_svm *svm)
{
- if (!svm->ghcb)
+ if (!svm->sev_es.ghcb)
return;
- if (svm->ghcb_sa_free) {
+ if (svm->sev_es.ghcb_sa_free) {
/*
* The scratch area lives outside the GHCB, so there is a
* buffer that, depending on the operation performed, may
* need to be synced, then freed.
*/
- if (svm->ghcb_sa_sync) {
+ if (svm->sev_es.ghcb_sa_sync) {
kvm_write_guest(svm->vcpu.kvm,
- ghcb_get_sw_scratch(svm->ghcb),
- svm->ghcb_sa, svm->ghcb_sa_len);
- svm->ghcb_sa_sync = false;
+ ghcb_get_sw_scratch(svm->sev_es.ghcb),
+ svm->sev_es.ghcb_sa,
+ svm->sev_es.ghcb_sa_len);
+ svm->sev_es.ghcb_sa_sync = false;
}
- kfree(svm->ghcb_sa);
- svm->ghcb_sa = NULL;
- svm->ghcb_sa_free = false;
+ kfree(svm->sev_es.ghcb_sa);
+ svm->sev_es.ghcb_sa = NULL;
+ svm->sev_es.ghcb_sa_free = false;
}
- trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->ghcb);
+ trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->sev_es.ghcb);
sev_es_sync_to_ghcb(svm);
- kvm_vcpu_unmap(&svm->vcpu, &svm->ghcb_map, true);
- svm->ghcb = NULL;
+ kvm_vcpu_unmap(&svm->vcpu, &svm->sev_es.ghcb_map, true);
+ svm->sev_es.ghcb = NULL;
}
void pre_sev_run(struct vcpu_svm *svm, int cpu)
@@ -2314,7 +2518,7 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu)
static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
{
struct vmcb_control_area *control = &svm->vmcb->control;
- struct ghcb *ghcb = svm->ghcb;
+ struct ghcb *ghcb = svm->sev_es.ghcb;
u64 ghcb_scratch_beg, ghcb_scratch_end;
u64 scratch_gpa_beg, scratch_gpa_end;
void *scratch_va;
@@ -2350,7 +2554,7 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
return false;
}
- scratch_va = (void *)svm->ghcb;
+ scratch_va = (void *)svm->sev_es.ghcb;
scratch_va += (scratch_gpa_beg - control->ghcb_gpa);
} else {
/*
@@ -2380,12 +2584,12 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
* the vCPU next time (i.e. a read was requested so the data
* must be written back to the guest memory).
*/
- svm->ghcb_sa_sync = sync;
- svm->ghcb_sa_free = true;
+ svm->sev_es.ghcb_sa_sync = sync;
+ svm->sev_es.ghcb_sa_free = true;
}
- svm->ghcb_sa = scratch_va;
- svm->ghcb_sa_len = len;
+ svm->sev_es.ghcb_sa = scratch_va;
+ svm->sev_es.ghcb_sa_len = len;
return true;
}
@@ -2504,15 +2708,15 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
return -EINVAL;
}
- if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->ghcb_map)) {
+ if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) {
/* Unable to map GHCB from guest */
vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
ghcb_gpa);
return -EINVAL;
}
- svm->ghcb = svm->ghcb_map.hva;
- ghcb = svm->ghcb_map.hva;
+ svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva;
+ ghcb = svm->sev_es.ghcb_map.hva;
trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb);
@@ -2535,7 +2739,7 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
ret = kvm_sev_es_mmio_read(vcpu,
control->exit_info_1,
control->exit_info_2,
- svm->ghcb_sa);
+ svm->sev_es.ghcb_sa);
break;
case SVM_VMGEXIT_MMIO_WRITE:
if (!setup_vmgexit_scratch(svm, false, control->exit_info_2))
@@ -2544,7 +2748,7 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
ret = kvm_sev_es_mmio_write(vcpu,
control->exit_info_1,
control->exit_info_2,
- svm->ghcb_sa);
+ svm->sev_es.ghcb_sa);
break;
case SVM_VMGEXIT_NMI_COMPLETE:
ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_IRET);
@@ -2604,7 +2808,8 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
if (!setup_vmgexit_scratch(svm, in, bytes))
return -EINVAL;
- return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->ghcb_sa, count, in);
+ return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->sev_es.ghcb_sa,
+ count, in);
}
void sev_es_init_vmcb(struct vcpu_svm *svm)
@@ -2619,7 +2824,7 @@ void sev_es_init_vmcb(struct vcpu_svm *svm)
* VMCB page. Do not include the encryption mask on the VMSA physical
* address since hardware will access it using the guest key.
*/
- svm->vmcb->control.vmsa_pa = __pa(svm->vmsa);
+ svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa);
/* Can't intercept CR register access, HV can't modify CR registers */
svm_clr_intercept(svm, INTERCEPT_CR0_READ);
@@ -2691,8 +2896,8 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
struct vcpu_svm *svm = to_svm(vcpu);
/* First SIPI: Use the values as initially set by the VMM */
- if (!svm->received_first_sipi) {
- svm->received_first_sipi = true;
+ if (!svm->sev_es.received_first_sipi) {
+ svm->sev_es.received_first_sipi = true;
return;
}
@@ -2701,8 +2906,8 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
* the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a
* non-zero value.
*/
- if (!svm->ghcb)
+ if (!svm->sev_es.ghcb)
return;
- ghcb_set_sw_exit_info_2(svm->ghcb, 1);
+ ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1);
}
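
The sev_lock_for_migration()/sev_unlock_after_migration() pair added above claims a per-VM flag with an acquire cmpxchg before sleeping on kvm->lock, so two VMs migrating to/from each other back off with -EBUSY instead of deadlocking on each other's lock. A minimal userspace sketch of that try-lock pattern, using C11 atomics and a pthread mutex (struct vm and the function names are illustrative, not KVM API):

#include <pthread.h>
#include <stdatomic.h>
#include <errno.h>

struct vm {
	atomic_int migration_in_progress;	/* 0 = idle, 1 = migrating */
	pthread_mutex_t lock;			/* stands in for kvm->lock */
};

/* Returns 0 on success, -EBUSY if this VM is already part of a migration. */
static int vm_lock_for_migration(struct vm *vm)
{
	int expected = 0;

	/*
	 * Claim the per-VM flag before sleeping on the mutex.  If two VMs
	 * try to migrate to/from each other concurrently, one of them loses
	 * the cmpxchg and bails with -EBUSY instead of deadlocking on the
	 * other VM's mutex.
	 */
	if (!atomic_compare_exchange_strong_explicit(&vm->migration_in_progress,
						     &expected, 1,
						     memory_order_acquire,
						     memory_order_relaxed))
		return -EBUSY;

	pthread_mutex_lock(&vm->lock);
	return 0;
}

static void vm_unlock_after_migration(struct vm *vm)
{
	pthread_mutex_unlock(&vm->lock);
	atomic_store_explicit(&vm->migration_in_progress, 0,
			      memory_order_release);
}

The release store in the unlock path pairs with the acquire cmpxchg, so the next winner of the flag observes everything the previous migration did under the mutex.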
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index b36ca4e476c215..5630c241d5f6e0 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1452,7 +1452,7 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
svm_switch_vmcb(svm, &svm->vmcb01);
if (vmsa_page)
- svm->vmsa = page_address(vmsa_page);
+ svm->sev_es.vmsa = page_address(vmsa_page);
svm->guest_state_loaded = false;
@@ -2835,11 +2835,11 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
{
struct vcpu_svm *svm = to_svm(vcpu);
- if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->ghcb))
+ if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->sev_es.ghcb))
return kvm_complete_insn_gp(vcpu, err);
- ghcb_set_sw_exit_info_1(svm->ghcb, 1);
- ghcb_set_sw_exit_info_2(svm->ghcb,
+ ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 1);
+ ghcb_set_sw_exit_info_2(svm->sev_es.ghcb,
X86_TRAP_GP |
SVM_EVTINJ_TYPE_EXEPT |
SVM_EVTINJ_VALID);
@@ -3121,11 +3121,6 @@ static int invpcid_interception(struct kvm_vcpu *vcpu)
type = svm->vmcb->control.exit_info_2;
gva = svm->vmcb->control.exit_info_1;
- if (type > 3) {
- kvm_inject_gp(vcpu, 0);
- return 1;
- }
-
return kvm_handle_invpcid(vcpu, type, gva);
}
@@ -4701,6 +4696,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.mem_enc_unreg_region = svm_unregister_enc_region,
.vm_copy_enc_context_from = svm_vm_copy_asid_from,
+ .vm_move_enc_context_from = svm_vm_migrate_from,
.can_emulate_instruction = svm_can_emulate_instruction,
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 5e9510d4574e33..437e68504e6691 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -80,6 +80,7 @@ struct kvm_sev_info {
u64 ap_jump_table; /* SEV-ES AP Jump Table address */
struct kvm *enc_context_owner; /* Owner of copied encryption context */
struct misc_cg *misc_cg; /* For misc cgroup accounting */
+ atomic_t migration_in_progress;
};
struct kvm_svm {
@@ -123,6 +124,20 @@ struct svm_nested_state {
bool initialized;
};
+struct vcpu_sev_es_state {
+ /* SEV-ES support */
+ struct vmcb_save_area *vmsa;
+ struct ghcb *ghcb;
+ struct kvm_host_map ghcb_map;
+ bool received_first_sipi;
+
+ /* SEV-ES scratch area support */
+ void *ghcb_sa;
+ u32 ghcb_sa_len;
+ bool ghcb_sa_sync;
+ bool ghcb_sa_free;
+};
+
struct vcpu_svm {
struct kvm_vcpu vcpu;
/* vmcb always points at current_vmcb->ptr, it's purely a shorthand. */
@@ -186,17 +201,7 @@ struct vcpu_svm {
DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
} shadow_msr_intercept;
- /* SEV-ES support */
- struct vmcb_save_area *vmsa;
- struct ghcb *ghcb;
- struct kvm_host_map ghcb_map;
- bool received_first_sipi;
-
- /* SEV-ES scratch area support */
- void *ghcb_sa;
- u32 ghcb_sa_len;
- bool ghcb_sa_sync;
- bool ghcb_sa_free;
+ struct vcpu_sev_es_state sev_es;
bool guest_state_loaded;
};
@@ -558,6 +563,7 @@ int svm_register_enc_region(struct kvm *kvm,
int svm_unregister_enc_region(struct kvm *kvm,
struct kvm_enc_region *range);
int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd);
+int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd);
void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
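
The svm.h change above moves the per-vCPU SEV-ES fields out of struct vcpu_svm into struct vcpu_sev_es_state, which is what lets sev_es_migrate_from() hand the whole bundle to the destination vCPU with one memcpy() and wipe the source with one memset(). A rough stand-alone illustration of that design choice (field types are simplified; this is not the kernel struct):

#include <string.h>
#include <stdbool.h>
#include <stdint.h>

struct sev_es_state {
	void *vmsa;
	void *ghcb;
	bool received_first_sipi;
	void *ghcb_sa;
	uint32_t ghcb_sa_len;
	bool ghcb_sa_sync;
	bool ghcb_sa_free;
};

static void sev_es_state_move(struct sev_es_state *dst, struct sev_es_state *src)
{
	/*
	 * The state now belongs to the destination; wipe the source so its
	 * teardown path cannot free or flush buffers it no longer owns.
	 */
	memcpy(dst, src, sizeof(*src));
	memset(src, 0, sizeof(*src));
}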
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index b4ee5e9f9e201d..b213ca966d41da 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -525,67 +525,19 @@ static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
}
/*
- * Check if MSR is intercepted for L01 MSR bitmap.
+ * For x2APIC MSRs, ignore the vmcs01 bitmap. L1 can enable x2APIC without L1
+ * itself utilizing x2APIC. All MSRs were previously set to be intercepted,
+ * only the "disable intercept" case needs to be handled.
*/
-static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
+static void nested_vmx_disable_intercept_for_x2apic_msr(unsigned long *msr_bitmap_l1,
+ unsigned long *msr_bitmap_l0,
+ u32 msr, int type)
{
- unsigned long *msr_bitmap;
- int f = sizeof(unsigned long);
+ if (type & MSR_TYPE_R && !vmx_test_msr_bitmap_read(msr_bitmap_l1, msr))
+ vmx_clear_msr_bitmap_read(msr_bitmap_l0, msr);
- if (!cpu_has_vmx_msr_bitmap())
- return true;
-
- msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;
-
- if (msr <= 0x1fff) {
- return !!test_bit(msr, msr_bitmap + 0x800 / f);
- } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
- msr &= 0x1fff;
- return !!test_bit(msr, msr_bitmap + 0xc00 / f);
- }
-
- return true;
-}
-
-/*
- * If a msr is allowed by L0, we should check whether it is allowed by L1.
- * The corresponding bit will be cleared unless both of L0 and L1 allow it.
- */
-static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
- unsigned long *msr_bitmap_nested,
- u32 msr, int type)
-{
- int f = sizeof(unsigned long);
-
- /*
- * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
- * have the write-low and read-high bitmap offsets the wrong way round.
- * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
- */
- if (msr <= 0x1fff) {
- if (type & MSR_TYPE_R &&
- !test_bit(msr, msr_bitmap_l1 + 0x000 / f))
- /* read-low */
- __clear_bit(msr, msr_bitmap_nested + 0x000 / f);
-
- if (type & MSR_TYPE_W &&
- !test_bit(msr, msr_bitmap_l1 + 0x800 / f))
- /* write-low */
- __clear_bit(msr, msr_bitmap_nested + 0x800 / f);
-
- } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
- msr &= 0x1fff;
- if (type & MSR_TYPE_R &&
- !test_bit(msr, msr_bitmap_l1 + 0x400 / f))
- /* read-high */
- __clear_bit(msr, msr_bitmap_nested + 0x400 / f);
-
- if (type & MSR_TYPE_W &&
- !test_bit(msr, msr_bitmap_l1 + 0xc00 / f))
- /* write-high */
- __clear_bit(msr, msr_bitmap_nested + 0xc00 / f);
-
- }
+ if (type & MSR_TYPE_W && !vmx_test_msr_bitmap_write(msr_bitmap_l1, msr))
+ vmx_clear_msr_bitmap_write(msr_bitmap_l0, msr);
}
static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap)
@@ -600,6 +552,34 @@ static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap)
}
}
+#define BUILD_NVMX_MSR_INTERCEPT_HELPER(rw) \
+static inline \
+void nested_vmx_set_msr_##rw##_intercept(struct vcpu_vmx *vmx, \
+ unsigned long *msr_bitmap_l1, \
+ unsigned long *msr_bitmap_l0, u32 msr) \
+{ \
+ if (vmx_test_msr_bitmap_##rw(vmx->vmcs01.msr_bitmap, msr) || \
+ vmx_test_msr_bitmap_##rw(msr_bitmap_l1, msr)) \
+ vmx_set_msr_bitmap_##rw(msr_bitmap_l0, msr); \
+ else \
+ vmx_clear_msr_bitmap_##rw(msr_bitmap_l0, msr); \
+}
+BUILD_NVMX_MSR_INTERCEPT_HELPER(read)
+BUILD_NVMX_MSR_INTERCEPT_HELPER(write)
+
+static inline void nested_vmx_set_intercept_for_msr(struct vcpu_vmx *vmx,
+ unsigned long *msr_bitmap_l1,
+ unsigned long *msr_bitmap_l0,
+ u32 msr, int types)
+{
+ if (types & MSR_TYPE_R)
+ nested_vmx_set_msr_read_intercept(vmx, msr_bitmap_l1,
+ msr_bitmap_l0, msr);
+ if (types & MSR_TYPE_W)
+ nested_vmx_set_msr_write_intercept(vmx, msr_bitmap_l1,
+ msr_bitmap_l0, msr);
+}
+
/*
* Merge L0's and L1's MSR bitmap, return false to indicate that
* we do not use the hardware.
@@ -607,10 +587,11 @@ static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap)
static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
int msr;
unsigned long *msr_bitmap_l1;
- unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
- struct kvm_host_map *map = &to_vmx(vcpu)->nested.msr_bitmap_map;
+ unsigned long *msr_bitmap_l0 = vmx->nested.vmcs02.msr_bitmap;
+ struct kvm_host_map *map = &vmx->nested.msr_bitmap_map;
/* Nothing to do if the MSR bitmap is not in use. */
if (!cpu_has_vmx_msr_bitmap() ||
@@ -625,7 +606,7 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
/*
* To keep the control flow simple, pay eight 8-byte writes (sixteen
* 4-byte writes on 32-bit systems) up front to enable intercepts for
- * the x2APIC MSR range and selectively disable them below.
+ * the x2APIC MSR range and selectively toggle those relevant to L2.
*/
enable_x2apic_msr_intercepts(msr_bitmap_l0);
@@ -644,61 +625,44 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
}
}
- nested_vmx_disable_intercept_for_msr(
+ nested_vmx_disable_intercept_for_x2apic_msr(
msr_bitmap_l1, msr_bitmap_l0,
X2APIC_MSR(APIC_TASKPRI),
MSR_TYPE_R | MSR_TYPE_W);
if (nested_cpu_has_vid(vmcs12)) {
- nested_vmx_disable_intercept_for_msr(
+ nested_vmx_disable_intercept_for_x2apic_msr(
msr_bitmap_l1, msr_bitmap_l0,
X2APIC_MSR(APIC_EOI),
MSR_TYPE_W);
- nested_vmx_disable_intercept_for_msr(
+ nested_vmx_disable_intercept_for_x2apic_msr(
msr_bitmap_l1, msr_bitmap_l0,
X2APIC_MSR(APIC_SELF_IPI),
MSR_TYPE_W);
}
}
- /* KVM unconditionally exposes the FS/GS base MSRs to L1. */
+ /*
+ * Always check vmcs01's bitmap to honor userspace MSR filters and any
+ * other runtime changes to vmcs01's bitmap, e.g. dynamic pass-through.
+ */
#ifdef CONFIG_X86_64
- nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
- MSR_FS_BASE, MSR_TYPE_RW);
+ nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
+ MSR_FS_BASE, MSR_TYPE_RW);
- nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
- MSR_GS_BASE, MSR_TYPE_RW);
+ nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
+ MSR_GS_BASE, MSR_TYPE_RW);
- nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
- MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
+ nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
+ MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
#endif
+ nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
+ MSR_IA32_SPEC_CTRL, MSR_TYPE_RW);
- /*
- * Checking the L0->L1 bitmap is trying to verify two things:
- *
- * 1. L0 gave a permission to L1 to actually passthrough the MSR. This
- * ensures that we do not accidentally generate an L02 MSR bitmap
- * from the L12 MSR bitmap that is too permissive.
- * 2. That L1 or L2s have actually used the MSR. This avoids
- * unnecessarily merging of the bitmap if the MSR is unused. This
- * works properly because we only update the L01 MSR bitmap lazily.
- * So even if L0 should pass L1 these MSRs, the L01 bitmap is only
- * updated to reflect this when L1 (or its L2s) actually write to
- * the MSR.
- */
- if (!msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL))
- nested_vmx_disable_intercept_for_msr(
- msr_bitmap_l1, msr_bitmap_l0,
- MSR_IA32_SPEC_CTRL,
- MSR_TYPE_R | MSR_TYPE_W);
-
- if (!msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD))
- nested_vmx_disable_intercept_for_msr(
- msr_bitmap_l1, msr_bitmap_l0,
- MSR_IA32_PRED_CMD,
- MSR_TYPE_W);
+ nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
+ MSR_IA32_PRED_CMD, MSR_TYPE_W);
- kvm_vcpu_unmap(vcpu, &to_vmx(vcpu)->nested.msr_bitmap_map, false);
+ kvm_vcpu_unmap(vcpu, &vmx->nested.msr_bitmap_map, false);
return true;
}
@@ -5379,7 +5343,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
struct {
u64 eptp, gpa;
} operand;
- int i, r;
+ int i, r, gpr_index;
if (!(vmx->nested.msrs.secondary_ctls_high &
SECONDARY_EXEC_ENABLE_EPT) ||
@@ -5392,7 +5356,8 @@ static int handle_invept(struct kvm_vcpu *vcpu)
return 1;
vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
- type = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf);
+ gpr_index = vmx_get_instr_info_reg2(vmx_instruction_info);
+ type = kvm_register_read(vcpu, gpr_index);
types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
@@ -5459,7 +5424,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
u64 gla;
} operand;
u16 vpid02;
- int r;
+ int r, gpr_index;
if (!(vmx->nested.msrs.secondary_ctls_high &
SECONDARY_EXEC_ENABLE_VPID) ||
@@ -5472,7 +5437,8 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
return 1;
vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
- type = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf);
+ gpr_index = vmx_get_instr_info_reg2(vmx_instruction_info);
+ type = kvm_register_read(vcpu, gpr_index);
types = (vmx->nested.msrs.vpid_caps &
VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;
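
The BUILD_NVMX_MSR_INTERCEPT_HELPER macro above encodes a simple merge rule for the vmcs02 bitmap: an MSR is passed through to L2 only if both vmcs01 (which now reflects userspace MSR filters and any dynamic pass-through) and L1's vmcs12 bitmap pass it through; otherwise the intercept bit is set. A simplified sketch of that rule over flat one-bit-per-MSR bitmaps (the real bitmaps split reads/writes and low/high MSR ranges):

#include <stdbool.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static bool test_msr_bit(const unsigned long *bm, unsigned int msr)
{
	return bm[msr / BITS_PER_LONG] & (1UL << (msr % BITS_PER_LONG));
}

static void assign_msr_bit(unsigned long *bm, unsigned int msr, bool intercept)
{
	unsigned long mask = 1UL << (msr % BITS_PER_LONG);

	if (intercept)
		bm[msr / BITS_PER_LONG] |= mask;
	else
		bm[msr / BITS_PER_LONG] &= ~mask;
}

static void merge_msr_intercept(const unsigned long *bitmap_01,
				const unsigned long *bitmap_12,
				unsigned long *bitmap_02, unsigned int msr)
{
	/* Intercept for L2 if either L0 (vmcs01) or L1 (vmcs12) intercepts. */
	assign_msr_bit(bitmap_02, msr,
		       test_msr_bit(bitmap_01, msr) ||
		       test_msr_bit(bitmap_12, msr));
}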
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index b8e0d21b7c8a0d..1b7456b2177b9f 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -118,16 +118,15 @@ static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
}
}
-/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
-static int intel_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
+static bool intel_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
bool fixed = idx & (1u << 30);
idx &= ~(3u << 30);
- return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
- (fixed && idx >= pmu->nr_arch_fixed_counters);
+ return fixed ? idx < pmu->nr_arch_fixed_counters
+ : idx < pmu->nr_arch_gp_counters;
}
static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 76861b66bbcfcf..ba66c171d951ba 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -769,24 +769,13 @@ void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu)
/*
* Check if MSR is intercepted for currently loaded MSR bitmap.
*/
-static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
+static bool msr_write_intercepted(struct vcpu_vmx *vmx, u32 msr)
{
- unsigned long *msr_bitmap;
- int f = sizeof(unsigned long);
-
- if (!cpu_has_vmx_msr_bitmap())
+ if (!(exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS))
return true;
- msr_bitmap = to_vmx(vcpu)->loaded_vmcs->msr_bitmap;
-
- if (msr <= 0x1fff) {
- return !!test_bit(msr, msr_bitmap + 0x800 / f);
- } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
- msr &= 0x1fff;
- return !!test_bit(msr, msr_bitmap + 0xc00 / f);
- }
-
- return true;
+ return vmx_test_msr_bitmap_write(vmx->loaded_vmcs->msr_bitmap,
+ MSR_IA32_SPEC_CTRL);
}
static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
@@ -3697,46 +3686,6 @@ void free_vpid(int vpid)
spin_unlock(&vmx_vpid_lock);
}
-static void vmx_clear_msr_bitmap_read(ulong *msr_bitmap, u32 msr)
-{
- int f = sizeof(unsigned long);
-
- if (msr <= 0x1fff)
- __clear_bit(msr, msr_bitmap + 0x000 / f);
- else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
- __clear_bit(msr & 0x1fff, msr_bitmap + 0x400 / f);
-}
-
-static void vmx_clear_msr_bitmap_write(ulong *msr_bitmap, u32 msr)
-{
- int f = sizeof(unsigned long);
-
- if (msr <= 0x1fff)
- __clear_bit(msr, msr_bitmap + 0x800 / f);
- else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
- __clear_bit(msr & 0x1fff, msr_bitmap + 0xc00 / f);
-}
-
-static void vmx_set_msr_bitmap_read(ulong *msr_bitmap, u32 msr)
-{
- int f = sizeof(unsigned long);
-
- if (msr <= 0x1fff)
- __set_bit(msr, msr_bitmap + 0x000 / f);
- else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
- __set_bit(msr & 0x1fff, msr_bitmap + 0x400 / f);
-}
-
-static void vmx_set_msr_bitmap_write(ulong *msr_bitmap, u32 msr)
-{
- int f = sizeof(unsigned long);
-
- if (msr <= 0x1fff)
- __set_bit(msr, msr_bitmap + 0x800 / f);
- else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
- __set_bit(msr & 0x1fff, msr_bitmap + 0xc00 / f);
-}
-
void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -5494,6 +5443,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
u64 pcid;
u64 gla;
} operand;
+ int gpr_index;
if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) {
kvm_queue_exception(vcpu, UD_VECTOR);
@@ -5501,12 +5451,8 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
}
vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
- type = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf);
-
- if (type > 3) {
- kvm_inject_gp(vcpu, 0);
- return 1;
- }
+ gpr_index = vmx_get_instr_info_reg2(vmx_instruction_info);
+ type = kvm_register_read(vcpu, gpr_index);
/* According to the Intel instruction reference, the memory operand
* is read even if it isn't needed (e.g., for type==all)
@@ -6749,7 +6695,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
* If the L02 MSR bitmap does not intercept the MSR, then we need to
* save it.
*/
- if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
+ if (unlikely(!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL)))
vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
@@ -7563,7 +7509,8 @@ static void hardware_unsetup(void)
static bool vmx_check_apicv_inhibit_reasons(ulong bit)
{
ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
- BIT(APICV_INHIBIT_REASON_HYPERV);
+ BIT(APICV_INHIBIT_REASON_HYPERV) |
+ BIT(APICV_INHIBIT_REASON_BLOCKIRQ);
return supported & BIT(bit);
}
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index e7db42e3b0ce05..a4ead6023133a6 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -400,6 +400,34 @@ static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);
+/*
+ * Note, early Intel manuals have the write-low and read-high bitmap offsets
+ * the wrong way round. The bitmaps control MSRs 0x00000000-0x00001fff and
+ * 0xc0000000-0xc0001fff. The former (low) uses bytes 0-0x3ff for reads and
+ * 0x800-0xbff for writes. The latter (high) uses 0x400-0x7ff for reads and
+ * 0xc00-0xfff for writes. MSRs not covered by either of the ranges always
+ * VM-Exit.
+ */
+#define __BUILD_VMX_MSR_BITMAP_HELPER(rtype, action, bitop, access, base) \
+static inline rtype vmx_##action##_msr_bitmap_##access(unsigned long *bitmap, \
+ u32 msr) \
+{ \
+ int f = sizeof(unsigned long); \
+ \
+ if (msr <= 0x1fff) \
+ return bitop##_bit(msr, bitmap + base / f); \
+ else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) \
+ return bitop##_bit(msr & 0x1fff, bitmap + (base + 0x400) / f); \
+ return (rtype)true; \
+}
+#define BUILD_VMX_MSR_BITMAP_HELPERS(ret_type, action, bitop) \
+ __BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, read, 0x0) \
+ __BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, write, 0x800)
+
+BUILD_VMX_MSR_BITMAP_HELPERS(bool, test, test)
+BUILD_VMX_MSR_BITMAP_HELPERS(void, clear, __clear)
+BUILD_VMX_MSR_BITMAP_HELPERS(void, set, __set)
+
static inline u8 vmx_get_rvi(void)
{
return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
@@ -522,4 +550,9 @@ static inline bool vmx_guest_state_valid(struct kvm_vcpu *vcpu)
void dump_vmcs(struct kvm_vcpu *vcpu);
+static inline int vmx_get_instr_info_reg2(u32 vmx_instr_info)
+{
+ return (vmx_instr_info >> 28) & 0xf;
+}
+
#endif /* __KVM_X86_VMX_H */
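
The comment above vmx.h's new helpers documents the hardware MSR-bitmap layout that the generated vmx_{test,clear,set}_msr_bitmap_{read,write}() functions index into. A small sketch of the same arithmetic, assuming exactly the layout described in that comment (the helper names below are illustrative, not kernel API):

#include <stdint.h>

/*
 * Returns the absolute bit index within the 4 KiB bitmap page for a given
 * (msr, write) pair: read-low in bytes 0x000-0x3ff, read-high 0x400-0x7ff,
 * write-low 0x800-0xbff, write-high 0xc00-0xfff; or -1 if the MSR is outside
 * both ranges (such MSRs always VM-Exit).
 */
static long msr_bitmap_bit(uint32_t msr, int write)
{
	unsigned long base = write ? 0x800 : 0x000;

	if (msr <= 0x1fff)
		return base * 8 + msr;
	if (msr >= 0xc0000000 && msr <= 0xc0001fff)
		return (base + 0x400) * 8 + (msr & 0x1fff);
	return -1;
}

/*
 * Register operand 2 of the VM-exit instruction-information field lives in
 * bits 31:28, mirroring vmx_get_instr_info_reg2() above.
 */
static int instr_info_reg2(uint32_t vmx_instr_info)
{
	return (vmx_instr_info >> 28) & 0xf;
}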
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c1c4e2b05a63ab..dc7eb5fddfd3ef 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3260,8 +3260,11 @@ static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
static void record_steal_time(struct kvm_vcpu *vcpu)
{
- struct kvm_host_map map;
- struct kvm_steal_time *st;
+ struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache;
+ struct kvm_steal_time __user *st;
+ struct kvm_memslots *slots;
+ u64 steal;
+ u32 version;
if (kvm_xen_msr_enabled(vcpu->kvm)) {
kvm_xen_runstate_set_running(vcpu);
@@ -3271,47 +3274,86 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
return;
- /* -EAGAIN is returned in atomic context so we can just return. */
- if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT,
- &map, &vcpu->arch.st.cache, false))
+ if (WARN_ON_ONCE(current->mm != vcpu->kvm->mm))
return;
- st = map.hva +
- offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
+ slots = kvm_memslots(vcpu->kvm);
+
+ if (unlikely(slots->generation != ghc->generation ||
+ kvm_is_error_hva(ghc->hva) || !ghc->memslot)) {
+ gfn_t gfn = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS;
+
+ /* We rely on the fact that it fits in a single page. */
+ BUILD_BUG_ON((sizeof(*st) - 1) & KVM_STEAL_VALID_BITS);
+
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gfn, sizeof(*st)) ||
+ kvm_is_error_hva(ghc->hva) || !ghc->memslot)
+ return;
+ }
+ st = (struct kvm_steal_time __user *)ghc->hva;
/*
* Doing a TLB flush here, on the guest's behalf, can avoid
* expensive IPIs.
*/
if (guest_pv_has(vcpu, KVM_FEATURE_PV_TLB_FLUSH)) {
- u8 st_preempted = xchg(&st->preempted, 0);
+ u8 st_preempted = 0;
+ int err = -EFAULT;
+
+ if (!user_access_begin(st, sizeof(*st)))
+ return;
+
+ asm volatile("1: xchgb %0, %2\n"
+ "xor %1, %1\n"
+ "2:\n"
+ _ASM_EXTABLE_UA(1b, 2b)
+ : "+r" (st_preempted),
+ "+&r" (err)
+ : "m" (st->preempted));
+ if (err)
+ goto out;
+
+ user_access_end();
+
+ vcpu->arch.st.preempted = 0;
trace_kvm_pv_tlb_flush(vcpu->vcpu_id,
st_preempted & KVM_VCPU_FLUSH_TLB);
if (st_preempted & KVM_VCPU_FLUSH_TLB)
kvm_vcpu_flush_tlb_guest(vcpu);
+
+ if (!user_access_begin(st, sizeof(*st)))
+ goto dirty;
} else {
- st->preempted = 0;
- }
+ if (!user_access_begin(st, sizeof(*st)))
+ return;
- vcpu->arch.st.preempted = 0;
+ unsafe_put_user(0, &st->preempted, out);
+ vcpu->arch.st.preempted = 0;
+ }
- if (st->version & 1)
- st->version += 1; /* first time write, random junk */
+ unsafe_get_user(version, &st->version, out);
+ if (version & 1)
+ version += 1; /* first time write, random junk */
- st->version += 1;
+ version += 1;
+ unsafe_put_user(version, &st->version, out);
smp_wmb();
- st->steal += current->sched_info.run_delay -
+ unsafe_get_user(steal, &st->steal, out);
+ steal += current->sched_info.run_delay -
vcpu->arch.st.last_steal;
vcpu->arch.st.last_steal = current->sched_info.run_delay;
+ unsafe_put_user(steal, &st->steal, out);
- smp_wmb();
-
- st->version += 1;
+ version += 1;
+ unsafe_put_user(version, &st->version, out);
- kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false);
+ out:
+ user_access_end();
+ dirty:
+ mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
}
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
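
The record_steal_time() hunk above switches from kvm_map_gfn() to a gfn_to_hva_cache plus unsafe user accessors, but keeps the existing odd/even "version" protocol: version is made odd before the record is updated and even afterwards, so the guest can detect a torn update and retry. A rough stand-alone sketch of that protocol (plain C stand-ins, not the actual host or guest code):

#include <stdint.h>

struct steal_time_record {
	volatile uint32_t version;
	volatile uint64_t steal;
};

static void host_update_steal(struct steal_time_record *st, uint64_t delta)
{
	st->version |= 1;		/* odd: update in progress */
	__sync_synchronize();		/* order the version bump before the data */
	st->steal += delta;
	__sync_synchronize();
	st->version += 1;		/* even again: record is consistent */
}

static uint64_t guest_read_steal(const struct steal_time_record *st)
{
	uint32_t v1, v2;
	uint64_t steal;

	do {
		v1 = st->version;
		__sync_synchronize();
		steal = st->steal;
		__sync_synchronize();
		v2 = st->version;
	} while ((v1 & 1) || v1 != v2);	/* retry on in-progress or torn read */

	return steal;
}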
@@ -3517,7 +3559,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI))
return 1;
- if (kvm_lapic_enable_pv_eoi(vcpu, data, sizeof(u8)))
+ if (kvm_lapic_set_pv_eoi(vcpu, data, sizeof(u8)))
return 1;
break;
@@ -4137,7 +4179,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = !static_call(kvm_x86_cpu_has_accelerated_tpr)();
break;
case KVM_CAP_NR_VCPUS:
- r = KVM_SOFT_MAX_VCPUS;
+ r = num_online_cpus();
break;
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
@@ -4351,8 +4393,10 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
{
- struct kvm_host_map map;
- struct kvm_steal_time *st;
+ struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache;
+ struct kvm_steal_time __user *st;
+ struct kvm_memslots *slots;
+ static const u8 preempted = KVM_VCPU_PREEMPTED;
if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
return;
@@ -4360,16 +4404,23 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
if (vcpu->arch.st.preempted)
return;
- if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map,
- &vcpu->arch.st.cache, true))
+ /* This happens on process exit */
+ if (unlikely(current->mm != vcpu->kvm->mm))
return;
- st = map.hva +
- offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
+ slots = kvm_memslots(vcpu->kvm);
+
+ if (unlikely(slots->generation != ghc->generation ||
+ kvm_is_error_hva(ghc->hva) || !ghc->memslot))
+ return;
- st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
+ st = (struct kvm_steal_time __user *)ghc->hva;
+ BUILD_BUG_ON(sizeof(st->preempted) != sizeof(preempted));
- kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true);
+ if (!copy_to_user_nofault(&st->preempted, &preempted, sizeof(preempted)))
+ vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
+
+ mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -5728,6 +5779,12 @@ split_irqchip_unlock:
if (kvm_x86_ops.vm_copy_enc_context_from)
r = kvm_x86_ops.vm_copy_enc_context_from(kvm, cap->args[0]);
return r;
+ case KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM:
+ r = -EINVAL;
+ if (kvm_x86_ops.vm_move_enc_context_from)
+ r = kvm_x86_ops.vm_move_enc_context_from(
+ kvm, cap->args[0]);
+ return r;
case KVM_CAP_EXIT_HYPERCALL:
if (cap->args[0] & ~KVM_EXIT_HYPERCALL_VALID_MASK) {
r = -EINVAL;
@@ -7328,7 +7385,9 @@ static void emulator_set_smbase(struct x86_emulate_ctxt *ctxt, u64 smbase)
static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt,
u32 pmc)
{
- return kvm_pmu_is_valid_rdpmc_ecx(emul_to_vcpu(ctxt), pmc);
+ if (kvm_pmu_is_valid_rdpmc_ecx(emul_to_vcpu(ctxt), pmc))
+ return 0;
+ return -EINVAL;
}
static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
@@ -9552,7 +9611,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
}
if (kvm_request_pending(vcpu)) {
- if (kvm_check_request(KVM_REQ_VM_BUGGED, vcpu)) {
+ if (kvm_check_request(KVM_REQ_VM_DEAD, vcpu)) {
r = -EIO;
goto out;
}
@@ -10564,6 +10623,24 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
return ret;
}
+static void kvm_arch_vcpu_guestdbg_update_apicv_inhibit(struct kvm *kvm)
+{
+ bool inhibit = false;
+ struct kvm_vcpu *vcpu;
+ int i;
+
+ down_write(&kvm->arch.apicv_update_lock);
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ) {
+ inhibit = true;
+ break;
+ }
+ }
+ __kvm_request_apicv_update(kvm, !inhibit, APICV_INHIBIT_REASON_BLOCKIRQ);
+ up_write(&kvm->arch.apicv_update_lock);
+}
+
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg)
{
@@ -10616,6 +10693,8 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
static_call(kvm_x86_update_exception_bitmap)(vcpu);
+ kvm_arch_vcpu_guestdbg_update_apicv_inhibit(vcpu->kvm);
+
r = 0;
out:
@@ -10859,11 +10938,8 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
- struct gfn_to_pfn_cache *cache = &vcpu->arch.st.cache;
int idx;
- kvm_release_pfn(cache->pfn, cache->dirty, cache);
-
kvmclock_reset(vcpu);
static_call(kvm_x86_vcpu_free)(vcpu);
@@ -12275,7 +12351,8 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
return kvm_skip_emulated_instruction(vcpu);
default:
- BUG(); /* We have already checked above that type <= 3 */
+ kvm_inject_gp(vcpu, 0);
+ return 1;
}
}
EXPORT_SYMBOL_GPL(kvm_handle_invpcid);
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 23d54b810f0854..35487305d8afec 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -229,28 +229,75 @@ void __init sev_setup_arch(void)
swiotlb_adjust_size(size);
}
-static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
+static unsigned long pg_level_to_pfn(int level, pte_t *kpte, pgprot_t *ret_prot)
{
- pgprot_t old_prot, new_prot;
- unsigned long pfn, pa, size;
- pte_t new_pte;
+ unsigned long pfn = 0;
+ pgprot_t prot;
switch (level) {
case PG_LEVEL_4K:
pfn = pte_pfn(*kpte);
- old_prot = pte_pgprot(*kpte);
+ prot = pte_pgprot(*kpte);
break;
case PG_LEVEL_2M:
pfn = pmd_pfn(*(pmd_t *)kpte);
- old_prot = pmd_pgprot(*(pmd_t *)kpte);
+ prot = pmd_pgprot(*(pmd_t *)kpte);
break;
case PG_LEVEL_1G:
pfn = pud_pfn(*(pud_t *)kpte);
- old_prot = pud_pgprot(*(pud_t *)kpte);
+ prot = pud_pgprot(*(pud_t *)kpte);
break;
default:
- return;
+ WARN_ONCE(1, "Invalid level for kpte\n");
+ return 0;
+ }
+
+ if (ret_prot)
+ *ret_prot = prot;
+
+ return pfn;
+}
+
+void notify_range_enc_status_changed(unsigned long vaddr, int npages, bool enc)
+{
+#ifdef CONFIG_PARAVIRT
+ unsigned long sz = npages << PAGE_SHIFT;
+ unsigned long vaddr_end = vaddr + sz;
+
+ while (vaddr < vaddr_end) {
+ int psize, pmask, level;
+ unsigned long pfn;
+ pte_t *kpte;
+
+ kpte = lookup_address(vaddr, &level);
+ if (!kpte || pte_none(*kpte)) {
+ WARN_ONCE(1, "kpte lookup for vaddr\n");
+ return;
+ }
+
+ pfn = pg_level_to_pfn(level, kpte, NULL);
+ if (!pfn)
+ continue;
+
+ psize = page_level_size(level);
+ pmask = page_level_mask(level);
+
+ notify_page_enc_status_changed(pfn, psize >> PAGE_SHIFT, enc);
+
+ vaddr = (vaddr & pmask) + psize;
}
+#endif
+}
+
+static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
+{
+ pgprot_t old_prot, new_prot;
+ unsigned long pfn, pa, size;
+ pte_t new_pte;
+
+ pfn = pg_level_to_pfn(level, kpte, &old_prot);
+ if (!pfn)
+ return;
new_prot = old_prot;
if (enc)
@@ -286,12 +333,13 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
static int __init early_set_memory_enc_dec(unsigned long vaddr,
unsigned long size, bool enc)
{
- unsigned long vaddr_end, vaddr_next;
+ unsigned long vaddr_end, vaddr_next, start;
unsigned long psize, pmask;
int split_page_size_mask;
int level, ret;
pte_t *kpte;
+ start = vaddr;
vaddr_next = vaddr;
vaddr_end = vaddr + size;
@@ -346,6 +394,7 @@ static int __init early_set_memory_enc_dec(unsigned long vaddr,
ret = 0;
+ notify_range_enc_status_changed(start, PAGE_ALIGN(size) >> PAGE_SHIFT, enc);
out:
__flush_tlb_all();
return ret;
@@ -361,6 +410,11 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
return early_set_memory_enc_dec(vaddr, size, true);
}
+void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
+{
+ notify_range_enc_status_changed(vaddr, npages, enc);
+}
+
/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
bool force_dma_unencrypted(struct device *dev)
{
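
notify_range_enc_status_changed() above walks the virtual range one mapping at a time, notifies the hypervisor for the whole page backing each address (4 KiB, 2 MiB or 1 GiB), and then jumps to the end of that page. A toy sketch of the walk and the alignment arithmetic, with stand-ins for lookup_address()/page_level_size() and notify_page_enc_status_changed():

#include <stdio.h>

#define SZ_4K (1UL << 12)
#define SZ_2M (1UL << 21)

/*
 * Toy stand-in for lookup_address() + page_level_size(): pretend 2 MiB-aligned
 * addresses are backed by 2 MiB pages, everything else by 4 KiB pages.
 */
static unsigned long page_size_backing(unsigned long vaddr)
{
	return (vaddr & (SZ_2M - 1)) ? SZ_4K : SZ_2M;
}

/* Stand-in for notify_page_enc_status_changed(). */
static void notify_chunk(unsigned long pfn, unsigned long npages, int enc)
{
	printf("pfn %#lx: %lu pages -> %s\n", pfn, npages,
	       enc ? "encrypted" : "decrypted");
}

static void notify_range(unsigned long vaddr, unsigned long npages, int enc)
{
	unsigned long end = vaddr + npages * SZ_4K;

	while (vaddr < end) {
		unsigned long psize = page_size_backing(vaddr);
		unsigned long pmask = ~(psize - 1);

		notify_chunk((vaddr & pmask) >> 12, psize >> 12, enc);
		/* Advance to the first byte past the page backing vaddr. */
		vaddr = (vaddr & pmask) + psize;
	}
}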
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 934dc5b2df3617..b4072115c8ef68 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -2023,6 +2023,12 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
*/
cpa_flush(&cpa, 0);
+ /*
+ * Notify hypervisor that a given memory range is mapped encrypted
+ * or decrypted.
+ */
+ notify_range_enc_status_changed(addr, numpages, enc);
+
return ret;
}
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index 9e55bcbfcd33c0..6a8f3b53ab8343 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -225,7 +225,6 @@ static void __init xen_pv_smp_prepare_boot_cpu(void)
static void __init xen_pv_smp_prepare_cpus(unsigned int max_cpus)
{
unsigned cpu;
- unsigned int i;
if (skip_ioapic_setup) {
char *m = (max_cpus == 0) ?
@@ -238,16 +237,9 @@ static void __init xen_pv_smp_prepare_cpus(unsigned int max_cpus)
}
xen_init_lock_cpu(0);
- smp_store_boot_cpu_info();
- cpu_data(0).x86_max_cores = 1;
+ smp_prepare_cpus_common();
- for_each_possible_cpu(i) {
- zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
- zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
- zalloc_cpumask_var(&per_cpu(cpu_die_map, i), GFP_KERNEL);
- zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
- }
- set_cpu_sibling_map(0);
+ cpu_data(0).x86_max_cores = 1;
speculative_store_bypass_ht_init();
diff --git a/block/blk-core.c b/block/blk-core.c
index b043de2baaace2..9ee32f85d74e1a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -809,10 +809,8 @@ noinline_for_stack bool submit_bio_checks(struct bio *bio)
if (unlikely(!current->io_context))
create_task_io_context(current, GFP_ATOMIC, q->node);
- if (blk_throtl_bio(bio)) {
- blkcg_bio_issue_init(bio);
+ if (blk_throtl_bio(bio))
return false;
- }
blk_cgroup_bio_start(bio);
blkcg_bio_issue_init(bio);
diff --git a/block/blk-ia-ranges.c b/block/blk-ia-ranges.c
index c246c425d0d70b..b925f3db3ab7a4 100644
--- a/block/blk-ia-ranges.c
+++ b/block/blk-ia-ranges.c
@@ -104,8 +104,8 @@ static struct kobj_type blk_ia_ranges_ktype = {
};
/**
- * disk_register_ia_ranges - register with sysfs a set of independent
- * access ranges
+ * disk_register_independent_access_ranges - register with sysfs a set of
+ * independent access ranges
* @disk: Target disk
* @new_iars: New set of independent access ranges
*
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 4be652fa38e78b..ba21449439cc48 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -370,9 +370,6 @@ bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
bool ret = false;
enum hctx_type type;
- if (bio_queue_enter(bio))
- return false;
-
if (e && e->type->ops.bio_merge) {
ret = e->type->ops.bio_merge(q, bio, nr_segs);
goto out_put;
@@ -397,7 +394,6 @@ bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
spin_unlock(&ctx->lock);
out_put:
- blk_queue_exit(q);
return ret;
}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 629cf421417f06..3ab34c4f20daf7 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2495,8 +2495,9 @@ static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
return BLK_MAX_REQUEST_COUNT;
}
-static bool blk_attempt_bio_merge(struct request_queue *q, struct bio *bio,
- unsigned int nr_segs, bool *same_queue_rq)
+static bool blk_mq_attempt_bio_merge(struct request_queue *q,
+ struct bio *bio, unsigned int nr_segs,
+ bool *same_queue_rq)
{
if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
if (blk_attempt_plug_merge(q, bio, nr_segs, same_queue_rq))
@@ -2520,12 +2521,8 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
};
struct request *rq;
- if (unlikely(bio_queue_enter(bio)))
+ if (blk_mq_attempt_bio_merge(q, bio, nsegs, same_queue_rq))
return NULL;
- if (unlikely(!submit_bio_checks(bio)))
- goto put_exit;
- if (blk_attempt_bio_merge(q, bio, nsegs, same_queue_rq))
- goto put_exit;
rq_qos_throttle(q, bio);
@@ -2542,26 +2539,44 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
rq_qos_cleanup(q, bio);
if (bio->bi_opf & REQ_NOWAIT)
bio_wouldblock_error(bio);
-put_exit:
- blk_queue_exit(q);
+
return NULL;
}
+static inline bool blk_mq_can_use_cached_rq(struct request *rq,
+ struct bio *bio)
+{
+ if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type)
+ return false;
+
+ if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
+ return false;
+
+ return true;
+}
+
static inline struct request *blk_mq_get_request(struct request_queue *q,
struct blk_plug *plug,
struct bio *bio,
unsigned int nsegs,
bool *same_queue_rq)
{
+ struct request *rq;
+ bool checked = false;
+
if (plug) {
- struct request *rq;
rq = rq_list_peek(&plug->cached_rq);
if (rq && rq->q == q) {
if (unlikely(!submit_bio_checks(bio)))
return NULL;
- if (blk_attempt_bio_merge(q, bio, nsegs, same_queue_rq))
+ if (blk_mq_attempt_bio_merge(q, bio, nsegs,
+ same_queue_rq))
return NULL;
+ checked = true;
+ if (!blk_mq_can_use_cached_rq(rq, bio))
+ goto fallback;
+ rq->cmd_flags = bio->bi_opf;
plug->cached_rq = rq_list_next(rq);
INIT_LIST_HEAD(&rq->queuelist);
rq_qos_throttle(q, bio);
@@ -2569,7 +2584,15 @@ static inline struct request *blk_mq_get_request(struct request_queue *q,
}
}
- return blk_mq_get_new_requests(q, plug, bio, nsegs, same_queue_rq);
+fallback:
+ if (unlikely(bio_queue_enter(bio)))
+ return NULL;
+ if (!checked && !submit_bio_checks(bio))
+ return NULL;
+ rq = blk_mq_get_new_requests(q, plug, bio, nsegs, same_queue_rq);
+ if (!rq)
+ blk_queue_exit(q);
+ return rq;
}
/**
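
blk_mq_get_request() above first peeks at the plug's cached request, reuses it only if its hctx type and flush flag match the bio, and otherwise falls back to a fresh allocation while making sure submit_bio_checks() runs exactly once. A stripped-down sketch of that reuse-or-fallback flow (all types and helpers below are toy stand-ins, not the block-layer API):

#include <stdbool.h>
#include <stdlib.h>

struct req { int type; };

/* Toy stand-in for submit_bio_checks(). */
static bool checks_ok(int opf)			{ return opf >= 0; }
/* Toy stand-in for blk_mq_can_use_cached_rq(). */
static bool compatible(struct req *rq, int opf)	{ return rq->type == opf; }

static struct req *alloc_new(int opf)
{
	struct req *rq = malloc(sizeof(*rq));

	if (rq)
		rq->type = opf;
	return rq;
}

static struct req *get_request(struct req **cached, int opf)
{
	bool checked = false;
	struct req *rq = *cached;

	if (rq) {
		if (!checks_ok(opf))
			return NULL;
		checked = true;
		if (compatible(rq, opf)) {
			*cached = NULL;		/* consume the cached request */
			return rq;
		}
	}

	/* Fall back to a fresh allocation, validating only once. */
	if (!checked && !checks_ok(opf))
		return NULL;
	return alloc_new(opf);
}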
diff --git a/block/blk-mq.h b/block/blk-mq.h
index cb0b5482ca5e10..8acfa650f57515 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -89,15 +89,7 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *
return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
}
-/*
- * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
- * @q: request queue
- * @flags: request command flags
- * @ctx: software queue cpu ctx
- */
-static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
- unsigned int flags,
- struct blk_mq_ctx *ctx)
+static inline enum hctx_type blk_mq_get_hctx_type(unsigned int flags)
{
enum hctx_type type = HCTX_TYPE_DEFAULT;
@@ -108,8 +100,20 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
type = HCTX_TYPE_POLL;
else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
type = HCTX_TYPE_READ;
-
- return ctx->hctxs[type];
+ return type;
+}
+
+/*
+ * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
+ * @q: request queue
+ * @flags: request command flags
+ * @ctx: software queue cpu ctx
+ */
+static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
+ unsigned int flags,
+ struct blk_mq_ctx *ctx)
+{
+ return ctx->hctxs[blk_mq_get_hctx_type(flags)];
}
/*
@@ -149,7 +153,7 @@ struct blk_mq_alloc_data {
blk_mq_req_flags_t flags;
unsigned int shallow_depth;
unsigned int cmd_flags;
- unsigned int rq_flags;
+ req_flags_t rq_flags;
/* allocate multiple requests/tags in one go */
unsigned int nr_tags;
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 1d0c76c18fc523..774ecc598bee28 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -429,9 +429,10 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
op = REQ_OP_ZONE_RESET;
/* Invalidate the page cache, including dirty pages. */
+ filemap_invalidate_lock(bdev->bd_inode->i_mapping);
ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
if (ret)
- return ret;
+ goto fail;
break;
case BLKOPENZONE:
op = REQ_OP_ZONE_OPEN;
@@ -449,15 +450,9 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
GFP_KERNEL);
- /*
- * Invalidate the page cache again for zone reset: writes can only be
- * direct for zoned devices so concurrent writes would not add any page
- * to the page cache after/during reset. The page cache may be filled
- * again due to concurrent reads though and dropping the pages for
- * these is fine.
- */
- if (!ret && cmd == BLKRESETZONE)
- ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
+fail:
+ if (cmd == BLKRESETZONE)
+ filemap_invalidate_unlock(bdev->bd_inode->i_mapping);
return ret;
}
diff --git a/block/genhd.c b/block/genhd.c
index ca2fbab1d4252b..c5392cc24d37ef 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -394,8 +394,8 @@ static void disk_scan_partitions(struct gendisk *disk)
* This function registers the partitioning information in @disk
* with the kernel.
*/
-int device_add_disk(struct device *parent, struct gendisk *disk,
- const struct attribute_group **groups)
+int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
+ const struct attribute_group **groups)
{
struct device *ddev = disk_to_dev(disk);
@@ -544,7 +544,7 @@ out_disk_release_events:
out_free_ext_minor:
if (disk->major == BLOCK_EXT_MAJOR)
blk_free_ext_minor(disk->first_minor);
- return WARN_ON_ONCE(ret); /* keep until all callers handle errors */
+ return ret;
}
EXPORT_SYMBOL(device_add_disk);
diff --git a/block/ioctl.c b/block/ioctl.c
index d6af0ac97e57e3..0a1d10ac2e1a59 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -113,6 +113,7 @@ static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode,
uint64_t range[2];
uint64_t start, len;
struct request_queue *q = bdev_get_queue(bdev);
+ struct inode *inode = bdev->bd_inode;
int err;
if (!(mode & FMODE_WRITE))
@@ -135,12 +136,17 @@ static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode,
if (start + len > bdev_nr_bytes(bdev))
return -EINVAL;
+ filemap_invalidate_lock(inode->i_mapping);
err = truncate_bdev_range(bdev, mode, start, start + len - 1);
if (err)
- return err;
+ goto fail;
- return blkdev_issue_discard(bdev, start >> 9, len >> 9,
- GFP_KERNEL, flags);
+ err = blkdev_issue_discard(bdev, start >> 9, len >> 9,
+ GFP_KERNEL, flags);
+
+fail:
+ filemap_invalidate_unlock(inode->i_mapping);
+ return err;
}
static int blk_ioctl_zeroout(struct block_device *bdev, fmode_t mode,
@@ -148,6 +154,7 @@ static int blk_ioctl_zeroout(struct block_device *bdev, fmode_t mode,
{
uint64_t range[2];
uint64_t start, end, len;
+ struct inode *inode = bdev->bd_inode;
int err;
if (!(mode & FMODE_WRITE))
@@ -170,12 +177,17 @@ static int blk_ioctl_zeroout(struct block_device *bdev, fmode_t mode,
return -EINVAL;
/* Invalidate the page cache, including dirty pages */
+ filemap_invalidate_lock(inode->i_mapping);
err = truncate_bdev_range(bdev, mode, start, end);
if (err)
- return err;
+ goto fail;
+
+ err = blkdev_issue_zeroout(bdev, start >> 9, len >> 9, GFP_KERNEL,
+ BLKDEV_ZERO_NOUNMAP);
- return blkdev_issue_zeroout(bdev, start >> 9, len >> 9, GFP_KERNEL,
- BLKDEV_ZERO_NOUNMAP);
+fail:
+ filemap_invalidate_unlock(inode->i_mapping);
+ return err;
}
static int put_ushort(unsigned short __user *argp, unsigned short val)
diff --git a/crypto/algapi.c b/crypto/algapi.c
index d379fd91fb7b03..a366cb3e8aa184 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -284,6 +284,8 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg)
if (larval)
list_add(&larval->alg.cra_list, &crypto_alg_list);
+ else
+ alg->cra_flags |= CRYPTO_ALG_TESTED;
crypto_stats_init(alg);
diff --git a/crypto/zstd.c b/crypto/zstd.c
index 1a3309f066f7e7..154a969c83a822 100644
--- a/crypto/zstd.c
+++ b/crypto/zstd.c
@@ -18,22 +18,22 @@
#define ZSTD_DEF_LEVEL 3
struct zstd_ctx {
- ZSTD_CCtx *cctx;
- ZSTD_DCtx *dctx;
+ zstd_cctx *cctx;
+ zstd_dctx *dctx;
void *cwksp;
void *dwksp;
};
-static ZSTD_parameters zstd_params(void)
+static zstd_parameters zstd_params(void)
{
- return ZSTD_getParams(ZSTD_DEF_LEVEL, 0, 0);
+ return zstd_get_params(ZSTD_DEF_LEVEL, 0);
}
static int zstd_comp_init(struct zstd_ctx *ctx)
{
int ret = 0;
- const ZSTD_parameters params = zstd_params();
- const size_t wksp_size = ZSTD_CCtxWorkspaceBound(params.cParams);
+ const zstd_parameters params = zstd_params();
+ const size_t wksp_size = zstd_cctx_workspace_bound(&params.cParams);
ctx->cwksp = vzalloc(wksp_size);
if (!ctx->cwksp) {
@@ -41,7 +41,7 @@ static int zstd_comp_init(struct zstd_ctx *ctx)
goto out;
}
- ctx->cctx = ZSTD_initCCtx(ctx->cwksp, wksp_size);
+ ctx->cctx = zstd_init_cctx(ctx->cwksp, wksp_size);
if (!ctx->cctx) {
ret = -EINVAL;
goto out_free;
@@ -56,7 +56,7 @@ out_free:
static int zstd_decomp_init(struct zstd_ctx *ctx)
{
int ret = 0;
- const size_t wksp_size = ZSTD_DCtxWorkspaceBound();
+ const size_t wksp_size = zstd_dctx_workspace_bound();
ctx->dwksp = vzalloc(wksp_size);
if (!ctx->dwksp) {
@@ -64,7 +64,7 @@ static int zstd_decomp_init(struct zstd_ctx *ctx)
goto out;
}
- ctx->dctx = ZSTD_initDCtx(ctx->dwksp, wksp_size);
+ ctx->dctx = zstd_init_dctx(ctx->dwksp, wksp_size);
if (!ctx->dctx) {
ret = -EINVAL;
goto out_free;
@@ -152,10 +152,10 @@ static int __zstd_compress(const u8 *src, unsigned int slen,
{
size_t out_len;
struct zstd_ctx *zctx = ctx;
- const ZSTD_parameters params = zstd_params();
+ const zstd_parameters params = zstd_params();
- out_len = ZSTD_compressCCtx(zctx->cctx, dst, *dlen, src, slen, params);
- if (ZSTD_isError(out_len))
+ out_len = zstd_compress_cctx(zctx->cctx, dst, *dlen, src, slen, &params);
+ if (zstd_is_error(out_len))
return -EINVAL;
*dlen = out_len;
return 0;
@@ -182,8 +182,8 @@ static int __zstd_decompress(const u8 *src, unsigned int slen,
size_t out_len;
struct zstd_ctx *zctx = ctx;
- out_len = ZSTD_decompressDCtx(zctx->dctx, dst, *dlen, src, slen);
- if (ZSTD_isError(out_len))
+ out_len = zstd_decompress_dctx(zctx->dctx, dst, *dlen, src, slen);
+ if (zstd_is_error(out_len))
return -EINVAL;
*dlen = out_len;
return 0;
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 28430c093a7f4d..8a6835bfd18a27 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -131,7 +131,7 @@ const struct attribute_group *ahci_shost_groups[] = {
};
EXPORT_SYMBOL_GPL(ahci_shost_groups);
-struct attribute *ahci_sdev_attrs[] = {
+static struct attribute *ahci_sdev_attrs[] = {
&dev_attr_sw_activity.attr,
&dev_attr_unload_heads.attr,
&dev_attr_ncq_prio_supported.attr,
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 3018ca84a3d8ca..8a0ccb190d7670 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2052,8 +2052,19 @@ static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
struct ata_port *ap = dev->link->ap;
unsigned int err, i;
+ if (dev->horkage & ATA_HORKAGE_NO_ID_DEV_LOG)
+ return false;
+
if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE)) {
- ata_dev_warn(dev, "ATA Identify Device Log not supported\n");
+ /*
+ * IDENTIFY DEVICE data log is defined as mandatory starting
+ * with ACS-3 (ATA version 10). Warn about the missing log
+ * for drives which implement this ATA level or above.
+ */
+ if (ata_id_major_version(dev->id) >= 10)
+ ata_dev_warn(dev,
+ "ATA Identify Device Log not supported\n");
+ dev->horkage |= ATA_HORKAGE_NO_ID_DEV_LOG;
return false;
}
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index bf9c4b6c5c3d4e..1d4a6f1e88cd15 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -93,6 +93,12 @@ static const unsigned long ata_eh_identify_timeouts[] = {
ULONG_MAX,
};
+static const unsigned long ata_eh_revalidate_timeouts[] = {
+ 15000, /* Some drives are slow to read log pages when waking-up */
+ 15000, /* combined time till here is enough even for media access */
+ ULONG_MAX,
+};
+
static const unsigned long ata_eh_flush_timeouts[] = {
15000, /* be generous with flush */
15000, /* ditto */
@@ -129,6 +135,8 @@ static const struct ata_eh_cmd_timeout_ent
ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
{ .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
.timeouts = ata_eh_identify_timeouts, },
+ { .commands = CMDS(ATA_CMD_READ_LOG_EXT, ATA_CMD_READ_LOG_DMA_EXT),
+ .timeouts = ata_eh_revalidate_timeouts, },
{ .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
.timeouts = ata_eh_other_timeouts, },
{ .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index 8440203e835edf..b29d3f1d64b033 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -469,10 +469,8 @@ static int ahci_highbank_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "no irq\n");
+ if (irq < 0)
return irq;
- }
if (!irq)
return -EINVAL;
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 981e72a3dafbed..ff16a36a908bc1 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -677,6 +677,8 @@ void remove_cpu_topology(unsigned int cpu)
cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
for_each_cpu(sibling, topology_sibling_cpumask(cpu))
cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
+ for_each_cpu(sibling, topology_cluster_cpumask(cpu))
+ cpumask_clear_cpu(cpu, topology_cluster_cpumask(sibling));
for_each_cpu(sibling, topology_llc_cpumask(cpu))
cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index 4c2f7d61cb9b43..183d5cc37d42c0 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -485,7 +485,7 @@ static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev)
list_add_tail(&gdev->next, &brcmstb_gisb_arb_device_list);
#ifdef CONFIG_MIPS
- board_be_handler = brcmstb_bus_error_handler;
+ mips_set_be_handler(brcmstb_bus_error_handler);
#endif
if (list_is_singular(&brcmstb_gisb_arb_device_list)) {
diff --git a/drivers/clk/actions/owl-factor.c b/drivers/clk/actions/owl-factor.c
index f15e2621fa185e..64f316cf7cfcc3 100644
--- a/drivers/clk/actions/owl-factor.c
+++ b/drivers/clk/actions/owl-factor.c
@@ -10,7 +10,6 @@
#include <linux/clk-provider.h>
#include <linux/regmap.h>
-#include <linux/slab.h>
#include "owl-factor.h"
diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c
index bc3be5f3eae155..24dab2312bc6fa 100644
--- a/drivers/clk/clk-ast2600.c
+++ b/drivers/clk/clk-ast2600.c
@@ -51,6 +51,8 @@ static DEFINE_SPINLOCK(aspeed_g6_clk_lock);
static struct clk_hw_onecell_data *aspeed_g6_clk_data;
static void __iomem *scu_g6_base;
+/* AST2600 revision: A0, A1, A2, etc */
+static u8 soc_rev;
/*
* Clocks marked with CLK_IS_CRITICAL:
@@ -191,9 +193,8 @@ static struct clk_hw *ast2600_calc_pll(const char *name, u32 val)
static struct clk_hw *ast2600_calc_apll(const char *name, u32 val)
{
unsigned int mult, div;
- u32 chip_id = readl(scu_g6_base + ASPEED_G6_SILICON_REV);
- if (((chip_id & CHIP_REVISION_ID) >> 16) >= 2) {
+ if (soc_rev >= 2) {
if (val & BIT(24)) {
/* Pass through mode */
mult = div = 1;
@@ -707,7 +708,7 @@ static const u32 ast2600_a1_axi_ahb200_tbl[] = {
static void __init aspeed_g6_cc(struct regmap *map)
{
struct clk_hw *hw;
- u32 val, div, divbits, chip_id, axi_div, ahb_div;
+ u32 val, div, divbits, axi_div, ahb_div;
clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, 25000000);
@@ -738,8 +739,7 @@ static void __init aspeed_g6_cc(struct regmap *map)
axi_div = 2;
divbits = (val >> 11) & 0x3;
- regmap_read(map, ASPEED_G6_SILICON_REV, &chip_id);
- if (chip_id & BIT(16)) {
+ if (soc_rev >= 1) {
if (!divbits) {
ahb_div = ast2600_a1_axi_ahb200_tbl[(val >> 8) & 0x3];
if (val & BIT(16))
@@ -784,6 +784,8 @@ static void __init aspeed_g6_cc_init(struct device_node *np)
if (!scu_g6_base)
return;
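+ /* Cache the silicon revision once at init; ast2600_calc_apll() and
+ * aspeed_g6_cc() below use it instead of re-reading the SCU register. */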
+ soc_rev = (readl(scu_g6_base + ASPEED_G6_SILICON_REV) & CHIP_REVISION_ID) >> 16;
+
aspeed_g6_clk_data = kzalloc(struct_size(aspeed_g6_clk_data, hws,
ASPEED_G6_NUM_CLKS), GFP_KERNEL);
if (!aspeed_g6_clk_data)
diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
index c04ae0e7e4b4b8..b9c5f904f53567 100644
--- a/drivers/clk/clk-composite.c
+++ b/drivers/clk/clk-composite.c
@@ -97,6 +97,7 @@ static int clk_composite_determine_rate(struct clk_hw *hw,
return ret;
req->rate = tmp_req.rate;
+ req->best_parent_hw = tmp_req.best_parent_hw;
req->best_parent_rate = tmp_req.best_parent_rate;
return 0;
diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c
index 57e4597cdf4c20..93fa8c9e11be00 100644
--- a/drivers/clk/clk-si5351.c
+++ b/drivers/clk/clk-si5351.c
@@ -1,15 +1,15 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * clk-si5351.c: Silicon Laboratories Si5351A/B/C I2C Clock Generator
+ * clk-si5351.c: Skyworks / Silicon Labs Si5351A/B/C I2C Clock Generator
*
* Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
* Rabeeh Khoury <rabeeh@solid-run.com>
*
* References:
* [1] "Si5351A/B/C Data Sheet"
- * https://www.silabs.com/Support%20Documents/TechnicalDocs/Si5351.pdf
- * [2] "Manually Generating an Si5351 Register Map"
- * https://www.silabs.com/Support%20Documents/TechnicalDocs/AN619.pdf
+ * https://www.skyworksinc.com/-/media/Skyworks/SL/documents/public/data-sheets/Si5351-B.pdf
+ * [2] "AN619: Manually Generating an Si5351 Register Map"
+ * https://www.skyworksinc.com/-/media/Skyworks/SL/documents/public/application-notes/AN619.pdf
*/
#include <linux/module.h>
diff --git a/drivers/clk/clk-si5351.h b/drivers/clk/clk-si5351.h
index 73dc8effc5197d..e9e2bfdaaedf89 100644
--- a/drivers/clk/clk-si5351.h
+++ b/drivers/clk/clk-si5351.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * clk-si5351.h: Silicon Laboratories Si5351A/B/C I2C Clock Generator
+ * clk-si5351.h: Skyworks / Silicon Labs Si5351A/B/C I2C Clock Generator
*
* Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
* Rabeeh Khoury <rabeeh@solid-run.com>
diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c
index c6d3b1ab3d55c1..e7be3e54b9be43 100644
--- a/drivers/clk/clk-versaclock5.c
+++ b/drivers/clk/clk-versaclock5.c
@@ -905,7 +905,7 @@ output_error:
static const struct of_device_id clk_vc5_of_match[];
-static int vc5_probe(struct i2c_client *client, const struct i2c_device_id *id)
+static int vc5_probe(struct i2c_client *client)
{
unsigned int oe, sd, src_mask = 0, src_val = 0;
struct vc5_driver_data *vc5;
@@ -1244,7 +1244,7 @@ static struct i2c_driver vc5_driver = {
.pm = &vc5_pm_ops,
.of_match_table = clk_vc5_of_match,
},
- .probe = vc5_probe,
+ .probe_new = vc5_probe,
.remove = vc5_remove,
.id_table = vc5_id,
};
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index 819949973db154..7d220a01de1f84 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -391,11 +391,11 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name,
#define imx8m_clk_hw_composite(name, parent_names, reg) \
_imx8m_clk_hw_composite(name, parent_names, reg, \
- IMX_COMPOSITE_CORE, IMX_COMPOSITE_CLK_FLAGS_DEFAULT)
+ 0, IMX_COMPOSITE_CLK_FLAGS_DEFAULT)
#define imx8m_clk_hw_composite_critical(name, parent_names, reg) \
_imx8m_clk_hw_composite(name, parent_names, reg, \
- IMX_COMPOSITE_CORE, IMX_COMPOSITE_CLK_FLAGS_CRITICAL)
+ 0, IMX_COMPOSITE_CLK_FLAGS_CRITICAL)
#define imx8m_clk_hw_composite_bus(name, parent_names, reg) \
_imx8m_clk_hw_composite(name, parent_names, reg, \
diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c
index 266c7595d33022..af31633a8862e1 100644
--- a/drivers/clk/ingenic/cgu.c
+++ b/drivers/clk/ingenic/cgu.c
@@ -453,15 +453,15 @@ ingenic_clk_calc_div(struct clk_hw *hw,
}
/* Impose hardware constraints */
- div = min_t(unsigned, div, 1 << clk_info->div.bits);
- div = max_t(unsigned, div, 1);
+ div = clamp_t(unsigned int, div, clk_info->div.div,
+ clk_info->div.div << clk_info->div.bits);
/*
* If the divider value itself must be divided before being written to
* the divider register, we must ensure we don't have any bits set that
* would be lost as a result of doing so.
*/
- div /= clk_info->div.div;
+ div = DIV_ROUND_UP(div, clk_info->div.div);
div *= clk_info->div.div;
return div;
diff --git a/drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c b/drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c
index 0e2ac0a30aa04f..4ab312eb26a5d5 100644
--- a/drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c
+++ b/drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c
@@ -10,8 +10,6 @@
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
-#include <dt-bindings/clock/mt8195-clk.h>
-
static const struct mtk_gate_regs imp_iic_wrap_cg_regs = {
.set_ofs = 0xe08,
.clr_ofs = 0xe04,
diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
index 3c3a7ff0456219..9b1674b28d45de 100644
--- a/drivers/clk/qcom/gcc-msm8996.c
+++ b/drivers/clk/qcom/gcc-msm8996.c
@@ -2937,20 +2937,6 @@ static struct clk_branch gcc_smmu_aggre0_ahb_clk = {
},
};
-static struct clk_branch gcc_aggre1_pnoc_ahb_clk = {
- .halt_reg = 0x82014,
- .clkr = {
- .enable_reg = 0x82014,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_aggre1_pnoc_ahb_clk",
- .parent_names = (const char *[]){ "periph_noc_clk_src" },
- .num_parents = 1,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_aggre2_ufs_axi_clk = {
.halt_reg = 0x83014,
.clkr = {
@@ -3474,7 +3460,6 @@ static struct clk_regmap *gcc_msm8996_clocks[] = {
[GCC_AGGRE0_CNOC_AHB_CLK] = &gcc_aggre0_cnoc_ahb_clk.clkr,
[GCC_SMMU_AGGRE0_AXI_CLK] = &gcc_smmu_aggre0_axi_clk.clkr,
[GCC_SMMU_AGGRE0_AHB_CLK] = &gcc_smmu_aggre0_ahb_clk.clkr,
- [GCC_AGGRE1_PNOC_AHB_CLK] = &gcc_aggre1_pnoc_ahb_clk.clkr,
[GCC_AGGRE2_UFS_AXI_CLK] = &gcc_aggre2_ufs_axi_clk.clkr,
[GCC_AGGRE2_USB3_AXI_CLK] = &gcc_aggre2_usb3_axi_clk.clkr,
[GCC_QSPI_AHB_CLK] = &gcc_qspi_ahb_clk.clkr,
diff --git a/drivers/clk/rockchip/Kconfig b/drivers/clk/rockchip/Kconfig
index 2dfd6a38339324..3067bdb6e11916 100644
--- a/drivers/clk/rockchip/Kconfig
+++ b/drivers/clk/rockchip/Kconfig
@@ -80,14 +80,14 @@ config CLK_RK3368
Build the driver for RK3368 Clock Driver.
config CLK_RK3399
- tristate "Rockchip RK3399 clock controller support"
+ bool "Rockchip RK3399 clock controller support"
depends on ARM64 || COMPILE_TEST
default y
help
Build the driver for RK3399 Clock Driver.
config CLK_RK3568
- tristate "Rockchip RK3568 clock controller support"
+ bool "Rockchip RK3568 clock controller support"
depends on ARM64 || COMPILE_TEST
default y
help
diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
index 7924598747b6ba..306910a3a0d384 100644
--- a/drivers/clk/rockchip/clk-rk3399.c
+++ b/drivers/clk/rockchip/clk-rk3399.c
@@ -1630,7 +1630,6 @@ static const struct of_device_id clk_rk3399_match_table[] = {
},
{ }
};
-MODULE_DEVICE_TABLE(of, clk_rk3399_match_table);
static int __init clk_rk3399_probe(struct platform_device *pdev)
{
@@ -1656,7 +1655,4 @@ static struct platform_driver clk_rk3399_driver = {
.suppress_bind_attrs = true,
},
};
-module_platform_driver_probe(clk_rk3399_driver, clk_rk3399_probe);
-
-MODULE_DESCRIPTION("Rockchip RK3399 Clock Driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver_probe(clk_rk3399_driver, clk_rk3399_probe);
diff --git a/drivers/clk/rockchip/clk-rk3568.c b/drivers/clk/rockchip/clk-rk3568.c
index 939e7079c33472..69a9e8069a4864 100644
--- a/drivers/clk/rockchip/clk-rk3568.c
+++ b/drivers/clk/rockchip/clk-rk3568.c
@@ -1693,7 +1693,6 @@ static const struct of_device_id clk_rk3568_match_table[] = {
},
{ }
};
-MODULE_DEVICE_TABLE(of, clk_rk3568_match_table);
static int __init clk_rk3568_probe(struct platform_device *pdev)
{
@@ -1719,7 +1718,4 @@ static struct platform_driver clk_rk3568_driver = {
.suppress_bind_attrs = true,
},
};
-module_platform_driver_probe(clk_rk3568_driver, clk_rk3568_probe);
-
-MODULE_DESCRIPTION("Rockchip RK3568 Clock Driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver_probe(clk_rk3568_driver, clk_rk3568_probe);
diff --git a/drivers/clk/uniphier/clk-uniphier-core.c b/drivers/clk/uniphier/clk-uniphier-core.c
index 12380236d7ab6c..46c66fac48e6a7 100644
--- a/drivers/clk/uniphier/clk-uniphier-core.c
+++ b/drivers/clk/uniphier/clk-uniphier-core.c
@@ -132,6 +132,10 @@ static const struct of_device_id uniphier_clk_match[] = {
.compatible = "socionext,uniphier-pxs3-clock",
.data = uniphier_pxs3_sys_clk_data,
},
+ {
+ .compatible = "socionext,uniphier-nx1-clock",
+ .data = uniphier_nx1_sys_clk_data,
+ },
/* Media I/O clock, SD clock */
{
.compatible = "socionext,uniphier-ld4-mio-clock",
@@ -165,6 +169,10 @@ static const struct of_device_id uniphier_clk_match[] = {
.compatible = "socionext,uniphier-pxs3-sd-clock",
.data = uniphier_pro5_sd_clk_data,
},
+ {
+ .compatible = "socionext,uniphier-nx1-sd-clock",
+ .data = uniphier_pro5_sd_clk_data,
+ },
/* Peripheral clock */
{
.compatible = "socionext,uniphier-ld4-peri-clock",
@@ -198,6 +206,15 @@ static const struct of_device_id uniphier_clk_match[] = {
.compatible = "socionext,uniphier-pxs3-peri-clock",
.data = uniphier_pro4_peri_clk_data,
},
+ {
+ .compatible = "socionext,uniphier-nx1-peri-clock",
+ .data = uniphier_pro4_peri_clk_data,
+ },
+ /* SoC-glue clock */
+ {
+ .compatible = "socionext,uniphier-pro4-sg-clock",
+ .data = uniphier_pro4_sg_clk_data,
+ },
{ /* sentinel */ }
};
diff --git a/drivers/clk/uniphier/clk-uniphier-sys.c b/drivers/clk/uniphier/clk-uniphier-sys.c
index 32b301724183f8..0180470b24db19 100644
--- a/drivers/clk/uniphier/clk-uniphier-sys.c
+++ b/drivers/clk/uniphier/clk-uniphier-sys.c
@@ -20,6 +20,10 @@
UNIPHIER_CLK_FACTOR("sd-200m", -1, "spll", 1, 10), \
UNIPHIER_CLK_FACTOR("sd-133m", -1, "spll", 1, 15)
+#define UNIPHIER_NX1_SYS_CLK_SD \
+ UNIPHIER_CLK_FACTOR("sd-200m", -1, "spll", 1, 4), \
+ UNIPHIER_CLK_FACTOR("sd-133m", -1, "spll", 1, 6)
+
#define UNIPHIER_LD4_SYS_CLK_NAND(idx) \
UNIPHIER_CLK_FACTOR("nand-50m", -1, "spll", 1, 32), \
UNIPHIER_CLK_GATE("nand", (idx), "nand-50m", 0x2104, 2)
@@ -288,6 +292,8 @@ const struct uniphier_clk_data uniphier_pxs3_sys_clk_data[] = {
UNIPHIER_CLK_GATE("sata0", 28, NULL, 0x210c, 7),
UNIPHIER_CLK_GATE("sata1", 29, NULL, 0x210c, 8),
UNIPHIER_CLK_GATE("sata-phy", 30, NULL, 0x210c, 21),
+ UNIPHIER_LD11_SYS_CLK_AIO(40),
+ UNIPHIER_LD11_SYS_CLK_EXIV(42),
/* CPU gears */
UNIPHIER_CLK_DIV4("cpll", 2, 3, 4, 8),
UNIPHIER_CLK_DIV4("spll", 2, 3, 4, 8),
@@ -300,3 +306,44 @@ const struct uniphier_clk_data uniphier_pxs3_sys_clk_data[] = {
"spll/4", "spll/8", "s2pll/4", "s2pll/8"),
{ /* sentinel */ }
};
+
+const struct uniphier_clk_data uniphier_nx1_sys_clk_data[] = {
+ UNIPHIER_CLK_FACTOR("cpll", -1, "ref", 100, 1), /* ARM: 2500 MHz */
+ UNIPHIER_CLK_FACTOR("spll", -1, "ref", 32, 1), /* 800 MHz */
+ UNIPHIER_CLK_FACTOR("uart", 0, "spll", 1, 6),
+ UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 16),
+ UNIPHIER_NX1_SYS_CLK_SD,
+ UNIPHIER_CLK_GATE("emmc", 4, NULL, 0x2108, 8),
+ UNIPHIER_CLK_GATE("ether", 6, NULL, 0x210c, 0),
+ UNIPHIER_CLK_GATE("usb30-0", 12, NULL, 0x210c, 16), /* =GIO */
+ UNIPHIER_CLK_GATE("usb30-1", 13, NULL, 0x210c, 20), /* =GIO1P */
+ UNIPHIER_CLK_GATE("usb30-hsphy0", 16, NULL, 0x210c, 24),
+ UNIPHIER_CLK_GATE("usb30-ssphy0", 17, NULL, 0x210c, 25),
+ UNIPHIER_CLK_GATE("usb30-ssphy1", 18, NULL, 0x210c, 26),
+ UNIPHIER_CLK_GATE("pcie", 24, NULL, 0x210c, 8),
+ UNIPHIER_CLK_GATE("voc", 52, NULL, 0x2110, 0),
+ UNIPHIER_CLK_GATE("hdmitx", 58, NULL, 0x2110, 8),
+ /* CPU gears */
+ UNIPHIER_CLK_DIV5("cpll", 2, 4, 8, 16, 32),
+ UNIPHIER_CLK_CPUGEAR("cpu-ca53", 33, 0x8080, 0xf, 5,
+ "cpll/2", "cpll/4", "cpll/8", "cpll/16",
+ "cpll/32"),
+ { /* sentinel */ }
+};
+
+const struct uniphier_clk_data uniphier_pro4_sg_clk_data[] = {
+ UNIPHIER_CLK_DIV("gpll", 4),
+ {
+ .name = "sata-ref",
+ .type = UNIPHIER_CLK_TYPE_MUX,
+ .idx = 0,
+ .data.mux = {
+ .parent_names = { "gpll/4", "ref", },
+ .num_parents = 2,
+ .reg = 0x1a28,
+ .masks = { 0x1, 0x1, },
+ .vals = { 0x0, 0x1, },
+ },
+ },
+ { /* sentinel */ }
+};
diff --git a/drivers/clk/uniphier/clk-uniphier.h b/drivers/clk/uniphier/clk-uniphier.h
index 9e30362e55e13c..dea0c7829aeefd 100644
--- a/drivers/clk/uniphier/clk-uniphier.h
+++ b/drivers/clk/uniphier/clk-uniphier.h
@@ -119,6 +119,10 @@ struct uniphier_clk_data {
UNIPHIER_CLK_DIV2(parent, div0, div1), \
UNIPHIER_CLK_DIV2(parent, div2, div3)
+#define UNIPHIER_CLK_DIV5(parent, div0, div1, div2, div3, div4) \
+ UNIPHIER_CLK_DIV4(parent, div0, div1, div2, div3), \
+ UNIPHIER_CLK_DIV(parent, div4)
+
struct clk_hw *uniphier_clk_register_cpugear(struct device *dev,
struct regmap *regmap,
const char *name,
@@ -146,9 +150,11 @@ extern const struct uniphier_clk_data uniphier_pxs2_sys_clk_data[];
extern const struct uniphier_clk_data uniphier_ld11_sys_clk_data[];
extern const struct uniphier_clk_data uniphier_ld20_sys_clk_data[];
extern const struct uniphier_clk_data uniphier_pxs3_sys_clk_data[];
+extern const struct uniphier_clk_data uniphier_nx1_sys_clk_data[];
extern const struct uniphier_clk_data uniphier_ld4_mio_clk_data[];
extern const struct uniphier_clk_data uniphier_pro5_sd_clk_data[];
extern const struct uniphier_clk_data uniphier_ld4_peri_clk_data[];
extern const struct uniphier_clk_data uniphier_pro4_peri_clk_data[];
+extern const struct uniphier_clk_data uniphier_pro4_sg_clk_data[];
#endif /* __CLK_UNIPHIER_H__ */
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 2ea59cb8edcdc9..6437b2e978fb9f 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -67,12 +67,9 @@ static void dma_buf_release(struct dentry *dentry)
BUG_ON(dmabuf->vmapping_counter);
/*
- * Any fences that a dma-buf poll can wait on should be signaled
- * before releasing dma-buf. This is the responsibility of each
- * driver that uses the reservation objects.
- *
- * If you hit this BUG() it means someone dropped their ref to the
- * dma-buf while still having pending operation to the buffer.
+ * If you hit this BUG() it could mean:
+ * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else
+ * * dmabuf->cb_in/out.active are non-zero despite there being no pending fence callback
*/
BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);
@@ -200,6 +197,7 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
+ struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll);
unsigned long flags;
spin_lock_irqsave(&dcb->poll->lock, flags);
@@ -207,21 +205,18 @@ static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
dcb->active = 0;
spin_unlock_irqrestore(&dcb->poll->lock, flags);
dma_fence_put(fence);
+ /* Paired with get_file in dma_buf_poll */
+ fput(dmabuf->file);
}
-static bool dma_buf_poll_shared(struct dma_resv *resv,
+static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
struct dma_buf_poll_cb_t *dcb)
{
- struct dma_resv_list *fobj = dma_resv_shared_list(resv);
+ struct dma_resv_iter cursor;
struct dma_fence *fence;
- int i, r;
-
- if (!fobj)
- return false;
+ int r;
- for (i = 0; i < fobj->shared_count; ++i) {
- fence = rcu_dereference_protected(fobj->shared[i],
- dma_resv_held(resv));
+ dma_resv_for_each_fence(&cursor, resv, write, fence) {
dma_fence_get(fence);
r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
if (!r)
@@ -232,24 +227,6 @@ static bool dma_buf_poll_shared(struct dma_resv *resv,
return false;
}
-static bool dma_buf_poll_excl(struct dma_resv *resv,
- struct dma_buf_poll_cb_t *dcb)
-{
- struct dma_fence *fence = dma_resv_excl_fence(resv);
- int r;
-
- if (!fence)
- return false;
-
- dma_fence_get(fence);
- r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
- if (!r)
- return true;
- dma_fence_put(fence);
-
- return false;
-}
-
static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
struct dma_buf *dmabuf;
@@ -282,8 +259,10 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
spin_unlock_irq(&dmabuf->poll.lock);
if (events & EPOLLOUT) {
- if (!dma_buf_poll_shared(resv, dcb) &&
- !dma_buf_poll_excl(resv, dcb))
+ /* Paired with fput in dma_buf_poll_cb */
+ get_file(dmabuf->file);
+
+ if (!dma_buf_poll_add_cb(resv, true, dcb))
/* No callback queued, wake up any other waiters */
dma_buf_poll_cb(NULL, &dcb->cb);
else
@@ -303,7 +282,10 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
spin_unlock_irq(&dmabuf->poll.lock);
if (events & EPOLLIN) {
- if (!dma_buf_poll_excl(resv, dcb))
+ /* Paired with fput in dma_buf_poll_cb */
+ get_file(dmabuf->file);
+
+ if (!dma_buf_poll_add_cb(resv, false, dcb))
/* No callback queued, wake up any other waiters */
dma_buf_poll_cb(NULL, &dcb->cb);
else
@@ -1356,10 +1338,9 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
struct dma_buf *buf_obj;
struct dma_buf_attachment *attach_obj;
- struct dma_resv *robj;
- struct dma_resv_list *fobj;
+ struct dma_resv_iter cursor;
struct dma_fence *fence;
- int count = 0, attach_count, shared_count, i;
+ int count = 0, attach_count;
size_t size = 0;
int ret;
@@ -1378,6 +1359,8 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
if (ret)
goto error_unlock;
+
+ spin_lock(&buf_obj->name_lock);
seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
buf_obj->size,
buf_obj->file->f_flags, buf_obj->file->f_mode,
@@ -1385,22 +1368,12 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
buf_obj->exp_name,
file_inode(buf_obj->file)->i_ino,
buf_obj->name ?: "");
+ spin_unlock(&buf_obj->name_lock);
- robj = buf_obj->resv;
- fence = dma_resv_excl_fence(robj);
- if (fence)
- seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
- fence->ops->get_driver_name(fence),
- fence->ops->get_timeline_name(fence),
- dma_fence_is_signaled(fence) ? "" : "un");
-
- fobj = rcu_dereference_protected(robj->fence,
- dma_resv_held(robj));
- shared_count = fobj ? fobj->shared_count : 0;
- for (i = 0; i < shared_count; i++) {
- fence = rcu_dereference_protected(fobj->shared[i],
- dma_resv_held(robj));
- seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
+ dma_resv_for_each_fence(&cursor, buf_obj->resv, true, fence) {
+ seq_printf(s, "\t%s fence: %s %s %ssignalled\n",
+ dma_resv_iter_is_exclusive(&cursor) ?
+ "Exclusive" : "Shared",
fence->ops->get_driver_name(fence),
fence->ops->get_timeline_name(fence),
dma_fence_is_signaled(fence) ? "" : "un");
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index a480af9581bd13..9eb2baa387d4af 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -333,10 +333,14 @@ static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
{
cursor->seq = read_seqcount_begin(&cursor->obj->seq);
cursor->index = -1;
- if (cursor->all_fences)
+ cursor->shared_count = 0;
+ if (cursor->all_fences) {
cursor->fences = dma_resv_shared_list(cursor->obj);
- else
+ if (cursor->fences)
+ cursor->shared_count = cursor->fences->shared_count;
+ } else {
cursor->fences = NULL;
+ }
cursor->is_restarted = true;
}
@@ -363,7 +367,7 @@ static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
continue;
} else if (!cursor->fences ||
- cursor->index >= cursor->fences->shared_count) {
+ cursor->index >= cursor->shared_count) {
cursor->fence = NULL;
break;
@@ -424,6 +428,57 @@ struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor)
EXPORT_SYMBOL(dma_resv_iter_next_unlocked);
/**
+ * dma_resv_iter_first - first fence from a locked dma_resv object
+ * @cursor: cursor to record the current position
+ *
+ * Return the first fence in the dma_resv object while holding the
+ * &dma_resv.lock.
+ */
+struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor)
+{
+ struct dma_fence *fence;
+
+ dma_resv_assert_held(cursor->obj);
+
+ cursor->index = 0;
+ if (cursor->all_fences)
+ cursor->fences = dma_resv_shared_list(cursor->obj);
+ else
+ cursor->fences = NULL;
+
+ fence = dma_resv_excl_fence(cursor->obj);
+ if (!fence)
+ fence = dma_resv_iter_next(cursor);
+
+ cursor->is_restarted = true;
+ return fence;
+}
+EXPORT_SYMBOL_GPL(dma_resv_iter_first);
+
+/**
+ * dma_resv_iter_next - next fence from a locked dma_resv object
+ * @cursor: cursor to record the current position
+ *
+ * Return the next fence from the dma_resv object while holding the
+ * &dma_resv.lock.
+ */
+struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor)
+{
+ unsigned int idx;
+
+ dma_resv_assert_held(cursor->obj);
+
+ cursor->is_restarted = false;
+ if (!cursor->fences || cursor->index >= cursor->fences->shared_count)
+ return NULL;
+
+ idx = cursor->index++;
+ return rcu_dereference_protected(cursor->fences->shared[idx],
+ dma_resv_held(cursor->obj));
+}
+EXPORT_SYMBOL_GPL(dma_resv_iter_next);
+
+/**
* dma_resv_copy_fences - Copy all fences from src to dst.
* @dst: the destination reservation object
* @src: the source reservation object
@@ -448,10 +503,8 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
dma_resv_list_free(list);
dma_fence_put(excl);
- if (cursor.fences) {
- unsigned int cnt = cursor.fences->shared_count;
-
- list = dma_resv_list_alloc(cnt);
+ if (cursor.shared_count) {
+ list = dma_resv_list_alloc(cursor.shared_count);
if (!list) {
dma_resv_iter_end(&cursor);
return -ENOMEM;
@@ -522,7 +575,7 @@ int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **fence_excl,
if (fence_excl)
dma_fence_put(*fence_excl);
- count = cursor.fences ? cursor.fences->shared_count : 0;
+ count = cursor.shared_count;
count += fence_excl ? 0 : 1;
/* Eventually re-allocate the array */
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 2a926d0de42386..0039df26854ba0 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -100,11 +100,25 @@ config DRM_DEBUG_DP_MST_TOPOLOGY_REFS
This has the potential to use a lot of memory and print some very
large kernel messages. If in doubt, say "N".
+config DRM_DEBUG_MODESET_LOCK
+ bool "Enable backtrace history for lock contention"
+ depends on STACKTRACE_SUPPORT
+ depends on DEBUG_KERNEL
+ depends on EXPERT
+ select STACKDEPOT
+ default y if DEBUG_WW_MUTEX_SLOWPATH
+ help
+ Enable debug tracing of failures to gracefully handle drm modeset lock
+ contention. A history of each drm modeset lock path hitting -EDEADLK
+ will be saved until gracefully handled, and the backtrace will be
+ printed when attempting to lock a contended lock.
+
+ If in doubt, say "N".
+
config DRM_FBDEV_EMULATION
bool "Enable legacy fbdev support for your modesetting driver"
- depends on DRM
- depends on FB=y || FB=DRM
- select DRM_KMS_HELPER
+ depends on DRM_KMS_HELPER
+ depends on FB=y || FB=DRM_KMS_HELPER
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 751557af09bbb6..a15a4787c7ee79 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -297,7 +297,7 @@ void amdgpu_amdkfd_ras_poison_consumption_handler(struct kgd_dev *kgd);
void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
-void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
+void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo);
void amdgpu_amdkfd_reserve_system_mem(uint64_t size);
#else
static inline
@@ -312,7 +312,7 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
}
static inline
-void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
+void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
{
}
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 0e9cfe99ae9ec3..71acd577803ec3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -207,7 +207,7 @@ static void unreserve_mem_limit(struct amdgpu_device *adev,
spin_unlock(&kfd_mem_limit.mem_limit_lock);
}
-void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
+void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
u32 domain = bo->preferred_domains;
@@ -219,6 +219,8 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
}
unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
+
+ kfree(bo->kfd_bo);
}
@@ -734,14 +736,19 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
}
/* Add BO to VM internal data structures */
+ ret = amdgpu_bo_reserve(bo[i], false);
+ if (ret) {
+ pr_debug("Unable to reserve BO during memory attach");
+ goto unwind;
+ }
attachment[i]->bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]);
+ amdgpu_bo_unreserve(bo[i]);
if (unlikely(!attachment[i]->bo_va)) {
ret = -ENOMEM;
pr_err("Failed to add BO object to VM. ret == %d\n",
ret);
goto unwind;
}
-
attachment[i]->va = va;
attachment[i]->pte_flags = get_pte_flags(adev, mem);
attachment[i]->adev = adev;
@@ -757,7 +764,9 @@ unwind:
if (!attachment[i])
continue;
if (attachment[i]->bo_va) {
+ amdgpu_bo_reserve(bo[i], true);
amdgpu_vm_bo_rmv(adev, attachment[i]->bo_va);
+ amdgpu_bo_unreserve(bo[i]);
list_del(&attachment[i]->list);
}
if (bo[i])
@@ -1568,12 +1577,12 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
mem->va + bo_size * (1 + mem->aql_queue));
- ret = unreserve_bo_and_vms(&ctx, false, false);
-
/* Remove from VM internal data structures */
list_for_each_entry_safe(entry, tmp, &mem->attachments, list)
kfd_mem_detach(entry);
+ ret = unreserve_bo_and_vms(&ctx, false, false);
+
/* Free the sync object */
amdgpu_sync_free(&mem->sync);
@@ -1600,9 +1609,13 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv);
if (mem->dmabuf)
dma_buf_put(mem->dmabuf);
- drm_gem_object_put(&mem->bo->tbo.base);
mutex_destroy(&mem->lock);
- kfree(mem);
+
+ /* If this releases the last reference, it will end up calling
+ * amdgpu_amdkfd_release_notify and freeing the mem struct. That's why
+ * this needs to be the last call here.
+ */
+ drm_gem_object_put(&mem->bo->tbo.base);
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 6e40cc1bc6dcf9..5625f7736e3787 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2398,10 +2398,6 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
if (!adev->gmc.xgmi.pending_reset)
amdgpu_amdkfd_device_init(adev);
- r = amdgpu_amdkfd_resume_iommu(adev);
- if (r)
- goto init_failed;
-
amdgpu_fru_get_product_info(adev);
init_failed:
@@ -3171,11 +3167,21 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
{
switch (asic_type) {
#if defined(CONFIG_DRM_AMD_DC)
-#if defined(CONFIG_DRM_AMD_DC_SI)
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
case CHIP_OLAND:
+ /*
+ * We have systems in the wild with these ASICs that require
+ * LVDS and VGA support which is not supported with DC.
+ *
+ * Fall back to the non-DC driver here by default so as not to
+ * cause regressions.
+ */
+#if defined(CONFIG_DRM_AMD_DC_SI)
+ return amdgpu_dc > 0;
+#else
+ return false;
#endif
case CHIP_BONAIRE:
case CHIP_KAVERI:
@@ -4287,8 +4293,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
if (r)
return r;
- amdgpu_amdkfd_pre_reset(adev);
-
/* Resume IP prior to SMC */
r = amdgpu_device_ip_reinit_early_sriov(adev);
if (r)
@@ -4850,6 +4854,9 @@ static void amdgpu_device_recheck_guilty_jobs(
/* clear job's guilty and depend the folowing step to decide the real one */
drm_sched_reset_karma(s_job);
+ /* The real bad job will be resubmitted twice; take an extra dma_fence_get
+ * reference here so the fence refcount stays balanced. */
+ dma_fence_get(s_job->s_fence->parent);
drm_sched_resubmit_jobs_ext(&ring->sched, 1);
ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
@@ -4885,6 +4892,7 @@ retry:
/* got the hw fence, signal finished fence */
atomic_dec(ring->sched.score);
+ dma_fence_put(s_job->s_fence->parent);
dma_fence_get(&s_job->s_fence->finished);
dma_fence_signal(&s_job->s_fence->finished);
dma_fence_put(&s_job->s_fence->finished);
@@ -5020,8 +5028,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
- if (!amdgpu_sriov_vf(tmp_adev))
- amdgpu_amdkfd_pre_reset(tmp_adev);
+ amdgpu_amdkfd_pre_reset(tmp_adev);
/*
* Mark these ASICs to be reseted as untracked first
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index d7c8d9e3c2038a..ff70bc233489f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -867,7 +867,8 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(2, 0, 2):
case IP_VERSION(2, 2, 0):
amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
- amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
break;
case IP_VERSION(2, 0, 3):
break;
@@ -881,6 +882,7 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
break;
case IP_VERSION(3, 0, 0):
case IP_VERSION(3, 0, 16):
+ case IP_VERSION(3, 0, 64):
case IP_VERSION(3, 1, 1):
case IP_VERSION(3, 0, 2):
amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index a573424a6e0b4b..a1e63ba4c54a59 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -60,9 +60,10 @@ static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
goto unlock;
}
- ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
- TTM_BO_VM_NUM_PREFAULT, 1);
- drm_dev_exit(idx);
+ ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+ TTM_BO_VM_NUM_PREFAULT);
+
+ drm_dev_exit(idx);
} else {
ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index dfe667ea8b058e..651c7abfde0365 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -1423,6 +1423,8 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
struct drm_amdgpu_info_firmware fw_info;
struct drm_amdgpu_query_fw query_fw;
struct atom_context *ctx = adev->mode_info.atom_context;
+ uint8_t smu_minor, smu_debug;
+ uint16_t smu_major;
int ret, i;
static const char *ta_fw_name[TA_FW_TYPE_MAX_INDEX] = {
@@ -1568,8 +1570,11 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
return ret;
- seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n",
- fw_info.feature, fw_info.ver);
+ smu_major = (fw_info.ver >> 16) & 0xffff;
+ smu_minor = (fw_info.ver >> 8) & 0xff;
+ smu_debug = (fw_info.ver >> 0) & 0xff;
+ seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x (%d.%d.%d)\n",
+ fw_info.feature, fw_info.ver, smu_major, smu_minor, smu_debug);
/* SDMA */
query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index aeb92e5677acec..4fcfc2313b8ce1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1274,7 +1274,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
abo = ttm_to_amdgpu_bo(bo);
if (abo->kfd_bo)
- amdgpu_amdkfd_unreserve_memory_limit(abo);
+ amdgpu_amdkfd_release_notify(abo);
/* We only remove the fence if the resv has individualized. */
WARN_ON_ONCE(bo->type == ttm_bo_type_kernel
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 2658414c503d3e..4f7c70845785a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -134,6 +134,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
adev->vcn.indirect_sram = true;
break;
case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 64):
if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
fw_name = FIRMWARE_SIENNA_CICHLID;
else
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 978ac927ac11db..0fad2bf854ae9d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -806,9 +806,9 @@ static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
for (i = 0; i < ARRAY_SIZE(xgmi23_pcs_err_status_reg_aldebaran); i++)
pcs_clear_status(adev,
xgmi23_pcs_err_status_reg_aldebaran[i]);
- for (i = 0; i < ARRAY_SIZE(xgmi23_pcs_err_status_reg_aldebaran); i++)
+ for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_aldebaran); i++)
pcs_clear_status(adev,
- xgmi23_pcs_err_status_reg_aldebaran[i]);
+ xgmi3x16_pcs_err_status_reg_aldebaran[i]);
for (i = 0; i < ARRAY_SIZE(walf_pcs_err_status_reg_aldebaran); i++)
pcs_clear_status(adev,
walf_pcs_err_status_reg_aldebaran[i]);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 90a834dc40084e..e7dfeb466a0e4d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -8249,6 +8249,9 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
{
u32 reg, data;
+
+ amdgpu_gfx_off_ctrl(adev, false);
+
/* not for *_SOC15 */
reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
if (amdgpu_sriov_is_pp_one_vf(adev))
@@ -8263,6 +8266,8 @@ static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
else
WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
+
+ amdgpu_gfx_off_ctrl(adev, true);
}
static bool gfx_v10_0_check_rlcg_range(struct amdgpu_device *adev,
@@ -8316,11 +8321,8 @@ static void gfx_v10_cntl_power_gating(struct amdgpu_device *adev, bool enable)
if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(10, 3, 1):
- data = 0x4E20 & RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh;
- WREG32_SOC15(GC, 0, mmRLC_PG_DELAY_3, data);
- break;
case IP_VERSION(10, 3, 3):
- data = 0x1388 & RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh;
+ data = 0x4E20 & RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh;
WREG32_SOC15(GC, 0, mmRLC_PG_DELAY_3, data);
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 37b4a3db63602c..d17a6f39934703 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -3575,12 +3575,16 @@ static void gfx_v7_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
{
u32 data;
+ amdgpu_gfx_off_ctrl(adev, false);
+
data = RREG32(mmRLC_SPM_VMID);
data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK;
data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT;
WREG32(mmRLC_SPM_VMID, data);
+
+ amdgpu_gfx_off_ctrl(adev, true);
}
static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index e0302c23e9a7ec..5f112efda634b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -5624,6 +5624,8 @@ static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
{
u32 data;
+ amdgpu_gfx_off_ctrl(adev, false);
+
if (amdgpu_sriov_is_pp_one_vf(adev))
data = RREG32_NO_KIQ(mmRLC_SPM_VMID);
else
@@ -5636,6 +5638,8 @@ static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
WREG32_NO_KIQ(mmRLC_SPM_VMID, data);
else
WREG32(mmRLC_SPM_VMID, data);
+
+ amdgpu_gfx_off_ctrl(adev, true);
}
static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 7f944bb11298e8..b4b80f27b89409 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2462,7 +2462,9 @@ static int gfx_v9_0_sw_fini(void *handle)
amdgpu_gfx_kiq_fini(adev);
gfx_v9_0_mec_fini(adev);
- amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
if (adev->flags & AMD_IS_APU) {
amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
&adev->gfx.rlc.cp_table_gpu_addr,
@@ -5102,6 +5104,8 @@ static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
{
u32 reg, data;
+ amdgpu_gfx_off_ctrl(adev, false);
+
reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
if (amdgpu_sriov_is_pp_one_vf(adev))
data = RREG32_NO_KIQ(reg);
@@ -5115,6 +5119,8 @@ static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
else
WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
+
+ amdgpu_gfx_off_ctrl(adev, true);
}
static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index bda1542ef1ddfe..480e41847d7c0b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -348,6 +348,10 @@ static void gfxhub_v1_0_gart_disable(struct amdgpu_device *adev)
WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_CNTL,
i * hub->ctx_distance, 0);
+ if (amdgpu_sriov_vf(adev))
+ /* Avoid writes to GMC registers */
+ return;
+
/* Setup TLB control */
tmp = RREG32_SOC15(GC, 0, mmMC_VM_MX_L1_TLB_CNTL);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c
index 497b86c376c6cc..90f0aefbdb397e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c
@@ -54,15 +54,17 @@ int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev)
seg_size = REG_GET_FIELD(
RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_SIZE_ALDE),
MC_VM_XGMI_LFB_SIZE, PF_LFB_SIZE) << 24;
+ max_region =
+ REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL_ALDE, PF_MAX_REGION);
} else {
xgmi_lfb_cntl = RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_CNTL);
seg_size = REG_GET_FIELD(
RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_SIZE),
MC_VM_XGMI_LFB_SIZE, PF_LFB_SIZE) << 24;
+ max_region =
+ REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_MAX_REGION);
}
- max_region =
- REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_MAX_REGION);
switch (adev->asic_type) {
@@ -89,9 +91,15 @@ int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev)
if (adev->gmc.xgmi.num_physical_nodes > max_num_physical_nodes)
return -EINVAL;
- adev->gmc.xgmi.physical_node_id =
- REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL,
- PF_LFB_REGION);
+ if (adev->asic_type == CHIP_ALDEBARAN) {
+ adev->gmc.xgmi.physical_node_id =
+ REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL_ALDE,
+ PF_LFB_REGION);
+ } else {
+ adev->gmc.xgmi.physical_node_id =
+ REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL,
+ PF_LFB_REGION);
+ }
if (adev->gmc.xgmi.physical_node_id > max_physical_node_id)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index febc903adf580f..59eafa31c626ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -182,6 +182,7 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
{
switch (adev->ip_versions[UVD_HWIP][0]) {
case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 64):
if (amdgpu_sriov_vf(adev)) {
if (encode)
*codecs = &sriov_sc_video_codecs_encode;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index d5d023a242691f..2d558c2f417df5 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -534,6 +534,19 @@ static int uvd_v6_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ cancel_delayed_work_sync(&adev->uvd.idle_work);
+
+ if (RREG32(mmUVD_STATUS) != 0)
+ uvd_v6_0_stop(adev);
+
+ return 0;
+}
+
+static int uvd_v6_0_suspend(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
/*
* Proper cleanups before halting the HW engine:
* - cancel the delayed idle work
@@ -558,17 +571,6 @@ static int uvd_v6_0_hw_fini(void *handle)
AMD_CG_STATE_GATE);
}
- if (RREG32(mmUVD_STATUS) != 0)
- uvd_v6_0_stop(adev);
-
- return 0;
-}
-
-static int uvd_v6_0_suspend(void *handle)
-{
- int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
r = uvd_v6_0_hw_fini(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 0fffaf859c5943..3b119db16003ed 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -406,7 +406,7 @@ static const struct kfd_device_info aldebaran_device_info = {
static const struct kfd_device_info renoir_device_info = {
.asic_family = CHIP_RENOIR,
.asic_name = "renoir",
- .gfx_target_version = 90002,
+ .gfx_target_version = 90012,
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 8,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 533b27b35fc987..003ba6a373ff45 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1430,7 +1430,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
if (!dqm->sched_running)
return 0;
- if (dqm->is_hws_hang)
+ if (dqm->is_hws_hang || dqm->is_resetting)
return -EIO;
if (!dqm->active_runlist)
return retval;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index 2e86692def19a5..d1388896f9c121 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -308,7 +308,7 @@
* 16MB are reserved for kernel use (CWSR trap handler and kernel IB
* for now).
*/
-#define SVM_USER_BASE 0x1000000ull
+#define SVM_USER_BASE (u64)(KFD_CWSR_TBA_TMA_SIZE + 2*PAGE_SIZE)
#define SVM_CWSR_BASE (SVM_USER_BASE - KFD_CWSR_TBA_TMA_SIZE)
#define SVM_IB_BASE (SVM_CWSR_BASE - PAGE_SIZE)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 6d8634e40b3b70..9b9c2b9bf2ef08 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -281,6 +281,19 @@ static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate)
return cpages;
}
+static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
+{
+ unsigned long upages = 0;
+ unsigned long i;
+
+ for (i = 0; i < migrate->npages; i++) {
+ if (migrate->src[i] & MIGRATE_PFN_VALID &&
+ !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
+ upages++;
+ }
+ return upages;
+}
+
static int
svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
struct migrate_vma *migrate, struct dma_fence **mfence,
@@ -317,7 +330,6 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
svm_migrate_get_vram_page(prange, migrate->dst[i]);
migrate->dst[i] = migrate_pfn(migrate->dst[i]);
- migrate->dst[i] |= MIGRATE_PFN_LOCKED;
src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
DMA_TO_DEVICE);
r = dma_mapping_error(dev, src[i]);
@@ -610,7 +622,6 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));
migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
- migrate->dst[i] |= MIGRATE_PFN_LOCKED;
j++;
}
@@ -634,10 +645,11 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
struct vm_area_struct *vma, uint64_t start, uint64_t end)
{
uint64_t npages = (end - start) >> PAGE_SHIFT;
+ unsigned long upages = npages;
+ unsigned long cpages = 0;
struct kfd_process_device *pdd;
struct dma_fence *mfence = NULL;
struct migrate_vma migrate;
- unsigned long cpages = 0;
dma_addr_t *scratch;
size_t size;
void *buf;
@@ -671,6 +683,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
if (!cpages) {
pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n",
prange->start, prange->last);
+ upages = svm_migrate_unsuccessful_pages(&migrate);
goto out_free;
}
if (cpages != npages)
@@ -683,8 +696,9 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
scratch, npages);
migrate_vma_pages(&migrate);
- pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
- svm_migrate_successful_pages(&migrate), cpages, migrate.npages);
+ upages = svm_migrate_unsuccessful_pages(&migrate);
+ pr_debug("unsuccessful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
+ upages, cpages, migrate.npages);
svm_migrate_copy_done(adev, mfence);
migrate_vma_finalize(&migrate);
@@ -698,9 +712,9 @@ out:
if (pdd)
WRITE_ONCE(pdd->page_out, pdd->page_out + cpages);
- return cpages;
+ return upages;
}
- return r;
+ return r ? r : upages;
}
/**
@@ -720,7 +734,7 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm)
unsigned long addr;
unsigned long start;
unsigned long end;
- unsigned long cpages = 0;
+ unsigned long upages = 0;
long r = 0;
if (!prange->actual_loc) {
@@ -756,12 +770,12 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm)
pr_debug("failed %ld to migrate\n", r);
break;
} else {
- cpages += r;
+ upages += r;
}
addr = next;
}
- if (cpages) {
+ if (!upages) {
svm_range_vram_node_free(prange);
prange->actual_loc = 0;
}
@@ -784,7 +798,7 @@ static int
svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
struct mm_struct *mm)
{
- int r;
+ int r, retries = 3;
/*
* TODO: for both devices with PCIe large bar or on same xgmi hive, skip
@@ -793,9 +807,14 @@ svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc);
- r = svm_migrate_vram_to_ram(prange, mm);
- if (r)
- return r;
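+ /* A single pass may leave the range partially resident in VRAM; retry a
+ * few times before giving up. */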
+ do {
+ r = svm_migrate_vram_to_ram(prange, mm);
+ if (r)
+ return r;
+ } while (prange->actual_loc && --retries);
+
+ if (prange->actual_loc)
+ return -EDEADLK;
return svm_migrate_ram_to_vram(prange, best_loc, mm);
}
@@ -840,6 +859,11 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
pr_debug("failed find process at fault address 0x%lx\n", addr);
return VM_FAULT_SIGBUS;
}
+ if (READ_ONCE(p->svms.faulting_task) == current) {
+ pr_debug("skipping ram migration\n");
+ kfd_unref_process(p);
+ return 0;
+ }
addr >>= PAGE_SHIFT;
pr_debug("CPU page fault svms 0x%p address 0x%lx\n", &p->svms, addr);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 4104b167e7215e..94e92c0812db73 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -766,8 +766,10 @@ struct svm_range_list {
struct list_head deferred_range_list;
spinlock_t deferred_list_lock;
atomic_t evicted_ranges;
+ bool drain_pagefaults;
struct delayed_work restore_work;
DECLARE_BITMAP(bitmap_supported, MAX_GPU_INSTANCE);
+ struct task_struct *faulting_task;
};
/* Process data */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 457863861d6f37..b993011cfa64a8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -1715,7 +1715,11 @@ int kfd_process_evict_queues(struct kfd_process *p)
r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
&pdd->qpd);
- if (r) {
+ /* evict_process_queues returns -EIO if HWS is hung or the ASIC is
+ * resetting. In that case we still mark all queues as evicted so they
+ * are not added back later, since they have not actually been saved
+ * right now.
+ */
+ if (r && r != -EIO) {
pr_err("Failed to evict process queues\n");
goto fail;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index b691c8495d6662..16137c4247bbec 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1496,9 +1496,11 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
next = min(vma->vm_end, end);
npages = (next - addr) >> PAGE_SHIFT;
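+ /* Record the faulting task so svm_migrate_to_ram() can skip migration
+ * for faults triggered by this same task while it collects range pages. */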
+ WRITE_ONCE(p->svms.faulting_task, current);
r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
addr, npages, &hmm_range,
readonly, true, owner);
+ WRITE_ONCE(p->svms.faulting_task, NULL);
if (r) {
pr_debug("failed %d to get svm range pages\n", r);
goto unreserve_out;
@@ -2000,20 +2002,28 @@ static void svm_range_deferred_list_work(struct work_struct *work)
pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
prange->start, prange->last, prange->work_item.op);
- /* Make sure no stale retry fault coming after range is freed */
- if (prange->work_item.op == SVM_OP_UNMAP_RANGE)
- svm_range_drain_retry_fault(prange->svms);
-
mm = prange->work_item.mm;
+retry:
mmap_write_lock(mm);
mutex_lock(&svms->lock);
- /* Remove from deferred_list must be inside mmap write lock,
+ /* Checking whether retry faults need to be drained must be done under
+ * the mmap write lock to serialize with munmap notifiers.
+ *
+ * Remove from deferred_list must be inside mmap write lock,
* otherwise, svm_range_list_lock_and_flush_work may hold mmap
* write lock, and continue because deferred_list is empty, then
* deferred_list handle is blocked by mmap write lock.
*/
spin_lock(&svms->deferred_list_lock);
+ if (unlikely(svms->drain_pagefaults)) {
+ svms->drain_pagefaults = false;
+ spin_unlock(&svms->deferred_list_lock);
+ mutex_unlock(&svms->lock);
+ mmap_write_unlock(mm);
+ svm_range_drain_retry_fault(svms);
+ goto retry;
+ }
list_del_init(&prange->deferred_list);
spin_unlock(&svms->deferred_list_lock);
@@ -2046,6 +2056,12 @@ svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
struct mm_struct *mm, enum svm_work_list_ops op)
{
spin_lock(&svms->deferred_list_lock);
+ /* Make sure pending page faults are drained in the deferred worker
+ * before the range is freed to avoid straggler interrupts on
+ * unmapped memory causing "phantom faults".
+ */
+ if (op == SVM_OP_UNMAP_RANGE)
+ svms->drain_pagefaults = true;
/* if prange is on the deferred list */
if (!list_empty(&prange->deferred_list)) {
pr_debug("update exist prange 0x%p work op %d\n", prange, op);
@@ -2261,7 +2277,7 @@ svm_range_from_addr(struct svm_range_list *svms, unsigned long addr,
* migration if actual loc is not best location, then update GPU page table
* mapping to the best location.
*
- * If vm fault gpu is range preferred loc, the best_loc is preferred loc.
+ * If the preferred loc is accessible by the faulting GPU, use the preferred loc.
* If vm fault gpu idx is on range ACCESSIBLE bitmap, best_loc is vm fault gpu
* If vm fault gpu idx is on range ACCESSIBLE_IN_PLACE bitmap, then
* if range actual loc is cpu, best_loc is cpu
@@ -2278,7 +2294,7 @@ svm_range_best_restore_location(struct svm_range *prange,
struct amdgpu_device *adev,
int32_t *gpuidx)
{
- struct amdgpu_device *bo_adev;
+ struct amdgpu_device *bo_adev, *preferred_adev;
struct kfd_process *p;
uint32_t gpuid;
int r;
@@ -2291,8 +2307,16 @@ svm_range_best_restore_location(struct svm_range *prange,
return -1;
}
- if (prange->preferred_loc == gpuid)
+ if (prange->preferred_loc == gpuid ||
+ prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) {
return prange->preferred_loc;
+ } else if (prange->preferred_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
+ preferred_adev = svm_range_get_adev_by_id(prange,
+ prange->preferred_loc);
+ if (amdgpu_xgmi_same_hive(adev, preferred_adev))
+ return prange->preferred_loc;
+ /* fall through */
+ }
if (test_bit(*gpuidx, prange->bitmap_access))
return gpuid;
@@ -2313,7 +2337,8 @@ svm_range_best_restore_location(struct svm_range *prange,
static int
svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
- unsigned long *start, unsigned long *last)
+ unsigned long *start, unsigned long *last,
+ bool *is_heap_stack)
{
struct vm_area_struct *vma;
struct interval_tree_node *node;
@@ -2324,6 +2349,12 @@ svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
pr_debug("VMA does not exist in address [0x%llx]\n", addr);
return -EFAULT;
}
+
+ *is_heap_stack = (vma->vm_start <= vma->vm_mm->brk &&
+ vma->vm_end >= vma->vm_mm->start_brk) ||
+ (vma->vm_start <= vma->vm_mm->start_stack &&
+ vma->vm_end >= vma->vm_mm->start_stack);
+
start_limit = max(vma->vm_start >> PAGE_SHIFT,
(unsigned long)ALIGN_DOWN(addr, 2UL << 8));
end_limit = min(vma->vm_end >> PAGE_SHIFT,
@@ -2353,9 +2384,9 @@ svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
*start = start_limit;
*last = end_limit - 1;
- pr_debug("vma start: 0x%lx start: 0x%lx vma end: 0x%lx last: 0x%lx\n",
- vma->vm_start >> PAGE_SHIFT, *start,
- vma->vm_end >> PAGE_SHIFT, *last);
+ pr_debug("vma [0x%lx 0x%lx] range [0x%lx 0x%lx] is_heap_stack %d\n",
+ vma->vm_start >> PAGE_SHIFT, vma->vm_end >> PAGE_SHIFT,
+ *start, *last, *is_heap_stack);
return 0;
}
@@ -2420,11 +2451,13 @@ svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
struct svm_range *prange = NULL;
unsigned long start, last;
uint32_t gpuid, gpuidx;
+ bool is_heap_stack;
uint64_t bo_s = 0;
uint64_t bo_l = 0;
int r;
- if (svm_range_get_range_boundaries(p, addr, &start, &last))
+ if (svm_range_get_range_boundaries(p, addr, &start, &last,
+ &is_heap_stack))
return NULL;
r = svm_range_check_vm(p, start, last, &bo_s, &bo_l);
@@ -2451,6 +2484,9 @@ svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
return NULL;
}
+ if (is_heap_stack)
+ prange->preferred_loc = KFD_IOCTL_SVM_LOCATION_SYSMEM;
+
svm_range_add_to_svms(prange);
svm_range_add_notifier_locked(mm, prange);
@@ -3076,6 +3112,8 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
struct svm_range *prange =
list_first_entry(&svm_bo->range_list,
struct svm_range, svm_bo_list);
+ int retries = 3;
+
list_del_init(&prange->svm_bo_list);
spin_unlock(&svm_bo->list_lock);
@@ -3083,7 +3121,11 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
prange->start, prange->last);
mutex_lock(&prange->migrate_mutex);
- svm_migrate_vram_to_ram(prange, svm_bo->eviction_fence->mm);
+ do {
+ svm_migrate_vram_to_ram(prange,
+ svm_bo->eviction_fence->mm);
+ } while (prange->actual_loc && --retries);
+ WARN(prange->actual_loc, "Migration failed during eviction\n");
mutex_lock(&prange->lock);
prange->svm_bo = NULL;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 43e983e42c0f5b..c911b30de6588e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -217,6 +217,7 @@ static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
+static void handle_hpd_rx_irq(void *param);
static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
@@ -619,7 +620,7 @@ static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
-#endif
+#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
/**
* dmub_aux_setconfig_reply_callback - Callback for AUX or SET_CONFIG command.
@@ -669,10 +670,7 @@ void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *not
return;
}
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-
link_index = notify->link_index;
-
link = adev->dm.dc->links[link_index];
drm_connector_list_iter_begin(dev, &iter);
@@ -685,10 +683,13 @@ void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *not
}
}
drm_connector_list_iter_end(&iter);
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
- if (hpd_aconnector)
- handle_hpd_irq_helper(hpd_aconnector);
+ if (hpd_aconnector) {
+ if (notify->type == DMUB_NOTIFICATION_HPD)
+ handle_hpd_irq_helper(hpd_aconnector);
+ else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
+ handle_hpd_rx_irq(hpd_aconnector);
+ }
}
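dmub_hpd_callback now branches on notify->type instead of assuming a long-pulse HPD, and dm_dmub_outbox1_low_irq below additionally skips notifications that have no registered handler. A compact sketch of that dispatch idea, using hypothetical type names and handlers in place of the DMUB enums and callbacks:

#include <stdio.h>

enum notify_type { NOTIFY_HPD, NOTIFY_HPD_IRQ, NOTIFY_MAX };

typedef void (*notify_handler)(int link);

static void handle_hpd(int link)     { printf("HPD on link %d\n", link); }
static void handle_hpd_irq(int link) { printf("HPD_IRQ (short pulse) on link %d\n", link); }

/* One slot per notification type; unset slots are simply skipped. */
static notify_handler handlers[NOTIFY_MAX];

static void dispatch(enum notify_type type, int link)
{
        if (type >= NOTIFY_MAX || !handlers[type]) {
                printf("notification skipped, no handler: type=%d\n", type);
                return;
        }
        handlers[type](link);
}

int main(void)
{
        handlers[NOTIFY_HPD]     = handle_hpd;
        handlers[NOTIFY_HPD_IRQ] = handle_hpd_irq;

        dispatch(NOTIFY_HPD, 0);
        dispatch(NOTIFY_HPD_IRQ, 1);
        return 0;
}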
/**
@@ -764,6 +765,10 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
DRM_ERROR("DM: notify type %d invalid!", notify.type);
continue;
}
+ if (!dm->dmub_callback[notify.type]) {
+ DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
+ continue;
+ }
if (dm->dmub_thread_offload[notify.type] == true) {
dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
if (!dmub_hpd_wrk) {
@@ -813,7 +818,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
if (count > DMUB_TRACE_MAX_READ)
DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
}
-#endif
+#endif /* CONFIG_DRM_AMD_DC_DCN */
static int dm_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
@@ -1410,7 +1415,15 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
switch (adev->ip_versions[DCE_HWIP][0]) {
case IP_VERSION(2, 1, 0):
init_data.flags.gpu_vm_support = true;
- init_data.flags.disable_dmcu = true;
+ switch (adev->dm.dmcub_fw_version) {
+ case 0: /* development */
+ case 0x1: /* linux-firmware.git hash 6d9f399 */
+ case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
+ init_data.flags.disable_dmcu = false;
+ break;
+ default:
+ init_data.flags.disable_dmcu = true;
+ }
break;
case IP_VERSION(1, 0, 0):
case IP_VERSION(1, 0, 1):
@@ -1556,7 +1569,11 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
DRM_ERROR("amdgpu: fail to register dmub hpd callback");
goto error;
}
-#endif
+ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
+ DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+ goto error;
+ }
+#endif /* CONFIG_DRM_AMD_DC_DCN */
}
if (amdgpu_dm_initialize_drm_device(adev)) {
@@ -4565,7 +4582,8 @@ static void get_min_max_dc_plane_scaling(struct drm_device *dev,
}
-static int fill_dc_scaling_info(const struct drm_plane_state *state,
+static int fill_dc_scaling_info(struct amdgpu_device *adev,
+ const struct drm_plane_state *state,
struct dc_scaling_info *scaling_info)
{
int scale_w, scale_h, min_downscale, max_upscale;
@@ -4579,7 +4597,8 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state,
/*
* For reasons we don't (yet) fully understand a non-zero
* src_y coordinate into an NV12 buffer can cause a
- * system hang. To avoid hangs (and maybe be overly cautious)
+ * system hang on DCN1x.
+ * To avoid hangs (and maybe be overly cautious)
* let's reject both non-zero src_x and src_y.
*
* We currently know of only one use-case to reproduce a
@@ -4587,10 +4606,10 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state,
* is to gesture the YouTube Android app into full screen
* on ChromeOS.
*/
- if (state->fb &&
- state->fb->format->format == DRM_FORMAT_NV12 &&
- (scaling_info->src_rect.x != 0 ||
- scaling_info->src_rect.y != 0))
+ if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
+ (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
+ (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
+ (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
return -EINVAL;
scaling_info->src_rect.width = state->src_w >> 16;
@@ -5496,7 +5515,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
int ret;
bool force_disable_dcc = false;
- ret = fill_dc_scaling_info(plane_state, &scaling_info);
+ ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
if (ret)
return ret;
@@ -6070,7 +6089,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
}
-#endif
+#endif /* CONFIG_DRM_AMD_DC_DCN */
/**
* DOC: FreeSync Video
@@ -7241,8 +7260,8 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
struct drm_connector_state *new_con_state;
struct amdgpu_dm_connector *aconnector;
struct dm_connector_state *dm_conn_state;
- int i, j, clock;
- int vcpi, pbn_div, pbn = 0;
+ int i, j;
+ int vcpi, pbn_div, pbn, slot_num = 0;
for_each_new_connector_in_state(state, connector, new_con_state, i) {
@@ -7270,17 +7289,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
if (!stream)
continue;
- if (stream->timing.flags.DSC != 1) {
- drm_dp_mst_atomic_enable_dsc(state,
- aconnector->port,
- dm_conn_state->pbn,
- 0,
- false);
- continue;
- }
-
pbn_div = dm_mst_get_pbn_divider(stream->link);
- clock = stream->timing.pix_clk_100hz / 10;
/* pbn is calculated by compute_mst_dsc_configs_for_state*/
for (j = 0; j < dc_state->stream_count; j++) {
if (vars[j].aconnector == aconnector) {
@@ -7289,6 +7298,23 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
}
}
+ if (j == dc_state->stream_count)
+ continue;
+
+ slot_num = DIV_ROUND_UP(pbn, pbn_div);
+
+ if (stream->timing.flags.DSC != 1) {
+ dm_conn_state->pbn = pbn;
+ dm_conn_state->vcpi_slots = slot_num;
+
+ drm_dp_mst_atomic_enable_dsc(state,
+ aconnector->port,
+ dm_conn_state->pbn,
+ 0,
+ false);
+ continue;
+ }
+
vcpi = drm_dp_mst_atomic_enable_dsc(state,
aconnector->port,
pbn, pbn_div,
@@ -7552,7 +7578,7 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
if (ret)
return ret;
- ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
+ ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
if (ret)
return ret;
@@ -9000,7 +9026,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
}
- fill_dc_scaling_info(new_plane_state,
+ fill_dc_scaling_info(dm->adev, new_plane_state,
&bundle->scaling_infos[planes_count]);
bundle->surface_updates[planes_count].scaling_info =
@@ -10787,7 +10813,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
ret = drm_atomic_add_affected_connectors(state, crtc);
if (ret)
- return ret;
+ goto fail;
ret = drm_atomic_add_affected_planes(state, crtc);
if (ret)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 3655663e079bae..9d43ecb1f692dd 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -78,12 +78,10 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
wr_buf_ptr = wr_buf;
- r = copy_from_user(wr_buf_ptr, buf, wr_buf_size);
-
- /* r is bytes not be copied */
- if (r >= wr_buf_size) {
- DRM_DEBUG_DRIVER("user data not be read\n");
- return -EINVAL;
+ /* copy_from_user() returns the number of bytes that were not copied */
+ if (copy_from_user(wr_buf_ptr, buf, wr_buf_size)) {
+ DRM_DEBUG_DRIVER("user data could not be read successfully\n");
+ return -EFAULT;
}
/* check number of parameters. isspace could not differ space and \n */
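The debugfs fix relies on the usual copy_from_user() contract: it returns the number of bytes that could not be copied, so any non-zero result means the read failed and should yield -EFAULT rather than being compared against the buffer size. A small stand-alone sketch of that idiom (fake_copy_from_user() is a stand-in, not the real uaccess API):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for copy_from_user(): returns the number of bytes NOT copied. */
static unsigned long fake_copy_from_user(void *dst, const void *src, unsigned long n)
{
        memcpy(dst, src, n);    /* pretend the whole copy succeeded */
        return 0;
}

static int parse_user_buffer(char *wr_buf, const char *user_buf, unsigned long len)
{
        /* Non-zero means some bytes were left uncopied: report -EFAULT. */
        if (fake_copy_from_user(wr_buf, user_buf, len)) {
                fprintf(stderr, "user data could not be read\n");
                return -EFAULT;
        }
        return 0;
}

int main(void)
{
        char buf[16];

        printf("parse returned %d\n", parse_user_buffer(buf, "42 7\n", 5));
        return 0;
}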
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 874a49b605c782..32a5ce09a62a9b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -534,13 +534,14 @@ static int kbps_to_peak_pbn(int kbps)
static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params,
struct dsc_mst_fairness_vars *vars,
- int count)
+ int count,
+ int k)
{
int i;
for (i = 0; i < count; i++) {
memset(&params[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg));
- if (vars[i].dsc_enabled && dc_dsc_compute_config(
+ if (vars[i + k].dsc_enabled && dc_dsc_compute_config(
params[i].sink->ctx->dc->res_pool->dscs[0],
&params[i].sink->dsc_caps.dsc_dec_caps,
params[i].sink->ctx->dc->debug.dsc_min_slice_height_override,
@@ -553,7 +554,7 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p
if (params[i].bpp_overwrite)
params[i].timing->dsc_cfg.bits_per_pixel = params[i].bpp_overwrite;
else
- params[i].timing->dsc_cfg.bits_per_pixel = vars[i].bpp_x16;
+ params[i].timing->dsc_cfg.bits_per_pixel = vars[i + k].bpp_x16;
if (params[i].num_slices_h)
params[i].timing->dsc_cfg.num_slices_h = params[i].num_slices_h;
@@ -586,7 +587,8 @@ static void increase_dsc_bpp(struct drm_atomic_state *state,
struct dc_link *dc_link,
struct dsc_mst_fairness_params *params,
struct dsc_mst_fairness_vars *vars,
- int count)
+ int count,
+ int k)
{
int i;
bool bpp_increased[MAX_PIPES];
@@ -601,8 +603,9 @@ static void increase_dsc_bpp(struct drm_atomic_state *state,
pbn_per_timeslot = dm_mst_get_pbn_divider(dc_link);
for (i = 0; i < count; i++) {
- if (vars[i].dsc_enabled) {
- initial_slack[i] = kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i].pbn;
+ if (vars[i + k].dsc_enabled) {
+ initial_slack[i] =
+ kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i + k].pbn;
bpp_increased[i] = false;
remaining_to_increase += 1;
} else {
@@ -629,7 +632,7 @@ static void increase_dsc_bpp(struct drm_atomic_state *state,
link_timeslots_used = 0;
for (i = 0; i < count; i++)
- link_timeslots_used += DIV_ROUND_UP(vars[i].pbn, pbn_per_timeslot);
+ link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, pbn_per_timeslot);
fair_pbn_alloc = (63 - link_timeslots_used) / remaining_to_increase * pbn_per_timeslot;
@@ -682,7 +685,8 @@ static void try_disable_dsc(struct drm_atomic_state *state,
struct dc_link *dc_link,
struct dsc_mst_fairness_params *params,
struct dsc_mst_fairness_vars *vars,
- int count)
+ int count,
+ int k)
{
int i;
bool tried[MAX_PIPES];
@@ -692,8 +696,8 @@ static void try_disable_dsc(struct drm_atomic_state *state,
int remaining_to_try = 0;
for (i = 0; i < count; i++) {
- if (vars[i].dsc_enabled
- && vars[i].bpp_x16 == params[i].bw_range.max_target_bpp_x16
+ if (vars[i + k].dsc_enabled
+ && vars[i + k].bpp_x16 == params[i].bw_range.max_target_bpp_x16
&& params[i].clock_force_enable == DSC_CLK_FORCE_DEFAULT) {
kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps;
tried[i] = false;
@@ -748,9 +752,10 @@ static void try_disable_dsc(struct drm_atomic_state *state,
static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
struct dc_state *dc_state,
struct dc_link *dc_link,
- struct dsc_mst_fairness_vars *vars)
+ struct dsc_mst_fairness_vars *vars,
+ int *link_vars_start_index)
{
- int i;
+ int i, k;
struct dc_stream_state *stream;
struct dsc_mst_fairness_params params[MAX_PIPES];
struct amdgpu_dm_connector *aconnector;
@@ -768,11 +773,17 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
if (stream->link != dc_link)
continue;
+ aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+ if (!aconnector)
+ continue;
+
+ if (!aconnector->port)
+ continue;
+
stream->timing.flags.DSC = 0;
params[count].timing = &stream->timing;
params[count].sink = stream->sink;
- aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
params[count].aconnector = aconnector;
params[count].port = aconnector->port;
params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable;
@@ -794,44 +805,55 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
count++;
}
+
+ if (count == 0) {
+ ASSERT(0);
+ return true;
+ }
+
+ /* k is the start index into vars[] for the current phy link used by the mst hub */
+ k = *link_vars_start_index;
+ /* set the vars start index for the next mst hub phy link */
+ *link_vars_start_index += count;
+
/* Try no compression */
for (i = 0; i < count; i++) {
- vars[i].aconnector = params[i].aconnector;
- vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
- vars[i].dsc_enabled = false;
- vars[i].bpp_x16 = 0;
+ vars[i + k].aconnector = params[i].aconnector;
+ vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
+ vars[i + k].dsc_enabled = false;
+ vars[i + k].bpp_x16 = 0;
if (drm_dp_atomic_find_vcpi_slots(state,
params[i].port->mgr,
params[i].port,
- vars[i].pbn,
+ vars[i + k].pbn,
dm_mst_get_pbn_divider(dc_link)) < 0)
return false;
}
if (!drm_dp_mst_atomic_check(state) && !debugfs_overwrite) {
- set_dsc_configs_from_fairness_vars(params, vars, count);
+ set_dsc_configs_from_fairness_vars(params, vars, count, k);
return true;
}
/* Try max compression */
for (i = 0; i < count; i++) {
if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
- vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
- vars[i].dsc_enabled = true;
- vars[i].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
+ vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
+ vars[i + k].dsc_enabled = true;
+ vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
if (drm_dp_atomic_find_vcpi_slots(state,
params[i].port->mgr,
params[i].port,
- vars[i].pbn,
+ vars[i + k].pbn,
dm_mst_get_pbn_divider(dc_link)) < 0)
return false;
} else {
- vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
- vars[i].dsc_enabled = false;
- vars[i].bpp_x16 = 0;
+ vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
+ vars[i + k].dsc_enabled = false;
+ vars[i + k].bpp_x16 = 0;
if (drm_dp_atomic_find_vcpi_slots(state,
params[i].port->mgr,
params[i].port,
- vars[i].pbn,
+ vars[i + k].pbn,
dm_mst_get_pbn_divider(dc_link)) < 0)
return false;
}
@@ -840,15 +862,76 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
return false;
/* Optimize degree of compression */
- increase_dsc_bpp(state, dc_link, params, vars, count);
+ increase_dsc_bpp(state, dc_link, params, vars, count, k);
- try_disable_dsc(state, dc_link, params, vars, count);
+ try_disable_dsc(state, dc_link, params, vars, count, k);
- set_dsc_configs_from_fairness_vars(params, vars, count);
+ set_dsc_configs_from_fairness_vars(params, vars, count, k);
return true;
}
+static bool is_dsc_need_re_compute(
+ struct drm_atomic_state *state,
+ struct dc_state *dc_state,
+ struct dc_link *dc_link)
+{
+ int i;
+ bool is_dsc_need_re_compute = false;
+
+ /* only check phy used by mst branch */
+ if (dc_link->type != dc_connection_mst_branch)
+ return false;
+
+ /* check if there is a mode change in the new request */
+ for (i = 0; i < dc_state->stream_count; i++) {
+ struct amdgpu_dm_connector *aconnector;
+ struct dc_stream_state *stream;
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_connector_state *new_conn_state;
+
+ stream = dc_state->streams[i];
+
+ if (!stream)
+ continue;
+
+ /* check if the stream is using the same mst link */
+ if (stream->link != dc_link)
+ continue;
+
+ aconnector = (struct amdgpu_dm_connector *) stream->dm_stream_context;
+ if (!aconnector)
+ continue;
+
+ new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base);
+
+ if (!new_conn_state)
+ continue;
+
+ if (IS_ERR(new_conn_state))
+ continue;
+
+ if (!new_conn_state->crtc)
+ continue;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
+
+ if (!new_crtc_state)
+ continue;
+
+ if (IS_ERR(new_crtc_state))
+ continue;
+
+ if (new_crtc_state->enable && new_crtc_state->active) {
+ if (new_crtc_state->mode_changed || new_crtc_state->active_changed ||
+ new_crtc_state->connectors_changed)
+ is_dsc_need_re_compute = true;
+ }
+ }
+
+ return is_dsc_need_re_compute;
+}
+
bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
struct dc_state *dc_state,
struct dsc_mst_fairness_vars *vars)
@@ -857,6 +940,7 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
struct dc_stream_state *stream;
bool computed_streams[MAX_PIPES];
struct amdgpu_dm_connector *aconnector;
+ int link_vars_start_index = 0;
for (i = 0; i < dc_state->stream_count; i++)
computed_streams[i] = false;
@@ -881,8 +965,12 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
if (dcn20_remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
return false;
+ if (!is_dsc_need_re_compute(state, dc_state, stream->link))
+ continue;
+
mutex_lock(&aconnector->mst_mgr.lock);
- if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars)) {
+ if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link,
+ vars, &link_vars_start_index)) {
mutex_unlock(&aconnector->mst_mgr.lock);
return false;
}
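With multiple MST hubs on separate phy links, each call to compute_mst_dsc_configs_for_link() now claims its own window of the shared vars[] array: link_vars_start_index advances by the stream count of every processed link, and per-stream accesses inside a link use vars[i + k]. A simplified sketch of that bookkeeping, with plain integers standing in for the DSC fairness structures (names are illustrative only):

#include <stdio.h>

#define MAX_STREAMS 8

/* One shared array for all links; each link claims a contiguous window. */
static int vars[MAX_STREAMS];

static void fill_link(int *vars, int count, int k, int base_value)
{
        int i;

        for (i = 0; i < count; i++)
                vars[i + k] = base_value + i;   /* per-stream slot for this link */
}

int main(void)
{
        int link_vars_start_index = 0;
        int link_stream_counts[] = { 3, 2 };    /* two phy links behind MST hubs */
        int l, i;

        for (l = 0; l < 2; l++) {
                int k = link_vars_start_index;  /* window start for this link */

                /* reserve the window before the next link is processed */
                link_vars_start_index += link_stream_counts[l];
                fill_link(vars, link_stream_counts[l], k, (l + 1) * 100);
        }

        for (i = 0; i < link_vars_start_index; i++)
                printf("vars[%d] = %d\n", i, vars[i]);
        return 0;
}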
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 12e5470fa567ff..0ded4decee05fd 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1085,6 +1085,8 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
struct dc_stream_state *old_stream =
dc->current_state->res_ctx.pipe_ctx[i].stream;
bool should_disable = true;
+ bool pipe_split_change =
+ context->res_ctx.pipe_ctx[i].top_pipe != dc->current_state->res_ctx.pipe_ctx[i].top_pipe;
for (j = 0; j < context->stream_count; j++) {
if (old_stream == context->streams[j]) {
@@ -1092,6 +1094,9 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
break;
}
}
+ if (!should_disable && pipe_split_change)
+ should_disable = true;
+
if (should_disable && old_stream) {
dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
@@ -1887,6 +1892,7 @@ static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
return false;
}
+#ifdef CONFIG_DRM_AMD_DC_DCN
/* Perform updates here which need to be deferred until next vupdate
*
* i.e. blnd lut, 3dlut, and shaper lut bypass regs are double buffered
@@ -1896,7 +1902,6 @@ static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
*/
static void process_deferred_updates(struct dc *dc)
{
-#ifdef CONFIG_DRM_AMD_DC_DCN
int i = 0;
if (dc->debug.enable_mem_low_power.bits.cm) {
@@ -1905,8 +1910,8 @@ static void process_deferred_updates(struct dc *dc)
if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update)
dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]);
}
-#endif
}
+#endif /* CONFIG_DRM_AMD_DC_DCN */
void dc_post_update_surfaces_to_stream(struct dc *dc)
{
@@ -1933,7 +1938,9 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
}
+#ifdef CONFIG_DRM_AMD_DC_DCN
process_deferred_updates(dc);
+#endif
dc->hwss.optimize_bandwidth(dc, context);
@@ -3603,7 +3610,8 @@ bool dc_enable_dmub_notifications(struct dc *dc)
#if defined(CONFIG_DRM_AMD_DC_DCN)
/* YELLOW_CARP B0 USB4 DPIA needs dmub notifications for interrupts */
if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
- dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0)
+ dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
+ !dc->debug.dpia_debug.bits.disable_dpia)
return true;
#endif
/* dmub aux needs dmub notifications to be enabled */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 2796bdd17de18d..60544788e911ee 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -4279,6 +4279,8 @@ void core_link_enable_stream(
*/
if (status != DC_FAIL_DP_LINK_TRAINING ||
pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ if (false == stream->link->link_status.link_active)
+ disable_link(stream->link, pipe_ctx->stream->signal);
BREAK_TO_DEBUGGER();
return;
}
@@ -4768,7 +4770,7 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
timing->dsc_cfg.bits_per_pixel,
timing->dsc_cfg.num_slices_h,
timing->dsc_cfg.is_dp);
-#endif
+#endif /* CONFIG_DRM_AMD_DC_DCN */
switch (timing->display_color_depth) {
case COLOR_DEPTH_666:
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index cc25ba0ec7db0d..cb7bf9148904ed 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -5329,6 +5329,14 @@ bool dc_link_dp_set_test_pattern(
return false;
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (test_pattern == DP_TEST_PATTERN_SQUARE_PULSE)
+ core_link_write_dpcd(link,
+ DP_LINK_SQUARE_PATTERN,
+ p_custom_pattern,
+ 1);
+
+#endif
/* tell receiver that we are sending qualification
* pattern DP 1.2 or later - DP receiver's link quality
* pattern is set using DPCD LINK_QUAL_LANEx_SET
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
index 72b0f8594b4a36..25e48a8cbb78d2 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
@@ -236,6 +236,23 @@ static struct link_encoder *get_link_enc_used_by_link(
return link_enc;
}
+/* Clear all link encoder assignments. */
+static void clear_enc_assignments(struct dc_state *state)
+{
+ int i;
+ enum engine_id eng_id;
+ struct dc_stream_state *stream;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].valid = false;
+ eng_id = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].eng_id;
+ stream = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream;
+ if (eng_id != ENGINE_ID_UNKNOWN)
+ state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_id - ENGINE_ID_DIGA] = eng_id;
+ if (stream)
+ stream->link_enc = NULL;
+ }
+}
void link_enc_cfg_init(
struct dc *dc,
@@ -250,6 +267,8 @@ void link_enc_cfg_init(
state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i] = ENGINE_ID_UNKNOWN;
}
+ clear_enc_assignments(state);
+
state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
}
@@ -265,6 +284,9 @@ void link_enc_cfg_link_encs_assign(
ASSERT(state->stream_count == stream_count);
+ if (stream_count == 0)
+ clear_enc_assignments(state);
+
/* Release DIG link encoder resources before running assignment algorithm. */
for (i = 0; i < stream_count; i++)
dc->res_pool->funcs->link_enc_unassign(state, streams[i]);
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index a5339796902ae8..3aac3f4a285256 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -47,7 +47,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.159"
+#define DC_VER "3.2.160"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -675,6 +675,7 @@ struct dc_debug_options {
#endif
union mem_low_power_enable_options enable_mem_low_power;
union root_clock_optimization_options root_clock_optimization;
+ bool hpo_optimization;
bool force_vblank_alignment;
/* Enable dmub aux for legacy ddc */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index bc87ea0adf94c3..e68e9a86a4d9d0 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -898,6 +898,9 @@ struct dpcd_usb4_dp_tunneling_info {
#ifndef DP_DFP_CAPABILITY_EXTENSION_SUPPORT
#define DP_DFP_CAPABILITY_EXTENSION_SUPPORT 0x0A3
#endif
+#ifndef DP_LINK_SQUARE_PATTERN
+#define DP_LINK_SQUARE_PATTERN 0x10F
+#endif
#ifndef DP_DSC_CONFIGURATION
#define DP_DSC_CONFIGURATION 0x161
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index 989f5b6907e21f..a3fee929cd120f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -671,6 +671,7 @@ struct dce_hwseq_registers {
uint32_t MC_VM_FB_LOCATION_BASE;
uint32_t MC_VM_FB_LOCATION_TOP;
uint32_t MC_VM_FB_OFFSET;
+ uint32_t HPO_TOP_HW_CONTROL;
};
/* set field name */
#define HWS_SF(blk_name, reg_name, field_name, post_fix)\
@@ -1152,7 +1153,8 @@ struct dce_hwseq_registers {
type DOMAIN_PGFSM_PWR_STATUS;\
type HPO_HDMISTREAMCLK_G_GATE_DIS;\
type DISABLE_HOSTVM_FORCE_ALLOW_PSTATE;\
- type I2C_LIGHT_SLEEP_FORCE;
+ type I2C_LIGHT_SLEEP_FORCE;\
+ type HPO_IO_EN;
struct dce_hwseq_shift {
HWSEQ_REG_FIELD_LIST(uint8_t)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index af3e68d3e74760..24e47df526f6a0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1244,6 +1244,12 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
#endif
if (dc_is_dp_signal(pipe_ctx->stream->signal))
dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISCONNECT_DIG_FE_BE);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dc->hwseq->funcs.setup_hpo_hw_control && is_dp_128b_132b_signal(pipe_ctx))
+ dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, false);
+#endif
+
}
void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index a25732d0722283..0b788d794fb334 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -231,7 +231,7 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
if (!s->blank_en)
DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
- "% 8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
+ " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
" %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index cfee456c6c9ab7..4f88376a118f81 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -2397,6 +2397,9 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
* BY this, it is logic clean to separate stream and link
*/
if (is_dp_128b_132b_signal(pipe_ctx)) {
+ if (pipe_ctx->stream->ctx->dc->hwseq->funcs.setup_hpo_hw_control)
+ pipe_ctx->stream->ctx->dc->hwseq->funcs.setup_hpo_hw_control(
+ pipe_ctx->stream->ctx->dc->hwseq, true);
setup_dp_hpo_stream(pipe_ctx, true);
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->enable_stream(
pipe_ctx->stream_res.hpo_dp_stream_enc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
index a82319f4d081d4..95149734378baa 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
@@ -1381,13 +1381,11 @@ int mpcc3_release_rmu(struct mpc *mpc, int mpcc_id)
}
-static void mpc3_mpc_init(struct mpc *mpc)
+static void mpc3_set_mpc_mem_lp_mode(struct mpc *mpc)
{
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
int mpcc_id;
- mpc1_mpc_init(mpc);
-
if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) {
if (mpc30->mpc_mask->MPC_RMU0_MEM_LOW_PWR_MODE && mpc30->mpc_mask->MPC_RMU1_MEM_LOW_PWR_MODE) {
REG_UPDATE(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_MEM_LOW_PWR_MODE, 3);
@@ -1405,7 +1403,7 @@ const struct mpc_funcs dcn30_mpc_funcs = {
.read_mpcc_state = mpc1_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
.remove_mpcc = mpc1_remove_mpcc,
- .mpc_init = mpc3_mpc_init,
+ .mpc_init = mpc1_mpc_init,
.mpc_init_single_inst = mpc1_mpc_init_single_inst,
.update_blending = mpc2_update_blending,
.cursor_lock = mpc1_cursor_lock,
@@ -1432,6 +1430,7 @@ const struct mpc_funcs dcn30_mpc_funcs = {
.power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut,
.get_mpc_out_mux = mpc1_get_mpc_out_mux,
.set_bg_color = mpc1_set_bg_color,
+ .set_mpc_mem_lp_mode = mpc3_set_mpc_mem_lp_mode,
};
void dcn30_mpc_construct(struct dcn30_mpc *mpc30,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index e50c695e3c9603..79a66e0c430392 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -2128,10 +2128,10 @@ static noinline void dcn30_calculate_wm_and_dlg_fp(
int pipe_cnt,
int vlevel)
{
+ int maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
int i, pipe_idx;
- double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
- bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] !=
- dm_dram_clock_change_unsupported;
+ double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][maxMpcComb];
+ bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] != dm_dram_clock_change_unsupported;
if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk)
dcfclk = context->bw_ctx.dml.soc.min_dcfclk;
@@ -2207,6 +2207,7 @@ static noinline void dcn30_calculate_wm_and_dlg_fp(
context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
}
+
context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
index d24ad7754d710a..5dd1ce9ddb539a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
@@ -66,6 +66,45 @@
#define FN(reg_name, field_name) \
hws->shifts->field_name, hws->masks->field_name
+static void enable_memory_low_power(struct dc *dc)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ int i;
+
+ if (dc->debug.enable_mem_low_power.bits.dmcu) {
+ // Force ERAM to shutdown if DMCU is not enabled
+ if (dc->debug.disable_dmcu || dc->config.disable_dmcu) {
+ REG_UPDATE(DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, 3);
+ }
+ }
+
+ // Set default OPTC memory power states
+ if (dc->debug.enable_mem_low_power.bits.optc) {
+ // Shutdown when unassigned and light sleep in VBLANK
+ REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1);
+ }
+
+ if (dc->debug.enable_mem_low_power.bits.vga) {
+ // Power down VGA memory
+ REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1);
+ }
+
+ if (dc->debug.enable_mem_low_power.bits.mpc)
+ dc->res_pool->mpc->funcs->set_mpc_mem_lp_mode(dc->res_pool->mpc);
+
+
+ if (dc->debug.enable_mem_low_power.bits.vpg && dc->res_pool->stream_enc[0]->vpg->funcs->vpg_powerdown) {
+ // Power down VPGs
+ for (i = 0; i < dc->res_pool->stream_enc_count; i++)
+ dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++)
+ dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg);
+#endif
+ }
+
+}
+
void dcn31_init_hw(struct dc *dc)
{
struct abm **abms = dc->res_pool->multiple_abms;
@@ -108,35 +147,7 @@ void dcn31_init_hw(struct dc *dc)
if (res_pool->dccg->funcs->dccg_init)
res_pool->dccg->funcs->dccg_init(res_pool->dccg);
- if (dc->debug.enable_mem_low_power.bits.dmcu) {
- // Force ERAM to shutdown if DMCU is not enabled
- if (dc->debug.disable_dmcu || dc->config.disable_dmcu) {
- REG_UPDATE(DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, 3);
- }
- }
-
- // Set default OPTC memory power states
- if (dc->debug.enable_mem_low_power.bits.optc) {
- // Shutdown when unassigned and light sleep in VBLANK
- REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1);
- }
-
- if (dc->debug.enable_mem_low_power.bits.vga) {
- // Power down VGA memory
- REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1);
- }
-
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (dc->debug.enable_mem_low_power.bits.vpg && dc->res_pool->stream_enc[0]->vpg->funcs->vpg_powerdown) {
- // Power down VPGs
- for (i = 0; i < dc->res_pool->stream_enc_count; i++)
- dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg);
-#if defined(CONFIG_DRM_AMD_DC_DP2_0)
- for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++)
- dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg);
-#endif
- }
-#endif
+ enable_memory_low_power(dc);
if (dc->ctx->dc_bios->fw_info_valid) {
res_pool->ref_clocks.xtalin_clock_inKhz =
@@ -264,6 +275,9 @@ void dcn31_init_hw(struct dc *dc)
if (dc->debug.enable_mem_low_power.bits.i2c)
REG_UPDATE(DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, 1);
+ if (hws->funcs.setup_hpo_hw_control)
+ hws->funcs.setup_hpo_hw_control(hws, false);
+
if (!dc->debug.disable_clock_gate) {
/* enable all DCN clock gating */
REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
@@ -597,3 +611,9 @@ void dcn31_reset_hw_ctx_wrap(
/* New dc_state in the process of being applied to hardware. */
dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_TRANSIENT;
}
+
+void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable)
+{
+ if (hws->ctx->dc->debug.hpo_optimization)
+ REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, !!enable);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
index 7ae45dd202d9bb..edfc01d6ad7378 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
@@ -54,5 +54,6 @@ void dcn31_reset_hw_ctx_wrap(
bool dcn31_is_abm_supported(struct dc *dc,
struct dc_state *context, struct dc_stream_state *stream);
void dcn31_init_pipes(struct dc *dc, struct dc_state *context);
+void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable);
#endif /* __DC_HWSS_DCN31_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
index c6a737781ad18c..05335a8c3c2dcf 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
@@ -137,6 +137,7 @@ static const struct hwseq_private_funcs dcn31_private_funcs = {
.dccg_init = dcn20_dccg_init,
.set_blend_lut = dcn30_set_blend_lut,
.set_shaper_3dlut = dcn20_set_shaper_3dlut,
+ .setup_hpo_hw_control = dcn31_setup_hpo_hw_control,
};
void dcn31_hw_sequencer_construct(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index 87b2c242884290..18896294ae12e7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -860,7 +860,8 @@ static const struct dccg_mask dccg_mask = {
SR(D6VGA_CONTROL), \
SR(DC_IP_REQUEST_CNTL), \
SR(AZALIA_AUDIO_DTO), \
- SR(AZALIA_CONTROLLER_CLOCK_GATING)
+ SR(AZALIA_CONTROLLER_CLOCK_GATING), \
+ SR(HPO_TOP_HW_CONTROL)
static const struct dce_hwseq_registers hwseq_reg = {
HWSEQ_DCN31_REG_LIST()
@@ -898,7 +899,8 @@ static const struct dce_hwseq_registers hwseq_reg = {
HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \
HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \
HWS_SF(, MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, mask_sh), \
- HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh)
+ HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh), \
+ HWS_SF(, HPO_TOP_HW_CONTROL, HPO_IO_EN, mask_sh)
static const struct dce_hwseq_shift hwseq_shift = {
HWSEQ_DCN31_MASK_SH_LIST(__SHIFT)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index e3d9f1decdfc70..f47d82da115c9d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -3576,16 +3576,9 @@ static double TruncToValidBPP(
MinDSCBPP = 8;
MaxDSCBPP = 3 * DSCInputBitPerComponent - 1.0 / 16;
} else {
- if (Output == dm_hdmi) {
- NonDSCBPP0 = 24;
- NonDSCBPP1 = 24;
- NonDSCBPP2 = 24;
- }
- else {
- NonDSCBPP0 = 16;
- NonDSCBPP1 = 20;
- NonDSCBPP2 = 24;
- }
+ NonDSCBPP0 = 16;
+ NonDSCBPP1 = 20;
+ NonDSCBPP2 = 24;
if (Format == dm_n422) {
MinDSCBPP = 7;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index d58925cff420ed..7e937bdcea00f3 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -3892,15 +3892,11 @@ static double TruncToValidBPP(
MinDSCBPP = 8;
MaxDSCBPP = 3 * DSCInputBitPerComponent - 1.0 / 16;
} else {
- if (Output == dm_hdmi) {
- NonDSCBPP0 = 24;
- NonDSCBPP1 = 24;
- NonDSCBPP2 = 24;
- } else {
- NonDSCBPP0 = 16;
- NonDSCBPP1 = 20;
- NonDSCBPP2 = 24;
- }
+
+ NonDSCBPP0 = 16;
+ NonDSCBPP1 = 20;
+ NonDSCBPP2 = 24;
+
if (Format == dm_n422) {
MinDSCBPP = 7;
MaxDSCBPP = 2 * DSCInputBitPerComponent - 1.0 / 16.0;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 04d6ec3f021f08..f5fd2a06732306 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -367,6 +367,7 @@ struct mpc_funcs {
void (*set_bg_color)(struct mpc *mpc,
struct tg_color *bg_color,
int mpcc_id);
+ void (*set_mpc_mem_lp_mode)(struct mpc *mpc);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
index f324285394be9c..c2008258c50a55 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
@@ -143,6 +143,7 @@ struct hwseq_private_funcs {
const struct dc_plane_state *plane_state);
void (*PLAT_58856_wa)(struct dc_state *context,
struct pipe_ctx *pipe_ctx);
+ void (*setup_hpo_hw_control)(const struct dce_hwseq *hws, bool enable);
};
struct dce_hwseq {
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index 717c0e572d2feb..cd204eef073b0b 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -238,6 +238,7 @@ struct dmub_srv_hw_params {
bool load_inst_const;
bool skip_panel_power_sequence;
bool disable_z10;
+ bool power_optimization;
bool dpia_supported;
bool disable_dpia;
};
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 0293c58f0701f3..c29a67ccef17a3 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -46,10 +46,10 @@
/* Firmware versioning. */
#ifdef DMUB_EXPOSE_VERSION
-#define DMUB_FW_VERSION_GIT_HASH 0x9525efb5
+#define DMUB_FW_VERSION_GIT_HASH 0x1d82d23e
#define DMUB_FW_VERSION_MAJOR 0
#define DMUB_FW_VERSION_MINOR 0
-#define DMUB_FW_VERSION_REVISION 90
+#define DMUB_FW_VERSION_REVISION 91
#define DMUB_FW_VERSION_TEST 0
#define DMUB_FW_VERSION_VBIOS 0
#define DMUB_FW_VERSION_HOTFIX 0
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index 10ebf20eaa4159..fa0569174aecf5 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -340,6 +340,7 @@ void dmub_dcn31_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu
boot_options.bits.z10_disable = params->disable_z10;
boot_options.bits.dpia_supported = params->dpia_supported;
boot_options.bits.enable_dpia = params->disable_dpia ? 0 : 1;
+ boot_options.bits.power_optimization = params->power_optimization;
boot_options.bits.sel_mux_phy_c_d_phy_f_g = (dmub->asic == DMUB_ASIC_DCN31B) ? 1 : 0;
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 49fe4155c374d9..41472ed992530c 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -2094,6 +2094,10 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
} else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
if (!(asic_type == CHIP_VANGOGH || asic_type == CHIP_SIENNA_CICHLID))
*states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
+ if (!adev->powerplay.pp_funcs->get_power_profile_mode ||
+ amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
+ *states = ATTR_STATE_UNSUPPORTED;
}
switch (asic_type) {
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_ppsmc.h
index 1d3447991d0cbf..fc9198846e7036 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_ppsmc.h
@@ -51,7 +51,7 @@
#define PPSMC_MSG_PowerUpVcn 0x07 ///< Power up VCN; VCN is power gated by default
#define PPSMC_MSG_SetHardMinVcn 0x08 ///< For wireless display
#define PPSMC_MSG_SetSoftMinGfxclk 0x09 ///< Set SoftMin for GFXCLK, argument is frequency in MHz
-#define PPSMC_MSG_ActiveProcessNotify 0x0A ///< Set active work load type
+#define PPSMC_MSG_ActiveProcessNotify 0x0A ///< Deprecated (Not to be used)
#define PPSMC_MSG_ForcePowerDownGfx 0x0B ///< Force power down GFX, i.e. enter GFXOFF
#define PPSMC_MSG_PrepareMp1ForUnload 0x0C ///< Prepare PMFW for GFX driver unload
#define PPSMC_MSG_SetDriverDramAddrHigh 0x0D ///< Set high 32 bits of DRAM address for Driver table transfer
@@ -63,7 +63,7 @@
#define PPSMC_MSG_SetHardMinSocclkByFreq 0x13 ///< Set hard min for SOC CLK
#define PPSMC_MSG_SetSoftMinFclk 0x14 ///< Set hard min for FCLK
#define PPSMC_MSG_SetSoftMinVcn 0x15 ///< Set soft min for VCN clocks (VCLK and DCLK)
-#define PPSMC_MSG_SPARE0 0x16 ///< Spared
+#define PPSMC_MSG_SPARE 0x16 ///< Spare
#define PPSMC_MSG_GetGfxclkFrequency 0x17 ///< Get GFX clock frequency
#define PPSMC_MSG_GetFclkFrequency 0x18 ///< Get FCLK frequency
#define PPSMC_MSG_AllowGfxOff 0x19 ///< Inform PMFW of allowing GFXOFF entry
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index 321215003643b3..8d796ed3b7d16f 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -875,34 +875,30 @@ pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
static int pp_get_power_profile_mode(void *handle, char *buf)
{
struct pp_hwmgr *hwmgr = handle;
+ int ret;
- if (!hwmgr || !hwmgr->pm_en || !buf)
+ if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->get_power_profile_mode)
+ return -EOPNOTSUPP;
+ if (!buf)
return -EINVAL;
- if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
- pr_info_ratelimited("%s was not implemented.\n", __func__);
- return snprintf(buf, PAGE_SIZE, "\n");
- }
-
- return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
+ mutex_lock(&hwmgr->smu_lock);
+ ret = hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
+ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
}
static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = -EINVAL;
+ int ret = -EOPNOTSUPP;
- if (!hwmgr || !hwmgr->pm_en)
- return ret;
-
- if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
- pr_info_ratelimited("%s was not implemented.\n", __func__);
+ if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->set_power_profile_mode)
return ret;
- }
if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
pr_debug("power profile setting is for manual dpm mode only.\n");
- return ret;
+ return -EINVAL;
}
mutex_lock(&hwmgr->smu_lock);
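The powerplay and swsmu paths now report -EOPNOTSUPP when a backend provides no get/set_power_profile_mode hook, and amdgpu_pm.c (earlier in this patch) uses that return code to mark pp_power_profile_mode as unsupported instead of exposing a file that only prints an empty line. A rough sketch of the probe-then-hide convention, with hypothetical names in place of the amdgpu attribute machinery:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct backend {
        /* Optional hook; NULL means the feature does not exist on this ASIC. */
        int (*get_power_profile_mode)(char *buf);
};

static int get_power_profile_mode(struct backend *b, char *buf)
{
        if (!b || !b->get_power_profile_mode)
                return -EOPNOTSUPP;     /* "not implemented", not a fake empty answer */
        if (!buf)
                return -EINVAL;         /* NULL buf is only a probe for support */
        return b->get_power_profile_mode(buf);
}

/* Attribute setup: hide the file entirely when the probe says unsupported. */
static bool attr_visible(struct backend *b)
{
        return get_power_profile_mode(b, NULL) != -EOPNOTSUPP;
}

int main(void)
{
        struct backend none = { 0 };

        printf("visible: %d\n", attr_visible(&none));   /* 0: attribute stays hidden */
        return 0;
}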
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
index 1de3ae77e03ed9..258c573acc9798 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
@@ -1024,6 +1024,8 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
uint32_t min_freq, max_freq = 0;
uint32_t ret = 0;
+ phm_get_sysfs_buf(&buf, &size);
+
switch (type) {
case PP_SCLK:
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now);
@@ -1065,7 +1067,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
if (ret)
return ret;
- size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
(data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : min_freq);
size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
@@ -1081,7 +1083,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
if (ret)
return ret;
- size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
min_freq, max_freq);
}
@@ -1456,6 +1458,8 @@ static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
if (!buf)
return -EINVAL;
+ phm_get_sysfs_buf(&buf, &size);
+
size += sysfs_emit_at(buf, size, "%s %16s %s %s %s %s\n",title[0],
title[1], title[2], title[3], title[4], title[5]);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index e7803ce8f67aa4..aceebf58422530 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -4914,6 +4914,8 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
int size = 0;
uint32_t i, now, clock, pcie_speed;
+ phm_get_sysfs_buf(&buf, &size);
+
switch (type) {
case PP_SCLK:
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock);
@@ -4963,7 +4965,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
break;
case OD_SCLK:
if (hwmgr->od_enabled) {
- size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
for (i = 0; i < odn_sclk_table->num_of_pl; i++)
size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n",
i, odn_sclk_table->entries[i].clock/100,
@@ -4972,7 +4974,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
break;
case OD_MCLK:
if (hwmgr->od_enabled) {
- size = sysfs_emit(buf, "%s:\n", "OD_MCLK");
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
for (i = 0; i < odn_mclk_table->num_of_pl; i++)
size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n",
i, odn_mclk_table->entries[i].clock/100,
@@ -4981,7 +4983,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
break;
case OD_RANGE:
if (hwmgr->od_enabled) {
- size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
@@ -5518,6 +5520,8 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
if (!buf)
return -EINVAL;
+ phm_get_sysfs_buf(&buf, &size);
+
size += sysfs_emit_at(buf, size, "%s %16s %16s %16s %16s %16s %16s %16s\n",
title[0], title[1], title[2], title[3],
title[4], title[5], title[6], title[7]);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
index b94a77e4e71473..8e28a8eecefc64 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
@@ -1550,6 +1550,8 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
uint32_t i, now;
int size = 0;
+ phm_get_sysfs_buf(&buf, &size);
+
switch (type) {
case PP_SCLK:
now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h
index ad33983a8064e9..2a75da1e9f0359 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h
@@ -109,6 +109,19 @@ int phm_irq_process(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
+/*
+ * Helper function to make sysfs_emit_at() happy. Align buf to
+ * the current page boundary and record the offset.
+ */
+static inline void phm_get_sysfs_buf(char **buf, int *offset)
+{
+ if (!*buf || !offset)
+ return;
+
+ *offset = offset_in_page(*buf);
+ *buf -= *offset;
+}
+
int smu9_register_irq_handlers(struct pp_hwmgr *hwmgr);
void *smu_atom_get_data_table(void *dev, uint32_t table, uint16_t *size,
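phm_get_sysfs_buf() exists because sysfs_emit_at() expects a page-aligned base pointer: the helper rewinds buf to the start of its page and records the original offset in size, after which the print paths append with size += sysfs_emit_at(buf, size, ...) rather than restarting the buffer with sysfs_emit(). A user-space approximation of that align-then-append pattern (fake_emit_at() stands in for sysfs_emit_at(); the page arithmetic mirrors the helper above):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL

/* Stand-in for sysfs_emit_at(): append at offset 'at' within a page-sized buffer. */
static int fake_emit_at(char *buf, int at, const char *fmt, unsigned int val)
{
        return snprintf(buf + at, PAGE_SIZE - at, fmt, val);
}

/* Mirror of phm_get_sysfs_buf(): rewind to the page start, remember the offset. */
static void get_sysfs_buf(char **buf, int *offset)
{
        *offset = (int)((uintptr_t)*buf & (PAGE_SIZE - 1));
        *buf -= *offset;
}

int main(void)
{
        char *page = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
        char *buf;
        int size = 0;

        if (!page)
                return 1;
        buf = page + 13;                /* caller hands us a mid-page pointer */

        get_sysfs_buf(&buf, &size);     /* now buf == page and size == 13 */
        size += fake_emit_at(buf, size, "OD_SCLK:\n0: %10uMhz\n", 300u);
        size += fake_emit_at(buf, size, "1: %10uMhz\n", 1400u);

        fputs(page + 13, stdout);       /* output starts at the caller's original offset */
        free(page);
        return 0;
}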
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
index c152a61ddd2c91..c981fc2882f017 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
@@ -4548,6 +4548,8 @@ static int vega10_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
int ret = 0;
int size = 0;
+ phm_get_sysfs_buf(&buf, &size);
+
ret = vega10_get_enabled_smc_features(hwmgr, &features_enabled);
PP_ASSERT_WITH_CODE(!ret,
"[EnableAllSmuFeatures] Failed to get enabled smc features!",
@@ -4637,6 +4639,8 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
int i, now, size = 0, count = 0;
+ phm_get_sysfs_buf(&buf, &size);
+
switch (type) {
case PP_SCLK:
if (data->registry_data.sclk_dpm_key_disabled)
@@ -4717,7 +4721,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
case OD_SCLK:
if (hwmgr->od_enabled) {
- size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
for (i = 0; i < podn_vdd_dep->count; i++)
size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n",
@@ -4727,7 +4731,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
break;
case OD_MCLK:
if (hwmgr->od_enabled) {
- size = sysfs_emit(buf, "%s:\n", "OD_MCLK");
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
for (i = 0; i < podn_vdd_dep->count; i++)
size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n",
@@ -4737,7 +4741,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
break;
case OD_RANGE:
if (hwmgr->od_enabled) {
- size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
data->golden_dpm_table.gfx_table.dpm_levels[0].value/100,
hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
@@ -5112,6 +5116,8 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
if (!buf)
return -EINVAL;
+ phm_get_sysfs_buf(&buf, &size);
+
size += sysfs_emit_at(buf, size, "%s %16s %s %s %s %s\n",title[0],
title[1], title[2], title[3], title[4], title[5]);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
index 8558718e15a8f9..f7e783e1c888f3 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
@@ -2141,6 +2141,8 @@ static int vega12_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
int ret = 0;
int size = 0;
+ phm_get_sysfs_buf(&buf, &size);
+
ret = vega12_get_enabled_smc_features(hwmgr, &features_enabled);
PP_ASSERT_WITH_CODE(!ret,
"[EnableAllSmuFeatures] Failed to get enabled smc features!",
@@ -2244,6 +2246,8 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
int i, now, size = 0;
struct pp_clock_levels_with_latency clocks;
+ phm_get_sysfs_buf(&buf, &size);
+
switch (type) {
case PP_SCLK:
PP_ASSERT_WITH_CODE(
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
index 0cf39c1244b1c3..03e63be4ee2756 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
@@ -3238,6 +3238,8 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
int ret = 0;
int size = 0;
+ phm_get_sysfs_buf(&buf, &size);
+
ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
PP_ASSERT_WITH_CODE(!ret,
"[EnableAllSmuFeatures] Failed to get enabled smc features!",
@@ -3364,6 +3366,8 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
int ret = 0;
uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;
+ phm_get_sysfs_buf(&buf, &size);
+
switch (type) {
case PP_SCLK:
ret = vega20_get_current_clk_freq(hwmgr, PPCLK_GFXCLK, &now);
@@ -3479,7 +3483,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
case OD_SCLK:
if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
- size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
od_table->GfxclkFmin);
size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
@@ -3489,7 +3493,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
case OD_MCLK:
if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
- size = sysfs_emit(buf, "%s:\n", "OD_MCLK");
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
od_table->UclkFmax);
}
@@ -3503,7 +3507,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
- size = sysfs_emit(buf, "%s:\n", "OD_VDDC_CURVE");
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_VDDC_CURVE");
size += sysfs_emit_at(buf, size, "0: %10uMhz %10dmV\n",
od_table->GfxclkFreq1,
od_table->GfxclkVolt1 / VOLTAGE_SCALE);
@@ -3518,7 +3522,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
break;
case OD_RANGE:
- size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
@@ -4003,6 +4007,8 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
if (!buf)
return -EINVAL;
+ phm_get_sysfs_buf(&buf, &size);
+
size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
title[0], title[1], title[2], title[3], title[4], title[5],
title[6], title[7], title[8], title[9], title[10]);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index b06c59dcc1b46d..01168b8955bff3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1468,7 +1468,7 @@ static int smu_disable_dpms(struct smu_context *smu)
dev_err(adev->dev, "Failed to disable smu features.\n");
}
- if (adev->ip_versions[MP1_HWIP][0] >= IP_VERSION(11, 0, 0) &&
+ if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0) &&
adev->gfx.rlc.funcs->stop)
adev->gfx.rlc.funcs->stop(adev);
@@ -2534,13 +2534,15 @@ static int smu_get_power_profile_mode(void *handle, char *buf)
struct smu_context *smu = handle;
int ret = 0;
- if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
+ !smu->ppt_funcs->get_power_profile_mode)
return -EOPNOTSUPP;
+ if (!buf)
+ return -EINVAL;
mutex_lock(&smu->mutex);
- if (smu->ppt_funcs->get_power_profile_mode)
- ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);
+ ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);
mutex_unlock(&smu->mutex);
@@ -2554,7 +2556,8 @@ static int smu_set_power_profile_mode(void *handle,
struct smu_context *smu = handle;
int ret = 0;
- if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
+ !smu->ppt_funcs->set_power_profile_mode)
return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index a403657151ba19..8215bbf5ed7c2a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -64,7 +64,6 @@ static struct cmn2asic_msg_mapping yellow_carp_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 1),
MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 1),
MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn, 1),
- MSG_MAP(ActiveProcessNotify, PPSMC_MSG_ActiveProcessNotify, 1),
MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 1),
MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1),
@@ -135,14 +134,6 @@ static struct cmn2asic_mapping yellow_carp_table_map[SMU_TABLE_COUNT] = {
TAB_MAP_VALID(CUSTOM_DPM),
TAB_MAP_VALID(DPMCLOCKS),
};
-
-static struct cmn2asic_mapping yellow_carp_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
- WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
- WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT),
- WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT),
- WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT),
- WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
-};
static int yellow_carp_init_smc_tables(struct smu_context *smu)
{
@@ -543,81 +534,6 @@ static int yellow_carp_set_watermarks_table(struct smu_context *smu,
return 0;
}
-static int yellow_carp_get_power_profile_mode(struct smu_context *smu,
- char *buf)
-{
- static const char *profile_name[] = {
- "BOOTUP_DEFAULT",
- "3D_FULL_SCREEN",
- "POWER_SAVING",
- "VIDEO",
- "VR",
- "COMPUTE",
- "CUSTOM"};
- uint32_t i, size = 0;
- int16_t workload_type = 0;
-
- if (!buf)
- return -EINVAL;
-
- for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
- /*
- * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT.
- * Not all profile modes are supported on yellow carp.
- */
- workload_type = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_WORKLOAD,
- i);
-
- if (workload_type < 0)
- continue;
-
- size += sysfs_emit_at(buf, size, "%2d %14s%s\n",
- i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
- }
-
- return size;
-}
-
-static int yellow_carp_set_power_profile_mode(struct smu_context *smu,
- long *input, uint32_t size)
-{
- int workload_type, ret;
- uint32_t profile_mode = input[size];
-
- if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
- dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode);
- return -EINVAL;
- }
-
- if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT ||
- profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING)
- return 0;
-
- /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
- workload_type = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_WORKLOAD,
- profile_mode);
- if (workload_type < 0) {
- dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on YELLOWCARP\n",
- profile_mode);
- return -EINVAL;
- }
-
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
- 1 << workload_type,
- NULL);
- if (ret) {
- dev_err_once(smu->adev->dev, "Fail to set workload type %d\n",
- workload_type);
- return ret;
- }
-
- smu->power_profile_mode = profile_mode;
-
- return 0;
-}
-
static ssize_t yellow_carp_get_gpu_metrics(struct smu_context *smu,
void **table)
{
@@ -1238,8 +1154,6 @@ static const struct pptable_funcs yellow_carp_ppt_funcs = {
.read_sensor = yellow_carp_read_sensor,
.is_dpm_running = yellow_carp_is_dpm_running,
.set_watermarks_table = yellow_carp_set_watermarks_table,
- .get_power_profile_mode = yellow_carp_get_power_profile_mode,
- .set_power_profile_mode = yellow_carp_set_power_profile_mode,
.get_gpu_metrics = yellow_carp_get_gpu_metrics,
.get_enabled_mask = smu_cmn_get_enabled_32_bits_mask,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
@@ -1261,6 +1175,5 @@ void yellow_carp_set_ppt_funcs(struct smu_context *smu)
smu->message_map = yellow_carp_message_map;
smu->feature_map = yellow_carp_feature_mask_map;
smu->table_map = yellow_carp_table_map;
- smu->workload_map = yellow_carp_workload_map;
smu->is_apu = true;
}
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index 3cac16db970f0b..010657ea7af782 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -167,9 +167,10 @@ static void lt9611uxc_hpd_work(struct work_struct *work)
struct lt9611uxc *lt9611uxc = container_of(work, struct lt9611uxc, work);
bool connected;
- if (lt9611uxc->connector.dev)
- drm_kms_helper_hotplug_event(lt9611uxc->connector.dev);
- else {
+ if (lt9611uxc->connector.dev) {
+ if (lt9611uxc->connector.dev->mode_config.funcs)
+ drm_kms_helper_hotplug_event(lt9611uxc->connector.dev);
+ } else {
mutex_lock(&lt9611uxc->ocm_lock);
connected = lt9611uxc->hdmi_connected;
@@ -339,6 +340,8 @@ static int lt9611uxc_connector_init(struct drm_bridge *bridge, struct lt9611uxc
return -ENODEV;
}
+ lt9611uxc->connector.polled = DRM_CONNECTOR_POLL_HPD;
+
drm_connector_helper_add(&lt9611uxc->connector,
&lt9611uxc_bridge_connector_helper_funcs);
ret = drm_connector_init(bridge->dev, &lt9611uxc->connector,
diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c
index dcf579a4cf833b..ad460b96c0a3b7 100644
--- a/drivers/gpu/drm/bridge/lvds-codec.c
+++ b/drivers/gpu/drm/bridge/lvds-codec.c
@@ -12,6 +12,7 @@
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_panel.h>
@@ -22,6 +23,7 @@ struct lvds_codec {
struct regulator *vcc;
struct gpio_desc *powerdown_gpio;
u32 connector_type;
+ unsigned int bus_format;
};
static inline struct lvds_codec *to_lvds_codec(struct drm_bridge *bridge)
@@ -74,12 +76,50 @@ static const struct drm_bridge_funcs funcs = {
.disable = lvds_codec_disable,
};
+#define MAX_INPUT_SEL_FORMATS 1
+static u32 *
+lvds_codec_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ struct lvds_codec *lvds_codec = to_lvds_codec(bridge);
+ u32 *input_fmts;
+
+ *num_input_fmts = 0;
+
+ input_fmts = kcalloc(MAX_INPUT_SEL_FORMATS, sizeof(*input_fmts),
+ GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ input_fmts[0] = lvds_codec->bus_format;
+ *num_input_fmts = MAX_INPUT_SEL_FORMATS;
+
+ return input_fmts;
+}
+
+static const struct drm_bridge_funcs funcs_decoder = {
+ .attach = lvds_codec_attach,
+ .enable = lvds_codec_enable,
+ .disable = lvds_codec_disable,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_get_input_bus_fmts = lvds_codec_atomic_get_input_bus_fmts,
+};
+
static int lvds_codec_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *panel_node;
+ struct device_node *bus_node;
struct drm_panel *panel;
struct lvds_codec *lvds_codec;
+ const char *mapping;
+ int ret;
lvds_codec = devm_kzalloc(dev, sizeof(*lvds_codec), GFP_KERNEL);
if (!lvds_codec)
@@ -119,13 +159,47 @@ static int lvds_codec_probe(struct platform_device *pdev)
if (IS_ERR(lvds_codec->panel_bridge))
return PTR_ERR(lvds_codec->panel_bridge);
+ lvds_codec->bridge.funcs = &funcs;
+
+ /*
+ * Decoder input LVDS format is a property of the decoder chip or even
+ * its strapping. Handle data-mapping the same way lvds-panel does. In
+ * case data-mapping is not present, do nothing, since there are still
+ * legacy bindings which do not specify this property.
+ */
+ if (lvds_codec->connector_type != DRM_MODE_CONNECTOR_LVDS) {
+ bus_node = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0);
+ if (!bus_node) {
+ dev_dbg(dev, "bus DT node not found\n");
+ return -ENXIO;
+ }
+
+ ret = of_property_read_string(bus_node, "data-mapping",
+ &mapping);
+ of_node_put(bus_node);
+ if (ret < 0) {
+ dev_warn(dev, "missing 'data-mapping' DT property\n");
+ } else {
+ if (!strcmp(mapping, "jeida-18")) {
+ lvds_codec->bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG;
+ } else if (!strcmp(mapping, "jeida-24")) {
+ lvds_codec->bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA;
+ } else if (!strcmp(mapping, "vesa-24")) {
+ lvds_codec->bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG;
+ } else {
+ dev_err(dev, "invalid 'data-mapping' DT property\n");
+ return -EINVAL;
+ }
+ lvds_codec->bridge.funcs = &funcs_decoder;
+ }
+ }
+
/*
* The panel_bridge bridge is attached to the panel's of_node,
* but we need a bridge attached to our of_node for our user
* to look up.
*/
lvds_codec->bridge.of_node = dev->of_node;
- lvds_codec->bridge.funcs = &funcs;
drm_bridge_add(&lvds_codec->bridge);
platform_set_drvdata(pdev, lvds_codec);
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
index ed8ac5059cd265..a7389a0facfb4e 100644
--- a/drivers/gpu/drm/bridge/nwl-dsi.c
+++ b/drivers/gpu/drm/bridge/nwl-dsi.c
@@ -939,6 +939,40 @@ static void nwl_dsi_bridge_detach(struct drm_bridge *bridge)
drm_of_panel_bridge_remove(dsi->dev->of_node, 1, 0);
}
+static u32 *nwl_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ u32 *input_fmts, input_fmt;
+
+ *num_input_fmts = 0;
+
+ switch (output_fmt) {
+ /* If MEDIA_BUS_FMT_FIXED is tested, return default bus format */
+ case MEDIA_BUS_FMT_FIXED:
+ input_fmt = MEDIA_BUS_FMT_RGB888_1X24;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ case MEDIA_BUS_FMT_RGB666_1X18:
+ case MEDIA_BUS_FMT_RGB565_1X16:
+ input_fmt = output_fmt;
+ break;
+ default:
+ return NULL;
+ }
+
+ input_fmts = kcalloc(1, sizeof(*input_fmts), GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+ input_fmts[0] = input_fmt;
+ *num_input_fmts = 1;
+
+ return input_fmts;
+}
+
static const struct drm_bridge_funcs nwl_dsi_bridge_funcs = {
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
@@ -946,6 +980,7 @@ static const struct drm_bridge_funcs nwl_dsi_bridge_funcs = {
.atomic_check = nwl_dsi_bridge_atomic_check,
.atomic_enable = nwl_dsi_bridge_atomic_enable,
.atomic_disable = nwl_dsi_bridge_atomic_disable,
+ .atomic_get_input_bus_fmts = nwl_bridge_atomic_get_input_bus_fmts,
.mode_set = nwl_dsi_bridge_mode_set,
.mode_valid = nwl_dsi_bridge_mode_valid,
.attach = nwl_dsi_bridge_attach,
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
index a32f70bc68ea4e..ba1160ec6d6e8a 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
@@ -288,6 +288,19 @@ err_dsi_attach:
return ret;
}
+static void sn65dsi83_detach(struct drm_bridge *bridge)
+{
+ struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
+
+ if (!ctx->dsi)
+ return;
+
+ mipi_dsi_detach(ctx->dsi);
+ mipi_dsi_device_unregister(ctx->dsi);
+ drm_bridge_remove(&ctx->bridge);
+ ctx->dsi = NULL;
+}
+
static void sn65dsi83_atomic_pre_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
@@ -583,6 +596,7 @@ sn65dsi83_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
static const struct drm_bridge_funcs sn65dsi83_funcs = {
.attach = sn65dsi83_attach,
+ .detach = sn65dsi83_detach,
.atomic_pre_enable = sn65dsi83_atomic_pre_enable,
.atomic_enable = sn65dsi83_atomic_enable,
.atomic_disable = sn65dsi83_atomic_disable,
@@ -697,9 +711,6 @@ static int sn65dsi83_remove(struct i2c_client *client)
{
struct sn65dsi83 *ctx = i2c_get_clientdata(client);
- mipi_dsi_detach(ctx->dsi);
- mipi_dsi_device_unregister(ctx->dsi);
- drm_bridge_remove(&ctx->bridge);
of_node_put(ctx->host_node);
return 0;
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 3bc782b630b9eb..52e20c68813b1d 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -625,6 +625,8 @@ int drm_connector_register_all(struct drm_device *dev)
*
* In contrast to the other drm_get_*_name functions this one here returns a
* const pointer and hence is threadsafe.
+ *
+ * Returns: connector status string
*/
const char *drm_get_connector_status_name(enum drm_connector_status status)
{
@@ -707,7 +709,7 @@ __drm_connector_put_safe(struct drm_connector *conn)
* drm_connector_list_iter_next - return next connector
* @iter: connector_list iterator
*
- * Returns the next connector for @iter, or NULL when the list walk has
+ * Returns: the next connector for @iter, or NULL when the list walk has
* completed.
*/
struct drm_connector *
@@ -780,6 +782,8 @@ static const struct drm_prop_enum_list drm_subpixel_enum_list[] = {
*
* Note you could abuse this and return something out of bounds, but that
* would be a caller error. No unscrubbed user data should make it here.
+ *
+ * Returns: string describing an enumerated subpixel property
*/
const char *drm_get_subpixel_order_name(enum subpixel_order order)
{
@@ -809,6 +813,9 @@ static const struct drm_prop_enum_list drm_link_status_enum_list[] = {
* Store the supported bus formats in display info structure.
* See MEDIA_BUS_FMT_* definitions in include/uapi/linux/media-bus-format.h for
* a full list of available formats.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
*/
int drm_display_info_set_bus_formats(struct drm_display_info *info,
const u32 *formats,
@@ -1326,6 +1333,8 @@ int drm_connector_create_standard_properties(struct drm_device *dev)
* @dev: DRM device
*
* Called by a driver the first time a DVI-I connector is made.
+ *
+ * Returns: %0
*/
int drm_mode_create_dvi_i_properties(struct drm_device *dev)
{
@@ -1397,6 +1406,8 @@ EXPORT_SYMBOL(drm_connector_attach_dp_subconnector_property);
* Game:
* Content type is game
*
+ * The meaning of each content type is defined in CTA-861-G table 15.
+ *
* Drivers can set up this property by calling
* drm_connector_attach_content_type_property(). Decoding to
* infoframe values is done through drm_hdmi_avi_infoframe_content_type().
@@ -1407,6 +1418,8 @@ EXPORT_SYMBOL(drm_connector_attach_dp_subconnector_property);
* @connector: connector to attach content type property on.
*
* Called by a driver the first time a HDMI connector is made.
+ *
+ * Returns: %0
*/
int drm_connector_attach_content_type_property(struct drm_connector *connector)
{
@@ -1487,6 +1500,9 @@ EXPORT_SYMBOL(drm_connector_attach_tv_margin_properties);
* creates the TV margin properties for a given device. No need to call this
* function for an SDTV connector, it's already called from
* drm_mode_create_tv_properties().
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
*/
int drm_mode_create_tv_margin_properties(struct drm_device *dev)
{
@@ -1527,6 +1543,9 @@ EXPORT_SYMBOL(drm_mode_create_tv_margin_properties);
* the TV specific connector properties for a given device. Caller is
* responsible for allocating a list of format names and passing them to
* this routine.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
*/
int drm_mode_create_tv_properties(struct drm_device *dev,
unsigned int num_modes,
@@ -1622,6 +1641,8 @@ EXPORT_SYMBOL(drm_mode_create_tv_properties);
* Atomic drivers should use drm_connector_attach_scaling_mode_property()
* instead to correctly assign &drm_connector_state.scaling_mode
* in the atomic state.
+ *
+ * Returns: %0
*/
int drm_mode_create_scaling_mode_property(struct drm_device *dev)
{
@@ -1939,6 +1960,9 @@ EXPORT_SYMBOL(drm_mode_create_content_type_property);
* @dev: DRM device
*
* Create the suggested x/y offset property for connectors.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
*/
int drm_mode_create_suggested_offset_properties(struct drm_device *dev)
{
@@ -2312,8 +2336,8 @@ int drm_connector_set_panel_orientation(
EXPORT_SYMBOL(drm_connector_set_panel_orientation);
/**
- * drm_connector_set_panel_orientation_with_quirk -
- * set the connector's panel_orientation after checking for quirks
+ * drm_connector_set_panel_orientation_with_quirk - set the
+ * connector's panel_orientation after checking for quirks
* @connector: connector for which to init the panel-orientation property.
* @panel_orientation: drm_panel_orientation value to set
* @width: width in pixels of the panel, used for panel quirk detection
@@ -2597,7 +2621,7 @@ struct drm_connector *drm_connector_find_by_fwnode(struct fwnode_handle *fwnode)
/**
* drm_connector_oob_hotplug_event - Report out-of-band hotplug event to connector
- * @connector: connector to report the event on
+ * @connector_fwnode: fwnode_handle to report the event on
*
* On some hardware a hotplug event notification may come from outside the display
* driver / device. An example of this is some USB Type-C setups where the hardware
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 09c8200458594f..4dcdec6487bb57 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -1340,31 +1340,15 @@ int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
struct drm_gem_object *obj,
bool write)
{
- int ret;
- struct dma_fence **fences;
- unsigned int i, fence_count;
-
- if (!write) {
- struct dma_fence *fence =
- dma_resv_get_excl_unlocked(obj->resv);
-
- return drm_gem_fence_array_add(fence_array, fence);
- }
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
+ int ret = 0;
- ret = dma_resv_get_fences(obj->resv, NULL,
- &fence_count, &fences);
- if (ret || !fence_count)
- return ret;
-
- for (i = 0; i < fence_count; i++) {
- ret = drm_gem_fence_array_add(fence_array, fences[i]);
+ dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
+ ret = drm_gem_fence_array_add(fence_array, fence);
if (ret)
break;
}
-
- for (; i < fence_count; i++)
- dma_fence_put(fences[i]);
- kfree(fences);
return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index bf8a6e823a1507..c97323365675c9 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -25,6 +25,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_modeset_lock.h>
+#include <drm/drm_print.h>
/**
* DOC: kms locking
@@ -77,6 +78,45 @@
static DEFINE_WW_CLASS(crtc_ww_class);
+#if IS_ENABLED(CONFIG_DRM_DEBUG_MODESET_LOCK)
+static noinline depot_stack_handle_t __drm_stack_depot_save(void)
+{
+ unsigned long entries[8];
+ unsigned int n;
+
+ n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
+
+ return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
+}
+
+static void __drm_stack_depot_print(depot_stack_handle_t stack_depot)
+{
+ struct drm_printer p = drm_debug_printer("drm_modeset_lock");
+ unsigned long *entries;
+ unsigned int nr_entries;
+ char *buf;
+
+ buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
+ if (!buf)
+ return;
+
+ nr_entries = stack_depot_fetch(stack_depot, &entries);
+ stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 2);
+
+ drm_printf(&p, "attempting to lock a contended lock without backoff:\n%s", buf);
+
+ kfree(buf);
+}
+#else /* CONFIG_DRM_DEBUG_MODESET_LOCK */
+static depot_stack_handle_t __drm_stack_depot_save(void)
+{
+ return 0;
+}
+static void __drm_stack_depot_print(depot_stack_handle_t stack_depot)
+{
+}
+#endif /* CONFIG_DRM_DEBUG_MODESET_LOCK */
+
/**
* drm_modeset_lock_all - take all modeset locks
* @dev: DRM device
@@ -225,7 +265,9 @@ EXPORT_SYMBOL(drm_modeset_acquire_fini);
*/
void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx)
{
- WARN_ON(ctx->contended);
+ if (WARN_ON(ctx->contended))
+ __drm_stack_depot_print(ctx->stack_depot);
+
while (!list_empty(&ctx->locked)) {
struct drm_modeset_lock *lock;
@@ -243,7 +285,8 @@ static inline int modeset_lock(struct drm_modeset_lock *lock,
{
int ret;
- WARN_ON(ctx->contended);
+ if (WARN_ON(ctx->contended))
+ __drm_stack_depot_print(ctx->stack_depot);
if (ctx->trylock_only) {
lockdep_assert_held(&ctx->ww_ctx);
@@ -274,6 +317,7 @@ static inline int modeset_lock(struct drm_modeset_lock *lock,
ret = 0;
} else if (ret == -EDEADLK) {
ctx->contended = lock;
+ ctx->stack_depot = __drm_stack_depot_save();
}
return ret;
@@ -296,6 +340,7 @@ int drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx)
struct drm_modeset_lock *contended = ctx->contended;
ctx->contended = NULL;
+ ctx->stack_depot = 0;
if (WARN_ON(!contended))
return 0;
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 5b2d0ca03705cc..838b32b70bce6b 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -123,7 +123,6 @@ static int drm_plane_helper_check_update(struct drm_plane *plane,
.crtc_w = drm_rect_width(dst),
.crtc_h = drm_rect_height(dst),
.rotation = rotation,
- .visible = *visible,
};
struct drm_crtc_state crtc_state = {
.crtc = crtc,
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index d8ba95744410ed..c773d3dfb1aba8 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -722,11 +722,13 @@ int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
if (obj->funcs && obj->funcs->mmap) {
vma->vm_ops = obj->funcs->vm_ops;
+ drm_gem_object_get(obj);
ret = obj->funcs->mmap(obj, vma);
- if (ret)
+ if (ret) {
+ drm_gem_object_put(obj);
return ret;
+ }
vma->vm_private_data = obj;
- drm_gem_object_get(obj);
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index 88c427f3c3467b..f5b4dd5b42757a 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -584,6 +584,7 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
else
intel_encoder->enable = g4x_enable_hdmi;
}
+ intel_encoder->shutdown = intel_hdmi_encoder_shutdown;
intel_encoder->type = INTEL_OUTPUT_HDMI;
intel_encoder->power_domain = intel_port_to_power_domain(port);
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index b99907c656bbf5..2b1423a43437d3 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -1707,6 +1707,39 @@ static void sanitize_aux_ch(struct intel_bios_encoder_data *devdata,
child->aux_channel = 0;
}
+static u8 dvo_port_type(u8 dvo_port)
+{
+ switch (dvo_port) {
+ case DVO_PORT_HDMIA:
+ case DVO_PORT_HDMIB:
+ case DVO_PORT_HDMIC:
+ case DVO_PORT_HDMID:
+ case DVO_PORT_HDMIE:
+ case DVO_PORT_HDMIF:
+ case DVO_PORT_HDMIG:
+ case DVO_PORT_HDMIH:
+ case DVO_PORT_HDMII:
+ return DVO_PORT_HDMIA;
+ case DVO_PORT_DPA:
+ case DVO_PORT_DPB:
+ case DVO_PORT_DPC:
+ case DVO_PORT_DPD:
+ case DVO_PORT_DPE:
+ case DVO_PORT_DPF:
+ case DVO_PORT_DPG:
+ case DVO_PORT_DPH:
+ case DVO_PORT_DPI:
+ return DVO_PORT_DPA;
+ case DVO_PORT_MIPIA:
+ case DVO_PORT_MIPIB:
+ case DVO_PORT_MIPIC:
+ case DVO_PORT_MIPID:
+ return DVO_PORT_MIPIA;
+ default:
+ return dvo_port;
+ }
+}
+
static enum port __dvo_port_to_port(int n_ports, int n_dvo,
const int port_mapping[][3], u8 dvo_port)
{
@@ -1930,50 +1963,6 @@ static int _intel_bios_max_tmds_clock(const struct intel_bios_encoder_data *devd
}
}
-static enum port get_edp_port(struct drm_i915_private *i915)
-{
- const struct intel_bios_encoder_data *devdata;
- enum port port;
-
- for_each_port(port) {
- devdata = i915->vbt.ports[port];
-
- if (devdata && intel_bios_encoder_supports_edp(devdata))
- return port;
- }
-
- return PORT_NONE;
-}
-
-/*
- * FIXME: The power sequencer and backlight code currently do not support more
- * than one set registers, at least not on anything other than VLV/CHV. It will
- * clobber the registers. As a temporary workaround, gracefully prevent more
- * than one eDP from being registered.
- */
-static void sanitize_dual_edp(struct intel_bios_encoder_data *devdata,
- enum port port)
-{
- struct drm_i915_private *i915 = devdata->i915;
- struct child_device_config *child = &devdata->child;
- enum port p;
-
- /* CHV might not clobber PPS registers. */
- if (IS_CHERRYVIEW(i915))
- return;
-
- p = get_edp_port(i915);
- if (p == PORT_NONE)
- return;
-
- drm_dbg_kms(&i915->drm, "both ports %c and %c configured as eDP, "
- "disabling port %c eDP\n", port_name(p), port_name(port),
- port_name(port));
-
- child->device_type &= ~DEVICE_TYPE_DISPLAYPORT_OUTPUT;
- child->device_type &= ~DEVICE_TYPE_INTERNAL_CONNECTOR;
-}
-
static bool is_port_valid(struct drm_i915_private *i915, enum port port)
{
/*
@@ -2031,9 +2020,6 @@ static void parse_ddi_port(struct drm_i915_private *i915,
supports_typec_usb, supports_tbt,
devdata->dsc != NULL);
- if (is_edp)
- sanitize_dual_edp(devdata, port);
-
if (is_dvi)
sanitize_ddc_pin(devdata, port);
@@ -2670,35 +2656,17 @@ bool intel_bios_is_port_edp(struct drm_i915_private *i915, enum port port)
return false;
}
-static bool child_dev_is_dp_dual_mode(const struct child_device_config *child,
- enum port port)
+static bool child_dev_is_dp_dual_mode(const struct child_device_config *child)
{
- static const struct {
- u16 dp, hdmi;
- } port_mapping[] = {
- /*
- * Buggy VBTs may declare DP ports as having
- * HDMI type dvo_port :( So let's check both.
- */
- [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, },
- [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, },
- [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
- [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
- [PORT_F] = { DVO_PORT_DPF, DVO_PORT_HDMIF, },
- };
-
- if (port == PORT_A || port >= ARRAY_SIZE(port_mapping))
- return false;
-
if ((child->device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) !=
(DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS))
return false;
- if (child->dvo_port == port_mapping[port].dp)
+ if (dvo_port_type(child->dvo_port) == DVO_PORT_DPA)
return true;
/* Only accept a HDMI dvo_port as DP++ if it has an AUX channel */
- if (child->dvo_port == port_mapping[port].hdmi &&
+ if (dvo_port_type(child->dvo_port) == DVO_PORT_HDMIA &&
child->aux_channel != 0)
return true;
@@ -2708,10 +2676,36 @@ static bool child_dev_is_dp_dual_mode(const struct child_device_config *child,
bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *i915,
enum port port)
{
+ static const struct {
+ u16 dp, hdmi;
+ } port_mapping[] = {
+ /*
+ * Buggy VBTs may declare DP ports as having
+ * HDMI type dvo_port :( So let's check both.
+ */
+ [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, },
+ [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, },
+ [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
+ [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
+ [PORT_F] = { DVO_PORT_DPF, DVO_PORT_HDMIF, },
+ };
const struct intel_bios_encoder_data *devdata;
+ if (HAS_DDI(i915)) {
+ const struct intel_bios_encoder_data *devdata;
+
+ devdata = intel_bios_encoder_data_lookup(i915, port);
+
+ return devdata && child_dev_is_dp_dual_mode(&devdata->child);
+ }
+
+ if (port == PORT_A || port >= ARRAY_SIZE(port_mapping))
+ return false;
+
list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
- if (child_dev_is_dp_dual_mode(&devdata->child, port))
+ if ((devdata->child.dvo_port == port_mapping[port].dp ||
+ devdata->child.dvo_port == port_mapping[port].hdmi) &&
+ child_dev_is_dp_dual_mode(&devdata->child))
return true;
}
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 9e466d829019a3..868dd43a7542f8 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -2885,7 +2885,7 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv)
return freq;
}
-static struct intel_cdclk_funcs tgl_cdclk_funcs = {
+static const struct intel_cdclk_funcs tgl_cdclk_funcs = {
.get_cdclk = bxt_get_cdclk,
.set_cdclk = bxt_set_cdclk,
.bw_calc_min_cdclk = skl_bw_calc_min_cdclk,
@@ -2893,7 +2893,7 @@ static struct intel_cdclk_funcs tgl_cdclk_funcs = {
.calc_voltage_level = tgl_calc_voltage_level,
};
-static struct intel_cdclk_funcs ehl_cdclk_funcs = {
+static const struct intel_cdclk_funcs ehl_cdclk_funcs = {
.get_cdclk = bxt_get_cdclk,
.set_cdclk = bxt_set_cdclk,
.bw_calc_min_cdclk = skl_bw_calc_min_cdclk,
@@ -2901,7 +2901,7 @@ static struct intel_cdclk_funcs ehl_cdclk_funcs = {
.calc_voltage_level = ehl_calc_voltage_level,
};
-static struct intel_cdclk_funcs icl_cdclk_funcs = {
+static const struct intel_cdclk_funcs icl_cdclk_funcs = {
.get_cdclk = bxt_get_cdclk,
.set_cdclk = bxt_set_cdclk,
.bw_calc_min_cdclk = skl_bw_calc_min_cdclk,
@@ -2909,7 +2909,7 @@ static struct intel_cdclk_funcs icl_cdclk_funcs = {
.calc_voltage_level = icl_calc_voltage_level,
};
-static struct intel_cdclk_funcs bxt_cdclk_funcs = {
+static const struct intel_cdclk_funcs bxt_cdclk_funcs = {
.get_cdclk = bxt_get_cdclk,
.set_cdclk = bxt_set_cdclk,
.bw_calc_min_cdclk = skl_bw_calc_min_cdclk,
@@ -2917,54 +2917,54 @@ static struct intel_cdclk_funcs bxt_cdclk_funcs = {
.calc_voltage_level = bxt_calc_voltage_level,
};
-static struct intel_cdclk_funcs skl_cdclk_funcs = {
+static const struct intel_cdclk_funcs skl_cdclk_funcs = {
.get_cdclk = skl_get_cdclk,
.set_cdclk = skl_set_cdclk,
.bw_calc_min_cdclk = skl_bw_calc_min_cdclk,
.modeset_calc_cdclk = skl_modeset_calc_cdclk,
};
-static struct intel_cdclk_funcs bdw_cdclk_funcs = {
+static const struct intel_cdclk_funcs bdw_cdclk_funcs = {
.get_cdclk = bdw_get_cdclk,
.set_cdclk = bdw_set_cdclk,
.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
.modeset_calc_cdclk = bdw_modeset_calc_cdclk,
};
-static struct intel_cdclk_funcs chv_cdclk_funcs = {
+static const struct intel_cdclk_funcs chv_cdclk_funcs = {
.get_cdclk = vlv_get_cdclk,
.set_cdclk = chv_set_cdclk,
.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
.modeset_calc_cdclk = vlv_modeset_calc_cdclk,
};
-static struct intel_cdclk_funcs vlv_cdclk_funcs = {
+static const struct intel_cdclk_funcs vlv_cdclk_funcs = {
.get_cdclk = vlv_get_cdclk,
.set_cdclk = vlv_set_cdclk,
.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
.modeset_calc_cdclk = vlv_modeset_calc_cdclk,
};
-static struct intel_cdclk_funcs hsw_cdclk_funcs = {
+static const struct intel_cdclk_funcs hsw_cdclk_funcs = {
.get_cdclk = hsw_get_cdclk,
.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
};
/* SNB, IVB, 965G, 945G */
-static struct intel_cdclk_funcs fixed_400mhz_cdclk_funcs = {
+static const struct intel_cdclk_funcs fixed_400mhz_cdclk_funcs = {
.get_cdclk = fixed_400mhz_get_cdclk,
.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
};
-static struct intel_cdclk_funcs ilk_cdclk_funcs = {
+static const struct intel_cdclk_funcs ilk_cdclk_funcs = {
.get_cdclk = fixed_450mhz_get_cdclk,
.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
};
-static struct intel_cdclk_funcs gm45_cdclk_funcs = {
+static const struct intel_cdclk_funcs gm45_cdclk_funcs = {
.get_cdclk = gm45_get_cdclk,
.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
@@ -2972,7 +2972,7 @@ static struct intel_cdclk_funcs gm45_cdclk_funcs = {
/* G45 uses G33 */
-static struct intel_cdclk_funcs i965gm_cdclk_funcs = {
+static const struct intel_cdclk_funcs i965gm_cdclk_funcs = {
.get_cdclk = i965gm_get_cdclk,
.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
@@ -2980,19 +2980,19 @@ static struct intel_cdclk_funcs i965gm_cdclk_funcs = {
/* i965G uses fixed 400 */
-static struct intel_cdclk_funcs pnv_cdclk_funcs = {
+static const struct intel_cdclk_funcs pnv_cdclk_funcs = {
.get_cdclk = pnv_get_cdclk,
.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
};
-static struct intel_cdclk_funcs g33_cdclk_funcs = {
+static const struct intel_cdclk_funcs g33_cdclk_funcs = {
.get_cdclk = g33_get_cdclk,
.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
};
-static struct intel_cdclk_funcs i945gm_cdclk_funcs = {
+static const struct intel_cdclk_funcs i945gm_cdclk_funcs = {
.get_cdclk = i945gm_get_cdclk,
.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
@@ -3000,37 +3000,37 @@ static struct intel_cdclk_funcs i945gm_cdclk_funcs = {
/* i945G uses fixed 400 */
-static struct intel_cdclk_funcs i915gm_cdclk_funcs = {
+static const struct intel_cdclk_funcs i915gm_cdclk_funcs = {
.get_cdclk = i915gm_get_cdclk,
.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
};
-static struct intel_cdclk_funcs i915g_cdclk_funcs = {
+static const struct intel_cdclk_funcs i915g_cdclk_funcs = {
.get_cdclk = fixed_333mhz_get_cdclk,
.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
};
-static struct intel_cdclk_funcs i865g_cdclk_funcs = {
+static const struct intel_cdclk_funcs i865g_cdclk_funcs = {
.get_cdclk = fixed_266mhz_get_cdclk,
.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
};
-static struct intel_cdclk_funcs i85x_cdclk_funcs = {
+static const struct intel_cdclk_funcs i85x_cdclk_funcs = {
.get_cdclk = i85x_get_cdclk,
.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
};
-static struct intel_cdclk_funcs i845g_cdclk_funcs = {
+static const struct intel_cdclk_funcs i845g_cdclk_funcs = {
.get_cdclk = fixed_200mhz_get_cdclk,
.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
};
-static struct intel_cdclk_funcs i830_cdclk_funcs = {
+static const struct intel_cdclk_funcs i830_cdclk_funcs = {
.get_cdclk = fixed_133mhz_get_cdclk,
.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 1dcfe31e6c6f5f..cfb567df71b317 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -4361,6 +4361,7 @@ static void intel_ddi_encoder_shutdown(struct intel_encoder *encoder)
enum phy phy = intel_port_to_phy(i915, encoder->port);
intel_dp_encoder_shutdown(encoder);
+ intel_hdmi_encoder_shutdown(encoder);
if (!intel_phy_is_tc(i915, phy))
return;
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index ff598b6cd95303..ec403e46a32881 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -848,9 +848,16 @@ unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info
int i;
for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
+ unsigned int plane_size;
+
+ plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height;
+ if (plane_size == 0)
+ continue;
+
if (rem_info->plane_alignment)
size = ALIGN(size, rem_info->plane_alignment);
- size += rem_info->plane[i].dst_stride * rem_info->plane[i].height;
+
+ size += plane_size;
}
return size;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 23de500d56b526..be883469d2fcc3 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -120,6 +120,12 @@ bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state)
return crtc_state->port_clock >= 1000000;
}
+static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp)
+{
+ intel_dp->sink_rates[0] = 162000;
+ intel_dp->num_sink_rates = 1;
+}
+
/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
@@ -281,7 +287,7 @@ intel_dp_max_data_rate(int max_link_rate, int max_lanes)
*/
int max_link_rate_kbps = max_link_rate * 10;
- max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(max_link_rate_kbps * 9671, 10000);
+ max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(max_link_rate_kbps, 9671), 10000);
max_link_rate = max_link_rate_kbps / 8;
}
@@ -1858,6 +1864,12 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
intel_dp->lane_count = lane_count;
}
+static void intel_dp_reset_max_link_params(struct intel_dp *intel_dp)
+{
+ intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
+ intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
+}
+
/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
@@ -2017,8 +2029,7 @@ void intel_dp_sync_state(struct intel_encoder *encoder,
if (intel_dp->dpcd[DP_DPCD_REV] == 0)
intel_dp_get_dpcd(intel_dp);
- intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
- intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
+ intel_dp_reset_max_link_params(intel_dp);
}
bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
@@ -2556,6 +2567,9 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
*/
intel_psr_init_dpcd(intel_dp);
+ /* Clear the default sink rates */
+ intel_dp->num_sink_rates = 0;
+
/* Read the eDP 1.4+ supported link rates. */
if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
@@ -2591,6 +2605,7 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
intel_dp_set_sink_rates(intel_dp);
intel_dp_set_common_rates(intel_dp);
+ intel_dp_reset_max_link_params(intel_dp);
/* Read the eDP DSC DPCD registers */
if (DISPLAY_VER(dev_priv) >= 10)
@@ -4332,12 +4347,7 @@ intel_dp_detect(struct drm_connector *connector,
* supports link training fallback params.
*/
if (intel_dp->reset_link_params || intel_dp->is_mst) {
- /* Initial max link lane count */
- intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
-
- /* Initial max link rate */
- intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
-
+ intel_dp_reset_max_link_params(intel_dp);
intel_dp->reset_link_params = false;
}
@@ -5003,6 +5013,9 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
}
intel_dp_set_source_rates(intel_dp);
+ intel_dp_set_default_sink_rates(intel_dp);
+ intel_dp_set_common_rates(intel_dp);
+ intel_dp_reset_max_link_params(intel_dp);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index fa1f375e696bfb..cb511b2b706992 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -378,8 +378,8 @@ static void intel_fb_plane_dims(const struct intel_framebuffer *fb, int color_pl
intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, &fb->base, main_plane);
intel_fb_plane_get_subsampling(&hsub, &vsub, &fb->base, color_plane);
- *w = main_width / main_hsub / hsub;
- *h = main_height / main_vsub / vsub;
+ *w = DIV_ROUND_UP(main_width, main_hsub * hsub);
+ *h = DIV_ROUND_UP(main_height, main_vsub * vsub);
}
static u32 intel_adjust_tile_offset(int *x, int *y,
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index d2e61f6c6e08e8..371736bdc01fa4 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -1246,12 +1246,13 @@ static void hsw_set_infoframes(struct intel_encoder *encoder,
void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
{
struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi);
- struct i2c_adapter *adapter =
- intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
+ struct i2c_adapter *adapter;
if (hdmi->dp_dual_mode.type < DRM_DP_DUAL_MODE_TYPE2_DVI)
return;
+ adapter = intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
+
drm_dbg_kms(&dev_priv->drm, "%s DP dual mode adaptor TMDS output\n",
enable ? "Enabling" : "Disabling");
@@ -2258,6 +2259,17 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
return 0;
}
+void intel_hdmi_encoder_shutdown(struct intel_encoder *encoder)
+{
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+ /*
+ * Give a hand to buggy BIOSen which forget to turn
+ * the TMDS output buffers back on after a reboot.
+ */
+ intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
+}
+
static void
intel_hdmi_unset_edid(struct drm_connector *connector)
{
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index b43a180d007e01..2bf440eb400ab1 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
@@ -28,6 +28,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
int intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state);
+void intel_hdmi_encoder_shutdown(struct intel_encoder *encoder);
bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
struct drm_connector *connector,
bool high_tmds_clock_ratio,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index eae09d323049a4..e8a58c9971703e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -9,6 +9,8 @@
#include <linux/dma-resv.h>
#include <linux/module.h>
+#include <asm/smp.h>
+
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index f17383e76eb719..57c97554393b9a 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -1396,6 +1396,9 @@ remap_pages(struct drm_i915_gem_object *obj,
{
unsigned int row;
+ if (!width || !height)
+ return sg;
+
if (alignment_pad) {
st->nents++;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index d7710debcd4787..38b47e73e35dbd 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -2373,6 +2373,7 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce)
unsigned long flags;
bool disabled;
+ lockdep_assert_held(&guc->submission_state.lock);
GEM_BUG_ON(!intel_gt_pm_is_awake(gt));
GEM_BUG_ON(!lrc_desc_registered(guc, ce->guc_id.id));
GEM_BUG_ON(ce != __get_context(guc, ce->guc_id.id));
@@ -2388,7 +2389,7 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce)
}
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
if (unlikely(disabled)) {
- release_guc_id(guc, ce);
+ __release_guc_id(guc, ce);
__guc_context_destroy(ce);
return;
}
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 2c3cd6e635b5db..820a1f38b271e0 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1537,38 +1537,14 @@ i915_request_await_object(struct i915_request *to,
struct drm_i915_gem_object *obj,
bool write)
{
- struct dma_fence *excl;
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
int ret = 0;
- if (write) {
- struct dma_fence **shared;
- unsigned int count, i;
-
- ret = dma_resv_get_fences(obj->base.resv, &excl, &count,
- &shared);
+ dma_resv_for_each_fence(&cursor, obj->base.resv, write, fence) {
+ ret = i915_request_await_dma_fence(to, fence);
if (ret)
- return ret;
-
- for (i = 0; i < count; i++) {
- ret = i915_request_await_dma_fence(to, shared[i]);
- if (ret)
- break;
-
- dma_fence_put(shared[i]);
- }
-
- for (; i < count; i++)
- dma_fence_put(shared[i]);
- kfree(shared);
- } else {
- excl = dma_resv_get_excl_unlocked(obj->base.resv);
- }
-
- if (excl) {
- if (ret == 0)
- ret = i915_request_await_dma_fence(to, excl);
-
- dma_fence_put(excl);
+ break;
}
return ret;
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 9558e9e1b431bb..cb685fe2039b40 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -81,7 +81,6 @@ static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
struct drm_plane_state *old_plane_state, *new_plane_state;
bool plane_disabling = false;
int i;
- bool fence_cookie = dma_fence_begin_signalling();
drm_atomic_helper_commit_modeset_disables(dev, state);
@@ -112,7 +111,6 @@ static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
}
drm_atomic_helper_commit_hw_done(state);
- dma_fence_end_signalling(fence_cookie);
}
static const struct drm_mode_config_helper_funcs imx_drm_mode_config_helpers = {
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_kms.c b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
index 89dd618d78f31a..0655582ae8ed64 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_kms.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
@@ -88,7 +88,7 @@ static void mxsfb_set_formats(struct mxsfb_drm_private *mxsfb,
ctrl |= CTRL_BUS_WIDTH_24;
break;
default:
- dev_err(drm->dev, "Unknown media bus format %d\n", bus_format);
+ dev_err(drm->dev, "Unknown media bus format 0x%x\n", bus_format);
break;
}
@@ -362,6 +362,12 @@ static void mxsfb_crtc_atomic_enable(struct drm_crtc *crtc,
drm_atomic_get_new_bridge_state(state,
mxsfb->bridge);
bus_format = bridge_state->input_bus_cfg.format;
+ if (bus_format == MEDIA_BUS_FMT_FIXED) {
+ dev_warn_once(drm->dev,
+ "Bridge does not provide bus format, assuming MEDIA_BUS_FMT_RGB888_1X24.\n"
+ "Please fix bridge driver by handling atomic_get_input_bus_fmts.\n");
+ bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ }
}
/* If there is no bridge, use bus format from connector */
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 12b107acb6ee9e..fa73fe57f97be1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1249,7 +1249,6 @@ nouveau_ttm_tt_populate(struct ttm_device *bdev,
{
struct ttm_tt *ttm_dma = (void *)ttm;
struct nouveau_drm *drm;
- struct device *dev;
bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);
if (ttm_tt_is_populated(ttm))
@@ -1262,7 +1261,6 @@ nouveau_ttm_tt_populate(struct ttm_device *bdev,
}
drm = nouveau_bdev(bdev);
- dev = drm->dev->dev;
return ttm_pool_alloc(&drm->ttm.bdev.pool, ttm, ctx);
}
@@ -1272,7 +1270,6 @@ nouveau_ttm_tt_unpopulate(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct nouveau_drm *drm;
- struct device *dev;
bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);
if (slave)
@@ -1281,7 +1278,6 @@ nouveau_ttm_tt_unpopulate(struct ttm_device *bdev,
nouveau_ttm_tt_unbind(bdev, ttm);
drm = nouveau_bdev(bdev);
- dev = drm->dev->dev;
return ttm_pool_free(&drm->ttm.bdev.pool, ttm);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 92987daa5e17d0..3828aafd3ac46f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -166,7 +166,7 @@ static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm,
goto error_dma_unmap;
mutex_unlock(&svmm->mutex);
- args->dst[0] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+ args->dst[0] = migrate_pfn(page_to_pfn(dpage));
return 0;
error_dma_unmap:
@@ -602,7 +602,7 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
if (src & MIGRATE_PFN_WRITE)
*pfn |= NVIF_VMM_PFNMAP_V0_W;
- return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+ return migrate_pfn(page_to_pfn(dpage));
out_dma_unmap:
dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 6109cd9e339918..e7efd9ede8e4be 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -562,6 +562,7 @@ nouveau_drm_device_init(struct drm_device *dev)
nvkm_dbgopt(nouveau_debug, "DRM");
INIT_LIST_HEAD(&drm->clients);
+ mutex_init(&drm->clients_lock);
spin_lock_init(&drm->tile.lock);
/* workaround an odd issue on nvc1 by disabling the device's
@@ -632,6 +633,7 @@ fail_alloc:
static void
nouveau_drm_device_fini(struct drm_device *dev)
{
+ struct nouveau_cli *cli, *temp_cli;
struct nouveau_drm *drm = nouveau_drm(dev);
if (nouveau_pmops_runtime()) {
@@ -656,9 +658,28 @@ nouveau_drm_device_fini(struct drm_device *dev)
nouveau_ttm_fini(drm);
nouveau_vga_fini(drm);
+ /*
+ * There may be existing clients from as-yet unclosed files. For now,
+ * clean them up here rather than deferring until the file is closed,
+	 * but this is likely not correct if we want to support hot-unplugging
+ * properly.
+ */
+ mutex_lock(&drm->clients_lock);
+ list_for_each_entry_safe(cli, temp_cli, &drm->clients, head) {
+ list_del(&cli->head);
+ mutex_lock(&cli->mutex);
+ if (cli->abi16)
+ nouveau_abi16_fini(cli->abi16);
+ mutex_unlock(&cli->mutex);
+ nouveau_cli_fini(cli);
+ kfree(cli);
+ }
+ mutex_unlock(&drm->clients_lock);
+
nouveau_cli_fini(&drm->client);
nouveau_cli_fini(&drm->master);
nvif_parent_dtor(&drm->parent);
+ mutex_destroy(&drm->clients_lock);
kfree(drm);
}
@@ -796,7 +817,7 @@ nouveau_drm_device_remove(struct drm_device *dev)
struct nvkm_client *client;
struct nvkm_device *device;
- drm_dev_unregister(dev);
+ drm_dev_unplug(dev);
client = nvxx_client(&drm->client.base);
device = nvkm_device_find(client->device);
@@ -1090,9 +1111,9 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
fpriv->driver_priv = cli;
- mutex_lock(&drm->client.mutex);
+ mutex_lock(&drm->clients_lock);
list_add(&cli->head, &drm->clients);
- mutex_unlock(&drm->client.mutex);
+ mutex_unlock(&drm->clients_lock);
done:
if (ret && cli) {
@@ -1110,6 +1131,16 @@ nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv)
{
struct nouveau_cli *cli = nouveau_cli(fpriv);
struct nouveau_drm *drm = nouveau_drm(dev);
+ int dev_index;
+
+ /*
+ * The device is gone, and as it currently stands all clients are
+ * cleaned up in the removal codepath. In the future this may change
+ * so that we can support hot-unplugging, but for now we immediately
+ * return to avoid a double-free situation.
+ */
+ if (!drm_dev_enter(dev, &dev_index))
+ return;
pm_runtime_get_sync(dev->dev);
@@ -1118,14 +1149,15 @@ nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv)
nouveau_abi16_fini(cli->abi16);
mutex_unlock(&cli->mutex);
- mutex_lock(&drm->client.mutex);
+ mutex_lock(&drm->clients_lock);
list_del(&cli->head);
- mutex_unlock(&drm->client.mutex);
+ mutex_unlock(&drm->clients_lock);
nouveau_cli_fini(cli);
kfree(cli);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
+ drm_dev_exit(dev_index);
}
static const struct drm_ioctl_desc
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index ba65f136cf481d..b2a970aa9bf4b9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -139,6 +139,11 @@ struct nouveau_drm {
struct list_head clients;
+ /**
+ * @clients_lock: Protects access to the @clients list of &struct nouveau_cli.
+ */
+ struct mutex clients_lock;
+
u8 old_pm_cap;
struct {
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 8c2ecc28272322..9416bee9214100 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -56,7 +56,7 @@ static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf)
nouveau_bo_del_io_reserve_lru(bo);
prot = vm_get_page_prot(vma->vm_flags);
- ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
+ ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
nouveau_bo_add_io_reserve_lru(bo);
if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
return ret;
@@ -337,7 +337,7 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
struct ttm_buffer_object *bo = &nvbo->bo;
uint32_t domains = valid_domains & nvbo->valid_domains &
(write_domains ? write_domains : read_domains);
- uint32_t pref_domains = 0;;
+ uint32_t pref_domains = 0;
if (!domains)
return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 1a896a24288aa2..266809e511e2c1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -162,10 +162,14 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
*/
mm = get_task_mm(current);
+ if (!mm) {
+ return -EINVAL;
+ }
mmap_read_lock(mm);
if (!cli->svm.svmm) {
mmap_read_unlock(mm);
+ mmput(mm);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
index 704df0f2d1f16c..09a112af2f8935 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
@@ -78,6 +78,6 @@ int
gt215_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
- return nvkm_falcon_new_(&gt215_ce, device, type, inst,
+ return nvkm_falcon_new_(&gt215_ce, device, type, -1,
(device->chipset != 0xaf), 0x104000, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index ca75c5f6ecaf80..b51d690f375ff4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -3147,8 +3147,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
WARN_ON(device->chip->ptr.inst & ~((1 << ARRAY_SIZE(device->ptr)) - 1)); \
for (j = 0; device->chip->ptr.inst && j < ARRAY_SIZE(device->ptr); j++) { \
if ((device->chip->ptr.inst & BIT(j)) && (subdev_mask & BIT_ULL(type))) { \
- int inst = (device->chip->ptr.inst == 1) ? -1 : (j); \
- ret = device->chip->ptr.ctor(device, (type), inst, &device->ptr[j]); \
+ ret = device->chip->ptr.ctor(device, (type), (j), &device->ptr[j]); \
subdev = nvkm_device_subdev(device, (type), (j)); \
if (ret) { \
nvkm_subdev_del(&subdev); \
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c
index c39e797dc7c942..cf5dcfda7b2538 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c
@@ -21,7 +21,6 @@
*/
#include "priv.h"
-#include "priv.h"
#include <core/firmware.h>
static void *
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
index d6a1f8d04c09c5..186b4e63e559ba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
@@ -299,7 +299,7 @@ nvkm_uvmm_mthd_page(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
page = uvmm->vmm->func->page;
for (nr = 0; page[nr].shift; nr++);
- if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ if (!(nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
if ((index = args->v0.index) >= nr)
return -EINVAL;
type = page[index].type;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
index b5e733783b5b39..17899fc95b2d99 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
@@ -488,7 +488,7 @@ gp100_vmm_fault_cancel(struct nvkm_vmm *vmm, void *argv, u32 argc)
struct gp100_vmm_fault_cancel_v0 v0;
} *args = argv;
int ret = -ENOSYS;
- u32 inst, aper;
+ u32 aper;
if ((ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false)))
return ret;
@@ -502,7 +502,7 @@ gp100_vmm_fault_cancel(struct nvkm_vmm *vmm, void *argv, u32 argc)
args->v0.inst |= 0x80000000;
if (!WARN_ON(nvkm_gr_ctxsw_pause(device))) {
- if ((inst = nvkm_gr_ctxsw_inst(device)) == args->v0.inst) {
+ if (nvkm_gr_ctxsw_inst(device) == args->v0.inst) {
gf100_vmm_invalidate(vmm, 0x0000001b
/* CANCEL_TARGETED. */ |
(args->v0.hub << 20) |
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 2cb8eba76af88e..cfc8d644cedfd2 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -520,6 +520,16 @@ config DRM_PANEL_SHARP_LS043T1LE01
Say Y here if you want to enable support for Sharp LS043T1LE01 qHD
(540x960) DSI panel as found on the Qualcomm APQ8074 Dragonboard
+config DRM_PANEL_SHARP_LS060T1SX01
+ tristate "Sharp LS060T1SX01 FullHD video mode panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for Sharp LS060T1SX01 6.0"
+ FullHD (1080x1920) DSI panel as found in Dragonboard Display Adapter
+ Bundle.
+
config DRM_PANEL_SITRONIX_ST7701
tristate "Sitronix ST7701 panel driver"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 6e30640b90993a..bca4cc1f2715e7 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -53,6 +53,7 @@ obj-$(CONFIG_DRM_PANEL_SEIKO_43WVF1G) += panel-seiko-43wvf1g.o
obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o
obj-$(CONFIG_DRM_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
obj-$(CONFIG_DRM_PANEL_SHARP_LS043T1LE01) += panel-sharp-ls043t1le01.o
+obj-$(CONFIG_DRM_PANEL_SHARP_LS060T1SX01) += panel-sharp-ls060t1sx01.o
obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7701) += panel-sitronix-st7701.o
obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7703) += panel-sitronix-st7703.o
obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o
diff --git a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
index 30f28ad4df6b60..31daae1da9c9c6 100644
--- a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
+++ b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
@@ -8,6 +8,7 @@
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/regulator/consumer.h>
@@ -220,6 +221,10 @@ static const struct drm_display_mode default_mode_ys = {
.height_mm = 130,
};
+static const u32 mantix_bus_formats[] = {
+ MEDIA_BUS_FMT_RGB888_1X24,
+};
+
static int mantix_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
@@ -241,6 +246,10 @@ static int mantix_get_modes(struct drm_panel *panel,
connector->display_info.height_mm = mode->height_mm;
drm_mode_probed_add(connector, mode);
+ drm_display_info_set_bus_formats(&connector->display_info,
+ mantix_bus_formats,
+ ARRAY_SIZE(mantix_bus_formats));
+
return 1;
}
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
index e0b1a7e354f368..e0f773678168d8 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
@@ -116,7 +116,8 @@ static int s6e63m0_dsi_probe(struct mipi_dsi_device *dsi)
static int s6e63m0_dsi_remove(struct mipi_dsi_device *dsi)
{
mipi_dsi_detach(dsi);
- return s6e63m0_remove(&dsi->dev);
+ s6e63m0_remove(&dsi->dev);
+ return 0;
}
static const struct of_device_id s6e63m0_dsi_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c
index 3669cc3719cef1..c178d962b0d51d 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c
@@ -64,7 +64,8 @@ static int s6e63m0_spi_probe(struct spi_device *spi)
static int s6e63m0_spi_remove(struct spi_device *spi)
{
- return s6e63m0_remove(&spi->dev);
+ s6e63m0_remove(&spi->dev);
+ return 0;
}
static const struct of_device_id s6e63m0_spi_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
index 35d72ac663d6ef..b34fa4d5de0778 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
@@ -749,13 +749,11 @@ int s6e63m0_probe(struct device *dev, void *trsp,
}
EXPORT_SYMBOL_GPL(s6e63m0_probe);
-int s6e63m0_remove(struct device *dev)
+void s6e63m0_remove(struct device *dev)
{
struct s6e63m0 *ctx = dev_get_drvdata(dev);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
EXPORT_SYMBOL_GPL(s6e63m0_remove);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.h b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.h
index 306605ed1117e0..c926eca1c8176d 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.h
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.h
@@ -35,6 +35,6 @@ int s6e63m0_probe(struct device *dev, void *trsp,
const u8 *data,
size_t len),
bool dsi_mode);
-int s6e63m0_remove(struct device *dev);
+void s6e63m0_remove(struct device *dev);
#endif /* _PANEL_SAMSUNG_S6E63M0_H */
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c b/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c
new file mode 100644
index 00000000000000..e12570561629c2
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2021 Linaro Ltd.
+ * Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree:
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+struct sharp_ls060 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct regulator *vddi_supply;
+ struct regulator *vddh_supply;
+ struct regulator *avdd_supply;
+ struct regulator *avee_supply;
+ struct gpio_desc *reset_gpio;
+ bool prepared;
+};
+
+static inline struct sharp_ls060 *to_sharp_ls060(struct drm_panel *panel)
+{
+ return container_of(panel, struct sharp_ls060, panel);
+}
+
+#define dsi_dcs_write_seq(dsi, seq...) ({ \
+ static const u8 d[] = { seq }; \
+ \
+ mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \
+ })
+
+static void sharp_ls060_reset(struct sharp_ls060 *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 11000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(10000, 11000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 11000);
+}
+
+static int sharp_ls060_on(struct sharp_ls060 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ ret = dsi_dcs_write_seq(dsi, 0xbb, 0x13);
+ if (ret < 0) {
+ dev_err(dev, "Failed to send command: %d\n", ret);
+ return ret;
+ }
+
+ ret = dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_MEMORY_START);
+ if (ret < 0) {
+ dev_err(dev, "Failed to send command: %d\n", ret);
+ return ret;
+ }
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(120);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display on: %d\n", ret);
+ return ret;
+ }
+ msleep(50);
+
+ return 0;
+}
+
+static int sharp_ls060_off(struct sharp_ls060 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display off: %d\n", ret);
+ return ret;
+ }
+ usleep_range(2000, 3000);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(121);
+
+ return 0;
+}
+
+static int sharp_ls060_prepare(struct drm_panel *panel)
+{
+ struct sharp_ls060 *ctx = to_sharp_ls060(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ if (ctx->prepared)
+ return 0;
+
+ ret = regulator_enable(ctx->vddi_supply);
+ if (ret < 0)
+ return ret;
+
+ ret = regulator_enable(ctx->avdd_supply);
+ if (ret < 0)
+ goto err_avdd;
+
+ usleep_range(1000, 2000);
+
+ ret = regulator_enable(ctx->avee_supply);
+ if (ret < 0)
+ goto err_avee;
+
+ usleep_range(10000, 11000);
+
+ ret = regulator_enable(ctx->vddh_supply);
+ if (ret < 0)
+ goto err_vddh;
+
+ usleep_range(10000, 11000);
+
+ sharp_ls060_reset(ctx);
+
+ ret = sharp_ls060_on(ctx);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ goto err_on;
+ }
+
+ ctx->prepared = true;
+
+ return 0;
+
+err_on:
+ regulator_disable(ctx->vddh_supply);
+
+ usleep_range(10000, 11000);
+
+err_vddh:
+ regulator_disable(ctx->avee_supply);
+
+err_avee:
+ regulator_disable(ctx->avdd_supply);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+
+err_avdd:
+ regulator_disable(ctx->vddi_supply);
+
+ return ret;
+}
+
+static int sharp_ls060_unprepare(struct drm_panel *panel)
+{
+ struct sharp_ls060 *ctx = to_sharp_ls060(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ if (!ctx->prepared)
+ return 0;
+
+ ret = sharp_ls060_off(ctx);
+ if (ret < 0)
+ dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+
+ regulator_disable(ctx->vddh_supply);
+
+ usleep_range(10000, 11000);
+
+ regulator_disable(ctx->avee_supply);
+ regulator_disable(ctx->avdd_supply);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+
+ regulator_disable(ctx->vddi_supply);
+
+ ctx->prepared = false;
+ return 0;
+}
+
+static const struct drm_display_mode sharp_ls060_mode = {
+ .clock = (1080 + 96 + 16 + 64) * (1920 + 4 + 1 + 16) * 60 / 1000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 96,
+ .hsync_end = 1080 + 96 + 16,
+ .htotal = 1080 + 96 + 16 + 64,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 4,
+ .vsync_end = 1920 + 4 + 1,
+ .vtotal = 1920 + 4 + 1 + 16,
+ .width_mm = 75,
+ .height_mm = 132,
+};
+
+static int sharp_ls060_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &sharp_ls060_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs sharp_ls060_panel_funcs = {
+ .prepare = sharp_ls060_prepare,
+ .unprepare = sharp_ls060_unprepare,
+ .get_modes = sharp_ls060_get_modes,
+};
+
+static int sharp_ls060_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct sharp_ls060 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->vddi_supply = devm_regulator_get(dev, "vddi");
+ if (IS_ERR(ctx->vddi_supply))
+ return PTR_ERR(ctx->vddi_supply);
+
+ ctx->vddh_supply = devm_regulator_get(dev, "vddh");
+ if (IS_ERR(ctx->vddh_supply))
+ return PTR_ERR(ctx->vddh_supply);
+
+ ctx->avdd_supply = devm_regulator_get(dev, "avdd");
+ if (IS_ERR(ctx->avdd_supply))
+ return PTR_ERR(ctx->avdd_supply);
+
+ ctx->avee_supply = devm_regulator_get(dev, "avee");
+ if (IS_ERR(ctx->avee_supply))
+ return PTR_ERR(ctx->avee_supply);
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_NO_EOT_PACKET |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS;
+
+ drm_panel_init(&ctx->panel, dev, &sharp_ls060_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int sharp_ls060_remove(struct mipi_dsi_device *dsi)
+{
+ struct sharp_ls060 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id sharp_ls060t1sx01_of_match[] = {
+ { .compatible = "sharp,ls060t1sx01" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sharp_ls060t1sx01_of_match);
+
+static struct mipi_dsi_driver sharp_ls060_driver = {
+ .probe = sharp_ls060_probe,
+ .remove = sharp_ls060_remove,
+ .driver = {
+ .name = "panel-sharp-ls060t1sx01",
+ .of_match_table = sharp_ls060t1sx01_of_match,
+ },
+};
+module_mipi_dsi_driver(sharp_ls060_driver);
+
+MODULE_AUTHOR("Dmitry Baryshkov <dmitry.baryshkov@linaro.org>");
+MODULE_DESCRIPTION("DRM driver for Sharp LS060T1SX01 1080p video mode dsi panel");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 7f3e1b84b5f5c5..eb475a3a774b70 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -2370,6 +2370,38 @@ static const struct panel_desc logictechno_lt170410_2whc = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
+static const struct drm_display_mode logictechno_lttd800480070_l2rt_mode = {
+ .clock = 33000,
+ .hdisplay = 800,
+ .hsync_start = 800 + 112,
+ .hsync_end = 800 + 112 + 3,
+ .htotal = 800 + 112 + 3 + 85,
+ .vdisplay = 480,
+ .vsync_start = 480 + 38,
+ .vsync_end = 480 + 38 + 3,
+ .vtotal = 480 + 38 + 3 + 29,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc logictechno_lttd800480070_l2rt = {
+ .modes = &logictechno_lttd800480070_l2rt_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 154,
+ .height = 86,
+ },
+ .delay = {
+ .prepare = 45,
+ .enable = 100,
+ .disable = 100,
+ .unprepare = 45
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
+};
+
static const struct drm_display_mode logictechno_lttd800480070_l6wh_rt_mode = {
.clock = 33000,
.hdisplay = 800,
@@ -3751,6 +3783,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "logictechno,lt170410-2whc",
.data = &logictechno_lt170410_2whc,
}, {
+ .compatible = "logictechno,lttd800480070-l2rt",
+ .data = &logictechno_lttd800480070_l2rt,
+ }, {
.compatible = "logictechno,lttd800480070-l6wh-rt",
.data = &logictechno_lttd800480070_l6wh_rt,
}, {
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7703.c b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
index a2c303e5732c0d..73f69c929a7566 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7703.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
@@ -453,6 +453,10 @@ disable_vcc:
return ret;
}
+static const u32 mantix_bus_formats[] = {
+ MEDIA_BUS_FMT_RGB888_1X24,
+};
+
static int st7703_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
@@ -474,6 +478,10 @@ static int st7703_get_modes(struct drm_panel *panel,
connector->display_info.height_mm = mode->height_mm;
drm_mode_probed_add(connector, mode);
+ drm_display_info_set_bus_formats(&connector->display_info,
+ mantix_bus_formats,
+ ARRAY_SIZE(mantix_bus_formats));
+
return 1;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 458f92a7088797..a36a4f2c76b097 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -61,7 +61,7 @@ static vm_fault_t radeon_gem_fault(struct vm_fault *vmf)
goto unlock_resv;
ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
- TTM_BO_VM_NUM_PREFAULT, 1);
+ TTM_BO_VM_NUM_PREFAULT);
if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
goto unlock_mclk;
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 042c16b5d54a0d..5bc5f775abe1ef 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -699,30 +699,16 @@ int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
struct drm_gem_object *obj,
bool write)
{
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
int ret;
- struct dma_fence **fences;
- unsigned int i, fence_count;
-
- if (!write) {
- struct dma_fence *fence = dma_resv_get_excl_unlocked(obj->resv);
-
- return drm_sched_job_add_dependency(job, fence);
- }
-
- ret = dma_resv_get_fences(obj->resv, NULL, &fence_count, &fences);
- if (ret || !fence_count)
- return ret;
- for (i = 0; i < fence_count; i++) {
- ret = drm_sched_job_add_dependency(job, fences[i]);
+ dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
+ ret = drm_sched_job_add_dependency(job, fence);
if (ret)
- break;
+ return ret;
}
-
- for (; i < fence_count; i++)
- dma_fence_put(fences[i]);
- kfree(fences);
- return ret;
+ return 0;
}
EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index d62b2013c36716..739f11c0109cbe 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -269,23 +269,15 @@ static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
struct dma_resv *resv = &bo->base._resv;
- struct dma_resv_list *fobj;
+ struct dma_resv_iter cursor;
struct dma_fence *fence;
- int i;
-
- rcu_read_lock();
- fobj = dma_resv_shared_list(resv);
- fence = dma_resv_excl_fence(resv);
- if (fence && !fence->ops->signaled)
- dma_fence_enable_sw_signaling(fence);
-
- for (i = 0; fobj && i < fobj->shared_count; ++i) {
- fence = rcu_dereference(fobj->shared[i]);
+ dma_resv_iter_begin(&cursor, resv, true);
+ dma_resv_for_each_fence_unlocked(&cursor, fence) {
if (!fence->ops->signaled)
dma_fence_enable_sw_signaling(fence);
}
- rcu_read_unlock();
+ dma_resv_iter_end(&cursor);
}
/**
@@ -627,7 +619,8 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
*busy = !ret;
}
- if (ret && place && !bo->bdev->funcs->eviction_valuable(bo, place)) {
+ if (ret && place && (bo->resource->mem_type != place->mem_type ||
+ !bo->bdev->funcs->eviction_valuable(bo, place))) {
ret = false;
if (*locked) {
dma_resv_unlock(bo->base.resv);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 33680c94127c18..08ba083a80d23b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -173,89 +173,6 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_vm_reserve);
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-/**
- * ttm_bo_vm_insert_huge - Insert a pfn for PUD or PMD faults
- * @vmf: Fault data
- * @bo: The buffer object
- * @page_offset: Page offset from bo start
- * @fault_page_size: The size of the fault in pages.
- * @pgprot: The page protections.
- * Does additional checking whether it's possible to insert a PUD or PMD
- * pfn and performs the insertion.
- *
- * Return: VM_FAULT_NOPAGE on successful insertion, VM_FAULT_FALLBACK if
- * a huge fault was not possible, or on insertion error.
- */
-static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
- struct ttm_buffer_object *bo,
- pgoff_t page_offset,
- pgoff_t fault_page_size,
- pgprot_t pgprot)
-{
- pgoff_t i;
- vm_fault_t ret;
- unsigned long pfn;
- pfn_t pfnt;
- struct ttm_tt *ttm = bo->ttm;
- bool write = vmf->flags & FAULT_FLAG_WRITE;
-
- /* Fault should not cross bo boundary. */
- page_offset &= ~(fault_page_size - 1);
- if (page_offset + fault_page_size > bo->resource->num_pages)
- goto out_fallback;
-
- if (bo->resource->bus.is_iomem)
- pfn = ttm_bo_io_mem_pfn(bo, page_offset);
- else
- pfn = page_to_pfn(ttm->pages[page_offset]);
-
- /* pfn must be fault_page_size aligned. */
- if ((pfn & (fault_page_size - 1)) != 0)
- goto out_fallback;
-
- /* Check that memory is contiguous. */
- if (!bo->resource->bus.is_iomem) {
- for (i = 1; i < fault_page_size; ++i) {
- if (page_to_pfn(ttm->pages[page_offset + i]) != pfn + i)
- goto out_fallback;
- }
- } else if (bo->bdev->funcs->io_mem_pfn) {
- for (i = 1; i < fault_page_size; ++i) {
- if (ttm_bo_io_mem_pfn(bo, page_offset + i) != pfn + i)
- goto out_fallback;
- }
- }
-
- pfnt = __pfn_to_pfn_t(pfn, PFN_DEV);
- if (fault_page_size == (HPAGE_PMD_SIZE >> PAGE_SHIFT))
- ret = vmf_insert_pfn_pmd_prot(vmf, pfnt, pgprot, write);
-#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
- else if (fault_page_size == (HPAGE_PUD_SIZE >> PAGE_SHIFT))
- ret = vmf_insert_pfn_pud_prot(vmf, pfnt, pgprot, write);
-#endif
- else
- WARN_ON_ONCE(ret = VM_FAULT_FALLBACK);
-
- if (ret != VM_FAULT_NOPAGE)
- goto out_fallback;
-
- return VM_FAULT_NOPAGE;
-out_fallback:
- count_vm_event(THP_FAULT_FALLBACK);
- return VM_FAULT_FALLBACK;
-}
-#else
-static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
- struct ttm_buffer_object *bo,
- pgoff_t page_offset,
- pgoff_t fault_page_size,
- pgprot_t pgprot)
-{
- return VM_FAULT_FALLBACK;
-}
-#endif
-
/**
* ttm_bo_vm_fault_reserved - TTM fault helper
* @vmf: The struct vm_fault given as argument to the fault callback
@@ -263,7 +180,6 @@ static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
* @num_prefault: Maximum number of prefault pages. The caller may want to
* specify this based on madvice settings and the size of the GPU object
* backed by the memory.
- * @fault_page_size: The size of the fault in pages.
*
* This function inserts one or more page table entries pointing to the
* memory backing the buffer object, and then returns a return code
@@ -277,8 +193,7 @@ static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
*/
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
pgprot_t prot,
- pgoff_t num_prefault,
- pgoff_t fault_page_size)
+ pgoff_t num_prefault)
{
struct vm_area_struct *vma = vmf->vma;
struct ttm_buffer_object *bo = vma->vm_private_data;
@@ -329,11 +244,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
prot = pgprot_decrypted(prot);
}
- /* We don't prefault on huge faults. Yet. */
- if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1)
- return ttm_bo_vm_insert_huge(vmf, bo, page_offset,
- fault_page_size, prot);
-
/*
* Speculatively prefault a number of pages. Only error on
* first page.
@@ -429,7 +339,7 @@ vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
prot = vma->vm_page_prot;
if (drm_dev_enter(ddev, &idx)) {
- ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
+ ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
drm_dev_exit(idx);
} else {
ret = ttm_bo_vm_dummy_page(vmf, prot);
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index 3750fd21613177..930574ad2bca9d 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -30,7 +30,7 @@ static int udl_get_edid_block(void *data, u8 *buf, unsigned int block,
int bval = (i + block * EDID_LENGTH) << 8;
ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
0x02, (0x80 | (0x02 << 5)), bval,
- 0xA1, read_buff, 2, HZ);
+ 0xA1, read_buff, 2, 1000);
if (ret < 1) {
DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
kfree(read_buff);
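
For context on this HZ-to-1000 conversion (and the matching one in iforce-usb further down): the last argument of usb_control_msg() is a timeout in milliseconds, while HZ is the jiffies-per-second constant and changes with CONFIG_HZ (commonly 100, 250 or 1000), so passing HZ gave a configuration-dependent timeout. A minimal sketch of the preferred idiom, using the stock USB_CTRL_GET_TIMEOUT constant and hypothetical request parameters rather than anything taken from this patch:

#include <linux/usb.h>

static int example_read_reg(struct usb_device *udev, u16 value, u8 *buf, u16 len)
{
        /* Timeout is in milliseconds; USB_CTRL_GET_TIMEOUT is 5000 ms. */
        return usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                               0x02, USB_DIR_IN | USB_TYPE_VENDOR,
                               value, 0, buf, len,
                               USB_CTRL_GET_TIMEOUT);
}
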
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 6a000d77c568bb..e47ae40a865a80 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -487,8 +487,8 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
for (i = 0; i < se->in_sync_count; i++) {
struct drm_v3d_sem in;
- ret = copy_from_user(&in, handle++, sizeof(in));
- if (ret) {
+ if (copy_from_user(&in, handle++, sizeof(in))) {
+ ret = -EFAULT;
DRM_DEBUG("Failed to copy wait dep handle.\n");
goto fail_deps;
}
@@ -609,8 +609,8 @@ v3d_get_multisync_post_deps(struct drm_file *file_priv,
for (i = 0; i < count; i++) {
struct drm_v3d_sem out;
- ret = copy_from_user(&out, post_deps++, sizeof(out));
- if (ret) {
+ if (copy_from_user(&out, post_deps++, sizeof(out))) {
+ ret = -EFAULT;
DRM_DEBUG("Failed to copy post dep handles\n");
goto fail;
}
@@ -646,9 +646,8 @@ v3d_get_multisync_submit_deps(struct drm_file *file_priv,
struct v3d_submit_ext *se = data;
int ret;
- ret = copy_from_user(&multisync, ext, sizeof(multisync));
- if (ret)
- return ret;
+ if (copy_from_user(&multisync, ext, sizeof(multisync)))
+ return -EFAULT;
if (multisync.pad)
return -EINVAL;
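
The three v3d hunks above share one root cause: copy_from_user() returns the number of bytes it failed to copy (zero on success), not a negative errno, so returning its result directly would leak a positive count to user space. A minimal sketch of the idiom, with a hypothetical argument struct:

#include <linux/types.h>
#include <linux/uaccess.h>

struct example_args {
        u32 handle;
        u32 pad;
};

static int example_copy_args(struct example_args *dst, const void __user *src)
{
        /* A non-zero return means some bytes were not copied; report -EFAULT. */
        if (copy_from_user(dst, src, sizeof(*dst)))
                return -EFAULT;

        return 0;
}
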
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index a6caebd4a0dd6e..5b00310ac4cd4b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -308,8 +308,10 @@ virtio_gpu_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-EINVAL);
virtio_gpu_fb = kzalloc(sizeof(*virtio_gpu_fb), GFP_KERNEL);
- if (virtio_gpu_fb == NULL)
+ if (virtio_gpu_fb == NULL) {
+ drm_gem_object_put(obj);
return ERR_PTR(-ENOMEM);
+ }
ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, mode_cmd, obj);
if (ret) {
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 749db18dcfa212..d86e1ad4a97260 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -163,10 +163,11 @@ static __poll_t virtio_gpu_poll(struct file *filp,
struct drm_file *drm_file = filp->private_data;
struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
struct drm_device *dev = drm_file->minor->dev;
+ struct virtio_gpu_device *vgdev = dev->dev_private;
struct drm_pending_event *e = NULL;
__poll_t mask = 0;
- if (!vfpriv->ring_idx_mask)
+ if (!vgdev->has_virgl_3d || !vfpriv || !vfpriv->ring_idx_mask)
return drm_poll(filp, wait);
poll_wait(filp, &drm_file->event_wait, wait);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index a833751099b559..858aff99a3fe53 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1550,10 +1550,6 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
pgoff_t start, pgoff_t end);
vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf);
vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf);
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
- enum page_entry_size pe_size);
-#endif
/* Transparent hugepage support - vmwgfx_thp.c */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
index e5a9a5cbd01a7c..922317d1acc8a0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -477,7 +477,7 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
else
prot = vm_get_page_prot(vma->vm_flags);
- ret = ttm_bo_vm_fault_reserved(vmf, prot, num_prefault, 1);
+ ret = ttm_bo_vm_fault_reserved(vmf, prot, num_prefault);
if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
return ret;
@@ -486,73 +486,3 @@ out_unlock:
return ret;
}
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
- enum page_entry_size pe_size)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
- vma->vm_private_data;
- struct vmw_buffer_object *vbo =
- container_of(bo, struct vmw_buffer_object, base);
- pgprot_t prot;
- vm_fault_t ret;
- pgoff_t fault_page_size;
- bool write = vmf->flags & FAULT_FLAG_WRITE;
-
- switch (pe_size) {
- case PE_SIZE_PMD:
- fault_page_size = HPAGE_PMD_SIZE >> PAGE_SHIFT;
- break;
-#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
- case PE_SIZE_PUD:
- fault_page_size = HPAGE_PUD_SIZE >> PAGE_SHIFT;
- break;
-#endif
- default:
- WARN_ON_ONCE(1);
- return VM_FAULT_FALLBACK;
- }
-
- /* Always do write dirty-tracking and COW on PTE level. */
- if (write && (READ_ONCE(vbo->dirty) || is_cow_mapping(vma->vm_flags)))
- return VM_FAULT_FALLBACK;
-
- ret = ttm_bo_vm_reserve(bo, vmf);
- if (ret)
- return ret;
-
- if (vbo->dirty) {
- pgoff_t allowed_prefault;
- unsigned long page_offset;
-
- page_offset = vmf->pgoff -
- drm_vma_node_start(&bo->base.vma_node);
- if (page_offset >= bo->resource->num_pages ||
- vmw_resources_clean(vbo, page_offset,
- page_offset + PAGE_SIZE,
- &allowed_prefault)) {
- ret = VM_FAULT_SIGBUS;
- goto out_unlock;
- }
-
- /*
- * Write protect, so we get a new fault on write, and can
- * split.
- */
- prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED);
- } else {
- prot = vm_get_page_prot(vma->vm_flags);
- }
-
- ret = ttm_bo_vm_fault_reserved(vmf, prot, 1, fault_page_size);
- if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
- return ret;
-
-out_unlock:
- dma_resv_unlock(bo->base.resv);
-
- return ret;
-}
-#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index e6b1f98ec99f09..0a4c340252ec4a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -61,9 +61,6 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
.fault = vmw_bo_vm_fault,
.open = ttm_bo_vm_open,
.close = ttm_bo_vm_close,
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- .huge_fault = vmw_bo_vm_huge_fault,
-#endif
};
struct drm_file *file_priv = filp->private_data;
struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 882c3c8ba39991..3088c5b829f07a 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -19,6 +19,7 @@
#include <linux/input.h>
#include <linux/gameport.h>
#include <linux/jiffies.h>
+#include <linux/seq_buf.h>
#include <linux/timex.h>
#include <linux/timekeeping.h>
@@ -338,23 +339,24 @@ static void analog_calibrate_timer(struct analog_port *port)
static void analog_name(struct analog *analog)
{
- snprintf(analog->name, sizeof(analog->name), "Analog %d-axis %d-button",
+ struct seq_buf s;
+
+ seq_buf_init(&s, analog->name, sizeof(analog->name));
+ seq_buf_printf(&s, "Analog %d-axis %d-button",
hweight8(analog->mask & ANALOG_AXES_STD),
hweight8(analog->mask & ANALOG_BTNS_STD) + !!(analog->mask & ANALOG_BTNS_CHF) * 2 +
hweight16(analog->mask & ANALOG_BTNS_GAMEPAD) + !!(analog->mask & ANALOG_HBTN_CHF) * 4);
if (analog->mask & ANALOG_HATS_ALL)
- snprintf(analog->name, sizeof(analog->name), "%s %d-hat",
- analog->name, hweight16(analog->mask & ANALOG_HATS_ALL));
+ seq_buf_printf(&s, " %d-hat",
+ hweight16(analog->mask & ANALOG_HATS_ALL));
if (analog->mask & ANALOG_HAT_FCS)
- strlcat(analog->name, " FCS", sizeof(analog->name));
+ seq_buf_printf(&s, " FCS");
if (analog->mask & ANALOG_ANY_CHF)
- strlcat(analog->name, (analog->mask & ANALOG_SAITEK) ? " Saitek" : " CHF",
- sizeof(analog->name));
+ seq_buf_printf(&s, (analog->mask & ANALOG_SAITEK) ? " Saitek" : " CHF");
- strlcat(analog->name, (analog->mask & ANALOG_GAMEPAD) ? " gamepad": " joystick",
- sizeof(analog->name));
+ seq_buf_printf(&s, (analog->mask & ANALOG_GAMEPAD) ? " gamepad" : " joystick");
}
/*
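
The analog_name() rework above exists because the old code passed analog->name both as the destination buffer and as a "%s" argument of the same snprintf() call, a self-overlapping pattern whose behaviour is not guaranteed and which recent compilers warn about. A seq_buf tracks its own write position, so every seq_buf_printf() appends and is clamped to the buffer size. A minimal sketch of the append pattern with a hypothetical buffer:

#include <linux/seq_buf.h>

static void example_build_name(char *name, unsigned int len)
{
        struct seq_buf s;

        seq_buf_init(&s, name, len);
        seq_buf_printf(&s, "Analog %d-axis %d-button", 4, 2);
        seq_buf_printf(&s, " %d-hat", 1);       /* appends, never overruns name[] */
        seq_buf_printf(&s, " joystick");
}
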
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index 6c554c11a7ac3f..ea58805c480fa9 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -92,7 +92,7 @@ static int iforce_usb_get_id(struct iforce *iforce, u8 id,
id,
USB_TYPE_VENDOR | USB_DIR_IN |
USB_RECIP_INTERFACE,
- 0, 0, buf, IFORCE_MAX_LENGTH, HZ);
+ 0, 0, buf, IFORCE_MAX_LENGTH, 1000);
if (status < 0) {
dev_err(&iforce_usb->intf->dev,
"usb_submit_urb failed: %d\n", status);
diff --git a/drivers/input/joystick/tmdc.c b/drivers/input/joystick/tmdc.c
index f89e9aa6d328a1..7416de84b955c4 100644
--- a/drivers/input/joystick/tmdc.c
+++ b/drivers/input/joystick/tmdc.c
@@ -83,7 +83,7 @@ static const struct tmdc_model {
const signed char *axes;
const short *buttons;
} tmdc_models[] = {
- { 1, "ThrustMaster Millenium 3D Inceptor", 6, 2, { 4, 2 }, { 4, 6 }, tmdc_abs, tmdc_btn_joy },
+ { 1, "ThrustMaster Millennium 3D Inceptor", 6, 2, { 4, 2 }, { 4, 6 }, tmdc_abs, tmdc_btn_joy },
{ 3, "ThrustMaster Rage 3D Gamepad", 2, 0, { 8, 2 }, { 0, 0 }, tmdc_abs, tmdc_btn_pad },
{ 4, "ThrustMaster Attack Throttle", 5, 2, { 4, 6 }, { 4, 2 }, tmdc_abs_at, tmdc_btn_at },
{ 8, "ThrustMaster FragMaster", 4, 0, { 8, 2 }, { 0, 0 }, tmdc_abs_fm, tmdc_btn_fm },
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index e75650e98c9eff..0c607da9ee10d3 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -788,4 +788,14 @@ config KEYBOARD_MTK_PMIC
To compile this driver as a module, choose M here: the
module will be called pmic-keys.
+config KEYBOARD_CYPRESS_SF
+ tristate "Cypress StreetFighter touchkey support"
+ depends on I2C
+ help
+ Say Y here if you want to enable support for Cypress StreetFighter
+ touchkeys.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cypress-sf.
+
endif
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 1d689fdd5c00f9..e3c8648f834ef4 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_KEYBOARD_BCM) += bcm-keypad.o
obj-$(CONFIG_KEYBOARD_CAP11XX) += cap11xx.o
obj-$(CONFIG_KEYBOARD_CLPS711X) += clps711x-keypad.o
obj-$(CONFIG_KEYBOARD_CROS_EC) += cros_ec_keyb.o
+obj-$(CONFIG_KEYBOARD_CYPRESS_SF) += cypress-sf.o
obj-$(CONFIG_KEYBOARD_DAVINCI) += davinci_keyscan.o
obj-$(CONFIG_KEYBOARD_DLINK_DIR685) += dlink-dir685-touchkeys.o
obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o
diff --git a/drivers/input/keyboard/cap11xx.c b/drivers/input/keyboard/cap11xx.c
index 688e2bef682e30..7c85343cd32f90 100644
--- a/drivers/input/keyboard/cap11xx.c
+++ b/drivers/input/keyboard/cap11xx.c
@@ -91,18 +91,21 @@ struct cap11xx_hw_model {
u8 product_id;
unsigned int num_channels;
unsigned int num_leds;
+ bool no_gain;
};
enum {
CAP1106,
CAP1126,
CAP1188,
+ CAP1206,
};
static const struct cap11xx_hw_model cap11xx_devices[] = {
- [CAP1106] = { .product_id = 0x55, .num_channels = 6, .num_leds = 0 },
- [CAP1126] = { .product_id = 0x53, .num_channels = 6, .num_leds = 2 },
- [CAP1188] = { .product_id = 0x50, .num_channels = 8, .num_leds = 8 },
+ [CAP1106] = { .product_id = 0x55, .num_channels = 6, .num_leds = 0, .no_gain = false },
+ [CAP1126] = { .product_id = 0x53, .num_channels = 6, .num_leds = 2, .no_gain = false },
+ [CAP1188] = { .product_id = 0x50, .num_channels = 8, .num_leds = 8, .no_gain = false },
+ [CAP1206] = { .product_id = 0x67, .num_channels = 6, .num_leds = 0, .no_gain = true },
};
static const struct reg_default cap11xx_reg_defaults[] = {
@@ -378,17 +381,24 @@ static int cap11xx_i2c_probe(struct i2c_client *i2c_client,
node = dev->of_node;
if (!of_property_read_u32(node, "microchip,sensor-gain", &gain32)) {
- if (is_power_of_2(gain32) && gain32 <= 8)
+ if (cap->no_gain)
+ dev_warn(dev,
+ "This version doesn't support sensor gain\n");
+ else if (is_power_of_2(gain32) && gain32 <= 8)
gain = ilog2(gain32);
else
dev_err(dev, "Invalid sensor-gain value %d\n", gain32);
}
- if (of_property_read_bool(node, "microchip,irq-active-high")) {
- error = regmap_update_bits(priv->regmap, CAP11XX_REG_CONFIG2,
- CAP11XX_REG_CONFIG2_ALT_POL, 0);
- if (error)
- return error;
+ if (id->driver_data != CAP1206) {
+ if (of_property_read_bool(node, "microchip,irq-active-high")) {
+ error = regmap_update_bits(priv->regmap,
+ CAP11XX_REG_CONFIG2,
+ CAP11XX_REG_CONFIG2_ALT_POL,
+ 0);
+ if (error)
+ return error;
+ }
}
/* Provide some useful defaults */
@@ -398,11 +408,14 @@ static int cap11xx_i2c_probe(struct i2c_client *i2c_client,
of_property_read_u32_array(node, "linux,keycodes",
priv->keycodes, cap->num_channels);
- error = regmap_update_bits(priv->regmap, CAP11XX_REG_MAIN_CONTROL,
- CAP11XX_REG_MAIN_CONTROL_GAIN_MASK,
- gain << CAP11XX_REG_MAIN_CONTROL_GAIN_SHIFT);
- if (error)
- return error;
+ if (!cap->no_gain) {
+ error = regmap_update_bits(priv->regmap,
+ CAP11XX_REG_MAIN_CONTROL,
+ CAP11XX_REG_MAIN_CONTROL_GAIN_MASK,
+ gain << CAP11XX_REG_MAIN_CONTROL_GAIN_SHIFT);
+ if (error)
+ return error;
+ }
/* Disable autorepeat. The Linux input system has its own handling. */
error = regmap_write(priv->regmap, CAP11XX_REG_REPEAT_RATE, 0);
@@ -470,6 +483,7 @@ static const struct of_device_id cap11xx_dt_ids[] = {
{ .compatible = "microchip,cap1106", },
{ .compatible = "microchip,cap1126", },
{ .compatible = "microchip,cap1188", },
+ { .compatible = "microchip,cap1206", },
{}
};
MODULE_DEVICE_TABLE(of, cap11xx_dt_ids);
@@ -478,6 +492,7 @@ static const struct i2c_device_id cap11xx_i2c_ids[] = {
{ "cap1106", CAP1106 },
{ "cap1126", CAP1126 },
{ "cap1188", CAP1188 },
+ { "cap1206", CAP1206 },
{}
};
MODULE_DEVICE_TABLE(i2c, cap11xx_i2c_ids);
diff --git a/drivers/input/keyboard/cypress-sf.c b/drivers/input/keyboard/cypress-sf.c
new file mode 100644
index 00000000000000..c28996028e8030
--- /dev/null
+++ b/drivers/input/keyboard/cypress-sf.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Cypress StreetFighter Touchkey Driver
+ *
+ * Copyright (c) 2021 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/regulator/consumer.h>
+
+#define CYPRESS_SF_DEV_NAME "cypress-sf"
+
+#define CYPRESS_SF_REG_BUTTON_STATUS 0x4a
+
+struct cypress_sf_data {
+ struct i2c_client *client;
+ struct input_dev *input_dev;
+ struct regulator_bulk_data regulators[2];
+ u32 *keycodes;
+ unsigned long keystates;
+ int num_keys;
+};
+
+static irqreturn_t cypress_sf_irq_handler(int irq, void *devid)
+{
+ struct cypress_sf_data *touchkey = devid;
+ unsigned long keystates, changed;
+ bool new_state;
+ int val, key;
+
+ val = i2c_smbus_read_byte_data(touchkey->client,
+ CYPRESS_SF_REG_BUTTON_STATUS);
+ if (val < 0) {
+ dev_err(&touchkey->client->dev,
+ "Failed to read button status: %d", val);
+ return IRQ_NONE;
+ }
+ keystates = val;
+
+ bitmap_xor(&changed, &keystates, &touchkey->keystates,
+ touchkey->num_keys);
+
+ for_each_set_bit(key, &changed, touchkey->num_keys) {
+ new_state = keystates & BIT(key);
+ dev_dbg(&touchkey->client->dev,
+ "Key %d changed to %d", key, new_state);
+ input_report_key(touchkey->input_dev,
+ touchkey->keycodes[key], new_state);
+ }
+
+ input_sync(touchkey->input_dev);
+ touchkey->keystates = keystates;
+
+ return IRQ_HANDLED;
+}
+
+static int cypress_sf_probe(struct i2c_client *client)
+{
+ struct cypress_sf_data *touchkey;
+ int key, error;
+
+ touchkey = devm_kzalloc(&client->dev, sizeof(*touchkey), GFP_KERNEL);
+ if (!touchkey)
+ return -ENOMEM;
+
+ touchkey->client = client;
+ i2c_set_clientdata(client, touchkey);
+
+ touchkey->regulators[0].supply = "vdd";
+ touchkey->regulators[1].supply = "avdd";
+
+ error = devm_regulator_bulk_get(&client->dev,
+ ARRAY_SIZE(touchkey->regulators),
+ touchkey->regulators);
+ if (error) {
+ dev_err(&client->dev, "Failed to get regulators: %d\n", error);
+ return error;
+ }
+
+ touchkey->num_keys = device_property_read_u32_array(&client->dev,
+ "linux,keycodes",
+ NULL, 0);
+ if (touchkey->num_keys < 0) {
+ /* Default key count */
+ touchkey->num_keys = 2;
+ }
+
+ touchkey->keycodes = devm_kcalloc(&client->dev,
+ touchkey->num_keys,
+ sizeof(*touchkey->keycodes),
+ GFP_KERNEL);
+ if (!touchkey->keycodes)
+ return -ENOMEM;
+
+ error = device_property_read_u32_array(&client->dev, "linux,keycodes",
+ touchkey->keycodes,
+ touchkey->num_keys);
+
+ if (error) {
+ dev_warn(&client->dev,
+ "Failed to read keycodes: %d, using defaults\n",
+ error);
+
+ /* Default keycodes */
+ touchkey->keycodes[0] = KEY_BACK;
+ touchkey->keycodes[1] = KEY_MENU;
+ }
+
+ error = regulator_bulk_enable(ARRAY_SIZE(touchkey->regulators),
+ touchkey->regulators);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to enable regulators: %d\n", error);
+ return error;
+ }
+
+ touchkey->input_dev = devm_input_allocate_device(&client->dev);
+ if (!touchkey->input_dev) {
+ dev_err(&client->dev, "Failed to allocate input device\n");
+ return -ENOMEM;
+ }
+
+ touchkey->input_dev->name = CYPRESS_SF_DEV_NAME;
+ touchkey->input_dev->id.bustype = BUS_I2C;
+
+ for (key = 0; key < touchkey->num_keys; ++key)
+ input_set_capability(touchkey->input_dev,
+ EV_KEY, touchkey->keycodes[key]);
+
+ error = input_register_device(touchkey->input_dev);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to register input device: %d\n", error);
+ return error;
+ }
+
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, cypress_sf_irq_handler,
+ IRQF_ONESHOT,
+ CYPRESS_SF_DEV_NAME, touchkey);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to register threaded irq: %d", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused cypress_sf_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct cypress_sf_data *touchkey = i2c_get_clientdata(client);
+ int error;
+
+ disable_irq(client->irq);
+
+ error = regulator_bulk_disable(ARRAY_SIZE(touchkey->regulators),
+ touchkey->regulators);
+ if (error) {
+ dev_err(dev, "Failed to disable regulators: %d", error);
+ enable_irq(client->irq);
+ return error;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused cypress_sf_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct cypress_sf_data *touchkey = i2c_get_clientdata(client);
+ int error;
+
+ error = regulator_bulk_enable(ARRAY_SIZE(touchkey->regulators),
+ touchkey->regulators);
+ if (error) {
+ dev_err(dev, "Failed to enable regulators: %d", error);
+ return error;
+ }
+
+ enable_irq(client->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(cypress_sf_pm_ops,
+ cypress_sf_suspend, cypress_sf_resume);
+
+static struct i2c_device_id cypress_sf_id_table[] = {
+ { CYPRESS_SF_DEV_NAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, cypress_sf_id_table);
+
+#ifdef CONFIG_OF
+static const struct of_device_id cypress_sf_of_match[] = {
+ { .compatible = "cypress,sf3155", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, cypress_sf_of_match);
+#endif
+
+static struct i2c_driver cypress_sf_driver = {
+ .driver = {
+ .name = CYPRESS_SF_DEV_NAME,
+ .pm = &cypress_sf_pm_ops,
+ .of_match_table = of_match_ptr(cypress_sf_of_match),
+ },
+ .id_table = cypress_sf_id_table,
+ .probe_new = cypress_sf_probe,
+};
+module_i2c_driver(cypress_sf_driver);
+
+MODULE_AUTHOR("Yassine Oudjana <y.oudjana@protonmail.com>");
+MODULE_DESCRIPTION("Cypress StreetFighter Touchkey Driver");
+MODULE_LICENSE("GPL v2");
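
One probe-path detail worth noting in the new cypress-sf driver: calling device_property_read_u32_array() with a NULL buffer and a zero count asks the property core only for the number of elements, which is how the driver sizes its keycode array before actually reading it. A condensed, illustrative sketch of that count-then-read pattern (the property name matches the driver; everything else is hypothetical):

#include <linux/device.h>
#include <linux/property.h>
#include <linux/slab.h>

static int example_read_keycodes(struct device *dev, u32 **codes_out)
{
        u32 *codes;
        int n, error;

        /* NULL buffer: return the element count instead of reading values. */
        n = device_property_read_u32_array(dev, "linux,keycodes", NULL, 0);
        if (n <= 0)
                n = 2;                          /* fall back to a default count */

        codes = devm_kcalloc(dev, n, sizeof(*codes), GFP_KERNEL);
        if (!codes)
                return -ENOMEM;

        /* The second call fills exactly n elements. */
        error = device_property_read_u32_array(dev, "linux,keycodes", codes, n);
        if (error)
                return error;

        *codes_out = codes;
        return n;
}
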
diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
index e0e931e796fa3a..272a4f1c6e8191 100644
--- a/drivers/input/keyboard/ep93xx_keypad.c
+++ b/drivers/input/keyboard/ep93xx_keypad.c
@@ -17,6 +17,7 @@
* flag.
*/
+#include <linux/bits.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
@@ -26,6 +27,7 @@
#include <linux/slab.h>
#include <linux/soc/cirrus/ep93xx.h>
#include <linux/platform_data/keypad-ep93xx.h>
+#include <linux/pm_wakeirq.h>
/*
* Keypad Interface Register offsets
@@ -35,28 +37,28 @@
#define KEY_REG 0x08 /* Key Value Capture register */
/* Key Scan Initialization Register bit defines */
-#define KEY_INIT_DBNC_MASK (0x00ff0000)
-#define KEY_INIT_DBNC_SHIFT (16)
-#define KEY_INIT_DIS3KY (1<<15)
-#define KEY_INIT_DIAG (1<<14)
-#define KEY_INIT_BACK (1<<13)
-#define KEY_INIT_T2 (1<<12)
-#define KEY_INIT_PRSCL_MASK (0x000003ff)
-#define KEY_INIT_PRSCL_SHIFT (0)
+#define KEY_INIT_DBNC_MASK GENMASK(23, 16)
+#define KEY_INIT_DBNC_SHIFT 16
+#define KEY_INIT_DIS3KY BIT(15)
+#define KEY_INIT_DIAG BIT(14)
+#define KEY_INIT_BACK BIT(13)
+#define KEY_INIT_T2 BIT(12)
+#define KEY_INIT_PRSCL_MASK GENMASK(9, 0)
+#define KEY_INIT_PRSCL_SHIFT 0
/* Key Scan Diagnostic Register bit defines */
-#define KEY_DIAG_MASK (0x0000003f)
-#define KEY_DIAG_SHIFT (0)
+#define KEY_DIAG_MASK GENMASK(5, 0)
+#define KEY_DIAG_SHIFT 0
/* Key Value Capture Register bit defines */
-#define KEY_REG_K (1<<15)
-#define KEY_REG_INT (1<<14)
-#define KEY_REG_2KEYS (1<<13)
-#define KEY_REG_1KEY (1<<12)
-#define KEY_REG_KEY2_MASK (0x00000fc0)
-#define KEY_REG_KEY2_SHIFT (6)
-#define KEY_REG_KEY1_MASK (0x0000003f)
-#define KEY_REG_KEY1_SHIFT (0)
+#define KEY_REG_K BIT(15)
+#define KEY_REG_INT BIT(14)
+#define KEY_REG_2KEYS BIT(13)
+#define KEY_REG_1KEY BIT(12)
+#define KEY_REG_KEY2_MASK GENMASK(11, 6)
+#define KEY_REG_KEY2_SHIFT 6
+#define KEY_REG_KEY1_MASK GENMASK(5, 0)
+#define KEY_REG_KEY1_SHIFT 0
#define EP93XX_MATRIX_SIZE (EP93XX_MATRIX_ROWS * EP93XX_MATRIX_COLS)
@@ -175,8 +177,7 @@ static void ep93xx_keypad_close(struct input_dev *pdev)
}
-#ifdef CONFIG_PM_SLEEP
-static int ep93xx_keypad_suspend(struct device *dev)
+static int __maybe_unused ep93xx_keypad_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct ep93xx_keypad *keypad = platform_get_drvdata(pdev);
@@ -191,21 +192,15 @@ static int ep93xx_keypad_suspend(struct device *dev)
mutex_unlock(&input_dev->mutex);
- if (device_may_wakeup(&pdev->dev))
- enable_irq_wake(keypad->irq);
-
return 0;
}
-static int ep93xx_keypad_resume(struct device *dev)
+static int __maybe_unused ep93xx_keypad_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct ep93xx_keypad *keypad = platform_get_drvdata(pdev);
struct input_dev *input_dev = keypad->input_dev;
- if (device_may_wakeup(&pdev->dev))
- disable_irq_wake(keypad->irq);
-
mutex_lock(&input_dev->mutex);
if (input_device_enabled(input_dev)) {
@@ -220,11 +215,17 @@ static int ep93xx_keypad_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(ep93xx_keypad_pm_ops,
ep93xx_keypad_suspend, ep93xx_keypad_resume);
+static void ep93xx_keypad_release_gpio_action(void *_pdev)
+{
+ struct platform_device *pdev = _pdev;
+
+ ep93xx_keypad_release_gpio(pdev);
+}
+
static int ep93xx_keypad_probe(struct platform_device *pdev)
{
struct ep93xx_keypad *keypad;
@@ -233,61 +234,46 @@ static int ep93xx_keypad_probe(struct platform_device *pdev)
struct resource *res;
int err;
- keypad = kzalloc(sizeof(struct ep93xx_keypad), GFP_KERNEL);
+ keypad = devm_kzalloc(&pdev->dev, sizeof(*keypad), GFP_KERNEL);
if (!keypad)
return -ENOMEM;
keypad->pdata = dev_get_platdata(&pdev->dev);
- if (!keypad->pdata) {
- err = -EINVAL;
- goto failed_free;
- }
+ if (!keypad->pdata)
+ return -EINVAL;
keymap_data = keypad->pdata->keymap_data;
- if (!keymap_data) {
- err = -EINVAL;
- goto failed_free;
- }
+ if (!keymap_data)
+ return -EINVAL;
keypad->irq = platform_get_irq(pdev, 0);
- if (keypad->irq < 0) {
- err = keypad->irq;
- goto failed_free;
- }
+ if (keypad->irq < 0)
+ return keypad->irq;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- err = -ENXIO;
- goto failed_free;
- }
+ if (!res)
+ return -ENXIO;
- res = request_mem_region(res->start, resource_size(res), pdev->name);
- if (!res) {
- err = -EBUSY;
- goto failed_free;
- }
-
- keypad->mmio_base = ioremap(res->start, resource_size(res));
- if (keypad->mmio_base == NULL) {
- err = -ENXIO;
- goto failed_free_mem;
- }
+ keypad->mmio_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(keypad->mmio_base))
+ return PTR_ERR(keypad->mmio_base);
err = ep93xx_keypad_acquire_gpio(pdev);
if (err)
- goto failed_free_io;
+ return err;
- keypad->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(keypad->clk)) {
- err = PTR_ERR(keypad->clk);
- goto failed_free_gpio;
- }
+ err = devm_add_action_or_reset(&pdev->dev,
+ ep93xx_keypad_release_gpio_action, pdev);
+ if (err)
+ return err;
- input_dev = input_allocate_device();
- if (!input_dev) {
- err = -ENOMEM;
- goto failed_put_clk;
- }
+ keypad->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(keypad->clk))
+ return PTR_ERR(keypad->clk);
+
+ input_dev = devm_input_allocate_device(&pdev->dev);
+ if (!input_dev)
+ return -ENOMEM;
keypad->input_dev = input_dev;
@@ -295,70 +281,40 @@ static int ep93xx_keypad_probe(struct platform_device *pdev)
input_dev->id.bustype = BUS_HOST;
input_dev->open = ep93xx_keypad_open;
input_dev->close = ep93xx_keypad_close;
- input_dev->dev.parent = &pdev->dev;
err = matrix_keypad_build_keymap(keymap_data, NULL,
EP93XX_MATRIX_ROWS, EP93XX_MATRIX_COLS,
keypad->keycodes, input_dev);
if (err)
- goto failed_free_dev;
+ return err;
if (keypad->pdata->flags & EP93XX_KEYPAD_AUTOREPEAT)
__set_bit(EV_REP, input_dev->evbit);
input_set_drvdata(input_dev, keypad);
- err = request_irq(keypad->irq, ep93xx_keypad_irq_handler,
- 0, pdev->name, keypad);
+ err = devm_request_irq(&pdev->dev, keypad->irq,
+ ep93xx_keypad_irq_handler,
+ 0, pdev->name, keypad);
if (err)
- goto failed_free_dev;
+ return err;
err = input_register_device(input_dev);
if (err)
- goto failed_free_irq;
+ return err;
platform_set_drvdata(pdev, keypad);
+
device_init_wakeup(&pdev->dev, 1);
+ err = dev_pm_set_wake_irq(&pdev->dev, keypad->irq);
+ if (err)
+ dev_warn(&pdev->dev, "failed to set up wakeup irq: %d\n", err);
return 0;
-
-failed_free_irq:
- free_irq(keypad->irq, keypad);
-failed_free_dev:
- input_free_device(input_dev);
-failed_put_clk:
- clk_put(keypad->clk);
-failed_free_gpio:
- ep93xx_keypad_release_gpio(pdev);
-failed_free_io:
- iounmap(keypad->mmio_base);
-failed_free_mem:
- release_mem_region(res->start, resource_size(res));
-failed_free:
- kfree(keypad);
- return err;
}
static int ep93xx_keypad_remove(struct platform_device *pdev)
{
- struct ep93xx_keypad *keypad = platform_get_drvdata(pdev);
- struct resource *res;
-
- free_irq(keypad->irq, keypad);
-
- if (keypad->enabled)
- clk_disable(keypad->clk);
- clk_put(keypad->clk);
-
- input_unregister_device(keypad->input_dev);
-
- ep93xx_keypad_release_gpio(pdev);
-
- iounmap(keypad->mmio_base);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
- kfree(keypad);
+ dev_pm_clear_wake_irq(&pdev->dev);
return 0;
}
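
A note on the ep93xx keypad suspend/resume hunks above: once the probe path calls device_init_wakeup() and dev_pm_set_wake_irq(), the PM core arms and disarms the keypad interrupt as a wakeup source by itself whenever the device may wake the system, which is why the manual enable_irq_wake()/disable_irq_wake() calls could be deleted and remove() only has to call dev_pm_clear_wake_irq(). A minimal sketch, assuming a platform device with a single interrupt:

#include <linux/platform_device.h>
#include <linux/pm_wakeup.h>
#include <linux/pm_wakeirq.h>

static int example_setup_wakeup(struct platform_device *pdev, int irq)
{
        device_init_wakeup(&pdev->dev, true);

        /* The PM core now handles enable_irq_wake()/disable_irq_wake(). */
        return dev_pm_set_wake_irq(&pdev->dev, irq);
}
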
diff --git a/drivers/input/keyboard/mpr121_touchkey.c b/drivers/input/keyboard/mpr121_touchkey.c
index 40d6e5087cdef0..230ab3d50b9e93 100644
--- a/drivers/input/keyboard/mpr121_touchkey.c
+++ b/drivers/input/keyboard/mpr121_touchkey.c
@@ -107,9 +107,9 @@ static struct regulator *mpr121_vdd_supply_init(struct device *dev)
return ERR_PTR(err);
}
- err = devm_add_action(dev, mpr121_vdd_supply_disable, vdd_supply);
+ err = devm_add_action_or_reset(dev, mpr121_vdd_supply_disable,
+ vdd_supply);
if (err) {
- regulator_disable(vdd_supply);
dev_err(dev, "failed to add disable regulator action: %d\n",
err);
return ERR_PTR(err);
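
The mpr121 change above relies on a behavioural difference between the two devm action helpers: devm_add_action_or_reset() already runs the action callback itself when registration fails, so the caller no longer needs the explicit regulator_disable() on that error path. A minimal sketch of the pattern, with hypothetical names:

#include <linux/device.h>
#include <linux/regulator/consumer.h>

static void example_disable_vdd(void *data)
{
        regulator_disable(data);
}

static int example_enable_vdd(struct device *dev, struct regulator *vdd)
{
        int error = regulator_enable(vdd);

        if (error)
                return error;

        /* On failure, this helper has already called example_disable_vdd(vdd). */
        return devm_add_action_or_reset(dev, example_disable_vdd, vdd);
}
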
diff --git a/drivers/input/keyboard/omap-keypad.c b/drivers/input/keyboard/omap-keypad.c
index dbe836c7ff4735..eb3a687796e721 100644
--- a/drivers/input/keyboard/omap-keypad.c
+++ b/drivers/input/keyboard/omap-keypad.c
@@ -190,8 +190,7 @@ static int omap_kp_probe(struct platform_device *pdev)
row_shift = get_count_order(pdata->cols);
keycodemax = pdata->rows << row_shift;
- omap_kp = kzalloc(sizeof(struct omap_kp) +
- keycodemax * sizeof(unsigned short), GFP_KERNEL);
+ omap_kp = kzalloc(struct_size(omap_kp, keymap, keycodemax), GFP_KERNEL);
input_dev = input_allocate_device();
if (!omap_kp || !input_dev) {
kfree(omap_kp);
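
The omap-keypad allocation above switches to struct_size(), which computes the size of a structure ending in a flexible array member and saturates rather than wrapping when the multiplication overflows, so an oversized keycodemax cannot silently under-allocate. A minimal sketch with a hypothetical structure (the real struct omap_kp lives in the driver):

#include <linux/overflow.h>
#include <linux/slab.h>

struct example_kp {
        int rows;
        unsigned short keymap[];        /* flexible array member */
};

static struct example_kp *example_alloc(unsigned int keycodemax)
{
        struct example_kp *kp;

        /* sizeof(*kp) + keycodemax * sizeof(kp->keymap[0]), overflow-checked */
        kp = kzalloc(struct_size(kp, keymap, keycodemax), GFP_KERNEL);
        return kp;
}
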
diff --git a/drivers/input/keyboard/tm2-touchkey.c b/drivers/input/keyboard/tm2-touchkey.c
index 6218b1c682ef66..632cd6c1c8d4d0 100644
--- a/drivers/input/keyboard/tm2-touchkey.c
+++ b/drivers/input/keyboard/tm2-touchkey.c
@@ -156,6 +156,8 @@ static irqreturn_t tm2_touchkey_irq_handler(int irq, void *devid)
goto out;
}
+ input_event(touchkey->input_dev, EV_MSC, MSC_SCAN, index);
+
if (data & TM2_TOUCHKEY_BIT_PRESS_EV) {
for (i = 0; i < touchkey->num_keycodes; i++)
input_report_key(touchkey->input_dev,
@@ -250,6 +252,11 @@ static int tm2_touchkey_probe(struct i2c_client *client,
touchkey->input_dev->name = TM2_TOUCHKEY_DEV_NAME;
touchkey->input_dev->id.bustype = BUS_I2C;
+ touchkey->input_dev->keycode = touchkey->keycodes;
+ touchkey->input_dev->keycodemax = touchkey->num_keycodes;
+ touchkey->input_dev->keycodesize = sizeof(touchkey->keycodes[0]);
+
+ input_set_capability(touchkey->input_dev, EV_MSC, MSC_SCAN);
for (i = 0; i < touchkey->num_keycodes; i++)
input_set_capability(touchkey->input_dev, EV_KEY,
touchkey->keycodes[i]);
diff --git a/drivers/input/misc/adxl34x-i2c.c b/drivers/input/misc/adxl34x-i2c.c
index e64368a633466b..a3b5f88d2bd160 100644
--- a/drivers/input/misc/adxl34x-i2c.c
+++ b/drivers/input/misc/adxl34x-i2c.c
@@ -103,7 +103,9 @@ static int adxl34x_i2c_remove(struct i2c_client *client)
{
struct adxl34x *ac = i2c_get_clientdata(client);
- return adxl34x_remove(ac);
+ adxl34x_remove(ac);
+
+ return 0;
}
static int __maybe_unused adxl34x_i2c_suspend(struct device *dev)
diff --git a/drivers/input/misc/adxl34x-spi.c b/drivers/input/misc/adxl34x-spi.c
index df6afa455e4650..6e51c9bc619f2b 100644
--- a/drivers/input/misc/adxl34x-spi.c
+++ b/drivers/input/misc/adxl34x-spi.c
@@ -91,7 +91,9 @@ static int adxl34x_spi_remove(struct spi_device *spi)
{
struct adxl34x *ac = spi_get_drvdata(spi);
- return adxl34x_remove(ac);
+ adxl34x_remove(ac);
+
+ return 0;
}
static int __maybe_unused adxl34x_spi_suspend(struct device *dev)
diff --git a/drivers/input/misc/adxl34x.c b/drivers/input/misc/adxl34x.c
index 4cc4e8ff42b33f..a4af314392a9f7 100644
--- a/drivers/input/misc/adxl34x.c
+++ b/drivers/input/misc/adxl34x.c
@@ -237,7 +237,7 @@ static const struct adxl34x_platform_data adxl34x_default_init = {
static void adxl34x_get_triple(struct adxl34x *ac, struct axis_triple *axis)
{
- short buf[3];
+ __le16 buf[3];
ac->bops->read_block(ac->dev, DATAX0, DATAZ1 - DATAX0 + 1, buf);
@@ -896,15 +896,13 @@ struct adxl34x *adxl34x_probe(struct device *dev, int irq,
}
EXPORT_SYMBOL_GPL(adxl34x_probe);
-int adxl34x_remove(struct adxl34x *ac)
+void adxl34x_remove(struct adxl34x *ac)
{
sysfs_remove_group(&ac->dev->kobj, &adxl34x_attr_group);
free_irq(ac->irq, ac);
input_unregister_device(ac->input);
dev_dbg(ac->dev, "unregistered accelerometer\n");
kfree(ac);
-
- return 0;
}
EXPORT_SYMBOL_GPL(adxl34x_remove);
diff --git a/drivers/input/misc/adxl34x.h b/drivers/input/misc/adxl34x.h
index 83a0eeccf61337..febf85270fff49 100644
--- a/drivers/input/misc/adxl34x.h
+++ b/drivers/input/misc/adxl34x.h
@@ -25,6 +25,6 @@ void adxl34x_resume(struct adxl34x *ac);
struct adxl34x *adxl34x_probe(struct device *dev, int irq,
bool fifo_delay_default,
const struct adxl34x_bus_ops *bops);
-int adxl34x_remove(struct adxl34x *ac);
+void adxl34x_remove(struct adxl34x *ac);
#endif
diff --git a/drivers/input/misc/ariel-pwrbutton.c b/drivers/input/misc/ariel-pwrbutton.c
index 17bbaac8b80c89..cdc80715b5fd68 100644
--- a/drivers/input/misc/ariel-pwrbutton.c
+++ b/drivers/input/misc/ariel-pwrbutton.c
@@ -149,12 +149,19 @@ static const struct of_device_id ariel_pwrbutton_of_match[] = {
};
MODULE_DEVICE_TABLE(of, ariel_pwrbutton_of_match);
+static const struct spi_device_id ariel_pwrbutton_spi_ids[] = {
+ { .name = "wyse-ariel-ec-input" },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ariel_pwrbutton_spi_ids);
+
static struct spi_driver ariel_pwrbutton_driver = {
.driver = {
.name = "dell-wyse-ariel-ec-input",
.of_match_table = ariel_pwrbutton_of_match,
},
.probe = ariel_pwrbutton_probe,
+ .id_table = ariel_pwrbutton_spi_ids,
};
module_spi_driver(ariel_pwrbutton_driver);
diff --git a/drivers/input/misc/cpcap-pwrbutton.c b/drivers/input/misc/cpcap-pwrbutton.c
index 0abef63217e21e..879790bbf9fe23 100644
--- a/drivers/input/misc/cpcap-pwrbutton.c
+++ b/drivers/input/misc/cpcap-pwrbutton.c
@@ -54,9 +54,13 @@ static irqreturn_t powerbutton_irq(int irq, void *_button)
static int cpcap_power_button_probe(struct platform_device *pdev)
{
struct cpcap_power_button *button;
- int irq = platform_get_irq(pdev, 0);
+ int irq;
int err;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
button = devm_kmalloc(&pdev->dev, sizeof(*button), GFP_KERNEL);
if (!button)
return -ENOMEM;
@@ -73,7 +77,6 @@ static int cpcap_power_button_probe(struct platform_device *pdev)
button->idev->name = "cpcap-pwrbutton";
button->idev->phys = "cpcap-pwrbutton/input0";
- button->idev->dev.parent = button->dev;
input_set_capability(button->idev, EV_KEY, KEY_POWER);
err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
diff --git a/drivers/input/misc/max77693-haptic.c b/drivers/input/misc/max77693-haptic.c
index 0d09ffeafeea1f..4369d3c04d387a 100644
--- a/drivers/input/misc/max77693-haptic.c
+++ b/drivers/input/misc/max77693-haptic.c
@@ -424,5 +424,4 @@ module_platform_driver(max77693_haptic_driver);
MODULE_AUTHOR("Jaewon Kim <jaewon02.kim@samsung.com>");
MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
MODULE_DESCRIPTION("MAXIM 77693/77843 Haptic driver");
-MODULE_ALIAS("platform:max77693-haptic");
MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/max8925_onkey.c b/drivers/input/misc/max8925_onkey.c
index ffab4a490c7544..4770cb55631aae 100644
--- a/drivers/input/misc/max8925_onkey.c
+++ b/drivers/input/misc/max8925_onkey.c
@@ -1,4 +1,4 @@
-/**
+/*
* MAX8925 ONKEY driver
*
* Copyright (C) 2009 Marvell International Ltd.
diff --git a/drivers/input/misc/palmas-pwrbutton.c b/drivers/input/misc/palmas-pwrbutton.c
index 1e1baed6392908..f9b05cf09ff53a 100644
--- a/drivers/input/misc/palmas-pwrbutton.c
+++ b/drivers/input/misc/palmas-pwrbutton.c
@@ -210,6 +210,11 @@ static int palmas_pwron_probe(struct platform_device *pdev)
INIT_DELAYED_WORK(&pwron->input_work, palmas_power_button_work);
pwron->irq = platform_get_irq(pdev, 0);
+ if (pwron->irq < 0) {
+ error = pwron->irq;
+ goto err_free_input;
+ }
+
error = request_threaded_irq(pwron->irq, NULL, pwron_irq,
IRQF_TRIGGER_HIGH |
IRQF_TRIGGER_LOW |
diff --git a/drivers/input/misc/pm8941-pwrkey.c b/drivers/input/misc/pm8941-pwrkey.c
index 33609603245dc0..89af52498c9696 100644
--- a/drivers/input/misc/pm8941-pwrkey.c
+++ b/drivers/input/misc/pm8941-pwrkey.c
@@ -29,6 +29,7 @@
#define PON_PS_HOLD_RST_CTL2 0x5b
#define PON_PS_HOLD_ENABLE BIT(7)
#define PON_PS_HOLD_TYPE_MASK 0x0f
+#define PON_PS_HOLD_TYPE_WARM_RESET 1
#define PON_PS_HOLD_TYPE_SHUTDOWN 4
#define PON_PS_HOLD_TYPE_HARD_RESET 7
@@ -99,7 +100,10 @@ static int pm8941_reboot_notify(struct notifier_block *nb,
break;
case SYS_RESTART:
default:
- reset_type = PON_PS_HOLD_TYPE_HARD_RESET;
+ if (reboot_mode == REBOOT_WARM)
+ reset_type = PON_PS_HOLD_TYPE_WARM_RESET;
+ else
+ reset_type = PON_PS_HOLD_TYPE_HARD_RESET;
break;
}
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 2d0bc029619ff9..956d9cd3479648 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -517,6 +517,19 @@ static void elantech_report_trackpoint(struct psmouse *psmouse,
case 0x16008020U:
case 0x26800010U:
case 0x36808000U:
+
+ /*
+		 * This firmware occasionally misreports trackpoint coordinates.
+		 * Discard packets outside of the [-127, 127] range to prevent
+		 * cursor jumps.
+ */
+ if (packet[4] == 0x80 || packet[5] == 0x80 ||
+ packet[1] >> 7 == packet[4] >> 7 ||
+ packet[2] >> 7 == packet[5] >> 7) {
+ elantech_debug("discarding packet [%6ph]\n", packet);
+ break;
+		}
x = packet[4] - (int)((packet[1]^0x80) << 1);
y = (int)((packet[2]^0x80) << 1) - packet[5];
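A standalone, user-space sketch of the trackpoint decode plus the new sanity filter, illustrative only; the packet layout is assumed to match the hunk above:

	#include <stdint.h>
	#include <stdio.h>

	/* Decode a trackpoint packet the way the hunk above does and apply
	 * the [-127, 127] sanity filter before reporting the deltas. */
	static int decode_trackpoint(const uint8_t packet[6], int *x, int *y)
	{
		/*
		 * 0x80 in a magnitude byte, or matching sign bits between a
		 * sign byte and its magnitude byte, would produce a delta
		 * outside [-127, 127]; drop such packets as firmware glitches.
		 */
		if (packet[4] == 0x80 || packet[5] == 0x80 ||
		    (packet[1] >> 7) == (packet[4] >> 7) ||
		    (packet[2] >> 7) == (packet[5] >> 7))
			return -1;

		*x = packet[4] - (int)((packet[1] ^ 0x80) << 1);
		*y = (int)((packet[2] ^ 0x80) << 1) - packet[5];
		return 0;
	}

	int main(void)
	{
		const uint8_t good[6] = { 0x06, 0x80, 0x80, 0x36, 0x03, 0x02 };
		int x, y;

		if (decode_trackpoint(good, &x, &y) == 0)
			printf("dx=%d dy=%d\n", x, y);	/* dx=3 dy=-2 */
		return 0;
	}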
diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c
index 24f31a5c0e04af..50a0134b6901b7 100644
--- a/drivers/input/rmi4/rmi_bus.c
+++ b/drivers/input/rmi4/rmi_bus.c
@@ -90,6 +90,7 @@ int rmi_register_transport_device(struct rmi_transport_dev *xport)
rmi_dev->dev.bus = &rmi_bus_type;
rmi_dev->dev.type = &rmi_device_type;
+ rmi_dev->dev.parent = xport->dev;
xport->rmi_dev = rmi_dev;
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index a5a0035536462a..aedd0554104435 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -273,6 +273,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
},
},
{
+ /* Fujitsu Lifebook T725 laptop */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T725"),
+ },
+ },
+ {
/* Fujitsu Lifebook U745 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
@@ -841,6 +848,13 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
},
},
{
+ /* Fujitsu Lifebook T725 laptop */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T725"),
+ },
+ },
+ {
/* Fujitsu U574 laptop */
/* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
.matches = {
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index d4e74738c5a82f..2f6adfb7b938f8 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -425,6 +425,7 @@ config TOUCHSCREEN_HYCON_HY46XX
config TOUCHSCREEN_ILI210X
tristate "Ilitek ILI210X based touchscreen"
depends on I2C
+ select CRC_CCITT
help
Say Y here if you have a ILI210X based touchscreen
controller. This driver supports models ILI2102,
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 7d34100f7f223f..39a8127cf6a55f 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -6,6 +6,7 @@
# Each configuration option enables a list of files.
wm97xx-ts-y := wm97xx-core.o
+goodix_ts-y := goodix.o goodix_fwupload.o
obj-$(CONFIG_TOUCHSCREEN_88PM860X) += 88pm860x-ts.o
obj-$(CONFIG_TOUCHSCREEN_AD7877) += ad7877.o
@@ -44,7 +45,7 @@ obj-$(CONFIG_TOUCHSCREEN_EGALAX) += egalax_ts.o
obj-$(CONFIG_TOUCHSCREEN_EGALAX_SERIAL) += egalax_ts_serial.o
obj-$(CONFIG_TOUCHSCREEN_EXC3000) += exc3000.o
obj-$(CONFIG_TOUCHSCREEN_FUJITSU) += fujitsu_ts.o
-obj-$(CONFIG_TOUCHSCREEN_GOODIX) += goodix.o
+obj-$(CONFIG_TOUCHSCREEN_GOODIX) += goodix_ts.o
obj-$(CONFIG_TOUCHSCREEN_HIDEEP) += hideep.o
obj-$(CONFIG_TOUCHSCREEN_ILI210X) += ili210x.o
obj-$(CONFIG_TOUCHSCREEN_ILITEK) += ilitek_ts_i2c.o
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index f113a27aeb1ef1..a25a77dd9a32dc 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -101,10 +101,6 @@ struct ads7846 {
struct spi_device *spi;
struct regulator *reg;
-#if IS_ENABLED(CONFIG_HWMON)
- struct device *hwmon;
-#endif
-
u16 model;
u16 vref_mv;
u16 vref_delay_usecs;
@@ -142,13 +138,18 @@ struct ads7846 {
int (*filter)(void *data, int data_idx, int *val);
void *filter_data;
- void (*filter_cleanup)(void *data);
int (*get_pendown_state)(void);
int gpio_pendown;
void (*wait_for_sync)(void);
};
+enum ads7846_filter {
+ ADS7846_FILTER_OK,
+ ADS7846_FILTER_REPEAT,
+ ADS7846_FILTER_IGNORE,
+};
+
/* leave chip selected when we're done, for quicker re-select? */
#if 0
#define CS_CHANGE(xfer) ((xfer).cs_change = 1)
@@ -549,6 +550,8 @@ __ATTRIBUTE_GROUPS(ads7846_attr);
static int ads784x_hwmon_register(struct spi_device *spi, struct ads7846 *ts)
{
+ struct device *hwmon;
+
/* hwmon sensors need a reference voltage */
switch (ts->model) {
case 7846:
@@ -569,17 +572,11 @@ static int ads784x_hwmon_register(struct spi_device *spi, struct ads7846 *ts)
break;
}
- ts->hwmon = hwmon_device_register_with_groups(&spi->dev, spi->modalias,
- ts, ads7846_attr_groups);
+ hwmon = devm_hwmon_device_register_with_groups(&spi->dev,
+ spi->modalias, ts,
+ ads7846_attr_groups);
- return PTR_ERR_OR_ZERO(ts->hwmon);
-}
-
-static void ads784x_hwmon_unregister(struct spi_device *spi,
- struct ads7846 *ts)
-{
- if (ts->hwmon)
- hwmon_device_unregister(ts->hwmon);
+ return PTR_ERR_OR_ZERO(hwmon);
}
#else
@@ -588,11 +585,6 @@ static inline int ads784x_hwmon_register(struct spi_device *spi,
{
return 0;
}
-
-static inline void ads784x_hwmon_unregister(struct spi_device *spi,
- struct ads7846 *ts)
-{
-}
#endif
static ssize_t ads7846_pen_down_show(struct device *dev,
@@ -1014,8 +1006,8 @@ static int ads7846_setup_pendown(struct spi_device *spi,
ts->get_pendown_state = pdata->get_pendown_state;
} else if (gpio_is_valid(pdata->gpio_pendown)) {
- err = gpio_request_one(pdata->gpio_pendown, GPIOF_IN,
- "ads7846_pendown");
+ err = devm_gpio_request_one(&spi->dev, pdata->gpio_pendown,
+ GPIOF_IN, "ads7846_pendown");
if (err) {
dev_err(&spi->dev,
"failed to request/setup pendown GPIO%d: %d\n",
@@ -1212,24 +1204,30 @@ static const struct ads7846_platform_data *ads7846_probe_dt(struct device *dev)
}
#endif
+static void ads7846_regulator_disable(void *regulator)
+{
+ regulator_disable(regulator);
+}
+
static int ads7846_probe(struct spi_device *spi)
{
const struct ads7846_platform_data *pdata;
struct ads7846 *ts;
+ struct device *dev = &spi->dev;
struct ads7846_packet *packet;
struct input_dev *input_dev;
unsigned long irq_flags;
int err;
if (!spi->irq) {
- dev_dbg(&spi->dev, "no IRQ?\n");
+ dev_dbg(dev, "no IRQ?\n");
return -EINVAL;
}
/* don't exceed max specified sample rate */
if (spi->max_speed_hz > (125000 * SAMPLE_BITS)) {
- dev_err(&spi->dev, "f(sample) %d KHz?\n",
- (spi->max_speed_hz/SAMPLE_BITS)/1000);
+ dev_err(dev, "f(sample) %d KHz?\n",
+ (spi->max_speed_hz/SAMPLE_BITS)/1000);
return -EINVAL;
}
@@ -1245,13 +1243,17 @@ static int ads7846_probe(struct spi_device *spi)
if (err < 0)
return err;
- ts = kzalloc(sizeof(struct ads7846), GFP_KERNEL);
- packet = kzalloc(sizeof(struct ads7846_packet), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!ts || !packet || !input_dev) {
- err = -ENOMEM;
- goto err_free_mem;
- }
+ ts = devm_kzalloc(dev, sizeof(struct ads7846), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ packet = devm_kzalloc(dev, sizeof(struct ads7846_packet), GFP_KERNEL);
+ if (!packet)
+ return -ENOMEM;
+
+ input_dev = devm_input_allocate_device(dev);
+ if (!input_dev)
+ return -ENOMEM;
spi_set_drvdata(spi, ts);
@@ -1262,13 +1264,11 @@ static int ads7846_probe(struct spi_device *spi)
mutex_init(&ts->lock);
init_waitqueue_head(&ts->wait);
- pdata = dev_get_platdata(&spi->dev);
+ pdata = dev_get_platdata(dev);
if (!pdata) {
- pdata = ads7846_probe_dt(&spi->dev);
- if (IS_ERR(pdata)) {
- err = PTR_ERR(pdata);
- goto err_free_mem;
- }
+ pdata = ads7846_probe_dt(dev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
}
ts->model = pdata->model ? : 7846;
@@ -1276,15 +1276,7 @@ static int ads7846_probe(struct spi_device *spi)
ts->x_plate_ohms = pdata->x_plate_ohms ? : 400;
ts->vref_mv = pdata->vref_mv;
- if (pdata->filter != NULL) {
- if (pdata->filter_init != NULL) {
- err = pdata->filter_init(pdata, &ts->filter_data);
- if (err < 0)
- goto err_free_mem;
- }
- ts->filter = pdata->filter;
- ts->filter_cleanup = pdata->filter_cleanup;
- } else if (pdata->debounce_max) {
+ if (pdata->debounce_max) {
ts->debounce_max = pdata->debounce_max;
if (ts->debounce_max < 2)
ts->debounce_max = 2;
@@ -1298,7 +1290,7 @@ static int ads7846_probe(struct spi_device *spi)
err = ads7846_setup_pendown(spi, ts, pdata);
if (err)
- goto err_cleanup_filter;
+ return err;
if (pdata->penirq_recheck_delay_usecs)
ts->penirq_recheck_delay_usecs =
@@ -1306,15 +1298,16 @@ static int ads7846_probe(struct spi_device *spi)
ts->wait_for_sync = pdata->wait_for_sync ? : null_wait_for_sync;
- snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(&spi->dev));
+ snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(dev));
snprintf(ts->name, sizeof(ts->name), "ADS%d Touchscreen", ts->model);
input_dev->name = ts->name;
input_dev->phys = ts->phys;
- input_dev->dev.parent = &spi->dev;
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
- input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+ input_dev->id.bustype = BUS_SPI;
+ input_dev->id.product = pdata->model;
+
+ input_set_capability(input_dev, EV_KEY, BTN_TOUCH);
input_set_abs_params(input_dev, ABS_X,
pdata->x_min ? : 0,
pdata->x_max ? : MAX_12BIT,
@@ -1345,125 +1338,84 @@ static int ads7846_probe(struct spi_device *spi)
ads7846_setup_spi_msg(ts, pdata);
- ts->reg = regulator_get(&spi->dev, "vcc");
+ ts->reg = devm_regulator_get(dev, "vcc");
if (IS_ERR(ts->reg)) {
err = PTR_ERR(ts->reg);
- dev_err(&spi->dev, "unable to get regulator: %d\n", err);
- goto err_free_gpio;
+ dev_err(dev, "unable to get regulator: %d\n", err);
+ return err;
}
err = regulator_enable(ts->reg);
if (err) {
- dev_err(&spi->dev, "unable to enable regulator: %d\n", err);
- goto err_put_regulator;
+ dev_err(dev, "unable to enable regulator: %d\n", err);
+ return err;
}
+ err = devm_add_action_or_reset(dev, ads7846_regulator_disable, ts->reg);
+ if (err)
+ return err;
+
irq_flags = pdata->irq_flags ? : IRQF_TRIGGER_FALLING;
irq_flags |= IRQF_ONESHOT;
- err = request_threaded_irq(spi->irq, ads7846_hard_irq, ads7846_irq,
- irq_flags, spi->dev.driver->name, ts);
- if (err && !pdata->irq_flags) {
- dev_info(&spi->dev,
+ err = devm_request_threaded_irq(dev, spi->irq,
+ ads7846_hard_irq, ads7846_irq,
+ irq_flags, dev->driver->name, ts);
+ if (err && err != -EPROBE_DEFER && !pdata->irq_flags) {
+ dev_info(dev,
"trying pin change workaround on irq %d\n", spi->irq);
irq_flags |= IRQF_TRIGGER_RISING;
- err = request_threaded_irq(spi->irq,
- ads7846_hard_irq, ads7846_irq,
- irq_flags, spi->dev.driver->name, ts);
+ err = devm_request_threaded_irq(dev, spi->irq,
+ ads7846_hard_irq, ads7846_irq,
+ irq_flags, dev->driver->name,
+ ts);
}
if (err) {
- dev_dbg(&spi->dev, "irq %d busy?\n", spi->irq);
- goto err_disable_regulator;
+ dev_dbg(dev, "irq %d busy?\n", spi->irq);
+ return err;
}
err = ads784x_hwmon_register(spi, ts);
if (err)
- goto err_free_irq;
+ return err;
- dev_info(&spi->dev, "touchscreen, irq %d\n", spi->irq);
+ dev_info(dev, "touchscreen, irq %d\n", spi->irq);
/*
* Take a first sample, leaving nPENIRQ active and vREF off; avoid
* the touchscreen, in case it's not connected.
*/
if (ts->model == 7845)
- ads7845_read12_ser(&spi->dev, PWRDOWN);
+ ads7845_read12_ser(dev, PWRDOWN);
else
- (void) ads7846_read12_ser(&spi->dev, READ_12BIT_SER(vaux));
+ (void) ads7846_read12_ser(dev, READ_12BIT_SER(vaux));
- err = sysfs_create_group(&spi->dev.kobj, &ads784x_attr_group);
+ err = devm_device_add_group(dev, &ads784x_attr_group);
if (err)
- goto err_remove_hwmon;
+ return err;
err = input_register_device(input_dev);
if (err)
- goto err_remove_attr_group;
+ return err;
- device_init_wakeup(&spi->dev, pdata->wakeup);
+ device_init_wakeup(dev, pdata->wakeup);
/*
* If device does not carry platform data we must have allocated it
* when parsing DT data.
*/
- if (!dev_get_platdata(&spi->dev))
- devm_kfree(&spi->dev, (void *)pdata);
+ if (!dev_get_platdata(dev))
+ devm_kfree(dev, (void *)pdata);
return 0;
-
- err_remove_attr_group:
- sysfs_remove_group(&spi->dev.kobj, &ads784x_attr_group);
- err_remove_hwmon:
- ads784x_hwmon_unregister(spi, ts);
- err_free_irq:
- free_irq(spi->irq, ts);
- err_disable_regulator:
- regulator_disable(ts->reg);
- err_put_regulator:
- regulator_put(ts->reg);
- err_free_gpio:
- if (!ts->get_pendown_state)
- gpio_free(ts->gpio_pendown);
- err_cleanup_filter:
- if (ts->filter_cleanup)
- ts->filter_cleanup(ts->filter_data);
- err_free_mem:
- input_free_device(input_dev);
- kfree(packet);
- kfree(ts);
- return err;
}
static int ads7846_remove(struct spi_device *spi)
{
struct ads7846 *ts = spi_get_drvdata(spi);
- sysfs_remove_group(&spi->dev.kobj, &ads784x_attr_group);
-
- ads7846_disable(ts);
- free_irq(ts->spi->irq, ts);
-
- input_unregister_device(ts->input);
-
- ads784x_hwmon_unregister(spi, ts);
-
- regulator_put(ts->reg);
-
- if (!ts->get_pendown_state) {
- /*
- * If we are not using specialized pendown method we must
- * have been relying on gpio we set up ourselves.
- */
- gpio_free(ts->gpio_pendown);
- }
-
- if (ts->filter_cleanup)
- ts->filter_cleanup(ts->filter_data);
-
- kfree(ts->packet);
- kfree(ts);
-
- dev_dbg(&spi->dev, "unregistered touchscreen\n");
+ ads7846_stop(ts);
return 0;
}
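The probe/remove rewrite above leans on devm_add_action_or_reset() so the enabled regulator is disabled automatically on probe failure and on unbind, which is what makes all the explicit error labels removable. A minimal, hypothetical sketch of that pattern (function and supply names are made up, not from this driver):

	#include <linux/device.h>
	#include <linux/regulator/consumer.h>

	static void example_regulator_disable(void *data)
	{
		regulator_disable(data);
	}

	static int example_enable_supply(struct device *dev, struct regulator **out)
	{
		struct regulator *reg;
		int err;

		reg = devm_regulator_get(dev, "vcc");
		if (IS_ERR(reg))
			return PTR_ERR(reg);

		err = regulator_enable(reg);
		if (err)
			return err;

		/*
		 * If adding the action fails it runs immediately, so the
		 * regulator is never left enabled; otherwise it runs
		 * automatically when the device is unbound.
		 */
		err = devm_add_action_or_reset(dev, example_regulator_disable, reg);
		if (err)
			return err;

		*out = reg;
		return 0;
	}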
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index 68f542bb809f4a..7e13a66a8a95c8 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -1439,11 +1439,11 @@ static int elants_i2c_probe(struct i2c_client *client)
if (error)
return error;
- error = devm_add_action(&client->dev, elants_i2c_power_off, ts);
+ error = devm_add_action_or_reset(&client->dev,
+ elants_i2c_power_off, ts);
if (error) {
dev_err(&client->dev,
"failed to install power off action: %d\n", error);
- elants_i2c_power_off(ts);
return error;
}
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index 4f53d3c57e6980..b5cc9178819530 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -14,20 +14,15 @@
#include <linux/kernel.h>
#include <linux/dmi.h>
#include <linux/firmware.h>
-#include <linux/gpio/consumer.h>
-#include <linux/i2c.h>
-#include <linux/input.h>
-#include <linux/input/mt.h>
-#include <linux/input/touchscreen.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
-#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/of.h>
#include <asm/unaligned.h>
+#include "goodix.h"
#define GOODIX_GPIO_INT_NAME "irq"
#define GOODIX_GPIO_RST_NAME "reset"
@@ -38,22 +33,11 @@
#define GOODIX_CONTACT_SIZE 8
#define GOODIX_MAX_CONTACT_SIZE 9
#define GOODIX_MAX_CONTACTS 10
-#define GOODIX_MAX_KEYS 7
#define GOODIX_CONFIG_MIN_LENGTH 186
#define GOODIX_CONFIG_911_LENGTH 186
#define GOODIX_CONFIG_967_LENGTH 228
#define GOODIX_CONFIG_GT9X_LENGTH 240
-#define GOODIX_CONFIG_MAX_LENGTH 240
-
-/* Register defines */
-#define GOODIX_REG_COMMAND 0x8040
-#define GOODIX_CMD_SCREEN_OFF 0x05
-
-#define GOODIX_READ_COOR_ADDR 0x814E
-#define GOODIX_GT1X_REG_CONFIG_DATA 0x8050
-#define GOODIX_GT9X_REG_CONFIG_DATA 0x8047
-#define GOODIX_REG_ID 0x8140
#define GOODIX_BUFFER_STATUS_READY BIT(7)
#define GOODIX_HAVE_KEY BIT(4)
@@ -68,55 +52,11 @@
#define ACPI_GPIO_SUPPORT
#endif
-struct goodix_ts_data;
-
-enum goodix_irq_pin_access_method {
- IRQ_PIN_ACCESS_NONE,
- IRQ_PIN_ACCESS_GPIO,
- IRQ_PIN_ACCESS_ACPI_GPIO,
- IRQ_PIN_ACCESS_ACPI_METHOD,
-};
-
-struct goodix_chip_data {
- u16 config_addr;
- int config_len;
- int (*check_config)(struct goodix_ts_data *ts, const u8 *cfg, int len);
- void (*calc_config_checksum)(struct goodix_ts_data *ts);
-};
-
struct goodix_chip_id {
const char *id;
const struct goodix_chip_data *data;
};
-#define GOODIX_ID_MAX_LEN 4
-
-struct goodix_ts_data {
- struct i2c_client *client;
- struct input_dev *input_dev;
- const struct goodix_chip_data *chip;
- struct touchscreen_properties prop;
- unsigned int max_touch_num;
- unsigned int int_trigger_type;
- struct regulator *avdd28;
- struct regulator *vddio;
- struct gpio_desc *gpiod_int;
- struct gpio_desc *gpiod_rst;
- int gpio_count;
- int gpio_int_idx;
- char id[GOODIX_ID_MAX_LEN + 1];
- u16 version;
- const char *cfg_name;
- bool reset_controller_at_probe;
- bool load_cfg_from_disk;
- struct completion firmware_loading_complete;
- unsigned long irq_flags;
- enum goodix_irq_pin_access_method irq_pin_access_method;
- unsigned int contact_size;
- u8 config[GOODIX_CONFIG_MAX_LENGTH];
- unsigned short keymap[GOODIX_MAX_KEYS];
-};
-
static int goodix_check_cfg_8(struct goodix_ts_data *ts,
const u8 *cfg, int len);
static int goodix_check_cfg_16(struct goodix_ts_data *ts,
@@ -215,8 +155,7 @@ static const struct dmi_system_id inverted_x_screen[] = {
* @buf: raw write data buffer.
* @len: length of the buffer to write
*/
-static int goodix_i2c_read(struct i2c_client *client,
- u16 reg, u8 *buf, int len)
+int goodix_i2c_read(struct i2c_client *client, u16 reg, u8 *buf, int len)
{
struct i2c_msg msgs[2];
__be16 wbuf = cpu_to_be16(reg);
@@ -233,7 +172,13 @@ static int goodix_i2c_read(struct i2c_client *client,
msgs[1].buf = buf;
ret = i2c_transfer(client->adapter, msgs, 2);
- return ret < 0 ? ret : (ret != ARRAY_SIZE(msgs) ? -EIO : 0);
+ if (ret >= 0)
+ ret = (ret == ARRAY_SIZE(msgs) ? 0 : -EIO);
+
+ if (ret)
+ dev_err(&client->dev, "Error reading %d bytes from 0x%04x: %d\n",
+ len, reg, ret);
+ return ret;
}
/**
@@ -244,8 +189,7 @@ static int goodix_i2c_read(struct i2c_client *client,
* @buf: raw data buffer to write.
* @len: length of the buffer to write
*/
-static int goodix_i2c_write(struct i2c_client *client, u16 reg, const u8 *buf,
- unsigned len)
+int goodix_i2c_write(struct i2c_client *client, u16 reg, const u8 *buf, int len)
{
u8 *addr_buf;
struct i2c_msg msg;
@@ -265,11 +209,18 @@ static int goodix_i2c_write(struct i2c_client *client, u16 reg, const u8 *buf,
msg.len = len + 2;
ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret >= 0)
+ ret = (ret == 1 ? 0 : -EIO);
+
kfree(addr_buf);
- return ret < 0 ? ret : (ret != 1 ? -EIO : 0);
+
+ if (ret)
+ dev_err(&client->dev, "Error writing %d bytes to 0x%04x: %d\n",
+ len, reg, ret);
+ return ret;
}
-static int goodix_i2c_write_u8(struct i2c_client *client, u16 reg, u8 value)
+int goodix_i2c_write_u8(struct i2c_client *client, u16 reg, u8 value)
{
return goodix_i2c_write(client, reg, &value, sizeof(value));
}
@@ -308,11 +259,8 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
do {
error = goodix_i2c_read(ts->client, addr, data,
header_contact_keycode_size);
- if (error) {
- dev_err(&ts->client->dev, "I2C transfer error: %d\n",
- error);
+ if (error)
return error;
- }
if (data[0] & GOODIX_BUFFER_STATUS_READY) {
touch_num = data[0] & 0x0f;
@@ -333,6 +281,11 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
return touch_num;
}
+ if (data[0] == 0 && ts->firmware_name) {
+ if (goodix_handle_fw_request(ts))
+ return 0;
+ }
+
usleep_range(1000, 2000); /* Poll every 1 - 2 ms */
} while (time_before(jiffies, max_timeout));
@@ -435,9 +388,7 @@ static irqreturn_t goodix_ts_irq_handler(int irq, void *dev_id)
struct goodix_ts_data *ts = dev_id;
goodix_process_events(ts);
-
- if (goodix_i2c_write_u8(ts->client, GOODIX_READ_COOR_ADDR, 0) < 0)
- dev_err(&ts->client->dev, "I2C write end_cmd error\n");
+ goodix_i2c_write_u8(ts->client, GOODIX_READ_COOR_ADDR, 0);
return IRQ_HANDLED;
}
@@ -553,7 +504,7 @@ static int goodix_check_cfg(struct goodix_ts_data *ts, const u8 *cfg, int len)
* @cfg: config firmware to write to device
* @len: config data length
*/
-static int goodix_send_cfg(struct goodix_ts_data *ts, const u8 *cfg, int len)
+int goodix_send_cfg(struct goodix_ts_data *ts, const u8 *cfg, int len)
{
int error;
@@ -562,11 +513,9 @@ static int goodix_send_cfg(struct goodix_ts_data *ts, const u8 *cfg, int len)
return error;
error = goodix_i2c_write(ts->client, ts->chip->config_addr, cfg, len);
- if (error) {
- dev_err(&ts->client->dev, "Failed to write config data: %d",
- error);
+ if (error)
return error;
- }
+
dev_dbg(&ts->client->dev, "Config sent successfully.");
/* Let the firmware reconfigure itself, so sleep for 10ms */
@@ -651,62 +600,82 @@ static int goodix_irq_direction_input(struct goodix_ts_data *ts)
return -EINVAL; /* Never reached */
}
-static int goodix_int_sync(struct goodix_ts_data *ts)
+int goodix_int_sync(struct goodix_ts_data *ts)
{
int error;
error = goodix_irq_direction_output(ts, 0);
if (error)
- return error;
+ goto error;
msleep(50); /* T5: 50ms */
error = goodix_irq_direction_input(ts);
if (error)
- return error;
+ goto error;
return 0;
+
+error:
+ dev_err(&ts->client->dev, "Controller irq sync failed.\n");
+ return error;
}
/**
- * goodix_reset - Reset device during power on
+ * goodix_reset_no_int_sync - Reset device, leaving interrupt line in output mode
*
* @ts: goodix_ts_data pointer
*/
-static int goodix_reset(struct goodix_ts_data *ts)
+int goodix_reset_no_int_sync(struct goodix_ts_data *ts)
{
int error;
/* begin select I2C slave addr */
error = gpiod_direction_output(ts->gpiod_rst, 0);
if (error)
- return error;
+ goto error;
msleep(20); /* T2: > 10ms */
/* HIGH: 0x28/0x29, LOW: 0xBA/0xBB */
error = goodix_irq_direction_output(ts, ts->client->addr == 0x14);
if (error)
- return error;
+ goto error;
usleep_range(100, 2000); /* T3: > 100us */
error = gpiod_direction_output(ts->gpiod_rst, 1);
if (error)
- return error;
+ goto error;
usleep_range(6000, 10000); /* T4: > 5ms */
/* end select I2C slave addr */
error = gpiod_direction_input(ts->gpiod_rst);
if (error)
- return error;
+ goto error;
- error = goodix_int_sync(ts);
+ return 0;
+
+error:
+ dev_err(&ts->client->dev, "Controller reset failed.\n");
+ return error;
+}
+
+/**
+ * goodix_reset - Reset device during power on
+ *
+ * @ts: goodix_ts_data pointer
+ */
+static int goodix_reset(struct goodix_ts_data *ts)
+{
+ int error;
+
+ error = goodix_reset_no_int_sync(ts);
if (error)
return error;
- return 0;
+ return goodix_int_sync(ts);
}
#ifdef ACPI_GPIO_SUPPORT
@@ -931,14 +900,19 @@ static void goodix_read_config(struct goodix_ts_data *ts)
int x_max, y_max;
int error;
- error = goodix_i2c_read(ts->client, ts->chip->config_addr,
- ts->config, ts->chip->config_len);
- if (error) {
- dev_warn(&ts->client->dev, "Error reading config: %d\n",
- error);
- ts->int_trigger_type = GOODIX_INT_TRIGGER;
- ts->max_touch_num = GOODIX_MAX_CONTACTS;
- return;
+ /*
+ * On controllers where we need to upload the firmware
+ * (controllers without flash) ts->config already has the config
+ * at this point and the controller itself does not have it yet!
+ */
+ if (!ts->firmware_name) {
+ error = goodix_i2c_read(ts->client, ts->chip->config_addr,
+ ts->config, ts->chip->config_len);
+ if (error) {
+ ts->int_trigger_type = GOODIX_INT_TRIGGER;
+ ts->max_touch_num = GOODIX_MAX_CONTACTS;
+ return;
+ }
}
ts->int_trigger_type = ts->config[TRIGGER_LOC] & 0x03;
@@ -966,10 +940,8 @@ static int goodix_read_version(struct goodix_ts_data *ts)
char id_str[GOODIX_ID_MAX_LEN + 1];
error = goodix_i2c_read(ts->client, GOODIX_REG_ID, buf, sizeof(buf));
- if (error) {
- dev_err(&ts->client->dev, "read version failed: %d\n", error);
+ if (error)
return error;
- }
memcpy(id_str, buf, GOODIX_ID_MAX_LEN);
id_str[GOODIX_ID_MAX_LEN] = 0;
@@ -995,13 +967,10 @@ static int goodix_i2c_test(struct i2c_client *client)
u8 test;
while (retry++ < 2) {
- error = goodix_i2c_read(client, GOODIX_REG_ID,
- &test, 1);
+ error = goodix_i2c_read(client, GOODIX_REG_ID, &test, 1);
if (!error)
return 0;
- dev_err(&client->dev, "i2c test failed attempt %d: %d\n",
- retry, error);
msleep(20);
}
@@ -1130,7 +1099,16 @@ static void goodix_config_cb(const struct firmware *cfg, void *ctx)
struct goodix_ts_data *ts = ctx;
int error;
- if (cfg) {
+ if (ts->firmware_name) {
+ if (!cfg)
+ goto err_release_cfg;
+
+ error = goodix_check_cfg(ts, cfg->data, cfg->size);
+ if (error)
+ goto err_release_cfg;
+
+ memcpy(ts->config, cfg->data, cfg->size);
+ } else if (cfg) {
/* send device configuration to the firmware */
error = goodix_send_cfg(ts, cfg->data, cfg->size);
if (error)
@@ -1156,6 +1134,7 @@ static int goodix_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct goodix_ts_data *ts;
+ const char *cfg_name;
int error;
dev_dbg(&client->dev, "I2C Address: 0x%02x\n", client->addr);
@@ -1205,10 +1184,8 @@ reset:
if (ts->reset_controller_at_probe) {
/* reset the controller */
error = goodix_reset(ts);
- if (error) {
- dev_err(&client->dev, "Controller reset failed.\n");
+ if (error)
return error;
- }
}
error = goodix_i2c_test(client);
@@ -1223,20 +1200,27 @@ reset:
return error;
}
+ error = goodix_firmware_check(ts);
+ if (error)
+ return error;
+
error = goodix_read_version(ts);
- if (error) {
- dev_err(&client->dev, "Read version failed.\n");
+ if (error)
return error;
- }
ts->chip = goodix_get_chip_data(ts->id);
if (ts->load_cfg_from_disk) {
/* update device config */
- ts->cfg_name = devm_kasprintf(&client->dev, GFP_KERNEL,
- "goodix_%s_cfg.bin", ts->id);
- if (!ts->cfg_name)
- return -ENOMEM;
+ error = device_property_read_string(&client->dev,
+ "goodix,config-name",
+ &cfg_name);
+ if (!error)
+ snprintf(ts->cfg_name, sizeof(ts->cfg_name),
+ "goodix/%s", cfg_name);
+ else
+ snprintf(ts->cfg_name, sizeof(ts->cfg_name),
+ "goodix_%s_cfg.bin", ts->id);
error = request_firmware_nowait(THIS_MODULE, true, ts->cfg_name,
&client->dev, GFP_KERNEL, ts,
@@ -1286,6 +1270,9 @@ static int __maybe_unused goodix_suspend(struct device *dev)
/* Free IRQ as IRQ pin is used as output in the suspend sequence */
goodix_free_irq(ts);
+ /* Save reference (calibration) info if necessary */
+ goodix_save_bak_ref(ts);
+
/* Output LOW on the INT pin for 5 ms */
error = goodix_irq_direction_output(ts, 0);
if (error) {
@@ -1298,7 +1285,6 @@ static int __maybe_unused goodix_suspend(struct device *dev)
error = goodix_i2c_write_u8(ts->client, GOODIX_REG_COMMAND,
GOODIX_CMD_SCREEN_OFF);
if (error) {
- dev_err(&ts->client->dev, "Screen off command failed\n");
goodix_irq_direction_input(ts);
goodix_request_irq(ts);
return -EAGAIN;
@@ -1341,19 +1327,14 @@ static int __maybe_unused goodix_resume(struct device *dev)
error = goodix_i2c_read(ts->client, ts->chip->config_addr,
&config_ver, 1);
- if (error)
- dev_warn(dev, "Error reading config version: %d, resetting controller\n",
- error);
- else if (config_ver != ts->config[0])
+ if (!error && config_ver != ts->config[0])
dev_info(dev, "Config version mismatch %d != %d, resetting controller\n",
config_ver, ts->config[0]);
if (error != 0 || config_ver != ts->config[0]) {
error = goodix_reset(ts);
- if (error) {
- dev_err(dev, "Controller reset failed.\n");
+ if (error)
return error;
- }
error = goodix_send_cfg(ts, ts->config, ts->chip->config_len);
if (error)
diff --git a/drivers/input/touchscreen/goodix.h b/drivers/input/touchscreen/goodix.h
new file mode 100644
index 00000000000000..62138f930d1aaa
--- /dev/null
+++ b/drivers/input/touchscreen/goodix.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __GOODIX_H__
+#define __GOODIX_H__
+
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
+#include <linux/regulator/consumer.h>
+
+/* Register defines */
+#define GOODIX_REG_MISCTL_DSP_CTL 0x4010
+#define GOODIX_REG_MISCTL_SRAM_BANK 0x4048
+#define GOODIX_REG_MISCTL_MEM_CD_EN 0x4049
+#define GOODIX_REG_MISCTL_CACHE_EN 0x404B
+#define GOODIX_REG_MISCTL_TMR0_EN 0x40B0
+#define GOODIX_REG_MISCTL_SWRST 0x4180
+#define GOODIX_REG_MISCTL_CPU_SWRST_PULSE 0x4184
+#define GOODIX_REG_MISCTL_BOOTCTL 0x4190
+#define GOODIX_REG_MISCTL_BOOT_OPT 0x4218
+#define GOODIX_REG_MISCTL_BOOT_CTL 0x5094
+
+#define GOODIX_REG_FW_SIG 0x8000
+#define GOODIX_FW_SIG_LEN 10
+
+#define GOODIX_REG_MAIN_CLK 0x8020
+#define GOODIX_MAIN_CLK_LEN 6
+
+#define GOODIX_REG_COMMAND 0x8040
+#define GOODIX_CMD_SCREEN_OFF 0x05
+
+#define GOODIX_REG_SW_WDT 0x8041
+
+#define GOODIX_REG_REQUEST 0x8043
+#define GOODIX_RQST_RESPONDED 0x00
+#define GOODIX_RQST_CONFIG 0x01
+#define GOODIX_RQST_BAK_REF 0x02
+#define GOODIX_RQST_RESET 0x03
+#define GOODIX_RQST_MAIN_CLOCK 0x04
+/*
+ * Unknown request which gets sent by the controller approx.
+ * every 34 seconds once it is up and running.
+ */
+#define GOODIX_RQST_UNKNOWN 0x06
+#define GOODIX_RQST_IDLE 0xFF
+
+#define GOODIX_REG_STATUS 0x8044
+
+#define GOODIX_GT1X_REG_CONFIG_DATA 0x8050
+#define GOODIX_GT9X_REG_CONFIG_DATA 0x8047
+#define GOODIX_REG_ID 0x8140
+#define GOODIX_READ_COOR_ADDR 0x814E
+#define GOODIX_REG_BAK_REF 0x99D0
+
+#define GOODIX_ID_MAX_LEN 4
+#define GOODIX_CONFIG_MAX_LENGTH 240
+#define GOODIX_MAX_KEYS 7
+
+enum goodix_irq_pin_access_method {
+ IRQ_PIN_ACCESS_NONE,
+ IRQ_PIN_ACCESS_GPIO,
+ IRQ_PIN_ACCESS_ACPI_GPIO,
+ IRQ_PIN_ACCESS_ACPI_METHOD,
+};
+
+struct goodix_ts_data;
+
+struct goodix_chip_data {
+ u16 config_addr;
+ int config_len;
+ int (*check_config)(struct goodix_ts_data *ts, const u8 *cfg, int len);
+ void (*calc_config_checksum)(struct goodix_ts_data *ts);
+};
+
+struct goodix_ts_data {
+ struct i2c_client *client;
+ struct input_dev *input_dev;
+ const struct goodix_chip_data *chip;
+ const char *firmware_name;
+ struct touchscreen_properties prop;
+ unsigned int max_touch_num;
+ unsigned int int_trigger_type;
+ struct regulator *avdd28;
+ struct regulator *vddio;
+ struct gpio_desc *gpiod_int;
+ struct gpio_desc *gpiod_rst;
+ int gpio_count;
+ int gpio_int_idx;
+ char id[GOODIX_ID_MAX_LEN + 1];
+ char cfg_name[64];
+ u16 version;
+ bool reset_controller_at_probe;
+ bool load_cfg_from_disk;
+ struct completion firmware_loading_complete;
+ unsigned long irq_flags;
+ enum goodix_irq_pin_access_method irq_pin_access_method;
+ unsigned int contact_size;
+ u8 config[GOODIX_CONFIG_MAX_LENGTH];
+ unsigned short keymap[GOODIX_MAX_KEYS];
+ u8 main_clk[GOODIX_MAIN_CLK_LEN];
+ int bak_ref_len;
+ u8 *bak_ref;
+};
+
+int goodix_i2c_read(struct i2c_client *client, u16 reg, u8 *buf, int len);
+int goodix_i2c_write(struct i2c_client *client, u16 reg, const u8 *buf, int len);
+int goodix_i2c_write_u8(struct i2c_client *client, u16 reg, u8 value);
+int goodix_send_cfg(struct goodix_ts_data *ts, const u8 *cfg, int len);
+int goodix_int_sync(struct goodix_ts_data *ts);
+int goodix_reset_no_int_sync(struct goodix_ts_data *ts);
+
+int goodix_firmware_check(struct goodix_ts_data *ts);
+bool goodix_handle_fw_request(struct goodix_ts_data *ts);
+void goodix_save_bak_ref(struct goodix_ts_data *ts);
+
+#endif
diff --git a/drivers/input/touchscreen/goodix_fwupload.c b/drivers/input/touchscreen/goodix_fwupload.c
new file mode 100644
index 00000000000000..c1e7a241307822
--- /dev/null
+++ b/drivers/input/touchscreen/goodix_fwupload.c
@@ -0,0 +1,427 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Goodix Touchscreen firmware upload support
+ *
+ * Copyright (c) 2021 Hans de Goede <hdegoede@redhat.com>
+ *
+ * This is a rewrite of gt9xx_update.c from the Allwinner H3 BSP which is:
+ * Copyright (c) 2010 - 2012 Goodix Technology.
+ * Author: andrew@goodix.com
+ */
+
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include "goodix.h"
+
+#define GOODIX_FW_HEADER_LENGTH sizeof(struct goodix_fw_header)
+#define GOODIX_FW_SECTION_LENGTH 0x2000
+#define GOODIX_FW_DSP_LENGTH 0x1000
+#define GOODIX_FW_UPLOAD_ADDRESS 0xc000
+
+#define GOODIX_CFG_LOC_HAVE_KEY 7
+#define GOODIX_CFG_LOC_DRVA_NUM 27
+#define GOODIX_CFG_LOC_DRVB_NUM 28
+#define GOODIX_CFG_LOC_SENS_NUM 29
+
+struct goodix_fw_header {
+ u8 hw_info[4];
+ u8 pid[8];
+ u8 vid[2];
+} __packed;
+
+static u16 goodix_firmware_checksum(const u8 *data, int size)
+{
+ u16 checksum = 0;
+ int i;
+
+ for (i = 0; i < size; i += 2)
+ checksum += (data[i] << 8) + data[i + 1];
+
+ return checksum;
+}
+
+static int goodix_firmware_verify(struct device *dev, const struct firmware *fw)
+{
+ const struct goodix_fw_header *fw_header;
+ size_t expected_size;
+ const u8 *data;
+ u16 checksum;
+ char buf[9];
+
+ expected_size = GOODIX_FW_HEADER_LENGTH + 4 * GOODIX_FW_SECTION_LENGTH +
+ GOODIX_FW_DSP_LENGTH;
+ if (fw->size != expected_size) {
+ dev_err(dev, "Firmware has wrong size, expected %zu got %zu\n",
+ expected_size, fw->size);
+ return -EINVAL;
+ }
+
+ data = fw->data + GOODIX_FW_HEADER_LENGTH;
+ checksum = goodix_firmware_checksum(data, 4 * GOODIX_FW_SECTION_LENGTH);
+ if (checksum) {
+ dev_err(dev, "Main firmware checksum error\n");
+ return -EINVAL;
+ }
+
+ data += 4 * GOODIX_FW_SECTION_LENGTH;
+ checksum = goodix_firmware_checksum(data, GOODIX_FW_DSP_LENGTH);
+ if (checksum) {
+ dev_err(dev, "DSP firmware checksum error\n");
+ return -EINVAL;
+ }
+
+ fw_header = (const struct goodix_fw_header *)fw->data;
+ dev_info(dev, "Firmware hardware info %02x%02x%02x%02x\n",
+ fw_header->hw_info[0], fw_header->hw_info[1],
+ fw_header->hw_info[2], fw_header->hw_info[3]);
+	/* pid is an 8 byte buffer containing a string, weird I know */
+ memcpy(buf, fw_header->pid, 8);
+ buf[8] = 0;
+ dev_info(dev, "Firmware PID: %s VID: %02x%02x\n", buf,
+ fw_header->vid[0], fw_header->vid[1]);
+ return 0;
+}
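A quick worked example of the invariant goodix_firmware_verify() checks: each firmware section ends in a big-endian 16-bit word chosen so that the 16-bit sum over the whole section wraps to zero. Standalone sketch, not driver code:

	#include <stdint.h>
	#include <stdio.h>

	/* Same arithmetic as goodix_firmware_checksum(): 16-bit big-endian sum. */
	static uint16_t fw_checksum(const uint8_t *data, int size)
	{
		uint16_t sum = 0;
		int i;

		for (i = 0; i < size; i += 2)
			sum += (data[i] << 8) + data[i + 1];
		return sum;
	}

	int main(void)
	{
		uint8_t section[8] = { 0x12, 0x34, 0xab, 0xcd, 0x00, 0x10, 0x00, 0x00 };
		uint16_t fix;

		/* Pick the trailing word so the section sums to 0 (mod 2^16). */
		fix = (uint16_t)(0 - fw_checksum(section, 6));
		section[6] = fix >> 8;
		section[7] = fix & 0xff;

		printf("checksum over section: 0x%04x\n", fw_checksum(section, 8));
		return 0;
	}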
+
+static int goodix_enter_upload_mode(struct i2c_client *client)
+{
+ int tries, error;
+ u8 val;
+
+ tries = 200;
+ do {
+ error = goodix_i2c_write_u8(client,
+ GOODIX_REG_MISCTL_SWRST, 0x0c);
+ if (error)
+ return error;
+
+ error = goodix_i2c_read(client,
+ GOODIX_REG_MISCTL_SWRST, &val, 1);
+ if (error)
+ return error;
+
+ if (val == 0x0c)
+ break;
+ } while (--tries);
+
+ if (!tries) {
+ dev_err(&client->dev, "Error could not hold ss51 & dsp\n");
+ return -EIO;
+ }
+
+ /* DSP_CK and DSP_ALU_CK PowerOn */
+ error = goodix_i2c_write_u8(client, GOODIX_REG_MISCTL_DSP_CTL, 0x00);
+ if (error)
+ return error;
+
+ /* Disable watchdog */
+ error = goodix_i2c_write_u8(client, GOODIX_REG_MISCTL_TMR0_EN, 0x00);
+ if (error)
+ return error;
+
+ /* Clear cache enable */
+ error = goodix_i2c_write_u8(client, GOODIX_REG_MISCTL_CACHE_EN, 0x00);
+ if (error)
+ return error;
+
+ /* Set boot from SRAM */
+ error = goodix_i2c_write_u8(client, GOODIX_REG_MISCTL_BOOTCTL, 0x02);
+ if (error)
+ return error;
+
+ /* Software reboot */
+ error = goodix_i2c_write_u8(client,
+ GOODIX_REG_MISCTL_CPU_SWRST_PULSE, 0x01);
+ if (error)
+ return error;
+
+ /* Clear control flag */
+ error = goodix_i2c_write_u8(client, GOODIX_REG_MISCTL_BOOTCTL, 0x00);
+ if (error)
+ return error;
+
+ /* Set scramble */
+ error = goodix_i2c_write_u8(client, GOODIX_REG_MISCTL_BOOT_OPT, 0x00);
+ if (error)
+ return error;
+
+ /* Enable accessing code */
+ error = goodix_i2c_write_u8(client, GOODIX_REG_MISCTL_MEM_CD_EN, 0x01);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static int goodix_start_firmware(struct i2c_client *client)
+{
+ int error;
+ u8 val;
+
+ /* Init software watchdog */
+ error = goodix_i2c_write_u8(client, GOODIX_REG_SW_WDT, 0xaa);
+ if (error)
+ return error;
+
+ /* Release SS51 & DSP */
+ error = goodix_i2c_write_u8(client, GOODIX_REG_MISCTL_SWRST, 0x00);
+ if (error)
+ return error;
+
+ error = goodix_i2c_read(client, GOODIX_REG_SW_WDT, &val, 1);
+ if (error)
+ return error;
+
+ /* The value we've written to SW_WDT should have been cleared now */
+ if (val == 0xaa) {
+ dev_err(&client->dev, "Error SW_WDT reg not cleared on fw startup\n");
+ return -EIO;
+ }
+
+ /* Re-init software watchdog */
+ error = goodix_i2c_write_u8(client, GOODIX_REG_SW_WDT, 0xaa);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static int goodix_firmware_upload(struct goodix_ts_data *ts)
+{
+ const struct firmware *fw;
+ char fw_name[64];
+ const u8 *data;
+ int error;
+
+ snprintf(fw_name, sizeof(fw_name), "goodix/%s", ts->firmware_name);
+
+ error = request_firmware(&fw, fw_name, &ts->client->dev);
+ if (error) {
+ dev_err(&ts->client->dev, "Firmware request error %d\n", error);
+ return error;
+ }
+
+ error = goodix_firmware_verify(&ts->client->dev, fw);
+ if (error)
+ goto release;
+
+ error = goodix_reset_no_int_sync(ts);
+ if (error)
+		goto release;
+
+ error = goodix_enter_upload_mode(ts->client);
+ if (error)
+ goto release;
+
+ /* Select SRAM bank 0 and upload section 1 & 2 */
+ error = goodix_i2c_write_u8(ts->client,
+ GOODIX_REG_MISCTL_SRAM_BANK, 0x00);
+ if (error)
+ goto release;
+
+ data = fw->data + GOODIX_FW_HEADER_LENGTH;
+ error = goodix_i2c_write(ts->client, GOODIX_FW_UPLOAD_ADDRESS,
+ data, 2 * GOODIX_FW_SECTION_LENGTH);
+ if (error)
+ goto release;
+
+ /* Select SRAM bank 1 and upload section 3 & 4 */
+ error = goodix_i2c_write_u8(ts->client,
+ GOODIX_REG_MISCTL_SRAM_BANK, 0x01);
+ if (error)
+ goto release;
+
+ data += 2 * GOODIX_FW_SECTION_LENGTH;
+ error = goodix_i2c_write(ts->client, GOODIX_FW_UPLOAD_ADDRESS,
+ data, 2 * GOODIX_FW_SECTION_LENGTH);
+ if (error)
+ goto release;
+
+ /* Select SRAM bank 2 and upload the DSP firmware */
+ error = goodix_i2c_write_u8(ts->client,
+ GOODIX_REG_MISCTL_SRAM_BANK, 0x02);
+ if (error)
+ goto release;
+
+ data += 2 * GOODIX_FW_SECTION_LENGTH;
+ error = goodix_i2c_write(ts->client, GOODIX_FW_UPLOAD_ADDRESS,
+ data, GOODIX_FW_DSP_LENGTH);
+ if (error)
+ goto release;
+
+ error = goodix_start_firmware(ts->client);
+ if (error)
+ goto release;
+
+ error = goodix_int_sync(ts);
+release:
+ release_firmware(fw);
+ return error;
+}
+
+static int goodix_prepare_bak_ref(struct goodix_ts_data *ts)
+{
+ u8 have_key, driver_num, sensor_num;
+
+ if (ts->bak_ref)
+ return 0; /* Already done */
+
+ have_key = (ts->config[GOODIX_CFG_LOC_HAVE_KEY] & 0x01);
+
+ driver_num = (ts->config[GOODIX_CFG_LOC_DRVA_NUM] & 0x1f) +
+ (ts->config[GOODIX_CFG_LOC_DRVB_NUM] & 0x1f);
+ if (have_key)
+ driver_num--;
+
+ sensor_num = (ts->config[GOODIX_CFG_LOC_SENS_NUM] & 0x0f) +
+ ((ts->config[GOODIX_CFG_LOC_SENS_NUM] >> 4) & 0x0f);
+
+ dev_dbg(&ts->client->dev, "Drv %d Sen %d Key %d\n",
+ driver_num, sensor_num, have_key);
+
+ ts->bak_ref_len = (driver_num * (sensor_num - 2) + 2) * 2;
+
+ ts->bak_ref = devm_kzalloc(&ts->client->dev,
+ ts->bak_ref_len, GFP_KERNEL);
+ if (!ts->bak_ref)
+ return -ENOMEM;
+
+ /*
+ * The bak_ref array contains the backup of an array of (self/auto)
+ * calibration related values which the Android version of the driver
+ * stores on the filesystem so that it can be restored after reboot.
+ * The mainline kernel never writes directly to the filesystem like
+	 * this; we always start with all the values which give a correction
+ * factor in approx. the -20 - +20 range (in 2s complement) set to 0.
+ *
+ * Note the touchscreen works fine without restoring the reference
+ * values after a reboot / power-cycle.
+ *
+ * The last 2 bytes are a 16 bits unsigned checksum which is expected
+	 * to make the sum of all 16 bit unsigned values in the array add
+	 * up to 1 (rather than the usual 0), so we must set the last byte to 1.
+ */
+ ts->bak_ref[ts->bak_ref_len - 1] = 1;
+
+ return 0;
+}
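A tiny standalone check of the invariant described in the comment above, assuming big-endian 16-bit words consistent with the firmware checksum: an all-zero buffer whose last byte is set to 1 sums to exactly 1.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		uint8_t bak_ref[32];
		uint16_t sum = 0;
		int i;

		memset(bak_ref, 0, sizeof(bak_ref));
		bak_ref[sizeof(bak_ref) - 1] = 1;	/* as the driver does */

		/* sum of big-endian 16-bit words over the whole buffer */
		for (i = 0; i < (int)sizeof(bak_ref); i += 2)
			sum += (bak_ref[i] << 8) + bak_ref[i + 1];

		printf("sum = %u\n", sum);	/* prints 1 */
		return 0;
	}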
+
+static int goodix_send_main_clock(struct goodix_ts_data *ts)
+{
+ u32 main_clk = 54; /* Default main clock */
+ u8 checksum = 0;
+ int i;
+
+ device_property_read_u32(&ts->client->dev,
+ "goodix,main-clk", &main_clk);
+
+ for (i = 0; i < (GOODIX_MAIN_CLK_LEN - 1); i++) {
+ ts->main_clk[i] = main_clk;
+ checksum += main_clk;
+ }
+
+	/* The sum of all bytes must be 0 (mod 256) */
+ ts->main_clk[GOODIX_MAIN_CLK_LEN - 1] = 256 - checksum;
+
+ return goodix_i2c_write(ts->client, GOODIX_REG_MAIN_CLK,
+ ts->main_clk, GOODIX_MAIN_CLK_LEN);
+}
+
+int goodix_firmware_check(struct goodix_ts_data *ts)
+{
+ device_property_read_string(&ts->client->dev,
+ "firmware-name", &ts->firmware_name);
+ if (!ts->firmware_name)
+ return 0;
+
+ if (ts->irq_pin_access_method == IRQ_PIN_ACCESS_NONE) {
+ dev_err(&ts->client->dev, "Error no IRQ-pin access method, cannot upload fw.\n");
+ return -EINVAL;
+ }
+
+ dev_info(&ts->client->dev, "Touchscreen controller needs fw-upload\n");
+ ts->load_cfg_from_disk = true;
+
+ return goodix_firmware_upload(ts);
+}
+
+bool goodix_handle_fw_request(struct goodix_ts_data *ts)
+{
+ int error;
+ u8 val;
+
+ error = goodix_i2c_read(ts->client, GOODIX_REG_REQUEST, &val, 1);
+ if (error)
+ return false;
+
+ switch (val) {
+ case GOODIX_RQST_RESPONDED:
+ /*
+ * If we read back our own last ack the IRQ was not for
+ * a request.
+ */
+ return false;
+ case GOODIX_RQST_CONFIG:
+ error = goodix_send_cfg(ts, ts->config, ts->chip->config_len);
+ if (error)
+ return false;
+
+ break;
+ case GOODIX_RQST_BAK_REF:
+ error = goodix_prepare_bak_ref(ts);
+ if (error)
+ return false;
+
+ error = goodix_i2c_write(ts->client, GOODIX_REG_BAK_REF,
+ ts->bak_ref, ts->bak_ref_len);
+ if (error)
+ return false;
+
+ break;
+ case GOODIX_RQST_RESET:
+ error = goodix_firmware_upload(ts);
+ if (error)
+ return false;
+
+ break;
+ case GOODIX_RQST_MAIN_CLOCK:
+ error = goodix_send_main_clock(ts);
+ if (error)
+ return false;
+
+ break;
+ case GOODIX_RQST_UNKNOWN:
+ case GOODIX_RQST_IDLE:
+ break;
+ default:
+ dev_err_ratelimited(&ts->client->dev, "Unknown Request: 0x%02x\n", val);
+ }
+
+ /* Ack the request */
+ goodix_i2c_write_u8(ts->client,
+ GOODIX_REG_REQUEST, GOODIX_RQST_RESPONDED);
+ return true;
+}
+
+void goodix_save_bak_ref(struct goodix_ts_data *ts)
+{
+ int error;
+ u8 val;
+
+ if (!ts->firmware_name)
+ return;
+
+ error = goodix_i2c_read(ts->client, GOODIX_REG_STATUS, &val, 1);
+ if (error)
+ return;
+
+ if (!(val & 0x80))
+ return;
+
+ error = goodix_i2c_read(ts->client, GOODIX_REG_BAK_REF,
+ ts->bak_ref, ts->bak_ref_len);
+ if (error) {
+ memset(ts->bak_ref, 0, ts->bak_ref_len);
+ ts->bak_ref[ts->bak_ref_len - 1] = 1;
+ }
+}
diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
index 30576a5f2f045d..2bd407d86bae52 100644
--- a/drivers/input/touchscreen/ili210x.c
+++ b/drivers/input/touchscreen/ili210x.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/crc-ccitt.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
+#include <linux/ihex.h>
#include <linux/input.h>
#include <linux/input/mt.h>
#include <linux/input/touchscreen.h>
@@ -12,7 +14,7 @@
#include <linux/slab.h>
#include <asm/unaligned.h>
-#define ILI2XXX_POLL_PERIOD 20
+#define ILI2XXX_POLL_PERIOD 15
#define ILI210X_DATA_SIZE 64
#define ILI211X_DATA_SIZE 43
@@ -22,8 +24,23 @@
/* Touchscreen commands */
#define REG_TOUCHDATA 0x10
#define REG_PANEL_INFO 0x20
+#define REG_FIRMWARE_VERSION 0x40
+#define REG_PROTOCOL_VERSION 0x42
+#define REG_KERNEL_VERSION 0x61
+#define REG_IC_BUSY 0x80
+#define REG_IC_BUSY_NOT_BUSY 0x50
+#define REG_GET_MODE 0xc0
+#define REG_GET_MODE_AP 0x5a
+#define REG_GET_MODE_BL 0x55
+#define REG_SET_MODE_AP 0xc1
+#define REG_SET_MODE_BL 0xc2
+#define REG_WRITE_DATA 0xc3
+#define REG_WRITE_ENABLE 0xc4
+#define REG_READ_DATA_CRC 0xc7
#define REG_CALIBRATE 0xcc
+#define ILI251X_FW_FILENAME "ilitek/ili251x.bin"
+
struct ili2xxx_chip {
int (*read_reg)(struct i2c_client *client, u8 reg,
void *buf, size_t len);
@@ -35,6 +52,7 @@ struct ili2xxx_chip {
unsigned int max_touches;
unsigned int resolution;
bool has_calibrate_reg;
+ bool has_firmware_proto;
bool has_pressure_reg;
};
@@ -44,6 +62,10 @@ struct ili210x {
struct gpio_desc *reset_gpio;
struct touchscreen_properties prop;
const struct ili2xxx_chip *chip;
+ u8 version_firmware[8];
+ u8 version_kernel[5];
+ u8 version_proto[2];
+ u8 ic_mode[2];
bool stop;
};
@@ -202,15 +224,17 @@ static const struct ili2xxx_chip ili212x_chip = {
.has_calibrate_reg = true,
};
-static int ili251x_read_reg(struct i2c_client *client,
- u8 reg, void *buf, size_t len)
+static int ili251x_read_reg_common(struct i2c_client *client,
+ u8 reg, void *buf, size_t len,
+ unsigned int delay)
{
int error;
int ret;
ret = i2c_master_send(client, &reg, 1);
if (ret == 1) {
- usleep_range(5000, 5500);
+ if (delay)
+ usleep_range(delay, delay + 500);
ret = i2c_master_recv(client, buf, len);
if (ret == len)
@@ -222,12 +246,18 @@ static int ili251x_read_reg(struct i2c_client *client,
return ret;
}
+static int ili251x_read_reg(struct i2c_client *client,
+ u8 reg, void *buf, size_t len)
+{
+ return ili251x_read_reg_common(client, reg, buf, len, 5000);
+}
+
static int ili251x_read_touch_data(struct i2c_client *client, u8 *data)
{
int error;
- error = ili251x_read_reg(client, REG_TOUCHDATA,
- data, ILI251X_DATA_SIZE1);
+ error = ili251x_read_reg_common(client, REG_TOUCHDATA,
+ data, ILI251X_DATA_SIZE1, 0);
if (!error && data[0] == 2) {
error = i2c_master_recv(client, data + ILI251X_DATA_SIZE1,
ILI251X_DATA_SIZE2);
@@ -268,6 +298,7 @@ static const struct ili2xxx_chip ili251x_chip = {
.continue_polling = ili251x_check_continue_polling,
.max_touches = 10,
.has_calibrate_reg = true,
+ .has_firmware_proto = true,
.has_pressure_reg = true,
};
@@ -303,10 +334,13 @@ static irqreturn_t ili210x_irq(int irq, void *irq_data)
const struct ili2xxx_chip *chip = priv->chip;
u8 touchdata[ILI210X_DATA_SIZE] = { 0 };
bool keep_polling;
+ ktime_t time_next;
+ s64 time_delta;
bool touch;
int error;
do {
+ time_next = ktime_add_ms(ktime_get(), ILI2XXX_POLL_PERIOD);
error = chip->get_touch_data(client, touchdata);
if (error) {
dev_err(&client->dev,
@@ -316,13 +350,201 @@ static irqreturn_t ili210x_irq(int irq, void *irq_data)
touch = ili210x_report_events(priv, touchdata);
keep_polling = chip->continue_polling(touchdata, touch);
- if (keep_polling)
- msleep(ILI2XXX_POLL_PERIOD);
+ if (keep_polling) {
+ time_delta = ktime_us_delta(time_next, ktime_get());
+ if (time_delta > 0)
+ usleep_range(time_delta, time_delta + 1000);
+ }
} while (!priv->stop && keep_polling);
return IRQ_HANDLED;
}
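The timing change above aims for a steady poll period rather than a fixed sleep stacked on top of a variable-length I2C transfer: the deadline is taken before the work, and only the remainder of ILI2XXX_POLL_PERIOD is slept. A condensed sketch of the same idea, assuming kernel context (names trimmed, not the driver's actual code):

	#include <linux/ktime.h>
	#include <linux/delay.h>
	#include <linux/types.h>

	#define POLL_PERIOD_MS	15

	/* Poll at a steady period: compute the deadline before the work,
	 * then sleep only for whatever part of the period is left over. */
	static void poll_steady(bool (*do_work)(void *), void *ctx, bool *stop)
	{
		ktime_t next;
		s64 remaining_us;

		do {
			next = ktime_add_ms(ktime_get(), POLL_PERIOD_MS);

			if (!do_work(ctx))
				break;

			remaining_us = ktime_us_delta(next, ktime_get());
			if (remaining_us > 0)
				usleep_range(remaining_us, remaining_us + 1000);
		} while (!*stop);
	}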
+static int ili251x_firmware_update_resolution(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ili210x *priv = i2c_get_clientdata(client);
+ u16 resx, resy;
+ u8 rs[10];
+ int error;
+
+ /* The firmware update blob might have changed the resolution. */
+ error = priv->chip->read_reg(client, REG_PANEL_INFO, &rs, sizeof(rs));
+ if (error)
+ return error;
+
+ resx = le16_to_cpup((__le16 *)rs);
+ resy = le16_to_cpup((__le16 *)(rs + 2));
+
+ /* The value reported by the firmware is invalid. */
+ if (!resx || resx == 0xffff || !resy || resy == 0xffff)
+ return -EINVAL;
+
+ input_abs_set_max(priv->input, ABS_X, resx - 1);
+ input_abs_set_max(priv->input, ABS_Y, resy - 1);
+ input_abs_set_max(priv->input, ABS_MT_POSITION_X, resx - 1);
+ input_abs_set_max(priv->input, ABS_MT_POSITION_Y, resy - 1);
+
+ return 0;
+}
+
+static ssize_t ili251x_firmware_update_firmware_version(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ili210x *priv = i2c_get_clientdata(client);
+ int error;
+ u8 fw[8];
+
+ /* Get firmware version */
+ error = priv->chip->read_reg(client, REG_FIRMWARE_VERSION,
+ &fw, sizeof(fw));
+ if (!error)
+ memcpy(priv->version_firmware, fw, sizeof(fw));
+
+ return error;
+}
+
+static ssize_t ili251x_firmware_update_kernel_version(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ili210x *priv = i2c_get_clientdata(client);
+ int error;
+ u8 kv[5];
+
+ /* Get kernel version */
+ error = priv->chip->read_reg(client, REG_KERNEL_VERSION,
+ &kv, sizeof(kv));
+ if (!error)
+ memcpy(priv->version_kernel, kv, sizeof(kv));
+
+ return error;
+}
+
+static ssize_t ili251x_firmware_update_protocol_version(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ili210x *priv = i2c_get_clientdata(client);
+ int error;
+ u8 pv[2];
+
+ /* Get protocol version */
+ error = priv->chip->read_reg(client, REG_PROTOCOL_VERSION,
+ &pv, sizeof(pv));
+ if (!error)
+ memcpy(priv->version_proto, pv, sizeof(pv));
+
+ return error;
+}
+
+static ssize_t ili251x_firmware_update_ic_mode(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ili210x *priv = i2c_get_clientdata(client);
+ int error;
+ u8 md[2];
+
+ /* Get chip boot mode */
+ error = priv->chip->read_reg(client, REG_GET_MODE, &md, sizeof(md));
+ if (!error)
+ memcpy(priv->ic_mode, md, sizeof(md));
+
+ return error;
+}
+
+static int ili251x_firmware_update_cached_state(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ili210x *priv = i2c_get_clientdata(client);
+ int error;
+
+ if (!priv->chip->has_firmware_proto)
+ return 0;
+
+ /* Wait for firmware to boot and stabilize itself. */
+ msleep(200);
+
+ /* Firmware does report valid information. */
+ error = ili251x_firmware_update_resolution(dev);
+ if (error)
+ return error;
+
+ error = ili251x_firmware_update_firmware_version(dev);
+ if (error)
+ return error;
+
+ error = ili251x_firmware_update_kernel_version(dev);
+ if (error)
+ return error;
+
+ error = ili251x_firmware_update_protocol_version(dev);
+ if (error)
+ return error;
+
+ error = ili251x_firmware_update_ic_mode(dev);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static ssize_t ili251x_firmware_version_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ili210x *priv = i2c_get_clientdata(client);
+ u8 *fw = priv->version_firmware;
+
+ return sysfs_emit(buf, "%02x%02x.%02x%02x.%02x%02x.%02x%02x\n",
+ fw[0], fw[1], fw[2], fw[3],
+ fw[4], fw[5], fw[6], fw[7]);
+}
+static DEVICE_ATTR(firmware_version, 0444, ili251x_firmware_version_show, NULL);
+
+static ssize_t ili251x_kernel_version_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ili210x *priv = i2c_get_clientdata(client);
+ u8 *kv = priv->version_kernel;
+
+ return sysfs_emit(buf, "%02x.%02x.%02x.%02x.%02x\n",
+ kv[0], kv[1], kv[2], kv[3], kv[4]);
+}
+static DEVICE_ATTR(kernel_version, 0444, ili251x_kernel_version_show, NULL);
+
+static ssize_t ili251x_protocol_version_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ili210x *priv = i2c_get_clientdata(client);
+ u8 *pv = priv->version_proto;
+
+ return sysfs_emit(buf, "%02x.%02x\n", pv[0], pv[1]);
+}
+static DEVICE_ATTR(protocol_version, 0444, ili251x_protocol_version_show, NULL);
+
+static ssize_t ili251x_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ili210x *priv = i2c_get_clientdata(client);
+ u8 *md = priv->ic_mode;
+ char *mode = "AP";
+
+ if (md[0] == REG_GET_MODE_AP) /* Application Mode */
+ mode = "AP";
+ else if (md[0] == REG_GET_MODE_BL) /* BootLoader Mode */
+ mode = "BL";
+ else /* Unknown Mode */
+ mode = "??";
+
+ return sysfs_emit(buf, "%02x.%02x:%s\n", md[0], md[1], mode);
+}
+static DEVICE_ATTR(mode, 0444, ili251x_mode_show, NULL);
+
static ssize_t ili210x_calibrate(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -349,24 +571,333 @@ static ssize_t ili210x_calibrate(struct device *dev,
}
static DEVICE_ATTR(calibrate, S_IWUSR, NULL, ili210x_calibrate);
+static int ili251x_firmware_to_buffer(const struct firmware *fw,
+ u8 **buf, u16 *ac_end, u16 *df_end)
+{
+ const struct ihex_binrec *rec;
+ u32 fw_addr, fw_last_addr = 0;
+ u16 fw_len;
+ u8 *fw_buf;
+ int error;
+
+ /*
+ * The firmware ihex blob can never be bigger than 64 kiB, so make this
+ * simple -- allocate a 64 kiB buffer, iterate over the ihex blob records
+ * once, copy them all into this buffer at the right locations, and then
+ * do all operations on this linear buffer.
+ */
+ fw_buf = kzalloc(SZ_64K, GFP_KERNEL);
+ if (!fw_buf)
+ return -ENOMEM;
+
+ rec = (const struct ihex_binrec *)fw->data;
+ while (rec) {
+ fw_addr = be32_to_cpu(rec->addr);
+ fw_len = be16_to_cpu(rec->len);
+
+ /* The last 32 Byte firmware block can be 0xffe0 */
+ if (fw_addr + fw_len > SZ_64K || fw_addr > SZ_64K - 32) {
+ error = -EFBIG;
+ goto err_big;
+ }
+
+ /* Find the last address before DF start address, that is AC end */
+ if (fw_addr == 0xf000)
+ *ac_end = fw_last_addr;
+ fw_last_addr = fw_addr + fw_len;
+
+ memcpy(fw_buf + fw_addr, rec->data, fw_len);
+ rec = ihex_next_binrec(rec);
+ }
+
+ /* DF end address is the last address in the firmware blob */
+ *df_end = fw_addr + fw_len;
+ *buf = fw_buf;
+ return 0;
+
+err_big:
+ kfree(fw_buf);
+ return error;
+}
+
+/* Switch mode between Application and BootLoader */
+static int ili251x_switch_ic_mode(struct i2c_client *client, u8 cmd_mode)
+{
+ struct ili210x *priv = i2c_get_clientdata(client);
+ u8 cmd_wren[3] = { REG_WRITE_ENABLE, 0x5a, 0xa5 };
+ u8 md[2];
+ int error;
+
+ error = priv->chip->read_reg(client, REG_GET_MODE, md, sizeof(md));
+ if (error)
+ return error;
+ /* Mode already set */
+ if ((cmd_mode == REG_SET_MODE_AP && md[0] == REG_GET_MODE_AP) ||
+ (cmd_mode == REG_SET_MODE_BL && md[0] == REG_GET_MODE_BL))
+ return 0;
+
+ /* Unlock writes */
+ error = i2c_master_send(client, cmd_wren, sizeof(cmd_wren));
+ if (error != sizeof(cmd_wren))
+ return -EINVAL;
+
+ mdelay(20);
+
+ /* Select mode (BootLoader or Application) */
+ error = i2c_master_send(client, &cmd_mode, 1);
+ if (error != 1)
+ return -EINVAL;
+
+ mdelay(200); /* Reboot into bootloader takes a lot of time ... */
+
+ /* Read back mode */
+ error = priv->chip->read_reg(client, REG_GET_MODE, md, sizeof(md));
+ if (error)
+ return error;
+ /* Check if mode is correct now. */
+ if ((cmd_mode == REG_SET_MODE_AP && md[0] == REG_GET_MODE_AP) ||
+ (cmd_mode == REG_SET_MODE_BL && md[0] == REG_GET_MODE_BL))
+ return 0;
+
+ return -EINVAL;
+}
+
+static int ili251x_firmware_busy(struct i2c_client *client)
+{
+ struct ili210x *priv = i2c_get_clientdata(client);
+ int error, i = 0;
+ u8 data;
+
+ do {
+ /* The read_reg already contains suitable delay */
+ error = priv->chip->read_reg(client, REG_IC_BUSY, &data, 1);
+ if (error)
+ return error;
+ if (i++ == 100000)
+ return -ETIMEDOUT;
+ } while (data != REG_IC_BUSY_NOT_BUSY);
+
+ return 0;
+}
+
+static int ili251x_firmware_write_to_ic(struct device *dev, u8 *fwbuf,
+ u16 start, u16 end, u8 dataflash)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ili210x *priv = i2c_get_clientdata(client);
+ u8 cmd_crc = REG_READ_DATA_CRC;
+ u8 crcrb[4] = { 0 };
+ u8 fw_data[33];
+ u16 fw_addr;
+ int error;
+
+ /*
+ * The DF (dataflash) needs 2 bytes offset for unknown reasons,
+ * the AC (application) has 2 bytes CRC16-CCITT at the end.
+ */
+ u16 crc = crc_ccitt(0, fwbuf + start + (dataflash ? 2 : 0),
+ end - start - 2);
+
+ /* Unlock write to either AC (application) or DF (dataflash) area */
+ u8 cmd_wr[10] = {
+ REG_WRITE_ENABLE, 0x5a, 0xa5, dataflash,
+ (end >> 16) & 0xff, (end >> 8) & 0xff, end & 0xff,
+ (crc >> 16) & 0xff, (crc >> 8) & 0xff, crc & 0xff
+ };
+
+ error = i2c_master_send(client, cmd_wr, sizeof(cmd_wr));
+ if (error != sizeof(cmd_wr))
+ return -EINVAL;
+
+ error = ili251x_firmware_busy(client);
+ if (error)
+ return error;
+
+ for (fw_addr = start; fw_addr < end; fw_addr += 32) {
+ fw_data[0] = REG_WRITE_DATA;
+ memcpy(&(fw_data[1]), fwbuf + fw_addr, 32);
+ error = i2c_master_send(client, fw_data, 33);
+ if (error != sizeof(fw_data))
+ return error;
+ error = ili251x_firmware_busy(client);
+ if (error)
+ return error;
+ }
+
+ error = i2c_master_send(client, &cmd_crc, 1);
+ if (error != 1)
+ return -EINVAL;
+
+ error = ili251x_firmware_busy(client);
+ if (error)
+ return error;
+
+ error = priv->chip->read_reg(client, REG_READ_DATA_CRC,
+ &crcrb, sizeof(crcrb));
+ if (error)
+ return error;
+
+ /* Check CRC readback */
+ if ((crcrb[0] != (crc & 0xff)) || crcrb[1] != ((crc >> 8) & 0xff))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ili251x_firmware_reset(struct i2c_client *client)
+{
+ u8 cmd_reset[2] = { 0xf2, 0x01 };
+ int error;
+
+ error = i2c_master_send(client, cmd_reset, sizeof(cmd_reset));
+ if (error != sizeof(cmd_reset))
+ return -EINVAL;
+
+ return ili251x_firmware_busy(client);
+}
+
+static void ili251x_hardware_reset(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ili210x *priv = i2c_get_clientdata(client);
+
+ /* Reset the controller */
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ usleep_range(10000, 15000);
+ gpiod_set_value_cansleep(priv->reset_gpio, 0);
+ msleep(300);
+}
+
+static ssize_t ili210x_firmware_update_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ const char *fwname = ILI251X_FW_FILENAME;
+ const struct firmware *fw;
+ u16 ac_end, df_end;
+ u8 *fwbuf;
+ int error;
+ int i;
+
+ error = request_ihex_firmware(&fw, fwname, dev);
+ if (error) {
+ dev_err(dev, "Failed to request firmware %s, error=%d\n",
+ fwname, error);
+ return error;
+ }
+
+ error = ili251x_firmware_to_buffer(fw, &fwbuf, &ac_end, &df_end);
+ release_firmware(fw);
+ if (error)
+ return error;
+
+ /*
+ * Disable touchscreen IRQ, so that we would not get spurious touch
+ * interrupt during firmware update, and so that the IRQ handler won't
+ * trigger and interfere with the firmware update. There is no bit in
+ * the touch controller to disable the IRQs during update, so we have
+ * to do it this way here.
+ */
+ disable_irq(client->irq);
+
+ dev_dbg(dev, "Firmware update started, firmware=%s\n", fwname);
+
+ ili251x_hardware_reset(dev);
+
+ error = ili251x_firmware_reset(client);
+ if (error)
+ goto exit;
+
+ /* This may not succeed on the first try, so retry a few times. */
+ for (i = 0; i < 5; i++) {
+ error = ili251x_switch_ic_mode(client, REG_SET_MODE_BL);
+ if (!error)
+ break;
+ }
+
+ if (error)
+ goto exit;
+
+ dev_dbg(dev, "IC is now in BootLoader mode\n");
+
+ msleep(200); /* The bootloader seems to need some time too. */
+
+ error = ili251x_firmware_write_to_ic(dev, fwbuf, 0xf000, df_end, 1);
+ if (error) {
+ dev_err(dev, "DF firmware update failed, error=%d\n", error);
+ goto exit;
+ }
+
+ dev_dbg(dev, "DataFlash firmware written\n");
+
+ error = ili251x_firmware_write_to_ic(dev, fwbuf, 0x2000, ac_end, 0);
+ if (error) {
+ dev_err(dev, "AC firmware update failed, error=%d\n", error);
+ goto exit;
+ }
+
+ dev_dbg(dev, "Application firmware written\n");
+
+ /* This may not succeed on the first try, so retry a few times. */
+ for (i = 0; i < 5; i++) {
+ error = ili251x_switch_ic_mode(client, REG_SET_MODE_AP);
+ if (!error)
+ break;
+ }
+
+ if (error)
+ goto exit;
+
+ dev_dbg(dev, "IC is now in Application mode\n");
+
+ error = ili251x_firmware_update_cached_state(dev);
+ if (error)
+ goto exit;
+
+ error = count;
+
+exit:
+ ili251x_hardware_reset(dev);
+ dev_dbg(dev, "Firmware update ended, error=%i\n", error);
+ enable_irq(client->irq);
+ kfree(fwbuf);
+ return error;
+}
+
+static DEVICE_ATTR(firmware_update, 0200, NULL, ili210x_firmware_update_store);
+
static struct attribute *ili210x_attributes[] = {
&dev_attr_calibrate.attr,
+ &dev_attr_firmware_update.attr,
+ &dev_attr_firmware_version.attr,
+ &dev_attr_kernel_version.attr,
+ &dev_attr_protocol_version.attr,
+ &dev_attr_mode.attr,
NULL,
};
-static umode_t ili210x_calibrate_visible(struct kobject *kobj,
+static umode_t ili210x_attributes_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
struct device *dev = kobj_to_dev(kobj);
struct i2c_client *client = to_i2c_client(dev);
struct ili210x *priv = i2c_get_clientdata(client);
- return priv->chip->has_calibrate_reg ? attr->mode : 0;
+ /* Calibrate is present on all ILI2xxx which have a calibrate register */
+ if (attr == &dev_attr_calibrate.attr)
+ return priv->chip->has_calibrate_reg ? attr->mode : 0;
+
+ /* Firmware/Kernel/Protocol/BootMode is implemented only for ILI251x */
+ if (!priv->chip->has_firmware_proto)
+ return 0;
+
+ return attr->mode;
}
static const struct attribute_group ili210x_attr_group = {
.attrs = ili210x_attributes,
- .is_visible = ili210x_calibrate_visible,
+ .is_visible = ili210x_attributes_visible,
};
static void ili210x_power_down(void *data)
@@ -449,6 +980,12 @@ static int ili210x_i2c_probe(struct i2c_client *client,
input_set_abs_params(input, ABS_MT_POSITION_Y, 0, max_xy, 0, 0);
if (priv->chip->has_pressure_reg)
input_set_abs_params(input, ABS_MT_PRESSURE, 0, 0xa, 0, 0);
+ error = ili251x_firmware_update_cached_state(dev);
+ if (error) {
+ dev_err(dev, "Unable to cache firmware information, err: %d\n",
+ error);
+ return error;
+ }
touchscreen_parse_properties(input, true, &priv->prop);
error = input_mt_init_slots(input, priv->chip->max_touches,
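The new firmware_update attribute is write-only (mode 0200) and kicks off the whole flash sequence from ILI251X_FW_FILENAME whenever anything is written to it. As an illustrative sketch only, not part of the patch, a user-space trigger could look like the following; the sysfs path depends on the I2C bus number and client address and is shown here as a placeholder.

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* Placeholder path: adjust the bus/address for the actual device. */
	int fd = open("/sys/bus/i2c/devices/1-0041/firmware_update", O_WRONLY);

	if (fd < 0)
		return 1;
	/* The store() routine ignores the payload; any write starts the update. */
	if (write(fd, "1", 1) < 0) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}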
diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c
index 4d2d22a8697732..3a4952935366f9 100644
--- a/drivers/input/touchscreen/raydium_i2c_ts.c
+++ b/drivers/input/touchscreen/raydium_i2c_ts.c
@@ -37,6 +37,7 @@
#define RM_CMD_BOOT_READ 0x44 /* send wait bl data ready*/
#define RM_BOOT_RDY 0xFF /* bl data ready */
+#define RM_BOOT_CMD_READHWID 0x0E /* read hwid */
/* I2C main commands */
#define RM_CMD_QUERY_BANK 0x2B
@@ -290,6 +291,44 @@ static int raydium_i2c_sw_reset(struct i2c_client *client)
return 0;
}
+static int raydium_i2c_query_ts_bootloader_info(struct raydium_data *ts)
+{
+ struct i2c_client *client = ts->client;
+ static const u8 get_hwid[] = { RM_BOOT_CMD_READHWID,
+ 0x10, 0xc0, 0x01, 0x00, 0x04, 0x00 };
+ u8 rbuf[5] = { 0 };
+ u32 hw_ver;
+ int error;
+
+ error = raydium_i2c_send(client, RM_CMD_BOOT_WRT,
+ get_hwid, sizeof(get_hwid));
+ if (error) {
+ dev_err(&client->dev, "WRT HWID command failed: %d\n", error);
+ return error;
+ }
+
+ error = raydium_i2c_send(client, RM_CMD_BOOT_ACK, rbuf, 1);
+ if (error) {
+ dev_err(&client->dev, "Ack HWID command failed: %d\n", error);
+ return error;
+ }
+
+ error = raydium_i2c_read(client, RM_CMD_BOOT_CHK, rbuf, sizeof(rbuf));
+ if (error) {
+ dev_err(&client->dev, "Read HWID command failed: %d (%4ph)\n",
+ error, rbuf + 1);
+ hw_ver = 0xffffffffUL;
+ } else {
+ hw_ver = get_unaligned_be32(rbuf + 1);
+ }
+
+ ts->info.hw_ver = cpu_to_le32(hw_ver);
+ ts->info.main_ver = 0xff;
+ ts->info.sub_ver = 0xff;
+
+ return error;
+}
+
static int raydium_i2c_query_ts_info(struct raydium_data *ts)
{
struct i2c_client *client = ts->client;
@@ -388,13 +427,10 @@ static int raydium_i2c_initialize(struct raydium_data *ts)
if (error)
ts->boot_mode = RAYDIUM_TS_BLDR;
- if (ts->boot_mode == RAYDIUM_TS_BLDR) {
- ts->info.hw_ver = cpu_to_le32(0xffffffffUL);
- ts->info.main_ver = 0xff;
- ts->info.sub_ver = 0xff;
- } else {
+ if (ts->boot_mode == RAYDIUM_TS_BLDR)
+ raydium_i2c_query_ts_bootloader_info(ts);
+ else
raydium_i2c_query_ts_info(ts);
- }
return error;
}
@@ -1082,11 +1118,11 @@ static int raydium_i2c_probe(struct i2c_client *client,
if (error)
return error;
- error = devm_add_action(&client->dev, raydium_i2c_power_off, ts);
+ error = devm_add_action_or_reset(&client->dev,
+ raydium_i2c_power_off, ts);
if (error) {
dev_err(&client->dev,
"failed to install power off action: %d\n", error);
- raydium_i2c_power_off(ts);
return error;
}
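The switch to devm_add_action_or_reset() works because that helper runs the action itself when it fails to register it, so the manual power-off call in the error path becomes redundant. A minimal sketch of the pattern, with a hypothetical struct foo and foo_power_off_action() standing in for the driver's real context and callback:

#include <linux/device.h>

struct foo { int powered; };

static void foo_power_off_action(void *data)
{
	struct foo *ctx = data;

	ctx->powered = 0;
}

static int example_probe(struct device *dev, struct foo *ctx)
{
	ctx->powered = 1;

	/* On registration failure the action runs immediately, so no manual
	 * power-off call is needed in the error path.
	 */
	return devm_add_action_or_reset(dev, foo_power_off_action, ctx);
}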
@@ -1218,7 +1254,7 @@ static SIMPLE_DEV_PM_OPS(raydium_i2c_pm_ops,
raydium_i2c_suspend, raydium_i2c_resume);
static const struct i2c_device_id raydium_i2c_id[] = {
- { "raydium_i2c" , 0 },
+ { "raydium_i2c", 0 },
{ "rm32380", 0 },
{ /* sentinel */ }
};
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
index 6abae665ca71d8..e38ba3e4f18377 100644
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -92,7 +92,7 @@ static int st1232_ts_wait_ready(struct st1232_ts_data *ts)
unsigned int retries;
int error;
- for (retries = 10; retries; retries--) {
+ for (retries = 100; retries; retries--) {
error = st1232_ts_read_data(ts, REG_STATUS, 1);
if (!error) {
switch (ts->read_buf[0]) {
@@ -389,6 +389,7 @@ static struct i2c_driver st1232_ts_driver = {
.driver = {
.name = ST1232_TS_NAME,
.of_match_table = st1232_ts_dt_ids,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.pm = &st1232_ts_pm_ops,
},
};
diff --git a/drivers/input/touchscreen/tsc2004.c b/drivers/input/touchscreen/tsc2004.c
index 0272cedcc72617..9fdd870c4c0bd8 100644
--- a/drivers/input/touchscreen/tsc2004.c
+++ b/drivers/input/touchscreen/tsc2004.c
@@ -45,7 +45,9 @@ static int tsc2004_probe(struct i2c_client *i2c,
static int tsc2004_remove(struct i2c_client *i2c)
{
- return tsc200x_remove(&i2c->dev);
+ tsc200x_remove(&i2c->dev);
+
+ return 0;
}
static const struct i2c_device_id tsc2004_idtable[] = {
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
index 923496bbb368d5..a2f55920b9b2e1 100644
--- a/drivers/input/touchscreen/tsc2005.c
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -66,7 +66,9 @@ static int tsc2005_probe(struct spi_device *spi)
static int tsc2005_remove(struct spi_device *spi)
{
- return tsc200x_remove(&spi->dev);
+ tsc200x_remove(&spi->dev);
+
+ return 0;
}
#ifdef CONFIG_OF
diff --git a/drivers/input/touchscreen/tsc200x-core.c b/drivers/input/touchscreen/tsc200x-core.c
index b8d720d520131a..27810f6c69f679 100644
--- a/drivers/input/touchscreen/tsc200x-core.c
+++ b/drivers/input/touchscreen/tsc200x-core.c
@@ -577,15 +577,13 @@ disable_regulator:
}
EXPORT_SYMBOL_GPL(tsc200x_probe);
-int tsc200x_remove(struct device *dev)
+void tsc200x_remove(struct device *dev)
{
struct tsc200x *ts = dev_get_drvdata(dev);
sysfs_remove_group(&dev->kobj, &tsc200x_attr_group);
regulator_disable(ts->vio);
-
- return 0;
}
EXPORT_SYMBOL_GPL(tsc200x_remove);
diff --git a/drivers/input/touchscreen/tsc200x-core.h b/drivers/input/touchscreen/tsc200x-core.h
index a43c08ccfd3dc0..4ded34425b21ee 100644
--- a/drivers/input/touchscreen/tsc200x-core.h
+++ b/drivers/input/touchscreen/tsc200x-core.h
@@ -74,6 +74,6 @@ extern const struct dev_pm_ops tsc200x_pm_ops;
int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
struct regmap *regmap,
int (*tsc200x_cmd)(struct device *dev, u8 cmd));
-int tsc200x_remove(struct device *dev);
+void tsc200x_remove(struct device *dev);
#endif
diff --git a/drivers/input/touchscreen/wacom_i2c.c b/drivers/input/touchscreen/wacom_i2c.c
index 22826c387da5b6..fe4ea6204a4e36 100644
--- a/drivers/input/touchscreen/wacom_i2c.c
+++ b/drivers/input/touchscreen/wacom_i2c.c
@@ -6,6 +6,7 @@
* <tobita.tatsunosuke@wacom.co.jp>
*/
+#include <linux/bits.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
@@ -14,6 +15,15 @@
#include <linux/interrupt.h>
#include <asm/unaligned.h>
+/* Bitmasks (for data[3]) */
+#define WACOM_TIP_SWITCH BIT(0)
+#define WACOM_BARREL_SWITCH BIT(1)
+#define WACOM_ERASER BIT(2)
+#define WACOM_INVERT BIT(3)
+#define WACOM_BARREL_SWITCH_2 BIT(4)
+#define WACOM_IN_PROXIMITY BIT(5)
+
+/* Registers */
#define WACOM_CMD_QUERY0 0x04
#define WACOM_CMD_QUERY1 0x00
#define WACOM_CMD_QUERY2 0x33
@@ -99,19 +109,19 @@ static irqreturn_t wacom_i2c_irq(int irq, void *dev_id)
if (error < 0)
goto out;
- tsw = data[3] & 0x01;
- ers = data[3] & 0x04;
- f1 = data[3] & 0x02;
- f2 = data[3] & 0x10;
+ tsw = data[3] & WACOM_TIP_SWITCH;
+ ers = data[3] & WACOM_ERASER;
+ f1 = data[3] & WACOM_BARREL_SWITCH;
+ f2 = data[3] & WACOM_BARREL_SWITCH_2;
x = le16_to_cpup((__le16 *)&data[4]);
y = le16_to_cpup((__le16 *)&data[6]);
pressure = le16_to_cpup((__le16 *)&data[8]);
if (!wac_i2c->prox)
- wac_i2c->tool = (data[3] & 0x0c) ?
+ wac_i2c->tool = (data[3] & (WACOM_ERASER | WACOM_INVERT)) ?
BTN_TOOL_RUBBER : BTN_TOOL_PEN;
- wac_i2c->prox = data[3] & 0x20;
+ wac_i2c->prox = data[3] & WACOM_IN_PROXIMITY;
input_report_key(input, BTN_TOUCH, tsw || ers);
input_report_key(input, wac_i2c->tool, wac_i2c->prox);
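The new masks are intended as a straight renaming of the magic numbers they replace. A quick, self-contained cross-check (illustrative only, with a local BIT() definition instead of <linux/bits.h>) can assert the equivalence:

/* Cross-check that the named masks match the old constants. */
#define BIT(n)	(1u << (n))

_Static_assert(BIT(0) == 0x01, "WACOM_TIP_SWITCH");
_Static_assert(BIT(1) == 0x02, "WACOM_BARREL_SWITCH");
_Static_assert(BIT(2) == 0x04, "WACOM_ERASER");
_Static_assert(BIT(3) == 0x08, "WACOM_INVERT");
_Static_assert(BIT(4) == 0x10, "WACOM_BARREL_SWITCH_2");
_Static_assert(BIT(5) == 0x20, "WACOM_IN_PROXIMITY");
_Static_assert((BIT(2) | BIT(3)) == 0x0c, "ERASER | INVERT tool check");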
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 034dbd487c3347..10506a4b66ef85 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -294,6 +294,7 @@ config GTP
config AMT
tristate "Automatic Multicast Tunneling (AMT)"
depends on INET && IP_MULTICAST
+ depends on IPV6 || !IPV6
select NET_UDP_TUNNEL
help
This allows one to create AMT(Automatic Multicast Tunneling)
diff --git a/drivers/net/amt.c b/drivers/net/amt.c
index 60a7053a9cf7e2..47a04c330885ef 100644
--- a/drivers/net/amt.c
+++ b/drivers/net/amt.c
@@ -12,7 +12,6 @@
#include <linux/igmp.h>
#include <linux/workqueue.h>
#include <net/net_namespace.h>
-#include <net/protocol.h>
#include <net/ip.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
@@ -23,7 +22,6 @@
#include <linux/security.h>
#include <net/gro_cells.h>
#include <net/ipv6.h>
-#include <net/protocol.h>
#include <net/if_inet6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
@@ -2767,7 +2765,7 @@ static int amt_err_lookup(struct sock *sk, struct sk_buff *skb)
rcu_read_lock_bh();
amt = rcu_dereference_sk_user_data(sk);
if (!amt)
- goto drop;
+ goto out;
if (amt->mode != AMT_MODE_GATEWAY)
goto drop;
@@ -2789,6 +2787,7 @@ static int amt_err_lookup(struct sock *sk, struct sk_buff *skb)
default:
goto drop;
}
+out:
rcu_read_unlock_bh();
return 0;
drop:
@@ -3259,8 +3258,10 @@ static int __init amt_init(void)
goto unregister_notifier;
amt_wq = alloc_workqueue("amt", WQ_UNBOUND, 1);
- if (!amt_wq)
+ if (!amt_wq) {
+ err = -ENOMEM;
goto rtnl_unregister;
+ }
spin_lock_init(&source_gc_lock);
spin_lock_bh(&source_gc_lock);
@@ -3285,7 +3286,7 @@ static void __exit amt_fini(void)
{
rtnl_link_unregister(&amt_link_ops);
unregister_netdevice_notifier(&amt_notifier_block);
- flush_delayed_work(&source_gc_wq);
+ cancel_delayed_work(&source_gc_wq);
__amt_source_gc_work();
destroy_workqueue(amt_wq);
}
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index fd07561da0348a..6a6cdd0bb2585b 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -108,15 +108,15 @@ static ssize_t ad_partner_oper_port_state_show(struct slave *slave, char *buf)
}
static SLAVE_ATTR_RO(ad_partner_oper_port_state);
-static const struct slave_attribute *slave_attrs[] = {
- &slave_attr_state,
- &slave_attr_mii_status,
- &slave_attr_link_failure_count,
- &slave_attr_perm_hwaddr,
- &slave_attr_queue_id,
- &slave_attr_ad_aggregator_id,
- &slave_attr_ad_actor_oper_port_state,
- &slave_attr_ad_partner_oper_port_state,
+static const struct attribute *slave_attrs[] = {
+ &slave_attr_state.attr,
+ &slave_attr_mii_status.attr,
+ &slave_attr_link_failure_count.attr,
+ &slave_attr_perm_hwaddr.attr,
+ &slave_attr_queue_id.attr,
+ &slave_attr_ad_aggregator_id.attr,
+ &slave_attr_ad_actor_oper_port_state.attr,
+ &slave_attr_ad_partner_oper_port_state.attr,
NULL
};
@@ -137,24 +137,10 @@ const struct sysfs_ops slave_sysfs_ops = {
int bond_sysfs_slave_add(struct slave *slave)
{
- const struct slave_attribute **a;
- int err;
-
- for (a = slave_attrs; *a; ++a) {
- err = sysfs_create_file(&slave->kobj, &((*a)->attr));
- if (err) {
- kobject_put(&slave->kobj);
- return err;
- }
- }
-
- return 0;
+ return sysfs_create_files(&slave->kobj, slave_attrs);
}
void bond_sysfs_slave_del(struct slave *slave)
{
- const struct slave_attribute **a;
-
- for (a = slave_attrs; *a; ++a)
- sysfs_remove_file(&slave->kobj, &((*a)->attr));
+ sysfs_remove_files(&slave->kobj, slave_attrs);
}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index 673861ab665a4a..e16dc482f3270b 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -1092,7 +1092,7 @@ static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
err = mcp251xfd_chip_rx_int_enable(priv);
if (err)
- return err;
+ goto out_chip_stop;
err = mcp251xfd_chip_ecc_init(priv);
if (err)
@@ -2290,8 +2290,10 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
* check will fail, too. So leave IRQ handler
* directly.
*/
- if (priv->can.state == CAN_STATE_BUS_OFF)
+ if (priv->can.state == CAN_STATE_BUS_OFF) {
+ can_rx_offload_threaded_irq_finish(&priv->offload);
return IRQ_HANDLED;
+ }
}
handled = IRQ_HANDLED;
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c
index 96a13c770e4a18..24627ab1462617 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_core.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.c
@@ -664,7 +664,7 @@ int es58x_rx_err_msg(struct net_device *netdev, enum es58x_err error,
struct can_device_stats *can_stats = &can->can_stats;
struct can_frame *cf = NULL;
struct sk_buff *skb;
- int ret;
+ int ret = 0;
if (!netif_running(netdev)) {
if (net_ratelimit())
@@ -823,8 +823,6 @@ int es58x_rx_err_msg(struct net_device *netdev, enum es58x_err error,
can->state = CAN_STATE_BUS_OFF;
can_bus_off(netdev);
ret = can->do_set_mode(netdev, CAN_MODE_STOP);
- if (ret)
- return ret;
}
break;
@@ -881,7 +879,7 @@ int es58x_rx_err_msg(struct net_device *netdev, enum es58x_err error,
ES58X_EVENT_BUSOFF, timestamp);
}
- return 0;
+ return ret;
}
/**
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 837b3fecd71e95..8762187527669b 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -841,14 +841,14 @@ static int pcan_usb_start(struct peak_usb_device *dev)
pdev->bec.rxerr = 0;
pdev->bec.txerr = 0;
- /* be notified on error counter changes (if requested by user) */
- if (dev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
- err = pcan_usb_set_err_frame(dev, PCAN_USB_BERR_MASK);
- if (err)
- netdev_warn(dev->netdev,
- "Asking for BERR reporting error %u\n",
- err);
- }
+ /* always ask the device for BERR reporting, to be able to switch from
+ * WARNING to PASSIVE state
+ */
+ err = pcan_usb_set_err_frame(dev, PCAN_USB_BERR_MASK);
+ if (err)
+ netdev_warn(dev->netdev,
+ "Asking for BERR reporting error %u\n",
+ err);
/* if revision greater than 3, can put silent mode on/off */
if (dev->device_rev > 3) {
@@ -883,6 +883,11 @@ static int pcan_usb_init(struct peak_usb_device *dev)
return err;
}
+ dev_info(dev->netdev->dev.parent,
+ "PEAK-System %s adapter hwrev %u serial %08X (%u channel)\n",
+ pcan_usb.name, dev->device_rev, serial_number,
+ pcan_usb.ctrl_count);
+
/* Since rev 4.1, PCAN-USB is able to make single-shot as well as
* looped back frames.
*/
@@ -896,11 +901,6 @@ static int pcan_usb_init(struct peak_usb_device *dev)
"Firmware update available. Please contact support@peak-system.com\n");
}
- dev_info(dev->netdev->dev.parent,
- "PEAK-System %s adapter hwrev %u serial %08X (%u channel)\n",
- pcan_usb.name, dev->device_rev, serial_number,
- pcan_usb.ctrl_count);
-
return 0;
}
@@ -986,7 +986,6 @@ const struct peak_usb_adapter pcan_usb = {
.device_id = PCAN_USB_PRODUCT_ID,
.ctrl_count = 1,
.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY |
- CAN_CTRLMODE_BERR_REPORTING |
CAN_CTRLMODE_CC_LEN8_DLC,
.clock = {
.freq = PCAN_USB_CRYSTAL_HZ / 2,
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 14c678a9e41b35..f00cbf5753b914 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -640,7 +640,10 @@ static void mv88e6393x_phylink_validate(struct mv88e6xxx_chip *chip, int port,
unsigned long *mask,
struct phylink_link_state *state)
{
- if (port == 0 || port == 9 || port == 10) {
+ bool is_6191x =
+ chip->info->prod_num == MV88E6XXX_PORT_SWITCH_ID_PROD_6191X;
+
+ if (((port == 0 || port == 9) && !is_6191x) || port == 10) {
phylink_set(mask, 10000baseT_Full);
phylink_set(mask, 10000baseKR_Full);
phylink_set(mask, 10000baseCR_Full);
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 83808e7dbddaf5..327cc465480654 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -1370,12 +1370,12 @@ out:
static bool felix_rxtstamp(struct dsa_switch *ds, int port,
struct sk_buff *skb, unsigned int type)
{
- u8 *extraction = skb->data - ETH_HLEN - OCELOT_TAG_LEN;
+ u32 tstamp_lo = OCELOT_SKB_CB(skb)->tstamp_lo;
struct skb_shared_hwtstamps *shhwtstamps;
struct ocelot *ocelot = ds->priv;
- u32 tstamp_lo, tstamp_hi;
struct timespec64 ts;
- u64 tstamp, val;
+ u32 tstamp_hi;
+ u64 tstamp;
/* If the "no XTR IRQ" workaround is in use, tell DSA to defer this skb
* for RX timestamping. Then free it, and poll for its copy through
@@ -1390,9 +1390,6 @@ static bool felix_rxtstamp(struct dsa_switch *ds, int port,
ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
tstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
- ocelot_xfh_get_rew_val(extraction, &val);
- tstamp_lo = (u32)val;
-
tstamp_hi = tstamp >> 32;
if ((tstamp & 0xffffffff) < tstamp_lo)
tstamp_hi--;
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index ea7f1277892219..a429c9750add1b 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -1109,6 +1109,14 @@ qca8k_setup(struct dsa_switch *ds)
if (ret)
return ret;
+ /* Make sure MAC06 is disabled */
+ ret = qca8k_reg_clear(priv, QCA8K_REG_PORT0_PAD_CTRL,
+ QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
+ if (ret) {
+ dev_err(priv->dev, "failed disabling MAC06 exchange");
+ return ret;
+ }
+
/* Enable CPU Port */
ret = qca8k_reg_set(priv, QCA8K_REG_GLOBAL_FW_CTRL0,
QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h
index e10571a398c98a..128b8cf85e0804 100644
--- a/drivers/net/dsa/qca8k.h
+++ b/drivers/net/dsa/qca8k.h
@@ -34,6 +34,7 @@
#define QCA8K_MASK_CTRL_DEVICE_ID_MASK GENMASK(15, 8)
#define QCA8K_MASK_CTRL_DEVICE_ID(x) ((x) >> 8)
#define QCA8K_REG_PORT0_PAD_CTRL 0x004
+#define QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN BIT(31)
#define QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE BIT(19)
#define QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE BIT(18)
#define QCA8K_REG_PORT5_PAD_CTRL 0x008
diff --git a/drivers/net/ethernet/asix/ax88796c_main.c b/drivers/net/ethernet/asix/ax88796c_main.c
index 4b0c5a09fd57ff..e230d8d0ff739c 100644
--- a/drivers/net/ethernet/asix/ax88796c_main.c
+++ b/drivers/net/ethernet/asix/ax88796c_main.c
@@ -934,7 +934,7 @@ static const struct net_device_ops ax88796c_netdev_ops = {
.ndo_stop = ax88796c_close,
.ndo_start_xmit = ax88796c_start_xmit,
.ndo_get_stats64 = ax88796c_get_stats64,
- .ndo_do_ioctl = ax88796c_ioctl,
+ .ndo_eth_ioctl = ax88796c_ioctl,
.ndo_set_mac_address = eth_mac_addr,
.ndo_set_features = ax88796c_set_features,
};
@@ -1114,11 +1114,13 @@ static int ax88796c_remove(struct spi_device *spi)
return 0;
}
+#ifdef CONFIG_OF
static const struct of_device_id ax88796c_dt_ids[] = {
{ .compatible = "asix,ax88796c" },
{},
};
MODULE_DEVICE_TABLE(of, ax88796c_dt_ids);
+#endif
static const struct spi_device_id asix_id[] = {
{ "ax88796c", 0 },
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index ce790e9b45c302..5c464ea73576f4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -443,7 +443,7 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: {
if (BNXT_PF(bp) && bp->pf.active_vfs) {
NL_SET_ERR_MSG_MOD(extack,
- "reload is unsupported when VFs are allocated\n");
+ "reload is unsupported when VFs are allocated");
return -EOPNOTSUPP;
}
rtnl_lock();
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index b1328c5524b535..85ca3909859d2d 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -5503,7 +5503,6 @@ static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
int workaround, port_a;
serdes_cfg = 0;
- expected_sg_dig_ctrl = 0;
workaround = 0;
port_a = 1;
current_link_up = false;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 5903bdb78916fa..129352bbe1143b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -2015,12 +2015,15 @@ static int cxgb4_get_module_info(struct net_device *dev,
if (ret)
return ret;
- if (!sff8472_comp || (sff_diag_type & 4)) {
+ if (!sff8472_comp || (sff_diag_type & SFP_DIAG_ADDRMODE)) {
modinfo->type = ETH_MODULE_SFF_8079;
modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
} else {
modinfo->type = ETH_MODULE_SFF_8472;
- modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ if (sff_diag_type & SFP_DIAG_IMPLEMENTED)
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ else
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN / 2;
}
break;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index 002fc62ea7262c..63bc956d20376b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -293,6 +293,8 @@ enum {
#define I2C_PAGE_SIZE 0x100
#define SFP_DIAG_TYPE_ADDR 0x5c
#define SFP_DIAG_TYPE_LEN 0x1
+#define SFP_DIAG_ADDRMODE BIT(2)
+#define SFP_DIAG_IMPLEMENTED BIT(6)
#define SFF_8472_COMP_ADDR 0x5e
#define SFF_8472_COMP_LEN 0x1
#define SFF_REV_ADDR 0x1
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 6b02ef432eda6b..59b66f679e46e8 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -1137,7 +1137,7 @@ static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
goto reset;
ntfy_idx = gve_tx_idx_to_ntfy(priv, txqueue);
- if (ntfy_idx > priv->num_ntfy_blks)
+ if (ntfy_idx >= priv->num_ntfy_blks)
goto reset;
block = &priv->ntfy_blocks[ntfy_idx];
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index c8500babbd1db9..3d04b5aff331b4 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -500,7 +500,8 @@ static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx,
rx->rx_copied_pkt++;
rx->rx_frag_copy_cnt++;
rx->rx_copybreak_pkt++;
- } u64_stats_update_end(&rx->statss);
+ u64_stats_update_end(&rx->statss);
+ }
} else {
if (rx->data.raw_addressing) {
int recycle = gve_rx_can_recycle_buffer(page_info);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index a2b993d62822e1..9ccebbaa0d696e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -4210,6 +4210,13 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
}
out:
+ /* Sync the head pointer before exiting, since the hardware calculates
+ * the FBD number using the head pointer
+ */
+ if (unused_count > 0)
+ failure = failure ||
+ hns3_nic_alloc_rx_buffers(ring, unused_count);
+
return failure ? budget : recv_pkts;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 9fdedd83f39225..c8442b86df9417 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -238,9 +238,11 @@ static void hns3_lb_clear_tx_ring(struct hns3_nic_priv *priv, u32 start_ringid,
}
/**
- * hns3_lp_run_test - run loopback test
+ * hns3_lp_run_test - run loopback test
* @ndev: net device
* @mode: loopback type
+ *
+ * Return: %0 for success or a NIC loopback test error code on failure
*/
static int hns3_lp_run_test(struct net_device *ndev, enum hnae3_loop mode)
{
@@ -398,7 +400,7 @@ static void hns3_do_selftest(struct net_device *ndev, int (*st_param)[2],
}
/**
- * hns3_nic_self_test - self test
+ * hns3_self_test - self test
* @ndev: net device
* @eth_test: test cmd
* @data: test result
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index c327df9dbac409..c5d5466810bb2e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -483,6 +483,7 @@ static int hclge_firmware_compat_config(struct hclge_dev *hdev, bool en)
if (hnae3_dev_phy_imp_supported(hdev))
hnae3_set_bit(compat, HCLGE_PHY_IMP_EN_B, 1);
hnae3_set_bit(compat, HCLGE_MAC_STATS_EXT_EN_B, 1);
+ hnae3_set_bit(compat, HCLGE_SYNC_RX_RING_HEAD_EN_B, 1);
req->compat = cpu_to_le32(compat);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index c38b57fc6c6a54..d24e5902879831 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -1151,6 +1151,7 @@ struct hclge_query_ppu_pf_other_int_dfx_cmd {
#define HCLGE_NCSI_ERROR_REPORT_EN_B 1
#define HCLGE_PHY_IMP_EN_B 2
#define HCLGE_MAC_STATS_EXT_EN_B 3
+#define HCLGE_SYNC_RX_RING_HEAD_EN_B 4
struct hclge_firmware_compat_cmd {
__le32 compat;
u8 rsv[20];
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index 91cb578f56b80a..375ebf105a9aae 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -129,7 +129,7 @@ static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev,
u32 total_ets_bw = 0;
u8 i;
- for (i = 0; i < hdev->tc_max; i++) {
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
switch (ets->tc_tsa[i]) {
case IEEE_8021QAZ_TSA_STRICT:
if (hdev->tm_info.tc_info[i].tc_sch_mode !=
@@ -286,28 +286,24 @@ err_out:
static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
{
- u64 requests[HNAE3_MAX_TC], indications[HNAE3_MAX_TC];
struct hclge_vport *vport = hclge_get_vport(h);
struct hclge_dev *hdev = vport->back;
int ret;
- u8 i;
memset(pfc, 0, sizeof(*pfc));
pfc->pfc_cap = hdev->pfc_max;
pfc->pfc_en = hdev->tm_info.pfc_en;
- ret = hclge_pfc_tx_stats_get(hdev, requests);
- if (ret)
+ ret = hclge_mac_update_stats(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to update MAC stats, ret = %d.\n", ret);
return ret;
+ }
- ret = hclge_pfc_rx_stats_get(hdev, indications);
- if (ret)
- return ret;
+ hclge_pfc_tx_stats_get(hdev, pfc->requests);
+ hclge_pfc_rx_stats_get(hdev, pfc->indications);
- for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
- pfc->requests[i] = requests[i];
- pfc->indications[i] = indications[i];
- }
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 2e41aa2d1df80f..c2a58101144e3b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -26,8 +26,6 @@
#include "hclge_devlink.h"
#define HCLGE_NAME "hclge"
-#define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
-#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
#define HCLGE_BUF_SIZE_UNIT 256U
#define HCLGE_BUF_MUL_BY 2
@@ -568,6 +566,16 @@ static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *reg_num)
struct hclge_desc desc;
int ret;
+ /* The driver needs the total register number, counting both valid and
+ * reserved registers, but old firmware on V2 devices only returns the
+ * number of valid registers. To stay compatible with these devices,
+ * the driver uses a fixed value.
+ */
+ if (hdev->ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
+ *reg_num = HCLGE_MAC_STATS_MAX_NUM_V1;
+ return 0;
+ }
+
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -587,7 +595,7 @@ static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *reg_num)
return 0;
}
-static int hclge_mac_update_stats(struct hclge_dev *hdev)
+int hclge_mac_update_stats(struct hclge_dev *hdev)
{
/* The firmware supports the new statistics acquisition method */
if (hdev->ae_dev->dev_specs.mac_stats_num)
@@ -2581,7 +2589,7 @@ static int hclge_init_roce_base_info(struct hclge_vport *vport)
if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
return -EINVAL;
- roce->rinfo.base_vector = hdev->roce_base_vector;
+ roce->rinfo.base_vector = hdev->num_nic_msi;
roce->rinfo.netdev = nic->kinfo.netdev;
roce->rinfo.roce_io_base = hdev->hw.io_base;
@@ -2617,10 +2625,6 @@ static int hclge_init_msi(struct hclge_dev *hdev)
hdev->num_msi = vectors;
hdev->num_msi_left = vectors;
- hdev->base_msi_vector = pdev->irq;
- hdev->roce_base_vector = hdev->base_msi_vector +
- hdev->num_nic_msi;
-
hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
sizeof(u16), GFP_KERNEL);
if (!hdev->vector_status) {
@@ -8949,8 +8953,11 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
err_no_space:
/* if already overflow, not to print each time */
- if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
+ if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE)) {
+ vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
+ }
+
return -ENOSPC;
}
@@ -9006,12 +9013,17 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
struct list_head *list,
- int (*sync)(struct hclge_vport *,
- const unsigned char *))
+ enum HCLGE_MAC_ADDR_TYPE mac_type)
{
+ int (*sync)(struct hclge_vport *vport, const unsigned char *addr);
struct hclge_mac_node *mac_node, *tmp;
int ret;
+ if (mac_type == HCLGE_MAC_ADDR_UC)
+ sync = hclge_add_uc_addr_common;
+ else
+ sync = hclge_add_mc_addr_common;
+
list_for_each_entry_safe(mac_node, tmp, list, node) {
ret = sync(vport, mac_node->mac_addr);
if (!ret) {
@@ -9023,8 +9035,13 @@ static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
/* If one unicast mac address is existing in hardware,
* we need to try whether other unicast mac addresses
* are new addresses that can be added.
+ * Multicast mac addresses can be reused, so even when
+ * there is no space to add a new multicast mac address,
+ * we should still check whether the other mac addresses
+ * already exist in hardware and can be reused.
*/
- if (ret != -EEXIST)
+ if ((mac_type == HCLGE_MAC_ADDR_UC && ret != -EEXIST) ||
+ (mac_type == HCLGE_MAC_ADDR_MC && ret != -ENOSPC))
break;
}
}
@@ -9032,12 +9049,17 @@ static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
struct list_head *list,
- int (*unsync)(struct hclge_vport *,
- const unsigned char *))
+ enum HCLGE_MAC_ADDR_TYPE mac_type)
{
+ int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
struct hclge_mac_node *mac_node, *tmp;
int ret;
+ if (mac_type == HCLGE_MAC_ADDR_UC)
+ unsync = hclge_rm_uc_addr_common;
+ else
+ unsync = hclge_rm_mc_addr_common;
+
list_for_each_entry_safe(mac_node, tmp, list, node) {
ret = unsync(vport, mac_node->mac_addr);
if (!ret || ret == -ENOENT) {
@@ -9168,17 +9190,8 @@ stop_traverse:
spin_unlock_bh(&vport->mac_list_lock);
/* delete first, in order to get max mac table space for adding */
- if (mac_type == HCLGE_MAC_ADDR_UC) {
- hclge_unsync_vport_mac_list(vport, &tmp_del_list,
- hclge_rm_uc_addr_common);
- hclge_sync_vport_mac_list(vport, &tmp_add_list,
- hclge_add_uc_addr_common);
- } else {
- hclge_unsync_vport_mac_list(vport, &tmp_del_list,
- hclge_rm_mc_addr_common);
- hclge_sync_vport_mac_list(vport, &tmp_add_list,
- hclge_add_mc_addr_common);
- }
+ hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type);
+ hclge_sync_vport_mac_list(vport, &tmp_add_list, mac_type);
/* if some mac addresses were added/deleted fail, move back to the
* mac_list, and retry at next time.
@@ -9337,12 +9350,7 @@ static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
spin_unlock_bh(&vport->mac_list_lock);
- if (mac_type == HCLGE_MAC_ADDR_UC)
- hclge_unsync_vport_mac_list(vport, &tmp_del_list,
- hclge_rm_uc_addr_common);
- else
- hclge_unsync_vport_mac_list(vport, &tmp_del_list,
- hclge_rm_mc_addr_common);
+ hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type);
if (!list_empty(&tmp_del_list))
dev_warn(&hdev->pdev->dev,
@@ -9410,36 +9418,6 @@ static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
return return_status;
}
-static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
- u8 *mac_addr)
-{
- struct hclge_mac_vlan_tbl_entry_cmd req;
- struct hclge_dev *hdev = vport->back;
- struct hclge_desc desc;
- u16 egress_port = 0;
- int i;
-
- if (is_zero_ether_addr(mac_addr))
- return false;
-
- memset(&req, 0, sizeof(req));
- hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
- HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
- req.egress_port = cpu_to_le16(egress_port);
- hclge_prepare_mac_addr(&req, mac_addr, false);
-
- if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
- return true;
-
- vf_idx += HCLGE_VF_VPORT_START_NUM;
- for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++)
- if (i != vf_idx &&
- ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
- return true;
-
- return false;
-}
-
static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
u8 *mac_addr)
{
@@ -9457,12 +9435,6 @@ static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
return 0;
}
- if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
- dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
- mac_addr);
- return -EEXIST;
- }
-
ether_addr_copy(vport->vf_info.mac, mac_addr);
if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 9e1eede599ec8f..ebba603483a04f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -404,7 +404,7 @@ struct hclge_tm_info {
};
/* max number of mac statistics on each version */
-#define HCLGE_MAC_STATS_MAX_NUM_V1 84
+#define HCLGE_MAC_STATS_MAX_NUM_V1 87
#define HCLGE_MAC_STATS_MAX_NUM_V2 105
struct hclge_comm_stats_str {
@@ -852,6 +852,9 @@ struct hclge_vf_vlan_cfg {
(y) = (_k_ ^ ~_v_) & (_k_); \
} while (0)
+#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
+#define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
+
#define HCLGE_MAC_TNL_LOG_SIZE 8
#define HCLGE_VPORT_NUM 256
struct hclge_dev {
@@ -904,12 +907,10 @@ struct hclge_dev {
u16 num_msi;
u16 num_msi_left;
u16 num_msi_used;
- u32 base_msi_vector;
u16 *vector_status;
int *vector_irq;
u16 num_nic_msi; /* Num of nic vectors for this PF */
u16 num_roce_msi; /* Num of roce vectors for this PF */
- int roce_base_vector;
unsigned long service_timer_period;
unsigned long service_timer_previous;
@@ -1168,4 +1169,5 @@ void hclge_inform_vf_promisc_info(struct hclge_vport *vport);
int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len);
int hclge_push_vf_link_status(struct hclge_vport *vport);
int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en);
+int hclge_mac_update_stats(struct hclge_dev *hdev);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 95074e91a8466a..429652a8cde167 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -113,50 +113,50 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
return 0;
}
-static int hclge_pfc_stats_get(struct hclge_dev *hdev,
- enum hclge_opcode_type opcode, u64 *stats)
-{
- struct hclge_desc desc[HCLGE_TM_PFC_PKT_GET_CMD_NUM];
- int ret, i, j;
-
- if (!(opcode == HCLGE_OPC_QUERY_PFC_RX_PKT_CNT ||
- opcode == HCLGE_OPC_QUERY_PFC_TX_PKT_CNT))
- return -EINVAL;
-
- for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1; i++) {
- hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
- desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- }
-
- hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
+static const u16 hclge_pfc_tx_stats_offset[] = {
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)
+};
- ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM);
- if (ret)
- return ret;
+static const u16 hclge_pfc_rx_stats_offset[] = {
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)
+};
- for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
- struct hclge_pfc_stats_cmd *pfc_stats =
- (struct hclge_pfc_stats_cmd *)desc[i].data;
+static void hclge_pfc_stats_get(struct hclge_dev *hdev, bool tx, u64 *stats)
+{
+ const u16 *offset;
+ int i;
- for (j = 0; j < HCLGE_TM_PFC_NUM_GET_PER_CMD; j++) {
- u32 index = i * HCLGE_TM_PFC_PKT_GET_CMD_NUM + j;
+ if (tx)
+ offset = hclge_pfc_tx_stats_offset;
+ else
+ offset = hclge_pfc_rx_stats_offset;
- if (index < HCLGE_MAX_TC_NUM)
- stats[index] =
- le64_to_cpu(pfc_stats->pkt_num[j]);
- }
- }
- return 0;
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
+ stats[i] = HCLGE_STATS_READ(&hdev->mac_stats, offset[i]);
}
-int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
+void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
- return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_RX_PKT_CNT, stats);
+ hclge_pfc_stats_get(hdev, false, stats);
}
-int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
+void hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
- return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_TX_PKT_CNT, stats);
+ hclge_pfc_stats_get(hdev, true, stats);
}
int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
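With this rework the PFC counters come straight from the cached hdev->mac_stats (refreshed by hclge_mac_update_stats()) through offsetof()-based tables instead of a dedicated firmware query. The pattern generalizes; a minimal, self-contained sketch with made-up field names (not the real hclge_mac_stats layout) looks like this:

#include <stddef.h>
#include <stdint.h>

/* Illustrative stats block; the real driver uses struct hclge_mac_stats. */
struct demo_mac_stats {
	uint64_t tx_pfc_pri0_pkts;
	uint64_t tx_pfc_pri1_pkts;
};

#define DEMO_FIELD_OFF(f)	offsetof(struct demo_mac_stats, f)
#define DEMO_STATS_READ(p, off) \
	(*(const uint64_t *)((const uint8_t *)(p) + (off)))

static const size_t demo_tx_offsets[] = {
	DEMO_FIELD_OFF(tx_pfc_pri0_pkts),
	DEMO_FIELD_OFF(tx_pfc_pri1_pkts),
};

/* Copy one counter per priority out of the cached block. */
static void demo_pfc_tx_stats_get(const struct demo_mac_stats *s, uint64_t *out)
{
	size_t i;

	for (i = 0; i < sizeof(demo_tx_offsets) / sizeof(demo_tx_offsets[0]); i++)
		out[i] = DEMO_STATS_READ(s, demo_tx_offsets[i]);
}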
@@ -1123,7 +1123,6 @@ static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
{
-#define DEFAULT_TC_WEIGHT 1
#define DEFAULT_TC_OFFSET 14
struct hclge_ets_tc_weight_cmd *ets_weight;
@@ -1136,13 +1135,7 @@ static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
for (i = 0; i < HNAE3_MAX_TC; i++) {
struct hclge_pg_info *pg_info;
- ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT;
-
- if (!(hdev->hw_tc_map & BIT(i)))
- continue;
-
- pg_info =
- &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
+ pg_info = &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 2ee9b795f71dc4..1db7f40b452551 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -228,8 +228,8 @@ int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
int hclge_tm_init_hw(struct hclge_dev *hdev, bool init);
int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx);
int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
-int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats);
-int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats);
+void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats);
+void hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats);
int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate);
int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num);
int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index f89bfb352adfdc..e605c2c5bcce7d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -434,8 +434,28 @@ err_csq:
return ret;
}
+static int hclgevf_firmware_compat_config(struct hclgevf_dev *hdev, bool en)
+{
+ struct hclgevf_firmware_compat_cmd *req;
+ struct hclgevf_desc desc;
+ u32 compat = 0;
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_IMP_COMPAT_CFG, false);
+
+ if (en) {
+ req = (struct hclgevf_firmware_compat_cmd *)desc.data;
+
+ hnae3_set_bit(compat, HCLGEVF_SYNC_RX_RING_HEAD_EN_B, 1);
+
+ req->compat = cpu_to_le32(compat);
+ }
+
+ return hclgevf_cmd_send(&hdev->hw, &desc, 1);
+}
+
int hclgevf_cmd_init(struct hclgevf_dev *hdev)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
int ret;
spin_lock_bh(&hdev->hw.cmq.csq.lock);
@@ -484,6 +504,17 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
HNAE3_FW_VERSION_BYTE0_SHIFT));
+ if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) {
+ /* Ask the firmware to enable some features; the driver can work
+ * without them.
+ */
+ ret = hclgevf_firmware_compat_config(hdev, true);
+ if (ret)
+ dev_warn(&hdev->pdev->dev,
+ "Firmware compatible features not enabled(%d).\n",
+ ret);
+ }
+
return 0;
err_cmd_init:
@@ -508,6 +539,7 @@ static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw)
void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
{
+ hclgevf_firmware_compat_config(hdev, false);
set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
/* wait to ensure that the firmware completes the possible left
* over commands.
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
index 39d0b589c720aa..edc9e154061afb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -15,6 +15,12 @@
struct hclgevf_hw;
struct hclgevf_dev;
+#define HCLGEVF_SYNC_RX_RING_HEAD_EN_B 4
+struct hclgevf_firmware_compat_cmd {
+ __le32 compat;
+ u8 rsv[20];
+};
+
struct hclgevf_desc {
__le16 opcode;
__le16 flag;
@@ -107,6 +113,9 @@ enum hclgevf_opcode_type {
HCLGEVF_OPC_RSS_TC_MODE = 0x0D08,
/* Mailbox cmd */
HCLGEVF_OPC_MBX_VF_TO_PF = 0x2001,
+
+ /* IMP stats command */
+ HCLGEVF_OPC_IMP_COMPAT_CFG = 0x701A,
};
#define HCLGEVF_TQP_REG_OFFSET 0x80000
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 645b2c0011e6bc..25c419d40066dc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -2557,7 +2557,7 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
hdev->num_msi_left == 0)
return -EINVAL;
- roce->rinfo.base_vector = hdev->roce_base_vector;
+ roce->rinfo.base_vector = hdev->roce_base_msix_offset;
roce->rinfo.netdev = nic->kinfo.netdev;
roce->rinfo.roce_io_base = hdev->hw.io_base;
@@ -2823,9 +2823,6 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev)
hdev->num_msi = vectors;
hdev->num_msi_left = vectors;
- hdev->base_msi_vector = pdev->irq;
- hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;
-
hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
sizeof(u16), GFP_KERNEL);
if (!hdev->vector_status) {
@@ -3013,7 +3010,10 @@ static void hclgevf_uninit_client_instance(struct hnae3_client *client,
/* un-init roce, if it exists */
if (hdev->roce_client) {
+ while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
+ msleep(HCLGEVF_WAIT_RESET_DONE);
clear_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state);
+
hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
hdev->roce_client = NULL;
hdev->roce.client = NULL;
@@ -3022,6 +3022,8 @@ static void hclgevf_uninit_client_instance(struct hnae3_client *client,
/* un-init nic/unic, if this was not called by roce client */
if (client->ops->uninit_instance && hdev->nic_client &&
client->type != HNAE3_CLIENT_ROCE) {
+ while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
+ msleep(HCLGEVF_WAIT_RESET_DONE);
clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
client->ops->uninit_instance(&hdev->nic, 0);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 28288d7e33032c..f6f736c0091c05 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -109,6 +109,8 @@
#define HCLGEVF_VF_RST_ING 0x07008
#define HCLGEVF_VF_RST_ING_BIT BIT(16)
+#define HCLGEVF_WAIT_RESET_DONE 100
+
#define HCLGEVF_RSS_IND_TBL_SIZE 512
#define HCLGEVF_RSS_SET_BITMAP_MSK 0xffff
#define HCLGEVF_RSS_KEY_SIZE 40
@@ -308,8 +310,6 @@ struct hclgevf_dev {
u16 num_nic_msix; /* Num of nic vectors for this VF */
u16 num_roce_msix; /* Num of roce vectors for this VF */
u16 roce_base_msix_offset;
- int roce_base_vector;
- u32 base_msi_vector;
u16 *vector_status;
int *vector_irq;
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index bf4ecd9a517c8d..b2db39ee5f85c0 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -165,13 +165,10 @@
#define ice_for_each_chnl_tc(i) \
for ((i) = ICE_CHNL_START_TC; (i) < ICE_CHNL_MAX_TC; (i)++)
-#define ICE_UCAST_PROMISC_BITS (ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX | \
- ICE_PROMISC_UCAST_RX | ICE_PROMISC_MCAST_RX)
+#define ICE_UCAST_PROMISC_BITS (ICE_PROMISC_UCAST_TX | ICE_PROMISC_UCAST_RX)
#define ICE_UCAST_VLAN_PROMISC_BITS (ICE_PROMISC_UCAST_TX | \
- ICE_PROMISC_MCAST_TX | \
ICE_PROMISC_UCAST_RX | \
- ICE_PROMISC_MCAST_RX | \
ICE_PROMISC_VLAN_TX | \
ICE_PROMISC_VLAN_RX)
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index fa6cd63cbf1f09..1efc635cc0f5e2 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -962,7 +962,7 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
} else if (status == ICE_ERR_DOES_NOT_EXIST) {
dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n");
} else if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %s\n",
+ dev_dbg(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %s\n",
ice_stat_str(status));
return -ENODEV;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 2ac21484b87669..217ff5e9a6f143 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -638,8 +638,7 @@ void ice_free_vfs(struct ice_pf *pf)
/* Avoid wait time by stopping all VFs at the same time */
ice_for_each_vf(pf, i)
- if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states))
- ice_dis_vf_qs(&pf->vf[i]);
+ ice_dis_vf_qs(&pf->vf[i]);
tmp = pf->num_alloc_vfs;
pf->num_qps_per_vf = 0;
@@ -651,6 +650,8 @@ void ice_free_vfs(struct ice_pf *pf)
set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states);
ice_free_vf_res(&pf->vf[i]);
}
+
+ mutex_destroy(&pf->vf[i].cfg_lock);
}
if (ice_sriov_free_msix_res(pf))
@@ -1695,8 +1696,7 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
vsi = ice_get_vf_vsi(vf);
- if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
- ice_dis_vf_qs(vf);
+ ice_dis_vf_qs(vf);
/* Call Disable LAN Tx queue AQ whether or not queues are
* enabled. This is needed for successful completion of VFR.
@@ -1948,6 +1948,8 @@ static void ice_set_dflt_settings_vfs(struct ice_pf *pf)
ice_vf_fdir_init(vf);
ice_vc_set_dflt_vf_ops(&vf->vc_ops);
+
+ mutex_init(&vf->cfg_lock);
}
}
@@ -3013,6 +3015,7 @@ bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
{
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ enum ice_status mcast_status = 0, ucast_status = 0;
bool rm_promisc, alluni = false, allmulti = false;
struct virtchnl_promisc_info *info =
(struct virtchnl_promisc_info *)msg;
@@ -3054,24 +3057,6 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
rm_promisc = !allmulti && !alluni;
if (vsi->num_vlan || vf->port_vlan_info) {
- struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
- struct net_device *pf_netdev;
-
- if (!pf_vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- pf_netdev = pf_vsi->netdev;
-
- ret = ice_set_vf_spoofchk(pf_netdev, vf->vf_id, rm_promisc);
- if (ret) {
- dev_err(dev, "Failed to update spoofchk to %s for VF %d VSI %d when setting promiscuous mode\n",
- rm_promisc ? "ON" : "OFF", vf->vf_id,
- vsi->vsi_num);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- }
-
if (rm_promisc)
ret = ice_cfg_vlan_pruning(vsi, true);
else
@@ -3105,52 +3090,51 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
} else {
- enum ice_status status;
- u8 promisc_m;
-
- if (alluni) {
- if (vf->port_vlan_info || vsi->num_vlan)
- promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
- else
- promisc_m = ICE_UCAST_PROMISC_BITS;
- } else if (allmulti) {
- if (vf->port_vlan_info || vsi->num_vlan)
- promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
- else
- promisc_m = ICE_MCAST_PROMISC_BITS;
+ u8 mcast_m, ucast_m;
+
+ if (vf->port_vlan_info || vsi->num_vlan > 1) {
+ mcast_m = ICE_MCAST_VLAN_PROMISC_BITS;
+ ucast_m = ICE_UCAST_VLAN_PROMISC_BITS;
} else {
- if (vf->port_vlan_info || vsi->num_vlan)
- promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
- else
- promisc_m = ICE_UCAST_PROMISC_BITS;
+ mcast_m = ICE_MCAST_PROMISC_BITS;
+ ucast_m = ICE_UCAST_PROMISC_BITS;
}
- /* Configure multicast/unicast with or without VLAN promiscuous
- * mode
- */
- status = ice_vf_set_vsi_promisc(vf, vsi, promisc_m, rm_promisc);
- if (status) {
- dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed, error: %s\n",
- rm_promisc ? "dis" : "en", vf->vf_id,
- ice_stat_str(status));
- v_ret = ice_err_to_virt_err(status);
- goto error_param;
- } else {
- dev_dbg(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d succeeded\n",
- rm_promisc ? "dis" : "en", vf->vf_id);
+ ucast_status = ice_vf_set_vsi_promisc(vf, vsi, ucast_m,
+ !alluni);
+ if (ucast_status) {
+ dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed\n",
+ alluni ? "en" : "dis", vf->vf_id);
+ v_ret = ice_err_to_virt_err(ucast_status);
+ }
+
+ mcast_status = ice_vf_set_vsi_promisc(vf, vsi, mcast_m,
+ !allmulti);
+ if (mcast_status) {
+ dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed\n",
+ allmulti ? "en" : "dis", vf->vf_id);
+ v_ret = ice_err_to_virt_err(mcast_status);
}
}
- if (allmulti &&
- !test_and_set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
- dev_info(dev, "VF %u successfully set multicast promiscuous mode\n", vf->vf_id);
- else if (!allmulti && test_and_clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
- dev_info(dev, "VF %u successfully unset multicast promiscuous mode\n", vf->vf_id);
+ if (!mcast_status) {
+ if (allmulti &&
+ !test_and_set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
+ dev_info(dev, "VF %u successfully set multicast promiscuous mode\n",
+ vf->vf_id);
+ else if (!allmulti && test_and_clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
+ dev_info(dev, "VF %u successfully unset multicast promiscuous mode\n",
+ vf->vf_id);
+ }
- if (alluni && !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
- dev_info(dev, "VF %u successfully set unicast promiscuous mode\n", vf->vf_id);
- else if (!alluni && test_and_clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
- dev_info(dev, "VF %u successfully unset unicast promiscuous mode\n", vf->vf_id);
+ if (!ucast_status) {
+ if (alluni && !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
+ dev_info(dev, "VF %u successfully set unicast promiscuous mode\n",
+ vf->vf_id);
+ else if (!alluni && test_and_clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
+ dev_info(dev, "VF %u successfully unset unicast promiscuous mode\n",
+ vf->vf_id);
+ }
error_param:
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
@@ -3824,6 +3808,7 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
struct device *dev = ice_pf_to_dev(vf->pf);
u8 *mac_addr = vc_ether_addr->addr;
enum ice_status status;
+ int ret = 0;
/* device MAC already added */
if (ether_addr_equal(mac_addr, vf->dev_lan_addr.addr))
@@ -3836,20 +3821,23 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
status = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
if (status == ICE_ERR_ALREADY_EXISTS) {
- dev_err(dev, "MAC %pM already exists for VF %d\n", mac_addr,
+ dev_dbg(dev, "MAC %pM already exists for VF %d\n", mac_addr,
vf->vf_id);
- return -EEXIST;
+ /* don't return since we might need to update
+ * the primary MAC in ice_vfhw_mac_add() below
+ */
+ ret = -EEXIST;
} else if (status) {
dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %s\n",
mac_addr, vf->vf_id, ice_stat_str(status));
return -EIO;
+ } else {
+ vf->num_mac++;
}
ice_vfhw_mac_add(vf, vc_ether_addr);
- vf->num_mac++;
-
- return 0;
+ return ret;
}
/**
@@ -4151,6 +4139,8 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
return 0;
}
+ mutex_lock(&vf->cfg_lock);
+
vf->port_vlan_info = vlanprio;
if (vf->port_vlan_info)
@@ -4160,6 +4150,7 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);
ice_vc_reset_vf(vf);
+ mutex_unlock(&vf->cfg_lock);
return 0;
}
@@ -4699,6 +4690,15 @@ error_handler:
return;
}
+ /* VF is being configured in another context that triggers a VFR, so no
+ * need to process this message
+ */
+ if (!mutex_trylock(&vf->cfg_lock)) {
+ dev_info(dev, "VF %u is being configured in another context that will trigger a VFR, so there is no need to handle this message\n",
+ vf->vf_id);
+ return;
+ }
+
switch (v_opcode) {
case VIRTCHNL_OP_VERSION:
err = ops->get_ver_msg(vf, msg);
@@ -4787,6 +4787,8 @@ error_handler:
dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
vf_id, v_opcode, err);
}
+
+ mutex_unlock(&vf->cfg_lock);
}
/**
@@ -4902,6 +4904,8 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
return -EINVAL;
}
+ mutex_lock(&vf->cfg_lock);
+
/* VF is notified of its new MAC via the PF's response to the
* VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
*/
@@ -4920,6 +4924,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
}
ice_vc_reset_vf(vf);
+ mutex_unlock(&vf->cfg_lock);
return 0;
}
@@ -4954,11 +4959,15 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
if (trusted == vf->trusted)
return 0;
+ mutex_lock(&vf->cfg_lock);
+
vf->trusted = trusted;
ice_vc_reset_vf(vf);
dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
vf_id, trusted ? "" : "un");
+ mutex_unlock(&vf->cfg_lock);
+
return 0;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 5ff93a08f54c94..7e28ecbbe7af0d 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -100,6 +100,11 @@ struct ice_vc_vf_ops {
struct ice_vf {
struct ice_pf *pf;
+ /* Used during virtchnl message handling and NDO ops against the VF
+ * that will trigger a VFR
+ */
+ struct mutex cfg_lock;
+
u16 vf_id; /* VF ID in the PF space */
u16 lan_vsi_idx; /* index into PF struct */
u16 ctrl_vsi_idx;
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 2258e3f1916107..6433c909c6b26d 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -262,7 +262,7 @@ ltq_etop_hw_init(struct net_device *dev)
/* enable crc generation */
ltq_etop_w32(PPE32_CGEN, LQ_PPE32_ENET_MAC_CFG);
- ltq_dma_init_port(DMA_PORT_ETOP, priv->tx_burst_len, rx_burst_len);
+ ltq_dma_init_port(DMA_PORT_ETOP, priv->tx_burst_len, priv->rx_burst_len);
for (i = 0; i < MAX_DMA_CHAN; i++) {
int irq = LTQ_DMA_CH0_INT + i;
diff --git a/drivers/net/ethernet/litex/litex_liteeth.c b/drivers/net/ethernet/litex/litex_liteeth.c
index 3d9385a4989b7a..fdd99f0de424dc 100644
--- a/drivers/net/ethernet/litex/litex_liteeth.c
+++ b/drivers/net/ethernet/litex/litex_liteeth.c
@@ -242,10 +242,8 @@ static int liteeth_probe(struct platform_device *pdev)
priv->dev = &pdev->dev;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "Failed to get IRQ %d\n", irq);
+ if (irq < 0)
return irq;
- }
netdev->irq = irq;
priv->base = devm_platform_ioremap_resource_byname(pdev, "mac");
@@ -289,7 +287,6 @@ static int liteeth_remove(struct platform_device *pdev)
struct net_device *netdev = platform_get_drvdata(pdev);
unregister_netdev(netdev);
- free_netdev(netdev);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 587def69a6f75c..2b18d89d9756d8 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -1605,7 +1605,7 @@ static void mvpp22_gop_fca_set_periodic_timer(struct mvpp2_port *port)
mvpp22_gop_fca_enable_periodic(port, true);
}
-static int mvpp22_gop_init(struct mvpp2_port *port)
+static int mvpp22_gop_init(struct mvpp2_port *port, phy_interface_t interface)
{
struct mvpp2 *priv = port->priv;
u32 val;
@@ -1613,7 +1613,7 @@ static int mvpp22_gop_init(struct mvpp2_port *port)
if (!priv->sysctrl_base)
return 0;
- switch (port->phy_interface) {
+ switch (interface) {
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
@@ -1743,15 +1743,15 @@ static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
* lanes by the physical layer. This is why configurations like
* "PPv2 (2500BaseX) - COMPHY (2500SGMII)" are valid.
*/
-static int mvpp22_comphy_init(struct mvpp2_port *port)
+static int mvpp22_comphy_init(struct mvpp2_port *port,
+ phy_interface_t interface)
{
int ret;
if (!port->comphy)
return 0;
- ret = phy_set_mode_ext(port->comphy, PHY_MODE_ETHERNET,
- port->phy_interface);
+ ret = phy_set_mode_ext(port->comphy, PHY_MODE_ETHERNET, interface);
if (ret)
return ret;
@@ -2172,7 +2172,8 @@ static void mvpp22_pcs_reset_assert(struct mvpp2_port *port)
writel(val & ~MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
}
-static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port)
+static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port,
+ phy_interface_t interface)
{
struct mvpp2 *priv = port->priv;
void __iomem *mpcs, *xpcs;
@@ -2184,7 +2185,7 @@ static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port)
mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
- switch (port->phy_interface) {
+ switch (interface) {
case PHY_INTERFACE_MODE_10GBASER:
val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX |
@@ -4529,7 +4530,8 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
return rx_done;
}
-static void mvpp22_mode_reconfigure(struct mvpp2_port *port)
+static void mvpp22_mode_reconfigure(struct mvpp2_port *port,
+ phy_interface_t interface)
{
u32 ctrl3;
@@ -4540,18 +4542,18 @@ static void mvpp22_mode_reconfigure(struct mvpp2_port *port)
mvpp22_pcs_reset_assert(port);
/* comphy reconfiguration */
- mvpp22_comphy_init(port);
+ mvpp22_comphy_init(port, interface);
/* gop reconfiguration */
- mvpp22_gop_init(port);
+ mvpp22_gop_init(port, interface);
- mvpp22_pcs_reset_deassert(port);
+ mvpp22_pcs_reset_deassert(port, interface);
if (mvpp2_port_supports_xlg(port)) {
ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG);
ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
- if (mvpp2_is_xlg(port->phy_interface))
+ if (mvpp2_is_xlg(interface))
ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
else
ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
@@ -4559,7 +4561,7 @@ static void mvpp22_mode_reconfigure(struct mvpp2_port *port)
writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG);
}
- if (mvpp2_port_supports_xlg(port) && mvpp2_is_xlg(port->phy_interface))
+ if (mvpp2_port_supports_xlg(port) && mvpp2_is_xlg(interface))
mvpp2_xlg_max_rx_size_set(port);
else
mvpp2_gmac_max_rx_size_set(port);
@@ -4579,7 +4581,7 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
mvpp2_interrupts_enable(port);
if (port->priv->hw_version >= MVPP22)
- mvpp22_mode_reconfigure(port);
+ mvpp22_mode_reconfigure(port, port->phy_interface);
if (port->phylink) {
phylink_start(port->phylink);
@@ -6444,6 +6446,9 @@ static int mvpp2__mac_prepare(struct phylink_config *config, unsigned int mode,
mvpp22_gop_mask_irq(port);
phy_power_off(port->comphy);
+
+ /* Reconfigure the serdes lanes */
+ mvpp22_mode_reconfigure(port, interface);
}
}
@@ -6498,9 +6503,6 @@ static int mvpp2_mac_finish(struct phylink_config *config, unsigned int mode,
port->phy_interface != interface) {
port->phy_interface = interface;
- /* Reconfigure the serdes lanes */
- mvpp22_mode_reconfigure(port);
-
/* Unmask interrupts */
mvpp22_gop_unmask_irq(port);
}
@@ -6961,7 +6963,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
* driver does this, we can remove this code.
*/
if (port->comphy) {
- err = mvpp22_comphy_init(port);
+ err = mvpp22_comphy_init(port, port->phy_interface);
if (err == 0)
phy_power_off(port->comphy);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig
index 3f982ccf2c85f7..639893d870550c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/Kconfig
+++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig
@@ -31,6 +31,7 @@ config NDC_DIS_DYNAMIC_CACHING
config OCTEONTX2_PF
tristate "Marvell OcteonTX2 NIC Physical Function driver"
select OCTEONTX2_MBOX
+ select NET_DEVLINK
depends on (64BIT && COMPILE_TEST) || ARM64
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index bb6b42bbefa44d..c0005a1feee69f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -2450,9 +2450,7 @@ alloc:
bmap = mcam->bmap_reverse;
start = mcam->bmap_entries - start;
end = mcam->bmap_entries - end;
- index = start;
- start = end;
- end = index;
+ swap(start, end);
} else {
bmap = mcam->bmap;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index e6cb8cd0787d30..78944ad3492ff0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -501,7 +501,7 @@ static const struct net_device_ops otx2vf_netdev_ops = {
.ndo_set_features = otx2vf_set_features,
.ndo_get_stats64 = otx2_get_stats64,
.ndo_tx_timeout = otx2_tx_timeout,
- .ndo_do_ioctl = otx2_ioctl,
+ .ndo_eth_ioctl = otx2_ioctl,
};
static int otx2_wq_init(struct otx2_nic *vf)
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
index 6011454dba71f2..40d5b89573bb0b 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
@@ -499,7 +499,8 @@ static void prestera_port_mdix_get(struct ethtool_link_ksettings *ecmd,
{
struct prestera_port_phy_state *state = &port->state_phy;
- if (prestera_hw_port_phy_mode_get(port, &state->mdix, NULL, NULL, NULL)) {
+ if (prestera_hw_port_phy_mode_get(port,
+ &state->mdix, NULL, NULL, NULL)) {
netdev_warn(port->dev, "MDIX params get failed");
state->mdix = ETH_TP_MDI_INVALID;
}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
index 41ba17cb296573..9b8b1ed474fce1 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_hw.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
@@ -180,108 +180,113 @@ struct prestera_msg_common_resp {
struct prestera_msg_ret ret;
};
-union prestera_msg_switch_param {
- u8 mac[ETH_ALEN];
- __le32 ageing_timeout_ms;
-} __packed;
-
struct prestera_msg_switch_attr_req {
struct prestera_msg_cmd cmd;
__le32 attr;
- union prestera_msg_switch_param param;
+ union {
+ __le32 ageing_timeout_ms;
+ struct {
+ u8 mac[ETH_ALEN];
+ u8 __pad[2];
+ };
+ } param;
};
struct prestera_msg_switch_init_resp {
struct prestera_msg_ret ret;
__le32 port_count;
__le32 mtu_max;
- u8 switch_id;
- u8 lag_max;
- u8 lag_member_max;
__le32 size_tbl_router_nexthop;
-} __packed __aligned(4);
+ u8 switch_id;
+ u8 lag_max;
+ u8 lag_member_max;
+};
struct prestera_msg_event_port_param {
union {
struct {
- u8 oper;
__le32 mode;
__le32 speed;
+ u8 oper;
u8 duplex;
u8 fc;
u8 fec;
- } __packed mac;
+ } mac;
struct {
- u8 mdix;
__le64 lmode_bmap;
+ u8 mdix;
u8 fc;
- } __packed phy;
- } __packed;
-} __packed __aligned(4);
+ u8 __pad[2];
+ } __packed phy; /* make sure it is always 12 bytes in size */
+ };
+};
struct prestera_msg_port_cap_param {
__le64 link_mode;
- u8 type;
- u8 fec;
- u8 fc;
- u8 transceiver;
+ u8 type;
+ u8 fec;
+ u8 fc;
+ u8 transceiver;
};
struct prestera_msg_port_flood_param {
u8 type;
u8 enable;
+ u8 __pad[2];
};
union prestera_msg_port_param {
+ __le32 mtu;
+ __le32 speed;
+ __le32 link_mode;
u8 admin_state;
u8 oper_state;
- __le32 mtu;
u8 mac[ETH_ALEN];
u8 accept_frm_type;
- __le32 speed;
u8 learning;
u8 flood;
- __le32 link_mode;
u8 type;
u8 duplex;
u8 fec;
u8 fc;
-
union {
struct {
- u8 admin:1;
+ u8 admin;
u8 fc;
u8 ap_enable;
+ u8 __reserved[5];
union {
struct {
__le32 mode;
- u8 inband:1;
__le32 speed;
- u8 duplex;
- u8 fec;
- u8 fec_supp;
- } __packed reg_mode;
+ u8 inband;
+ u8 duplex;
+ u8 fec;
+ u8 fec_supp;
+ } reg_mode;
struct {
__le32 mode;
__le32 speed;
- u8 fec;
- u8 fec_supp;
- } __packed ap_modes[PRESTERA_AP_PORT_MAX];
- } __packed;
- } __packed mac;
+ u8 fec;
+ u8 fec_supp;
+ u8 __pad[2];
+ } ap_modes[PRESTERA_AP_PORT_MAX];
+ };
+ } mac;
struct {
- u8 admin:1;
- u8 adv_enable;
__le64 modes;
__le32 mode;
+ u8 admin;
+ u8 adv_enable;
u8 mdix;
- } __packed phy;
- } __packed link;
+ u8 __pad;
+ } phy;
+ } link;
struct prestera_msg_port_cap_param cap;
struct prestera_msg_port_flood_param flood_ext;
struct prestera_msg_event_port_param link_evt;
-} __packed;
+};
struct prestera_msg_port_attr_req {
struct prestera_msg_cmd cmd;
@@ -289,14 +294,12 @@ struct prestera_msg_port_attr_req {
__le32 port;
__le32 dev;
union prestera_msg_port_param param;
-} __packed __aligned(4);
-
+};
struct prestera_msg_port_attr_resp {
struct prestera_msg_ret ret;
union prestera_msg_port_param param;
-} __packed __aligned(4);
-
+};
struct prestera_msg_port_stats_resp {
struct prestera_msg_ret ret;
@@ -313,6 +316,7 @@ struct prestera_msg_port_info_resp {
__le32 hw_id;
__le32 dev_id;
__le16 fp_id;
+ u8 pad[2];
};
struct prestera_msg_vlan_req {
@@ -320,13 +324,13 @@ struct prestera_msg_vlan_req {
__le32 port;
__le32 dev;
__le16 vid;
- u8 is_member;
- u8 is_tagged;
+ u8 is_member;
+ u8 is_tagged;
};
struct prestera_msg_fdb_req {
struct prestera_msg_cmd cmd;
- u8 dest_type;
+ __le32 flush_mode;
union {
struct {
__le32 port;
@@ -334,22 +338,25 @@ struct prestera_msg_fdb_req {
};
__le16 lag_id;
} dest;
- u8 mac[ETH_ALEN];
__le16 vid;
- u8 dynamic;
- __le32 flush_mode;
-} __packed __aligned(4);
+ u8 dest_type;
+ u8 dynamic;
+ u8 mac[ETH_ALEN];
+ u8 __pad[2];
+};
struct prestera_msg_bridge_req {
struct prestera_msg_cmd cmd;
__le32 port;
__le32 dev;
__le16 bridge;
+ u8 pad[2];
};
struct prestera_msg_bridge_resp {
struct prestera_msg_ret ret;
__le16 bridge;
+ u8 pad[2];
};
struct prestera_msg_acl_action {
@@ -359,11 +366,12 @@ struct prestera_msg_acl_action {
struct prestera_msg_acl_match {
__le32 type;
+ __le32 __reserved;
union {
struct {
u8 key;
u8 mask;
- } __packed u8;
+ } u8;
struct {
__le16 key;
__le16 mask;
@@ -379,7 +387,7 @@ struct prestera_msg_acl_match {
struct {
u8 key[ETH_ALEN];
u8 mask[ETH_ALEN];
- } __packed mac;
+ } mac;
} keymask;
};
@@ -408,16 +416,19 @@ struct prestera_msg_acl_ruleset_bind_req {
__le32 port;
__le32 dev;
__le16 ruleset_id;
+ u8 pad[2];
};
struct prestera_msg_acl_ruleset_req {
struct prestera_msg_cmd cmd;
__le16 id;
+ u8 pad[2];
};
struct prestera_msg_acl_ruleset_resp {
struct prestera_msg_ret ret;
__le16 id;
+ u8 pad[2];
};
struct prestera_msg_span_req {
@@ -425,11 +436,13 @@ struct prestera_msg_span_req {
__le32 port;
__le32 dev;
u8 id;
+ u8 pad[3];
};
struct prestera_msg_span_resp {
struct prestera_msg_ret ret;
u8 id;
+ u8 pad[3];
};
struct prestera_msg_stp_req {
@@ -437,12 +450,14 @@ struct prestera_msg_stp_req {
__le32 port;
__le32 dev;
__le16 vid;
- u8 state;
+ u8 state;
+ u8 __pad;
};
struct prestera_msg_rxtx_req {
struct prestera_msg_cmd cmd;
u8 use_sdma;
+ u8 pad[3];
};
struct prestera_msg_rxtx_resp {
@@ -455,12 +470,14 @@ struct prestera_msg_lag_req {
__le32 port;
__le32 dev;
__le16 lag_id;
+ u8 pad[2];
};
struct prestera_msg_cpu_code_counter_req {
struct prestera_msg_cmd cmd;
u8 counter_type;
u8 code;
+ u8 pad[2];
};
struct mvsw_msg_cpu_code_counter_ret {
@@ -485,21 +502,21 @@ union prestera_msg_event_fdb_param {
struct prestera_msg_event_fdb {
struct prestera_msg_event id;
- u8 dest_type;
+ __le32 vid;
union {
__le32 port_id;
__le16 lag_id;
} dest;
- __le32 vid;
union prestera_msg_event_fdb_param param;
-} __packed __aligned(4);
+ u8 dest_type;
+};
-static inline void prestera_hw_build_tests(void)
+static void prestera_hw_build_tests(void)
{
/* check requests */
BUILD_BUG_ON(sizeof(struct prestera_msg_common_req) != 4);
BUILD_BUG_ON(sizeof(struct prestera_msg_switch_attr_req) != 16);
- BUILD_BUG_ON(sizeof(struct prestera_msg_port_attr_req) != 120);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_port_attr_req) != 144);
BUILD_BUG_ON(sizeof(struct prestera_msg_port_info_req) != 8);
BUILD_BUG_ON(sizeof(struct prestera_msg_vlan_req) != 16);
BUILD_BUG_ON(sizeof(struct prestera_msg_fdb_req) != 28);
@@ -516,7 +533,7 @@ static inline void prestera_hw_build_tests(void)
/* check responses */
BUILD_BUG_ON(sizeof(struct prestera_msg_common_resp) != 8);
BUILD_BUG_ON(sizeof(struct prestera_msg_switch_init_resp) != 24);
- BUILD_BUG_ON(sizeof(struct prestera_msg_port_attr_resp) != 112);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_port_attr_resp) != 136);
BUILD_BUG_ON(sizeof(struct prestera_msg_port_stats_resp) != 248);
BUILD_BUG_ON(sizeof(struct prestera_msg_port_info_resp) != 20);
BUILD_BUG_ON(sizeof(struct prestera_msg_bridge_resp) != 12);
@@ -549,9 +566,9 @@ static int __prestera_cmd_ret(struct prestera_switch *sw,
if (err)
return err;
- if (__le32_to_cpu(ret->cmd.type) != PRESTERA_CMD_TYPE_ACK)
+ if (ret->cmd.type != __cpu_to_le32(PRESTERA_CMD_TYPE_ACK))
return -EBADE;
- if (__le32_to_cpu(ret->status) != PRESTERA_CMD_ACK_OK)
+ if (ret->status != __cpu_to_le32(PRESTERA_CMD_ACK_OK))
return -EINVAL;
return 0;
@@ -1344,7 +1361,8 @@ int prestera_hw_port_speed_get(const struct prestera_port *port, u32 *speed)
int prestera_hw_port_autoneg_restart(struct prestera_port *port)
{
struct prestera_msg_port_attr_req req = {
- .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_PHY_AUTONEG_RESTART),
+ .attr =
+ __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_PHY_AUTONEG_RESTART),
.port = __cpu_to_le32(port->hw_id),
.dev = __cpu_to_le32(port->dev_id),
};
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 625b40149facfb..4369a3ffad45b7 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -405,7 +405,8 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
err = prestera_port_cfg_mac_write(port, &cfg_mac);
if (err) {
- dev_err(prestera_dev(sw), "Failed to set port(%u) mac mode\n", id);
+ dev_err(prestera_dev(sw),
+ "Failed to set port(%u) mac mode\n", id);
goto err_port_init;
}
@@ -418,7 +419,8 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
false, 0, 0,
port->cfg_phy.mdix);
if (err) {
- dev_err(prestera_dev(sw), "Failed to set port(%u) phy mode\n", id);
+ dev_err(prestera_dev(sw),
+ "Failed to set port(%u) phy mode\n", id);
goto err_port_init;
}
}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
index d650082496d64c..f538a749ebd4d2 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_pci.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
@@ -411,7 +411,8 @@ static int prestera_fw_cmd_send(struct prestera_fw *fw, int qid,
goto cmd_exit;
}
- memcpy_fromio(out_msg, prestera_fw_cmdq_buf(fw, qid) + in_size, ret_size);
+ memcpy_fromio(out_msg,
+ prestera_fw_cmdq_buf(fw, qid) + in_size, ret_size);
cmd_exit:
prestera_fw_write(fw, PRESTERA_CMDQ_REQ_CTL_REG(qid),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
index adc836b3d857ff..ad63dd45c8fb9d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
@@ -289,7 +289,7 @@ mlx5_lag_create_definer(struct mlx5_lag *ldev, enum netdev_lag_hash hash,
lag_definer = kzalloc(sizeof(*lag_definer), GFP_KERNEL);
if (!lag_definer)
- return ERR_PTR(ENOMEM);
+ return ERR_PTR(-ENOMEM);
match_definer_mask = kvzalloc(MLX5_FLD_SZ_BYTES(match_definer,
match_mask),
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index c96ac81212f74c..636dfef24a6cf2 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -1424,7 +1424,7 @@ static void mana_gd_shutdown(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
- dev_info(&pdev->dev, "Shutdown was calledd\n");
+ dev_info(&pdev->dev, "Shutdown was called\n");
mana_remove(&gc->mana, true);
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index c68837a951f47e..314c9c69eb0e94 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -817,9 +817,7 @@ ef4_realloc_channels(struct ef4_nic *efx, u32 rxq_entries, u32 txq_entries)
efx->rxq_entries = rxq_entries;
efx->txq_entries = txq_entries;
for (i = 0; i < efx->n_channels; i++) {
- channel = efx->channel[i];
- efx->channel[i] = other_channel[i];
- other_channel[i] = channel;
+ swap(efx->channel[i], other_channel[i]);
}
/* Restart buffer table allocation */
@@ -863,9 +861,7 @@ rollback:
efx->rxq_entries = old_rxq_entries;
efx->txq_entries = old_txq_entries;
for (i = 0; i < efx->n_channels; i++) {
- channel = efx->channel[i];
- efx->channel[i] = other_channel[i];
- other_channel[i] = channel;
+ swap(efx->channel[i], other_channel[i]);
}
goto out;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 8160087ee92f2f..1c4ea0b1b845b3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -786,8 +786,6 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
goto disable;
if (qopt->num_entries >= dep)
return -EINVAL;
- if (!qopt->base_time)
- return -ERANGE;
if (!qopt->cycle_time)
return -ERANGE;
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 0c75e0576ee1f0..1ef0aaef5c61cd 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -1299,10 +1299,8 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
if (!ale)
return ERR_PTR(-ENOMEM);
- ale->p0_untag_vid_mask =
- devm_kmalloc_array(params->dev, BITS_TO_LONGS(VLAN_N_VID),
- sizeof(unsigned long),
- GFP_KERNEL);
+ ale->p0_untag_vid_mask = devm_bitmap_zalloc(params->dev, VLAN_N_VID,
+ GFP_KERNEL);
if (!ale->p0_untag_vid_mask)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 2d2dcf70563f32..d55f06120ce7b6 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -420,8 +420,20 @@ static int emac_set_coalesce(struct net_device *ndev,
u32 int_ctrl, num_interrupts = 0;
u32 prescale = 0, addnl_dvdr = 1, coal_intvl = 0;
- if (!coal->rx_coalesce_usecs)
- return -EINVAL;
+ if (!coal->rx_coalesce_usecs) {
+ priv->coal_intvl = 0;
+
+ switch (priv->version) {
+ case EMAC_VERSION_2:
+ emac_ctrl_write(EMAC_DM646X_CMINTCTRL, 0);
+ break;
+ default:
+ emac_ctrl_write(EMAC_CTRL_EWINTTCNT, 0);
+ break;
+ }
+
+ return 0;
+ }
coal_intvl = coal->rx_coalesce_usecs;
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 49f10053a7943a..bfdf89e54752c5 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -672,11 +672,13 @@ static void sixpack_close(struct tty_struct *tty)
del_timer_sync(&sp->tx_t);
del_timer_sync(&sp->resync_t);
- /* Free all 6pack frame buffers. */
+ unregister_netdev(sp->dev);
+
+ /* Free all 6pack frame buffers after unreg. */
kfree(sp->rbuff);
kfree(sp->xbuff);
- unregister_netdev(sp->dev);
+ free_netdev(sp->dev);
}
/* Perform I/O control on an active 6pack channel. */
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 867252a0247b10..e2b332b54f06d2 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -792,13 +792,14 @@ static void mkiss_close(struct tty_struct *tty)
*/
netif_stop_queue(ax->dev);
- /* Free all AX25 frame buffers. */
- kfree(ax->rbuff);
- kfree(ax->xbuff);
-
ax->tty = NULL;
unregister_netdev(ax->dev);
+
+ /* Free all AX25 frame buffers after unreg. */
+ kfree(ax->rbuff);
+ kfree(ax->xbuff);
+
free_netdev(ax->dev);
}
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
index a4de3d2081c5c4..bc50224d43dd3c 100644
--- a/drivers/net/phy/microchip_t1.c
+++ b/drivers/net/phy/microchip_t1.c
@@ -28,6 +28,11 @@
#define LAN87XX_MASK_LINK_UP (0x0004)
#define LAN87XX_MASK_LINK_DOWN (0x0002)
+/* MISC Control 1 Register */
+#define LAN87XX_CTRL_1 (0x11)
+#define LAN87XX_MASK_RGMII_TXC_DLY_EN (0x4000)
+#define LAN87XX_MASK_RGMII_RXC_DLY_EN (0x2000)
+
/* phyaccess nested types */
#define PHYACC_ATTR_MODE_READ 0
#define PHYACC_ATTR_MODE_WRITE 1
@@ -112,6 +117,43 @@ static int access_ereg_modify_changed(struct phy_device *phydev,
return rc;
}
+static int lan87xx_config_rgmii_delay(struct phy_device *phydev)
+{
+ int rc;
+
+ if (!phy_interface_is_rgmii(phydev))
+ return 0;
+
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
+ PHYACC_ATTR_BANK_MISC, LAN87XX_CTRL_1, 0);
+ if (rc < 0)
+ return rc;
+
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ rc &= ~LAN87XX_MASK_RGMII_TXC_DLY_EN;
+ rc &= ~LAN87XX_MASK_RGMII_RXC_DLY_EN;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ rc |= LAN87XX_MASK_RGMII_TXC_DLY_EN;
+ rc |= LAN87XX_MASK_RGMII_RXC_DLY_EN;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ rc &= ~LAN87XX_MASK_RGMII_TXC_DLY_EN;
+ rc |= LAN87XX_MASK_RGMII_RXC_DLY_EN;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ rc |= LAN87XX_MASK_RGMII_TXC_DLY_EN;
+ rc &= ~LAN87XX_MASK_RGMII_RXC_DLY_EN;
+ break;
+ default:
+ return 0;
+ }
+
+ return access_ereg(phydev, PHYACC_ATTR_MODE_WRITE,
+ PHYACC_ATTR_BANK_MISC, LAN87XX_CTRL_1, rc);
+}
+
static int lan87xx_phy_init(struct phy_device *phydev)
{
static const struct access_ereg_val init[] = {
@@ -185,7 +227,7 @@ static int lan87xx_phy_init(struct phy_device *phydev)
return rc;
}
- return 0;
+ return lan87xx_config_rgmii_delay(phydev);
}
static int lan87xx_phy_config_intr(struct phy_device *phydev)
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index a3bfb156c83d7b..beb2b66da13246 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -815,7 +815,12 @@ int phy_ethtool_ksettings_set(struct phy_device *phydev,
phydev->mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
/* Restart the PHY */
- _phy_start_aneg(phydev);
+ if (phy_is_started(phydev)) {
+ phydev->state = PHY_UP;
+ phy_trigger_machine(phydev);
+ } else {
+ _phy_start_aneg(phydev);
+ }
mutex_unlock(&phydev->lock);
return 0;
diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c
index 291fa449993fb5..4daac5fda073cf 100644
--- a/drivers/net/sungem_phy.c
+++ b/drivers/net/sungem_phy.c
@@ -409,7 +409,7 @@ static int genmii_read_link(struct mii_phy *phy)
* though magic-aneg shouldn't prevent this case from occurring
*/
- return 0;
+ return 0;
}
static int generic_suspend(struct mii_phy* phy)
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
index b885a657023598..825e8e5ffb2aed 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
@@ -394,12 +394,10 @@ void ipc_imem_sys_devlink_close(struct iosm_devlink *ipc_devlink)
int boot_check_timeout = BOOT_CHECK_DEFAULT_TIMEOUT;
enum ipc_mem_exec_stage exec_stage;
struct ipc_mem_channel *channel;
- enum ipc_phase curr_phase;
int status = 0;
u32 tail = 0;
channel = ipc_imem->ipc_devlink->devlink_sio.channel;
- curr_phase = ipc_imem->phase;
/* Increase the total wait time to boot_check_timeout */
do {
exec_stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
index 787bcbd290f7b4..a491db46e3bd46 100644
--- a/drivers/nfc/pn533/pn533.c
+++ b/drivers/nfc/pn533/pn533.c
@@ -2216,7 +2216,7 @@ static int pn533_fill_fragment_skbs(struct pn533 *dev, struct sk_buff *skb)
frag = pn533_alloc_skb(dev, frag_size);
if (!frag) {
skb_queue_purge(&dev->fragment_skb);
- break;
+ return -ENOMEM;
}
if (!dev->tgt_mode) {
@@ -2285,7 +2285,7 @@ static int pn533_transceive(struct nfc_dev *nfc_dev,
/* jumbo frame ? */
if (skb->len > PN533_CMD_DATAEXCH_DATA_MAXLEN) {
rc = pn533_fill_fragment_skbs(dev, skb);
- if (rc <= 0)
+ if (rc < 0)
goto error;
skb = skb_dequeue(&dev->fragment_skb);
@@ -2353,7 +2353,7 @@ static int pn533_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb)
/* let's split in multiple chunks if size's too big */
if (skb->len > PN533_CMD_DATAEXCH_DATA_MAXLEN) {
rc = pn533_fill_fragment_skbs(dev, skb);
- if (rc <= 0)
+ if (rc < 0)
goto error;
/* get the first skb */
diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c
index 16ceb763594fc9..d7db1a0e6be125 100644
--- a/drivers/nfc/port100.c
+++ b/drivers/nfc/port100.c
@@ -624,7 +624,7 @@ static void port100_recv_response(struct urb *urb)
break; /* success */
case -ECONNRESET:
case -ENOENT:
- nfc_err(&dev->interface->dev,
+ nfc_dbg(&dev->interface->dev,
"The urb has been canceled (status %d)\n", urb->status);
goto sched_wq;
case -ESHUTDOWN:
@@ -678,7 +678,7 @@ static void port100_recv_ack(struct urb *urb)
break; /* success */
case -ECONNRESET:
case -ENOENT:
- nfc_err(&dev->interface->dev,
+ nfc_dbg(&dev->interface->dev,
"The urb has been stopped (status %d)\n", urb->status);
goto sched_wq;
case -ESHUTDOWN:
@@ -942,7 +942,7 @@ static void port100_send_complete(struct urb *urb)
break; /* success */
case -ECONNRESET:
case -ENOENT:
- nfc_err(&dev->interface->dev,
+ nfc_dbg(&dev->interface->dev,
"The urb has been stopped (status %d)\n", urb->status);
break;
case -ESHUTDOWN:
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index e917bb3652bb3e..93b1411105373c 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -270,7 +270,8 @@ config VMD
config PCIE_BRCMSTB
tristate "Broadcom Brcmstb PCIe host controller"
- depends on ARCH_BRCMSTB || ARCH_BCM2835 || ARCH_BCM4908 || COMPILE_TEST
+ depends on ARCH_BRCMSTB || ARCH_BCM2835 || ARCH_BCM4908 || \
+ BMIPS_GENERIC || COMPILE_TEST
depends on OF
depends on PCI_MSI_IRQ_DOMAIN
default ARCH_BRCMSTB
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index dcefdb42ac463d..a89b7de72dcfa0 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -57,6 +57,29 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
return zpci_deconfigure_device(zdev);
}
+static int reset_slot(struct hotplug_slot *hotplug_slot, bool probe)
+{
+ struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev,
+ hotplug_slot);
+
+ if (zdev->state != ZPCI_FN_STATE_CONFIGURED)
+ return -EIO;
+ /*
+ * We can't take the zdev->lock as reset_slot may be called during
+ * probing and/or device removal, which already happens under the
+ * zdev->lock. Instead, the user should use the higher-level
+ * pci_reset_function() or pci_bus_reset(), which hold the PCI device
+ * lock and prevent concurrent removal. If these functions are not
+ * used, holding the PCI device lock is required.
+ */
+
+ /* As long as the function is configured we can reset */
+ if (probe)
+ return 0;
+
+ return zpci_hot_reset_device(zdev);
+}
+
static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev,
@@ -76,6 +99,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
static const struct hotplug_slot_ops s390_hotplug_slot_ops = {
.enable_slot = enable_slot,
.disable_slot = disable_slot,
+ .reset_slot = reset_slot,
.get_power_status = get_power_status,
.get_adapter_status = get_adapter_status,
};
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 1d7a7c5b530787..0267977c9f178b 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -164,15 +164,13 @@ static ssize_t sriov_vf_total_msix_show(struct device *dev,
char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct pci_driver *pdrv;
u32 vf_total_msix = 0;
device_lock(dev);
- pdrv = to_pci_driver(dev->driver);
- if (!pdrv || !pdrv->sriov_get_vf_total_msix)
+ if (!pdev->driver || !pdev->driver->sriov_get_vf_total_msix)
goto unlock;
- vf_total_msix = pdrv->sriov_get_vf_total_msix(pdev);
+ vf_total_msix = pdev->driver->sriov_get_vf_total_msix(pdev);
unlock:
device_unlock(dev);
return sysfs_emit(buf, "%u\n", vf_total_msix);
@@ -185,7 +183,6 @@ static ssize_t sriov_vf_msix_count_store(struct device *dev,
{
struct pci_dev *vf_dev = to_pci_dev(dev);
struct pci_dev *pdev = pci_physfn(vf_dev);
- struct pci_driver *pdrv;
int val, ret = 0;
if (kstrtoint(buf, 0, &val) < 0)
@@ -195,14 +192,13 @@ static ssize_t sriov_vf_msix_count_store(struct device *dev,
return -EINVAL;
device_lock(&pdev->dev);
- pdrv = to_pci_driver(dev->driver);
- if (!pdrv || !pdrv->sriov_set_msix_vec_count) {
+ if (!pdev->driver || !pdev->driver->sriov_set_msix_vec_count) {
ret = -EOPNOTSUPP;
goto err_pdev;
}
device_lock(&vf_dev->dev);
- if (to_pci_driver(vf_dev->dev.driver)) {
+ if (vf_dev->driver) {
/*
* A driver is already attached to this VF and has configured
* itself based on the current MSI-X vector count. Changing
@@ -212,7 +208,7 @@ static ssize_t sriov_vf_msix_count_store(struct device *dev,
goto err_dev;
}
- ret = pdrv->sriov_set_msix_vec_count(vf_dev, val);
+ ret = pdev->driver->sriov_set_msix_vec_count(vf_dev, val);
err_dev:
device_unlock(&vf_dev->dev);
@@ -379,7 +375,6 @@ static ssize_t sriov_numvfs_store(struct device *dev,
const char *buf, size_t count)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct pci_driver *pdrv;
int ret = 0;
u16 num_vfs;
@@ -395,15 +390,14 @@ static ssize_t sriov_numvfs_store(struct device *dev,
goto exit;
/* is PF driver loaded */
- pdrv = to_pci_driver(dev->driver);
- if (!pdrv) {
+ if (!pdev->driver) {
pci_info(pdev, "no driver bound to device; cannot configure SR-IOV\n");
ret = -ENOENT;
goto exit;
}
/* is PF driver loaded w/callback */
- if (!pdrv->sriov_configure) {
+ if (!pdev->driver->sriov_configure) {
pci_info(pdev, "driver does not support SR-IOV configuration via sysfs\n");
ret = -ENOENT;
goto exit;
@@ -411,7 +405,7 @@ static ssize_t sriov_numvfs_store(struct device *dev,
if (num_vfs == 0) {
/* disable VFs */
- ret = pdrv->sriov_configure(pdev, 0);
+ ret = pdev->driver->sriov_configure(pdev, 0);
goto exit;
}
@@ -423,7 +417,7 @@ static ssize_t sriov_numvfs_store(struct device *dev,
goto exit;
}
- ret = pdrv->sriov_configure(pdev, num_vfs);
+ ret = pdev->driver->sriov_configure(pdev, num_vfs);
if (ret < 0)
goto exit;
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 1d98c974381cfe..588588cfda481b 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -319,10 +319,12 @@ static long local_pci_probe(void *_ddi)
* its remove routine.
*/
pm_runtime_get_sync(dev);
+ pci_dev->driver = pci_drv;
rc = pci_drv->probe(pci_dev, ddi->id);
if (!rc)
return rc;
if (rc < 0) {
+ pci_dev->driver = NULL;
pm_runtime_put_sync(dev);
return rc;
}
@@ -388,6 +390,7 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
* @pci_dev: PCI device being probed
*
* returns 0 on success, else error.
+ * side-effect: pci_dev->driver is set to drv when drv claims pci_dev.
*/
static int __pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev)
{
@@ -454,7 +457,7 @@ static int pci_device_probe(struct device *dev)
static void pci_device_remove(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct pci_driver *drv = to_pci_driver(dev->driver);
+ struct pci_driver *drv = pci_dev->driver;
if (drv->remove) {
pm_runtime_get_sync(dev);
@@ -462,6 +465,7 @@ static void pci_device_remove(struct device *dev)
pm_runtime_put_noidle(dev);
}
pcibios_free_irq(pci_dev);
+ pci_dev->driver = NULL;
pci_iov_remove(pci_dev);
/* Undo the runtime PM settings in local_pci_probe() */
@@ -489,7 +493,7 @@ static void pci_device_remove(struct device *dev)
static void pci_device_shutdown(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct pci_driver *drv = to_pci_driver(dev->driver);
+ struct pci_driver *drv = pci_dev->driver;
pm_runtime_resume(dev);
@@ -585,7 +589,7 @@ static int pci_pm_reenable_device(struct pci_dev *pci_dev)
static int pci_legacy_suspend(struct device *dev, pm_message_t state)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct pci_driver *drv = to_pci_driver(dev->driver);
+ struct pci_driver *drv = pci_dev->driver;
if (drv && drv->suspend) {
pci_power_t prev = pci_dev->current_state;
@@ -626,7 +630,7 @@ static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
static int pci_legacy_resume(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct pci_driver *drv = to_pci_driver(dev->driver);
+ struct pci_driver *drv = pci_dev->driver;
pci_fixup_device(pci_fixup_resume, pci_dev);
@@ -645,7 +649,7 @@ static void pci_pm_default_suspend(struct pci_dev *pci_dev)
static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
{
- struct pci_driver *drv = to_pci_driver(pci_dev->dev.driver);
+ struct pci_driver *drv = pci_dev->driver;
bool ret = drv && (drv->suspend || drv->resume);
/*
@@ -1238,11 +1242,11 @@ static int pci_pm_runtime_suspend(struct device *dev)
int error;
/*
- * If the device has no driver, we leave it in D0, but it may go to
- * D3cold when the bridge above it runtime suspends. Save its
- * config space in case that happens.
+ * If pci_dev->driver is not set (unbound), we leave the device in D0,
+ * but it may go to D3cold when the bridge above it runtime suspends.
+ * Save its config space in case that happens.
*/
- if (!to_pci_driver(dev->driver)) {
+ if (!pci_dev->driver) {
pci_save_state(pci_dev);
return 0;
}
@@ -1299,7 +1303,7 @@ static int pci_pm_runtime_resume(struct device *dev)
*/
pci_restore_standard_config(pci_dev);
- if (!to_pci_driver(dev->driver))
+ if (!pci_dev->driver)
return 0;
pci_fixup_device(pci_fixup_resume_early, pci_dev);
@@ -1318,13 +1322,14 @@ static int pci_pm_runtime_resume(struct device *dev)
static int pci_pm_runtime_idle(struct device *dev)
{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
/*
- * If the device has no driver, it should always remain in D0
- * regardless of the runtime PM status
+ * If pci_dev->driver is not set (unbound), the device should
+ * always remain in D0 regardless of the runtime PM status
*/
- if (!to_pci_driver(dev->driver))
+ if (!pci_dev->driver)
return 0;
if (!pm)
@@ -1431,10 +1436,8 @@ static struct pci_driver pci_compat_driver = {
*/
struct pci_driver *pci_dev_driver(const struct pci_dev *dev)
{
- struct pci_driver *drv = to_pci_driver(dev->dev.driver);
-
- if (drv)
- return drv;
+ if (dev->driver)
+ return dev->driver;
else {
int i;
for (i = 0; i <= PCI_ROM_RESOURCE; i++)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index f2cd11130737c6..3d2fb394986a4e 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -5106,12 +5106,13 @@ static int pci_reset_bus_function(struct pci_dev *dev, bool probe)
return pci_parent_bus_reset(dev, probe);
}
-static void pci_dev_lock(struct pci_dev *dev)
+void pci_dev_lock(struct pci_dev *dev)
{
pci_cfg_access_lock(dev);
/* block PM suspend, driver probe, etc. */
device_lock(&dev->dev);
}
+EXPORT_SYMBOL_GPL(pci_dev_lock);
/* Return 1 on successful lock, 0 on contention */
int pci_dev_trylock(struct pci_dev *dev)
@@ -5135,14 +5136,13 @@ EXPORT_SYMBOL_GPL(pci_dev_unlock);
static void pci_dev_save_and_disable(struct pci_dev *dev)
{
- struct pci_driver *drv = to_pci_driver(dev->dev.driver);
const struct pci_error_handlers *err_handler =
- drv ? drv->err_handler : NULL;
+ dev->driver ? dev->driver->err_handler : NULL;
/*
- * drv->err_handler->reset_prepare() is protected against races
- * with ->remove() by the device lock, which must be held by the
- * caller.
+ * dev->driver->err_handler->reset_prepare() is protected against
+ * races with ->remove() by the device lock, which must be held by
+ * the caller.
*/
if (err_handler && err_handler->reset_prepare)
err_handler->reset_prepare(dev);
@@ -5167,15 +5167,15 @@ static void pci_dev_save_and_disable(struct pci_dev *dev)
static void pci_dev_restore(struct pci_dev *dev)
{
- struct pci_driver *drv = to_pci_driver(dev->dev.driver);
const struct pci_error_handlers *err_handler =
- drv ? drv->err_handler : NULL;
+ dev->driver ? dev->driver->err_handler : NULL;
pci_restore_state(dev);
/*
- * drv->err_handler->reset_done() is protected against races with
- * ->remove() by the device lock, which must be held by the caller.
+ * dev->driver->err_handler->reset_done() is protected against
+ * races with ->remove() by the device lock, which must be held by
+ * the caller.
*/
if (err_handler && err_handler->reset_done)
err_handler->reset_done(dev);
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
index 356b9317297e57..0c5a143025af43 100644
--- a/drivers/pci/pcie/err.c
+++ b/drivers/pci/pcie/err.c
@@ -54,7 +54,7 @@ static int report_error_detected(struct pci_dev *dev,
const struct pci_error_handlers *err_handler;
device_lock(&dev->dev);
- pdrv = to_pci_driver(dev->dev.driver);
+ pdrv = dev->driver;
if (!pci_dev_set_io_state(dev, state) ||
!pdrv ||
!pdrv->err_handler ||
@@ -98,7 +98,7 @@ static int report_mmio_enabled(struct pci_dev *dev, void *data)
const struct pci_error_handlers *err_handler;
device_lock(&dev->dev);
- pdrv = to_pci_driver(dev->dev.driver);
+ pdrv = dev->driver;
if (!pdrv ||
!pdrv->err_handler ||
!pdrv->err_handler->mmio_enabled)
@@ -119,7 +119,7 @@ static int report_slot_reset(struct pci_dev *dev, void *data)
const struct pci_error_handlers *err_handler;
device_lock(&dev->dev);
- pdrv = to_pci_driver(dev->dev.driver);
+ pdrv = dev->driver;
if (!pdrv ||
!pdrv->err_handler ||
!pdrv->err_handler->slot_reset)
@@ -139,7 +139,7 @@ static int report_resume(struct pci_dev *dev, void *data)
const struct pci_error_handlers *err_handler;
device_lock(&dev->dev);
- pdrv = to_pci_driver(dev->dev.driver);
+ pdrv = dev->driver;
if (!pci_dev_set_io_state(dev, pci_channel_io_normal) ||
!pdrv ||
!pdrv->err_handler ||
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index aa29841bbb7989..21e3b05a5153a9 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -476,7 +476,9 @@ config PWM_SAMSUNG
depends on PLAT_SAMSUNG || ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
depends on HAS_IOMEM
help
- Generic PWM framework driver for Samsung.
+ Generic PWM framework driver for Samsung S3C24xx, S3C64xx, S5Pv210
+ and Exynos SoCs.
+ Choose Y here only if you build for such a Samsung SoC.
To compile this driver as a module, choose M here: the module
will be called pwm-samsung.
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 4527f09a5c5049..fb04a439462c74 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -532,6 +532,15 @@ int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state)
struct pwm_chip *chip;
int err;
+ /*
+ * Some low-level drivers' implementations of .apply() make use of
+ * mutexes, and some drivers only return once the new configuration
+ * is active, so calling pwm_apply_state() from atomic context is a
+ * bad idea. Make it explicit that calling this function might sleep.
+ */
+ might_sleep();
+
if (!pwm || !state || !state->period ||
state->duty_cycle > state->period)
return -EINVAL;
diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c
index e748604403cc2a..98b34ea9f38e22 100644
--- a/drivers/pwm/pwm-atmel.c
+++ b/drivers/pwm/pwm-atmel.c
@@ -24,7 +24,6 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c
index dd94c4312a0c53..0a4ff55fad0407 100644
--- a/drivers/pwm/pwm-samsung.c
+++ b/drivers/pwm/pwm-samsung.c
@@ -117,6 +117,20 @@ static inline unsigned int to_tcon_channel(unsigned int channel)
return (channel == 0) ? 0 : (channel + 1);
}
+static void __pwm_samsung_manual_update(struct samsung_pwm_chip *chip,
+ struct pwm_device *pwm)
+{
+ unsigned int tcon_chan = to_tcon_channel(pwm->hwpwm);
+ u32 tcon;
+
+ tcon = readl(chip->base + REG_TCON);
+ tcon |= TCON_MANUALUPDATE(tcon_chan);
+ writel(tcon, chip->base + REG_TCON);
+
+ tcon &= ~TCON_MANUALUPDATE(tcon_chan);
+ writel(tcon, chip->base + REG_TCON);
+}
+
static void pwm_samsung_set_divisor(struct samsung_pwm_chip *pwm,
unsigned int channel, u8 divisor)
{
@@ -276,6 +290,13 @@ static void pwm_samsung_disable(struct pwm_chip *chip, struct pwm_device *pwm)
tcon &= ~TCON_AUTORELOAD(tcon_chan);
writel(tcon, our_chip->base + REG_TCON);
+ /*
+ * In case the PWM is at 100% duty cycle, force a manual
+ * update to prevent the signal from staying high.
+ */
+ if (readl(our_chip->base + REG_TCMPB(pwm->hwpwm)) == (u32)-1U)
+ __pwm_samsung_manual_update(our_chip, pwm);
+
our_chip->disabled_mask |= BIT(pwm->hwpwm);
spin_unlock_irqrestore(&samsung_pwm_lock, flags);
@@ -284,18 +305,11 @@ static void pwm_samsung_disable(struct pwm_chip *chip, struct pwm_device *pwm)
static void pwm_samsung_manual_update(struct samsung_pwm_chip *chip,
struct pwm_device *pwm)
{
- unsigned int tcon_chan = to_tcon_channel(pwm->hwpwm);
- u32 tcon;
unsigned long flags;
spin_lock_irqsave(&samsung_pwm_lock, flags);
- tcon = readl(chip->base + REG_TCON);
- tcon |= TCON_MANUALUPDATE(tcon_chan);
- writel(tcon, chip->base + REG_TCON);
-
- tcon &= ~TCON_MANUALUPDATE(tcon_chan);
- writel(tcon, chip->base + REG_TCON);
+ __pwm_samsung_manual_update(chip, pwm);
spin_unlock_irqrestore(&samsung_pwm_lock, flags);
}
diff --git a/drivers/pwm/pwm-visconti.c b/drivers/pwm/pwm-visconti.c
index af4e37d3e3a670..927c4cbb1daf01 100644
--- a/drivers/pwm/pwm-visconti.c
+++ b/drivers/pwm/pwm-visconti.c
@@ -144,28 +144,17 @@ static int visconti_pwm_probe(struct platform_device *pdev)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
- platform_set_drvdata(pdev, priv);
-
priv->chip.dev = dev;
priv->chip.ops = &visconti_pwm_ops;
priv->chip.npwm = 4;
- ret = pwmchip_add(&priv->chip);
+ ret = devm_pwmchip_add(&pdev->dev, &priv->chip);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "Cannot register visconti PWM\n");
return 0;
}
-static int visconti_pwm_remove(struct platform_device *pdev)
-{
- struct visconti_pwm_chip *priv = platform_get_drvdata(pdev);
-
- pwmchip_remove(&priv->chip);
-
- return 0;
-}
-
static const struct of_device_id visconti_pwm_of_match[] = {
{ .compatible = "toshiba,visconti-pwm", },
{ }
@@ -178,7 +167,6 @@ static struct platform_driver visconti_pwm_driver = {
.of_match_table = visconti_pwm_of_match,
},
.probe = visconti_pwm_probe,
- .remove = visconti_pwm_remove,
};
module_platform_driver(visconti_pwm_driver);
diff --git a/drivers/pwm/pwm-vt8500.c b/drivers/pwm/pwm-vt8500.c
index ea2aa151080a48..480bfc29782fed 100644
--- a/drivers/pwm/pwm-vt8500.c
+++ b/drivers/pwm/pwm-vt8500.c
@@ -56,7 +56,7 @@ struct vt8500_chip {
#define to_vt8500_chip(chip) container_of(chip, struct vt8500_chip, chip)
#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
-static inline void pwm_busy_wait(struct vt8500_chip *vt8500, int nr, u8 bitmask)
+static inline void vt8500_pwm_busy_wait(struct vt8500_chip *vt8500, int nr, u8 bitmask)
{
int loops = msecs_to_loops(10);
u32 mask = bitmask << (nr << 8);
@@ -106,18 +106,18 @@ static int vt8500_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
dc = c;
writel(prescale, vt8500->base + REG_SCALAR(pwm->hwpwm));
- pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_SCALAR_UPDATE);
+ vt8500_pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_SCALAR_UPDATE);
writel(pv, vt8500->base + REG_PERIOD(pwm->hwpwm));
- pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_PERIOD_UPDATE);
+ vt8500_pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_PERIOD_UPDATE);
writel(dc, vt8500->base + REG_DUTY(pwm->hwpwm));
- pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_DUTY_UPDATE);
+ vt8500_pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_DUTY_UPDATE);
val = readl(vt8500->base + REG_CTRL(pwm->hwpwm));
val |= CTRL_AUTOLOAD;
writel(val, vt8500->base + REG_CTRL(pwm->hwpwm));
- pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_CTRL_UPDATE);
+ vt8500_pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_CTRL_UPDATE);
clk_disable(vt8500->clk);
return 0;
@@ -138,7 +138,7 @@ static int vt8500_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
val = readl(vt8500->base + REG_CTRL(pwm->hwpwm));
val |= CTRL_ENABLE;
writel(val, vt8500->base + REG_CTRL(pwm->hwpwm));
- pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_CTRL_UPDATE);
+ vt8500_pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_CTRL_UPDATE);
return 0;
}
@@ -151,7 +151,7 @@ static void vt8500_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
val = readl(vt8500->base + REG_CTRL(pwm->hwpwm));
val &= ~CTRL_ENABLE;
writel(val, vt8500->base + REG_CTRL(pwm->hwpwm));
- pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_CTRL_UPDATE);
+ vt8500_pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_CTRL_UPDATE);
clk_disable(vt8500->clk);
}
@@ -171,7 +171,7 @@ static int vt8500_pwm_set_polarity(struct pwm_chip *chip,
val &= ~CTRL_INVERT;
writel(val, vt8500->base + REG_CTRL(pwm->hwpwm));
- pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_CTRL_UPDATE);
+ vt8500_pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_CTRL_UPDATE);
return 0;
}
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 7208eeb8459ac2..058e56a10ab815 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -441,6 +441,7 @@ config RTC_DRV_X1205
config RTC_DRV_PCF8523
tristate "NXP PCF8523"
+ select REGMAP_I2C
help
If you say yes here you get support for the NXP PCF8523 RTC
chips.
@@ -582,14 +583,6 @@ config RTC_DRV_TPS65910
This driver can also be built as a module. If so, the module
will be called rtc-tps65910.
-config RTC_DRV_TPS80031
- tristate "TI TPS80031/TPS80032 RTC driver"
- depends on MFD_TPS80031
- help
- TI Power Management IC TPS80031 supports RTC functionality
- along with alarm. This driver supports the RTC driver for
- the TPS80031 RTC module.
-
config RTC_DRV_RC5T583
tristate "RICOH 5T583 RTC driver"
depends on MFD_RC5T583
@@ -1929,4 +1922,14 @@ config RTC_DRV_WILCO_EC
This can also be built as a module. If so, the module will
be named "rtc_wilco_ec".
+config RTC_DRV_MSC313
+ tristate "MStar MSC313 RTC"
+ depends on ARCH_MSTARV7 || COMPILE_TEST
+ help
+ If you say yes here you get support for the MStar MSC313e On-Chip
+ Real Time Clock.
+
+ This driver can also be built as a module. If so, the module
+ will be called "rtc-msc313".
+
endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 5ceeafe4d5b23a..678a8ef4abae77 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -103,6 +103,7 @@ obj-$(CONFIG_RTC_DRV_MCP795) += rtc-mcp795.o
obj-$(CONFIG_RTC_DRV_MESON) += rtc-meson.o
obj-$(CONFIG_RTC_DRV_MOXART) += rtc-moxart.o
obj-$(CONFIG_RTC_DRV_MPC5121) += rtc-mpc5121.o
+obj-$(CONFIG_RTC_DRV_MSC313) += rtc-msc313.o
obj-$(CONFIG_RTC_DRV_MSM6242) += rtc-msm6242.o
obj-$(CONFIG_RTC_DRV_MT2712) += rtc-mt2712.o
obj-$(CONFIG_RTC_DRV_MT6397) += rtc-mt6397.o
@@ -169,7 +170,6 @@ obj-$(CONFIG_RTC_DRV_TEGRA) += rtc-tegra.o
obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o
obj-$(CONFIG_RTC_DRV_TPS6586X) += rtc-tps6586x.o
obj-$(CONFIG_RTC_DRV_TPS65910) += rtc-tps65910.o
-obj-$(CONFIG_RTC_DRV_TPS80031) += rtc-tps80031.o
obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl.o
obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o
obj-$(CONFIG_RTC_DRV_VR41XX) += rtc-vr41xx.o
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index f77bc089eb6b75..4b460c61f1d8c4 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -232,6 +232,7 @@ static struct rtc_device *rtc_allocate_device(void)
rtc->pie_enabled = 0;
set_bit(RTC_FEATURE_ALARM, rtc->features);
+ set_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
return rtc;
}
@@ -334,7 +335,8 @@ static void devm_rtc_unregister_device(void *data)
* letting any rtc_class_open() users access it again
*/
rtc_proc_del_device(rtc);
- cdev_device_del(&rtc->char_dev, &rtc->dev);
+ if (!test_bit(RTC_NO_CDEV, &rtc->flags))
+ cdev_device_del(&rtc->char_dev, &rtc->dev);
rtc->ops = NULL;
mutex_unlock(&rtc->ops_lock);
}
@@ -363,7 +365,9 @@ struct rtc_device *devm_rtc_allocate_device(struct device *dev)
rtc->id = id;
rtc->dev.parent = dev;
- dev_set_name(&rtc->dev, "rtc%d", id);
+ err = dev_set_name(&rtc->dev, "rtc%d", id);
+ if (err)
+ return ERR_PTR(err);
err = devm_add_action_or_reset(dev, devm_rtc_release_device, rtc);
if (err)
@@ -386,6 +390,12 @@ int __devm_rtc_register_device(struct module *owner, struct rtc_device *rtc)
if (!rtc->ops->set_alarm)
clear_bit(RTC_FEATURE_ALARM, rtc->features);
+ if (rtc->uie_unsupported)
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
+
+ if (rtc->ops->set_offset)
+ set_bit(RTC_FEATURE_CORRECTION, rtc->features);
+
rtc->owner = owner;
rtc_device_get_offset(rtc);
@@ -397,12 +407,14 @@ int __devm_rtc_register_device(struct module *owner, struct rtc_device *rtc)
rtc_dev_prepare(rtc);
err = cdev_device_add(&rtc->char_dev, &rtc->dev);
- if (err)
+ if (err) {
+ set_bit(RTC_NO_CDEV, &rtc->flags);
dev_warn(rtc->dev.parent, "failed to add char device %d:%d\n",
MAJOR(rtc->dev.devt), rtc->id);
- else
+ } else {
dev_dbg(rtc->dev.parent, "char device (%d:%d)\n",
MAJOR(rtc->dev.devt), rtc->id);
+ }
rtc_proc_add_device(rtc);
diff --git a/drivers/rtc/dev.c b/drivers/rtc/dev.c
index 5b8ebe86124a90..e104972a28fdfd 100644
--- a/drivers/rtc/dev.c
+++ b/drivers/rtc/dev.c
@@ -208,6 +208,7 @@ static long rtc_dev_ioctl(struct file *file,
const struct rtc_class_ops *ops = rtc->ops;
struct rtc_time tm;
struct rtc_wkalrm alarm;
+ struct rtc_param param;
void __user *uarg = (void __user *)arg;
err = mutex_lock_interruptible(&rtc->ops_lock);
@@ -221,6 +222,7 @@ static long rtc_dev_ioctl(struct file *file,
switch (cmd) {
case RTC_EPOCH_SET:
case RTC_SET_TIME:
+ case RTC_PARAM_SET:
if (!capable(CAP_SYS_TIME))
err = -EACCES;
break;
@@ -382,6 +384,69 @@ static long rtc_dev_ioctl(struct file *file,
err = -EFAULT;
return err;
+ case RTC_PARAM_GET:
+ if (copy_from_user(&param, uarg, sizeof(param))) {
+ mutex_unlock(&rtc->ops_lock);
+ return -EFAULT;
+ }
+
+ switch (param.param) {
+ long offset;
+ case RTC_PARAM_FEATURES:
+ if (param.index != 0)
+ err = -EINVAL;
+ param.uvalue = rtc->features[0];
+ break;
+
+ case RTC_PARAM_CORRECTION:
+ mutex_unlock(&rtc->ops_lock);
+ if (param.index != 0)
+ return -EINVAL;
+ err = rtc_read_offset(rtc, &offset);
+ mutex_lock(&rtc->ops_lock);
+ if (err == 0)
+ param.svalue = offset;
+ break;
+
+ default:
+ if (rtc->ops->param_get)
+ err = rtc->ops->param_get(rtc->dev.parent, &param);
+ else
+ err = -EINVAL;
+ }
+
+ if (!err)
+ if (copy_to_user(uarg, &param, sizeof(param)))
+ err = -EFAULT;
+
+ break;
+
+ case RTC_PARAM_SET:
+ if (copy_from_user(&param, uarg, sizeof(param))) {
+ mutex_unlock(&rtc->ops_lock);
+ return -EFAULT;
+ }
+
+ switch (param.param) {
+ case RTC_PARAM_FEATURES:
+ err = -EINVAL;
+ break;
+
+ case RTC_PARAM_CORRECTION:
+ mutex_unlock(&rtc->ops_lock);
+ if (param.index != 0)
+ return -EINVAL;
+ return rtc_set_offset(rtc, param.svalue);
+
+ default:
+ if (rtc->ops->param_set)
+ err = rtc->ops->param_set(rtc->dev.parent, &param);
+ else
+ err = -EINVAL;
+ }
+
+ break;
+
default:
/* Finally try the driver's ioctl interface */
if (ops->ioctl) {
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 9a2bd4947007c7..d8e83579815373 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -423,6 +423,7 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
if (err)
return err;
now = rtc_tm_to_time64(&tm);
+
if (scheduled <= now)
return -ETIME;
/*
@@ -447,6 +448,7 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
+ ktime_t alarm_time;
int err;
if (!rtc->ops)
@@ -468,7 +470,15 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
if (rtc->aie_timer.enabled)
rtc_timer_remove(rtc, &rtc->aie_timer);
- rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
+ alarm_time = rtc_tm_to_ktime(alarm->time);
+ /*
+ * Round down so we never miss a deadline; checking for a past
+ * deadline is done in __rtc_set_alarm().
+ */
+ if (test_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->features))
+ alarm_time = ktime_sub_ns(alarm_time, (u64)alarm->time.tm_sec * NSEC_PER_SEC);
+
+ rtc->aie_timer.node.expires = alarm_time;
rtc->aie_timer.period = 0;
if (alarm->enabled)
err = rtc_timer_enqueue(rtc, &rtc->aie_timer);
@@ -561,7 +571,8 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
if (rtc->uie_rtctimer.enabled == enabled)
goto out;
- if (rtc->uie_unsupported || !test_bit(RTC_FEATURE_ALARM, rtc->features)) {
+ if (!test_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features) ||
+ !test_bit(RTC_FEATURE_ALARM, rtc->features)) {
mutex_unlock(&rtc->ops_lock);
#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
return rtc_dev_update_irq_enable_emul(rtc, enabled);
diff --git a/drivers/rtc/rtc-ab-eoz9.c b/drivers/rtc/rtc-ab-eoz9.c
index a9b355510cd47d..e188ab517f1ed2 100644
--- a/drivers/rtc/rtc-ab-eoz9.c
+++ b/drivers/rtc/rtc-ab-eoz9.c
@@ -534,7 +534,6 @@ static int abeoz9_probe(struct i2c_client *client,
data->rtc->ops = &rtc_ops;
data->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
data->rtc->range_max = RTC_TIMESTAMP_END_2099;
- data->rtc->uie_unsupported = 1;
clear_bit(RTC_FEATURE_ALARM, data->rtc->features);
if (client->irq > 0) {
@@ -546,6 +545,8 @@ static int abeoz9_probe(struct i2c_client *client,
dev_err(dev, "failed to request alarm irq\n");
return ret;
}
+ } else {
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, data->rtc->features);
}
if (client->irq > 0 || device_property_read_bool(dev, "wakeup-source")) {
diff --git a/drivers/rtc/rtc-ab8500.c b/drivers/rtc/rtc-ab8500.c
index b40048871295d1..ea33e149d54590 100644
--- a/drivers/rtc/rtc-ab8500.c
+++ b/drivers/rtc/rtc-ab8500.c
@@ -184,25 +184,9 @@ static int ab8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
int retval, i;
unsigned char buf[ARRAY_SIZE(ab8500_rtc_alarm_regs)];
- unsigned long mins, secs = 0, cursec = 0;
- struct rtc_time curtm;
+ unsigned long mins;
- /* Get the number of seconds since 1970 */
- secs = rtc_tm_to_time64(&alarm->time);
-
- /*
- * Check whether alarm is set less than 1min.
- * Since our RTC doesn't support alarm resolution less than 1min,
- * return -EINVAL, so UIE EMUL can take it up, incase of UIE_ON
- */
- ab8500_rtc_read_time(dev, &curtm); /* Read current time */
- cursec = rtc_tm_to_time64(&curtm);
- if ((secs - cursec) < 59) {
- dev_dbg(dev, "Alarm less than 1 minute not supported\r\n");
- return -EINVAL;
- }
-
- mins = secs / 60;
+ mins = (unsigned long)rtc_tm_to_time64(&alarm->time) / 60;
buf[2] = mins & 0xFF;
buf[1] = (mins >> 8) & 0xFF;
@@ -394,7 +378,8 @@ static int ab8500_rtc_probe(struct platform_device *pdev)
dev_pm_set_wake_irq(&pdev->dev, irq);
platform_set_drvdata(pdev, rtc);
- rtc->uie_unsupported = 1;
+ set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->features);
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
rtc->range_max = (1ULL << 24) * 60 - 1; // 24-bit minutes + 59 secs
rtc->start_secs = RTC_TIMESTAMP_BEGIN_2000;
diff --git a/drivers/rtc/rtc-ds1302.c b/drivers/rtc/rtc-ds1302.c
index b3de6d2e680a40..2f83adef966eb5 100644
--- a/drivers/rtc/rtc-ds1302.c
+++ b/drivers/rtc/rtc-ds1302.c
@@ -199,11 +199,18 @@ static const struct of_device_id ds1302_dt_ids[] = {
MODULE_DEVICE_TABLE(of, ds1302_dt_ids);
#endif
+static const struct spi_device_id ds1302_spi_ids[] = {
+ { .name = "ds1302", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, ds1302_spi_ids);
+
static struct spi_driver ds1302_driver = {
.driver.name = "rtc-ds1302",
.driver.of_match_table = of_match_ptr(ds1302_dt_ids),
.probe = ds1302_probe,
.remove = ds1302_remove,
+ .id_table = ds1302_spi_ids,
};
module_spi_driver(ds1302_driver);
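
This driver (and ds1390, mcp795 and pcf2123 below) previously matched only through the OF table. The SPI core builds its uevent MODALIAS from the spi_device_id name, which for devicetree devices is the compatible string with the vendor prefix stripped, so without a spi_device_id table module autoloading can break and newer SPI cores warn about the missing entry. A minimal sketch of the pattern under assumed names ("vendor,foo-rtc" and the foo_rtc_* identifiers are hypothetical):

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/spi/spi.h>

static const struct of_device_id foo_rtc_of_match[] = {
        { .compatible = "vendor,foo-rtc" },     /* assumed compatible string */
        { }
};
MODULE_DEVICE_TABLE(of, foo_rtc_of_match);

/* Name must match the compatible string with the vendor prefix dropped */
static const struct spi_device_id foo_rtc_spi_ids[] = {
        { .name = "foo-rtc" },
        { }
};
MODULE_DEVICE_TABLE(spi, foo_rtc_spi_ids);

static int foo_rtc_probe(struct spi_device *spi)
{
        return 0;       /* a real driver registers an rtc_device here */
}

static struct spi_driver foo_rtc_driver = {
        .driver = {
                .name = "foo-rtc",
                .of_match_table = foo_rtc_of_match,
        },
        .probe = foo_rtc_probe,
        .id_table = foo_rtc_spi_ids,
};
module_spi_driver(foo_rtc_driver);
MODULE_LICENSE("GPL");
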
diff --git a/drivers/rtc/rtc-ds1390.c b/drivers/rtc/rtc-ds1390.c
index 66fc8617d07ee7..93ce72b9ae59e3 100644
--- a/drivers/rtc/rtc-ds1390.c
+++ b/drivers/rtc/rtc-ds1390.c
@@ -219,12 +219,19 @@ static const struct of_device_id ds1390_of_match[] = {
};
MODULE_DEVICE_TABLE(of, ds1390_of_match);
+static const struct spi_device_id ds1390_spi_ids[] = {
+ { .name = "ds1390" },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ds1390_spi_ids);
+
static struct spi_driver ds1390_driver = {
.driver = {
.name = "rtc-ds1390",
.of_match_table = of_match_ptr(ds1390_of_match),
},
.probe = ds1390_probe,
+ .id_table = ds1390_spi_ids,
};
module_spi_driver(ds1390_driver);
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index f736f8c22e963b..6d383b629d20a2 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -557,7 +557,7 @@ static struct clk *m41t80_sqw_register_clk(struct m41t80_data *m41t80)
* registered automatically when being referenced.
*/
of_node_put(fixed_clock);
- return 0;
+ return NULL;
}
/* First disable the clock */
diff --git a/drivers/rtc/rtc-mcp795.c b/drivers/rtc/rtc-mcp795.c
index bad7792b6ca585..0d515b3df57103 100644
--- a/drivers/rtc/rtc-mcp795.c
+++ b/drivers/rtc/rtc-mcp795.c
@@ -430,12 +430,19 @@ static const struct of_device_id mcp795_of_match[] = {
MODULE_DEVICE_TABLE(of, mcp795_of_match);
#endif
+static const struct spi_device_id mcp795_spi_ids[] = {
+ { .name = "mcp795" },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, mcp795_spi_ids);
+
static struct spi_driver mcp795_driver = {
.driver = {
.name = "rtc-mcp795",
.of_match_table = of_match_ptr(mcp795_of_match),
},
.probe = mcp795_probe,
+ .id_table = mcp795_spi_ids,
};
module_spi_driver(mcp795_driver);
diff --git a/drivers/rtc/rtc-msc313.c b/drivers/rtc/rtc-msc313.c
new file mode 100644
index 00000000000000..f3fde013c4b8b1
--- /dev/null
+++ b/drivers/rtc/rtc-msc313.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Real time clock driver for MStar/SigmaStar ARMv7 SoCs.
+ * Based on the "Real Time Clock driver for msb252x" that was contained
+ * in various MStar kernels.
+ *
+ * (C) 2019 Daniel Palmer
+ * (C) 2021 Romain Perier
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+
+/* Registers */
+#define REG_RTC_CTRL 0x00
+#define REG_RTC_FREQ_CW_L 0x04
+#define REG_RTC_FREQ_CW_H 0x08
+#define REG_RTC_LOAD_VAL_L 0x0C
+#define REG_RTC_LOAD_VAL_H 0x10
+#define REG_RTC_MATCH_VAL_L 0x14
+#define REG_RTC_MATCH_VAL_H 0x18
+#define REG_RTC_STATUS_INT 0x1C
+#define REG_RTC_CNT_VAL_L 0x20
+#define REG_RTC_CNT_VAL_H 0x24
+
+/* Control bits for REG_RTC_CTRL */
+#define SOFT_RSTZ_BIT BIT(0)
+#define CNT_EN_BIT BIT(1)
+#define WRAP_EN_BIT BIT(2)
+#define LOAD_EN_BIT BIT(3)
+#define READ_EN_BIT BIT(4)
+#define INT_MASK_BIT BIT(5)
+#define INT_FORCE_BIT BIT(6)
+#define INT_CLEAR_BIT BIT(7)
+
+/* Control bits for REG_RTC_STATUS_INT */
+#define RAW_INT_BIT BIT(0)
+#define ALM_INT_BIT BIT(1)
+
+struct msc313_rtc {
+ struct rtc_device *rtc_dev;
+ void __iomem *rtc_base;
+};
+
+static int msc313_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct msc313_rtc *priv = dev_get_drvdata(dev);
+ unsigned long seconds;
+
+ seconds = readw(priv->rtc_base + REG_RTC_MATCH_VAL_L)
+ | ((unsigned long)readw(priv->rtc_base + REG_RTC_MATCH_VAL_H) << 16);
+
+ rtc_time64_to_tm(seconds, &alarm->time);
+
+ if (!(readw(priv->rtc_base + REG_RTC_CTRL) & INT_MASK_BIT))
+ alarm->enabled = 1;
+
+ return 0;
+}
+
+static int msc313_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct msc313_rtc *priv = dev_get_drvdata(dev);
+ u16 reg;
+
+ reg = readw(priv->rtc_base + REG_RTC_CTRL);
+ if (enabled)
+ reg &= ~INT_MASK_BIT;
+ else
+ reg |= INT_MASK_BIT;
+ writew(reg, priv->rtc_base + REG_RTC_CTRL);
+ return 0;
+}
+
+static int msc313_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct msc313_rtc *priv = dev_get_drvdata(dev);
+ unsigned long seconds;
+
+ seconds = rtc_tm_to_time64(&alarm->time);
+ writew((seconds & 0xFFFF), priv->rtc_base + REG_RTC_MATCH_VAL_L);
+ writew((seconds >> 16) & 0xFFFF, priv->rtc_base + REG_RTC_MATCH_VAL_H);
+
+ msc313_rtc_alarm_irq_enable(dev, alarm->enabled);
+
+ return 0;
+}
+
+static bool msc313_rtc_get_enabled(struct msc313_rtc *priv)
+{
+ return readw(priv->rtc_base + REG_RTC_CTRL) & CNT_EN_BIT;
+}
+
+static void msc313_rtc_set_enabled(struct msc313_rtc *priv)
+{
+ u16 reg;
+
+ reg = readw(priv->rtc_base + REG_RTC_CTRL);
+ reg |= CNT_EN_BIT;
+ writew(reg, priv->rtc_base + REG_RTC_CTRL);
+}
+
+static int msc313_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct msc313_rtc *priv = dev_get_drvdata(dev);
+ u32 seconds;
+ u16 reg;
+
+ if (!msc313_rtc_get_enabled(priv))
+ return -EINVAL;
+
+ reg = readw(priv->rtc_base + REG_RTC_CTRL);
+ writew(reg | READ_EN_BIT, priv->rtc_base + REG_RTC_CTRL);
+
+ /* Wait for HW latch done */
+ while (readw(priv->rtc_base + REG_RTC_CTRL) & READ_EN_BIT)
+ udelay(1);
+
+ seconds = readw(priv->rtc_base + REG_RTC_CNT_VAL_L)
+ | ((unsigned long)readw(priv->rtc_base + REG_RTC_CNT_VAL_H) << 16);
+
+ rtc_time64_to_tm(seconds, tm);
+
+ return 0;
+}
+
+static int msc313_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct msc313_rtc *priv = dev_get_drvdata(dev);
+ unsigned long seconds;
+ u16 reg;
+
+ seconds = rtc_tm_to_time64(tm);
+ writew(seconds & 0xFFFF, priv->rtc_base + REG_RTC_LOAD_VAL_L);
+ writew((seconds >> 16) & 0xFFFF, priv->rtc_base + REG_RTC_LOAD_VAL_H);
+
+ /* Enable load for loading value into internal RTC counter */
+ reg = readw(priv->rtc_base + REG_RTC_CTRL);
+ writew(reg | LOAD_EN_BIT, priv->rtc_base + REG_RTC_CTRL);
+
+ /* Wait for HW latch done */
+ while (readw(priv->rtc_base + REG_RTC_CTRL) & LOAD_EN_BIT)
+ udelay(1);
+ msc313_rtc_set_enabled(priv);
+ return 0;
+}
+
+static const struct rtc_class_ops msc313_rtc_ops = {
+ .read_time = msc313_rtc_read_time,
+ .set_time = msc313_rtc_set_time,
+ .read_alarm = msc313_rtc_read_alarm,
+ .set_alarm = msc313_rtc_set_alarm,
+ .alarm_irq_enable = msc313_rtc_alarm_irq_enable,
+};
+
+static irqreturn_t msc313_rtc_interrupt(s32 irq, void *dev_id)
+{
+ struct msc313_rtc *priv = dev_get_drvdata(dev_id);
+ u16 reg;
+
+ reg = readw(priv->rtc_base + REG_RTC_STATUS_INT);
+ if (!(reg & ALM_INT_BIT))
+ return IRQ_NONE;
+
+ reg = readw(priv->rtc_base + REG_RTC_CTRL);
+ reg |= INT_CLEAR_BIT;
+ reg &= ~INT_FORCE_BIT;
+ writew(reg, priv->rtc_base + REG_RTC_CTRL);
+
+ rtc_update_irq(priv->rtc_dev, 1, RTC_IRQF | RTC_AF);
+
+ return IRQ_HANDLED;
+}
+
+static int msc313_rtc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct msc313_rtc *priv;
+ unsigned long rate;
+ struct clk *clk;
+ int ret;
+ int irq;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct msc313_rtc), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->rtc_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->rtc_base))
+ return PTR_ERR(priv->rtc_base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -EINVAL;
+
+ priv->rtc_dev = devm_rtc_allocate_device(dev);
+ if (IS_ERR(priv->rtc_dev))
+ return PTR_ERR(priv->rtc_dev);
+
+ priv->rtc_dev->ops = &msc313_rtc_ops;
+ priv->rtc_dev->range_max = U32_MAX;
+
+ ret = devm_request_irq(dev, irq, msc313_rtc_interrupt, IRQF_SHARED,
+ dev_name(&pdev->dev), &pdev->dev);
+ if (ret) {
+ dev_err(dev, "Could not request IRQ\n");
+ return ret;
+ }
+
+ clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "No input reference clock\n");
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable the reference clock, %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(dev, (void (*) (void *))clk_disable_unprepare, clk);
+ if (ret)
+ return ret;
+
+ rate = clk_get_rate(clk);
+ writew(rate & 0xFFFF, priv->rtc_base + REG_RTC_FREQ_CW_L);
+ writew((rate >> 16) & 0xFFFF, priv->rtc_base + REG_RTC_FREQ_CW_H);
+
+ platform_set_drvdata(pdev, priv);
+
+ return devm_rtc_register_device(priv->rtc_dev);
+}
+
+static const struct of_device_id msc313_rtc_of_match_table[] = {
+ { .compatible = "mstar,msc313-rtc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, msc313_rtc_of_match_table);
+
+static struct platform_driver msc313_rtc_driver = {
+ .probe = msc313_rtc_probe,
+ .driver = {
+ .name = "msc313-rtc",
+ .of_match_table = msc313_rtc_of_match_table,
+ },
+};
+
+module_platform_driver(msc313_rtc_driver);
+
+MODULE_AUTHOR("Daniel Palmer <daniel@thingy.jp>");
+MODULE_AUTHOR("Romain Perier <romain.perier@gmail.com>");
+MODULE_DESCRIPTION("MStar RTC Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index d46e0f0cc50204..4d4f3b1a73093f 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -1029,6 +1029,5 @@ static struct platform_driver omap_rtc_driver = {
module_platform_driver(omap_rtc_driver);
-MODULE_ALIAS("platform:omap_rtc");
MODULE_AUTHOR("George G. Davis (and others)");
MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
index 0f58cac81d8c06..7473e6c8a183bf 100644
--- a/drivers/rtc/rtc-pcf2123.c
+++ b/drivers/rtc/rtc-pcf2123.c
@@ -451,12 +451,21 @@ static const struct of_device_id pcf2123_dt_ids[] = {
MODULE_DEVICE_TABLE(of, pcf2123_dt_ids);
#endif
+static const struct spi_device_id pcf2123_spi_ids[] = {
+ { .name = "pcf2123", },
+ { .name = "rv2123", },
+ { .name = "rtc-pcf2123", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, pcf2123_spi_ids);
+
static struct spi_driver pcf2123_driver = {
.driver = {
.name = "rtc-pcf2123",
.of_match_table = of_match_ptr(pcf2123_dt_ids),
},
.probe = pcf2123_probe,
+ .id_table = pcf2123_spi_ids,
};
module_spi_driver(pcf2123_driver);
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
index 14da4ab3010445..15e50bb10cf004 100644
--- a/drivers/rtc/rtc-pcf85063.c
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -34,6 +34,7 @@
#define PCF85063_REG_CTRL1 0x00 /* status */
#define PCF85063_REG_CTRL1_CAP_SEL BIT(0)
#define PCF85063_REG_CTRL1_STOP BIT(5)
+#define PCF85063_REG_CTRL1_EXT_TEST BIT(7)
#define PCF85063_REG_CTRL2 0x01
#define PCF85063_CTRL2_AF BIT(6)
@@ -117,6 +118,7 @@ static int pcf85063_rtc_set_time(struct device *dev, struct rtc_time *tm)
* reset state until all time/date registers are written
*/
rc = regmap_update_bits(pcf85063->regmap, PCF85063_REG_CTRL1,
+ PCF85063_REG_CTRL1_EXT_TEST |
PCF85063_REG_CTRL1_STOP,
PCF85063_REG_CTRL1_STOP);
if (rc)
@@ -297,7 +299,7 @@ static int pcf85063_ioctl(struct device *dev, unsigned int cmd,
if (ret < 0)
return ret;
- status = status & PCF85063_REG_SC_OS ? RTC_VL_DATA_INVALID : 0;
+ status = (status & PCF85063_REG_SC_OS) ? RTC_VL_DATA_INVALID : 0;
return put_user(status, (unsigned int __user *)arg);
@@ -479,6 +481,18 @@ static struct clk *pcf85063_clkout_register_clk(struct pcf85063 *pcf85063)
struct clk *clk;
struct clk_init_data init;
struct device_node *node = pcf85063->rtc->dev.parent->of_node;
+ struct device_node *fixed_clock;
+
+ fixed_clock = of_get_child_by_name(node, "clock");
+ if (fixed_clock) {
+ /*
+ * skip registering square wave clock when a fixed
+ * clock has been registered. The fixed clock is
+ * registered automatically when being referenced.
+ */
+ of_node_put(fixed_clock);
+ return NULL;
+ }
init.name = "pcf85063-clkout";
init.ops = &pcf85063_clkout_ops;
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
index 8b6fb20774bfe4..c93acade720575 100644
--- a/drivers/rtc/rtc-pcf8523.c
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -4,8 +4,10 @@
*/
#include <linux/bcd.h>
+#include <linux/bitfield.h>
#include <linux/i2c.h>
#include <linux/module.h>
+#include <linux/regmap.h>
#include <linux/rtc.h>
#include <linux/of.h>
#include <linux/pm_wakeirq.h>
@@ -19,11 +21,10 @@
#define PCF8523_CONTROL2_AF BIT(3)
#define PCF8523_REG_CONTROL3 0x02
-#define PCF8523_CONTROL3_PM_BLD BIT(7) /* battery low detection disabled */
-#define PCF8523_CONTROL3_PM_VDD BIT(6) /* switch-over disabled */
-#define PCF8523_CONTROL3_PM_DSM BIT(5) /* direct switching mode */
-#define PCF8523_CONTROL3_PM_MASK 0xe0
+#define PCF8523_CONTROL3_PM GENMASK(7,5)
+#define PCF8523_PM_STANDBY 0x7
#define PCF8523_CONTROL3_BLF BIT(2) /* battery low bit, read-only */
+#define PCF8523_CONTROL3_BSF BIT(3)
#define PCF8523_REG_SECONDS 0x03
#define PCF8523_SECONDS_OS BIT(7)
@@ -48,127 +49,45 @@
struct pcf8523 {
struct rtc_device *rtc;
- struct i2c_client *client;
+ struct regmap *regmap;
};
-static int pcf8523_read(struct i2c_client *client, u8 reg, u8 *valuep)
+static int pcf8523_load_capacitance(struct pcf8523 *pcf8523, struct device_node *node)
{
- struct i2c_msg msgs[2];
- u8 value = 0;
- int err;
-
- msgs[0].addr = client->addr;
- msgs[0].flags = 0;
- msgs[0].len = sizeof(reg);
- msgs[0].buf = &reg;
-
- msgs[1].addr = client->addr;
- msgs[1].flags = I2C_M_RD;
- msgs[1].len = sizeof(value);
- msgs[1].buf = &value;
-
- err = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
- if (err < 0)
- return err;
-
- *valuep = value;
-
- return 0;
-}
-
-static int pcf8523_write(struct i2c_client *client, u8 reg, u8 value)
-{
- u8 buffer[2] = { reg, value };
- struct i2c_msg msg;
- int err;
-
- msg.addr = client->addr;
- msg.flags = 0;
- msg.len = sizeof(buffer);
- msg.buf = buffer;
-
- err = i2c_transfer(client->adapter, &msg, 1);
- if (err < 0)
- return err;
-
- return 0;
-}
-
-static int pcf8523_voltage_low(struct i2c_client *client)
-{
- u8 value;
- int err;
-
- err = pcf8523_read(client, PCF8523_REG_CONTROL3, &value);
- if (err < 0)
- return err;
-
- return !!(value & PCF8523_CONTROL3_BLF);
-}
-
-static int pcf8523_load_capacitance(struct i2c_client *client)
-{
- u32 load;
- u8 value;
- int err;
-
- err = pcf8523_read(client, PCF8523_REG_CONTROL1, &value);
- if (err < 0)
- return err;
+ u32 load, value = 0;
load = 12500;
- of_property_read_u32(client->dev.of_node, "quartz-load-femtofarads",
- &load);
+ of_property_read_u32(node, "quartz-load-femtofarads", &load);
switch (load) {
default:
- dev_warn(&client->dev, "Unknown quartz-load-femtofarads value: %d. Assuming 12500",
+ dev_warn(&pcf8523->rtc->dev, "Unknown quartz-load-femtofarads value: %d. Assuming 12500",
load);
fallthrough;
case 12500:
value |= PCF8523_CONTROL1_CAP_SEL;
break;
case 7000:
- value &= ~PCF8523_CONTROL1_CAP_SEL;
break;
}
- err = pcf8523_write(client, PCF8523_REG_CONTROL1, value);
-
- return err;
-}
-
-static int pcf8523_set_pm(struct i2c_client *client, u8 pm)
-{
- u8 value;
- int err;
-
- err = pcf8523_read(client, PCF8523_REG_CONTROL3, &value);
- if (err < 0)
- return err;
-
- value = (value & ~PCF8523_CONTROL3_PM_MASK) | pm;
-
- err = pcf8523_write(client, PCF8523_REG_CONTROL3, value);
- if (err < 0)
- return err;
-
- return 0;
+ return regmap_update_bits(pcf8523->regmap, PCF8523_REG_CONTROL1,
+ PCF8523_CONTROL1_CAP_SEL, value);
}
static irqreturn_t pcf8523_irq(int irq, void *dev_id)
{
- struct pcf8523 *pcf8523 = i2c_get_clientdata(dev_id);
- u8 value;
+ struct pcf8523 *pcf8523 = dev_id;
+ u32 value;
int err;
- err = pcf8523_read(pcf8523->client, PCF8523_REG_CONTROL2, &value);
+ err = regmap_read(pcf8523->regmap, PCF8523_REG_CONTROL2, &value);
if (err < 0)
return IRQ_HANDLED;
if (value & PCF8523_CONTROL2_AF) {
value &= ~PCF8523_CONTROL2_AF;
- pcf8523_write(pcf8523->client, PCF8523_REG_CONTROL2, value);
+ regmap_write(pcf8523->regmap, PCF8523_REG_CONTROL2, value);
rtc_update_irq(pcf8523->rtc, 1, RTC_IRQF | RTC_AF);
return IRQ_HANDLED;
@@ -177,68 +96,14 @@ static irqreturn_t pcf8523_irq(int irq, void *dev_id)
return IRQ_NONE;
}
-static int pcf8523_stop_rtc(struct i2c_client *client)
-{
- u8 value;
- int err;
-
- err = pcf8523_read(client, PCF8523_REG_CONTROL1, &value);
- if (err < 0)
- return err;
-
- value |= PCF8523_CONTROL1_STOP;
-
- err = pcf8523_write(client, PCF8523_REG_CONTROL1, value);
- if (err < 0)
- return err;
-
- return 0;
-}
-
-static int pcf8523_start_rtc(struct i2c_client *client)
-{
- u8 value;
- int err;
-
- err = pcf8523_read(client, PCF8523_REG_CONTROL1, &value);
- if (err < 0)
- return err;
-
- value &= ~PCF8523_CONTROL1_STOP;
-
- err = pcf8523_write(client, PCF8523_REG_CONTROL1, value);
- if (err < 0)
- return err;
-
- return 0;
-}
-
static int pcf8523_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
- struct i2c_client *client = to_i2c_client(dev);
- u8 start = PCF8523_REG_SECONDS, regs[7];
- struct i2c_msg msgs[2];
+ struct pcf8523 *pcf8523 = dev_get_drvdata(dev);
+ u8 regs[7];
int err;
- err = pcf8523_voltage_low(client);
- if (err < 0) {
- return err;
- } else if (err > 0) {
- dev_err(dev, "low voltage detected, time is unreliable\n");
- return -EINVAL;
- }
-
- msgs[0].addr = client->addr;
- msgs[0].flags = 0;
- msgs[0].len = 1;
- msgs[0].buf = &start;
-
- msgs[1].addr = client->addr;
- msgs[1].flags = I2C_M_RD;
- msgs[1].len = sizeof(regs);
- msgs[1].buf = regs;
-
- err = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ err = regmap_bulk_read(pcf8523->regmap, PCF8523_REG_SECONDS, regs,
+ sizeof(regs));
if (err < 0)
return err;
@@ -258,63 +123,50 @@ static int pcf8523_rtc_read_time(struct device *dev, struct rtc_time *tm)
static int pcf8523_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct i2c_msg msg;
- u8 regs[8];
+ struct pcf8523 *pcf8523 = dev_get_drvdata(dev);
+ u8 regs[7];
int err;
- err = pcf8523_stop_rtc(client);
+ err = regmap_update_bits(pcf8523->regmap, PCF8523_REG_CONTROL1,
+ PCF8523_CONTROL1_STOP, PCF8523_CONTROL1_STOP);
if (err < 0)
return err;
- regs[0] = PCF8523_REG_SECONDS;
/* This will purposely overwrite PCF8523_SECONDS_OS */
- regs[1] = bin2bcd(tm->tm_sec);
- regs[2] = bin2bcd(tm->tm_min);
- regs[3] = bin2bcd(tm->tm_hour);
- regs[4] = bin2bcd(tm->tm_mday);
- regs[5] = tm->tm_wday;
- regs[6] = bin2bcd(tm->tm_mon + 1);
- regs[7] = bin2bcd(tm->tm_year - 100);
-
- msg.addr = client->addr;
- msg.flags = 0;
- msg.len = sizeof(regs);
- msg.buf = regs;
-
- err = i2c_transfer(client->adapter, &msg, 1);
+ regs[0] = bin2bcd(tm->tm_sec);
+ regs[1] = bin2bcd(tm->tm_min);
+ regs[2] = bin2bcd(tm->tm_hour);
+ regs[3] = bin2bcd(tm->tm_mday);
+ regs[4] = tm->tm_wday;
+ regs[5] = bin2bcd(tm->tm_mon + 1);
+ regs[6] = bin2bcd(tm->tm_year - 100);
+
+ err = regmap_bulk_write(pcf8523->regmap, PCF8523_REG_SECONDS, regs,
+ sizeof(regs));
if (err < 0) {
/*
* If the time cannot be set, restart the RTC anyway. Note
* that errors are ignored if the RTC cannot be started so
* that we have a chance to propagate the original error.
*/
- pcf8523_start_rtc(client);
+ regmap_update_bits(pcf8523->regmap, PCF8523_REG_CONTROL1,
+ PCF8523_CONTROL1_STOP, 0);
return err;
}
- return pcf8523_start_rtc(client);
+ return regmap_update_bits(pcf8523->regmap, PCF8523_REG_CONTROL1,
+ PCF8523_CONTROL1_STOP, 0);
}
static int pcf8523_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *tm)
{
- struct i2c_client *client = to_i2c_client(dev);
- u8 start = PCF8523_REG_MINUTE_ALARM, regs[4];
- struct i2c_msg msgs[2];
- u8 value;
+ struct pcf8523 *pcf8523 = dev_get_drvdata(dev);
+ u8 regs[4];
+ u32 value;
int err;
- msgs[0].addr = client->addr;
- msgs[0].flags = 0;
- msgs[0].len = 1;
- msgs[0].buf = &start;
-
- msgs[1].addr = client->addr;
- msgs[1].flags = I2C_M_RD;
- msgs[1].len = sizeof(regs);
- msgs[1].buf = regs;
-
- err = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ err = regmap_bulk_read(pcf8523->regmap, PCF8523_REG_MINUTE_ALARM, regs,
+ sizeof(regs));
if (err < 0)
return err;
@@ -324,12 +176,12 @@ static int pcf8523_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *tm)
tm->time.tm_mday = bcd2bin(regs[2] & 0x3F);
tm->time.tm_wday = bcd2bin(regs[3] & 0x7);
- err = pcf8523_read(client, PCF8523_REG_CONTROL1, &value);
+ err = regmap_read(pcf8523->regmap, PCF8523_REG_CONTROL1, &value);
if (err < 0)
return err;
tm->enabled = !!(value & PCF8523_CONTROL1_AIE);
- err = pcf8523_read(client, PCF8523_REG_CONTROL2, &value);
+ err = regmap_read(pcf8523->regmap, PCF8523_REG_CONTROL2, &value);
if (err < 0)
return err;
tm->pending = !!(value & PCF8523_CONTROL2_AF);
@@ -339,30 +191,16 @@ static int pcf8523_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *tm)
static int pcf8523_irq_enable(struct device *dev, unsigned int enabled)
{
- struct i2c_client *client = to_i2c_client(dev);
- u8 value;
- int err;
-
- err = pcf8523_read(client, PCF8523_REG_CONTROL1, &value);
- if (err < 0)
- return err;
-
- value &= PCF8523_CONTROL1_AIE;
+ struct pcf8523 *pcf8523 = dev_get_drvdata(dev);
- if (enabled)
- value |= PCF8523_CONTROL1_AIE;
-
- err = pcf8523_write(client, PCF8523_REG_CONTROL1, value);
- if (err < 0)
- return err;
-
- return 0;
+ return regmap_update_bits(pcf8523->regmap, PCF8523_REG_CONTROL1,
+ PCF8523_CONTROL1_AIE, enabled ?
+ PCF8523_CONTROL1_AIE : 0);
}
static int pcf8523_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct i2c_msg msg;
+ struct pcf8523 *pcf8523 = dev_get_drvdata(dev);
u8 regs[5];
int err;
@@ -370,7 +208,7 @@ static int pcf8523_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm)
if (err)
return err;
- err = pcf8523_write(client, PCF8523_REG_CONTROL2, 0);
+ err = regmap_write(pcf8523->regmap, PCF8523_REG_CONTROL2, 0);
if (err < 0)
return err;
@@ -382,16 +220,13 @@ static int pcf8523_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm)
rtc_time64_to_tm(alarm_time, &tm->time);
}
- regs[0] = PCF8523_REG_MINUTE_ALARM;
- regs[1] = bin2bcd(tm->time.tm_min);
- regs[2] = bin2bcd(tm->time.tm_hour);
- regs[3] = bin2bcd(tm->time.tm_mday);
- regs[4] = ALARM_DIS;
- msg.addr = client->addr;
- msg.flags = 0;
- msg.len = sizeof(regs);
- msg.buf = regs;
- err = i2c_transfer(client->adapter, &msg, 1);
+ regs[0] = bin2bcd(tm->time.tm_min);
+ regs[1] = bin2bcd(tm->time.tm_hour);
+ regs[2] = bin2bcd(tm->time.tm_mday);
+ regs[3] = ALARM_DIS;
+
+ err = regmap_bulk_write(pcf8523->regmap, PCF8523_REG_MINUTE_ALARM, regs,
+ sizeof(regs));
if (err < 0)
return err;
@@ -401,24 +236,101 @@ static int pcf8523_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm)
return 0;
}
-#ifdef CONFIG_RTC_INTF_DEV
+static int pcf8523_param_get(struct device *dev, struct rtc_param *param)
+{
+ struct pcf8523 *pcf8523 = dev_get_drvdata(dev);
+ int ret;
+
+ switch(param->param) {
+ u32 value;
+
+ case RTC_PARAM_BACKUP_SWITCH_MODE:
+ ret = regmap_read(pcf8523->regmap, PCF8523_REG_CONTROL3, &value);
+ if (ret < 0)
+ return ret;
+
+ value = FIELD_GET(PCF8523_CONTROL3_PM, value);
+
+ switch(value) {
+ case 0x0:
+ case 0x4:
+ param->uvalue = RTC_BSM_LEVEL;
+ break;
+ case 0x1:
+ case 0x5:
+ param->uvalue = RTC_BSM_DIRECT;
+ break;
+ case PCF8523_PM_STANDBY:
+ param->uvalue = RTC_BSM_STANDBY;
+ break;
+ default:
+ param->uvalue = RTC_BSM_DISABLED;
+ }
+
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int pcf8523_param_set(struct device *dev, struct rtc_param *param)
+{
+ struct pcf8523 *pcf8523 = dev_get_drvdata(dev);
+
+ switch(param->param) {
+ u8 mode;
+ case RTC_PARAM_BACKUP_SWITCH_MODE:
+ switch (param->uvalue) {
+ case RTC_BSM_DISABLED:
+ mode = 0x2;
+ break;
+ case RTC_BSM_DIRECT:
+ mode = 0x1;
+ break;
+ case RTC_BSM_LEVEL:
+ mode = 0x0;
+ break;
+ case RTC_BSM_STANDBY:
+ mode = PCF8523_PM_STANDBY;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(pcf8523->regmap, PCF8523_REG_CONTROL3,
+ PCF8523_CONTROL3_PM,
+ FIELD_PREP(PCF8523_CONTROL3_PM, mode));
+
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int pcf8523_rtc_ioctl(struct device *dev, unsigned int cmd,
unsigned long arg)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct pcf8523 *pcf8523 = dev_get_drvdata(dev);
unsigned int flags = 0;
- u8 value;
+ u32 value;
int ret;
switch (cmd) {
case RTC_VL_READ:
- ret = pcf8523_voltage_low(client);
+ ret = regmap_read(pcf8523->regmap, PCF8523_REG_CONTROL3, &value);
if (ret < 0)
return ret;
- if (ret)
+
+ if (value & PCF8523_CONTROL3_BLF)
flags |= RTC_VL_BACKUP_LOW;
- ret = pcf8523_read(client, PCF8523_REG_SECONDS, &value);
+ ret = regmap_read(pcf8523->regmap, PCF8523_REG_SECONDS, &value);
if (ret < 0)
return ret;
@@ -431,18 +343,15 @@ static int pcf8523_rtc_ioctl(struct device *dev, unsigned int cmd,
return -ENOIOCTLCMD;
}
}
-#else
-#define pcf8523_rtc_ioctl NULL
-#endif
static int pcf8523_rtc_read_offset(struct device *dev, long *offset)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct pcf8523 *pcf8523 = dev_get_drvdata(dev);
int err;
- u8 value;
+ u32 value;
s8 val;
- err = pcf8523_read(client, PCF8523_REG_OFFSET, &value);
+ err = regmap_read(pcf8523->regmap, PCF8523_REG_OFFSET, &value);
if (err < 0)
return err;
@@ -455,9 +364,9 @@ static int pcf8523_rtc_read_offset(struct device *dev, long *offset)
static int pcf8523_rtc_set_offset(struct device *dev, long offset)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct pcf8523 *pcf8523 = dev_get_drvdata(dev);
long reg_m0, reg_m1;
- u8 value;
+ u32 value;
reg_m0 = clamp(DIV_ROUND_CLOSEST(offset, 4340), -64L, 63L);
reg_m1 = clamp(DIV_ROUND_CLOSEST(offset, 4069), -64L, 63L);
@@ -467,7 +376,7 @@ static int pcf8523_rtc_set_offset(struct device *dev, long offset)
else
value = (reg_m1 & 0x7f) | PCF8523_OFFSET_MODE;
- return pcf8523_write(client, PCF8523_REG_OFFSET, value);
+ return regmap_write(pcf8523->regmap, PCF8523_REG_OFFSET, value);
}
static const struct rtc_class_ops pcf8523_rtc_ops = {
@@ -479,6 +388,14 @@ static const struct rtc_class_ops pcf8523_rtc_ops = {
.ioctl = pcf8523_rtc_ioctl,
.read_offset = pcf8523_rtc_read_offset,
.set_offset = pcf8523_rtc_set_offset,
+ .param_get = pcf8523_param_get,
+ .param_set = pcf8523_param_set,
+};
+
+static const struct regmap_config regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x13,
};
static int pcf8523_probe(struct i2c_client *client,
@@ -487,6 +404,7 @@ static int pcf8523_probe(struct i2c_client *client,
struct pcf8523 *pcf8523;
struct rtc_device *rtc;
bool wakeup_source = false;
+ u32 value;
int err;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
@@ -496,46 +414,60 @@ static int pcf8523_probe(struct i2c_client *client,
if (!pcf8523)
return -ENOMEM;
+ pcf8523->regmap = devm_regmap_init_i2c(client, &regmap_config);
+ if (IS_ERR(pcf8523->regmap))
+ return PTR_ERR(pcf8523->regmap);
+
i2c_set_clientdata(client, pcf8523);
- pcf8523->client = client;
- err = pcf8523_load_capacitance(client);
+ rtc = devm_rtc_allocate_device(&client->dev);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
+ pcf8523->rtc = rtc;
+
+ err = pcf8523_load_capacitance(pcf8523, client->dev.of_node);
if (err < 0)
dev_warn(&client->dev, "failed to set xtal load capacitance: %d",
err);
- err = pcf8523_set_pm(client, 0);
+ err = regmap_read(pcf8523->regmap, PCF8523_REG_SECONDS, &value);
if (err < 0)
return err;
- rtc = devm_rtc_allocate_device(&client->dev);
- if (IS_ERR(rtc))
- return PTR_ERR(rtc);
+ if (value & PCF8523_SECONDS_OS) {
+ err = regmap_read(pcf8523->regmap, PCF8523_REG_CONTROL3, &value);
+ if (err < 0)
+ return err;
+
+ if (FIELD_GET(PCF8523_CONTROL3_PM, value) == PCF8523_PM_STANDBY) {
+ err = regmap_write(pcf8523->regmap, PCF8523_REG_CONTROL3,
+ value & ~PCF8523_CONTROL3_PM);
+ if (err < 0)
+ return err;
+ }
+ }
- pcf8523->rtc = rtc;
rtc->ops = &pcf8523_rtc_ops;
rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
rtc->range_max = RTC_TIMESTAMP_END_2099;
rtc->uie_unsupported = 1;
if (client->irq > 0) {
- err = pcf8523_write(client, PCF8523_TMR_CLKOUT_CTRL, 0x38);
+ err = regmap_write(pcf8523->regmap, PCF8523_TMR_CLKOUT_CTRL, 0x38);
if (err < 0)
return err;
err = devm_request_threaded_irq(&client->dev, client->irq,
NULL, pcf8523_irq,
IRQF_SHARED | IRQF_ONESHOT | IRQF_TRIGGER_LOW,
- dev_name(&rtc->dev), client);
+ dev_name(&rtc->dev), pcf8523);
if (err)
return err;
dev_pm_set_wake_irq(&client->dev, client->irq);
}
-#ifdef CONFIG_OF
wakeup_source = of_property_read_bool(client->dev.of_node, "wakeup-source");
-#endif
if (client->irq > 0 || wakeup_source)
device_init_wakeup(&client->dev, true);
@@ -548,19 +480,17 @@ static const struct i2c_device_id pcf8523_id[] = {
};
MODULE_DEVICE_TABLE(i2c, pcf8523_id);
-#ifdef CONFIG_OF
static const struct of_device_id pcf8523_of_match[] = {
{ .compatible = "nxp,pcf8523" },
{ .compatible = "microcrystal,rv8523" },
{ }
};
MODULE_DEVICE_TABLE(of, pcf8523_of_match);
-#endif
static struct i2c_driver pcf8523_driver = {
.driver = {
.name = "rtc-pcf8523",
- .of_match_table = of_match_ptr(pcf8523_of_match),
+ .of_match_table = pcf8523_of_match,
},
.probe = pcf8523_probe,
.id_table = pcf8523_id,
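
The pcf8523 conversion replaces open-coded i2c_msg transfers with a byte-oriented regmap and describes the CONTROL3 power-management bits as a single GENMASK(7, 5) field, so the driver can use FIELD_GET()/FIELD_PREP() instead of hand-rolled shifting and masking. A minimal sketch of that access pattern, with register names mirroring the hunk and the pm_mode_* helpers being illustrative:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/regmap.h>

#define REG_CONTROL3    0x02
#define CONTROL3_PM     GENMASK(7, 5)   /* battery switch-over mode */

static int pm_mode_get(struct regmap *map, u8 *mode)
{
        u32 value;
        int ret;

        ret = regmap_read(map, REG_CONTROL3, &value);
        if (ret)
                return ret;

        *mode = FIELD_GET(CONTROL3_PM, value);  /* extract bits 7:5 */
        return 0;
}

static int pm_mode_set(struct regmap *map, u8 mode)
{
        /* Read-modify-write only the PM bits, leaving the other flags untouched */
        return regmap_update_bits(map, REG_CONTROL3, CONTROL3_PM,
                                  FIELD_PREP(CONTROL3_PM, mode));
}
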
diff --git a/drivers/rtc/rtc-rv3028.c b/drivers/rtc/rtc-rv3028.c
index 12c807306893f0..cdc623b3e365bb 100644
--- a/drivers/rtc/rtc-rv3028.c
+++ b/drivers/rtc/rtc-rv3028.c
@@ -10,6 +10,7 @@
#include <linux/clk-provider.h>
#include <linux/bcd.h>
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
@@ -80,6 +81,10 @@
#define RV3028_BACKUP_TCE BIT(5)
#define RV3028_BACKUP_TCR_MASK GENMASK(1,0)
+#define RV3028_BACKUP_BSM GENMASK(3,2)
+
+#define RV3028_BACKUP_BSM_DSM 0x1
+#define RV3028_BACKUP_BSM_LSM 0x3
#define OFFSET_STEP_PPT 953674
@@ -512,6 +517,71 @@ exit_eerd:
}
+static int rv3028_param_get(struct device *dev, struct rtc_param *param)
+{
+ struct rv3028_data *rv3028 = dev_get_drvdata(dev);
+ int ret;
+
+ switch(param->param) {
+ u32 value;
+
+ case RTC_PARAM_BACKUP_SWITCH_MODE:
+ ret = regmap_read(rv3028->regmap, RV3028_BACKUP, &value);
+ if (ret < 0)
+ return ret;
+
+ value = FIELD_GET(RV3028_BACKUP_BSM, value);
+
+ switch(value) {
+ case RV3028_BACKUP_BSM_DSM:
+ param->uvalue = RTC_BSM_DIRECT;
+ break;
+ case RV3028_BACKUP_BSM_LSM:
+ param->uvalue = RTC_BSM_LEVEL;
+ break;
+ default:
+ param->uvalue = RTC_BSM_DISABLED;
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rv3028_param_set(struct device *dev, struct rtc_param *param)
+{
+ struct rv3028_data *rv3028 = dev_get_drvdata(dev);
+
+ switch(param->param) {
+ u8 mode;
+ case RTC_PARAM_BACKUP_SWITCH_MODE:
+ switch (param->uvalue) {
+ case RTC_BSM_DISABLED:
+ mode = 0;
+ break;
+ case RTC_BSM_DIRECT:
+ mode = RV3028_BACKUP_BSM_DSM;
+ break;
+ case RTC_BSM_LEVEL:
+ mode = RV3028_BACKUP_BSM_LSM;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return rv3028_update_cfg(rv3028, RV3028_BACKUP, RV3028_BACKUP_BSM,
+ FIELD_PREP(RV3028_BACKUP_BSM, mode));
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int rv3028_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
{
struct rv3028_data *rv3028 = dev_get_drvdata(dev);
@@ -776,6 +846,8 @@ static const struct rtc_class_ops rv3028_rtc_ops = {
.read_offset = rv3028_read_offset,
.set_offset = rv3028_set_offset,
.ioctl = rv3028_ioctl,
+ .param_get = rv3028_param_get,
+ .param_set = rv3028_param_set,
};
static const struct regmap_config regmap_config = {
@@ -878,6 +950,8 @@ static int rv3028_probe(struct i2c_client *client)
if (ret)
return ret;
+ set_bit(RTC_FEATURE_BACKUP_SWITCH_MODE, rv3028->rtc->features);
+
rv3028->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
rv3028->rtc->range_max = RTC_TIMESTAMP_END_2099;
rv3028->rtc->ops = &rv3028_rtc_ops;
diff --git a/drivers/rtc/rtc-rv3032.c b/drivers/rtc/rtc-rv3032.c
index d63102d5cb1e44..c3bee305eacc6b 100644
--- a/drivers/rtc/rtc-rv3032.c
+++ b/drivers/rtc/rtc-rv3032.c
@@ -106,6 +106,7 @@
struct rv3032_data {
struct regmap *regmap;
struct rtc_device *rtc;
+ bool trickle_charger_set;
#ifdef CONFIG_COMMON_CLK
struct clk_hw clkout_hw;
#endif
@@ -310,14 +311,6 @@ static int rv3032_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
u8 ctrl = 0;
int ret;
- /* The alarm has no seconds, round up to nearest minute */
- if (alrm->time.tm_sec) {
- time64_t alarm_time = rtc_tm_to_time64(&alrm->time);
-
- alarm_time += 60 - alrm->time.tm_sec;
- rtc_time64_to_tm(alarm_time, &alrm->time);
- }
-
ret = regmap_update_bits(rv3032->regmap, RV3032_CTRL2,
RV3032_CTRL2_AIE | RV3032_CTRL2_UIE, 0);
if (ret)
@@ -402,6 +395,75 @@ static int rv3032_set_offset(struct device *dev, long offset)
FIELD_PREP(RV3032_OFFSET_MSK, offset));
}
+static int rv3032_param_get(struct device *dev, struct rtc_param *param)
+{
+ struct rv3032_data *rv3032 = dev_get_drvdata(dev);
+ int ret;
+
+ switch(param->param) {
+ u32 value;
+
+ case RTC_PARAM_BACKUP_SWITCH_MODE:
+ ret = regmap_read(rv3032->regmap, RV3032_PMU, &value);
+ if (ret < 0)
+ return ret;
+
+ value = FIELD_GET(RV3032_PMU_BSM, value);
+
+ switch(value) {
+ case RV3032_PMU_BSM_DSM:
+ param->uvalue = RTC_BSM_DIRECT;
+ break;
+ case RV3032_PMU_BSM_LSM:
+ param->uvalue = RTC_BSM_LEVEL;
+ break;
+ default:
+ param->uvalue = RTC_BSM_DISABLED;
+ }
+
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rv3032_param_set(struct device *dev, struct rtc_param *param)
+{
+ struct rv3032_data *rv3032 = dev_get_drvdata(dev);
+
+ switch(param->param) {
+ u8 mode;
+ case RTC_PARAM_BACKUP_SWITCH_MODE:
+ if (rv3032->trickle_charger_set)
+ return -EINVAL;
+
+ switch (param->uvalue) {
+ case RTC_BSM_DISABLED:
+ mode = 0;
+ break;
+ case RTC_BSM_DIRECT:
+ mode = RV3032_PMU_BSM_DSM;
+ break;
+ case RTC_BSM_LEVEL:
+ mode = RV3032_PMU_BSM_LSM;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return rv3032_update_cfg(rv3032, RV3032_PMU, RV3032_PMU_BSM,
+ FIELD_PREP(RV3032_PMU_BSM, mode));
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int rv3032_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
{
struct rv3032_data *rv3032 = dev_get_drvdata(dev);
@@ -541,6 +603,8 @@ static int rv3032_trickle_charger_setup(struct device *dev, struct rv3032_data *
return 0;
}
+ rv3032->trickle_charger_set = true;
+
return rv3032_update_cfg(rv3032, RV3032_PMU,
RV3032_PMU_TCR | RV3032_PMU_TCM | RV3032_PMU_BSM,
val | FIELD_PREP(RV3032_PMU_TCR, i));
@@ -617,11 +681,11 @@ static int rv3032_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
ret = rv3032_enter_eerd(rv3032, &eerd);
if (ret)
- goto exit_eerd;
+ return ret;
ret = regmap_write(rv3032->regmap, RV3032_CLKOUT1, hfd & 0xff);
if (ret)
- return ret;
+ goto exit_eerd;
ret = regmap_write(rv3032->regmap, RV3032_CLKOUT2, RV3032_CLKOUT2_OS |
FIELD_PREP(RV3032_CLKOUT2_HFD_MSK, hfd >> 8));
@@ -813,6 +877,8 @@ static const struct rtc_class_ops rv3032_rtc_ops = {
.read_alarm = rv3032_get_alarm,
.set_alarm = rv3032_set_alarm,
.alarm_irq_enable = rv3032_alarm_irq_enable,
+ .param_get = rv3032_param_get,
+ .param_set = rv3032_param_set,
};
static const struct regmap_config regmap_config = {
@@ -883,6 +949,9 @@ static int rv3032_probe(struct i2c_client *client)
rv3032_trickle_charger_setup(&client->dev, rv3032);
+ set_bit(RTC_FEATURE_BACKUP_SWITCH_MODE, rv3032->rtc->features);
+ set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rv3032->rtc->features);
+
rv3032->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
rv3032->rtc->range_max = RTC_TIMESTAMP_END_2099;
rv3032->rtc->ops = &rv3032_rtc_ops;
diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c
index 72adef5a5ebe80..0d5ed38bf60cce 100644
--- a/drivers/rtc/rtc-rv8803.c
+++ b/drivers/rtc/rtc-rv8803.c
@@ -340,8 +340,8 @@ static int rv8803_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
}
}
- ctrl[1] &= ~RV8803_FLAG_AF;
- err = rv8803_write_reg(rv8803->client, RV8803_FLAG, ctrl[1]);
+ ctrl[0] &= ~RV8803_FLAG_AF;
+ err = rv8803_write_reg(rv8803->client, RV8803_FLAG, ctrl[0]);
mutex_unlock(&rv8803->flags_lock);
if (err)
return err;
diff --git a/drivers/rtc/rtc-rx6110.c b/drivers/rtc/rtc-rx6110.c
index f4d425002f7fdb..758fd6e11a1544 100644
--- a/drivers/rtc/rtc-rx6110.c
+++ b/drivers/rtc/rtc-rx6110.c
@@ -422,7 +422,7 @@ static struct regmap_config regmap_i2c_config = {
static int rx6110_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct i2c_adapter *adapter = client->adapter;
struct rx6110_data *rx6110;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA
diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
index d38aaf08108c22..5bfdd34a72fffa 100644
--- a/drivers/rtc/rtc-rx8025.c
+++ b/drivers/rtc/rtc-rx8025.c
@@ -248,9 +248,6 @@ static int rx8025_set_time(struct device *dev, struct rtc_time *dt)
u8 date[7];
int ret;
- if ((dt->tm_year < 100) || (dt->tm_year > 199))
- return -EINVAL;
-
/*
* Here the read-only bits are written as "0". I'm not sure if that
* is sound.
@@ -318,9 +315,6 @@ static int rx8025_read_alarm(struct device *dev, struct rtc_wkalrm *t)
u8 ald[2];
int ctrl2, err;
- if (client->irq <= 0)
- return -EINVAL;
-
err = rx8025_read_regs(client, RX8025_REG_ALDMIN, 2, ald);
if (err)
return err;
@@ -355,20 +349,6 @@ static int rx8025_set_alarm(struct device *dev, struct rtc_wkalrm *t)
u8 ald[2];
int err;
- if (client->irq <= 0)
- return -EINVAL;
-
- /*
- * Hardware alarm precision is 1 minute!
- * round up to nearest minute
- */
- if (t->time.tm_sec) {
- time64_t alarm_time = rtc_tm_to_time64(&t->time);
-
- alarm_time += 60 - t->time.tm_sec;
- rtc_time64_to_tm(alarm_time, &t->time);
- }
-
ald[0] = bin2bcd(t->time.tm_min);
if (rx8025->ctrl1 & RX8025_BIT_CTRL1_1224)
ald[1] = bin2bcd(t->time.tm_hour);
@@ -423,17 +403,7 @@ static int rx8025_alarm_irq_enable(struct device *dev, unsigned int enabled)
return 0;
}
-static const struct rtc_class_ops rx8025_rtc_ops = {
- .read_time = rx8025_get_time,
- .set_time = rx8025_set_time,
- .read_alarm = rx8025_read_alarm,
- .set_alarm = rx8025_set_alarm,
- .alarm_irq_enable = rx8025_alarm_irq_enable,
-};
-
/*
- * Clock precision adjustment support
- *
* According to the RX8025 SA/NB application manual the frequency and
* temperature characteristics can be approximated using the following
* equation:
@@ -444,11 +414,8 @@ static const struct rtc_class_ops rx8025_rtc_ops = {
* a : Coefficient = (-35 +-5) * 10**-9
* ut: Ultimate temperature in degree = +25 +-5 degree
* t : Any temperature in degree
- *
- * Note that the clock adjustment in ppb must be entered (which is
- * the negative value of the deviation).
*/
-static int rx8025_get_clock_adjust(struct device *dev, int *adj)
+static int rx8025_read_offset(struct device *dev, long *offset)
{
struct i2c_client *client = to_i2c_client(dev);
int digoff;
@@ -457,63 +424,75 @@ static int rx8025_get_clock_adjust(struct device *dev, int *adj)
if (digoff < 0)
return digoff;
- *adj = digoff >= 64 ? digoff - 128 : digoff;
- if (*adj > 0)
- (*adj)--;
- *adj *= -RX8025_ADJ_RESOLUTION;
+ *offset = digoff >= 64 ? digoff - 128 : digoff;
+ if (*offset > 0)
+ (*offset)--;
+ *offset *= RX8025_ADJ_RESOLUTION;
return 0;
}
-static int rx8025_set_clock_adjust(struct device *dev, int adj)
+static int rx8025_set_offset(struct device *dev, long offset)
{
struct i2c_client *client = to_i2c_client(dev);
u8 digoff;
int err;
- adj /= -RX8025_ADJ_RESOLUTION;
- if (adj > RX8025_ADJ_DATA_MAX)
- adj = RX8025_ADJ_DATA_MAX;
- else if (adj < RX8025_ADJ_DATA_MIN)
- adj = RX8025_ADJ_DATA_MIN;
- else if (adj > 0)
- adj++;
- else if (adj < 0)
- adj += 128;
- digoff = adj;
+ offset /= RX8025_ADJ_RESOLUTION;
+ if (offset > RX8025_ADJ_DATA_MAX)
+ offset = RX8025_ADJ_DATA_MAX;
+ else if (offset < RX8025_ADJ_DATA_MIN)
+ offset = RX8025_ADJ_DATA_MIN;
+ else if (offset > 0)
+ offset++;
+ else if (offset < 0)
+ offset += 128;
+ digoff = offset;
err = rx8025_write_reg(client, RX8025_REG_DIGOFF, digoff);
if (err)
return err;
- dev_dbg(dev, "%s: write 0x%02x\n", __func__, digoff);
-
return 0;
}
+static const struct rtc_class_ops rx8025_rtc_ops = {
+ .read_time = rx8025_get_time,
+ .set_time = rx8025_set_time,
+ .read_alarm = rx8025_read_alarm,
+ .set_alarm = rx8025_set_alarm,
+ .alarm_irq_enable = rx8025_alarm_irq_enable,
+ .read_offset = rx8025_read_offset,
+ .set_offset = rx8025_set_offset,
+};
+
static ssize_t rx8025_sysfs_show_clock_adjust(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- int err, adj;
+ long adj;
+ int err;
- err = rx8025_get_clock_adjust(dev, &adj);
+ dev_warn_once(dev, "clock_adjust_ppb is deprecated, use offset\n");
+ err = rx8025_read_offset(dev, &adj);
if (err)
return err;
- return sprintf(buf, "%d\n", adj);
+ return sprintf(buf, "%ld\n", -adj);
}
static ssize_t rx8025_sysfs_store_clock_adjust(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int adj, err;
+ long adj;
+ int err;
- if (sscanf(buf, "%i", &adj) != 1)
+ dev_warn_once(dev, "clock_adjust_ppb is deprecated, use offset\n");
+ if (kstrtol(buf, 10, &adj) != 0)
return -EINVAL;
- err = rx8025_set_clock_adjust(dev, adj);
+ err = rx8025_set_offset(dev, -adj);
return err ? err : count;
}
@@ -522,15 +501,14 @@ static DEVICE_ATTR(clock_adjust_ppb, S_IRUGO | S_IWUSR,
rx8025_sysfs_show_clock_adjust,
rx8025_sysfs_store_clock_adjust);
-static int rx8025_sysfs_register(struct device *dev)
-{
- return device_create_file(dev, &dev_attr_clock_adjust_ppb);
-}
+static struct attribute *rx8025_attrs[] = {
+ &dev_attr_clock_adjust_ppb.attr,
+ NULL
+};
-static void rx8025_sysfs_unregister(struct device *dev)
-{
- device_remove_file(dev, &dev_attr_clock_adjust_ppb);
-}
+static const struct attribute_group rx8025_attr_group = {
+ .attrs = rx8025_attrs,
+};
static int rx8025_probe(struct i2c_client *client,
const struct i2c_device_id *id)
@@ -559,12 +537,13 @@ static int rx8025_probe(struct i2c_client *client,
if (err)
return err;
- rx8025->rtc = devm_rtc_device_register(&client->dev, client->name,
- &rx8025_rtc_ops, THIS_MODULE);
- if (IS_ERR(rx8025->rtc)) {
- dev_err(&client->dev, "unable to register the class device\n");
+ rx8025->rtc = devm_rtc_allocate_device(&client->dev);
+ if (IS_ERR(rx8025->rtc))
return PTR_ERR(rx8025->rtc);
- }
+
+ rx8025->rtc->ops = &rx8025_rtc_ops;
+ rx8025->rtc->range_min = RTC_TIMESTAMP_BEGIN_1900;
+ rx8025->rtc->range_max = RTC_TIMESTAMP_END_2099;
if (client->irq > 0) {
dev_info(&client->dev, "IRQ %d supplied\n", client->irq);
@@ -572,25 +551,20 @@ static int rx8025_probe(struct i2c_client *client,
rx8025_handle_irq,
IRQF_ONESHOT,
"rx8025", client);
- if (err) {
- dev_err(&client->dev, "unable to request IRQ, alarms disabled\n");
- client->irq = 0;
- }
+ if (err)
+ clear_bit(RTC_FEATURE_ALARM, rx8025->rtc->features);
}
rx8025->rtc->max_user_freq = 1;
- /* the rx8025 alarm only supports a minute accuracy */
- rx8025->rtc->uie_unsupported = 1;
+ set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rx8025->rtc->features);
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rx8025->rtc->features);
- err = rx8025_sysfs_register(&client->dev);
- return err;
-}
+ err = rtc_add_group(rx8025->rtc, &rx8025_attr_group);
+ if (err)
+ return err;
-static int rx8025_remove(struct i2c_client *client)
-{
- rx8025_sysfs_unregister(&client->dev);
- return 0;
+ return devm_rtc_register_device(rx8025->rtc);
}
static struct i2c_driver rx8025_driver = {
@@ -598,7 +572,6 @@ static struct i2c_driver rx8025_driver = {
.name = "rtc-rx8025",
},
.probe = rx8025_probe,
- .remove = rx8025_remove,
.id_table = rx8025_id,
};
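
The rx8025 change retires the driver-private clock adjustment in favour of the generic read_offset/set_offset callbacks; the clock_adjust_ppb sysfs file is kept for compatibility but now only negates the value, since the legacy interface took the correction to apply while the offset API reports the measured deviation in parts per billion. A small sketch of the DIGOFF register encoding mirrored from the hunk; the resolution and clamp limits are assumptions standing in for the driver's existing defines, which are not visible here:

#include <linux/kernel.h>

#define ADJ_RESOLUTION  3050    /* assumed ppb per register step */
#define ADJ_DATA_MAX    62      /* assumed encoding limits */
#define ADJ_DATA_MIN    -62

/* Register value -> signed offset in parts per billion */
static long digoff_to_ppb(u8 digoff)
{
        long steps = digoff >= 64 ? digoff - 128 : digoff;

        if (steps > 0)          /* the smallest positive code is 2, not 1 */
                steps--;
        return steps * ADJ_RESOLUTION;
}

/* Signed offset in ppb -> register value, clamped to the valid range */
static u8 ppb_to_digoff(long offset)
{
        long steps = offset / ADJ_RESOLUTION;

        steps = clamp_t(long, steps, ADJ_DATA_MIN, ADJ_DATA_MAX);
        if (steps > 0)
                steps++;
        else if (steps < 0)
                steps += 128;
        return steps;
}
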
diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c
index b5bdeda7d76781..26278c77073153 100644
--- a/drivers/rtc/rtc-s35390a.c
+++ b/drivers/rtc/rtc-s35390a.c
@@ -285,9 +285,6 @@ static int s35390a_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
alm->time.tm_min, alm->time.tm_hour, alm->time.tm_mday,
alm->time.tm_mon, alm->time.tm_year, alm->time.tm_wday);
- if (alm->time.tm_sec != 0)
- dev_warn(&client->dev, "Alarms are only supported on a per minute basis!\n");
-
/* disable interrupt (which deasserts the irq line) */
err = s35390a_set_reg(s35390a, S35390A_CMD_STATUS2, &sts, sizeof(sts));
if (err < 0)
@@ -491,8 +488,8 @@ static int s35390a_probe(struct i2c_client *client,
s35390a->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
s35390a->rtc->range_max = RTC_TIMESTAMP_END_2099;
- /* supports per-minute alarms only, therefore set uie_unsupported */
- s35390a->rtc->uie_unsupported = 1;
+ set_bit(RTC_FEATURE_ALARM_RES_MINUTE, s35390a->rtc->features);
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, s35390a->rtc->features);
if (status1 & S35390A_FLAG_INT2)
rtc_update_irq(s35390a->rtc, 1, RTC_AF);
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index e57d3ca70a7828..db529733c9c46c 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -127,10 +127,9 @@ static int s3c_rtc_setaie(struct device *dev, unsigned int enabled)
return ret;
}
-/* Time read/write */
-static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
+/* Read time from RTC and convert it from BCD */
+static int s3c_rtc_read_time(struct s3c_rtc *info, struct rtc_time *tm)
{
- struct s3c_rtc *info = dev_get_drvdata(dev);
unsigned int have_retried = 0;
int ret;
@@ -139,54 +138,40 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
return ret;
retry_get_time:
- rtc_tm->tm_min = readb(info->base + S3C2410_RTCMIN);
- rtc_tm->tm_hour = readb(info->base + S3C2410_RTCHOUR);
- rtc_tm->tm_mday = readb(info->base + S3C2410_RTCDATE);
- rtc_tm->tm_mon = readb(info->base + S3C2410_RTCMON);
- rtc_tm->tm_year = readb(info->base + S3C2410_RTCYEAR);
- rtc_tm->tm_sec = readb(info->base + S3C2410_RTCSEC);
-
- /* the only way to work out whether the system was mid-update
+ tm->tm_min = readb(info->base + S3C2410_RTCMIN);
+ tm->tm_hour = readb(info->base + S3C2410_RTCHOUR);
+ tm->tm_mday = readb(info->base + S3C2410_RTCDATE);
+ tm->tm_mon = readb(info->base + S3C2410_RTCMON);
+ tm->tm_year = readb(info->base + S3C2410_RTCYEAR);
+ tm->tm_sec = readb(info->base + S3C2410_RTCSEC);
+
+ /*
+ * The only way to work out whether the system was mid-update
* when we read it is to check the second counter, and if it
* is zero, then we re-try the entire read
*/
-
- if (rtc_tm->tm_sec == 0 && !have_retried) {
+ if (tm->tm_sec == 0 && !have_retried) {
have_retried = 1;
goto retry_get_time;
}
- rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec);
- rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min);
- rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour);
- rtc_tm->tm_mday = bcd2bin(rtc_tm->tm_mday);
- rtc_tm->tm_mon = bcd2bin(rtc_tm->tm_mon);
- rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year);
-
s3c_rtc_disable_clk(info);
- rtc_tm->tm_year += 100;
- rtc_tm->tm_mon -= 1;
+ tm->tm_sec = bcd2bin(tm->tm_sec);
+ tm->tm_min = bcd2bin(tm->tm_min);
+ tm->tm_hour = bcd2bin(tm->tm_hour);
+ tm->tm_mday = bcd2bin(tm->tm_mday);
+ tm->tm_mon = bcd2bin(tm->tm_mon);
+ tm->tm_year = bcd2bin(tm->tm_year);
- dev_dbg(dev, "read time %ptR\n", rtc_tm);
return 0;
}
-static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
+/* Convert time to BCD and write it to RTC */
+static int s3c_rtc_write_time(struct s3c_rtc *info, const struct rtc_time *tm)
{
- struct s3c_rtc *info = dev_get_drvdata(dev);
- int year = tm->tm_year - 100;
int ret;
- dev_dbg(dev, "set time %ptR\n", tm);
-
- /* we get around y2k by simply not supporting it */
-
- if (year < 0 || year >= 100) {
- dev_err(dev, "rtc only supports 100 years\n");
- return -EINVAL;
- }
-
ret = s3c_rtc_enable_clk(info);
if (ret)
return ret;
@@ -195,14 +180,48 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
writeb(bin2bcd(tm->tm_min), info->base + S3C2410_RTCMIN);
writeb(bin2bcd(tm->tm_hour), info->base + S3C2410_RTCHOUR);
writeb(bin2bcd(tm->tm_mday), info->base + S3C2410_RTCDATE);
- writeb(bin2bcd(tm->tm_mon + 1), info->base + S3C2410_RTCMON);
- writeb(bin2bcd(year), info->base + S3C2410_RTCYEAR);
+ writeb(bin2bcd(tm->tm_mon), info->base + S3C2410_RTCMON);
+ writeb(bin2bcd(tm->tm_year), info->base + S3C2410_RTCYEAR);
s3c_rtc_disable_clk(info);
return 0;
}
+static int s3c_rtc_gettime(struct device *dev, struct rtc_time *tm)
+{
+ struct s3c_rtc *info = dev_get_drvdata(dev);
+ int ret;
+
+ ret = s3c_rtc_read_time(info, tm);
+ if (ret)
+ return ret;
+
+ /* Convert internal representation to actual date/time */
+ tm->tm_year += 100;
+ tm->tm_mon -= 1;
+
+ dev_dbg(dev, "read time %ptR\n", tm);
+ return 0;
+}
+
+static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
+{
+ struct s3c_rtc *info = dev_get_drvdata(dev);
+ struct rtc_time rtc_tm = *tm;
+
+ dev_dbg(dev, "set time %ptR\n", tm);
+
+ /*
+ * Convert actual date/time to internal representation.
+ * We get around Y2K by simply not supporting it.
+ */
+ rtc_tm.tm_year -= 100;
+ rtc_tm.tm_mon += 1;
+
+ return s3c_rtc_write_time(info, &rtc_tm);
+}
+
static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct s3c_rtc *info = dev_get_drvdata(dev);
@@ -447,15 +466,20 @@ static int s3c_rtc_probe(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, 1);
- /* register RTC and exit */
- info->rtc = devm_rtc_device_register(&pdev->dev, "s3c", &s3c_rtcops,
- THIS_MODULE);
+ info->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(info->rtc)) {
- dev_err(&pdev->dev, "cannot attach rtc\n");
ret = PTR_ERR(info->rtc);
goto err_nortc;
}
+ info->rtc->ops = &s3c_rtcops;
+ info->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ info->rtc->range_max = RTC_TIMESTAMP_END_2099;
+
+ ret = devm_rtc_register_device(info->rtc);
+ if (ret)
+ goto err_nortc;
+
ret = devm_request_irq(&pdev->dev, info->irq_alarm, s3c_rtc_alarmirq,
0, "s3c2410-rtc alarm", info);
if (ret) {
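
The s3c rework separates raw BCD register access from the struct rtc_time conversion and lets the core enforce the supported window through range_min/range_max instead of the old open-coded "rtc only supports 100 years" check. A small sketch of the year/month convention the new wrappers apply, under the same 2000..2099 assumption (the helper names are hypothetical):

#include <linux/bcd.h>
#include <linux/rtc.h>

static void tm_to_regs(const struct rtc_time *tm, u8 *year, u8 *mon)
{
        /* tm_year counts from 1900 and tm_mon is 0-based in struct rtc_time */
        *year = bin2bcd(tm->tm_year - 100);     /* e.g. 2021 -> 0x21 */
        *mon = bin2bcd(tm->tm_mon + 1);         /* e.g. January -> 0x01 */
}

static void regs_to_tm(u8 year, u8 mon, struct rtc_time *tm)
{
        tm->tm_year = bcd2bin(year) + 100;
        tm->tm_mon = bcd2bin(mon) - 1;
}
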
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
index fb9c6b709e1364..4243fe6d384228 100644
--- a/drivers/rtc/rtc-s5m.c
+++ b/drivers/rtc/rtc-s5m.c
@@ -861,4 +861,3 @@ module_platform_driver(s5m_rtc_driver);
MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
MODULE_DESCRIPTION("Samsung S5M/S2MPS14 RTC driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:s5m-rtc");
diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
index adec1b14a8deb1..711832c758aeae 100644
--- a/drivers/rtc/rtc-sun6i.c
+++ b/drivers/rtc/rtc-sun6i.c
@@ -673,8 +673,17 @@ static int sun6i_rtc_probe(struct platform_device *pdev)
struct sun6i_rtc_dev *chip = sun6i_rtc;
int ret;
- if (!chip)
- return -ENODEV;
+ if (!chip) {
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ spin_lock_init(&chip->lock);
+
+ chip->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(chip->base))
+ return PTR_ERR(chip->base);
+ }
platform_set_drvdata(pdev, chip);
diff --git a/drivers/rtc/rtc-tps80031.c b/drivers/rtc/rtc-tps80031.c
deleted file mode 100644
index c77b8eab94a0fe..00000000000000
--- a/drivers/rtc/rtc-tps80031.c
+++ /dev/null
@@ -1,324 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * rtc-tps80031.c -- TI TPS80031/TPS80032 RTC driver
- *
- * RTC driver for TI TPS80031/TPS80032 Fully Integrated
- * Power Management with Power Path and Battery Charger
- *
- * Copyright (c) 2012, NVIDIA Corporation.
- *
- * Author: Laxman Dewangan <ldewangan@nvidia.com>
- */
-
-#include <linux/bcd.h>
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mfd/tps80031.h>
-#include <linux/platform_device.h>
-#include <linux/pm.h>
-#include <linux/rtc.h>
-#include <linux/slab.h>
-
-#define ENABLE_ALARM_INT 0x08
-#define ALARM_INT_STATUS 0x40
-
-/**
- * Setting bit to 1 in STOP_RTC will run the RTC and
- * setting this bit to 0 will freeze RTC.
- */
-#define STOP_RTC 0x1
-
-/* Power on reset Values of RTC registers */
-#define TPS80031_RTC_POR_YEAR 0
-#define TPS80031_RTC_POR_MONTH 1
-#define TPS80031_RTC_POR_DAY 1
-
-/* Numbers of registers for time and alarms */
-#define TPS80031_RTC_TIME_NUM_REGS 7
-#define TPS80031_RTC_ALARM_NUM_REGS 6
-
-/**
- * PMU RTC have only 2 nibbles to store year information, so using an
- * offset of 100 to set the base year as 2000 for our driver.
- */
-#define RTC_YEAR_OFFSET 100
-
-struct tps80031_rtc {
- struct rtc_device *rtc;
- int irq;
-};
-
-static int tps80031_rtc_read_time(struct device *dev, struct rtc_time *tm)
-{
- u8 buff[TPS80031_RTC_TIME_NUM_REGS];
- int ret;
-
- ret = tps80031_reads(dev->parent, TPS80031_SLAVE_ID1,
- TPS80031_SECONDS_REG, TPS80031_RTC_TIME_NUM_REGS, buff);
- if (ret < 0) {
- dev_err(dev, "reading RTC_SECONDS_REG failed, err = %d\n", ret);
- return ret;
- }
-
- tm->tm_sec = bcd2bin(buff[0]);
- tm->tm_min = bcd2bin(buff[1]);
- tm->tm_hour = bcd2bin(buff[2]);
- tm->tm_mday = bcd2bin(buff[3]);
- tm->tm_mon = bcd2bin(buff[4]) - 1;
- tm->tm_year = bcd2bin(buff[5]) + RTC_YEAR_OFFSET;
- tm->tm_wday = bcd2bin(buff[6]);
- return 0;
-}
-
-static int tps80031_rtc_set_time(struct device *dev, struct rtc_time *tm)
-{
- u8 buff[7];
- int ret;
-
- buff[0] = bin2bcd(tm->tm_sec);
- buff[1] = bin2bcd(tm->tm_min);
- buff[2] = bin2bcd(tm->tm_hour);
- buff[3] = bin2bcd(tm->tm_mday);
- buff[4] = bin2bcd(tm->tm_mon + 1);
- buff[5] = bin2bcd(tm->tm_year % RTC_YEAR_OFFSET);
- buff[6] = bin2bcd(tm->tm_wday);
-
- /* Stop RTC while updating the RTC time registers */
- ret = tps80031_clr_bits(dev->parent, TPS80031_SLAVE_ID1,
- TPS80031_RTC_CTRL_REG, STOP_RTC);
- if (ret < 0) {
- dev_err(dev->parent, "Stop RTC failed, err = %d\n", ret);
- return ret;
- }
-
- ret = tps80031_writes(dev->parent, TPS80031_SLAVE_ID1,
- TPS80031_SECONDS_REG,
- TPS80031_RTC_TIME_NUM_REGS, buff);
- if (ret < 0) {
- dev_err(dev, "writing RTC_SECONDS_REG failed, err %d\n", ret);
- return ret;
- }
-
- ret = tps80031_set_bits(dev->parent, TPS80031_SLAVE_ID1,
- TPS80031_RTC_CTRL_REG, STOP_RTC);
- if (ret < 0)
- dev_err(dev->parent, "Start RTC failed, err = %d\n", ret);
- return ret;
-}
-
-static int tps80031_rtc_alarm_irq_enable(struct device *dev,
- unsigned int enable)
-{
- int ret;
-
- if (enable)
- ret = tps80031_set_bits(dev->parent, TPS80031_SLAVE_ID1,
- TPS80031_RTC_INTERRUPTS_REG, ENABLE_ALARM_INT);
- else
- ret = tps80031_clr_bits(dev->parent, TPS80031_SLAVE_ID1,
- TPS80031_RTC_INTERRUPTS_REG, ENABLE_ALARM_INT);
- if (ret < 0) {
- dev_err(dev, "Update on RTC_INT failed, err = %d\n", ret);
- return ret;
- }
- return 0;
-}
-
-static int tps80031_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
-{
- u8 buff[TPS80031_RTC_ALARM_NUM_REGS];
- int ret;
-
- buff[0] = bin2bcd(alrm->time.tm_sec);
- buff[1] = bin2bcd(alrm->time.tm_min);
- buff[2] = bin2bcd(alrm->time.tm_hour);
- buff[3] = bin2bcd(alrm->time.tm_mday);
- buff[4] = bin2bcd(alrm->time.tm_mon + 1);
- buff[5] = bin2bcd(alrm->time.tm_year % RTC_YEAR_OFFSET);
- ret = tps80031_writes(dev->parent, TPS80031_SLAVE_ID1,
- TPS80031_ALARM_SECONDS_REG,
- TPS80031_RTC_ALARM_NUM_REGS, buff);
- if (ret < 0) {
- dev_err(dev, "Writing RTC_ALARM failed, err %d\n", ret);
- return ret;
- }
- return tps80031_rtc_alarm_irq_enable(dev, alrm->enabled);
-}
-
-static int tps80031_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
-{
- u8 buff[6];
- int ret;
-
- ret = tps80031_reads(dev->parent, TPS80031_SLAVE_ID1,
- TPS80031_ALARM_SECONDS_REG,
- TPS80031_RTC_ALARM_NUM_REGS, buff);
- if (ret < 0) {
- dev_err(dev->parent,
- "reading RTC_ALARM failed, err = %d\n", ret);
- return ret;
- }
-
- alrm->time.tm_sec = bcd2bin(buff[0]);
- alrm->time.tm_min = bcd2bin(buff[1]);
- alrm->time.tm_hour = bcd2bin(buff[2]);
- alrm->time.tm_mday = bcd2bin(buff[3]);
- alrm->time.tm_mon = bcd2bin(buff[4]) - 1;
- alrm->time.tm_year = bcd2bin(buff[5]) + RTC_YEAR_OFFSET;
- return 0;
-}
-
-static int clear_alarm_int_status(struct device *dev, struct tps80031_rtc *rtc)
-{
- int ret;
- u8 buf;
-
- /**
- * As per datasheet, A dummy read of this RTC_STATUS_REG register
- * is necessary before each I2C read in order to update the status
- * register value.
- */
- ret = tps80031_read(dev->parent, TPS80031_SLAVE_ID1,
- TPS80031_RTC_STATUS_REG, &buf);
- if (ret < 0) {
- dev_err(dev, "reading RTC_STATUS failed. err = %d\n", ret);
- return ret;
- }
-
- /* clear Alarm status bits.*/
- ret = tps80031_set_bits(dev->parent, TPS80031_SLAVE_ID1,
- TPS80031_RTC_STATUS_REG, ALARM_INT_STATUS);
- if (ret < 0) {
- dev_err(dev, "clear Alarm INT failed, err = %d\n", ret);
- return ret;
- }
- return 0;
-}
-
-static irqreturn_t tps80031_rtc_irq(int irq, void *data)
-{
- struct device *dev = data;
- struct tps80031_rtc *rtc = dev_get_drvdata(dev);
- int ret;
-
- ret = clear_alarm_int_status(dev, rtc);
- if (ret < 0)
- return ret;
-
- rtc_update_irq(rtc->rtc, 1, RTC_IRQF | RTC_AF);
- return IRQ_HANDLED;
-}
-
-static const struct rtc_class_ops tps80031_rtc_ops = {
- .read_time = tps80031_rtc_read_time,
- .set_time = tps80031_rtc_set_time,
- .set_alarm = tps80031_rtc_set_alarm,
- .read_alarm = tps80031_rtc_read_alarm,
- .alarm_irq_enable = tps80031_rtc_alarm_irq_enable,
-};
-
-static int tps80031_rtc_probe(struct platform_device *pdev)
-{
- struct tps80031_rtc *rtc;
- struct rtc_time tm;
- int ret;
-
- rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
- if (!rtc)
- return -ENOMEM;
-
- rtc->irq = platform_get_irq(pdev, 0);
- platform_set_drvdata(pdev, rtc);
-
- /* Start RTC */
- ret = tps80031_set_bits(pdev->dev.parent, TPS80031_SLAVE_ID1,
- TPS80031_RTC_CTRL_REG, STOP_RTC);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to start RTC. err = %d\n", ret);
- return ret;
- }
-
- /* If RTC have POR values, set time 01:01:2000 */
- tps80031_rtc_read_time(&pdev->dev, &tm);
- if ((tm.tm_year == RTC_YEAR_OFFSET + TPS80031_RTC_POR_YEAR) &&
- (tm.tm_mon == (TPS80031_RTC_POR_MONTH - 1)) &&
- (tm.tm_mday == TPS80031_RTC_POR_DAY)) {
- tm.tm_year = 2000;
- tm.tm_mday = 1;
- tm.tm_mon = 1;
- ret = tps80031_rtc_set_time(&pdev->dev, &tm);
- if (ret < 0) {
- dev_err(&pdev->dev,
- "RTC set time failed, err = %d\n", ret);
- return ret;
- }
- }
-
- /* Clear alarm interrupt status if it is there */
- ret = clear_alarm_int_status(&pdev->dev, rtc);
- if (ret < 0) {
- dev_err(&pdev->dev, "Clear alarm int failed, err = %d\n", ret);
- return ret;
- }
-
- rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
- &tps80031_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc->rtc)) {
- ret = PTR_ERR(rtc->rtc);
- dev_err(&pdev->dev, "RTC registration failed, err %d\n", ret);
- return ret;
- }
-
- ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
- tps80031_rtc_irq,
- IRQF_ONESHOT,
- dev_name(&pdev->dev), rtc);
- if (ret < 0) {
- dev_err(&pdev->dev, "request IRQ:%d failed, err = %d\n",
- rtc->irq, ret);
- return ret;
- }
- device_set_wakeup_capable(&pdev->dev, 1);
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int tps80031_rtc_suspend(struct device *dev)
-{
- struct tps80031_rtc *rtc = dev_get_drvdata(dev);
-
- if (device_may_wakeup(dev))
- enable_irq_wake(rtc->irq);
- return 0;
-}
-
-static int tps80031_rtc_resume(struct device *dev)
-{
- struct tps80031_rtc *rtc = dev_get_drvdata(dev);
-
- if (device_may_wakeup(dev))
- disable_irq_wake(rtc->irq);
- return 0;
-};
-#endif
-
-static SIMPLE_DEV_PM_OPS(tps80031_pm_ops, tps80031_rtc_suspend,
- tps80031_rtc_resume);
-
-static struct platform_driver tps80031_rtc_driver = {
- .driver = {
- .name = "tps80031-rtc",
- .pm = &tps80031_pm_ops,
- },
- .probe = tps80031_rtc_probe,
-};
-
-module_platform_driver(tps80031_rtc_driver);
-
-MODULE_ALIAS("platform:tps80031-rtc");
-MODULE_DESCRIPTION("TI TPS80031/TPS80032 RTC driver");
-MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c
index 1f5fab617b6790..f7e75d9fedf61d 100644
--- a/drivers/s390/char/tape_std.c
+++ b/drivers/s390/char/tape_std.c
@@ -53,7 +53,6 @@ int
tape_std_assign(struct tape_device *device)
{
int rc;
- struct timer_list timeout;
struct tape_request *request;
request = tape_alloc_request(2, 11);
@@ -70,7 +69,7 @@ tape_std_assign(struct tape_device *device)
* So we set up a timeout for this call.
*/
timer_setup(&request->timer, tape_std_assign_timeout, 0);
- mod_timer(&timeout, jiffies + 2 * HZ);
+ mod_timer(&request->timer, jiffies + msecs_to_jiffies(2000));
rc = tape_do_io_interruptible(device, request);
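
The tape_std_assign() fix above arms the timer embedded in the request rather than an uninitialized on-stack timer_list, so the two-second timeout actually runs against the request that timer_setup() prepared. A minimal sketch of the pattern, with hypothetical names (assign_req and assign_timeout are illustrative, not taken from the patch):

	struct assign_req {
		struct timer_list timer;
		/* ... per-request state ... */
	};

	static void assign_timeout(struct timer_list *t)
	{
		struct assign_req *req = from_timer(req, t, timer);
		/* cancel the hanging I/O that belongs to req */
	}

	/* arm the same timer the callback will resolve, never a stack copy */
	timer_setup(&req->timer, assign_timeout, 0);
	mod_timer(&req->timer, jiffies + msecs_to_jiffies(2000));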
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 2bc55ccf3f2372..ce9e7517430f7f 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -437,8 +437,8 @@ static ssize_t dev_busid_show(struct device *dev,
struct subchannel *sch = to_subchannel(dev);
struct pmcw *pmcw = &sch->schib.pmcw;
- if ((pmcw->st == SUBCHANNEL_TYPE_IO ||
- pmcw->st == SUBCHANNEL_TYPE_MSG) && pmcw->dnv)
+ if ((pmcw->st == SUBCHANNEL_TYPE_IO && pmcw->dnv) ||
+ (pmcw->st == SUBCHANNEL_TYPE_MSG && pmcw->w))
return sysfs_emit(buf, "0.%x.%04x\n", sch->schid.ssid,
pmcw->dev);
else
diff --git a/drivers/scsi/elx/libefc_sli/sli4.c b/drivers/scsi/elx/libefc_sli/sli4.c
index 6c6c04e1b74db9..907d67aeac23c6 100644
--- a/drivers/scsi/elx/libefc_sli/sli4.c
+++ b/drivers/scsi/elx/libefc_sli/sli4.c
@@ -4145,7 +4145,7 @@ static int
sli_get_read_config(struct sli4 *sli4)
{
struct sli4_rsp_read_config *conf = sli4->bmbx.virt;
- u32 i, total, total_size;
+ u32 i, total;
u32 *base;
if (sli_cmd_read_config(sli4, sli4->bmbx.virt)) {
@@ -4203,8 +4203,7 @@ sli_get_read_config(struct sli4 *sli4)
for (i = 0; i < SLI4_RSRC_MAX; i++) {
total = sli4->ext[i].number * sli4->ext[i].size;
- total_size = BITS_TO_LONGS(total) * sizeof(long);
- sli4->ext[i].use_map = kzalloc(total_size, GFP_KERNEL);
+ sli4->ext[i].use_map = bitmap_zalloc(total, GFP_KERNEL);
if (!sli4->ext[i].use_map) {
efc_log_err(sli4, "bitmap memory allocation failed %d\n",
i);
@@ -4743,7 +4742,7 @@ sli_reset(struct sli4 *sli4)
sli4->ext[0].base = NULL;
for (i = 0; i < SLI4_RSRC_MAX; i++) {
- kfree(sli4->ext[i].use_map);
+ bitmap_free(sli4->ext[i].use_map);
sli4->ext[i].use_map = NULL;
sli4->ext[i].base = NULL;
}
@@ -4784,7 +4783,7 @@ sli_teardown(struct sli4 *sli4)
for (i = 0; i < SLI4_RSRC_MAX; i++) {
sli4->ext[i].base = NULL;
- kfree(sli4->ext[i].use_map);
+ bitmap_free(sli4->ext[i].use_map);
sli4->ext[i].use_map = NULL;
}
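
Replacing the BITS_TO_LONGS()/kzalloc() pair with bitmap_zalloc()/bitmap_free() keeps the size arithmetic inside the bitmap API and makes allocation and free symmetric. A short sketch of the idiom, assuming an illustrative resource count nbits:

	unsigned long *use_map;

	use_map = bitmap_zalloc(nbits, GFP_KERNEL);	/* zeroed storage for nbits bits */
	if (!use_map)
		return -ENOMEM;

	set_bit(3, use_map);				/* mark resource #3 as in use */
	clear_bit(3, use_map);				/* ...and release it again */

	bitmap_free(use_map);				/* pairs with bitmap_zalloc() */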
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index cda0135c43db7f..8049b00b6766aa 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -388,6 +388,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
shost->shost_state = SHOST_CREATED;
INIT_LIST_HEAD(&shost->__devices);
INIT_LIST_HEAD(&shost->__targets);
+ INIT_LIST_HEAD(&shost->eh_abort_list);
INIT_LIST_HEAD(&shost->eh_cmd_q);
INIT_LIST_HEAD(&shost->starved_list);
init_waitqueue_head(&shost->host_wait);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 30f9545d228560..032efb294ee52f 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2762,7 +2762,12 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
if (fcport->loop_id != FC_NO_LOOP_ID)
fcport->logout_on_delete = 1;
- qlt_schedule_sess_for_deletion(fcport);
+ if (!EDIF_NEGOTIATION_PENDING(fcport)) {
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x911e,
+ "%s %d schedule session deletion\n", __func__,
+ __LINE__);
+ qlt_schedule_sess_for_deletion(fcport);
+ }
} else {
qla2x00_port_logout(fcport->vha, fcport);
}
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 8924eeb9367dae..9ebf4a234d9a90 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -639,9 +639,9 @@ struct qla_els_pt_arg {
u8 els_opcode;
u8 vp_idx;
__le16 nport_handle;
- u16 control_flags;
+ u16 control_flags, ox_id;
__le32 rx_xchg_address;
- port_id_t did;
+ port_id_t did, sid;
u32 tx_len, tx_byte_count, rx_len, rx_byte_count;
dma_addr_t tx_addr, rx_addr;
diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c
index ad746c62f0d440..2e37b189cb7559 100644
--- a/drivers/scsi/qla2xxx/qla_edif.c
+++ b/drivers/scsi/qla2xxx/qla_edif.c
@@ -218,7 +218,7 @@ fc_port_t *fcport)
"%s edif not enabled\n", __func__);
goto done;
}
- if (vha->e_dbell.db_flags != EDB_ACTIVE) {
+ if (DBELL_INACTIVE(vha)) {
ql_dbg(ql_dbg_edif, vha, 0x09102,
"%s doorbell not enabled\n", __func__);
goto done;
@@ -290,63 +290,6 @@ qla_edif_app_check(scsi_qla_host_t *vha, struct app_id appid)
return false;
}
-static void qla_edif_reset_auth_wait(struct fc_port *fcport, int state,
- int waitonly)
-{
- int cnt, max_cnt = 200;
- bool traced = false;
-
- fcport->keep_nport_handle = 1;
-
- if (!waitonly) {
- qla2x00_set_fcport_disc_state(fcport, state);
- qlt_schedule_sess_for_deletion(fcport);
- } else {
- qla2x00_set_fcport_disc_state(fcport, state);
- }
-
- ql_dbg(ql_dbg_edif, fcport->vha, 0xf086,
- "%s: waiting for session, max_cnt=%u\n",
- __func__, max_cnt);
-
- cnt = 0;
-
- if (waitonly) {
- /* Marker wait min 10 msecs. */
- msleep(50);
- cnt += 50;
- }
- while (1) {
- if (!traced) {
- ql_dbg(ql_dbg_edif, fcport->vha, 0xf086,
- "%s: session sleep.\n",
- __func__);
- traced = true;
- }
- msleep(20);
- cnt++;
- if (waitonly && (fcport->disc_state == state ||
- fcport->disc_state == DSC_LOGIN_COMPLETE))
- break;
- if (fcport->disc_state == DSC_LOGIN_AUTH_PEND)
- break;
- if (cnt > max_cnt)
- break;
- }
-
- if (!waitonly) {
- ql_dbg(ql_dbg_edif, fcport->vha, 0xf086,
- "%s: waited for session - %8phC, loopid=%x portid=%06x fcport=%p state=%u, cnt=%u\n",
- __func__, fcport->port_name, fcport->loop_id,
- fcport->d_id.b24, fcport, fcport->disc_state, cnt);
- } else {
- ql_dbg(ql_dbg_edif, fcport->vha, 0xf086,
- "%s: waited ONLY for session - %8phC, loopid=%x portid=%06x fcport=%p state=%u, cnt=%u\n",
- __func__, fcport->port_name, fcport->loop_id,
- fcport->d_id.b24, fcport, fcport->disc_state, cnt);
- }
-}
-
static void
qla_edif_free_sa_ctl(fc_port_t *fcport, struct edif_sa_ctl *sa_ctl,
int index)
@@ -529,7 +472,8 @@ qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
struct app_start_reply appreply;
struct fc_port *fcport, *tf;
- ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app start\n", __func__);
+ ql_log(ql_log_info, vha, 0x1313,
+ "EDIF application registration with driver, FC device connections will be re-established.\n");
sg_copy_to_buffer(bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, &appstart,
@@ -538,9 +482,9 @@ qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app_vid=%x app_start_flags %x\n",
__func__, appstart.app_info.app_vid, appstart.app_start_flags);
- if (vha->e_dbell.db_flags != EDB_ACTIVE) {
+ if (DBELL_INACTIVE(vha)) {
/* mark doorbell as active since an app is now present */
- vha->e_dbell.db_flags = EDB_ACTIVE;
+ vha->e_dbell.db_flags |= EDB_ACTIVE;
} else {
ql_dbg(ql_dbg_edif, vha, 0x911e, "%s doorbell already active\n",
__func__);
@@ -554,37 +498,36 @@ qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
qla2xxx_wake_dpc(vha);
} else {
list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
+ ql_dbg(ql_dbg_edif, vha, 0x2058,
+ "FCSP - nn %8phN pn %8phN portid=%06x.\n",
+ fcport->node_name, fcport->port_name,
+ fcport->d_id.b24);
ql_dbg(ql_dbg_edif, vha, 0xf084,
- "%s: sess %p %8phC lid %#04x s_id %06x logout %d\n",
- __func__, fcport, fcport->port_name,
- fcport->loop_id, fcport->d_id.b24,
- fcport->logout_on_delete);
-
- ql_dbg(ql_dbg_edif, vha, 0xf084,
- "keep %d els_logo %d disc state %d auth state %d stop state %d\n",
- fcport->keep_nport_handle,
- fcport->send_els_logo, fcport->disc_state,
- fcport->edif.auth_state, fcport->edif.app_stop);
+ "%s: se_sess %p / sess %p from port %8phC "
+ "loop_id %#04x s_id %06x logout %d "
+ "keep %d els_logo %d disc state %d auth state %d"
+ "stop state %d\n",
+ __func__, fcport->se_sess, fcport,
+ fcport->port_name, fcport->loop_id,
+ fcport->d_id.b24, fcport->logout_on_delete,
+ fcport->keep_nport_handle, fcport->send_els_logo,
+ fcport->disc_state, fcport->edif.auth_state,
+ fcport->edif.app_stop);
if (atomic_read(&vha->loop_state) == LOOP_DOWN)
break;
- if (!(fcport->flags & FCF_FCSP_DEVICE))
- continue;
fcport->edif.app_started = 1;
- if (fcport->edif.app_stop ||
- (fcport->disc_state != DSC_LOGIN_COMPLETE &&
- fcport->disc_state != DSC_LOGIN_PEND &&
- fcport->disc_state != DSC_DELETED)) {
- /* no activity */
- fcport->edif.app_stop = 0;
-
- ql_dbg(ql_dbg_edif, vha, 0x911e,
- "%s wwpn %8phC calling qla_edif_reset_auth_wait\n",
- __func__, fcport->port_name);
- fcport->edif.app_sess_online = 1;
- qla_edif_reset_auth_wait(fcport, DSC_LOGIN_PEND, 0);
- }
+ fcport->login_retry = vha->hw->login_retry_count;
+
+ /* no activity */
+ fcport->edif.app_stop = 0;
+
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s wwpn %8phC calling qla_edif_reset_auth_wait\n",
+ __func__, fcport->port_name);
+ fcport->edif.app_sess_online = 0;
+ qlt_schedule_sess_for_deletion(fcport);
qla_edif_sa_ctl_init(vha, fcport);
}
}
@@ -601,14 +544,14 @@ qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
appreply.edif_enode_active = vha->pur_cinfo.enode_flags;
appreply.edif_edb_active = vha->e_dbell.db_flags;
- bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
- sizeof(struct app_start_reply);
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
SET_DID_STATUS(bsg_reply->result, DID_OK);
- sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
- bsg_job->reply_payload.sg_cnt, &appreply,
- sizeof(struct app_start_reply));
+ bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt,
+ &appreply,
+ sizeof(struct app_start_reply));
ql_dbg(ql_dbg_edif, vha, 0x911d,
"%s app start completed with 0x%x\n",
@@ -800,15 +743,15 @@ qla_edif_app_authok(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
ql_dbg(ql_dbg_edif, vha, 0x911e,
"%s AUTH complete - RESUME with prli for wwpn %8phC\n",
__func__, fcport->port_name);
- qla_edif_reset_auth_wait(fcport, DSC_LOGIN_PEND, 1);
qla24xx_post_prli_work(vha, fcport);
}
errstate_exit:
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
- bsg_job->reply_payload.sg_cnt, &appplogireply,
- sizeof(struct app_plogi_reply));
+ bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt,
+ &appplogireply,
+ sizeof(struct app_plogi_reply));
return rval;
}
@@ -873,7 +816,7 @@ qla_edif_app_authfail(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
if (qla_ini_mode_enabled(fcport->vha)) {
fcport->send_els_logo = 1;
- qla_edif_reset_auth_wait(fcport, DSC_LOGIN_PEND, 0);
+ qlt_schedule_sess_for_deletion(fcport);
}
}
@@ -891,7 +834,7 @@ static int
qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
{
int32_t rval = 0;
- int32_t num_cnt;
+ int32_t pcnt = 0;
struct fc_bsg_reply *bsg_reply = bsg_job->reply;
struct app_pinfo_req app_req;
struct app_pinfo_reply *app_reply;
@@ -903,16 +846,14 @@ qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
bsg_job->request_payload.sg_cnt, &app_req,
sizeof(struct app_pinfo_req));
- num_cnt = app_req.num_ports; /* num of ports alloc'd by app */
-
app_reply = kzalloc((sizeof(struct app_pinfo_reply) +
- sizeof(struct app_pinfo) * num_cnt), GFP_KERNEL);
+ sizeof(struct app_pinfo) * app_req.num_ports), GFP_KERNEL);
+
if (!app_reply) {
SET_DID_STATUS(bsg_reply->result, DID_ERROR);
rval = -1;
} else {
struct fc_port *fcport = NULL, *tf;
- uint32_t pcnt = 0;
list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
if (!(fcport->flags & FCF_FCSP_DEVICE))
@@ -981,9 +922,11 @@ qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
SET_DID_STATUS(bsg_reply->result, DID_OK);
}
- sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
- bsg_job->reply_payload.sg_cnt, app_reply,
- sizeof(struct app_pinfo_reply) + sizeof(struct app_pinfo) * num_cnt);
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt,
+ app_reply,
+ sizeof(struct app_pinfo_reply) + sizeof(struct app_pinfo) * pcnt);
kfree(app_reply);
@@ -1000,10 +943,11 @@ qla_edif_app_getstats(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
{
int32_t rval = 0;
struct fc_bsg_reply *bsg_reply = bsg_job->reply;
- uint32_t ret_size, size;
+ uint32_t size;
struct app_sinfo_req app_req;
struct app_stats_reply *app_reply;
+ uint32_t pcnt = 0;
sg_copy_to_buffer(bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, &app_req,
@@ -1019,18 +963,12 @@ qla_edif_app_getstats(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
size = sizeof(struct app_stats_reply) +
(sizeof(struct app_sinfo) * app_req.num_ports);
- if (size > bsg_job->reply_payload.payload_len)
- ret_size = bsg_job->reply_payload.payload_len;
- else
- ret_size = size;
-
app_reply = kzalloc(size, GFP_KERNEL);
if (!app_reply) {
SET_DID_STATUS(bsg_reply->result, DID_ERROR);
rval = -1;
} else {
struct fc_port *fcport = NULL, *tf;
- uint32_t pcnt = 0;
list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
if (fcport->edif.enable) {
@@ -1054,9 +992,11 @@ qla_edif_app_getstats(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
SET_DID_STATUS(bsg_reply->result, DID_OK);
}
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
- bsg_job->reply_payload.sg_cnt, app_reply, ret_size);
+ bsg_job->reply_payload.sg_cnt, app_reply,
+ sizeof(struct app_stats_reply) + (sizeof(struct app_sinfo) * pcnt));
kfree(app_reply);
@@ -1130,8 +1070,7 @@ qla_edif_app_mgmt(struct bsg_job *bsg_job)
__func__,
bsg_request->rqst_data.h_vendor.vendor_cmd[1]);
rval = EXT_STATUS_INVALID_PARAM;
- bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ done = false;
break;
}
@@ -1330,7 +1269,7 @@ qla24xx_sadb_update(struct bsg_job *bsg_job)
goto done;
}
- if (vha->e_dbell.db_flags != EDB_ACTIVE) {
+ if (DBELL_INACTIVE(vha)) {
ql_log(ql_log_warn, vha, 0x70a1, "App not started\n");
rval = -EIO;
SET_DID_STATUS(bsg_reply->result, DID_ERROR);
@@ -1651,6 +1590,40 @@ qla_enode_stop(scsi_qla_host_t *vha)
spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
}
+static void qla_enode_clear(scsi_qla_host_t *vha, port_id_t portid)
+{
+ unsigned long flags;
+ struct enode *e, *tmp;
+ struct purexevent *purex;
+ LIST_HEAD(enode_list);
+
+ if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) {
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s enode not active\n", __func__);
+ return;
+ }
+ spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
+ list_for_each_entry_safe(e, tmp, &vha->pur_cinfo.head, list) {
+ purex = &e->u.purexinfo;
+ if (purex->pur_info.pur_sid.b24 == portid.b24) {
+ ql_dbg(ql_dbg_edif, vha, 0x911d,
+ "%s free ELS sid=%06x. xchg %x, nb=%xh\n",
+ __func__, portid.b24,
+ purex->pur_info.pur_rx_xchg_address,
+ purex->pur_info.pur_bytes_rcvd);
+
+ list_del_init(&e->list);
+ list_add_tail(&e->list, &enode_list);
+ }
+ }
+ spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
+
+ list_for_each_entry_safe(e, tmp, &enode_list, list) {
+ list_del_init(&e->list);
+ qla_enode_free(vha, e);
+ }
+}
+
/*
* allocate enode struct and populate buffer
* returns: enode pointer with buffers
@@ -1695,41 +1668,25 @@ static struct enode *
qla_enode_find(scsi_qla_host_t *vha, uint32_t ntype, uint32_t p1, uint32_t p2)
{
struct enode *node_rtn = NULL;
- struct enode *list_node = NULL;
+ struct enode *list_node, *q;
unsigned long flags;
- struct list_head *pos, *q;
uint32_t sid;
- uint32_t rw_flag;
struct purexevent *purex;
/* secure the list from moving under us */
spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
- list_for_each_safe(pos, q, &vha->pur_cinfo.head) {
- list_node = list_entry(pos, struct enode, list);
+ list_for_each_entry_safe(list_node, q, &vha->pur_cinfo.head, list) {
/* node type determines what p1 and p2 are */
purex = &list_node->u.purexinfo;
sid = p1;
- rw_flag = p2;
if (purex->pur_info.pur_sid.b24 == sid) {
- if (purex->pur_info.pur_pend == 1 &&
- rw_flag == PUR_GET) {
- /*
- * if the receive is in progress
- * and its a read/get then can't
- * transfer yet
- */
- ql_dbg(ql_dbg_edif, vha, 0x9106,
- "%s purex xfer in progress for sid=%x\n",
- __func__, sid);
- } else {
- /* found it and its complete */
- node_rtn = list_node;
- list_del(pos);
- break;
- }
+ /* found it and its complete */
+ node_rtn = list_node;
+ list_del(&list_node->list);
+ break;
}
}
@@ -1802,7 +1759,8 @@ qla_els_reject_iocb(scsi_qla_host_t *vha, struct qla_qpair *qp,
qla_els_pt_iocb(vha, els_iocb, a);
ql_dbg(ql_dbg_edif, vha, 0x0183,
- "Sending ELS reject...\n");
+ "Sending ELS reject ox_id %04x s:%06x -> d:%06x\n",
+ a->ox_id, a->sid.b24, a->did.b24);
ql_dump_buffer(ql_dbg_edif + ql_dbg_verbose, vha, 0x0185,
vha->hw->elsrej.c, sizeof(*vha->hw->elsrej.c));
/* flush iocb to mem before notifying hw doorbell */
@@ -1814,7 +1772,7 @@ qla_els_reject_iocb(scsi_qla_host_t *vha, struct qla_qpair *qp,
void
qla_edb_init(scsi_qla_host_t *vha)
{
- if (vha->e_dbell.db_flags == EDB_ACTIVE) {
+ if (DBELL_ACTIVE(vha)) {
/* list already init'd - error */
ql_dbg(ql_dbg_edif, vha, 0x09102,
"edif db already initialized, cannot reinit\n");
@@ -1850,6 +1808,57 @@ qla_edb_node_free(scsi_qla_host_t *vha, struct edb_node *node)
node->ntype = N_UNDEF;
}
+static void qla_edb_clear(scsi_qla_host_t *vha, port_id_t portid)
+{
+ unsigned long flags;
+ struct edb_node *e, *tmp;
+ port_id_t sid;
+ LIST_HEAD(edb_list);
+
+ if (DBELL_INACTIVE(vha)) {
+ /* doorbell list not enabled */
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s doorbell not enabled\n", __func__);
+ return;
+ }
+
+ /* grab lock so list doesn't move */
+ spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
+ list_for_each_entry_safe(e, tmp, &vha->e_dbell.head, list) {
+ switch (e->ntype) {
+ case VND_CMD_AUTH_STATE_NEEDED:
+ case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
+ sid = e->u.plogi_did;
+ break;
+ case VND_CMD_AUTH_STATE_ELS_RCVD:
+ sid = e->u.els_sid;
+ break;
+ case VND_CMD_AUTH_STATE_SAUPDATE_COMPL:
+ /* app wants to see this */
+ continue;
+ default:
+ ql_log(ql_log_warn, vha, 0x09102,
+ "%s unknown node type: %x\n", __func__, e->ntype);
+ sid.b24 = 0;
+ break;
+ }
+ if (sid.b24 == portid.b24) {
+ ql_dbg(ql_dbg_edif, vha, 0x910f,
+ "%s free doorbell event : node type = %x %p\n",
+ __func__, e->ntype, e);
+ list_del_init(&e->list);
+ list_add_tail(&e->list, &edb_list);
+ }
+ }
+ spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
+
+ list_for_each_entry_safe(e, tmp, &edb_list, list) {
+ qla_edb_node_free(vha, e);
+ list_del_init(&e->list);
+ kfree(e);
+ }
+}
+
/* function called when app is stopping */
void
@@ -1858,7 +1867,7 @@ qla_edb_stop(scsi_qla_host_t *vha)
unsigned long flags;
struct edb_node *node, *q;
- if (vha->e_dbell.db_flags != EDB_ACTIVE) {
+ if (DBELL_INACTIVE(vha)) {
/* doorbell list not enabled */
ql_dbg(ql_dbg_edif, vha, 0x09102,
"%s doorbell not enabled\n", __func__);
@@ -1909,7 +1918,7 @@ qla_edb_node_add(scsi_qla_host_t *vha, struct edb_node *ptr)
{
unsigned long flags;
- if (vha->e_dbell.db_flags != EDB_ACTIVE) {
+ if (DBELL_INACTIVE(vha)) {
/* doorbell list not enabled */
ql_dbg(ql_dbg_edif, vha, 0x09102,
"%s doorbell not enabled\n", __func__);
@@ -1940,7 +1949,7 @@ qla_edb_eventcreate(scsi_qla_host_t *vha, uint32_t dbtype,
return;
}
- if (vha->e_dbell.db_flags != EDB_ACTIVE) {
+ if (DBELL_INACTIVE(vha)) {
if (fcport)
fcport->edif.auth_state = dbtype;
/* doorbell list not enabled */
@@ -2035,7 +2044,7 @@ qla_edif_timer(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
if (!vha->vp_idx && N2N_TOPO(ha) && ha->flags.n2n_fw_acc_sec) {
- if (vha->e_dbell.db_flags != EDB_ACTIVE &&
+ if (DBELL_INACTIVE(vha) &&
ha->edif_post_stop_cnt_down) {
ha->edif_post_stop_cnt_down--;
@@ -2073,7 +2082,7 @@ edif_doorbell_show(struct device *dev, struct device_attribute *attr,
sz = 256;
/* stop new threads from waiting if we're not init'd */
- if (vha->e_dbell.db_flags != EDB_ACTIVE) {
+ if (DBELL_INACTIVE(vha)) {
ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x09122,
"%s error - edif db not enabled\n", __func__);
return 0;
@@ -2346,6 +2355,7 @@ void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp)
a.tx_addr = vha->hw->elsrej.cdma;
a.vp_idx = vha->vp_idx;
a.control_flags = EPD_ELS_RJT;
+ a.ox_id = le16_to_cpu(p->ox_id);
sid = p->s_id[0] | (p->s_id[1] << 8) | (p->s_id[2] << 16);
@@ -2357,7 +2367,7 @@ void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp)
return;
}
- if (totlen > MAX_PAYLOAD) {
+ if (totlen > ELS_MAX_PAYLOAD) {
ql_dbg(ql_dbg_edif, vha, 0x0910d,
"%s WARNING: verbose ELS frame received (totlen=%x)\n",
__func__, totlen);
@@ -2387,7 +2397,6 @@ void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp)
purex = &ptr->u.purexinfo;
purex->pur_info.pur_sid = a.did;
- purex->pur_info.pur_pend = 0;
purex->pur_info.pur_bytes_rcvd = totlen;
purex->pur_info.pur_rx_xchg_address = le32_to_cpu(p->rx_xchg_addr);
purex->pur_info.pur_nphdl = le16_to_cpu(p->nport_handle);
@@ -2396,6 +2405,8 @@ void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp)
purex->pur_info.pur_did.b.al_pa = p->d_id[0];
purex->pur_info.vp_idx = p->vp_idx;
+ a.sid = purex->pur_info.pur_did;
+
rc = __qla_copy_purex_to_buffer(vha, pkt, rsp, purex->msgp,
purex->msgp_len);
if (rc) {
@@ -2419,7 +2430,7 @@ void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp)
fcport = qla2x00_find_fcport_by_pid(host, &purex->pur_info.pur_sid);
- if (host->e_dbell.db_flags != EDB_ACTIVE ||
+ if (DBELL_INACTIVE(vha) ||
(fcport && EDIF_SESSION_DOWN(fcport))) {
ql_dbg(ql_dbg_edif, host, 0x0910c, "%s e_dbell.db_flags =%x %06x\n",
__func__, host->e_dbell.db_flags,
@@ -2436,7 +2447,7 @@ void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp)
ql_dbg(ql_dbg_edif, host, 0x0910c,
"%s COMPLETE purex->pur_info.pur_bytes_rcvd =%xh s:%06x -> d:%06x xchg=%xh\n",
__func__, purex->pur_info.pur_bytes_rcvd, purex->pur_info.pur_sid.b24,
- purex->pur_info.pur_did.b24, p->rx_xchg_addr);
+ purex->pur_info.pur_did.b24, purex->pur_info.pur_rx_xchg_address);
qla_edb_eventcreate(host, VND_CMD_AUTH_STATE_ELS_RCVD, sid, 0, NULL);
}
@@ -3139,18 +3150,14 @@ static uint16_t qla_edif_sadb_get_sa_index(fc_port_t *fcport,
/* release any sadb entries -- only done at teardown */
void qla_edif_sadb_release(struct qla_hw_data *ha)
{
- struct list_head *pos;
- struct list_head *tmp;
- struct edif_sa_index_entry *entry;
+ struct edif_sa_index_entry *entry, *tmp;
- list_for_each_safe(pos, tmp, &ha->sadb_rx_index_list) {
- entry = list_entry(pos, struct edif_sa_index_entry, next);
+ list_for_each_entry_safe(entry, tmp, &ha->sadb_rx_index_list, next) {
list_del(&entry->next);
kfree(entry);
}
- list_for_each_safe(pos, tmp, &ha->sadb_tx_index_list) {
- entry = list_entry(pos, struct edif_sa_index_entry, next);
+ list_for_each_entry_safe(entry, tmp, &ha->sadb_tx_index_list, next) {
list_del(&entry->next);
kfree(entry);
}
@@ -3449,7 +3456,7 @@ done:
void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess)
{
- if (sess->edif.app_sess_online && vha->e_dbell.db_flags & EDB_ACTIVE) {
+ if (sess->edif.app_sess_online && DBELL_ACTIVE(vha)) {
ql_dbg(ql_dbg_disc, vha, 0xf09c,
"%s: sess %8phN send port_offline event\n",
__func__, sess->port_name);
@@ -3459,3 +3466,12 @@ void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess)
qla2x00_post_aen_work(vha, FCH_EVT_PORT_OFFLINE, sess->d_id.b24);
}
}
+
+void qla_edif_clear_appdata(struct scsi_qla_host *vha, struct fc_port *fcport)
+{
+ if (!(fcport->flags & FCF_FCSP_DEVICE))
+ return;
+
+ qla_edb_clear(vha, fcport->d_id);
+ qla_enode_clear(vha, fcport->d_id);
+}
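
The new qla_enode_clear() and qla_edb_clear() helpers (and qla_edif_clear_appdata(), which calls them when a session is torn down) share one reaping pattern: while the spinlock is held, entries belonging to the departing port are only unlinked and collected onto a local list; the actual freeing happens after the lock is dropped, keeping the critical section short and avoiding kfree() with interrupts disabled. A generic sketch of that pattern, with hypothetical structure and field names:

	LIST_HEAD(reap_list);
	struct my_node *e, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	list_for_each_entry_safe(e, tmp, &dev->head, list) {
		if (e->owner_id == portid)		/* selection criterion */
			list_move_tail(&e->list, &reap_list);
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	/* free outside the critical section, with interrupts enabled again */
	list_for_each_entry_safe(e, tmp, &reap_list, list) {
		list_del_init(&e->list);
		kfree(e);
	}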
diff --git a/drivers/scsi/qla2xxx/qla_edif.h b/drivers/scsi/qla2xxx/qla_edif.h
index 9e8f28d0caa1bc..a965ca8e47ce75 100644
--- a/drivers/scsi/qla2xxx/qla_edif.h
+++ b/drivers/scsi/qla2xxx/qla_edif.h
@@ -41,9 +41,12 @@ struct pur_core {
};
enum db_flags_t {
- EDB_ACTIVE = 0x1,
+ EDB_ACTIVE = BIT_0,
};
+#define DBELL_ACTIVE(_v) (_v->e_dbell.db_flags & EDB_ACTIVE)
+#define DBELL_INACTIVE(_v) (!(_v->e_dbell.db_flags & EDB_ACTIVE))
+
struct edif_dbell {
enum db_flags_t db_flags;
spinlock_t db_lock;
@@ -93,7 +96,6 @@ struct sa_update_28xx {
};
#define NUM_ENTRIES 256
-#define MAX_PAYLOAD 1024
#define PUR_GET 1
struct dinfo {
@@ -102,7 +104,6 @@ struct dinfo {
};
struct pur_ninfo {
- unsigned int pur_pend:1;
port_id_t pur_sid;
port_id_t pur_did;
uint8_t vp_idx;
@@ -128,9 +129,15 @@ struct enode {
} u;
};
+#define RX_ELS_SIZE (roundup(sizeof(struct enode) + ELS_MAX_PAYLOAD, SMP_CACHE_BYTES))
+
#define EDIF_SESSION_DOWN(_s) \
(qla_ini_mode_enabled(_s->vha) && (_s->disc_state == DSC_DELETE_PEND || \
_s->disc_state == DSC_DELETED || \
!_s->edif.app_sess_online))
+#define EDIF_NEGOTIATION_PENDING(_fcport) \
+ (DBELL_ACTIVE(_fcport->vha) && \
+ (_fcport->disc_state == DSC_LOGIN_AUTH_PEND))
+
#endif /* __QLA_EDIF_H */
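
With EDB_ACTIVE redefined as BIT_0, e_dbell.db_flags becomes a bitmask, so the old equality tests (db_flags == EDB_ACTIVE and != EDB_ACTIVE) would give the wrong answer as soon as any additional flag were set. The DBELL_ACTIVE()/DBELL_INACTIVE() wrappers test only the bit of interest, and the callers in qla_edif.c, qla_init.c, qla_iocb.c and qla_target.c are converted to them in this series. A small illustration (EDB_SOME_OTHER_FLAG and post_doorbell_event() are hypothetical):

	vha->e_dbell.db_flags |= EDB_ACTIVE;		/* application attached */
	vha->e_dbell.db_flags |= EDB_SOME_OTHER_FLAG;	/* a second, unrelated bit */

	if (DBELL_ACTIVE(vha))				/* still true: masks EDB_ACTIVE only */
		post_doorbell_event(vha);

	/* (vha->e_dbell.db_flags == EDB_ACTIVE) would now be false */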
diff --git a/drivers/scsi/qla2xxx/qla_edif_bsg.h b/drivers/scsi/qla2xxx/qla_edif_bsg.h
index 58b718d35d19a9..53026d82ebffe3 100644
--- a/drivers/scsi/qla2xxx/qla_edif_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_edif_bsg.h
@@ -8,7 +8,7 @@
#define __QLA_EDIF_BSG_H
/* BSG Vendor specific commands */
-#define ELS_MAX_PAYLOAD 1024
+#define ELS_MAX_PAYLOAD 2112
#ifndef WWN_SIZE
#define WWN_SIZE 8
#endif
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 3c4fa8bac88de0..8d8503a2847909 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -142,6 +142,8 @@ void qlt_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha, fc_port_t *fcport,
void qla2x00_release_all_sadb(struct scsi_qla_host *vha, struct fc_port *fcport);
int qla_edif_process_els(scsi_qla_host_t *vha, struct bsg_job *bsgjob);
void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess);
+void qla_edif_clear_appdata(struct scsi_qla_host *vha,
+ struct fc_port *fcport);
const char *sc_to_str(uint16_t cmd);
/*
@@ -171,7 +173,6 @@ extern int ql2xasynctmfenable;
extern int ql2xgffidenable;
extern int ql2xenabledif;
extern int ql2xenablehba_err_chk;
-extern int ql2xtargetreset;
extern int ql2xdontresethba;
extern uint64_t ql2xmaxlun;
extern int ql2xmdcapmask;
@@ -818,7 +819,6 @@ extern void qlafx00_abort_iocb(srb_t *, struct abort_iocb_entry_fx00 *);
extern void qlafx00_fxdisc_iocb(srb_t *, struct fxdisc_entry_fx00 *);
extern void qlafx00_timer_routine(scsi_qla_host_t *);
extern int qlafx00_rescan_isp(scsi_qla_host_t *);
-extern int qlafx00_loop_reset(scsi_qla_host_t *vha);
/* qla82xx related functions */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 908a72ebf7c2aa..070b636802d041 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -330,12 +330,9 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY;
} else {
if (vha->hw->flags.edif_enabled &&
- vha->e_dbell.db_flags & EDB_ACTIVE) {
+ DBELL_ACTIVE(vha)) {
lio->u.logio.flags |=
(SRB_LOGIN_FCSP | SRB_LOGIN_SKIP_PRLI);
- ql_dbg(ql_dbg_disc, vha, 0x2072,
- "Async-login: w/ FCSP %8phC hdl=%x, loopid=%x portid=%06x\n",
- fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24);
} else {
lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
}
@@ -344,12 +341,14 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
if (NVME_TARGET(vha->hw, fcport))
lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;
+ rval = qla2x00_start_sp(sp);
+
ql_dbg(ql_dbg_disc, vha, 0x2072,
- "Async-login - %8phC hdl=%x, loopid=%x portid=%06x retries=%d.\n",
+ "Async-login - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n",
fcport->port_name, sp->handle, fcport->loop_id,
- fcport->d_id.b24, fcport->login_retry);
+ fcport->d_id.b24, fcport->login_retry,
+ lio->u.logio.flags & SRB_LOGIN_FCSP ? "FCSP" : "");
- rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
fcport->flags |= FCF_LOGIN_NEEDED;
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
@@ -862,7 +861,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
break;
case DSC_LS_PLOGI_COMP:
if (vha->hw->flags.edif_enabled &&
- vha->e_dbell.db_flags & EDB_ACTIVE) {
+ DBELL_ACTIVE(vha)) {
/* check to see if App support secure or not */
qla24xx_post_gpdb_work(vha, fcport, 0);
break;
@@ -987,8 +986,6 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
sp->u.iocb_cmd.u.mbx.in_mb[2]);
- if (res == QLA_FUNCTION_TIMEOUT)
- return;
sp->fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
memset(&ea, 0, sizeof(ea));
@@ -1026,8 +1023,8 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
- list_del_init(&fcport->gnl_entry);
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ list_del_init(&fcport->gnl_entry);
fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
ea.fcport = fcport;
@@ -1454,7 +1451,7 @@ static int qla_chk_secure_login(scsi_qla_host_t *vha, fc_port_t *fcport,
qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
fcport->d_id.b24);
- if (vha->e_dbell.db_flags == EDB_ACTIVE) {
+ if (DBELL_ACTIVE(vha)) {
ql_dbg(ql_dbg_disc, vha, 0x20ef,
"%s %d %8phC EDIF: post DB_AUTH: AUTH needed\n",
__func__, __LINE__, fcport->port_name);
@@ -1786,16 +1783,72 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
fc_port_t *fcport;
unsigned long flags;
- fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
- if (fcport) {
- if (fcport->flags & FCF_FCP2_DEVICE) {
- ql_dbg(ql_dbg_disc, vha, 0x2115,
- "Delaying session delete for FCP2 portid=%06x %8phC ",
- fcport->d_id.b24, fcport->port_name);
- return;
+ switch (ea->id.b.rsvd_1) {
+ case RSCN_PORT_ADDR:
+ fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
+ if (fcport) {
+ if (fcport->flags & FCF_FCP2_DEVICE) {
+ ql_dbg(ql_dbg_disc, vha, 0x2115,
+ "Delaying session delete for FCP2 portid=%06x %8phC ",
+ fcport->d_id.b24, fcport->port_name);
+ return;
+ }
+
+ if (vha->hw->flags.edif_enabled && DBELL_ACTIVE(vha)) {
+ /*
+ * On ipsec start by remote port, Target port
+ * may use RSCN to trigger initiator to
+ * relogin. If driver is already in the
+ * process of a relogin, then ignore the RSCN
+ * and allow the current relogin to continue.
+ * This reduces thrashing of the connection.
+ */
+ if (atomic_read(&fcport->state) == FCS_ONLINE) {
+ /*
+ * If state = online, then set scan_needed=1 to do relogin.
+ * Otherwise we're already in the middle of a relogin
+ */
+ fcport->scan_needed = 1;
+ fcport->rscn_gen++;
+ }
+ } else {
+ fcport->scan_needed = 1;
+ fcport->rscn_gen++;
+ }
+ }
+ break;
+ case RSCN_AREA_ADDR:
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->flags & FCF_FCP2_DEVICE)
+ continue;
+
+ if ((ea->id.b24 & 0xffff00) == (fcport->d_id.b24 & 0xffff00)) {
+ fcport->scan_needed = 1;
+ fcport->rscn_gen++;
+ }
+ }
+ break;
+ case RSCN_DOM_ADDR:
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->flags & FCF_FCP2_DEVICE)
+ continue;
+
+ if ((ea->id.b24 & 0xff0000) == (fcport->d_id.b24 & 0xff0000)) {
+ fcport->scan_needed = 1;
+ fcport->rscn_gen++;
+ }
+ }
+ break;
+ case RSCN_FAB_ADDR:
+ default:
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->flags & FCF_FCP2_DEVICE)
+ continue;
+
+ fcport->scan_needed = 1;
+ fcport->rscn_gen++;
}
- fcport->scan_needed = 1;
- fcport->rscn_gen++;
+ break;
}
spin_lock_irqsave(&vha->work_lock, flags);
@@ -4187,7 +4240,7 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
* fw shal not send PRLI after PLOGI Acc
*/
if (ha->flags.edif_enabled &&
- vha->e_dbell.db_flags & EDB_ACTIVE) {
+ DBELL_ACTIVE(vha)) {
ha->fw_options[3] |= BIT_15;
ha->flags.n2n_fw_acc_sec = 1;
} else {
@@ -4433,6 +4486,10 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
(ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
}
+ /* ELS pass through payload is limited by frame size. */
+ if (ha->flags.edif_enabled)
+ mid_init_cb->init_cb.frame_payload_size = cpu_to_le16(ELS_MAX_PAYLOAD);
+
rval = qla2x00_init_firmware(vha, ha->init_cb_size);
next_check:
if (rval) {
@@ -5339,8 +5396,7 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
* use link up to wake up app to get ready for
* authentication.
*/
- if (ha->flags.edif_enabled &&
- !(vha->e_dbell.db_flags & EDB_ACTIVE))
+ if (ha->flags.edif_enabled && DBELL_INACTIVE(vha))
qla2x00_post_aen_work(vha, FCH_EVT_LINKUP,
ha->link_data_rate);
@@ -5833,6 +5889,10 @@ void qla_register_fcport_fn(struct work_struct *work)
qla2x00_update_fcport(fcport->vha, fcport);
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x911e,
+ "%s rscn gen %d/%d next DS %d\n", __func__,
+ rscn_gen, fcport->rscn_gen, fcport->next_disc_state);
+
if (rscn_gen != fcport->rscn_gen) {
/* RSCN(s) came in while registration */
switch (fcport->next_disc_state) {
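
qla2x00_handle_rscn() now honours the RSCN address format carried in ea->id.b.rsvd_1: a port-address RSCN matches one N_Port ID exactly, an area RSCN compares the upper 16 bits, a domain RSCN the upper 8 bits, and a fabric RSCN (or an unrecognised format) marks every non-FCP2 port for rescan. The masks used in the hunk can be restated as a single predicate; rscn_matches() below is a hypothetical helper, not code from the patch:

	/* 24-bit FC address layout: domain[23:16] | area[15:8] | al_pa[7:0] */
	static bool rscn_matches(u32 rscn_id, u32 port_id, u8 fmt)
	{
		switch (fmt) {
		case RSCN_PORT_ADDR:
			return rscn_id == port_id;
		case RSCN_AREA_ADDR:
			return (rscn_id & 0xffff00) == (port_id & 0xffff00);
		case RSCN_DOM_ADDR:
			return (rscn_id & 0xff0000) == (port_id & 0xff0000);
		default:	/* RSCN_FAB_ADDR and anything unrecognised */
			return true;
		}
	}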
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 9d4ad1d2b00a29..ed604f2185bf25 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -3034,8 +3034,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
elsio->u.els_plogi.els_cmd = els_opcode;
elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
- if (els_opcode == ELS_DCMD_PLOGI && vha->hw->flags.edif_enabled &&
- vha->e_dbell.db_flags & EDB_ACTIVE) {
+ if (els_opcode == ELS_DCMD_PLOGI && DBELL_ACTIVE(vha)) {
struct fc_els_flogi *p = ptr;
p->fl_csp.sp_features |= cpu_to_be16(FC_SP_FT_SEC);
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index b26f2699adb270..aaf6504570fdd0 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -2233,6 +2233,10 @@ qla24xx_els_ct_entry(scsi_qla_host_t *v, struct req_que *req,
}
} else if (comp_status == CS_PORT_LOGGED_OUT) {
+ ql_dbg(ql_dbg_disc, vha, 0x911e,
+ "%s %d schedule session deletion\n",
+ __func__, __LINE__);
+
els->u.els_plogi.len = 0;
res = DID_IMM_RETRY << 16;
qlt_schedule_sess_for_deletion(sp->fcport);
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 6e920da64863ee..350b0c4346fb61 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -739,29 +739,6 @@ qlafx00_lun_reset(fc_port_t *fcport, uint64_t l, int tag)
}
int
-qlafx00_loop_reset(scsi_qla_host_t *vha)
-{
- int ret;
- struct fc_port *fcport;
- struct qla_hw_data *ha = vha->hw;
-
- if (ql2xtargetreset) {
- list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (fcport->port_type != FCT_TARGET)
- continue;
-
- ret = ha->isp_ops->target_reset(fcport, 0, 0);
- if (ret != QLA_SUCCESS) {
- ql_dbg(ql_dbg_taskm, vha, 0x803d,
- "Bus Reset failed: Reset=%d "
- "d_id=%x.\n", ret, fcport->d_id.b24);
- }
- }
- }
- return QLA_SUCCESS;
-}
-
-int
qlafx00_iospace_config(struct qla_hw_data *ha)
{
if (pci_request_selected_regions(ha->pdev, ha->bars,
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index c3ff50ffe2055a..abcd3091726380 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -202,12 +202,6 @@ MODULE_PARM_DESC(ql2xdbwr,
" 0 -- Regular doorbell.\n"
" 1 -- CAMRAM doorbell (faster).\n");
-int ql2xtargetreset = 1;
-module_param(ql2xtargetreset, int, S_IRUGO);
-MODULE_PARM_DESC(ql2xtargetreset,
- "Enable target reset."
- "Default is 1 - use hw defaults.");
-
int ql2xgffidenable;
module_param(ql2xgffidenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xgffidenable,
@@ -1695,27 +1689,10 @@ int
qla2x00_loop_reset(scsi_qla_host_t *vha)
{
int ret;
- struct fc_port *fcport;
struct qla_hw_data *ha = vha->hw;
- if (IS_QLAFX00(ha)) {
- return qlafx00_loop_reset(vha);
- }
-
- if (ql2xtargetreset == 1 && ha->flags.enable_target_reset) {
- list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (fcport->port_type != FCT_TARGET)
- continue;
-
- ret = ha->isp_ops->target_reset(fcport, 0, 0);
- if (ret != QLA_SUCCESS) {
- ql_dbg(ql_dbg_taskm, vha, 0x802c,
- "Bus Reset failed: Reset=%d "
- "d_id=%x.\n", ret, fcport->d_id.b24);
- }
- }
- }
-
+ if (IS_QLAFX00(ha))
+ return QLA_SUCCESS;
if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -3908,13 +3885,13 @@ qla2x00_remove_one(struct pci_dev *pdev)
static inline void
qla24xx_free_purex_list(struct purex_list *list)
{
- struct list_head *item, *next;
+ struct purex_item *item, *next;
ulong flags;
spin_lock_irqsave(&list->lock, flags);
- list_for_each_safe(item, next, &list->head) {
- list_del(item);
- kfree(list_entry(item, struct purex_item, list));
+ list_for_each_entry_safe(item, next, &list->head, list) {
+ list_del(&item->list);
+ kfree(item);
}
spin_unlock_irqrestore(&list->lock, flags);
}
@@ -4375,7 +4352,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
/* allocate the purex dma pool */
ha->purex_dma_pool = dma_pool_create(name, &ha->pdev->dev,
- MAX_PAYLOAD, 8, 0);
+ ELS_MAX_PAYLOAD, 8, 0);
if (!ha->purex_dma_pool) {
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
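
Besides dropping the ql2xtargetreset machinery and converting the purex list teardown to list_for_each_entry_safe(), the qla_os.c hunks size the purex DMA pool with ELS_MAX_PAYLOAD (now 2112 bytes, the maximum FC frame data field) so full-sized authentication ELS frames fit in one pool buffer. The dma_pool API follows the usual create/alloc/free/destroy lifecycle; the sketch below uses an illustrative pool name:

	/* 2112-byte buffers, 8-byte aligned (the values from the hunk above) */
	pool = dma_pool_create("purex_pool", &ha->pdev->dev, ELS_MAX_PAYLOAD, 8, 0);
	if (!pool)
		return -ENOMEM;

	buf = dma_pool_alloc(pool, GFP_ATOMIC, &buf_dma);	/* per received frame */
	/* ... copy the ELS payload into buf ... */
	dma_pool_free(pool, buf, buf_dma);

	dma_pool_destroy(pool);					/* at teardown */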
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 7d8242c120fc77..8993d438e0b72a 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1003,6 +1003,7 @@ void qlt_free_session_done(struct work_struct *work)
"%s bypassing release_all_sadb\n",
__func__);
}
+ qla_edif_clear_appdata(vha, sess);
qla_edif_sess_down(vha, sess);
}
qla2x00_mark_device_lost(vha, sess, 0);
@@ -4812,7 +4813,7 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
}
if (vha->hw->flags.edif_enabled) {
- if (!(vha->e_dbell.db_flags & EDB_ACTIVE)) {
+ if (DBELL_INACTIVE(vha)) {
ql_dbg(ql_dbg_disc, vha, 0xffff,
"%s %d Term INOT due to app not started lid=%d, NportID %06X ",
__func__, __LINE__, loop_id, port_id.b24);
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 4b117165bf8b27..27e440f8a70216 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -6,9 +6,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.02.07.100-k"
+#define QLA2XXX_VERSION "10.02.07.200-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 2
#define QLA_DRIVER_PATCH_VER 7
-#define QLA_DRIVER_BETA_VER 100
+#define QLA_DRIVER_BETA_VER 200
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 06e7266018c70a..1d0278da904139 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -4259,6 +4259,8 @@ static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
mk_sense_invalid_opcode(scp);
return check_condition_result;
}
+ if (vnum == 0)
+ return 0; /* not an error */
a_num = is_bytchk3 ? 1 : vnum;
/* Treat following check like one for read (i.e. no write) access */
ret = check_device_access_params(scp, lba, a_num, false);
@@ -4322,6 +4324,8 @@ static int resp_report_zones(struct scsi_cmnd *scp,
}
zs_lba = get_unaligned_be64(cmd + 2);
alloc_len = get_unaligned_be32(cmd + 10);
+ if (alloc_len == 0)
+ return 0; /* not an error */
rep_opts = cmd[14] & 0x3f;
partial = cmd[14] & 0x80;
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index a531336f6a0a7e..2371edbc3af4bc 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -133,6 +133,23 @@ static bool scsi_eh_should_retry_cmd(struct scsi_cmnd *cmd)
return true;
}
+static void scsi_eh_complete_abort(struct scsi_cmnd *scmd, struct Scsi_Host *shost)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ list_del_init(&scmd->eh_entry);
+ /*
+ * If the abort succeeds, and there is no further
+ * EH action, clear the ->last_reset time.
+ */
+ if (list_empty(&shost->eh_abort_list) &&
+ list_empty(&shost->eh_cmd_q))
+ if (shost->eh_deadline != -1)
+ shost->last_reset = 0;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
/**
* scmd_eh_abort_handler - Handle command aborts
* @work: command to be aborted.
@@ -150,6 +167,7 @@ scmd_eh_abort_handler(struct work_struct *work)
container_of(work, struct scsi_cmnd, abort_work.work);
struct scsi_device *sdev = scmd->device;
enum scsi_disposition rtn;
+ unsigned long flags;
if (scsi_host_eh_past_deadline(sdev->host)) {
SCSI_LOG_ERROR_RECOVERY(3,
@@ -173,12 +191,14 @@ scmd_eh_abort_handler(struct work_struct *work)
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_WARNING, scmd,
"retry aborted command\n"));
+ scsi_eh_complete_abort(scmd, sdev->host);
scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
return;
} else {
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_WARNING, scmd,
"finish aborted command\n"));
+ scsi_eh_complete_abort(scmd, sdev->host);
scsi_finish_command(scmd);
return;
}
@@ -191,6 +211,9 @@ scmd_eh_abort_handler(struct work_struct *work)
}
}
+ spin_lock_irqsave(sdev->host->host_lock, flags);
+ list_del_init(&scmd->eh_entry);
+ spin_unlock_irqrestore(sdev->host->host_lock, flags);
scsi_eh_scmd_add(scmd);
}
@@ -221,6 +244,8 @@ scsi_abort_command(struct scsi_cmnd *scmd)
spin_lock_irqsave(shost->host_lock, flags);
if (shost->eh_deadline != -1 && !shost->last_reset)
shost->last_reset = jiffies;
+ BUG_ON(!list_empty(&scmd->eh_entry));
+ list_add_tail(&scmd->eh_entry, &shost->eh_abort_list);
spin_unlock_irqrestore(shost->host_lock, flags);
scmd->eh_eflags |= SCSI_EH_ABORT_SCHEDULED;
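
scsi_abort_command() now links the command onto the new shost->eh_abort_list for as long as the asynchronous abort is outstanding, and every exit path of scmd_eh_abort_handler() unlinks it again (via scsi_eh_complete_abort() or the explicit list_del_init() before scsi_eh_scmd_add()), so the eh_deadline bookkeeping can tell whether any error handling is still pending. list_del_init() is used because it leaves the entry self-linked and therefore safe to delete again, which is also why scsi_init_command() gains INIT_LIST_HEAD(&cmd->eh_entry). A tiny illustration of that property (some_list is hypothetical):

	struct list_head entry;

	INIT_LIST_HEAD(&entry);		/* empty: entry points at itself */
	list_add_tail(&entry, &some_list);
	list_del_init(&entry);		/* removed and re-initialised */
	list_del_init(&entry);		/* harmless no-op on a self-linked entry */
	/* a plain list_del() here would follow the poison pointers instead */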
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 34412eac4566b4..400df3354cd647 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -347,6 +347,8 @@ static int scsi_fill_sghdr_rq(struct scsi_device *sdev, struct request *rq,
{
struct scsi_request *req = scsi_req(rq);
+ if (hdr->cmd_len < 6)
+ return -EMSGSIZE;
if (copy_from_user(req->cmd, hdr->cmdp, hdr->cmd_len))
return -EFAULT;
if (!scsi_cmd_allowed(req->cmd, mode))
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index b731c2983515a2..621d841d819a37 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1153,6 +1153,7 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
cmd->sense_buffer = buf;
cmd->prot_sdb = prot;
cmd->flags = flags;
+ INIT_LIST_HEAD(&cmd->eh_entry);
INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
cmd->jiffies_at_alloc = jiffies_at_alloc;
cmd->retries = retries;
@@ -1184,8 +1185,6 @@ static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev,
}
cmd->cmd_len = scsi_req(req)->cmd_len;
- if (cmd->cmd_len == 0)
- cmd->cmd_len = scsi_command_size(cmd->cmnd);
cmd->cmnd = scsi_req(req)->cmd;
cmd->transfersize = blk_rq_bytes(req);
cmd->allowed = scsi_req(req)->retries;
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index d3d362289ecc79..55addd78fde442 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1383,6 +1383,7 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
* We're treating error on bsg register as non-fatal, so
* pretend nothing went wrong.
*/
+ error = PTR_ERR(sdev->bsg_dev);
sdev_printk(KERN_INFO, sdev,
"Failed to register bsg queue, errno=%d\n",
error);
@@ -1580,7 +1581,6 @@ static struct device_type scsi_dev_type = {
void scsi_sysfs_device_initialize(struct scsi_device *sdev)
{
- int i, j = 0;
unsigned long flags;
struct Scsi_Host *shost = sdev->host;
struct scsi_host_template *hostt = shost->hostt;
@@ -1592,15 +1592,7 @@ void scsi_sysfs_device_initialize(struct scsi_device *sdev)
scsi_enable_async_suspend(&sdev->sdev_gendev);
dev_set_name(&sdev->sdev_gendev, "%d:%d:%d:%llu",
sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
- sdev->gendev_attr_groups[j++] = &scsi_sdev_attr_group;
- if (hostt->sdev_groups) {
- for (i = 0; hostt->sdev_groups[i] &&
- j < ARRAY_SIZE(sdev->gendev_attr_groups);
- i++, j++) {
- sdev->gendev_attr_groups[j] = hostt->sdev_groups[i];
- }
- }
- WARN_ON_ONCE(j >= ARRAY_SIZE(sdev->gendev_attr_groups));
+ sdev->sdev_gendev.groups = hostt->sdev_groups;
device_initialize(&sdev->sdev_dev);
sdev->sdev_dev.parent = get_device(&sdev->sdev_gendev);
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 50f8dc960ae2c3..8e4af111c07878 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -693,7 +693,6 @@ static int sr_probe(struct device *dev)
cd->device = sdev;
cd->disk = disk;
cd->driver = &sr_template;
- cd->disk = disk;
cd->capacity = 0x1fffff;
cd->device->changed = 1; /* force recheck CD type */
cd->media_present = 1;
diff --git a/drivers/scsi/ufs/ufs-debugfs.c b/drivers/scsi/ufs/ufs-debugfs.c
index 4e1ff209b9336e..4a0bbcf1757aba 100644
--- a/drivers/scsi/ufs/ufs-debugfs.c
+++ b/drivers/scsi/ufs/ufs-debugfs.c
@@ -8,6 +8,18 @@
static struct dentry *ufs_debugfs_root;
+struct ufs_debugfs_attr {
+ const char *name;
+ mode_t mode;
+ const struct file_operations *fops;
+};
+
+/* @file corresponds to a debugfs attribute in directory hba->debugfs_root. */
+static inline struct ufs_hba *hba_from_file(const struct file *file)
+{
+ return d_inode(file->f_path.dentry->d_parent)->i_private;
+}
+
void __init ufs_debugfs_init(void)
{
ufs_debugfs_root = debugfs_create_dir("ufshcd", NULL);
@@ -20,7 +32,7 @@ void ufs_debugfs_exit(void)
static int ufs_debugfs_stats_show(struct seq_file *s, void *data)
{
- struct ufs_hba *hba = s->private;
+ struct ufs_hba *hba = hba_from_file(s->file);
struct ufs_event_hist *e = hba->ufs_stats.event;
#define PRT(fmt, typ) \
@@ -126,13 +138,93 @@ static void ufs_debugfs_restart_ee(struct work_struct *work)
ufs_debugfs_put_user_access(hba);
}
+static int ufs_saved_err_show(struct seq_file *s, void *data)
+{
+ struct ufs_debugfs_attr *attr = s->private;
+ struct ufs_hba *hba = hba_from_file(s->file);
+ const int *p;
+
+ if (strcmp(attr->name, "saved_err") == 0) {
+ p = &hba->saved_err;
+ } else if (strcmp(attr->name, "saved_uic_err") == 0) {
+ p = &hba->saved_uic_err;
+ } else {
+ return -ENOENT;
+ }
+
+ seq_printf(s, "%d\n", *p);
+ return 0;
+}
+
+static ssize_t ufs_saved_err_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct ufs_debugfs_attr *attr = file->f_inode->i_private;
+ struct ufs_hba *hba = hba_from_file(file);
+ char val_str[16] = { };
+ int val, ret;
+
+ if (count > sizeof(val_str))
+ return -EINVAL;
+ if (copy_from_user(val_str, buf, count))
+ return -EFAULT;
+ ret = kstrtoint(val_str, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ spin_lock_irq(hba->host->host_lock);
+ if (strcmp(attr->name, "saved_err") == 0) {
+ hba->saved_err = val;
+ } else if (strcmp(attr->name, "saved_uic_err") == 0) {
+ hba->saved_uic_err = val;
+ } else {
+ ret = -ENOENT;
+ }
+ if (ret == 0)
+ ufshcd_schedule_eh_work(hba);
+ spin_unlock_irq(hba->host->host_lock);
+
+ return ret < 0 ? ret : count;
+}
+
+static int ufs_saved_err_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ufs_saved_err_show, inode->i_private);
+}
+
+static const struct file_operations ufs_saved_err_fops = {
+ .owner = THIS_MODULE,
+ .open = ufs_saved_err_open,
+ .read = seq_read,
+ .write = ufs_saved_err_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct ufs_debugfs_attr ufs_attrs[] = {
+ { "stats", 0400, &ufs_debugfs_stats_fops },
+ { "saved_err", 0600, &ufs_saved_err_fops },
+ { "saved_uic_err", 0600, &ufs_saved_err_fops },
+ { }
+};
+
void ufs_debugfs_hba_init(struct ufs_hba *hba)
{
+ const struct ufs_debugfs_attr *attr;
+ struct dentry *root;
+
/* Set default exception event rate limit period to 20ms */
hba->debugfs_ee_rate_limit_ms = 20;
INIT_DELAYED_WORK(&hba->debugfs_ee_work, ufs_debugfs_restart_ee);
- hba->debugfs_root = debugfs_create_dir(dev_name(hba->dev), ufs_debugfs_root);
- debugfs_create_file("stats", 0400, hba->debugfs_root, hba, &ufs_debugfs_stats_fops);
+
+ root = debugfs_create_dir(dev_name(hba->dev), ufs_debugfs_root);
+ if (IS_ERR_OR_NULL(root))
+ return;
+ hba->debugfs_root = root;
+ d_inode(root)->i_private = hba;
+ for (attr = ufs_attrs; attr->name; attr++)
+ debugfs_create_file(attr->name, attr->mode, root, (void *)attr,
+ attr->fops);
debugfs_create_file("exception_event_mask", 0600, hba->debugfs_root,
hba, &ee_usr_mask_fops);
debugfs_create_u32("exception_event_rate_limit_ms", 0600, hba->debugfs_root,
diff --git a/drivers/scsi/ufs/ufs-exynos.c b/drivers/scsi/ufs/ufs-exynos.c
index 30d0c1aba0c7ce..cd26bc82462ed0 100644
--- a/drivers/scsi/ufs/ufs-exynos.c
+++ b/drivers/scsi/ufs/ufs-exynos.c
@@ -12,8 +12,10 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/mfd/syscon.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
@@ -48,10 +50,11 @@
#define HCI_ERR_EN_T_LAYER 0x84
#define HCI_ERR_EN_DME_LAYER 0x88
#define HCI_CLKSTOP_CTRL 0xB0
+#define REFCLKOUT_STOP BIT(4)
#define REFCLK_STOP BIT(2)
#define UNIPRO_MCLK_STOP BIT(1)
#define UNIPRO_PCLK_STOP BIT(0)
-#define CLK_STOP_MASK (REFCLK_STOP |\
+#define CLK_STOP_MASK (REFCLKOUT_STOP | REFCLK_STOP |\
UNIPRO_MCLK_STOP |\
UNIPRO_PCLK_STOP)
#define HCI_MISC 0xB4
@@ -74,6 +77,52 @@
UIC_TRANSPORT_NO_CONNECTION_RX |\
UIC_TRANSPORT_BAD_TC)
+/* FSYS UFS Shareability */
+#define UFS_WR_SHARABLE BIT(2)
+#define UFS_RD_SHARABLE BIT(1)
+#define UFS_SHARABLE (UFS_WR_SHARABLE | UFS_RD_SHARABLE)
+#define UFS_SHAREABILITY_OFFSET 0x710
+
+/* Multi-host registers */
+#define MHCTRL 0xC4
+#define MHCTRL_EN_VH_MASK (0xE)
+#define MHCTRL_EN_VH(vh) (vh << 1)
+#define PH2VH_MBOX 0xD8
+
+#define MH_MSG_MASK (0xFF)
+
+#define MH_MSG(id, msg) ((id << 8) | (msg & 0xFF))
+#define MH_MSG_PH_READY 0x1
+#define MH_MSG_VH_READY 0x2
+
+#define ALLOW_INQUIRY BIT(25)
+#define ALLOW_MODE_SELECT BIT(24)
+#define ALLOW_MODE_SENSE BIT(23)
+#define ALLOW_PRE_FETCH GENMASK(22, 21)
+#define ALLOW_READ_CMD_ALL GENMASK(20, 18) /* read_6/10/16 */
+#define ALLOW_READ_BUFFER BIT(17)
+#define ALLOW_READ_CAPACITY GENMASK(16, 15)
+#define ALLOW_REPORT_LUNS BIT(14)
+#define ALLOW_REQUEST_SENSE BIT(13)
+#define ALLOW_SYNCHRONIZE_CACHE GENMASK(8, 7)
+#define ALLOW_TEST_UNIT_READY BIT(6)
+#define ALLOW_UNMAP BIT(5)
+#define ALLOW_VERIFY BIT(4)
+#define ALLOW_WRITE_CMD_ALL GENMASK(3, 1) /* write_6/10/16 */
+
+#define ALLOW_TRANS_VH_DEFAULT (ALLOW_INQUIRY | ALLOW_MODE_SELECT | \
+ ALLOW_MODE_SENSE | ALLOW_PRE_FETCH | \
+ ALLOW_READ_CMD_ALL | ALLOW_READ_BUFFER | \
+ ALLOW_READ_CAPACITY | ALLOW_REPORT_LUNS | \
+ ALLOW_REQUEST_SENSE | ALLOW_SYNCHRONIZE_CACHE | \
+ ALLOW_TEST_UNIT_READY | ALLOW_UNMAP | \
+ ALLOW_VERIFY | ALLOW_WRITE_CMD_ALL)
+
+#define HCI_MH_ALLOWABLE_TRAN_OF_VH 0x30C
+#define HCI_MH_IID_IN_TASK_TAG 0X308
+
+#define PH_READY_TIMEOUT_MS (5 * MSEC_PER_SEC)
+
enum {
UNIPRO_L1_5 = 0,/* PHY Adapter */
UNIPRO_L2, /* Data Link */
@@ -149,6 +198,117 @@ static int exynos7_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs)
return 0;
}
+static int exynosauto_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs)
+{
+ struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
+
+ /* IO Coherency setting */
+ if (ufs->sysreg) {
+ return regmap_update_bits(ufs->sysreg,
+ ufs->shareability_reg_offset,
+ UFS_SHARABLE, UFS_SHARABLE);
+ }
+
+ attr->tx_dif_p_nsec = 3200000;
+
+ return 0;
+}
+
+static int exynosauto_ufs_post_hce_enable(struct exynos_ufs *ufs)
+{
+ struct ufs_hba *hba = ufs->hba;
+
+ /* Enable Virtual Host #1 */
+ ufshcd_rmwl(hba, MHCTRL_EN_VH_MASK, MHCTRL_EN_VH(1), MHCTRL);
+ /* Default VH Transfer permissions */
+ hci_writel(ufs, ALLOW_TRANS_VH_DEFAULT, HCI_MH_ALLOWABLE_TRAN_OF_VH);
+ /* IID information is replaced in TASKTAG[7:5] instead of IID in UCD */
+ hci_writel(ufs, 0x1, HCI_MH_IID_IN_TASK_TAG);
+
+ return 0;
+}
+
+static int exynosauto_ufs_pre_link(struct exynos_ufs *ufs)
+{
+ struct ufs_hba *hba = ufs->hba;
+ int i;
+ u32 tx_line_reset_period, rx_line_reset_period;
+
+ rx_line_reset_period = (RX_LINE_RESET_TIME * ufs->mclk_rate) / NSEC_PER_MSEC;
+ tx_line_reset_period = (TX_LINE_RESET_TIME * ufs->mclk_rate) / NSEC_PER_MSEC;
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x40);
+ for_each_ufs_rx_lane(ufs, i) {
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_CLK_PRD, i),
+ DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_CLK_PRD_EN, i), 0x0);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE2, i),
+ (rx_line_reset_period >> 16) & 0xFF);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE1, i),
+ (rx_line_reset_period >> 8) & 0xFF);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE0, i),
+ (rx_line_reset_period) & 0xFF);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x2f, i), 0x79);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x84, i), 0x1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x25, i), 0xf6);
+ }
+
+ for_each_ufs_tx_lane(ufs, i) {
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_CLK_PRD, i),
+ DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
+ /* Not to affect VND_TX_LINERESET_PVALUE to VND_TX_CLK_PRD */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_CLK_PRD_EN, i),
+ 0x02);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE2, i),
+ (tx_line_reset_period >> 16) & 0xFF);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE1, i),
+ (tx_line_reset_period >> 8) & 0xFF);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE0, i),
+ (tx_line_reset_period) & 0xFF);
+
+ /* TX PWM Gear Capability / PWM_G1_ONLY */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x04, i), 0x1);
+ }
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x0);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0x0);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xa011), 0x8000);
+
+ return 0;
+}
+
+static int exynosauto_ufs_pre_pwr_change(struct exynos_ufs *ufs,
+ struct ufs_pa_layer_attr *pwr)
+{
+ struct ufs_hba *hba = ufs->hba;
+
+	/* These attributes are sent in PACP_PWR_req and delivered to the remote DME */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), 12000);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), 32000);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), 16000);
+
+ return 0;
+}
+
+static int exynosauto_ufs_post_pwr_change(struct exynos_ufs *ufs,
+ struct ufs_pa_layer_attr *pwr)
+{
+ struct ufs_hba *hba = ufs->hba;
+ u32 enabled_vh;
+
+ enabled_vh = ufshcd_readl(hba, MHCTRL) & MHCTRL_EN_VH_MASK;
+
+ /* Send physical host ready message to virtual hosts */
+ ufshcd_writel(hba, MH_MSG(enabled_vh, MH_MSG_PH_READY), PH2VH_MBOX);
+
+ return 0;
+}
+
static int exynos7_ufs_pre_link(struct exynos_ufs *ufs)
{
struct ufs_hba *hba = ufs->hba;
@@ -793,6 +953,27 @@ static void exynos_ufs_config_intr(struct exynos_ufs *ufs, u32 errs, u8 index)
}
}
+static int exynos_ufs_setup_clocks(struct ufs_hba *hba, bool on,
+ enum ufs_notify_change_status status)
+{
+ struct exynos_ufs *ufs = ufshcd_get_variant(hba);
+
+ if (!ufs)
+ return 0;
+
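+	/* Ungate clocks before enabling them; gate them after disabling */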
+ if (on && status == PRE_CHANGE) {
+ if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)
+ exynos_ufs_disable_auto_ctrl_hcc(ufs);
+ exynos_ufs_ungate_clks(ufs);
+ } else if (!on && status == POST_CHANGE) {
+ exynos_ufs_gate_clks(ufs);
+ if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)
+ exynos_ufs_enable_auto_ctrl_hcc(ufs);
+ }
+
+ return 0;
+}
+
static int exynos_ufs_pre_link(struct ufs_hba *hba)
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
@@ -808,8 +989,12 @@ static int exynos_ufs_pre_link(struct ufs_hba *hba)
/* m-phy */
exynos_ufs_phy_init(ufs);
- exynos_ufs_config_phy_time_attr(ufs);
- exynos_ufs_config_phy_cap_attr(ufs);
+ if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR)) {
+ exynos_ufs_config_phy_time_attr(ufs);
+ exynos_ufs_config_phy_cap_attr(ufs);
+ }
+
+ exynos_ufs_setup_clocks(hba, true, PRE_CHANGE);
if (ufs->drv_data->pre_link)
ufs->drv_data->pre_link(ufs);
@@ -893,17 +1078,10 @@ static int exynos_ufs_post_link(struct ufs_hba *hba)
static int exynos_ufs_parse_dt(struct device *dev, struct exynos_ufs *ufs)
{
struct device_node *np = dev->of_node;
- struct exynos_ufs_drv_data *drv_data = &exynos_ufs_drvs;
struct exynos_ufs_uic_attr *attr;
int ret = 0;
- while (drv_data->compatible) {
- if (of_device_is_compatible(np, drv_data->compatible)) {
- ufs->drv_data = drv_data;
- break;
- }
- drv_data++;
- }
+ ufs->drv_data = device_get_match_data(dev);
if (ufs->drv_data && ufs->drv_data->uic_attr) {
attr = ufs->drv_data->uic_attr;
@@ -913,6 +1091,17 @@ static int exynos_ufs_parse_dt(struct device *dev, struct exynos_ufs *ufs)
goto out;
}
+ ufs->sysreg = syscon_regmap_lookup_by_phandle(np, "samsung,sysreg");
+ if (IS_ERR(ufs->sysreg))
+ ufs->sysreg = NULL;
+ else {
+ if (of_property_read_u32_index(np, "samsung,sysreg", 1,
+ &ufs->shareability_reg_offset)) {
+ dev_warn(dev, "can't get an offset from sysreg. Set to default value\n");
+ ufs->shareability_reg_offset = UFS_SHAREABILITY_OFFSET;
+ }
+ }
+
ufs->pclk_avail_min = PCLK_AVAIL_MIN;
ufs->pclk_avail_max = PCLK_AVAIL_MAX;
@@ -927,6 +1116,18 @@ out:
return ret;
}
+static inline void exynos_ufs_priv_init(struct ufs_hba *hba,
+ struct exynos_ufs *ufs)
+{
+ ufs->hba = hba;
+ ufs->opts = ufs->drv_data->opts;
+ ufs->rx_sel_idx = PA_MAXDATALANES;
+ if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX)
+ ufs->rx_sel_idx = 0;
+ hba->priv = (void *)ufs;
+ hba->quirks = ufs->drv_data->quirks;
+}
+
static int exynos_ufs_init(struct ufs_hba *hba)
{
struct device *dev = hba->dev;
@@ -976,13 +1177,8 @@ static int exynos_ufs_init(struct ufs_hba *hba)
if (ret)
goto phy_off;
- ufs->hba = hba;
- ufs->opts = ufs->drv_data->opts;
- ufs->rx_sel_idx = PA_MAXDATALANES;
- if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX)
- ufs->rx_sel_idx = 0;
- hba->priv = (void *)ufs;
- hba->quirks = ufs->drv_data->quirks;
+ exynos_ufs_priv_init(hba, ufs);
+
if (ufs->drv_data->drv_init) {
ret = ufs->drv_data->drv_init(dev, ufs);
if (ret) {
@@ -1110,6 +1306,12 @@ static int exynos_ufs_hce_enable_notify(struct ufs_hba *hba,
switch (status) {
case PRE_CHANGE:
+ if (ufs->drv_data->pre_hce_enable) {
+ ret = ufs->drv_data->pre_hce_enable(ufs);
+ if (ret)
+ return ret;
+ }
+
ret = exynos_ufs_host_reset(hba);
if (ret)
return ret;
@@ -1119,6 +1321,10 @@ static int exynos_ufs_hce_enable_notify(struct ufs_hba *hba,
exynos_ufs_calc_pwm_clk_div(ufs);
if (!(ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL))
exynos_ufs_enable_auto_ctrl_hcc(ufs);
+
+ if (ufs->drv_data->post_hce_enable)
+ ret = ufs->drv_data->post_hce_enable(ufs);
+
break;
}
@@ -1202,12 +1408,77 @@ static int exynos_ufs_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
return 0;
}
+static int exynosauto_ufs_vh_link_startup_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ if (status == POST_CHANGE) {
+ ufshcd_set_link_active(hba);
+ ufshcd_set_ufs_dev_active(hba);
+ }
+
+ return 0;
+}
+
+static int exynosauto_ufs_vh_wait_ph_ready(struct ufs_hba *hba)
+{
+ u32 mbox;
+ ktime_t start, stop;
+
+ start = ktime_get();
+ stop = ktime_add(start, ms_to_ktime(PH_READY_TIMEOUT_MS));
+
+ do {
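+	/* Poll the PH-to-VH mailbox for PH_READY until the timeout expires */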
+ mbox = ufshcd_readl(hba, PH2VH_MBOX);
+ /* TODO: Mailbox message protocols between the PH and VHs are
+		 * not implemented yet. This will be supported later.
+ */
+ if ((mbox & MH_MSG_MASK) == MH_MSG_PH_READY)
+ return 0;
+
+ usleep_range(40, 50);
+ } while (ktime_before(ktime_get(), stop));
+
+ return -ETIME;
+}
+
+static int exynosauto_ufs_vh_init(struct ufs_hba *hba)
+{
+ struct device *dev = hba->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct exynos_ufs *ufs;
+ int ret;
+
+ ufs = devm_kzalloc(dev, sizeof(*ufs), GFP_KERNEL);
+ if (!ufs)
+ return -ENOMEM;
+
+ /* exynos-specific hci */
+ ufs->reg_hci = devm_platform_ioremap_resource_byname(pdev, "vs_hci");
+ if (IS_ERR(ufs->reg_hci)) {
+ dev_err(dev, "cannot ioremap for hci vendor register\n");
+ return PTR_ERR(ufs->reg_hci);
+ }
+
+ ret = exynosauto_ufs_vh_wait_ph_ready(hba);
+ if (ret)
+ return ret;
+
+ ufs->drv_data = device_get_match_data(dev);
+ if (!ufs->drv_data)
+ return -ENODEV;
+
+ exynos_ufs_priv_init(hba, ufs);
+
+ return 0;
+}
+
static struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
.name = "exynos_ufs",
.init = exynos_ufs_init,
.hce_enable_notify = exynos_ufs_hce_enable_notify,
.link_startup_notify = exynos_ufs_link_startup_notify,
.pwr_change_notify = exynos_ufs_pwr_change_notify,
+ .setup_clocks = exynos_ufs_setup_clocks,
.setup_xfer_req = exynos_ufs_specify_nexus_t_xfer_req,
.setup_task_mgmt = exynos_ufs_specify_nexus_t_tm_req,
.hibern8_notify = exynos_ufs_hibern8_notify,
@@ -1215,12 +1486,24 @@ static struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
.resume = exynos_ufs_resume,
};
+static struct ufs_hba_variant_ops ufs_hba_exynosauto_vh_ops = {
+ .name = "exynosauto_ufs_vh",
+ .init = exynosauto_ufs_vh_init,
+ .link_startup_notify = exynosauto_ufs_vh_link_startup_notify,
+};
+
static int exynos_ufs_probe(struct platform_device *pdev)
{
int err;
struct device *dev = &pdev->dev;
+ const struct ufs_hba_variant_ops *vops = &ufs_hba_exynos_ops;
+ const struct exynos_ufs_drv_data *drv_data =
+ device_get_match_data(dev);
- err = ufshcd_pltfrm_init(pdev, &ufs_hba_exynos_ops);
+ if (drv_data && drv_data->vops)
+ vops = drv_data->vops;
+
+ err = ufshcd_pltfrm_init(pdev, vops);
if (err)
dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
@@ -1261,8 +1544,35 @@ static struct exynos_ufs_uic_attr exynos7_uic_attr = {
.pa_dbg_option_suite = 0x30103,
};
+static struct exynos_ufs_drv_data exynosauto_ufs_drvs = {
+ .uic_attr = &exynos7_uic_attr,
+ .quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
+ UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
+ UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
+ UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING,
+ .opts = EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
+ EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR |
+ EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX,
+ .drv_init = exynosauto_ufs_drv_init,
+ .post_hce_enable = exynosauto_ufs_post_hce_enable,
+ .pre_link = exynosauto_ufs_pre_link,
+ .pre_pwr_change = exynosauto_ufs_pre_pwr_change,
+ .post_pwr_change = exynosauto_ufs_post_pwr_change,
+};
+
+static struct exynos_ufs_drv_data exynosauto_ufs_vh_drvs = {
+ .vops = &ufs_hba_exynosauto_vh_ops,
+ .quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
+ UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
+ UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
+ UFSHCI_QUIRK_BROKEN_HCE |
+ UFSHCD_QUIRK_BROKEN_UIC_CMD |
+ UFSHCD_QUIRK_SKIP_PH_CONFIGURATION |
+ UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING,
+ .opts = EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX,
+};
+
static struct exynos_ufs_drv_data exynos_ufs_drvs = {
- .compatible = "samsung,exynos7-ufs",
.uic_attr = &exynos7_uic_attr,
.quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR |
@@ -1287,6 +1597,10 @@ static struct exynos_ufs_drv_data exynos_ufs_drvs = {
static const struct of_device_id exynos_ufs_of_match[] = {
{ .compatible = "samsung,exynos7-ufs",
.data = &exynos_ufs_drvs },
+ { .compatible = "samsung,exynosautov9-ufs",
+ .data = &exynosauto_ufs_drvs },
+ { .compatible = "samsung,exynosautov9-ufs-vh",
+ .data = &exynosauto_ufs_vh_drvs },
{},
};
diff --git a/drivers/scsi/ufs/ufs-exynos.h b/drivers/scsi/ufs/ufs-exynos.h
index dadf4fd10dd805..1c33e5466082bf 100644
--- a/drivers/scsi/ufs/ufs-exynos.h
+++ b/drivers/scsi/ufs/ufs-exynos.h
@@ -56,6 +56,22 @@
#define TX_GRAN_NVAL_10_08 0x0296
#define TX_GRAN_NVAL_H(v) (((v) >> 8) & 0x3)
+#define VND_TX_CLK_PRD 0xAA
+#define VND_TX_CLK_PRD_EN 0xA9
+#define VND_TX_LINERESET_PVALUE0 0xAD
+#define VND_TX_LINERESET_PVALUE1 0xAC
+#define VND_TX_LINERESET_PVALUE2 0xAB
+
+#define TX_LINE_RESET_TIME 3200
+
+#define VND_RX_CLK_PRD 0x12
+#define VND_RX_CLK_PRD_EN 0x11
+#define VND_RX_LINERESET_VALUE0 0x1D
+#define VND_RX_LINERESET_VALUE1 0x1C
+#define VND_RX_LINERESET_VALUE2 0x1B
+
+#define RX_LINE_RESET_TIME 1000
+
#define RX_FILLER_ENABLE 0x0316
#define RX_FILLER_EN (1 << 1)
#define RX_LINERESET_VAL 0x0317
@@ -99,7 +115,7 @@ struct exynos_ufs;
#define PA_HIBERN8TIME_VAL 0x20
#define PCLK_AVAIL_MIN 70000000
-#define PCLK_AVAIL_MAX 133000000
+#define PCLK_AVAIL_MAX 167000000
struct exynos_ufs_uic_attr {
/* TX Attributes */
@@ -142,7 +158,7 @@ struct exynos_ufs_uic_attr {
};
struct exynos_ufs_drv_data {
- char *compatible;
+ const struct ufs_hba_variant_ops *vops;
struct exynos_ufs_uic_attr *uic_attr;
unsigned int quirks;
unsigned int opts;
@@ -154,6 +170,8 @@ struct exynos_ufs_drv_data {
struct ufs_pa_layer_attr *pwr);
int (*post_pwr_change)(struct exynos_ufs *ufs,
struct ufs_pa_layer_attr *pwr);
+ int (*pre_hce_enable)(struct exynos_ufs *ufs);
+ int (*post_hce_enable)(struct exynos_ufs *ufs);
};
struct ufs_phy_time_cfg {
@@ -191,7 +209,9 @@ struct exynos_ufs {
struct ufs_pa_layer_attr dev_req_params;
struct ufs_phy_time_cfg t_cfg;
ktime_t entry_hibern8_t;
- struct exynos_ufs_drv_data *drv_data;
+ const struct exynos_ufs_drv_data *drv_data;
+ struct regmap *sysreg;
+ u32 shareability_reg_offset;
u32 opts;
#define EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL BIT(0)
@@ -199,6 +219,7 @@ struct exynos_ufs {
#define EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL BIT(2)
#define EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX BIT(3)
#define EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER BIT(4)
+#define EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR BIT(5)
};
#define for_each_ufs_rx_lane(ufs, i) \
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 5c6a58a666d2f9..afd38142b1c028 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -132,6 +132,14 @@ enum {
UFSHCD_CAN_QUEUE = 32,
};
+static const char *const ufshcd_state_name[] = {
+ [UFSHCD_STATE_RESET] = "reset",
+ [UFSHCD_STATE_OPERATIONAL] = "operational",
+ [UFSHCD_STATE_ERROR] = "error",
+ [UFSHCD_STATE_EH_SCHEDULED_FATAL] = "eh_fatal",
+ [UFSHCD_STATE_EH_SCHEDULED_NON_FATAL] = "eh_non_fatal",
+};
+
/* UFSHCD error handling flags */
enum {
UFSHCD_EH_IN_PROGRESS = (1 << 0),
@@ -236,7 +244,6 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *pwr_mode);
-static void ufshcd_schedule_eh_work(struct ufs_hba *hba);
static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
@@ -711,7 +718,7 @@ static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
* This function is used to get the OCS field from UTRD
* Returns the OCS field in the UTRD
*/
-static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
+static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
{
return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}
@@ -2323,6 +2330,9 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
int ret;
unsigned long flags;
+ if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
+ return 0;
+
ufshcd_hold(hba, false);
mutex_lock(&hba->uic_cmd_mutex);
ufshcd_add_delay_before_dme_cmd(hba);
@@ -2367,17 +2377,24 @@ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
sizeof(struct ufshcd_sg_entry)));
else
lrbp->utr_descriptor_ptr->prd_table_length =
- cpu_to_le16((u16) (sg_segments));
+ cpu_to_le16(sg_segments);
- prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
+ prd_table = lrbp->ucd_prdt_ptr;
scsi_for_each_sg(cmd, sg, sg_segments, i) {
- prd_table[i].size =
- cpu_to_le32(((u32) sg_dma_len(sg))-1);
- prd_table[i].base_addr =
- cpu_to_le32(lower_32_bits(sg->dma_address));
- prd_table[i].upper_addr =
- cpu_to_le32(upper_32_bits(sg->dma_address));
+ const unsigned int len = sg_dma_len(sg);
+
+ /*
+ * From the UFSHCI spec: "Data Byte Count (DBC): A '0'
+ * based value that indicates the length, in bytes, of
+ * the data block. A maximum of length of 256KB may
+ * exist for any entry. Bits 1:0 of this field shall be
+ * 11b to indicate Dword granularity. A value of '3'
+ * indicates 4 bytes, '7' indicates 8 bytes, etc."
+ */
+ WARN_ONCE(len > 256 * 1024, "len = %#x\n", len);
+ prd_table[i].size = cpu_to_le32(len - 1);
+ prd_table[i].addr = cpu_to_le64(sg->dma_address);
prd_table[i].reserved = 0;
}
} else {
@@ -2661,7 +2678,7 @@ static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
lrb->ucd_req_dma_addr = cmd_desc_element_addr;
lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
- lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
+ lrb->ucd_prdt_ptr = cmd_descp[i].prd_table;
lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
}
@@ -5084,7 +5101,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
int result = 0;
int scsi_status;
- int ocs;
+ enum utp_ocs ocs;
/* overall command status of utrd */
ocs = ufshcd_get_tr_ocs(lrbp);
@@ -5243,11 +5260,9 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
* __ufshcd_transfer_req_compl - handle SCSI and query command completion
* @hba: per adapter instance
* @completed_reqs: bitmask that indicates which requests to complete
- * @retry_requests: whether to ask the SCSI core to retry completed requests
*/
static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
- unsigned long completed_reqs,
- bool retry_requests)
+ unsigned long completed_reqs)
{
struct ufshcd_lrb *lrbp;
struct scsi_cmnd *cmd;
@@ -5263,8 +5278,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
ufshcd_update_monitor(hba, lrbp);
ufshcd_add_command_trace(hba, index, UFS_CMD_COMP);
- result = retry_requests ? DID_BUS_BUSY << 16 :
- ufshcd_transfer_rsp_status(hba, lrbp);
+ result = ufshcd_transfer_rsp_status(hba, lrbp);
scsi_dma_unmap(cmd);
cmd->result = result;
/* Mark completed command as NULL in LRB */
@@ -5290,14 +5304,12 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
/**
* ufshcd_transfer_req_compl - handle SCSI and query command completion
* @hba: per adapter instance
- * @retry_requests: whether or not to ask to retry requests
*
* Returns
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
-static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba,
- bool retry_requests)
+static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
{
unsigned long completed_reqs, flags;
u32 tr_doorbell;
@@ -5326,8 +5338,7 @@ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba,
spin_unlock_irqrestore(&hba->outstanding_lock, flags);
if (completed_reqs) {
- __ufshcd_transfer_req_compl(hba, completed_reqs,
- retry_requests);
+ __ufshcd_transfer_req_compl(hba, completed_reqs);
return IRQ_HANDLED;
} else {
return IRQ_NONE;
@@ -5826,13 +5837,7 @@ out:
/* Complete requests that have door-bell cleared */
static void ufshcd_complete_requests(struct ufs_hba *hba)
{
- ufshcd_transfer_req_compl(hba, /*retry_requests=*/false);
- ufshcd_tmc_handler(hba);
-}
-
-static void ufshcd_retry_aborted_requests(struct ufs_hba *hba)
-{
- ufshcd_transfer_req_compl(hba, /*retry_requests=*/true);
+ ufshcd_transfer_req_compl(hba);
ufshcd_tmc_handler(hba);
}
@@ -5914,9 +5919,10 @@ static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
(hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
}
-/* host lock must be held before calling this func */
-static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba)
+void ufshcd_schedule_eh_work(struct ufs_hba *hba)
{
+ lockdep_assert_held(hba->host->host_lock);
+
/* handle fatal errors only when link is not in error state */
if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
if (hba->force_reset || ufshcd_is_link_broken(hba) ||
@@ -6076,6 +6082,13 @@ static void ufshcd_err_handler(struct work_struct *work)
hba = container_of(work, struct ufs_hba, eh_work);
+ dev_info(hba->dev,
+ "%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n",
+ __func__, ufshcd_state_name[hba->ufshcd_state],
+ hba->is_powered, hba->shutting_down, hba->saved_err,
+ hba->saved_uic_err, hba->force_reset,
+ ufshcd_is_link_broken(hba) ? "; link is broken" : "");
+
down(&hba->host_sem);
spin_lock_irqsave(hba->host->host_lock, flags);
if (ufshcd_err_handling_should_stop(hba)) {
@@ -6170,6 +6183,8 @@ again:
err_xfer = true;
goto lock_skip_pending_xfer_clear;
}
+ dev_err(hba->dev, "Aborted tag %d / CDB %#02x\n", tag,
+ hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1);
}
/* Clear pending task management requests */
@@ -6181,7 +6196,8 @@ again:
}
lock_skip_pending_xfer_clear:
- ufshcd_retry_aborted_requests(hba);
+ /* Complete the requests that are cleared by s/w */
+ ufshcd_complete_requests(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
hba->silence_err_logs = false;
@@ -6249,6 +6265,9 @@ skip_err_handling:
spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_err_handling_unprepare(hba);
up(&hba->host_sem);
+
+ dev_info(hba->dev, "%s finished; HBA state %s\n", __func__,
+ ufshcd_state_name[hba->ufshcd_state]);
}
/**
@@ -6473,7 +6492,7 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
retval |= ufshcd_tmc_handler(hba);
if (intr_status & UTP_TRANSFER_REQ_COMPL)
- retval |= ufshcd_transfer_req_compl(hba, /*retry_requests=*/false);
+ retval |= ufshcd_transfer_req_compl(hba);
return retval;
}
@@ -6545,6 +6564,10 @@ static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
err = ufshcd_wait_for_register(hba,
REG_UTP_TASK_REQ_DOOR_BELL,
mask, 0, 1000, 1000);
+
+ dev_err(hba->dev, "Clearing task management function with tag %d %s\n",
+			tag, err ? "failed" : "succeeded");
+
out:
return err;
}
@@ -6637,7 +6660,8 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
u8 tm_function, u8 *tm_response)
{
struct utp_task_req_desc treq = { { 0 }, };
- int ocs_value, err;
+ enum utp_ocs ocs_value;
+ int err;
/* Configure task request descriptor */
treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
@@ -6815,7 +6839,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
int err;
enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
struct utp_task_req_desc treq = { { 0 }, };
- int ocs_value;
+ enum utp_ocs ocs_value;
u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
switch (msgcode) {
@@ -6893,7 +6917,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
err = ufshcd_clear_cmd(hba, pos);
if (err)
break;
- __ufshcd_transfer_req_compl(hba, 1U << pos, false);
+ __ufshcd_transfer_req_compl(hba, 1U << pos);
}
}
@@ -7055,7 +7079,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
dev_err(hba->dev,
"%s: cmd was completed, but without a notifying intr, tag = %d",
__func__, tag);
- __ufshcd_transfer_req_compl(hba, 1UL << tag, /*retry_requests=*/false);
+ __ufshcd_transfer_req_compl(hba, 1UL << tag);
goto release;
}
@@ -7121,7 +7145,7 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
ufshpb_reset_host(hba);
ufshcd_hba_stop(hba);
hba->silence_err_logs = true;
- ufshcd_retry_aborted_requests(hba);
+ ufshcd_complete_requests(hba);
hba->silence_err_logs = false;
/* scale up clocks to max frequency before full reinitialization */
@@ -8002,6 +8026,9 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
if (ret)
goto out;
+ if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION)
+ goto out;
+
/* Debug counters initialization */
ufshcd_clear_dbg_ufs_stats(hba);
@@ -9772,6 +9799,11 @@ static int __init ufshcd_core_init(void)
{
int ret;
+ /* Verify that there are no gaps in struct utp_transfer_cmd_desc. */
+ static_assert(sizeof(struct utp_transfer_cmd_desc) ==
+ 2 * ALIGNED_UPIU_SIZE +
+ SG_ALL * sizeof(struct ufshcd_sg_entry));
+
ufs_debugfs_init();
ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index dd765863b05fc5..54750d72c8fb09 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -589,6 +589,18 @@ enum ufshcd_quirks {
* This quirk allows only sg entries aligned with page size.
*/
UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE = 1 << 14,
+
+ /*
+ * This quirk needs to be enabled if the host controller does not
+ * support UIC command
+	 * support UIC commands.
+ UFSHCD_QUIRK_BROKEN_UIC_CMD = 1 << 15,
+
+ /*
+ * This quirk needs to be enabled if the host controller cannot
+ * support physical host configuration.
+ */
+ UFSHCD_QUIRK_SKIP_PH_CONFIGURATION = 1 << 16,
};
enum ufshcd_caps {
@@ -1023,6 +1035,7 @@ int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val);
void ufshcd_hba_stop(struct ufs_hba *hba);
+void ufshcd_schedule_eh_work(struct ufs_hba *hba);
static inline void check_upiu_size(void)
{
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index de95be5d11d4e3..6a295c88d850f8 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -389,7 +389,7 @@ enum {
};
/* Overall command status values */
-enum {
+enum utp_ocs {
OCS_SUCCESS = 0x0,
OCS_INVALID_CMD_TABLE_ATTR = 0x1,
OCS_INVALID_PRDT_ATTR = 0x2,
@@ -402,6 +402,9 @@ enum {
OCS_INVALID_CRYPTO_CONFIG = 0x9,
OCS_GENERAL_CRYPTO_ERROR = 0xA,
OCS_INVALID_COMMAND_STATUS = 0x0F,
+};
+
+enum {
MASK_OCS = 0x0F,
};
@@ -412,20 +415,18 @@ enum {
/**
* struct ufshcd_sg_entry - UFSHCI PRD Entry
- * @base_addr: Lower 32bit physical address DW-0
- * @upper_addr: Upper 32bit physical address DW-1
+ * @addr: Physical address; DW-0 and DW-1.
* @reserved: Reserved for future use DW-2
* @size: size of physical segment DW-3
*/
struct ufshcd_sg_entry {
- __le32 base_addr;
- __le32 upper_addr;
+ __le64 addr;
__le32 reserved;
__le32 size;
};
/**
- * struct utp_transfer_cmd_desc - UFS Command Descriptor structure
+ * struct utp_transfer_cmd_desc - UTP Command Descriptor (UCD)
* @command_upiu: Command UPIU Frame address
* @response_upiu: Response UPIU Frame address
* @prd_table: Physical Region Descriptor
@@ -451,7 +452,7 @@ struct request_desc_header {
};
/**
- * struct utp_transfer_req_desc - UTRD structure
+ * struct utp_transfer_req_desc - UTP Transfer Request Descriptor (UTRD)
* @header: UTRD header DW-0 to DW-3
* @command_desc_base_addr_lo: UCD base address low DW-4
* @command_desc_base_addr_hi: UCD base address high DW-5
diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
index f4eca441b43386..2e31e14138262c 100644
--- a/drivers/scsi/ufs/ufshpb.c
+++ b/drivers/scsi/ufs/ufshpb.c
@@ -394,8 +394,6 @@ int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
if (!ufshpb_is_supported_chunk(hpb, transfer_len))
return 0;
- WARN_ON_ONCE(transfer_len > HPB_MULTI_CHUNK_HIGH);
-
if (hpb->is_hcm) {
/*
* in host control mode, reads are the main source for
@@ -1572,7 +1570,7 @@ static void ufshpb_lu_parameter_init(struct ufs_hba *hba,
if (ufshpb_is_legacy(hba))
hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH;
else
- hpb->pre_req_max_tr_len = HPB_MULTI_CHUNK_HIGH;
+ hpb->pre_req_max_tr_len = hpb_dev_info->max_hpb_single_cmd;
hpb->lu_pinned_start = hpb_lu_info->pinned_start;
hpb->lu_pinned_end = hpb_lu_info->num_pinned ?
@@ -2582,7 +2580,7 @@ void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf)
{
struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
int version, ret;
- u32 max_hpb_single_cmd = HPB_MULTI_CHUNK_LOW;
+ int max_single_cmd;
hpb_dev_info->control_mode = desc_buf[DEVICE_DESC_PARAM_HPB_CONTROL];
@@ -2598,18 +2596,22 @@ void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf)
if (version == HPB_SUPPORT_LEGACY_VERSION)
hpb_dev_info->is_legacy = true;
- ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
- QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD, 0, 0, &max_hpb_single_cmd);
- if (ret)
- dev_err(hba->dev, "%s: idn: read max size of single hpb cmd query request failed",
- __func__);
- hpb_dev_info->max_hpb_single_cmd = max_hpb_single_cmd;
-
/*
* Get the number of user logical unit to check whether all
* scsi_device finish initialization
*/
hpb_dev_info->num_lu = desc_buf[DEVICE_DESC_PARAM_NUM_LU];
+
+ if (hpb_dev_info->is_legacy)
+ return;
+
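+	/* Query the max single HPB command size; fall back to legacy size on failure */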
+ ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD, 0, 0, &max_single_cmd);
+
+ if (ret)
+ hpb_dev_info->max_hpb_single_cmd = HPB_LEGACY_CHUNK_HIGH;
+ else
+ hpb_dev_info->max_hpb_single_cmd = min(max_single_cmd + 1, HPB_MULTI_CHUNK_HIGH);
}
void ufshpb_init(struct ufs_hba *hba)
diff --git a/drivers/scsi/ufs/ufshpb.h b/drivers/scsi/ufs/ufshpb.h
index f15d8fdbce2eff..b475dbd7898837 100644
--- a/drivers/scsi/ufs/ufshpb.h
+++ b/drivers/scsi/ufs/ufshpb.h
@@ -31,7 +31,6 @@
/* hpb support chunk size */
#define HPB_LEGACY_CHUNK_HIGH 1
-#define HPB_MULTI_CHUNK_LOW 7
#define HPB_MULTI_CHUNK_HIGH 255
/* hpb vender defined opcode */
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index e7fcbc09f9dbc3..bac111456fa1de 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -50,15 +50,6 @@ EXPORT_SYMBOL(core_tmr_alloc_req);
void core_tmr_release_req(struct se_tmr_req *tmr)
{
- struct se_device *dev = tmr->tmr_dev;
- unsigned long flags;
-
- if (dev) {
- spin_lock_irqsave(&dev->se_tmr_lock, flags);
- list_del_init(&tmr->tmr_list);
- spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
- }
-
kfree(tmr);
}
@@ -156,13 +147,6 @@ void core_tmr_abort_task(
se_cmd->state_active = false;
spin_unlock_irqrestore(&dev->queues[i].lock, flags);
- /*
- * Ensure that this ABORT request is visible to the LU
- * RESET code.
- */
- if (!tmr->tmr_dev)
- WARN_ON_ONCE(transport_lookup_tmr_lun(tmr->task_cmd) < 0);
-
if (dev->transport->tmr_notify)
dev->transport->tmr_notify(dev, TMR_ABORT_TASK,
&aborted_list);
@@ -234,6 +218,7 @@ static void core_tmr_drain_tmr_list(
}
list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
+ tmr_p->tmr_dev = NULL;
}
spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 4a2e749eb1826e..7838dc20f7130c 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -676,6 +676,21 @@ static void target_remove_from_state_list(struct se_cmd *cmd)
spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags);
}
+static void target_remove_from_tmr_list(struct se_cmd *cmd)
+{
+ struct se_device *dev = NULL;
+ unsigned long flags;
+
+ if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
+ dev = cmd->se_tmr_req->tmr_dev;
+
+ if (dev) {
+ spin_lock_irqsave(&dev->se_tmr_lock, flags);
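+		/*
+		 * tmr_dev may have been cleared by core_tmr_drain_tmr_list(),
+		 * so recheck it under se_tmr_lock.
+		 */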
+ if (cmd->se_tmr_req->tmr_dev)
+ list_del_init(&cmd->se_tmr_req->tmr_list);
+ spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
+ }
+}
/*
* This function is called by the target core after the target core has
* finished processing a SCSI command or SCSI TMF. Both the regular command
@@ -687,13 +702,6 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
unsigned long flags;
- target_remove_from_state_list(cmd);
-
- /*
- * Clear struct se_cmd->se_lun before the handoff to FE.
- */
- cmd->se_lun = NULL;
-
spin_lock_irqsave(&cmd->t_state_lock, flags);
/*
* Determine if frontend context caller is requesting the stopping of
@@ -728,8 +736,16 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
if (!lun)
return;
+ target_remove_from_state_list(cmd);
+ target_remove_from_tmr_list(cmd);
+
if (cmpxchg(&cmd->lun_ref_active, true, false))
percpu_ref_put(&lun->lun_ref);
+
+ /*
+ * Clear struct se_cmd->se_lun before the handoff to FE.
+ */
+ cmd->se_lun = NULL;
}
static void target_complete_failure_work(struct work_struct *work)
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_mbox.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_mbox.c
index a86521973dad4f..01008ae00e7f78 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_mbox.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_mbox.c
@@ -7,6 +7,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include "processor_thermal_device.h"
#define MBOX_CMD_WORKLOAD_TYPE_READ 0x0E
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index a3311e93784754..4d326ee12c36a7 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2795,7 +2795,6 @@ int usb_add_hcd(struct usb_hcd *hcd,
{
int retval;
struct usb_device *rhdev;
- struct usb_hcd *shared_hcd;
if (!hcd->skip_phy_initialization && usb_hcd_is_primary_hcd(hcd)) {
hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev);
@@ -2956,26 +2955,13 @@ int usb_add_hcd(struct usb_hcd *hcd,
goto err_hcd_driver_start;
}
- /* starting here, usbcore will pay attention to the shared HCD roothub */
- shared_hcd = hcd->shared_hcd;
- if (!usb_hcd_is_primary_hcd(hcd) && shared_hcd && HCD_DEFER_RH_REGISTER(shared_hcd)) {
- retval = register_root_hub(shared_hcd);
- if (retval != 0)
- goto err_register_root_hub;
-
- if (shared_hcd->uses_new_polling && HCD_POLL_RH(shared_hcd))
- usb_hcd_poll_rh_status(shared_hcd);
- }
-
/* starting here, usbcore will pay attention to this root hub */
- if (!HCD_DEFER_RH_REGISTER(hcd)) {
- retval = register_root_hub(hcd);
- if (retval != 0)
- goto err_register_root_hub;
+ retval = register_root_hub(hcd);
+ if (retval != 0)
+ goto err_register_root_hub;
- if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
- usb_hcd_poll_rh_status(hcd);
- }
+ if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
+ usb_hcd_poll_rh_status(hcd);
return retval;
@@ -3013,7 +2999,6 @@ EXPORT_SYMBOL_GPL(usb_add_hcd);
void usb_remove_hcd(struct usb_hcd *hcd)
{
struct usb_device *rhdev = hcd->self.root_hub;
- bool rh_registered;
dev_info(hcd->self.controller, "remove, state %x\n", hcd->state);
@@ -3024,7 +3009,6 @@ void usb_remove_hcd(struct usb_hcd *hcd)
dev_dbg(hcd->self.controller, "roothub graceful disconnect\n");
spin_lock_irq (&hcd_root_hub_lock);
- rh_registered = hcd->rh_registered;
hcd->rh_registered = 0;
spin_unlock_irq (&hcd_root_hub_lock);
@@ -3034,8 +3018,7 @@ void usb_remove_hcd(struct usb_hcd *hcd)
cancel_work_sync(&hcd->died_work);
mutex_lock(&usb_bus_idr_lock);
- if (rh_registered)
- usb_disconnect(&rhdev); /* Sets rhdev to NULL */
+ usb_disconnect(&rhdev); /* Sets rhdev to NULL */
mutex_unlock(&usb_bus_idr_lock);
/*
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index a3f875eea75192..af946c42b6f0a0 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -257,7 +257,6 @@ static void xhci_common_hub_descriptor(struct xhci_hcd *xhci,
{
u16 temp;
- desc->bPwrOn2PwrGood = 10; /* xhci section 5.4.9 says 20ms max */
desc->bHubContrCurrent = 0;
desc->bNbrPorts = ports;
@@ -292,6 +291,7 @@ static void xhci_usb2_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci,
desc->bDescriptorType = USB_DT_HUB;
temp = 1 + (ports / 8);
desc->bDescLength = USB_DT_HUB_NONVAR_SIZE + 2 * temp;
+ desc->bPwrOn2PwrGood = 10; /* xhci section 5.4.8 says 20ms */
/* The Device Removable bits are reported on a byte granularity.
* If the port doesn't exist within that byte, the bit is set to 0.
@@ -344,6 +344,7 @@ static void xhci_usb3_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci,
xhci_common_hub_descriptor(xhci, desc, ports);
desc->bDescriptorType = USB_DT_SS_HUB;
desc->bDescLength = USB_DT_SS_HUB_SIZE;
+ desc->bPwrOn2PwrGood = 50; /* usb 3.1 may fail if less than 100ms */
/* header decode latency should be zero for roothubs,
* see section 4.23.5.2.
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 541fe4dcc43a22..902f410874e8ea 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -692,7 +692,6 @@ int xhci_run(struct usb_hcd *hcd)
if (ret)
xhci_free_command(xhci, command);
}
- set_bit(HCD_FLAG_DEFER_RH_REGISTER, &hcd->flags);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Finished xhci_run for USB2 roothub");
diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c
index f98e8f298bc19a..01fae2c96965c7 100644
--- a/drivers/video/fbdev/core/bitblit.c
+++ b/drivers/video/fbdev/core/bitblit.c
@@ -43,21 +43,6 @@ static void update_attr(u8 *dst, u8 *src, int attribute,
}
}
-static void bit_bmove(struct vc_data *vc, struct fb_info *info, int sy,
- int sx, int dy, int dx, int height, int width)
-{
- struct fb_copyarea area;
-
- area.sx = sx * vc->vc_font.width;
- area.sy = sy * vc->vc_font.height;
- area.dx = dx * vc->vc_font.width;
- area.dy = dy * vc->vc_font.height;
- area.height = height * vc->vc_font.height;
- area.width = width * vc->vc_font.width;
-
- info->fbops->fb_copyarea(info, &area);
-}
-
static void bit_clear(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width)
{
@@ -393,7 +378,6 @@ static int bit_update_start(struct fb_info *info)
void fbcon_set_bitops(struct fbcon_ops *ops)
{
- ops->bmove = bit_bmove;
ops->clear = bit_clear;
ops->putcs = bit_putcs;
ops->clear_margins = bit_clear_margins;
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 22bb3892f6bd1d..99ecd9a6d844ad 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -173,8 +173,6 @@ static void fbcon_putcs(struct vc_data *vc, const unsigned short *s,
int count, int ypos, int xpos);
static void fbcon_clear_margins(struct vc_data *vc, int bottom_only);
static void fbcon_cursor(struct vc_data *vc, int mode);
-static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx,
- int height, int width);
static int fbcon_switch(struct vc_data *vc);
static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch);
static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table);
@@ -182,16 +180,8 @@ static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table);
/*
* Internal routines
*/
-static __inline__ void ywrap_up(struct vc_data *vc, int count);
-static __inline__ void ywrap_down(struct vc_data *vc, int count);
-static __inline__ void ypan_up(struct vc_data *vc, int count);
-static __inline__ void ypan_down(struct vc_data *vc, int count);
-static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, int sx,
- int dy, int dx, int height, int width, u_int y_break);
static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
int unit);
-static void fbcon_redraw_move(struct vc_data *vc, struct fbcon_display *p,
- int line, int count, int dy);
static void fbcon_modechanged(struct fb_info *info);
static void fbcon_set_all_vcs(struct fb_info *info);
static void fbcon_start(void);
@@ -1136,14 +1126,6 @@ static void fbcon_init(struct vc_data *vc, int init)
ops->graphics = 0;
/*
- * No more hw acceleration for fbcon.
- *
- * FIXME: Garbage collect all the now dead code after sufficient time
- * has passed.
- */
- p->scrollmode = SCROLL_REDRAW;
-
- /*
* ++guenther: console.c:vc_allocate() relies on initializing
* vc_{cols,rows}, but we must not set those if we are only
* resizing the console.
@@ -1229,14 +1211,13 @@ finished:
* This system is now divided into two levels because of complications
* caused by hardware scrolling. Top level functions:
*
- * fbcon_bmove(), fbcon_clear(), fbcon_putc(), fbcon_clear_margins()
+ * fbcon_clear(), fbcon_putc(), fbcon_clear_margins()
*
* handles y values in range [0, scr_height-1] that correspond to real
* screen positions. y_wrap shift means that first line of bitmap may be
* anywhere on this display. These functions convert lineoffsets to
* bitmap offsets and deal with the wrap-around case by splitting blits.
*
- * fbcon_bmove_physical_8() -- These functions fast implementations
* fbcon_clear_physical_8() -- of original fbcon_XXX fns.
* fbcon_putc_physical_8() -- (font width != 8) may be added later
*
@@ -1409,224 +1390,6 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
}
}
-static __inline__ void ywrap_up(struct vc_data *vc, int count)
-{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
- struct fbcon_ops *ops = info->fbcon_par;
- struct fbcon_display *p = &fb_display[vc->vc_num];
-
- p->yscroll += count;
- if (p->yscroll >= p->vrows) /* Deal with wrap */
- p->yscroll -= p->vrows;
- ops->var.xoffset = 0;
- ops->var.yoffset = p->yscroll * vc->vc_font.height;
- ops->var.vmode |= FB_VMODE_YWRAP;
- ops->update_start(info);
- scrollback_max += count;
- if (scrollback_max > scrollback_phys_max)
- scrollback_max = scrollback_phys_max;
- scrollback_current = 0;
-}
-
-static __inline__ void ywrap_down(struct vc_data *vc, int count)
-{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
- struct fbcon_ops *ops = info->fbcon_par;
- struct fbcon_display *p = &fb_display[vc->vc_num];
-
- p->yscroll -= count;
- if (p->yscroll < 0) /* Deal with wrap */
- p->yscroll += p->vrows;
- ops->var.xoffset = 0;
- ops->var.yoffset = p->yscroll * vc->vc_font.height;
- ops->var.vmode |= FB_VMODE_YWRAP;
- ops->update_start(info);
- scrollback_max -= count;
- if (scrollback_max < 0)
- scrollback_max = 0;
- scrollback_current = 0;
-}
-
-static __inline__ void ypan_up(struct vc_data *vc, int count)
-{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
- struct fbcon_display *p = &fb_display[vc->vc_num];
- struct fbcon_ops *ops = info->fbcon_par;
-
- p->yscroll += count;
- if (p->yscroll > p->vrows - vc->vc_rows) {
- ops->bmove(vc, info, p->vrows - vc->vc_rows,
- 0, 0, 0, vc->vc_rows, vc->vc_cols);
- p->yscroll -= p->vrows - vc->vc_rows;
- }
-
- ops->var.xoffset = 0;
- ops->var.yoffset = p->yscroll * vc->vc_font.height;
- ops->var.vmode &= ~FB_VMODE_YWRAP;
- ops->update_start(info);
- fbcon_clear_margins(vc, 1);
- scrollback_max += count;
- if (scrollback_max > scrollback_phys_max)
- scrollback_max = scrollback_phys_max;
- scrollback_current = 0;
-}
-
-static __inline__ void ypan_up_redraw(struct vc_data *vc, int t, int count)
-{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
- struct fbcon_ops *ops = info->fbcon_par;
- struct fbcon_display *p = &fb_display[vc->vc_num];
-
- p->yscroll += count;
-
- if (p->yscroll > p->vrows - vc->vc_rows) {
- p->yscroll -= p->vrows - vc->vc_rows;
- fbcon_redraw_move(vc, p, t + count, vc->vc_rows - count, t);
- }
-
- ops->var.xoffset = 0;
- ops->var.yoffset = p->yscroll * vc->vc_font.height;
- ops->var.vmode &= ~FB_VMODE_YWRAP;
- ops->update_start(info);
- fbcon_clear_margins(vc, 1);
- scrollback_max += count;
- if (scrollback_max > scrollback_phys_max)
- scrollback_max = scrollback_phys_max;
- scrollback_current = 0;
-}
-
-static __inline__ void ypan_down(struct vc_data *vc, int count)
-{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
- struct fbcon_display *p = &fb_display[vc->vc_num];
- struct fbcon_ops *ops = info->fbcon_par;
-
- p->yscroll -= count;
- if (p->yscroll < 0) {
- ops->bmove(vc, info, 0, 0, p->vrows - vc->vc_rows,
- 0, vc->vc_rows, vc->vc_cols);
- p->yscroll += p->vrows - vc->vc_rows;
- }
-
- ops->var.xoffset = 0;
- ops->var.yoffset = p->yscroll * vc->vc_font.height;
- ops->var.vmode &= ~FB_VMODE_YWRAP;
- ops->update_start(info);
- fbcon_clear_margins(vc, 1);
- scrollback_max -= count;
- if (scrollback_max < 0)
- scrollback_max = 0;
- scrollback_current = 0;
-}
-
-static __inline__ void ypan_down_redraw(struct vc_data *vc, int t, int count)
-{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
- struct fbcon_ops *ops = info->fbcon_par;
- struct fbcon_display *p = &fb_display[vc->vc_num];
-
- p->yscroll -= count;
-
- if (p->yscroll < 0) {
- p->yscroll += p->vrows - vc->vc_rows;
- fbcon_redraw_move(vc, p, t, vc->vc_rows - count, t + count);
- }
-
- ops->var.xoffset = 0;
- ops->var.yoffset = p->yscroll * vc->vc_font.height;
- ops->var.vmode &= ~FB_VMODE_YWRAP;
- ops->update_start(info);
- fbcon_clear_margins(vc, 1);
- scrollback_max -= count;
- if (scrollback_max < 0)
- scrollback_max = 0;
- scrollback_current = 0;
-}
-
-static void fbcon_redraw_move(struct vc_data *vc, struct fbcon_display *p,
- int line, int count, int dy)
-{
- unsigned short *s = (unsigned short *)
- (vc->vc_origin + vc->vc_size_row * line);
-
- while (count--) {
- unsigned short *start = s;
- unsigned short *le = advance_row(s, 1);
- unsigned short c;
- int x = 0;
- unsigned short attr = 1;
-
- do {
- c = scr_readw(s);
- if (attr != (c & 0xff00)) {
- attr = c & 0xff00;
- if (s > start) {
- fbcon_putcs(vc, start, s - start,
- dy, x);
- x += s - start;
- start = s;
- }
- }
- console_conditional_schedule();
- s++;
- } while (s < le);
- if (s > start)
- fbcon_putcs(vc, start, s - start, dy, x);
- console_conditional_schedule();
- dy++;
- }
-}
-
-static void fbcon_redraw_blit(struct vc_data *vc, struct fb_info *info,
- struct fbcon_display *p, int line, int count, int ycount)
-{
- int offset = ycount * vc->vc_cols;
- unsigned short *d = (unsigned short *)
- (vc->vc_origin + vc->vc_size_row * line);
- unsigned short *s = d + offset;
- struct fbcon_ops *ops = info->fbcon_par;
-
- while (count--) {
- unsigned short *start = s;
- unsigned short *le = advance_row(s, 1);
- unsigned short c;
- int x = 0;
-
- do {
- c = scr_readw(s);
-
- if (c == scr_readw(d)) {
- if (s > start) {
- ops->bmove(vc, info, line + ycount, x,
- line, x, 1, s-start);
- x += s - start + 1;
- start = s + 1;
- } else {
- x++;
- start++;
- }
- }
-
- scr_writew(c, d);
- console_conditional_schedule();
- s++;
- d++;
- } while (s < le);
- if (s > start)
- ops->bmove(vc, info, line + ycount, x, line, x, 1,
- s-start);
- console_conditional_schedule();
- if (ycount > 0)
- line++;
- else {
- line--;
- /* NOTE: We subtract two lines from these pointers */
- s -= vc->vc_size_row;
- d -= vc->vc_size_row;
- }
- }
-}
-
static void fbcon_redraw(struct vc_data *vc, struct fbcon_display *p,
int line, int count, int offset)
{
@@ -1687,7 +1450,6 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
{
struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
struct fbcon_display *p = &fb_display[vc->vc_num];
- int scroll_partial = info->flags & FBINFO_PARTIAL_PAN_OK;
if (fbcon_is_inactive(vc, info))
return true;
@@ -1704,249 +1466,32 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
case SM_UP:
if (count > vc->vc_rows) /* Maximum realistic size */
count = vc->vc_rows;
- if (logo_shown >= 0)
- goto redraw_up;
- switch (p->scrollmode) {
- case SCROLL_MOVE:
- fbcon_redraw_blit(vc, info, p, t, b - t - count,
- count);
- fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
- scr_memsetw((unsigned short *) (vc->vc_origin +
- vc->vc_size_row *
- (b - count)),
- vc->vc_video_erase_char,
- vc->vc_size_row * count);
- return true;
-
- case SCROLL_WRAP_MOVE:
- if (b - t - count > 3 * vc->vc_rows >> 2) {
- if (t > 0)
- fbcon_bmove(vc, 0, 0, count, 0, t,
- vc->vc_cols);
- ywrap_up(vc, count);
- if (vc->vc_rows - b > 0)
- fbcon_bmove(vc, b - count, 0, b, 0,
- vc->vc_rows - b,
- vc->vc_cols);
- } else if (info->flags & FBINFO_READS_FAST)
- fbcon_bmove(vc, t + count, 0, t, 0,
- b - t - count, vc->vc_cols);
- else
- goto redraw_up;
- fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
- break;
-
- case SCROLL_PAN_REDRAW:
- if ((p->yscroll + count <=
- 2 * (p->vrows - vc->vc_rows))
- && ((!scroll_partial && (b - t == vc->vc_rows))
- || (scroll_partial
- && (b - t - count >
- 3 * vc->vc_rows >> 2)))) {
- if (t > 0)
- fbcon_redraw_move(vc, p, 0, t, count);
- ypan_up_redraw(vc, t, count);
- if (vc->vc_rows - b > 0)
- fbcon_redraw_move(vc, p, b,
- vc->vc_rows - b, b);
- } else
- fbcon_redraw_move(vc, p, t + count, b - t - count, t);
- fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
- break;
-
- case SCROLL_PAN_MOVE:
- if ((p->yscroll + count <=
- 2 * (p->vrows - vc->vc_rows))
- && ((!scroll_partial && (b - t == vc->vc_rows))
- || (scroll_partial
- && (b - t - count >
- 3 * vc->vc_rows >> 2)))) {
- if (t > 0)
- fbcon_bmove(vc, 0, 0, count, 0, t,
- vc->vc_cols);
- ypan_up(vc, count);
- if (vc->vc_rows - b > 0)
- fbcon_bmove(vc, b - count, 0, b, 0,
- vc->vc_rows - b,
- vc->vc_cols);
- } else if (info->flags & FBINFO_READS_FAST)
- fbcon_bmove(vc, t + count, 0, t, 0,
- b - t - count, vc->vc_cols);
- else
- goto redraw_up;
- fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
- break;
-
- case SCROLL_REDRAW:
- redraw_up:
- fbcon_redraw(vc, p, t, b - t - count,
- count * vc->vc_cols);
- fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
- scr_memsetw((unsigned short *) (vc->vc_origin +
- vc->vc_size_row *
- (b - count)),
- vc->vc_video_erase_char,
- vc->vc_size_row * count);
- return true;
- }
- break;
+ fbcon_redraw(vc, p, t, b - t - count,
+ count * vc->vc_cols);
+ fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
+ scr_memsetw((unsigned short *) (vc->vc_origin +
+ vc->vc_size_row *
+ (b - count)),
+ vc->vc_video_erase_char,
+ vc->vc_size_row * count);
+ return true;
case SM_DOWN:
if (count > vc->vc_rows) /* Maximum realistic size */
count = vc->vc_rows;
- if (logo_shown >= 0)
- goto redraw_down;
- switch (p->scrollmode) {
- case SCROLL_MOVE:
- fbcon_redraw_blit(vc, info, p, b - 1, b - t - count,
- -count);
- fbcon_clear(vc, t, 0, count, vc->vc_cols);
- scr_memsetw((unsigned short *) (vc->vc_origin +
- vc->vc_size_row *
- t),
- vc->vc_video_erase_char,
- vc->vc_size_row * count);
- return true;
-
- case SCROLL_WRAP_MOVE:
- if (b - t - count > 3 * vc->vc_rows >> 2) {
- if (vc->vc_rows - b > 0)
- fbcon_bmove(vc, b, 0, b - count, 0,
- vc->vc_rows - b,
- vc->vc_cols);
- ywrap_down(vc, count);
- if (t > 0)
- fbcon_bmove(vc, count, 0, 0, 0, t,
- vc->vc_cols);
- } else if (info->flags & FBINFO_READS_FAST)
- fbcon_bmove(vc, t, 0, t + count, 0,
- b - t - count, vc->vc_cols);
- else
- goto redraw_down;
- fbcon_clear(vc, t, 0, count, vc->vc_cols);
- break;
-
- case SCROLL_PAN_MOVE:
- if ((count - p->yscroll <= p->vrows - vc->vc_rows)
- && ((!scroll_partial && (b - t == vc->vc_rows))
- || (scroll_partial
- && (b - t - count >
- 3 * vc->vc_rows >> 2)))) {
- if (vc->vc_rows - b > 0)
- fbcon_bmove(vc, b, 0, b - count, 0,
- vc->vc_rows - b,
- vc->vc_cols);
- ypan_down(vc, count);
- if (t > 0)
- fbcon_bmove(vc, count, 0, 0, 0, t,
- vc->vc_cols);
- } else if (info->flags & FBINFO_READS_FAST)
- fbcon_bmove(vc, t, 0, t + count, 0,
- b - t - count, vc->vc_cols);
- else
- goto redraw_down;
- fbcon_clear(vc, t, 0, count, vc->vc_cols);
- break;
-
- case SCROLL_PAN_REDRAW:
- if ((count - p->yscroll <= p->vrows - vc->vc_rows)
- && ((!scroll_partial && (b - t == vc->vc_rows))
- || (scroll_partial
- && (b - t - count >
- 3 * vc->vc_rows >> 2)))) {
- if (vc->vc_rows - b > 0)
- fbcon_redraw_move(vc, p, b, vc->vc_rows - b,
- b - count);
- ypan_down_redraw(vc, t, count);
- if (t > 0)
- fbcon_redraw_move(vc, p, count, t, 0);
- } else
- fbcon_redraw_move(vc, p, t, b - t - count, t + count);
- fbcon_clear(vc, t, 0, count, vc->vc_cols);
- break;
-
- case SCROLL_REDRAW:
- redraw_down:
- fbcon_redraw(vc, p, b - 1, b - t - count,
- -count * vc->vc_cols);
- fbcon_clear(vc, t, 0, count, vc->vc_cols);
- scr_memsetw((unsigned short *) (vc->vc_origin +
- vc->vc_size_row *
- t),
- vc->vc_video_erase_char,
- vc->vc_size_row * count);
- return true;
- }
+ fbcon_redraw(vc, p, b - 1, b - t - count,
+ -count * vc->vc_cols);
+ fbcon_clear(vc, t, 0, count, vc->vc_cols);
+ scr_memsetw((unsigned short *) (vc->vc_origin +
+ vc->vc_size_row *
+ t),
+ vc->vc_video_erase_char,
+ vc->vc_size_row * count);
+ return true;
}
return false;
}
-
-static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx,
- int height, int width)
-{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
- struct fbcon_display *p = &fb_display[vc->vc_num];
-
- if (fbcon_is_inactive(vc, info))
- return;
-
- if (!width || !height)
- return;
-
- /* Split blits that cross physical y_wrap case.
- * Pathological case involves 4 blits, better to use recursive
- * code rather than unrolled case
- *
- * Recursive invocations don't need to erase the cursor over and
- * over again, so we use fbcon_bmove_rec()
- */
- fbcon_bmove_rec(vc, p, sy, sx, dy, dx, height, width,
- p->vrows - p->yscroll);
-}
-
-static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, int sx,
- int dy, int dx, int height, int width, u_int y_break)
-{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
- struct fbcon_ops *ops = info->fbcon_par;
- u_int b;
-
- if (sy < y_break && sy + height > y_break) {
- b = y_break - sy;
- if (dy < sy) { /* Avoid trashing self */
- fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
- y_break);
- fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
- height - b, width, y_break);
- } else {
- fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
- height - b, width, y_break);
- fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
- y_break);
- }
- return;
- }
-
- if (dy < y_break && dy + height > y_break) {
- b = y_break - dy;
- if (dy < sy) { /* Avoid trashing self */
- fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
- y_break);
- fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
- height - b, width, y_break);
- } else {
- fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
- height - b, width, y_break);
- fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
- y_break);
- }
- return;
- }
- ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx,
- height, width);
-}
-
static void updatescrollmode(struct fbcon_display *p,
struct fb_info *info,
struct vc_data *vc)
@@ -2119,21 +1664,7 @@ static int fbcon_switch(struct vc_data *vc)
updatescrollmode(p, info, vc);
- switch (p->scrollmode) {
- case SCROLL_WRAP_MOVE:
- scrollback_phys_max = p->vrows - vc->vc_rows;
- break;
- case SCROLL_PAN_MOVE:
- case SCROLL_PAN_REDRAW:
- scrollback_phys_max = p->vrows - 2 * vc->vc_rows;
- if (scrollback_phys_max < 0)
- scrollback_phys_max = 0;
- break;
- default:
- scrollback_phys_max = 0;
- break;
- }
-
+ scrollback_phys_max = 0;
scrollback_max = 0;
scrollback_current = 0;
diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h
index 9315b360c89813..a00603b4451ae7 100644
--- a/drivers/video/fbdev/core/fbcon.h
+++ b/drivers/video/fbdev/core/fbcon.h
@@ -29,7 +29,6 @@ struct fbcon_display {
/* Filled in by the low-level console driver */
const u_char *fontdata;
int userfont; /* != 0 if fontdata kmalloc()ed */
- u_short scrollmode; /* Scroll Method */
u_short inverse; /* != 0 text black on white as default */
short yscroll; /* Hardware scrolling */
int vrows; /* number of virtual rows */
@@ -52,8 +51,6 @@ struct fbcon_display {
};
struct fbcon_ops {
- void (*bmove)(struct vc_data *vc, struct fb_info *info, int sy,
- int sx, int dy, int dx, int height, int width);
void (*clear)(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width);
void (*putcs)(struct vc_data *vc, struct fb_info *info,
@@ -152,62 +149,6 @@ static inline int attr_col_ec(int shift, struct vc_data *vc,
#define attr_bgcol_ec(bgshift, vc, info) attr_col_ec(bgshift, vc, info, 0)
#define attr_fgcol_ec(fgshift, vc, info) attr_col_ec(fgshift, vc, info, 1)
- /*
- * Scroll Method
- */
-
-/* There are several methods fbcon can use to move text around the screen:
- *
- * Operation Pan Wrap
- *---------------------------------------------
- * SCROLL_MOVE copyarea No No
- * SCROLL_PAN_MOVE copyarea Yes No
- * SCROLL_WRAP_MOVE copyarea No Yes
- * SCROLL_REDRAW imageblit No No
- * SCROLL_PAN_REDRAW imageblit Yes No
- * SCROLL_WRAP_REDRAW imageblit No Yes
- *
- * (SCROLL_WRAP_REDRAW is not implemented yet)
- *
- * In general, fbcon will choose the best scrolling
- * method based on the rule below:
- *
- * Pan/Wrap > accel imageblit > accel copyarea >
- * soft imageblit > (soft copyarea)
- *
- * Exception to the rule: Pan + accel copyarea is
- * preferred over Pan + accel imageblit.
- *
- * The above is typical for PCI/AGP cards. Unless
- * overridden, fbcon will never use soft copyarea.
- *
- * If you need to override the above rule, set the
- * appropriate flags in fb_info->flags. For example,
- * to prefer copyarea over imageblit, set
- * FBINFO_READS_FAST.
- *
- * Other notes:
- * + use the hardware engine to move the text
- * (hw-accelerated copyarea() and fillrect())
- * + use hardware-supported panning on a large virtual screen
- * + amifb can not only pan, but also wrap the display by N lines
- * (i.e. visible line i = physical line (i+N) % yres).
- * + read what's already rendered on the screen and
- * write it in a different place (this is cfb_copyarea())
- * + re-render the text to the screen
- *
- * Whether to use wrapping or panning can only be figured out at
- * runtime (when we know whether our font height is a multiple
- * of the pan/wrap step)
- *
- */
-
-#define SCROLL_MOVE 0x001
-#define SCROLL_PAN_MOVE 0x002
-#define SCROLL_WRAP_MOVE 0x003
-#define SCROLL_REDRAW 0x004
-#define SCROLL_PAN_REDRAW 0x005
-
#ifdef CONFIG_FB_TILEBLITTING
extern void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info);
#endif
diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c
index 9cd2c4b05c3286..ffa78936eaab0f 100644
--- a/drivers/video/fbdev/core/fbcon_ccw.c
+++ b/drivers/video/fbdev/core/fbcon_ccw.c
@@ -59,31 +59,12 @@ static void ccw_update_attr(u8 *dst, u8 *src, int attribute,
}
}
-
-static void ccw_bmove(struct vc_data *vc, struct fb_info *info, int sy,
- int sx, int dy, int dx, int height, int width)
-{
- struct fbcon_ops *ops = info->fbcon_par;
- struct fb_copyarea area;
- u32 vyres = GETVYRES(ops->p->scrollmode, info);
-
- area.sx = sy * vc->vc_font.height;
- area.sy = vyres - ((sx + width) * vc->vc_font.width);
- area.dx = dy * vc->vc_font.height;
- area.dy = vyres - ((dx + width) * vc->vc_font.width);
- area.width = height * vc->vc_font.height;
- area.height = width * vc->vc_font.width;
-
- info->fbops->fb_copyarea(info, &area);
-}
-
static void ccw_clear(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width)
{
- struct fbcon_ops *ops = info->fbcon_par;
struct fb_fillrect region;
int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
- u32 vyres = GETVYRES(ops->p->scrollmode, info);
+ u32 vyres = info->var.yres;
region.color = attr_bgcol_ec(bgshift,vc,info);
region.dx = sy * vc->vc_font.height;
@@ -140,7 +121,7 @@ static void ccw_putcs(struct vc_data *vc, struct fb_info *info,
u32 cnt, pitch, size;
u32 attribute = get_attribute(info, scr_readw(s));
u8 *dst, *buf = NULL;
- u32 vyres = GETVYRES(ops->p->scrollmode, info);
+ u32 vyres = info->var.yres;
if (!ops->fontbuffer)
return;
@@ -229,7 +210,7 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
int attribute, use_sw = vc->vc_cursor_type & CUR_SW;
int err = 1, dx, dy;
char *src;
- u32 vyres = GETVYRES(ops->p->scrollmode, info);
+ u32 vyres = info->var.yres;
if (!ops->fontbuffer)
return;
@@ -387,7 +368,7 @@ static int ccw_update_start(struct fb_info *info)
{
struct fbcon_ops *ops = info->fbcon_par;
u32 yoffset;
- u32 vyres = GETVYRES(ops->p->scrollmode, info);
+ u32 vyres = info->var.yres;
int err;
yoffset = (vyres - info->var.yres) - ops->var.xoffset;
@@ -402,7 +383,6 @@ static int ccw_update_start(struct fb_info *info)
void fbcon_rotate_ccw(struct fbcon_ops *ops)
{
- ops->bmove = ccw_bmove;
ops->clear = ccw_clear;
ops->putcs = ccw_putcs;
ops->clear_margins = ccw_clear_margins;
diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c
index 88d89fad3f05e3..92e5b7fb51ee2b 100644
--- a/drivers/video/fbdev/core/fbcon_cw.c
+++ b/drivers/video/fbdev/core/fbcon_cw.c
@@ -44,31 +44,12 @@ static void cw_update_attr(u8 *dst, u8 *src, int attribute,
}
}
-
-static void cw_bmove(struct vc_data *vc, struct fb_info *info, int sy,
- int sx, int dy, int dx, int height, int width)
-{
- struct fbcon_ops *ops = info->fbcon_par;
- struct fb_copyarea area;
- u32 vxres = GETVXRES(ops->p->scrollmode, info);
-
- area.sx = vxres - ((sy + height) * vc->vc_font.height);
- area.sy = sx * vc->vc_font.width;
- area.dx = vxres - ((dy + height) * vc->vc_font.height);
- area.dy = dx * vc->vc_font.width;
- area.width = height * vc->vc_font.height;
- area.height = width * vc->vc_font.width;
-
- info->fbops->fb_copyarea(info, &area);
-}
-
static void cw_clear(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width)
{
- struct fbcon_ops *ops = info->fbcon_par;
struct fb_fillrect region;
int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
- u32 vxres = GETVXRES(ops->p->scrollmode, info);
+ u32 vxres = info->var.xres;
region.color = attr_bgcol_ec(bgshift,vc,info);
region.dx = vxres - ((sy + height) * vc->vc_font.height);
@@ -125,7 +106,7 @@ static void cw_putcs(struct vc_data *vc, struct fb_info *info,
u32 cnt, pitch, size;
u32 attribute = get_attribute(info, scr_readw(s));
u8 *dst, *buf = NULL;
- u32 vxres = GETVXRES(ops->p->scrollmode, info);
+ u32 vxres = info->var.xres;
if (!ops->fontbuffer)
return;
@@ -212,7 +193,7 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
int attribute, use_sw = vc->vc_cursor_type & CUR_SW;
int err = 1, dx, dy;
char *src;
- u32 vxres = GETVXRES(ops->p->scrollmode, info);
+ u32 vxres = info->var.xres;
if (!ops->fontbuffer)
return;
@@ -369,7 +350,7 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
static int cw_update_start(struct fb_info *info)
{
struct fbcon_ops *ops = info->fbcon_par;
- u32 vxres = GETVXRES(ops->p->scrollmode, info);
+ u32 vxres = info->var.xres;
u32 xoffset;
int err;
@@ -385,7 +366,6 @@ static int cw_update_start(struct fb_info *info)
void fbcon_rotate_cw(struct fbcon_ops *ops)
{
- ops->bmove = cw_bmove;
ops->clear = cw_clear;
ops->putcs = cw_putcs;
ops->clear_margins = cw_clear_margins;
diff --git a/drivers/video/fbdev/core/fbcon_rotate.h b/drivers/video/fbdev/core/fbcon_rotate.h
index e233444cda6647..b528b2e54283a5 100644
--- a/drivers/video/fbdev/core/fbcon_rotate.h
+++ b/drivers/video/fbdev/core/fbcon_rotate.h
@@ -11,15 +11,6 @@
#ifndef _FBCON_ROTATE_H
#define _FBCON_ROTATE_H
-#define GETVYRES(s,i) ({ \
- (s == SCROLL_REDRAW || s == SCROLL_MOVE) ? \
- (i)->var.yres : (i)->var.yres_virtual; })
-
-#define GETVXRES(s,i) ({ \
- (s == SCROLL_REDRAW || s == SCROLL_MOVE || !(i)->fix.xpanstep) ? \
- (i)->var.xres : (i)->var.xres_virtual; })
-
-
static inline int pattern_test_bit(u32 x, u32 y, u32 pitch, const char *pat)
{
u32 tmp = (y * pitch) + x, index = tmp / 8, bit = tmp % 8;
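The removed GETVYRES()/GETVXRES() macros picked the visible or the virtual resolution depending on the active scroll mode (and, for the horizontal case, on whether the hardware can pan at all); with software scrolling only, the callers in the hunks above now read info->var.yres and info->var.xres directly. A minimal standalone model of the old selection, with struct and constant names invented for illustration:

/* Illustrative stand-in for the removed GETVYRES()/GETVXRES() selection. */
#include <stdio.h>

struct var_info { unsigned int xres, yres, xres_virtual, yres_virtual; };
struct fix_info { unsigned int xpanstep; };

enum { SCROLL_MOVE = 1, SCROLL_REDRAW = 4 };	/* subset used by the old macros */

/* Old behaviour: use the virtual size only when a panning/wrapping mode was active. */
static unsigned int get_vyres(int scrollmode, const struct var_info *var)
{
	return (scrollmode == SCROLL_REDRAW || scrollmode == SCROLL_MOVE) ?
		var->yres : var->yres_virtual;
}

static unsigned int get_vxres(int scrollmode, const struct var_info *var,
			      const struct fix_info *fix)
{
	return (scrollmode == SCROLL_REDRAW || scrollmode == SCROLL_MOVE ||
		!fix->xpanstep) ? var->xres : var->xres_virtual;
}

int main(void)
{
	struct var_info var = { .xres = 1024, .yres = 768,
				.xres_virtual = 2048, .yres_virtual = 1536 };
	struct fix_info fix = { .xpanstep = 1 };

	/* New behaviour is always the SCROLL_REDRAW/SCROLL_MOVE branch. */
	printf("old vyres (pan mode): %u, new vyres: %u\n",
	       get_vyres(2 /* pan mode */, &var), var.yres);
	printf("old vxres (pan mode): %u, new vxres: %u\n",
	       get_vxres(2, &var, &fix), var.xres);
	return 0;
}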
diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c
index 8d5e66b1bdfbb7..09619bd8e02178 100644
--- a/drivers/video/fbdev/core/fbcon_ud.c
+++ b/drivers/video/fbdev/core/fbcon_ud.c
@@ -44,33 +44,13 @@ static void ud_update_attr(u8 *dst, u8 *src, int attribute,
}
}
-
-static void ud_bmove(struct vc_data *vc, struct fb_info *info, int sy,
- int sx, int dy, int dx, int height, int width)
-{
- struct fbcon_ops *ops = info->fbcon_par;
- struct fb_copyarea area;
- u32 vyres = GETVYRES(ops->p->scrollmode, info);
- u32 vxres = GETVXRES(ops->p->scrollmode, info);
-
- area.sy = vyres - ((sy + height) * vc->vc_font.height);
- area.sx = vxres - ((sx + width) * vc->vc_font.width);
- area.dy = vyres - ((dy + height) * vc->vc_font.height);
- area.dx = vxres - ((dx + width) * vc->vc_font.width);
- area.height = height * vc->vc_font.height;
- area.width = width * vc->vc_font.width;
-
- info->fbops->fb_copyarea(info, &area);
-}
-
static void ud_clear(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width)
{
- struct fbcon_ops *ops = info->fbcon_par;
struct fb_fillrect region;
int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
- u32 vyres = GETVYRES(ops->p->scrollmode, info);
- u32 vxres = GETVXRES(ops->p->scrollmode, info);
+ u32 vyres = info->var.yres;
+ u32 vxres = info->var.xres;
region.color = attr_bgcol_ec(bgshift,vc,info);
region.dy = vyres - ((sy + height) * vc->vc_font.height);
@@ -162,8 +142,8 @@ static void ud_putcs(struct vc_data *vc, struct fb_info *info,
u32 mod = vc->vc_font.width % 8, cnt, pitch, size;
u32 attribute = get_attribute(info, scr_readw(s));
u8 *dst, *buf = NULL;
- u32 vyres = GETVYRES(ops->p->scrollmode, info);
- u32 vxres = GETVXRES(ops->p->scrollmode, info);
+ u32 vyres = info->var.yres;
+ u32 vxres = info->var.xres;
if (!ops->fontbuffer)
return;
@@ -259,8 +239,8 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode,
int attribute, use_sw = vc->vc_cursor_type & CUR_SW;
int err = 1, dx, dy;
char *src;
- u32 vyres = GETVYRES(ops->p->scrollmode, info);
- u32 vxres = GETVXRES(ops->p->scrollmode, info);
+ u32 vyres = info->var.yres;
+ u32 vxres = info->var.xres;
if (!ops->fontbuffer)
return;
@@ -410,8 +390,8 @@ static int ud_update_start(struct fb_info *info)
{
struct fbcon_ops *ops = info->fbcon_par;
int xoffset, yoffset;
- u32 vyres = GETVYRES(ops->p->scrollmode, info);
- u32 vxres = GETVXRES(ops->p->scrollmode, info);
+ u32 vyres = info->var.yres;
+ u32 vxres = info->var.xres;
int err;
xoffset = vxres - info->var.xres - ops->var.xoffset;
@@ -429,7 +409,6 @@ static int ud_update_start(struct fb_info *info)
void fbcon_rotate_ud(struct fbcon_ops *ops)
{
- ops->bmove = ud_bmove;
ops->clear = ud_clear;
ops->putcs = ud_putcs;
ops->clear_margins = ud_clear_margins;
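All three rotation variants map a rectangle of character cells into framebuffer pixels before clearing or blitting; the upside-down case mirrors both axes against the visible resolution, as in the ud_clear() region setup above. A standalone sketch of that coordinate math (illustrative, not kernel code):

/* Standalone model of the 180-degree (ud) coordinate flip used above when
 * clearing a rectangle of character cells; illustrative only. */
#include <stdio.h>

struct rect { unsigned int dx, dy, width, height; };	/* pixels */

static struct rect ud_cells_to_pixels(unsigned int sx, unsigned int sy,
				      unsigned int width, unsigned int height,
				      unsigned int font_w, unsigned int font_h,
				      unsigned int vxres, unsigned int vyres)
{
	struct rect r;

	/* Mirror both axes: the cell at (sx, sy) ends up at the far corner. */
	r.dy = vyres - (sy + height) * font_h;
	r.dx = vxres - (sx + width) * font_w;
	r.width  = width * font_w;
	r.height = height * font_h;
	return r;
}

int main(void)
{
	/* Clear a 2x1 cell region at cell (0, 0): 640x480 screen, 8x16 font. */
	struct rect r = ud_cells_to_pixels(0, 0, 2, 1, 8, 16, 640, 480);

	printf("dx=%u dy=%u w=%u h=%u\n", r.dx, r.dy, r.width, r.height);
	return 0;
}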
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 7420d2c16e47e3..826175ad88a2fd 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1702,8 +1702,11 @@ static void do_unregister_framebuffer(struct fb_info *fb_info)
{
unlink_framebuffer(fb_info);
if (fb_info->pixmap.addr &&
- (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT))
+ (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT)) {
kfree(fb_info->pixmap.addr);
+ fb_info->pixmap.addr = NULL;
+ }
+
fb_destroy_modelist(&fb_info->modelist);
registered_fb[fb_info->node] = NULL;
num_registered_fb--;
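The fbmem.c hunk frees the default pixmap on unregister and now also clears the pointer, so a repeated teardown cannot free the same buffer twice. A small userspace illustration of the free-and-clear idiom (free() standing in for kfree(); the struct fields are invented for the example):

/* Userspace illustration of the free-and-clear idiom applied above. */
#include <stdlib.h>

struct pixmap { void *addr; unsigned int flags; };
#define PIXMAP_DEFAULT 0x1

static void release_pixmap(struct pixmap *p)
{
	if (p->addr && (p->flags & PIXMAP_DEFAULT)) {
		free(p->addr);
		p->addr = NULL;	/* a second call is now a harmless no-op */
	}
}

int main(void)
{
	struct pixmap p = { .addr = malloc(64), .flags = PIXMAP_DEFAULT };

	release_pixmap(&p);
	release_pixmap(&p);	/* safe: addr was cleared on the first pass */
	return 0;
}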
diff --git a/drivers/video/fbdev/core/tileblit.c b/drivers/video/fbdev/core/tileblit.c
index 2768eff247ba46..72af95053bcb53 100644
--- a/drivers/video/fbdev/core/tileblit.c
+++ b/drivers/video/fbdev/core/tileblit.c
@@ -16,21 +16,6 @@
#include <asm/types.h>
#include "fbcon.h"
-static void tile_bmove(struct vc_data *vc, struct fb_info *info, int sy,
- int sx, int dy, int dx, int height, int width)
-{
- struct fb_tilearea area;
-
- area.sx = sx;
- area.sy = sy;
- area.dx = dx;
- area.dy = dy;
- area.height = height;
- area.width = width;
-
- info->tileops->fb_tilecopy(info, &area);
-}
-
static void tile_clear(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width)
{
@@ -133,7 +118,6 @@ void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info)
struct fb_tilemap map;
struct fbcon_ops *ops = info->fbcon_par;
- ops->bmove = tile_bmove;
ops->clear = tile_clear;
ops->putcs = tile_putcs;
ops->clear_margins = tile_clear_margins;
diff --git a/drivers/video/fbdev/skeletonfb.c b/drivers/video/fbdev/skeletonfb.c
index bcacfb6934fa9a..0fe922f726e98a 100644
--- a/drivers/video/fbdev/skeletonfb.c
+++ b/drivers/video/fbdev/skeletonfb.c
@@ -505,15 +505,15 @@ void xxxfb_fillrect(struct fb_info *p, const struct fb_fillrect *region)
}
/**
- * xxxfb_copyarea - REQUIRED function. Can use generic routines if
- * non acclerated hardware and packed pixel based.
+ * xxxfb_copyarea - OBSOLETE function.
* Copies one area of the screen to another area.
+ * Will be deleted in a future version
*
* @info: frame buffer structure that represents a single frame buffer
* @area: Structure providing the data to copy the framebuffer contents
* from one region to another.
*
- * This drawing operation copies a rectangular area from one area of the
+ * This drawing operation copied a rectangular area from one area of the
* screen to another area.
*/
void xxxfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
@@ -645,9 +645,9 @@ static const struct fb_ops xxxfb_ops = {
.fb_setcolreg = xxxfb_setcolreg,
.fb_blank = xxxfb_blank,
.fb_pan_display = xxxfb_pan_display,
- .fb_fillrect = xxxfb_fillrect, /* Needed !!! */
- .fb_copyarea = xxxfb_copyarea, /* Needed !!! */
- .fb_imageblit = xxxfb_imageblit, /* Needed !!! */
+ .fb_fillrect = xxxfb_fillrect, /* Needed !!! */
+ .fb_copyarea = xxxfb_copyarea, /* Obsolete */
+ .fb_imageblit = xxxfb_imageblit, /* Needed !!! */
.fb_cursor = xxxfb_cursor, /* Optional !!! */
.fb_sync = xxxfb_sync,
.fb_ioctl = xxxfb_ioctl,
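The skeleton driver now documents fb_copyarea as obsolete while fillrect and imageblit stay required. As a rough illustration of what that means for callers, the sketch below models a drawing-op table whose copy hook may be absent, in which case a redraw path is used instead; this is an analogy only, not the kernel's struct fb_ops.

/* Illustrative model of a drawing-op table where the copy hook is optional
 * (mirrors the "Obsolete" marking above); not the kernel's struct fb_ops. */
#include <stdio.h>

struct draw_ops {
	void (*fillrect)(void);		/* required */
	void (*imageblit)(void);	/* required */
	void (*copyarea)(void);		/* obsolete: may be NULL */
};

static void my_fillrect(void)  { puts("fillrect"); }
static void my_imageblit(void) { puts("imageblit"); }

static void scroll(const struct draw_ops *ops)
{
	if (ops->copyarea)
		ops->copyarea();	/* old accelerated copy path */
	else
		ops->imageblit();	/* redraw path used when copyarea is gone */
}

int main(void)
{
	struct draw_ops ops = {
		.fillrect  = my_fillrect,
		.imageblit = my_imageblit,
		.copyarea  = NULL,	/* obsolete hook left unset */
	};

	scroll(&ops);
	return 0;
}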
diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
index 0da0af251c73d0..96e5a878276976 100644
--- a/drivers/virtio/virtio_mem.c
+++ b/drivers/virtio/virtio_mem.c
@@ -2889,6 +2889,7 @@ static unsigned int virtio_mem_features[] = {
#if defined(CONFIG_NUMA) && defined(CONFIG_ACPI_NUMA)
VIRTIO_MEM_F_ACPI_PXM,
#endif
+ VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE,
};
static const struct virtio_device_id virtio_mem_id_table[] = {
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index adafdf86f42fde..fac918ccb30516 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -108,7 +108,9 @@ static const struct netfs_read_request_ops v9fs_req_ops = {
*/
static int v9fs_vfs_readpage(struct file *file, struct page *page)
{
- return netfs_readpage(file, page, &v9fs_req_ops, NULL);
+ struct folio *folio = page_folio(page);
+
+ return netfs_readpage(file, folio, &v9fs_req_ops, NULL);
}
/**
@@ -130,13 +132,15 @@ static void v9fs_vfs_readahead(struct readahead_control *ractl)
static int v9fs_release_page(struct page *page, gfp_t gfp)
{
- if (PagePrivate(page))
+ struct folio *folio = page_folio(page);
+
+ if (folio_test_private(folio))
return 0;
#ifdef CONFIG_9P_FSCACHE
- if (PageFsCache(page)) {
+ if (folio_test_fscache(folio)) {
if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS))
return 0;
- wait_on_page_fscache(page);
+ folio_wait_fscache(folio);
}
#endif
return 1;
@@ -152,55 +156,58 @@ static int v9fs_release_page(struct page *page, gfp_t gfp)
static void v9fs_invalidate_page(struct page *page, unsigned int offset,
unsigned int length)
{
- wait_on_page_fscache(page);
+ struct folio *folio = page_folio(page);
+
+ folio_wait_fscache(folio);
}
-static int v9fs_vfs_writepage_locked(struct page *page)
+static int v9fs_vfs_write_folio_locked(struct folio *folio)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio_inode(folio);
struct v9fs_inode *v9inode = V9FS_I(inode);
- loff_t start = page_offset(page);
- loff_t size = i_size_read(inode);
+ loff_t start = folio_pos(folio);
+ loff_t i_size = i_size_read(inode);
struct iov_iter from;
- int err, len;
+ size_t len = folio_size(folio);
+ int err;
+
+ if (start >= i_size)
+ return 0; /* Simultaneous truncation occurred */
- if (page->index == size >> PAGE_SHIFT)
- len = size & ~PAGE_MASK;
- else
- len = PAGE_SIZE;
+ len = min_t(loff_t, i_size - start, len);
- iov_iter_xarray(&from, WRITE, &page->mapping->i_pages, start, len);
+ iov_iter_xarray(&from, WRITE, &folio_mapping(folio)->i_pages, start, len);
/* We should have writeback_fid always set */
BUG_ON(!v9inode->writeback_fid);
- set_page_writeback(page);
+ folio_start_writeback(folio);
p9_client_write(v9inode->writeback_fid, start, &from, &err);
- end_page_writeback(page);
+ folio_end_writeback(folio);
return err;
}
static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
{
+ struct folio *folio = page_folio(page);
int retval;
- p9_debug(P9_DEBUG_VFS, "page %p\n", page);
+ p9_debug(P9_DEBUG_VFS, "folio %p\n", folio);
- retval = v9fs_vfs_writepage_locked(page);
+ retval = v9fs_vfs_write_folio_locked(folio);
if (retval < 0) {
if (retval == -EAGAIN) {
- redirty_page_for_writepage(wbc, page);
+ folio_redirty_for_writepage(wbc, folio);
retval = 0;
} else {
- SetPageError(page);
- mapping_set_error(page->mapping, retval);
+ mapping_set_error(folio_mapping(folio), retval);
}
} else
retval = 0;
- unlock_page(page);
+ folio_unlock(folio);
return retval;
}
@@ -213,14 +220,15 @@ static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
static int v9fs_launder_page(struct page *page)
{
+ struct folio *folio = page_folio(page);
int retval;
- if (clear_page_dirty_for_io(page)) {
- retval = v9fs_vfs_writepage_locked(page);
+ if (folio_clear_dirty_for_io(folio)) {
+ retval = v9fs_vfs_write_folio_locked(folio);
if (retval)
return retval;
}
- wait_on_page_fscache(page);
+ folio_wait_fscache(folio);
return 0;
}
@@ -265,10 +273,10 @@ v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
loff_t pos, unsigned int len, unsigned int flags,
- struct page **pagep, void **fsdata)
+ struct page **subpagep, void **fsdata)
{
int retval;
- struct page *page;
+ struct folio *folio;
struct v9fs_inode *v9inode = V9FS_I(mapping->host);
p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);
@@ -279,31 +287,32 @@ static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
* file. We need to do this before we get a lock on the page in case
* there's more than one writer competing for the same cache block.
*/
- retval = netfs_write_begin(filp, mapping, pos, len, flags, &page, fsdata,
+ retval = netfs_write_begin(filp, mapping, pos, len, flags, &folio, fsdata,
&v9fs_req_ops, NULL);
if (retval < 0)
return retval;
- *pagep = find_subpage(page, pos / PAGE_SIZE);
+ *subpagep = &folio->page;
return retval;
}
static int v9fs_write_end(struct file *filp, struct address_space *mapping,
loff_t pos, unsigned int len, unsigned int copied,
- struct page *page, void *fsdata)
+ struct page *subpage, void *fsdata)
{
loff_t last_pos = pos + copied;
- struct inode *inode = page->mapping->host;
+ struct folio *folio = page_folio(subpage);
+ struct inode *inode = mapping->host;
p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);
- if (!PageUptodate(page)) {
+ if (!folio_test_uptodate(folio)) {
if (unlikely(copied < len)) {
copied = 0;
goto out;
}
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
}
/*
@@ -314,10 +323,10 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping,
inode_add_bytes(inode, last_pos - inode->i_size);
i_size_write(inode, last_pos);
}
- set_page_dirty(page);
+ folio_mark_dirty(folio);
out:
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
return copied;
}
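v9fs_vfs_write_folio_locked() above clamps the write length against the current file size and bails out early when a concurrent truncation moved EOF in front of the folio. The same arithmetic in standalone form (sizes are illustrative):

/* Standalone model of the length clamp in v9fs_vfs_write_folio_locked() above:
 * skip folios past EOF and trim the final folio to i_size. Illustrative only. */
#include <stdio.h>

static long long writeback_len(long long start, long long folio_size,
			       long long i_size)
{
	long long len = folio_size;

	if (start >= i_size)
		return 0;	/* simultaneous truncation: nothing to write */
	if (i_size - start < len)
		len = i_size - start;	/* partial folio at EOF */
	return len;
}

int main(void)
{
	/* 4 KiB folios, 10000-byte file: full, partial and truncated cases. */
	printf("%lld\n", writeback_len(0,     4096, 10000));	/* 4096 */
	printf("%lld\n", writeback_len(8192,  4096, 10000));	/* 1808 */
	printf("%lld\n", writeback_len(12288, 4096, 10000));	/* 0 */
	return 0;
}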
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 4244d48398efd9..612e297f3763c5 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -528,13 +528,13 @@ static vm_fault_t
v9fs_vm_page_mkwrite(struct vm_fault *vmf)
{
struct v9fs_inode *v9inode;
- struct page *page = vmf->page;
+ struct folio *folio = page_folio(vmf->page);
struct file *filp = vmf->vma->vm_file;
struct inode *inode = file_inode(filp);
- p9_debug(P9_DEBUG_VFS, "page %p fid %lx\n",
- page, (unsigned long)filp->private_data);
+ p9_debug(P9_DEBUG_VFS, "folio %p fid %lx\n",
+ folio, (unsigned long)filp->private_data);
v9inode = V9FS_I(inode);
@@ -542,24 +542,24 @@ v9fs_vm_page_mkwrite(struct vm_fault *vmf)
* be modified. We then assume the entire page will need writing back.
*/
#ifdef CONFIG_9P_FSCACHE
- if (PageFsCache(page) &&
- wait_on_page_fscache_killable(page) < 0)
- return VM_FAULT_RETRY;
+ if (folio_test_fscache(folio) &&
+ folio_wait_fscache_killable(folio) < 0)
+ return VM_FAULT_NOPAGE;
#endif
/* Update file times before taking page lock */
file_update_time(filp);
BUG_ON(!v9inode->writeback_fid);
- if (lock_page_killable(page) < 0)
+ if (folio_lock_killable(folio) < 0)
return VM_FAULT_RETRY;
- if (page->mapping != inode->i_mapping)
+ if (folio_mapping(folio) != inode->i_mapping)
goto out_unlock;
- wait_for_stable_page(page);
+ folio_wait_stable(folio);
return VM_FAULT_LOCKED;
out_unlock:
- unlock_page(page);
+ folio_unlock(folio);
return VM_FAULT_NOPAGE;
}
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 4579bbda46346c..da9b4f8577a1a0 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -103,13 +103,13 @@ struct afs_lookup_cookie {
};
/*
- * Drop the refs that we're holding on the pages we were reading into. We've
+ * Drop the refs that we're holding on the folios we were reading into. We've
* got refs on the first nr_pages pages.
*/
static void afs_dir_read_cleanup(struct afs_read *req)
{
struct address_space *mapping = req->vnode->vfs_inode.i_mapping;
- struct page *page;
+ struct folio *folio;
pgoff_t last = req->nr_pages - 1;
XA_STATE(xas, &mapping->i_pages, 0);
@@ -118,65 +118,56 @@ static void afs_dir_read_cleanup(struct afs_read *req)
return;
rcu_read_lock();
- xas_for_each(&xas, page, last) {
- if (xas_retry(&xas, page))
+ xas_for_each(&xas, folio, last) {
+ if (xas_retry(&xas, folio))
continue;
- BUG_ON(xa_is_value(page));
- BUG_ON(PageCompound(page));
- ASSERTCMP(page->mapping, ==, mapping);
+ BUG_ON(xa_is_value(folio));
+ ASSERTCMP(folio_file_mapping(folio), ==, mapping);
- put_page(page);
+ folio_put(folio);
}
rcu_read_unlock();
}
/*
- * check that a directory page is valid
+ * check that a directory folio is valid
*/
-static bool afs_dir_check_page(struct afs_vnode *dvnode, struct page *page,
- loff_t i_size)
+static bool afs_dir_check_folio(struct afs_vnode *dvnode, struct folio *folio,
+ loff_t i_size)
{
- struct afs_xdr_dir_page *dbuf;
- loff_t latter, off;
- int tmp, qty;
+ union afs_xdr_dir_block *block;
+ size_t offset, size;
+ loff_t pos;
- /* Determine how many magic numbers there should be in this page, but
+ /* Determine how many magic numbers there should be in this folio, but
* we must take care because the directory may change size under us.
*/
- off = page_offset(page);
- if (i_size <= off)
+ pos = folio_pos(folio);
+ if (i_size <= pos)
goto checked;
- latter = i_size - off;
- if (latter >= PAGE_SIZE)
- qty = PAGE_SIZE;
- else
- qty = latter;
- qty /= sizeof(union afs_xdr_dir_block);
-
- /* check them */
- dbuf = kmap_atomic(page);
- for (tmp = 0; tmp < qty; tmp++) {
- if (dbuf->blocks[tmp].hdr.magic != AFS_DIR_MAGIC) {
- printk("kAFS: %s(%lx): bad magic %d/%d is %04hx\n",
- __func__, dvnode->vfs_inode.i_ino, tmp, qty,
- ntohs(dbuf->blocks[tmp].hdr.magic));
- trace_afs_dir_check_failed(dvnode, off, i_size);
- kunmap(page);
+ size = min_t(loff_t, folio_size(folio), i_size - pos);
+ for (offset = 0; offset < size; offset += sizeof(*block)) {
+ block = kmap_local_folio(folio, offset);
+ if (block->hdr.magic != AFS_DIR_MAGIC) {
+ printk("kAFS: %s(%lx): [%llx] bad magic %zx/%zx is %04hx\n",
+ __func__, dvnode->vfs_inode.i_ino,
+ pos, offset, size, ntohs(block->hdr.magic));
+ trace_afs_dir_check_failed(dvnode, pos + offset, i_size);
+ kunmap_local(block);
trace_afs_file_error(dvnode, -EIO, afs_file_error_dir_bad_magic);
goto error;
}
/* Make sure each block is NUL terminated so we can reasonably
- * use string functions on it. The filenames in the page
+ * use string functions on it. The filenames in the folio
* *should* be NUL-terminated anyway.
*/
- ((u8 *)&dbuf->blocks[tmp])[AFS_DIR_BLOCK_SIZE - 1] = 0;
- }
-
- kunmap_atomic(dbuf);
+ ((u8 *)block)[AFS_DIR_BLOCK_SIZE - 1] = 0;
+ kunmap_local(block);
+ }
checked:
afs_stat_v(dvnode, n_read_dir);
return true;
@@ -190,11 +181,11 @@ error:
*/
static void afs_dir_dump(struct afs_vnode *dvnode, struct afs_read *req)
{
- struct afs_xdr_dir_page *dbuf;
+ union afs_xdr_dir_block *block;
struct address_space *mapping = dvnode->vfs_inode.i_mapping;
- struct page *page;
- unsigned int i, qty = PAGE_SIZE / sizeof(union afs_xdr_dir_block);
+ struct folio *folio;
pgoff_t last = req->nr_pages - 1;
+ size_t offset, size;
XA_STATE(xas, &mapping->i_pages, 0);
@@ -205,30 +196,28 @@ static void afs_dir_dump(struct afs_vnode *dvnode, struct afs_read *req)
req->pos, req->nr_pages,
req->iter->iov_offset, iov_iter_count(req->iter));
- xas_for_each(&xas, page, last) {
- if (xas_retry(&xas, page))
+ xas_for_each(&xas, folio, last) {
+ if (xas_retry(&xas, folio))
continue;
- BUG_ON(PageCompound(page));
- BUG_ON(page->mapping != mapping);
-
- dbuf = kmap_atomic(page);
- for (i = 0; i < qty; i++) {
- union afs_xdr_dir_block *block = &dbuf->blocks[i];
+ BUG_ON(folio_file_mapping(folio) != mapping);
- pr_warn("[%02lx] %32phN\n", page->index * qty + i, block);
+ size = min_t(loff_t, folio_size(folio), req->actual_len - folio_pos(folio));
+ for (offset = 0; offset < size; offset += sizeof(*block)) {
+ block = kmap_local_folio(folio, offset);
+ pr_warn("[%02lx] %32phN\n", folio_index(folio) + offset, block);
+ kunmap_local(block);
}
- kunmap_atomic(dbuf);
}
}
/*
- * Check all the pages in a directory. All the pages are held pinned.
+ * Check all the blocks in a directory. All the folios are held pinned.
*/
static int afs_dir_check(struct afs_vnode *dvnode, struct afs_read *req)
{
struct address_space *mapping = dvnode->vfs_inode.i_mapping;
- struct page *page;
+ struct folio *folio;
pgoff_t last = req->nr_pages - 1;
int ret = 0;
@@ -238,14 +227,13 @@ static int afs_dir_check(struct afs_vnode *dvnode, struct afs_read *req)
return 0;
rcu_read_lock();
- xas_for_each(&xas, page, last) {
- if (xas_retry(&xas, page))
+ xas_for_each(&xas, folio, last) {
+ if (xas_retry(&xas, folio))
continue;
- BUG_ON(PageCompound(page));
- BUG_ON(page->mapping != mapping);
+ BUG_ON(folio_file_mapping(folio) != mapping);
- if (!afs_dir_check_page(dvnode, page, req->file_size)) {
+ if (!afs_dir_check_folio(dvnode, folio, req->actual_len)) {
afs_dir_dump(dvnode, req);
ret = -EIO;
break;
@@ -274,15 +262,16 @@ static int afs_dir_open(struct inode *inode, struct file *file)
/*
* Read the directory into the pagecache in one go, scrubbing the previous
- * contents. The list of pages is returned, pinning them so that they don't
+ * contents. The list of folios is returned, pinning them so that they don't
* get reclaimed during the iteration.
*/
static struct afs_read *afs_read_dir(struct afs_vnode *dvnode, struct key *key)
__acquires(&dvnode->validate_lock)
{
+ struct address_space *mapping = dvnode->vfs_inode.i_mapping;
struct afs_read *req;
loff_t i_size;
- int nr_pages, i, n;
+ int nr_pages, i;
int ret;
_enter("");
@@ -320,43 +309,30 @@ expand:
req->iter = &req->def_iter;
/* Fill in any gaps that we might find where the memory reclaimer has
- * been at work and pin all the pages. If there are any gaps, we will
+ * been at work and pin all the folios. If there are any gaps, we will
* need to reread the entire directory contents.
*/
i = req->nr_pages;
while (i < nr_pages) {
- struct page *pages[8], *page;
-
- n = find_get_pages_contig(dvnode->vfs_inode.i_mapping, i,
- min_t(unsigned int, nr_pages - i,
- ARRAY_SIZE(pages)),
- pages);
- _debug("find %u at %u/%u", n, i, nr_pages);
-
- if (n == 0) {
- gfp_t gfp = dvnode->vfs_inode.i_mapping->gfp_mask;
+ struct folio *folio;
+ folio = filemap_get_folio(mapping, i);
+ if (!folio) {
if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
afs_stat_v(dvnode, n_inval);
ret = -ENOMEM;
- page = __page_cache_alloc(gfp);
- if (!page)
+ folio = __filemap_get_folio(mapping,
+ i, FGP_LOCK | FGP_CREAT,
+ mapping->gfp_mask);
+ if (!folio)
goto error;
- ret = add_to_page_cache_lru(page,
- dvnode->vfs_inode.i_mapping,
- i, gfp);
- if (ret < 0)
- goto error;
-
- attach_page_private(page, (void *)1);
- unlock_page(page);
- req->nr_pages++;
- i++;
- } else {
- req->nr_pages += n;
- i += n;
+ folio_attach_private(folio, (void *)1);
+ folio_unlock(folio);
}
+
+ req->nr_pages += folio_nr_pages(folio);
+ i += folio_nr_pages(folio);
}
/* If we're going to reload, we need to lock all the pages to prevent
@@ -424,7 +400,7 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
size_t nlen;
int tmp;
- _enter("%u,%x,%p,,",(unsigned)ctx->pos,blkoff,block);
+ _enter("%llx,%x", ctx->pos, blkoff);
curr = (ctx->pos - blkoff) / sizeof(union afs_xdr_dirent);
@@ -513,12 +489,10 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
struct key *key, afs_dataversion_t *_dir_version)
{
struct afs_vnode *dvnode = AFS_FS_I(dir);
- struct afs_xdr_dir_page *dbuf;
union afs_xdr_dir_block *dblock;
struct afs_read *req;
- struct page *page;
- unsigned blkoff, limit;
- void __rcu **slot;
+ struct folio *folio;
+ unsigned offset, size;
int ret;
_enter("{%lu},%u,,", dir->i_ino, (unsigned)ctx->pos);
@@ -540,43 +514,30 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
/* walk through the blocks in sequence */
ret = 0;
while (ctx->pos < req->actual_len) {
- blkoff = ctx->pos & ~(sizeof(union afs_xdr_dir_block) - 1);
-
- /* Fetch the appropriate page from the directory and re-add it
+ /* Fetch the appropriate folio from the directory and re-add it
* to the LRU. We have all the pages pinned with an extra ref.
*/
- rcu_read_lock();
- page = NULL;
- slot = radix_tree_lookup_slot(&dvnode->vfs_inode.i_mapping->i_pages,
- blkoff / PAGE_SIZE);
- if (slot)
- page = radix_tree_deref_slot(slot);
- rcu_read_unlock();
- if (!page) {
+ folio = __filemap_get_folio(dir->i_mapping, ctx->pos / PAGE_SIZE,
+ FGP_ACCESSED, 0);
+ if (!folio) {
ret = afs_bad(dvnode, afs_file_error_dir_missing_page);
break;
}
- mark_page_accessed(page);
- limit = blkoff & ~(PAGE_SIZE - 1);
+ offset = round_down(ctx->pos, sizeof(*dblock)) - folio_file_pos(folio);
+ size = min_t(loff_t, folio_size(folio),
+ req->actual_len - folio_file_pos(folio));
- dbuf = kmap(page);
-
- /* deal with the individual blocks stashed on this page */
do {
- dblock = &dbuf->blocks[(blkoff % PAGE_SIZE) /
- sizeof(union afs_xdr_dir_block)];
- ret = afs_dir_iterate_block(dvnode, ctx, dblock, blkoff);
- if (ret != 1) {
- kunmap(page);
+ dblock = kmap_local_folio(folio, offset);
+ ret = afs_dir_iterate_block(dvnode, ctx, dblock,
+ folio_file_pos(folio) + offset);
+ kunmap_local(dblock);
+ if (ret != 1)
goto out;
- }
- blkoff += sizeof(union afs_xdr_dir_block);
+ } while (offset += sizeof(*dblock), offset < size);
- } while (ctx->pos < dir->i_size && blkoff < limit);
-
- kunmap(page);
ret = 0;
}
@@ -2037,42 +1998,42 @@ error:
}
/*
- * Release a directory page and clean up its private state if it's not busy
- * - return true if the page can now be released, false if not
+ * Release a directory folio and clean up its private state if it's not busy
+ * - return true if the folio can now be released, false if not
*/
-static int afs_dir_releasepage(struct page *page, gfp_t gfp_flags)
+static int afs_dir_releasepage(struct page *subpage, gfp_t gfp_flags)
{
- struct afs_vnode *dvnode = AFS_FS_I(page->mapping->host);
+ struct folio *folio = page_folio(subpage);
+ struct afs_vnode *dvnode = AFS_FS_I(folio_inode(folio));
- _enter("{{%llx:%llu}[%lu]}", dvnode->fid.vid, dvnode->fid.vnode, page->index);
+ _enter("{{%llx:%llu}[%lu]}", dvnode->fid.vid, dvnode->fid.vnode, folio_index(folio));
- detach_page_private(page);
+ folio_detach_private(folio);
/* The directory will need reloading. */
if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
afs_stat_v(dvnode, n_relpg);
- return 1;
+ return true;
}
/*
- * invalidate part or all of a page
- * - release a page and clean up its private data if offset is 0 (indicating
- * the entire page)
+ * Invalidate part or all of a folio.
*/
-static void afs_dir_invalidatepage(struct page *page, unsigned int offset,
+static void afs_dir_invalidatepage(struct page *subpage, unsigned int offset,
unsigned int length)
{
- struct afs_vnode *dvnode = AFS_FS_I(page->mapping->host);
+ struct folio *folio = page_folio(subpage);
+ struct afs_vnode *dvnode = AFS_FS_I(folio_inode(folio));
- _enter("{%lu},%u,%u", page->index, offset, length);
+ _enter("{%lu},%u,%u", folio_index(folio), offset, length);
- BUG_ON(!PageLocked(page));
+ BUG_ON(!folio_test_locked(folio));
/* The directory will need reloading. */
if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
afs_stat_v(dvnode, n_inval);
- /* we clean up only if the entire page is being invalidated */
- if (offset == 0 && length == thp_size(page))
- detach_page_private(page);
+ /* we clean up only if the entire folio is being invalidated */
+ if (offset == 0 && length == folio_size(folio))
+ folio_detach_private(folio);
}
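afs_dir_check_folio() above walks fixed-size directory blocks inside a folio, limiting the scan to the part still covered by i_size, validating a magic value per block and NUL-terminating each block so string functions stay safe. A userspace model of that walk; the block size and magic value are made up for the example:

/* Userspace model of the per-block scan in afs_dir_check_folio() above.
 * Block size and magic are illustrative, not the real AFS values. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define BLOCK_SIZE  2048
#define BLOCK_MAGIC 0x1234

struct dir_block {
	uint16_t magic;
	uint8_t data[BLOCK_SIZE - 2];
};

static bool check_blocks(uint8_t *buf, size_t buf_size, long long pos,
			 long long i_size)
{
	size_t size, offset;

	if (i_size <= pos)
		return true;	/* buffer lies entirely beyond EOF */

	size = buf_size;
	if ((long long)size > i_size - pos)
		size = i_size - pos;	/* directory may have shrunk under us */

	for (offset = 0; offset + sizeof(struct dir_block) <= size;
	     offset += sizeof(struct dir_block)) {
		struct dir_block *block = (struct dir_block *)(buf + offset);

		if (block->magic != BLOCK_MAGIC) {
			fprintf(stderr, "bad magic at %zu\n", offset);
			return false;
		}
		/* Keep string functions safe within the block. */
		buf[offset + BLOCK_SIZE - 1] = 0;
	}
	return true;
}

int main(void)
{
	static struct dir_block blocks[2];

	blocks[0].magic = BLOCK_MAGIC;
	blocks[1].magic = BLOCK_MAGIC;
	printf("%s\n", check_blocks((uint8_t *)blocks, sizeof(blocks), 0,
				    sizeof(blocks)) ? "ok" : "bad");
	return 0;
}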
diff --git a/fs/afs/dir_edit.c b/fs/afs/dir_edit.c
index 540b9fc96824ad..d98e109ecee9a3 100644
--- a/fs/afs/dir_edit.c
+++ b/fs/afs/dir_edit.c
@@ -105,6 +105,25 @@ static void afs_clear_contig_bits(union afs_xdr_dir_block *block,
}
/*
+ * Get a new directory folio.
+ */
+static struct folio *afs_dir_get_folio(struct afs_vnode *vnode, pgoff_t index)
+{
+ struct address_space *mapping = vnode->vfs_inode.i_mapping;
+ struct folio *folio;
+
+ folio = __filemap_get_folio(mapping, index,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
+ mapping->gfp_mask);
+ if (!folio)
+ clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
+ else if (folio && !folio_test_private(folio))
+ folio_attach_private(folio, (void *)1);
+
+ return folio;
+}
+
+/*
* Scan a directory block looking for a dirent of the right name.
*/
static int afs_dir_scan_block(union afs_xdr_dir_block *block, struct qstr *name,
@@ -188,13 +207,11 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
enum afs_edit_dir_reason why)
{
union afs_xdr_dir_block *meta, *block;
- struct afs_xdr_dir_page *meta_page, *dir_page;
union afs_xdr_dirent *de;
- struct page *page0, *page;
+ struct folio *folio0, *folio;
unsigned int need_slots, nr_blocks, b;
pgoff_t index;
loff_t i_size;
- gfp_t gfp;
int slot;
_enter(",,{%d,%s},", name->len, name->name);
@@ -206,10 +223,8 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
return;
}
- gfp = vnode->vfs_inode.i_mapping->gfp_mask;
- page0 = find_or_create_page(vnode->vfs_inode.i_mapping, 0, gfp);
- if (!page0) {
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
+ folio0 = afs_dir_get_folio(vnode, 0);
+ if (!folio0) {
_leave(" [fgp]");
return;
}
@@ -217,42 +232,35 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
/* Work out how many slots we're going to need. */
need_slots = afs_dir_calc_slots(name->len);
- meta_page = kmap(page0);
- meta = &meta_page->blocks[0];
+ meta = kmap_local_folio(folio0, 0);
if (i_size == 0)
goto new_directory;
nr_blocks = i_size / AFS_DIR_BLOCK_SIZE;
- /* Find a block that has sufficient slots available. Each VM page
+ /* Find a block that has sufficient slots available. Each folio
* contains two or more directory blocks.
*/
for (b = 0; b < nr_blocks + 1; b++) {
- /* If the directory extended into a new page, then we need to
- * tack a new page on the end.
+ /* If the directory extended into a new folio, then we need to
+ * tack a new folio on the end.
*/
index = b / AFS_DIR_BLOCKS_PER_PAGE;
- if (index == 0) {
- page = page0;
- dir_page = meta_page;
- } else {
- if (nr_blocks >= AFS_DIR_MAX_BLOCKS)
- goto error;
- gfp = vnode->vfs_inode.i_mapping->gfp_mask;
- page = find_or_create_page(vnode->vfs_inode.i_mapping,
- index, gfp);
- if (!page)
+ if (nr_blocks >= AFS_DIR_MAX_BLOCKS)
+ goto error;
+ if (index >= folio_nr_pages(folio0)) {
+ folio = afs_dir_get_folio(vnode, index);
+ if (!folio)
goto error;
- if (!PagePrivate(page))
- attach_page_private(page, (void *)1);
- dir_page = kmap(page);
+ } else {
+ folio = folio0;
}
+ block = kmap_local_folio(folio, b * AFS_DIR_BLOCK_SIZE - folio_file_pos(folio));
+
/* Abandon the edit if we got a callback break. */
if (!test_bit(AFS_VNODE_DIR_VALID, &vnode->flags))
goto invalidated;
- block = &dir_page->blocks[b % AFS_DIR_BLOCKS_PER_PAGE];
-
_debug("block %u: %2u %3u %u",
b,
(b < AFS_DIR_BLOCKS_WITH_CTR) ? meta->meta.alloc_ctrs[b] : 99,
@@ -266,7 +274,7 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
afs_set_i_size(vnode, (b + 1) * AFS_DIR_BLOCK_SIZE);
}
- /* Only lower dir pages have a counter in the header. */
+ /* Only lower dir blocks have a counter in the header. */
if (b >= AFS_DIR_BLOCKS_WITH_CTR ||
meta->meta.alloc_ctrs[b] >= need_slots) {
/* We need to try and find one or more consecutive
@@ -279,10 +287,10 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
}
}
- if (page != page0) {
- unlock_page(page);
- kunmap(page);
- put_page(page);
+ kunmap_local(block);
+ if (folio != folio0) {
+ folio_unlock(folio);
+ folio_put(folio);
}
}
@@ -298,8 +306,8 @@ new_directory:
i_size = AFS_DIR_BLOCK_SIZE;
afs_set_i_size(vnode, i_size);
slot = AFS_DIR_RESV_BLOCKS0;
- page = page0;
- block = meta;
+ folio = folio0;
+ block = kmap_local_folio(folio, 0);
nr_blocks = 1;
b = 0;
@@ -318,10 +326,10 @@ found_space:
/* Adjust the bitmap. */
afs_set_contig_bits(block, slot, need_slots);
- if (page != page0) {
- unlock_page(page);
- kunmap(page);
- put_page(page);
+ kunmap_local(block);
+ if (folio != folio0) {
+ folio_unlock(folio);
+ folio_put(folio);
}
/* Adjust the allocation counter. */
@@ -333,18 +341,19 @@ found_space:
_debug("Insert %s in %u[%u]", name->name, b, slot);
out_unmap:
- unlock_page(page0);
- kunmap(page0);
- put_page(page0);
+ kunmap_local(meta);
+ folio_unlock(folio0);
+ folio_put(folio0);
_leave("");
return;
invalidated:
trace_afs_edit_dir(vnode, why, afs_edit_dir_create_inval, 0, 0, 0, 0, name->name);
clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
- if (page != page0) {
- kunmap(page);
- put_page(page);
+ kunmap_local(block);
+ if (folio != folio0) {
+ folio_unlock(folio);
+ folio_put(folio);
}
goto out_unmap;
@@ -364,10 +373,9 @@ error:
void afs_edit_dir_remove(struct afs_vnode *vnode,
struct qstr *name, enum afs_edit_dir_reason why)
{
- struct afs_xdr_dir_page *meta_page, *dir_page;
union afs_xdr_dir_block *meta, *block;
union afs_xdr_dirent *de;
- struct page *page0, *page;
+ struct folio *folio0, *folio;
unsigned int need_slots, nr_blocks, b;
pgoff_t index;
loff_t i_size;
@@ -384,9 +392,8 @@ void afs_edit_dir_remove(struct afs_vnode *vnode,
}
nr_blocks = i_size / AFS_DIR_BLOCK_SIZE;
- page0 = find_lock_page(vnode->vfs_inode.i_mapping, 0);
- if (!page0) {
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
+ folio0 = afs_dir_get_folio(vnode, 0);
+ if (!folio0) {
_leave(" [fgp]");
return;
}
@@ -394,30 +401,27 @@ void afs_edit_dir_remove(struct afs_vnode *vnode,
/* Work out how many slots we're going to discard. */
need_slots = afs_dir_calc_slots(name->len);
- meta_page = kmap(page0);
- meta = &meta_page->blocks[0];
+ meta = kmap_local_folio(folio0, 0);
- /* Find a page that has sufficient slots available. Each VM page
+ /* Find a block that has sufficient slots available. Each folio
* contains two or more directory blocks.
*/
for (b = 0; b < nr_blocks; b++) {
index = b / AFS_DIR_BLOCKS_PER_PAGE;
- if (index != 0) {
- page = find_lock_page(vnode->vfs_inode.i_mapping, index);
- if (!page)
+ if (index >= folio_nr_pages(folio0)) {
+ folio = afs_dir_get_folio(vnode, index);
+ if (!folio)
goto error;
- dir_page = kmap(page);
} else {
- page = page0;
- dir_page = meta_page;
+ folio = folio0;
}
+ block = kmap_local_folio(folio, b * AFS_DIR_BLOCK_SIZE - folio_file_pos(folio));
+
/* Abandon the edit if we got a callback break. */
if (!test_bit(AFS_VNODE_DIR_VALID, &vnode->flags))
goto invalidated;
- block = &dir_page->blocks[b % AFS_DIR_BLOCKS_PER_PAGE];
-
if (b > AFS_DIR_BLOCKS_WITH_CTR ||
meta->meta.alloc_ctrs[b] <= AFS_DIR_SLOTS_PER_BLOCK - 1 - need_slots) {
slot = afs_dir_scan_block(block, name, b);
@@ -425,10 +429,10 @@ void afs_edit_dir_remove(struct afs_vnode *vnode,
goto found_dirent;
}
- if (page != page0) {
- unlock_page(page);
- kunmap(page);
- put_page(page);
+ kunmap_local(block);
+ if (folio != folio0) {
+ folio_unlock(folio);
+ folio_put(folio);
}
}
@@ -449,10 +453,10 @@ found_dirent:
/* Adjust the bitmap. */
afs_clear_contig_bits(block, slot, need_slots);
- if (page != page0) {
- unlock_page(page);
- kunmap(page);
- put_page(page);
+ kunmap_local(block);
+ if (folio != folio0) {
+ folio_unlock(folio);
+ folio_put(folio);
}
/* Adjust the allocation counter. */
@@ -464,9 +468,9 @@ found_dirent:
_debug("Remove %s from %u[%u]", name->name, b, slot);
out_unmap:
- unlock_page(page0);
- kunmap(page0);
- put_page(page0);
+ kunmap_local(meta);
+ folio_unlock(folio0);
+ folio_put(folio0);
_leave("");
return;
@@ -474,10 +478,10 @@ invalidated:
trace_afs_edit_dir(vnode, why, afs_edit_dir_delete_inval,
0, 0, 0, 0, name->name);
clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
- if (page != page0) {
- unlock_page(page);
- kunmap(page);
- put_page(page);
+ kunmap_local(block);
+ if (folio != folio0) {
+ folio_unlock(folio);
+ folio_put(folio);
}
goto out_unmap;
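Both directory-edit paths above locate block b by computing its page index (b / AFS_DIR_BLOCKS_PER_PAGE) and, when the block falls outside folio0, fetching a later folio and addressing the block at b * AFS_DIR_BLOCK_SIZE minus that folio's file position. A standalone model of the arithmetic, assuming 2 KiB blocks, 4 KiB pages and single-page folios after folio0:

/* Standalone model of the block -> folio mapping used in afs_edit_dir_add()
 * and afs_edit_dir_remove() above. Sizes and layout are illustrative; later
 * folios are assumed to cover exactly one page each. */
#include <stdio.h>

#define PAGE_SIZE	4096u
#define DIR_BLOCK_SIZE	2048u
#define BLOCKS_PER_PAGE	(PAGE_SIZE / DIR_BLOCK_SIZE)

struct where { unsigned long page_index; unsigned long folio_offset; };

/* folio0_pages: how many pages the folio starting at page 0 spans. */
static struct where locate_block(unsigned int b, unsigned long folio0_pages)
{
	struct where w;
	unsigned long index = b / BLOCKS_PER_PAGE;

	if (index >= folio0_pages) {
		/* Block lives outside folio0: its folio starts at 'index'. */
		w.page_index = index;
		w.folio_offset = (unsigned long)b * DIR_BLOCK_SIZE - index * PAGE_SIZE;
	} else {
		w.page_index = 0;
		w.folio_offset = (unsigned long)b * DIR_BLOCK_SIZE;
	}
	return w;
}

int main(void)
{
	for (unsigned int b = 0; b < 5; b++) {
		struct where w = locate_block(b, 1);	/* folio0 spans one page */

		printf("block %u -> page %lu, offset %lu\n",
		       b, w.page_index, w.folio_offset);
	}
	return 0;
}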
diff --git a/fs/afs/file.c b/fs/afs/file.c
index eb11d047c0ae62..cb6ad61eec3bf5 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -324,21 +324,24 @@ static int afs_symlink_readpage(struct file *file, struct page *page)
{
struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
struct afs_read *fsreq;
+ struct folio *folio = page_folio(page);
int ret;
fsreq = afs_alloc_read(GFP_NOFS);
if (!fsreq)
return -ENOMEM;
- fsreq->pos = page->index * PAGE_SIZE;
- fsreq->len = PAGE_SIZE;
+ fsreq->pos = folio_pos(folio);
+ fsreq->len = folio_size(folio);
fsreq->vnode = vnode;
fsreq->iter = &fsreq->def_iter;
iov_iter_xarray(&fsreq->def_iter, READ, &page->mapping->i_pages,
fsreq->pos, fsreq->len);
ret = afs_fetch_data(fsreq->vnode, fsreq);
- page_endio(page, false, ret);
+ if (ret == 0)
+ SetPageUptodate(page);
+ unlock_page(page);
return ret;
}
@@ -362,7 +365,7 @@ static int afs_begin_cache_operation(struct netfs_read_request *rreq)
}
static int afs_check_write_begin(struct file *file, loff_t pos, unsigned len,
- struct page *page, void **_fsdata)
+ struct folio *folio, void **_fsdata)
{
struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
@@ -385,7 +388,9 @@ const struct netfs_read_request_ops afs_req_ops = {
static int afs_readpage(struct file *file, struct page *page)
{
- return netfs_readpage(file, page, &afs_req_ops, NULL);
+ struct folio *folio = page_folio(page);
+
+ return netfs_readpage(file, folio, &afs_req_ops, NULL);
}
static void afs_readahead(struct readahead_control *ractl)
@@ -397,29 +402,29 @@ static void afs_readahead(struct readahead_control *ractl)
* Adjust the dirty region of the page on truncation or full invalidation,
* getting rid of the markers altogether if the region is entirely invalidated.
*/
-static void afs_invalidate_dirty(struct page *page, unsigned int offset,
+static void afs_invalidate_dirty(struct folio *folio, unsigned int offset,
unsigned int length)
{
- struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
+ struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
unsigned long priv;
unsigned int f, t, end = offset + length;
- priv = page_private(page);
+ priv = (unsigned long)folio_get_private(folio);
/* we clean up only if the entire page is being invalidated */
- if (offset == 0 && length == thp_size(page))
+ if (offset == 0 && length == folio_size(folio))
goto full_invalidate;
/* If the page was dirtied by page_mkwrite(), the PTE stays writable
* and we don't get another notification to tell us to expand it
* again.
*/
- if (afs_is_page_dirty_mmapped(priv))
+ if (afs_is_folio_dirty_mmapped(priv))
return;
/* We may need to shorten the dirty region */
- f = afs_page_dirty_from(page, priv);
- t = afs_page_dirty_to(page, priv);
+ f = afs_folio_dirty_from(folio, priv);
+ t = afs_folio_dirty_to(folio, priv);
if (t <= offset || f >= end)
return; /* Doesn't overlap */
@@ -437,17 +442,17 @@ static void afs_invalidate_dirty(struct page *page, unsigned int offset,
if (f == t)
goto undirty;
- priv = afs_page_dirty(page, f, t);
- set_page_private(page, priv);
- trace_afs_page_dirty(vnode, tracepoint_string("trunc"), page);
+ priv = afs_folio_dirty(folio, f, t);
+ folio_change_private(folio, (void *)priv);
+ trace_afs_folio_dirty(vnode, tracepoint_string("trunc"), folio);
return;
undirty:
- trace_afs_page_dirty(vnode, tracepoint_string("undirty"), page);
- clear_page_dirty_for_io(page);
+ trace_afs_folio_dirty(vnode, tracepoint_string("undirty"), folio);
+ folio_clear_dirty_for_io(folio);
full_invalidate:
- trace_afs_page_dirty(vnode, tracepoint_string("inval"), page);
- detach_page_private(page);
+ trace_afs_folio_dirty(vnode, tracepoint_string("inval"), folio);
+ folio_detach_private(folio);
}
/*
@@ -458,14 +463,16 @@ full_invalidate:
static void afs_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
{
- _enter("{%lu},%u,%u", page->index, offset, length);
+ struct folio *folio = page_folio(page);
+
+ _enter("{%lu},%u,%u", folio_index(folio), offset, length);
BUG_ON(!PageLocked(page));
if (PagePrivate(page))
- afs_invalidate_dirty(page, offset, length);
+ afs_invalidate_dirty(folio, offset, length);
- wait_on_page_fscache(page);
+ folio_wait_fscache(folio);
_leave("");
}
@@ -475,30 +482,31 @@ static void afs_invalidatepage(struct page *page, unsigned int offset,
*/
static int afs_releasepage(struct page *page, gfp_t gfp_flags)
{
- struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
+ struct folio *folio = page_folio(page);
+ struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
_enter("{{%llx:%llu}[%lu],%lx},%x",
- vnode->fid.vid, vnode->fid.vnode, page->index, page->flags,
+ vnode->fid.vid, vnode->fid.vnode, folio_index(folio), folio->flags,
gfp_flags);
/* deny if page is being written to the cache and the caller hasn't
* elected to wait */
#ifdef CONFIG_AFS_FSCACHE
- if (PageFsCache(page)) {
+ if (folio_test_fscache(folio)) {
if (!(gfp_flags & __GFP_DIRECT_RECLAIM) || !(gfp_flags & __GFP_FS))
return false;
- wait_on_page_fscache(page);
+ folio_wait_fscache(folio);
}
#endif
- if (PagePrivate(page)) {
- trace_afs_page_dirty(vnode, tracepoint_string("rel"), page);
- detach_page_private(page);
+ if (folio_test_private(folio)) {
+ trace_afs_folio_dirty(vnode, tracepoint_string("rel"), folio);
+ folio_detach_private(folio);
}
- /* indicate that the page can be released */
+ /* Indicate that the folio can be released */
_leave(" = T");
- return 1;
+ return true;
}
static void afs_add_open_mmap(struct afs_vnode *vnode)
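afs_releasepage() above refuses to release a folio that is still being written to fscache unless the allocation context is allowed to block, otherwise waits, drops the private dirty-tracking word and reports the folio as releasable. A standalone model of that decision; the flag names and values below are illustrative, not the real gfp bits:

/* Standalone model of the releasepage decision above: refuse to release a
 * folio still being written to the cache unless the caller may wait. */
#include <stdio.h>
#include <stdbool.h>

#define GFP_DIRECT_RECLAIM 0x1
#define GFP_FS             0x2

static bool can_release(bool under_fscache, bool has_private, unsigned int gfp)
{
	if (under_fscache) {
		if (!(gfp & GFP_DIRECT_RECLAIM) || !(gfp & GFP_FS))
			return false;	/* caller cannot wait here */
		/* otherwise we would wait for the cache write to finish */
	}
	if (has_private) {
		/* drop the dirty-region tracking attached to the folio */
	}
	return true;
}

int main(void)
{
	printf("%d\n", can_release(true, true, GFP_FS));			/* 0 */
	printf("%d\n", can_release(true, true, GFP_DIRECT_RECLAIM | GFP_FS));	/* 1 */
	return 0;
}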
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 9357c53faa6931..aa4c0d6c9780cf 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -876,59 +876,59 @@ struct afs_vnode_cache_aux {
} __packed;
/*
- * We use page->private to hold the amount of the page that we've written to,
+ * We use folio->private to hold the amount of the folio that we've written to,
* splitting the field into two parts. However, we need to represent a range
- * 0...PAGE_SIZE, so we reduce the resolution if the size of the page
+ * 0...FOLIO_SIZE, so we reduce the resolution if the size of the folio
* exceeds what we can encode.
*/
#ifdef CONFIG_64BIT
-#define __AFS_PAGE_PRIV_MASK 0x7fffffffUL
-#define __AFS_PAGE_PRIV_SHIFT 32
-#define __AFS_PAGE_PRIV_MMAPPED 0x80000000UL
+#define __AFS_FOLIO_PRIV_MASK 0x7fffffffUL
+#define __AFS_FOLIO_PRIV_SHIFT 32
+#define __AFS_FOLIO_PRIV_MMAPPED 0x80000000UL
#else
-#define __AFS_PAGE_PRIV_MASK 0x7fffUL
-#define __AFS_PAGE_PRIV_SHIFT 16
-#define __AFS_PAGE_PRIV_MMAPPED 0x8000UL
+#define __AFS_FOLIO_PRIV_MASK 0x7fffUL
+#define __AFS_FOLIO_PRIV_SHIFT 16
+#define __AFS_FOLIO_PRIV_MMAPPED 0x8000UL
#endif
-static inline unsigned int afs_page_dirty_resolution(struct page *page)
+static inline unsigned int afs_folio_dirty_resolution(struct folio *folio)
{
- int shift = thp_order(page) + PAGE_SHIFT - (__AFS_PAGE_PRIV_SHIFT - 1);
+ int shift = folio_shift(folio) - (__AFS_FOLIO_PRIV_SHIFT - 1);
return (shift > 0) ? shift : 0;
}
-static inline size_t afs_page_dirty_from(struct page *page, unsigned long priv)
+static inline size_t afs_folio_dirty_from(struct folio *folio, unsigned long priv)
{
- unsigned long x = priv & __AFS_PAGE_PRIV_MASK;
+ unsigned long x = priv & __AFS_FOLIO_PRIV_MASK;
/* The lower bound is inclusive */
- return x << afs_page_dirty_resolution(page);
+ return x << afs_folio_dirty_resolution(folio);
}
-static inline size_t afs_page_dirty_to(struct page *page, unsigned long priv)
+static inline size_t afs_folio_dirty_to(struct folio *folio, unsigned long priv)
{
- unsigned long x = (priv >> __AFS_PAGE_PRIV_SHIFT) & __AFS_PAGE_PRIV_MASK;
+ unsigned long x = (priv >> __AFS_FOLIO_PRIV_SHIFT) & __AFS_FOLIO_PRIV_MASK;
/* The upper bound is immediately beyond the region */
- return (x + 1) << afs_page_dirty_resolution(page);
+ return (x + 1) << afs_folio_dirty_resolution(folio);
}
-static inline unsigned long afs_page_dirty(struct page *page, size_t from, size_t to)
+static inline unsigned long afs_folio_dirty(struct folio *folio, size_t from, size_t to)
{
- unsigned int res = afs_page_dirty_resolution(page);
+ unsigned int res = afs_folio_dirty_resolution(folio);
from >>= res;
to = (to - 1) >> res;
- return (to << __AFS_PAGE_PRIV_SHIFT) | from;
+ return (to << __AFS_FOLIO_PRIV_SHIFT) | from;
}
-static inline unsigned long afs_page_dirty_mmapped(unsigned long priv)
+static inline unsigned long afs_folio_dirty_mmapped(unsigned long priv)
{
- return priv | __AFS_PAGE_PRIV_MMAPPED;
+ return priv | __AFS_FOLIO_PRIV_MMAPPED;
}
-static inline bool afs_is_page_dirty_mmapped(unsigned long priv)
+static inline bool afs_is_folio_dirty_mmapped(unsigned long priv)
{
- return priv & __AFS_PAGE_PRIV_MMAPPED;
+ return priv & __AFS_FOLIO_PRIV_MMAPPED;
}
#include <trace/events/afs.h>
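The helpers above pack the dirty byte range of a folio into folio->private: the lower half holds the start, the upper half the end (stored inclusive, decoded as one-past-the-end), a top bit marks mmapped folios, and the resolution is coarsened when the folio is larger than the per-half bit budget. A runnable userspace model using the 64-bit constants from the hunk; it assumes a 64-bit unsigned long, as in the CONFIG_64BIT branch:

/* Userspace model of the folio->private dirty-range encoding shown above.
 * Assumes 64-bit unsigned long; a plain integer stands in for folio_shift(). */
#include <stdio.h>
#include <stddef.h>

#define PRIV_MASK	0x7fffffffUL
#define PRIV_SHIFT	32
#define PRIV_MMAPPED	0x80000000UL

/* Reduce resolution if the folio is too big for the per-half bit budget. */
static unsigned int dirty_resolution(unsigned int folio_shift)
{
	int shift = (int)folio_shift - (PRIV_SHIFT - 1);

	return shift > 0 ? shift : 0;
}

static unsigned long dirty_encode(unsigned int folio_shift, size_t from, size_t to)
{
	unsigned int res = dirty_resolution(folio_shift);

	from >>= res;
	to = (to - 1) >> res;
	return (to << PRIV_SHIFT) | from;
}

static size_t dirty_from(unsigned int folio_shift, unsigned long priv)
{
	return (priv & PRIV_MASK) << dirty_resolution(folio_shift);	/* inclusive */
}

static size_t dirty_to(unsigned int folio_shift, unsigned long priv)
{
	/* exclusive: one past the end of the dirty region */
	return (((priv >> PRIV_SHIFT) & PRIV_MASK) + 1) << dirty_resolution(folio_shift);
}

int main(void)
{
	unsigned long priv = dirty_encode(12, 100, 3000);	/* 4 KiB folio */

	printf("from=%zu to=%zu mmapped=%lu\n",
	       dirty_from(12, priv), dirty_to(12, priv), priv & PRIV_MMAPPED);
	return 0;
}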
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 8b1d9c2f6bec24..ca4909baf5e6ca 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -32,7 +32,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
struct page **_page, void **fsdata)
{
struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
- struct page *page;
+ struct folio *folio;
unsigned long priv;
unsigned f, from;
unsigned t, to;
@@ -46,12 +46,12 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
* file. We need to do this before we get a lock on the page in case
* there's more than one writer competing for the same cache block.
*/
- ret = netfs_write_begin(file, mapping, pos, len, flags, &page, fsdata,
+ ret = netfs_write_begin(file, mapping, pos, len, flags, &folio, fsdata,
&afs_req_ops, NULL);
if (ret < 0)
return ret;
- index = page->index;
+ index = folio_index(folio);
from = pos - index * PAGE_SIZE;
to = from + len;
@@ -59,14 +59,14 @@ try_again:
/* See if this page is already partially written in a way that we can
* merge the new write with.
*/
- if (PagePrivate(page)) {
- priv = page_private(page);
- f = afs_page_dirty_from(page, priv);
- t = afs_page_dirty_to(page, priv);
+ if (folio_test_private(folio)) {
+ priv = (unsigned long)folio_get_private(folio);
+ f = afs_folio_dirty_from(folio, priv);
+ t = afs_folio_dirty_to(folio, priv);
ASSERTCMP(f, <=, t);
- if (PageWriteback(page)) {
- trace_afs_page_dirty(vnode, tracepoint_string("alrdy"), page);
+ if (folio_test_writeback(folio)) {
+ trace_afs_folio_dirty(vnode, tracepoint_string("alrdy"), folio);
goto flush_conflicting_write;
}
/* If the file is being filled locally, allow inter-write
@@ -78,7 +78,7 @@ try_again:
goto flush_conflicting_write;
}
- *_page = page;
+ *_page = &folio->page;
_leave(" = 0");
return 0;
@@ -87,17 +87,17 @@ try_again:
*/
flush_conflicting_write:
_debug("flush conflict");
- ret = write_one_page(page);
+ ret = folio_write_one(folio);
if (ret < 0)
goto error;
- ret = lock_page_killable(page);
+ ret = folio_lock_killable(folio);
if (ret < 0)
goto error;
goto try_again;
error:
- put_page(page);
+ folio_put(folio);
_leave(" = %d", ret);
return ret;
}
@@ -107,24 +107,25 @@ error:
*/
int afs_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ struct page *subpage, void *fsdata)
{
+ struct folio *folio = page_folio(subpage);
struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
unsigned long priv;
- unsigned int f, from = pos & (thp_size(page) - 1);
+ unsigned int f, from = offset_in_folio(folio, pos);
unsigned int t, to = from + copied;
loff_t i_size, maybe_i_size;
_enter("{%llx:%llu},{%lx}",
- vnode->fid.vid, vnode->fid.vnode, page->index);
+ vnode->fid.vid, vnode->fid.vnode, folio_index(folio));
- if (!PageUptodate(page)) {
+ if (!folio_test_uptodate(folio)) {
if (copied < len) {
copied = 0;
goto out;
}
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
}
if (copied == 0)
@@ -141,29 +142,29 @@ int afs_write_end(struct file *file, struct address_space *mapping,
write_sequnlock(&vnode->cb_lock);
}
- if (PagePrivate(page)) {
- priv = page_private(page);
- f = afs_page_dirty_from(page, priv);
- t = afs_page_dirty_to(page, priv);
+ if (folio_test_private(folio)) {
+ priv = (unsigned long)folio_get_private(folio);
+ f = afs_folio_dirty_from(folio, priv);
+ t = afs_folio_dirty_to(folio, priv);
if (from < f)
f = from;
if (to > t)
t = to;
- priv = afs_page_dirty(page, f, t);
- set_page_private(page, priv);
- trace_afs_page_dirty(vnode, tracepoint_string("dirty+"), page);
+ priv = afs_folio_dirty(folio, f, t);
+ folio_change_private(folio, (void *)priv);
+ trace_afs_folio_dirty(vnode, tracepoint_string("dirty+"), folio);
} else {
- priv = afs_page_dirty(page, from, to);
- attach_page_private(page, (void *)priv);
- trace_afs_page_dirty(vnode, tracepoint_string("dirty"), page);
+ priv = afs_folio_dirty(folio, from, to);
+ folio_attach_private(folio, (void *)priv);
+ trace_afs_folio_dirty(vnode, tracepoint_string("dirty"), folio);
}
- if (set_page_dirty(page))
- _debug("dirtied %lx", page->index);
+ if (folio_mark_dirty(folio))
+ _debug("dirtied %lx", folio_index(folio));
out:
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
return copied;
}
@@ -174,40 +175,32 @@ static void afs_kill_pages(struct address_space *mapping,
loff_t start, loff_t len)
{
struct afs_vnode *vnode = AFS_FS_I(mapping->host);
- struct pagevec pv;
- unsigned int loop, psize;
+ struct folio *folio;
+ pgoff_t index = start / PAGE_SIZE;
+ pgoff_t last = (start + len - 1) / PAGE_SIZE, next;
_enter("{%llx:%llu},%llx @%llx",
vnode->fid.vid, vnode->fid.vnode, len, start);
- pagevec_init(&pv);
-
do {
- _debug("kill %llx @%llx", len, start);
-
- pv.nr = find_get_pages_contig(mapping, start / PAGE_SIZE,
- PAGEVEC_SIZE, pv.pages);
- if (pv.nr == 0)
- break;
+ _debug("kill %lx (to %lx)", index, last);
- for (loop = 0; loop < pv.nr; loop++) {
- struct page *page = pv.pages[loop];
+ folio = filemap_get_folio(mapping, index);
+ if (!folio) {
+ next = index + 1;
+ continue;
+ }
- if (page->index * PAGE_SIZE >= start + len)
- break;
+ next = folio_next_index(folio);
- psize = thp_size(page);
- start += psize;
- len -= psize;
- ClearPageUptodate(page);
- end_page_writeback(page);
- lock_page(page);
- generic_error_remove_page(mapping, page);
- unlock_page(page);
- }
+ folio_clear_uptodate(folio);
+ folio_end_writeback(folio);
+ folio_lock(folio);
+ generic_error_remove_page(mapping, &folio->page);
+ folio_unlock(folio);
+ folio_put(folio);
- __pagevec_release(&pv);
- } while (len > 0);
+ } while (index = next, index <= last);
_leave("");
}
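afs_kill_pages() and afs_redirty_pages() above replace the pagevec batch with a plain index walk: look up the folio at the current index, step past all of its pages (or by one page if there is a hole), and repeat until the end of the range. A standalone model of that loop over an illustrative page-cache layout:

/* Standalone model of the index-walking loop that replaces the pagevec scan
 * in afs_kill_pages()/afs_redirty_pages() above; the layout is illustrative. */
#include <stdio.h>

#define NR_PAGES 16

/* nr[i] > 0 means a folio of that many pages starts at page i; 0 means a hole. */
static const unsigned int nr[NR_PAGES] = { 1, 0, 4, 0, 0, 0, 2, 0, 1 };

int main(void)
{
	unsigned long index = 0, last = 8, next;

	do {
		if (!nr[index]) {
			next = index + 1;	/* hole: step one page forward */
			continue;
		}
		next = index + nr[index];	/* folio_next_index() equivalent */
		printf("handle folio at %lu spanning %u page(s)\n", index, nr[index]);
	} while (index = next, index <= last);
	return 0;
}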
@@ -220,37 +213,27 @@ static void afs_redirty_pages(struct writeback_control *wbc,
loff_t start, loff_t len)
{
struct afs_vnode *vnode = AFS_FS_I(mapping->host);
- struct pagevec pv;
- unsigned int loop, psize;
+ struct folio *folio;
+ pgoff_t index = start / PAGE_SIZE;
+ pgoff_t last = (start + len - 1) / PAGE_SIZE, next;
_enter("{%llx:%llu},%llx @%llx",
vnode->fid.vid, vnode->fid.vnode, len, start);
- pagevec_init(&pv);
-
do {
_debug("redirty %llx @%llx", len, start);
- pv.nr = find_get_pages_contig(mapping, start / PAGE_SIZE,
- PAGEVEC_SIZE, pv.pages);
- if (pv.nr == 0)
- break;
-
- for (loop = 0; loop < pv.nr; loop++) {
- struct page *page = pv.pages[loop];
-
- if (page->index * PAGE_SIZE >= start + len)
- break;
-
- psize = thp_size(page);
- start += psize;
- len -= psize;
- redirty_page_for_writepage(wbc, page);
- end_page_writeback(page);
+ folio = filemap_get_folio(mapping, index);
+ if (!folio) {
+ next = index + 1;
+ continue;
}
- __pagevec_release(&pv);
- } while (len > 0);
+ next = index + folio_nr_pages(folio);
+ folio_redirty_for_writepage(wbc, folio);
+ folio_end_writeback(folio);
+ folio_put(folio);
+ } while (index = next, index <= last);
_leave("");
}
@@ -261,7 +244,7 @@ static void afs_redirty_pages(struct writeback_control *wbc,
static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsigned int len)
{
struct address_space *mapping = vnode->vfs_inode.i_mapping;
- struct page *page;
+ struct folio *folio;
pgoff_t end;
XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);
@@ -272,15 +255,16 @@ static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsign
rcu_read_lock();
end = (start + len - 1) / PAGE_SIZE;
- xas_for_each(&xas, page, end) {
- if (!PageWriteback(page)) {
- kdebug("bad %x @%llx page %lx %lx", len, start, page->index, end);
- ASSERT(PageWriteback(page));
+ xas_for_each(&xas, folio, end) {
+ if (!folio_test_writeback(folio)) {
+ kdebug("bad %x @%llx page %lx %lx",
+ len, start, folio_index(folio), end);
+ ASSERT(folio_test_writeback(folio));
}
- trace_afs_page_dirty(vnode, tracepoint_string("clear"), page);
- detach_page_private(page);
- page_endio(page, true, 0);
+ trace_afs_folio_dirty(vnode, tracepoint_string("clear"), folio);
+ folio_detach_private(folio);
+ folio_end_writeback(folio);
}
rcu_read_unlock();
@@ -437,7 +421,7 @@ static void afs_extend_writeback(struct address_space *mapping,
unsigned int *_len)
{
struct pagevec pvec;
- struct page *page;
+ struct folio *folio;
unsigned long priv;
unsigned int psize, filler = 0;
unsigned int f, t;
@@ -456,43 +440,43 @@ static void afs_extend_writeback(struct address_space *mapping,
*/
rcu_read_lock();
- xas_for_each(&xas, page, ULONG_MAX) {
+ xas_for_each(&xas, folio, ULONG_MAX) {
stop = true;
- if (xas_retry(&xas, page))
+ if (xas_retry(&xas, folio))
continue;
- if (xa_is_value(page))
+ if (xa_is_value(folio))
break;
- if (page->index != index)
+ if (folio_index(folio) != index)
break;
- if (!page_cache_get_speculative(page)) {
+ if (!folio_try_get_rcu(folio)) {
xas_reset(&xas);
continue;
}
/* Has the page moved or been split? */
- if (unlikely(page != xas_reload(&xas))) {
- put_page(page);
+ if (unlikely(folio != xas_reload(&xas))) {
+ folio_put(folio);
break;
}
- if (!trylock_page(page)) {
- put_page(page);
+ if (!folio_trylock(folio)) {
+ folio_put(folio);
break;
}
- if (!PageDirty(page) || PageWriteback(page)) {
- unlock_page(page);
- put_page(page);
+ if (!folio_test_dirty(folio) || folio_test_writeback(folio)) {
+ folio_unlock(folio);
+ folio_put(folio);
break;
}
- psize = thp_size(page);
- priv = page_private(page);
- f = afs_page_dirty_from(page, priv);
- t = afs_page_dirty_to(page, priv);
+ psize = folio_size(folio);
+ priv = (unsigned long)folio_get_private(folio);
+ f = afs_folio_dirty_from(folio, priv);
+ t = afs_folio_dirty_to(folio, priv);
if (f != 0 && !new_content) {
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
break;
}
@@ -503,8 +487,8 @@ static void afs_extend_writeback(struct address_space *mapping,
else if (t == psize || new_content)
stop = false;
- index += thp_nr_pages(page);
- if (!pagevec_add(&pvec, page))
+ index += folio_nr_pages(folio);
+ if (!pagevec_add(&pvec, &folio->page))
break;
if (stop)
break;
@@ -521,16 +505,16 @@ static void afs_extend_writeback(struct address_space *mapping,
break;
for (i = 0; i < pagevec_count(&pvec); i++) {
- page = pvec.pages[i];
- trace_afs_page_dirty(vnode, tracepoint_string("store+"), page);
+ folio = page_folio(pvec.pages[i]);
+ trace_afs_folio_dirty(vnode, tracepoint_string("store+"), folio);
- if (!clear_page_dirty_for_io(page))
+ if (!folio_clear_dirty_for_io(folio))
BUG();
- if (test_set_page_writeback(page))
+ if (folio_start_writeback(folio))
BUG();
- *_count -= thp_nr_pages(page);
- unlock_page(page);
+ *_count -= folio_nr_pages(folio);
+ folio_unlock(folio);
}
pagevec_release(&pvec);
@@ -544,10 +528,10 @@ static void afs_extend_writeback(struct address_space *mapping,
* Synchronously write back the locked page and any subsequent non-locked dirty
* pages.
*/
-static ssize_t afs_write_back_from_locked_page(struct address_space *mapping,
- struct writeback_control *wbc,
- struct page *page,
- loff_t start, loff_t end)
+static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct folio *folio,
+ loff_t start, loff_t end)
{
struct afs_vnode *vnode = AFS_FS_I(mapping->host);
struct iov_iter iter;
@@ -558,22 +542,22 @@ static ssize_t afs_write_back_from_locked_page(struct address_space *mapping,
long count = wbc->nr_to_write;
int ret;
- _enter(",%lx,%llx-%llx", page->index, start, end);
+ _enter(",%lx,%llx-%llx", folio_index(folio), start, end);
- if (test_set_page_writeback(page))
+ if (folio_start_writeback(folio))
BUG();
- count -= thp_nr_pages(page);
+ count -= folio_nr_pages(folio);
/* Find all consecutive lockable dirty pages that have contiguous
* written regions, stopping when we find a page that is not
* immediately lockable, is not dirty or is missing, or we reach the
* end of the range.
*/
- priv = page_private(page);
- offset = afs_page_dirty_from(page, priv);
- to = afs_page_dirty_to(page, priv);
- trace_afs_page_dirty(vnode, tracepoint_string("store"), page);
+ priv = (unsigned long)folio_get_private(folio);
+ offset = afs_folio_dirty_from(folio, priv);
+ to = afs_folio_dirty_to(folio, priv);
+ trace_afs_folio_dirty(vnode, tracepoint_string("store"), folio);
len = to - offset;
start += offset;
@@ -586,7 +570,7 @@ static ssize_t afs_write_back_from_locked_page(struct address_space *mapping,
max_len = min_t(unsigned long long, max_len, i_size - start);
if (len < max_len &&
- (to == thp_size(page) || new_content))
+ (to == folio_size(folio) || new_content))
afs_extend_writeback(mapping, vnode, &count,
start, max_len, new_content, &len);
len = min_t(loff_t, len, max_len);
@@ -596,7 +580,7 @@ static ssize_t afs_write_back_from_locked_page(struct address_space *mapping,
* set; the first page is still locked at this point, but all the rest
* have been unlocked.
*/
- unlock_page(page);
+ folio_unlock(folio);
if (start < i_size) {
_debug("write back %x @%llx [%llx]", len, start, i_size);
@@ -657,16 +641,17 @@ static ssize_t afs_write_back_from_locked_page(struct address_space *mapping,
* write a page back to the server
* - the caller locked the page for us
*/
-int afs_writepage(struct page *page, struct writeback_control *wbc)
+int afs_writepage(struct page *subpage, struct writeback_control *wbc)
{
+ struct folio *folio = page_folio(subpage);
ssize_t ret;
loff_t start;
- _enter("{%lx},", page->index);
+ _enter("{%lx},", folio_index(folio));
- start = page->index * PAGE_SIZE;
- ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
- start, LLONG_MAX - start);
+ start = folio_index(folio) * PAGE_SIZE;
+ ret = afs_write_back_from_locked_folio(folio_mapping(folio), wbc,
+ folio, start, LLONG_MAX - start);
if (ret < 0) {
_leave(" = %zd", ret);
return ret;
@@ -683,7 +668,8 @@ static int afs_writepages_region(struct address_space *mapping,
struct writeback_control *wbc,
loff_t start, loff_t end, loff_t *_next)
{
- struct page *page;
+ struct folio *folio;
+ struct page *head_page;
ssize_t ret;
int n;
@@ -693,13 +679,14 @@ static int afs_writepages_region(struct address_space *mapping,
pgoff_t index = start / PAGE_SIZE;
n = find_get_pages_range_tag(mapping, &index, end / PAGE_SIZE,
- PAGECACHE_TAG_DIRTY, 1, &page);
+ PAGECACHE_TAG_DIRTY, 1, &head_page);
if (!n)
break;
- start = (loff_t)page->index * PAGE_SIZE; /* May regress with THPs */
+ folio = page_folio(head_page);
+ start = folio_pos(folio); /* May regress with THPs */
- _debug("wback %lx", page->index);
+ _debug("wback %lx", folio_index(folio));
/* At this point we hold neither the i_pages lock nor the
* page lock: the page may be truncated or invalidated
@@ -707,37 +694,38 @@ static int afs_writepages_region(struct address_space *mapping,
* back from swapper_space to tmpfs file mapping
*/
if (wbc->sync_mode != WB_SYNC_NONE) {
- ret = lock_page_killable(page);
+ ret = folio_lock_killable(folio);
if (ret < 0) {
- put_page(page);
+ folio_put(folio);
return ret;
}
} else {
- if (!trylock_page(page)) {
- put_page(page);
+ if (!folio_trylock(folio)) {
+ folio_put(folio);
return 0;
}
}
- if (page->mapping != mapping || !PageDirty(page)) {
- start += thp_size(page);
- unlock_page(page);
- put_page(page);
+ if (folio_mapping(folio) != mapping ||
+ !folio_test_dirty(folio)) {
+ start += folio_size(folio);
+ folio_unlock(folio);
+ folio_put(folio);
continue;
}
- if (PageWriteback(page)) {
- unlock_page(page);
+ if (folio_test_writeback(folio)) {
+ folio_unlock(folio);
if (wbc->sync_mode != WB_SYNC_NONE)
- wait_on_page_writeback(page);
- put_page(page);
+ folio_wait_writeback(folio);
+ folio_put(folio);
continue;
}
- if (!clear_page_dirty_for_io(page))
+ if (!folio_clear_dirty_for_io(folio))
BUG();
- ret = afs_write_back_from_locked_page(mapping, wbc, page, start, end);
- put_page(page);
+ ret = afs_write_back_from_locked_folio(mapping, wbc, folio, start, end);
+ folio_put(folio);
if (ret < 0) {
_leave(" = %zd", ret);
return ret;
@@ -862,7 +850,6 @@ int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
struct folio *folio = page_folio(vmf->page);
- struct page *page = &folio->page;
struct file *file = vmf->vma->vm_file;
struct inode *inode = file_inode(file);
struct afs_vnode *vnode = AFS_FS_I(inode);
@@ -870,7 +857,7 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
unsigned long priv;
vm_fault_t ret = VM_FAULT_RETRY;
- _enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, page->index);
+ _enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, folio_index(folio));
afs_validate(vnode, af->key);
@@ -880,18 +867,18 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
* be modified. We then assume the entire page will need writing back.
*/
#ifdef CONFIG_AFS_FSCACHE
- if (PageFsCache(page) &&
- wait_on_page_fscache_killable(page) < 0)
+ if (folio_test_fscache(folio) &&
+ folio_wait_fscache_killable(folio) < 0)
goto out;
#endif
if (folio_wait_writeback_killable(folio))
goto out;
- if (lock_page_killable(page) < 0)
+ if (folio_lock_killable(folio) < 0)
goto out;
- /* We mustn't change page->private until writeback is complete as that
+ /* We mustn't change folio->private until writeback is complete as that
* details the portion of the page we need to write back and we might
* need to redirty the page if there's a problem.
*/
@@ -900,14 +887,14 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
goto out;
}
- priv = afs_page_dirty(page, 0, thp_size(page));
- priv = afs_page_dirty_mmapped(priv);
- if (PagePrivate(page)) {
- set_page_private(page, priv);
- trace_afs_page_dirty(vnode, tracepoint_string("mkwrite+"), page);
+ priv = afs_folio_dirty(folio, 0, folio_size(folio));
+ priv = afs_folio_dirty_mmapped(priv);
+ if (folio_test_private(folio)) {
+ folio_change_private(folio, (void *)priv);
+ trace_afs_folio_dirty(vnode, tracepoint_string("mkwrite+"), folio);
} else {
- attach_page_private(page, (void *)priv);
- trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"), page);
+ folio_attach_private(folio, (void *)priv);
+ trace_afs_folio_dirty(vnode, tracepoint_string("mkwrite"), folio);
}
file_update_time(file);
@@ -948,38 +935,38 @@ void afs_prune_wb_keys(struct afs_vnode *vnode)
/*
* Clean up a page during invalidation.
*/
-int afs_launder_page(struct page *page)
+int afs_launder_page(struct page *subpage)
{
- struct address_space *mapping = page->mapping;
- struct afs_vnode *vnode = AFS_FS_I(mapping->host);
+ struct folio *folio = page_folio(subpage);
+ struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
struct iov_iter iter;
struct bio_vec bv[1];
unsigned long priv;
unsigned int f, t;
int ret = 0;
- _enter("{%lx}", page->index);
+ _enter("{%lx}", folio_index(folio));
- priv = page_private(page);
- if (clear_page_dirty_for_io(page)) {
+ priv = (unsigned long)folio_get_private(folio);
+ if (folio_clear_dirty_for_io(folio)) {
f = 0;
- t = thp_size(page);
- if (PagePrivate(page)) {
- f = afs_page_dirty_from(page, priv);
- t = afs_page_dirty_to(page, priv);
+ t = folio_size(folio);
+ if (folio_test_private(folio)) {
+ f = afs_folio_dirty_from(folio, priv);
+ t = afs_folio_dirty_to(folio, priv);
}
- bv[0].bv_page = page;
+ bv[0].bv_page = &folio->page;
bv[0].bv_offset = f;
bv[0].bv_len = t - f;
iov_iter_bvec(&iter, WRITE, bv, 1, bv[0].bv_len);
- trace_afs_page_dirty(vnode, tracepoint_string("launder"), page);
- ret = afs_store_data(vnode, &iter, page_offset(page) + f, true);
+ trace_afs_folio_dirty(vnode, tracepoint_string("launder"), folio);
+ ret = afs_store_data(vnode, &iter, folio_pos(folio) + f, true);
}
- trace_afs_page_dirty(vnode, tracepoint_string("laundered"), page);
- detach_page_private(page);
- wait_on_page_fscache(page);
+ trace_afs_folio_dirty(vnode, tracepoint_string("laundered"), folio);
+ folio_detach_private(folio);
+ folio_wait_fscache(folio);
return ret;
}
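
A condensed, hypothetical helper illustrating the completion-side pattern the afs page-to-folio conversion above adopts: walk the written-back range under RCU, assert each folio is still under writeback, detach the private dirty-region cookie, and end writeback (folio_end_writeback() replaces the old page_endio(page, true, 0)). The function name and range handling are illustrative; only the folio/xarray calls are taken from the hunks above.

#include <linux/pagemap.h>
#include <linux/xarray.h>

/* Sketch only: mark folios covering [start, start + len) as written back. */
static void example_writes_done(struct address_space *mapping,
                                loff_t start, unsigned int len)
{
        struct folio *folio;
        pgoff_t end = (start + len - 1) / PAGE_SIZE;
        XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);

        rcu_read_lock();
        xas_for_each(&xas, folio, end) {
                /* One iteration per folio, however many pages it spans. */
                WARN_ON(!folio_test_writeback(folio));
                folio_detach_private(folio);    /* drop the dirty-region cookie */
                folio_end_writeback(folio);     /* wakes waiters, clears the flag */
        }
        rcu_read_unlock();
}
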
diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
index f06b68040352e6..fc42dd0badd7c6 100644
--- a/fs/btrfs/zstd.c
+++ b/fs/btrfs/zstd.c
@@ -28,10 +28,10 @@
/* 307s to avoid pathologically clashing with transaction commit */
#define ZSTD_BTRFS_RECLAIM_JIFFIES (307 * HZ)
-static ZSTD_parameters zstd_get_btrfs_parameters(unsigned int level,
+static zstd_parameters zstd_get_btrfs_parameters(unsigned int level,
size_t src_len)
{
- ZSTD_parameters params = ZSTD_getParams(level, src_len, 0);
+ zstd_parameters params = zstd_get_params(level, src_len);
if (params.cParams.windowLog > ZSTD_BTRFS_MAX_WINDOWLOG)
params.cParams.windowLog = ZSTD_BTRFS_MAX_WINDOWLOG;
@@ -48,8 +48,8 @@ struct workspace {
unsigned long last_used; /* jiffies */
struct list_head list;
struct list_head lru_list;
- ZSTD_inBuffer in_buf;
- ZSTD_outBuffer out_buf;
+ zstd_in_buffer in_buf;
+ zstd_out_buffer out_buf;
};
/*
@@ -155,12 +155,12 @@ static void zstd_calc_ws_mem_sizes(void)
unsigned int level;
for (level = 1; level <= ZSTD_BTRFS_MAX_LEVEL; level++) {
- ZSTD_parameters params =
+ zstd_parameters params =
zstd_get_btrfs_parameters(level, ZSTD_BTRFS_MAX_INPUT);
size_t level_size =
max_t(size_t,
- ZSTD_CStreamWorkspaceBound(params.cParams),
- ZSTD_DStreamWorkspaceBound(ZSTD_BTRFS_MAX_INPUT));
+ zstd_cstream_workspace_bound(&params.cParams),
+ zstd_dstream_workspace_bound(ZSTD_BTRFS_MAX_INPUT));
max_size = max_t(size_t, max_size, level_size);
zstd_ws_mem_sizes[level - 1] = max_size;
@@ -371,7 +371,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
unsigned long *total_in, unsigned long *total_out)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
- ZSTD_CStream *stream;
+ zstd_cstream *stream;
int ret = 0;
int nr_pages = 0;
struct page *in_page = NULL; /* The current page to read */
@@ -381,7 +381,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
unsigned long len = *total_out;
const unsigned long nr_dest_pages = *out_pages;
unsigned long max_out = nr_dest_pages * PAGE_SIZE;
- ZSTD_parameters params = zstd_get_btrfs_parameters(workspace->req_level,
+ zstd_parameters params = zstd_get_btrfs_parameters(workspace->req_level,
len);
*out_pages = 0;
@@ -389,10 +389,10 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
*total_in = 0;
/* Initialize the stream */
- stream = ZSTD_initCStream(params, len, workspace->mem,
+ stream = zstd_init_cstream(&params, len, workspace->mem,
workspace->size);
if (!stream) {
- pr_warn("BTRFS: ZSTD_initCStream failed\n");
+ pr_warn("BTRFS: zstd_init_cstream failed\n");
ret = -EIO;
goto out;
}
@@ -418,11 +418,11 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
while (1) {
size_t ret2;
- ret2 = ZSTD_compressStream(stream, &workspace->out_buf,
+ ret2 = zstd_compress_stream(stream, &workspace->out_buf,
&workspace->in_buf);
- if (ZSTD_isError(ret2)) {
- pr_debug("BTRFS: ZSTD_compressStream returned %d\n",
- ZSTD_getErrorCode(ret2));
+ if (zstd_is_error(ret2)) {
+ pr_debug("BTRFS: zstd_compress_stream returned %d\n",
+ zstd_get_error_code(ret2));
ret = -EIO;
goto out;
}
@@ -487,10 +487,10 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
while (1) {
size_t ret2;
- ret2 = ZSTD_endStream(stream, &workspace->out_buf);
- if (ZSTD_isError(ret2)) {
- pr_debug("BTRFS: ZSTD_endStream returned %d\n",
- ZSTD_getErrorCode(ret2));
+ ret2 = zstd_end_stream(stream, &workspace->out_buf);
+ if (zstd_is_error(ret2)) {
+ pr_debug("BTRFS: zstd_end_stream returned %d\n",
+ zstd_get_error_code(ret2));
ret = -EIO;
goto out;
}
@@ -548,17 +548,17 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
struct workspace *workspace = list_entry(ws, struct workspace, list);
struct page **pages_in = cb->compressed_pages;
size_t srclen = cb->compressed_len;
- ZSTD_DStream *stream;
+ zstd_dstream *stream;
int ret = 0;
unsigned long page_in_index = 0;
unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
unsigned long buf_start;
unsigned long total_out = 0;
- stream = ZSTD_initDStream(
+ stream = zstd_init_dstream(
ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
if (!stream) {
- pr_debug("BTRFS: ZSTD_initDStream failed\n");
+ pr_debug("BTRFS: zstd_init_dstream failed\n");
ret = -EIO;
goto done;
}
@@ -574,11 +574,11 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
while (1) {
size_t ret2;
- ret2 = ZSTD_decompressStream(stream, &workspace->out_buf,
+ ret2 = zstd_decompress_stream(stream, &workspace->out_buf,
&workspace->in_buf);
- if (ZSTD_isError(ret2)) {
- pr_debug("BTRFS: ZSTD_decompressStream returned %d\n",
- ZSTD_getErrorCode(ret2));
+ if (zstd_is_error(ret2)) {
+ pr_debug("BTRFS: zstd_decompress_stream returned %d\n",
+ zstd_get_error_code(ret2));
ret = -EIO;
goto done;
}
@@ -624,16 +624,16 @@ int zstd_decompress(struct list_head *ws, unsigned char *data_in,
size_t destlen)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
- ZSTD_DStream *stream;
+ zstd_dstream *stream;
int ret = 0;
size_t ret2;
unsigned long total_out = 0;
unsigned long pg_offset = 0;
- stream = ZSTD_initDStream(
+ stream = zstd_init_dstream(
ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
if (!stream) {
- pr_warn("BTRFS: ZSTD_initDStream failed\n");
+ pr_warn("BTRFS: zstd_init_dstream failed\n");
ret = -EIO;
goto finish;
}
@@ -657,15 +657,15 @@ int zstd_decompress(struct list_head *ws, unsigned char *data_in,
/* Check if the frame is over and we still need more input */
if (ret2 == 0) {
- pr_debug("BTRFS: ZSTD_decompressStream ended early\n");
+ pr_debug("BTRFS: zstd_decompress_stream ended early\n");
ret = -EIO;
goto finish;
}
- ret2 = ZSTD_decompressStream(stream, &workspace->out_buf,
+ ret2 = zstd_decompress_stream(stream, &workspace->out_buf,
&workspace->in_buf);
- if (ZSTD_isError(ret2)) {
- pr_debug("BTRFS: ZSTD_decompressStream returned %d\n",
- ZSTD_getErrorCode(ret2));
+ if (zstd_is_error(ret2)) {
+ pr_debug("BTRFS: zstd_decompress_stream returned %d\n",
+ zstd_get_error_code(ret2));
ret = -EIO;
goto finish;
}
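
The btrfs hunks above follow the kernel-wide rename from the upstream ZSTD_* entry points to kernel-style zstd_* wrappers. A minimal sketch of the renamed streaming-compression flow, assuming a caller-supplied workspace sized with zstd_cstream_workspace_bound(); the compression level and error returns are placeholders, and only the zstd_* calls that appear in the diff are relied on.

#include <linux/zstd.h>

/* Sketch only: compress src into dst using the kernel zstd_* API. */
static int example_zstd_compress(const void *src, size_t src_len,
                                 void *dst, size_t dst_len,
                                 void *workspace, size_t workspace_size)
{
        zstd_parameters params = zstd_get_params(3, src_len);  /* level 3 as a placeholder */
        zstd_in_buffer in = { .src = src, .size = src_len, .pos = 0 };
        zstd_out_buffer out = { .dst = dst, .size = dst_len, .pos = 0 };
        zstd_cstream *stream;
        size_t ret;

        stream = zstd_init_cstream(&params, src_len, workspace, workspace_size);
        if (!stream)
                return -EIO;

        ret = zstd_compress_stream(stream, &out, &in);
        if (zstd_is_error(ret))
                return -EIO;

        ret = zstd_end_stream(stream, &out);
        if (zstd_is_error(ret) || ret != 0)     /* ret != 0: dst was too small */
                return -EIO;

        return out.pos;                         /* compressed size in bytes */
}
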
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 99b80b5c7a931c..e53c8541f5b234 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -63,7 +63,7 @@
(CONGESTION_ON_THRESH(congestion_kb) >> 2))
static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len,
- struct page *page, void **_fsdata);
+ struct folio *folio, void **_fsdata);
static inline struct ceph_snap_context *page_snap_context(struct page *page)
{
@@ -317,13 +317,14 @@ static const struct netfs_read_request_ops ceph_netfs_read_ops = {
};
/* read a single page, without unlocking it. */
-static int ceph_readpage(struct file *file, struct page *page)
+static int ceph_readpage(struct file *file, struct page *subpage)
{
+ struct folio *folio = page_folio(subpage);
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_vino vino = ceph_vino(inode);
- u64 off = page_offset(page);
- u64 len = thp_size(page);
+ size_t len = folio_size(folio);
+ u64 off = folio_file_pos(folio);
if (ci->i_inline_version != CEPH_INLINE_NONE) {
/*
@@ -331,19 +332,19 @@ static int ceph_readpage(struct file *file, struct page *page)
* into page cache while getting Fcr caps.
*/
if (off == 0) {
- unlock_page(page);
+ folio_unlock(folio);
return -EINVAL;
}
- zero_user_segment(page, 0, thp_size(page));
- SetPageUptodate(page);
- unlock_page(page);
+ zero_user_segment(&folio->page, 0, folio_size(folio));
+ folio_mark_uptodate(folio);
+ folio_unlock(folio);
return 0;
}
- dout("readpage ino %llx.%llx file %p off %llu len %llu page %p index %lu\n",
- vino.ino, vino.snap, file, off, len, page, page->index);
+ dout("readpage ino %llx.%llx file %p off %llu len %zu folio %p index %lu\n",
+ vino.ino, vino.snap, file, off, len, folio, folio_index(folio));
- return netfs_readpage(file, page, &ceph_netfs_read_ops, NULL);
+ return netfs_readpage(file, folio, &ceph_netfs_read_ops, NULL);
}
static void ceph_readahead(struct readahead_control *ractl)
@@ -724,7 +725,7 @@ static int ceph_writepages_start(struct address_space *mapping,
wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
(wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));
- if (READ_ONCE(fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) {
+ if (ceph_inode_is_shutdown(inode)) {
if (ci->i_wrbuffer_ref > 0) {
pr_warn_ratelimited(
"writepage_start %p %lld forced umount\n",
@@ -1145,12 +1146,12 @@ static struct ceph_snap_context *
ceph_find_incompatible(struct page *page)
{
struct inode *inode = page->mapping->host;
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
- if (READ_ONCE(fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) {
- dout(" page %p forced umount\n", page);
- return ERR_PTR(-EIO);
+ if (ceph_inode_is_shutdown(inode)) {
+ dout(" page %p %llx:%llx is shutdown\n", page,
+ ceph_vinop(inode));
+ return ERR_PTR(-ESTALE);
}
for (;;) {
@@ -1187,18 +1188,18 @@ ceph_find_incompatible(struct page *page)
}
static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len,
- struct page *page, void **_fsdata)
+ struct folio *folio, void **_fsdata)
{
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_snap_context *snapc;
- snapc = ceph_find_incompatible(page);
+ snapc = ceph_find_incompatible(folio_page(folio, 0));
if (snapc) {
int r;
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
if (IS_ERR(snapc))
return PTR_ERR(snapc);
@@ -1216,12 +1217,12 @@ static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned
* clean, or already dirty within the same snap context.
*/
static int ceph_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len, unsigned aop_flags,
struct page **pagep, void **fsdata)
{
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
- struct page *page = NULL;
+ struct folio *folio = NULL;
pgoff_t index = pos >> PAGE_SHIFT;
int r;
@@ -1230,39 +1231,43 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
* for inline_version sent to the MDS.
*/
if (ci->i_inline_version != CEPH_INLINE_NONE) {
- page = grab_cache_page_write_begin(mapping, index, flags);
- if (!page)
+ unsigned int fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;
+ if (aop_flags & AOP_FLAG_NOFS)
+ fgp_flags |= FGP_NOFS;
+ folio = __filemap_get_folio(mapping, index, fgp_flags,
+ mapping_gfp_mask(mapping));
+ if (!folio)
return -ENOMEM;
/*
* The inline_version on a new inode is set to 1. If that's the
- * case, then the page is brand new and isn't yet Uptodate.
+ * case, then the folio is brand new and isn't yet Uptodate.
*/
r = 0;
if (index == 0 && ci->i_inline_version != 1) {
- if (!PageUptodate(page)) {
+ if (!folio_test_uptodate(folio)) {
WARN_ONCE(1, "ceph: write_begin called on still-inlined inode (inline_version %llu)!\n",
ci->i_inline_version);
r = -EINVAL;
}
goto out;
}
- zero_user_segment(page, 0, thp_size(page));
- SetPageUptodate(page);
+ zero_user_segment(&folio->page, 0, folio_size(folio));
+ folio_mark_uptodate(folio);
goto out;
}
- r = netfs_write_begin(file, inode->i_mapping, pos, len, 0, &page, NULL,
+ r = netfs_write_begin(file, inode->i_mapping, pos, len, 0, &folio, NULL,
&ceph_netfs_read_ops, NULL);
out:
if (r == 0)
- wait_on_page_fscache(page);
+ folio_wait_fscache(folio);
if (r < 0) {
- if (page)
- put_page(page);
+ if (folio)
+ folio_put(folio);
} else {
- WARN_ON_ONCE(!PageLocked(page));
- *pagep = page;
+ WARN_ON_ONCE(!folio_test_locked(folio));
+ *pagep = &folio->page;
}
return r;
}
@@ -1273,32 +1278,33 @@ out:
*/
static int ceph_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ struct page *subpage, void *fsdata)
{
+ struct folio *folio = page_folio(subpage);
struct inode *inode = file_inode(file);
bool check_cap = false;
- dout("write_end file %p inode %p page %p %d~%d (%d)\n", file,
- inode, page, (int)pos, (int)copied, (int)len);
+ dout("write_end file %p inode %p folio %p %d~%d (%d)\n", file,
+ inode, folio, (int)pos, (int)copied, (int)len);
- if (!PageUptodate(page)) {
+ if (!folio_test_uptodate(folio)) {
/* just return that nothing was copied on a short copy */
if (copied < len) {
copied = 0;
goto out;
}
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
}
/* did file size increase? */
if (pos+copied > i_size_read(inode))
check_cap = ceph_inode_set_size(inode, pos+copied);
- set_page_dirty(page);
+ folio_mark_dirty(folio);
out:
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
if (check_cap)
ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL);
@@ -1306,17 +1312,6 @@ out:
return copied;
}
-/*
- * we set .direct_IO to indicate direct io is supported, but since we
- * intercept O_DIRECT reads and writes early, this function should
- * never get called.
- */
-static ssize_t ceph_direct_io(struct kiocb *iocb, struct iov_iter *iter)
-{
- WARN_ON(1);
- return -EINVAL;
-}
-
const struct address_space_operations ceph_aops = {
.readpage = ceph_readpage,
.readahead = ceph_readahead,
@@ -1327,7 +1322,7 @@ const struct address_space_operations ceph_aops = {
.set_page_dirty = ceph_set_page_dirty,
.invalidatepage = ceph_invalidatepage,
.releasepage = ceph_releasepage,
- .direct_IO = ceph_direct_io,
+ .direct_IO = noop_direct_IO,
};
static void ceph_block_sigs(sigset_t *oldset)
@@ -1356,6 +1351,9 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
sigset_t oldset;
vm_fault_t ret = VM_FAULT_SIGBUS;
+ if (ceph_inode_is_shutdown(inode))
+ return ret;
+
ceph_block_sigs(&oldset);
dout("filemap_fault %p %llx.%llx %llu trying to get caps\n",
@@ -1447,6 +1445,9 @@ static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
sigset_t oldset;
vm_fault_t ret = VM_FAULT_SIGBUS;
+ if (ceph_inode_is_shutdown(inode))
+ return ret;
+
prealloc_cf = ceph_alloc_cap_flush();
if (!prealloc_cf)
return VM_FAULT_OOM;
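
For the write_begin change above, grab_cache_page_write_begin() is replaced with a direct __filemap_get_folio() lookup so the inline-data path deals in folios as well. A stripped-down sketch of that lookup, using only the FGP flags shown in the hunk; the function name is illustrative.

#include <linux/pagemap.h>

/* Sketch only: return a locked, write-stable folio for write_begin. */
static struct folio *example_grab_write_folio(struct address_space *mapping,
                                              pgoff_t index, unsigned int aop_flags)
{
        unsigned int fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;

        if (aop_flags & AOP_FLAG_NOFS)
                fgp_flags |= FGP_NOFS;  /* caller cannot recurse into the fs */

        /* NULL on allocation failure; otherwise the folio comes back locked. */
        return __filemap_get_folio(mapping, index, fgp_flags,
                                   mapping_gfp_mask(mapping));
}
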
diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c
index 9cfadbb8656855..457afda5498aa4 100644
--- a/fs/ceph/cache.c
+++ b/fs/ceph/cache.c
@@ -12,12 +12,6 @@
#include "super.h"
#include "cache.h"
-struct ceph_aux_inode {
- u64 version;
- u64 mtime_sec;
- u64 mtime_nsec;
-};
-
struct fscache_netfs ceph_cache_netfs = {
.name = "ceph",
.version = 0,
@@ -109,20 +103,14 @@ static enum fscache_checkaux ceph_fscache_inode_check_aux(
void *cookie_netfs_data, const void *data, uint16_t dlen,
loff_t object_size)
{
- struct ceph_aux_inode aux;
struct ceph_inode_info* ci = cookie_netfs_data;
struct inode* inode = &ci->vfs_inode;
- if (dlen != sizeof(aux) ||
+ if (dlen != sizeof(ci->i_version) ||
i_size_read(inode) != object_size)
return FSCACHE_CHECKAUX_OBSOLETE;
- memset(&aux, 0, sizeof(aux));
- aux.version = ci->i_version;
- aux.mtime_sec = inode->i_mtime.tv_sec;
- aux.mtime_nsec = inode->i_mtime.tv_nsec;
-
- if (memcmp(data, &aux, sizeof(aux)) != 0)
+ if (*(u64 *)data != ci->i_version)
return FSCACHE_CHECKAUX_OBSOLETE;
dout("ceph inode 0x%p cached okay\n", ci);
@@ -139,7 +127,6 @@ void ceph_fscache_register_inode_cookie(struct inode *inode)
{
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
- struct ceph_aux_inode aux;
/* No caching for filesystem */
if (!fsc->fscache)
@@ -151,14 +138,10 @@ void ceph_fscache_register_inode_cookie(struct inode *inode)
inode_lock_nested(inode, I_MUTEX_CHILD);
if (!ci->fscache) {
- memset(&aux, 0, sizeof(aux));
- aux.version = ci->i_version;
- aux.mtime_sec = inode->i_mtime.tv_sec;
- aux.mtime_nsec = inode->i_mtime.tv_nsec;
ci->fscache = fscache_acquire_cookie(fsc->fscache,
&ceph_fscache_inode_object_def,
&ci->i_vino, sizeof(ci->i_vino),
- &aux, sizeof(aux),
+ &ci->i_version, sizeof(ci->i_version),
ci, i_size_read(inode), false);
}
inode_unlock(inode);
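
With struct ceph_aux_inode removed, fscache coherency for ceph is keyed on the inode's change attribute alone. A hypothetical check_aux callback written against that simplified layout (ceph_inode_info and the FSCACHE_CHECKAUX_* values come from the existing headers; the function name is illustrative):

/* Sketch only: the aux blob is now a bare u64 i_version. */
static enum fscache_checkaux example_check_aux(void *cookie_netfs_data,
                                               const void *data, uint16_t dlen,
                                               loff_t object_size)
{
        struct ceph_inode_info *ci = cookie_netfs_data;

        /* Wrong aux length or object size mismatch: treat as stale. */
        if (dlen != sizeof(ci->i_version) ||
            i_size_read(&ci->vfs_inode) != object_size)
                return FSCACHE_CHECKAUX_OBSOLETE;

        /* One 64-bit compare replaces the old memcmp() of version + mtime. */
        if (*(const u64 *)data != ci->i_version)
                return FSCACHE_CHECKAUX_OBSOLETE;

        return FSCACHE_CHECKAUX_OKAY;
}
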
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 8f537f1d9d1d34..b9460b6fb76f7c 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1188,11 +1188,11 @@ void ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
lockdep_assert_held(&ci->i_ceph_lock);
- fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
+ fsc = ceph_inode_to_client(&ci->vfs_inode);
WARN_ON_ONCE(ci->i_auth_cap == cap &&
!list_empty(&ci->i_dirty_item) &&
!fsc->blocklisted &&
- READ_ONCE(fsc->mount_state) != CEPH_MOUNT_SHUTDOWN);
+ !ceph_inode_is_shutdown(&ci->vfs_inode));
__ceph_remove_cap(cap, queue_release);
}
@@ -1968,8 +1968,8 @@ retry:
}
}
- dout("check_caps %p file_want %s used %s dirty %s flushing %s"
- " issued %s revoking %s retain %s %s%s\n", inode,
+ dout("check_caps %llx.%llx file_want %s used %s dirty %s flushing %s"
+ " issued %s revoking %s retain %s %s%s\n", ceph_vinop(inode),
ceph_cap_string(file_wanted),
ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps),
ceph_cap_string(ci->i_flushing_caps),
@@ -1990,7 +1990,8 @@ retry:
(revoking & (CEPH_CAP_FILE_CACHE|
CEPH_CAP_FILE_LAZYIO)) && /* or revoking cache */
!tried_invalidate) {
- dout("check_caps trying to invalidate on %p\n", inode);
+ dout("check_caps trying to invalidate on %llx.%llx\n",
+ ceph_vinop(inode));
if (try_nonblocking_invalidate(inode) < 0) {
dout("check_caps queuing invalidate\n");
queue_invalidate = true;
@@ -2629,9 +2630,9 @@ void ceph_take_cap_refs(struct ceph_inode_info *ci, int got,
*
* Returns 0 if caps were not able to be acquired (yet), 1 if succeed,
 * or a negative error code. There are 3 special error codes:
- * -EAGAIN: need to sleep but non-blocking is specified
- * -EFBIG: ask caller to call check_max_size() and try again.
- * -ESTALE: ask caller to call ceph_renew_caps() and try again.
+ * -EAGAIN: need to sleep but non-blocking is specified
+ * -EFBIG: ask caller to call check_max_size() and try again.
+ * -EUCLEAN: ask caller to call ceph_renew_caps() and try again.
*/
enum {
/* first 8 bits are reserved for CEPH_FILE_MODE_FOO */
@@ -2679,7 +2680,7 @@ again:
dout("get_cap_refs %p endoff %llu > maxsize %llu\n",
inode, endoff, ci->i_max_size);
if (endoff > ci->i_requested_max_size)
- ret = ci->i_auth_cap ? -EFBIG : -ESTALE;
+ ret = ci->i_auth_cap ? -EFBIG : -EUCLEAN;
goto out_unlock;
}
/*
@@ -2749,9 +2750,9 @@ again:
goto out_unlock;
}
- if (READ_ONCE(mdsc->fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) {
- dout("get_cap_refs %p forced umount\n", inode);
- ret = -EIO;
+ if (ceph_inode_is_shutdown(inode)) {
+ dout("get_cap_refs %p inode is shutdown\n", inode);
+ ret = -ESTALE;
goto out_unlock;
}
mds_wanted = __ceph_caps_mds_wanted(ci, false);
@@ -2759,7 +2760,7 @@ again:
dout("get_cap_refs %p need %s > mds_wanted %s\n",
inode, ceph_cap_string(need),
ceph_cap_string(mds_wanted));
- ret = -ESTALE;
+ ret = -EUCLEAN;
goto out_unlock;
}
@@ -2843,7 +2844,7 @@ int ceph_try_get_caps(struct inode *inode, int need, int want,
ret = try_get_cap_refs(inode, need, want, 0, flags, got);
/* three special error codes */
- if (ret == -EAGAIN || ret == -EFBIG || ret == -ESTALE)
+ if (ret == -EAGAIN || ret == -EFBIG || ret == -EUCLEAN)
ret = 0;
return ret;
}
@@ -2926,7 +2927,7 @@ int ceph_get_caps(struct file *filp, int need, int want, loff_t endoff, int *got
}
if (ret < 0) {
- if (ret == -EFBIG || ret == -ESTALE) {
+ if (ret == -EFBIG || ret == -EUCLEAN) {
int ret2 = ceph_wait_on_async_create(inode);
if (ret2 < 0)
return ret2;
@@ -2935,7 +2936,7 @@ int ceph_get_caps(struct file *filp, int need, int want, loff_t endoff, int *got
check_max_size(inode, endoff);
continue;
}
- if (ret == -ESTALE) {
+ if (ret == -EUCLEAN) {
/* session was killed, try renew caps */
ret = ceph_renew_caps(inode, flags);
if (ret == 0)
@@ -4315,7 +4316,7 @@ static void flush_dirty_session_caps(struct ceph_mds_session *s)
i_dirty_item);
inode = &ci->vfs_inode;
ihold(inode);
- dout("flush_dirty_caps %p\n", inode);
+ dout("flush_dirty_caps %llx.%llx\n", ceph_vinop(inode));
spin_unlock(&mdsc->cap_dirty_lock);
ceph_check_caps(ci, CHECK_CAPS_FLUSH, NULL);
iput(inode);
@@ -4560,3 +4561,119 @@ int ceph_encode_dentry_release(void **p, struct dentry *dentry,
spin_unlock(&dentry->d_lock);
return ret;
}
+
+static int remove_capsnaps(struct ceph_mds_client *mdsc, struct inode *inode)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_cap_snap *capsnap;
+ int capsnap_release = 0;
+
+ lockdep_assert_held(&ci->i_ceph_lock);
+
+ dout("removing capsnaps, ci is %p, inode is %p\n", ci, inode);
+
+ while (!list_empty(&ci->i_cap_snaps)) {
+ capsnap = list_first_entry(&ci->i_cap_snaps,
+ struct ceph_cap_snap, ci_item);
+ __ceph_remove_capsnap(inode, capsnap, NULL, NULL);
+ ceph_put_snap_context(capsnap->context);
+ ceph_put_cap_snap(capsnap);
+ capsnap_release++;
+ }
+ wake_up_all(&ci->i_cap_wq);
+ wake_up_all(&mdsc->cap_flushing_wq);
+ return capsnap_release;
+}
+
+int ceph_purge_inode_cap(struct inode *inode, struct ceph_cap *cap, bool *invalidate)
+{
+ struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ bool is_auth;
+ bool dirty_dropped = false;
+ int iputs = 0;
+
+ lockdep_assert_held(&ci->i_ceph_lock);
+
+ dout("removing cap %p, ci is %p, inode is %p\n",
+ cap, ci, &ci->vfs_inode);
+
+ is_auth = (cap == ci->i_auth_cap);
+ __ceph_remove_cap(cap, false);
+ if (is_auth) {
+ struct ceph_cap_flush *cf;
+
+ if (ceph_inode_is_shutdown(inode)) {
+ if (inode->i_data.nrpages > 0)
+ *invalidate = true;
+ if (ci->i_wrbuffer_ref > 0)
+ mapping_set_error(&inode->i_data, -EIO);
+ }
+
+ spin_lock(&mdsc->cap_dirty_lock);
+
+ /* trash all of the cap flushes for this inode */
+ while (!list_empty(&ci->i_cap_flush_list)) {
+ cf = list_first_entry(&ci->i_cap_flush_list,
+ struct ceph_cap_flush, i_list);
+ list_del_init(&cf->g_list);
+ list_del_init(&cf->i_list);
+ if (!cf->is_capsnap)
+ ceph_free_cap_flush(cf);
+ }
+
+ if (!list_empty(&ci->i_dirty_item)) {
+ pr_warn_ratelimited(
+ " dropping dirty %s state for %p %lld\n",
+ ceph_cap_string(ci->i_dirty_caps),
+ inode, ceph_ino(inode));
+ ci->i_dirty_caps = 0;
+ list_del_init(&ci->i_dirty_item);
+ dirty_dropped = true;
+ }
+ if (!list_empty(&ci->i_flushing_item)) {
+ pr_warn_ratelimited(
+ " dropping dirty+flushing %s state for %p %lld\n",
+ ceph_cap_string(ci->i_flushing_caps),
+ inode, ceph_ino(inode));
+ ci->i_flushing_caps = 0;
+ list_del_init(&ci->i_flushing_item);
+ mdsc->num_cap_flushing--;
+ dirty_dropped = true;
+ }
+ spin_unlock(&mdsc->cap_dirty_lock);
+
+ if (dirty_dropped) {
+ mapping_set_error(inode->i_mapping, -EIO);
+
+ if (ci->i_wrbuffer_ref_head == 0 &&
+ ci->i_wr_ref == 0 &&
+ ci->i_dirty_caps == 0 &&
+ ci->i_flushing_caps == 0) {
+ ceph_put_snap_context(ci->i_head_snapc);
+ ci->i_head_snapc = NULL;
+ }
+ }
+
+ if (atomic_read(&ci->i_filelock_ref) > 0) {
+ /* make further file lock syscall return -EIO */
+ ci->i_ceph_flags |= CEPH_I_ERROR_FILELOCK;
+ pr_warn_ratelimited(" dropping file locks for %p %lld\n",
+ inode, ceph_ino(inode));
+ }
+
+ if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) {
+ cf = ci->i_prealloc_cap_flush;
+ ci->i_prealloc_cap_flush = NULL;
+ if (!cf->is_capsnap)
+ ceph_free_cap_flush(cf);
+ }
+
+ if (!list_empty(&ci->i_cap_snaps))
+ iputs = remove_capsnaps(mdsc, inode);
+ }
+ if (dirty_dropped)
+ ++iputs;
+ return iputs;
+}
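
A related convention change runs through the caps code above: try_get_cap_refs() now returns -EUCLEAN when the caller should renew its caps, leaving -ESTALE to mean the inode has been shut down. A schematic caller loop under that convention; try_get_cap_refs(), check_max_size() and ceph_renew_caps() are the helpers named in the hunks, while the loop structure itself is illustrative and simplified from ceph_get_caps().

/* Sketch only: how the three special return codes are intended to be handled. */
static int example_wait_for_caps(struct inode *inode, int need, int want,
                                 loff_t endoff, int *got)
{
        int ret;

        for (;;) {
                ret = try_get_cap_refs(inode, need, want, endoff, 0, got);
                if (ret == -EAGAIN)
                        return ret;     /* would need to sleep; caller asked not to */
                if (ret == -EFBIG) {
                        check_max_size(inode, endoff);  /* ask the MDS for a larger max_size */
                        continue;
                }
                if (ret == -EUCLEAN) {
                        ret = ceph_renew_caps(inode, 0);        /* session died: renew caps */
                        if (ret == 0)
                                continue;
                }
                return ret;     /* 1 on success, 0 or a negative error otherwise */
        }
}
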
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index 38b78b45811f91..3cf7c9c1085b91 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -146,82 +146,93 @@ static int mdsc_show(struct seq_file *s, void *p)
name, total, avg, _min, max, sum); \
}
-static int metric_show(struct seq_file *s, void *p)
+static int metrics_file_show(struct seq_file *s, void *p)
{
struct ceph_fs_client *fsc = s->private;
- struct ceph_mds_client *mdsc = fsc->mdsc;
- struct ceph_client_metric *m = &mdsc->metric;
- int nr_caps = 0;
- s64 total, sum, avg, min, max, sq;
- u64 sum_sz, avg_sz, min_sz, max_sz;
+ struct ceph_client_metric *m = &fsc->mdsc->metric;
- sum = percpu_counter_sum(&m->total_inodes);
seq_printf(s, "item total\n");
seq_printf(s, "------------------------------------------\n");
- seq_printf(s, "%-35s%lld / %lld\n", "opened files / total inodes",
- atomic64_read(&m->opened_files), sum);
- seq_printf(s, "%-35s%lld / %lld\n", "pinned i_caps / total inodes",
- atomic64_read(&m->total_caps), sum);
- seq_printf(s, "%-35s%lld / %lld\n", "opened inodes / total inodes",
- percpu_counter_sum(&m->opened_inodes), sum);
-
- seq_printf(s, "\n");
+ seq_printf(s, "%-35s%lld\n", "total inodes",
+ percpu_counter_sum(&m->total_inodes));
+ seq_printf(s, "%-35s%lld\n", "opened files",
+ atomic64_read(&m->opened_files));
+ seq_printf(s, "%-35s%lld\n", "pinned i_caps",
+ atomic64_read(&m->total_caps));
+ seq_printf(s, "%-35s%lld\n", "opened inodes",
+ percpu_counter_sum(&m->opened_inodes));
+ return 0;
+}
+
+static const char * const metric_str[] = {
+ "read",
+ "write",
+ "metadata",
+ "copyfrom"
+};
+static int metrics_latency_show(struct seq_file *s, void *p)
+{
+ struct ceph_fs_client *fsc = s->private;
+ struct ceph_client_metric *cm = &fsc->mdsc->metric;
+ struct ceph_metric *m;
+ s64 total, sum, avg, min, max, sq;
+ int i;
+
seq_printf(s, "item total avg_lat(us) min_lat(us) max_lat(us) stdev(us)\n");
seq_printf(s, "-----------------------------------------------------------------------------------\n");
- spin_lock(&m->read_metric_lock);
- total = m->total_reads;
- sum = m->read_latency_sum;
- avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
- min = m->read_latency_min;
- max = m->read_latency_max;
- sq = m->read_latency_sq_sum;
- spin_unlock(&m->read_metric_lock);
- CEPH_LAT_METRIC_SHOW("read", total, avg, min, max, sq);
-
- spin_lock(&m->write_metric_lock);
- total = m->total_writes;
- sum = m->write_latency_sum;
- avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
- min = m->write_latency_min;
- max = m->write_latency_max;
- sq = m->write_latency_sq_sum;
- spin_unlock(&m->write_metric_lock);
- CEPH_LAT_METRIC_SHOW("write", total, avg, min, max, sq);
-
- spin_lock(&m->metadata_metric_lock);
- total = m->total_metadatas;
- sum = m->metadata_latency_sum;
- avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
- min = m->metadata_latency_min;
- max = m->metadata_latency_max;
- sq = m->metadata_latency_sq_sum;
- spin_unlock(&m->metadata_metric_lock);
- CEPH_LAT_METRIC_SHOW("metadata", total, avg, min, max, sq);
-
- seq_printf(s, "\n");
+ for (i = 0; i < METRIC_MAX; i++) {
+ m = &cm->metric[i];
+ spin_lock(&m->lock);
+ total = m->total;
+ sum = m->latency_sum;
+ avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
+ min = m->latency_min;
+ max = m->latency_max;
+ sq = m->latency_sq_sum;
+ spin_unlock(&m->lock);
+ CEPH_LAT_METRIC_SHOW(metric_str[i], total, avg, min, max, sq);
+ }
+
+ return 0;
+}
+
+static int metrics_size_show(struct seq_file *s, void *p)
+{
+ struct ceph_fs_client *fsc = s->private;
+ struct ceph_client_metric *cm = &fsc->mdsc->metric;
+ struct ceph_metric *m;
+ s64 total;
+ u64 sum, avg, min, max;
+ int i;
+
seq_printf(s, "item total avg_sz(bytes) min_sz(bytes) max_sz(bytes) total_sz(bytes)\n");
seq_printf(s, "----------------------------------------------------------------------------------------\n");
- spin_lock(&m->read_metric_lock);
- total = m->total_reads;
- sum_sz = m->read_size_sum;
- avg_sz = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum_sz, total) : 0;
- min_sz = m->read_size_min;
- max_sz = m->read_size_max;
- spin_unlock(&m->read_metric_lock);
- CEPH_SZ_METRIC_SHOW("read", total, avg_sz, min_sz, max_sz, sum_sz);
-
- spin_lock(&m->write_metric_lock);
- total = m->total_writes;
- sum_sz = m->write_size_sum;
- avg_sz = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum_sz, total) : 0;
- min_sz = m->write_size_min;
- max_sz = m->write_size_max;
- spin_unlock(&m->write_metric_lock);
- CEPH_SZ_METRIC_SHOW("write", total, avg_sz, min_sz, max_sz, sum_sz);
-
- seq_printf(s, "\n");
+ for (i = 0; i < METRIC_MAX; i++) {
+ /* skip 'metadata' as it doesn't use the size metric */
+ if (i == METRIC_METADATA)
+ continue;
+ m = &cm->metric[i];
+ spin_lock(&m->lock);
+ total = m->total;
+ sum = m->size_sum;
+ avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
+ min = m->size_min;
+ max = m->size_max;
+ spin_unlock(&m->lock);
+ CEPH_SZ_METRIC_SHOW(metric_str[i], total, avg, min, max, sum);
+ }
+
+ return 0;
+}
+
+static int metrics_caps_show(struct seq_file *s, void *p)
+{
+ struct ceph_fs_client *fsc = s->private;
+ struct ceph_client_metric *m = &fsc->mdsc->metric;
+ int nr_caps = 0;
+
seq_printf(s, "item total miss hit\n");
seq_printf(s, "-------------------------------------------------\n");
@@ -350,8 +361,11 @@ DEFINE_SHOW_ATTRIBUTE(mdsmap);
DEFINE_SHOW_ATTRIBUTE(mdsc);
DEFINE_SHOW_ATTRIBUTE(caps);
DEFINE_SHOW_ATTRIBUTE(mds_sessions);
-DEFINE_SHOW_ATTRIBUTE(metric);
DEFINE_SHOW_ATTRIBUTE(status);
+DEFINE_SHOW_ATTRIBUTE(metrics_file);
+DEFINE_SHOW_ATTRIBUTE(metrics_latency);
+DEFINE_SHOW_ATTRIBUTE(metrics_size);
+DEFINE_SHOW_ATTRIBUTE(metrics_caps);
/*
@@ -385,8 +399,9 @@ void ceph_fs_debugfs_cleanup(struct ceph_fs_client *fsc)
debugfs_remove(fsc->debugfs_mdsmap);
debugfs_remove(fsc->debugfs_mds_sessions);
debugfs_remove(fsc->debugfs_caps);
- debugfs_remove(fsc->debugfs_metric);
+ debugfs_remove(fsc->debugfs_status);
debugfs_remove(fsc->debugfs_mdsc);
+ debugfs_remove_recursive(fsc->debugfs_metrics_dir);
}
void ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
@@ -426,12 +441,6 @@ void ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
fsc,
&mdsc_fops);
- fsc->debugfs_metric = debugfs_create_file("metrics",
- 0400,
- fsc->client->debugfs_dir,
- fsc,
- &metric_fops);
-
fsc->debugfs_caps = debugfs_create_file("caps",
0400,
fsc->client->debugfs_dir,
@@ -443,6 +452,18 @@ void ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
fsc->client->debugfs_dir,
fsc,
&status_fops);
+
+ fsc->debugfs_metrics_dir = debugfs_create_dir("metrics",
+ fsc->client->debugfs_dir);
+
+ debugfs_create_file("file", 0400, fsc->debugfs_metrics_dir, fsc,
+ &metrics_file_fops);
+ debugfs_create_file("latency", 0400, fsc->debugfs_metrics_dir, fsc,
+ &metrics_latency_fops);
+ debugfs_create_file("size", 0400, fsc->debugfs_metrics_dir, fsc,
+ &metrics_size_fops);
+ debugfs_create_file("caps", 0400, fsc->debugfs_metrics_dir, fsc,
+ &metrics_caps_fops);
}
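
The debugfs rework above replaces the single "metrics" file with a "metrics/" directory holding one file per topic. The construction is the standard debugfs pattern: DEFINE_SHOW_ATTRIBUTE() wraps each seq_file show routine in a file_operations, then debugfs_create_dir() plus one debugfs_create_file() per entry. A condensed sketch, with example_show() and the entry name standing in for the real routines:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *s, void *p)
{
        seq_printf(s, "item                               total\n");
        return 0;
}
DEFINE_SHOW_ATTRIBUTE(example);         /* generates example_fops around example_show() */

static struct dentry *example_metrics_init(struct dentry *parent, void *priv)
{
        struct dentry *dir = debugfs_create_dir("metrics", parent);

        /* 0400: readable by root only, matching the ceph entries above. */
        debugfs_create_file("file", 0400, dir, priv, &example_fops);
        return dir;
}
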
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index 1d65934c12620e..e0fa66ac8b9faa 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -157,6 +157,11 @@ static struct inode *__lookup_inode(struct super_block *sb, u64 ino)
ceph_mdsc_put_request(req);
if (!inode)
return err < 0 ? ERR_PTR(err) : ERR_PTR(-ESTALE);
+ } else {
+ if (ceph_inode_is_shutdown(inode)) {
+ iput(inode);
+ return ERR_PTR(-ESTALE);
+ }
}
return inode;
}
@@ -223,8 +228,13 @@ static struct dentry *__snapfh_to_dentry(struct super_block *sb,
return ERR_PTR(-ESTALE);
inode = ceph_find_inode(sb, vino);
- if (inode)
+ if (inode) {
+ if (ceph_inode_is_shutdown(inode)) {
+ iput(inode);
+ return ERR_PTR(-ESTALE);
+ }
return d_obtain_alias(inode);
+ }
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUPINO,
USE_ANY_MDS);
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index b129ea551378c2..02a0a0fd9ccd51 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -525,6 +525,7 @@ static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
if (result) {
struct dentry *dentry = req->r_dentry;
+ struct inode *inode = d_inode(dentry);
int pathlen = 0;
u64 base = 0;
char *path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
@@ -534,7 +535,8 @@ static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
if (!d_unhashed(dentry))
d_drop(dentry);
- /* FIXME: start returning I/O errors on all accesses? */
+ ceph_inode_shutdown(inode);
+
pr_warn("ceph: async create failure path=(%llx)%s result=%d!\n",
base, IS_ERR(path) ? "<<bad>>" : path, result);
ceph_mdsc_free_path(path, pathlen);
@@ -556,7 +558,7 @@ static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
}
ceph_kick_flushing_inode_caps(req->r_session, ci);
spin_unlock(&ci->i_ceph_lock);
- } else {
+ } else if (!result) {
pr_warn("%s: no req->r_target_inode for 0x%llx\n", __func__,
req->r_deleg_ino);
}
@@ -845,6 +847,7 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
ssize_t ret;
u64 off = iocb->ki_pos;
u64 len = iov_iter_count(to);
+ u64 i_size;
dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
(file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
@@ -868,7 +871,6 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
struct page **pages;
int num_pages;
size_t page_off;
- u64 i_size;
bool more;
int idx;
size_t left;
@@ -951,11 +953,14 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
}
if (off > iocb->ki_pos) {
- if (ret >= 0 &&
- iov_iter_count(to) > 0 && off >= i_size_read(inode))
+ if (off >= i_size) {
*retry_op = CHECK_EOF;
- ret = off - iocb->ki_pos;
- iocb->ki_pos = off;
+ ret = i_size - iocb->ki_pos;
+ iocb->ki_pos = i_size;
+ } else {
+ ret = off - iocb->ki_pos;
+ iocb->ki_pos = off;
+ }
}
dout("sync_read result %zd retry_op %d\n", ret, *retry_op);
@@ -1526,6 +1531,9 @@ again:
dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);
+ if (ceph_inode_is_shutdown(inode))
+ return -ESTALE;
+
if (direct_lock)
ceph_start_io_direct(inode);
else
@@ -1678,6 +1686,9 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
loff_t pos;
loff_t limit = max(i_size_read(inode), fsc->max_file_size);
+ if (ceph_inode_is_shutdown(inode))
+ return -ESTALE;
+
if (ceph_snap(inode) != CEPH_NOSNAP)
return -EROFS;
@@ -2200,6 +2211,54 @@ static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
return 0;
}
+static struct ceph_osd_request *
+ceph_alloc_copyfrom_request(struct ceph_osd_client *osdc,
+ u64 src_snapid,
+ struct ceph_object_id *src_oid,
+ struct ceph_object_locator *src_oloc,
+ struct ceph_object_id *dst_oid,
+ struct ceph_object_locator *dst_oloc,
+ u32 truncate_seq, u64 truncate_size)
+{
+ struct ceph_osd_request *req;
+ int ret;
+ u32 src_fadvise_flags =
+ CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
+ CEPH_OSD_OP_FLAG_FADVISE_NOCACHE;
+ u32 dst_fadvise_flags =
+ CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
+ CEPH_OSD_OP_FLAG_FADVISE_DONTNEED;
+
+ req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
+ if (!req)
+ return ERR_PTR(-ENOMEM);
+
+ req->r_flags = CEPH_OSD_FLAG_WRITE;
+
+ ceph_oloc_copy(&req->r_t.base_oloc, dst_oloc);
+ ceph_oid_copy(&req->r_t.base_oid, dst_oid);
+
+ ret = osd_req_op_copy_from_init(req, src_snapid, 0,
+ src_oid, src_oloc,
+ src_fadvise_flags,
+ dst_fadvise_flags,
+ truncate_seq,
+ truncate_size,
+ CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ);
+ if (ret)
+ goto out;
+
+ ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
+ if (ret)
+ goto out;
+
+ return req;
+
+out:
+ ceph_osdc_put_request(req);
+ return ERR_PTR(ret);
+}
+
static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off,
struct ceph_inode_info *dst_ci, u64 *dst_off,
struct ceph_fs_client *fsc,
@@ -2207,6 +2266,8 @@ static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off
{
struct ceph_object_locator src_oloc, dst_oloc;
struct ceph_object_id src_oid, dst_oid;
+ struct ceph_osd_client *osdc;
+ struct ceph_osd_request *req;
size_t bytes = 0;
u64 src_objnum, src_objoff, dst_objnum, dst_objoff;
u32 src_objlen, dst_objlen;
@@ -2217,6 +2278,7 @@ static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off
src_oloc.pool_ns = ceph_try_get_string(src_ci->i_layout.pool_ns);
dst_oloc.pool = dst_ci->i_layout.pool_id;
dst_oloc.pool_ns = ceph_try_get_string(dst_ci->i_layout.pool_ns);
+ osdc = &fsc->client->osdc;
while (len >= object_size) {
ceph_calc_file_object_mapping(&src_ci->i_layout, *src_off,
@@ -2232,17 +2294,22 @@ static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off
ceph_oid_printf(&dst_oid, "%llx.%08llx",
dst_ci->i_vino.ino, dst_objnum);
/* Do an object remote copy */
- ret = ceph_osdc_copy_from(&fsc->client->osdc,
- src_ci->i_vino.snap, 0,
- &src_oid, &src_oloc,
- CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
- CEPH_OSD_OP_FLAG_FADVISE_NOCACHE,
- &dst_oid, &dst_oloc,
- CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
- CEPH_OSD_OP_FLAG_FADVISE_DONTNEED,
- dst_ci->i_truncate_seq,
- dst_ci->i_truncate_size,
- CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ);
+ req = ceph_alloc_copyfrom_request(osdc, src_ci->i_vino.snap,
+ &src_oid, &src_oloc,
+ &dst_oid, &dst_oloc,
+ dst_ci->i_truncate_seq,
+ dst_ci->i_truncate_size);
+ if (IS_ERR(req))
+ ret = PTR_ERR(req);
+ else {
+ ceph_osdc_start_request(osdc, req, false);
+ ret = ceph_osdc_wait_request(osdc, req);
+ ceph_update_copyfrom_metrics(&fsc->mdsc->metric,
+ req->r_start_latency,
+ req->r_end_latency,
+ object_size, ret);
+ ceph_osdc_put_request(req);
+ }
if (ret) {
if (ret == -EOPNOTSUPP) {
fsc->have_copy_from2 = false;
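
ceph_do_objects_copy() now assembles the OSD copy-from request in a helper and submits it open-coded, so the request's start/end latency can feed the new copy-from metric. The submit-and-account core, reduced to its essentials; every function and field named here appears in the hunk above, but the wrapper itself is illustrative and drops error handling.

/* Sketch only: submit one object copy and record its latency and size. */
static ssize_t example_copy_one_object(struct ceph_fs_client *fsc,
                                       struct ceph_osd_request *req,
                                       u64 object_size)
{
        struct ceph_osd_client *osdc = &fsc->client->osdc;
        int ret;

        ceph_osdc_start_request(osdc, req, false);      /* queue the request */
        ret = ceph_osdc_wait_request(osdc, req);        /* wait for completion */

        /* Feed the round-trip latency and size into the copyfrom bucket. */
        ceph_update_copyfrom_metrics(&fsc->mdsc->metric,
                                     req->r_start_latency, req->r_end_latency,
                                     object_size, ret);

        ceph_osdc_put_request(req);
        return ret ? ret : object_size;
}
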
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 1c7574105478fb..e3322fcb2e8d92 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1841,15 +1841,14 @@ void ceph_queue_inode_work(struct inode *inode, int work_bit)
static void ceph_do_invalidate_pages(struct inode *inode)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
u32 orig_gen;
int check = 0;
mutex_lock(&ci->i_truncate_mutex);
- if (READ_ONCE(fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) {
- pr_warn_ratelimited("invalidate_pages %p %lld forced umount\n",
- inode, ceph_ino(inode));
+ if (ceph_inode_is_shutdown(inode)) {
+ pr_warn_ratelimited("%s: inode %llx.%llx is shut down\n",
+ __func__, ceph_vinop(inode));
mapping_set_error(inode->i_mapping, -EIO);
truncate_pagecache(inode, 0);
mutex_unlock(&ci->i_truncate_mutex);
@@ -1871,7 +1870,8 @@ static void ceph_do_invalidate_pages(struct inode *inode)
ceph_fscache_invalidate(inode);
if (invalidate_inode_pages2(inode->i_mapping) < 0) {
- pr_err("invalidate_pages %p fails\n", inode);
+ pr_err("invalidate_inode_pages2 %llx.%llx failed\n",
+ ceph_vinop(inode));
}
spin_lock(&ci->i_ceph_lock);
@@ -2103,12 +2103,14 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
loff_t isize = i_size_read(inode);
dout("setattr %p size %lld -> %lld\n", inode, isize, attr->ia_size);
- if ((issued & CEPH_CAP_FILE_EXCL) && attr->ia_size > isize) {
- i_size_write(inode, attr->ia_size);
- inode->i_blocks = calc_inode_blocks(attr->ia_size);
- ci->i_reported_size = attr->ia_size;
- dirtied |= CEPH_CAP_FILE_EXCL;
- ia_valid |= ATTR_MTIME;
+ if ((issued & CEPH_CAP_FILE_EXCL) && attr->ia_size >= isize) {
+ if (attr->ia_size > isize) {
+ i_size_write(inode, attr->ia_size);
+ inode->i_blocks = calc_inode_blocks(attr->ia_size);
+ ci->i_reported_size = attr->ia_size;
+ dirtied |= CEPH_CAP_FILE_EXCL;
+ ia_valid |= ATTR_MTIME;
+ }
} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
attr->ia_size != isize) {
req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
@@ -2217,6 +2219,9 @@ int ceph_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
if (ceph_snap(inode) != CEPH_NOSNAP)
return -EROFS;
+ if (ceph_inode_is_shutdown(inode))
+ return -ESTALE;
+
err = setattr_prepare(&init_user_ns, dentry, attr);
if (err != 0)
return err;
@@ -2347,6 +2352,9 @@ int ceph_getattr(struct user_namespace *mnt_userns, const struct path *path,
u32 valid_mask = STATX_BASIC_STATS;
int err = 0;
+ if (ceph_inode_is_shutdown(inode))
+ return -ESTALE;
+
/* Skip the getattr altogether if we're asked not to sync */
if (!(flags & AT_STATX_DONT_SYNC)) {
err = ceph_do_getattr(inode,
@@ -2394,3 +2402,27 @@ int ceph_getattr(struct user_namespace *mnt_userns, const struct path *path,
stat->result_mask = request_mask & valid_mask;
return err;
}
+
+void ceph_inode_shutdown(struct inode *inode)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct rb_node *p;
+ int iputs = 0;
+ bool invalidate = false;
+
+ spin_lock(&ci->i_ceph_lock);
+ ci->i_ceph_flags |= CEPH_I_SHUTDOWN;
+ p = rb_first(&ci->i_caps);
+ while (p) {
+ struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
+
+ p = rb_next(p);
+ iputs += ceph_purge_inode_cap(inode, cap, &invalidate);
+ }
+ spin_unlock(&ci->i_ceph_lock);
+
+ if (invalidate)
+ ceph_queue_invalidate(inode);
+ while (iputs--)
+ iput(inode);
+}
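
ceph_inode_shutdown() above marks the inode with CEPH_I_SHUTDOWN and purges its caps; the ceph_inode_is_shutdown() checks added throughout this series then turn later I/O into -ESTALE. The predicate itself is not part of these hunks; a plausible reconstruction, based on the flag set here and the fsc->mount_state tests it replaces, might look like this (hypothetical, not taken from the patch):

/* Hypothetical sketch -- the real helper is expected to live in fs/ceph/super.h. */
static inline bool example_inode_is_shutdown(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);

        /* Shut down either per-inode (failed async create, purged session
         * caps, ...) or because the whole mount is being torn down. */
        return (READ_ONCE(ci->i_ceph_flags) & CEPH_I_SHUTDOWN) ||
               READ_ONCE(fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN;
}
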
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index d8c31069fbf2b9..d1f154aec249bf 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -241,6 +241,9 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
if (!(fl->fl_flags & FL_POSIX))
return -ENOLCK;
+ if (ceph_inode_is_shutdown(inode))
+ return -ESTALE;
+
dout("ceph_lock, fl_owner: %p\n", fl->fl_owner);
/* set wait bit as appropriate, then make command as Ceph expects it*/
@@ -303,6 +306,9 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
if (!(fl->fl_flags & FL_FLOCK))
return -ENOLCK;
+ if (ceph_inode_is_shutdown(inode))
+ return -ESTALE;
+
dout("ceph_flock, fl_file: %p\n", fl->fl_file);
spin_lock(&ci->i_ceph_lock);
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index d64413adc0fd24..250aad330a1062 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1590,129 +1590,23 @@ out:
return ret;
}
-static int remove_capsnaps(struct ceph_mds_client *mdsc, struct inode *inode)
-{
- struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_cap_snap *capsnap;
- int capsnap_release = 0;
-
- lockdep_assert_held(&ci->i_ceph_lock);
-
- dout("removing capsnaps, ci is %p, inode is %p\n", ci, inode);
-
- while (!list_empty(&ci->i_cap_snaps)) {
- capsnap = list_first_entry(&ci->i_cap_snaps,
- struct ceph_cap_snap, ci_item);
- __ceph_remove_capsnap(inode, capsnap, NULL, NULL);
- ceph_put_snap_context(capsnap->context);
- ceph_put_cap_snap(capsnap);
- capsnap_release++;
- }
- wake_up_all(&ci->i_cap_wq);
- wake_up_all(&mdsc->cap_flushing_wq);
- return capsnap_release;
-}
-
static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
void *arg)
{
- struct ceph_fs_client *fsc = (struct ceph_fs_client *)arg;
- struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_inode_info *ci = ceph_inode(inode);
- LIST_HEAD(to_remove);
- bool dirty_dropped = false;
bool invalidate = false;
- int capsnap_release = 0;
+ int iputs;
dout("removing cap %p, ci is %p, inode is %p\n",
cap, ci, &ci->vfs_inode);
spin_lock(&ci->i_ceph_lock);
- __ceph_remove_cap(cap, false);
- if (!ci->i_auth_cap) {
- struct ceph_cap_flush *cf;
-
- if (READ_ONCE(fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) {
- if (inode->i_data.nrpages > 0)
- invalidate = true;
- if (ci->i_wrbuffer_ref > 0)
- mapping_set_error(&inode->i_data, -EIO);
- }
-
- while (!list_empty(&ci->i_cap_flush_list)) {
- cf = list_first_entry(&ci->i_cap_flush_list,
- struct ceph_cap_flush, i_list);
- list_move(&cf->i_list, &to_remove);
- }
-
- spin_lock(&mdsc->cap_dirty_lock);
-
- list_for_each_entry(cf, &to_remove, i_list)
- list_del_init(&cf->g_list);
-
- if (!list_empty(&ci->i_dirty_item)) {
- pr_warn_ratelimited(
- " dropping dirty %s state for %p %lld\n",
- ceph_cap_string(ci->i_dirty_caps),
- inode, ceph_ino(inode));
- ci->i_dirty_caps = 0;
- list_del_init(&ci->i_dirty_item);
- dirty_dropped = true;
- }
- if (!list_empty(&ci->i_flushing_item)) {
- pr_warn_ratelimited(
- " dropping dirty+flushing %s state for %p %lld\n",
- ceph_cap_string(ci->i_flushing_caps),
- inode, ceph_ino(inode));
- ci->i_flushing_caps = 0;
- list_del_init(&ci->i_flushing_item);
- mdsc->num_cap_flushing--;
- dirty_dropped = true;
- }
- spin_unlock(&mdsc->cap_dirty_lock);
-
- if (dirty_dropped) {
- mapping_set_error(inode->i_mapping, -EIO);
-
- if (ci->i_wrbuffer_ref_head == 0 &&
- ci->i_wr_ref == 0 &&
- ci->i_dirty_caps == 0 &&
- ci->i_flushing_caps == 0) {
- ceph_put_snap_context(ci->i_head_snapc);
- ci->i_head_snapc = NULL;
- }
- }
-
- if (atomic_read(&ci->i_filelock_ref) > 0) {
- /* make further file lock syscall return -EIO */
- ci->i_ceph_flags |= CEPH_I_ERROR_FILELOCK;
- pr_warn_ratelimited(" dropping file locks for %p %lld\n",
- inode, ceph_ino(inode));
- }
-
- if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) {
- list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
- ci->i_prealloc_cap_flush = NULL;
- }
-
- if (!list_empty(&ci->i_cap_snaps))
- capsnap_release = remove_capsnaps(mdsc, inode);
- }
+ iputs = ceph_purge_inode_cap(inode, cap, &invalidate);
spin_unlock(&ci->i_ceph_lock);
- while (!list_empty(&to_remove)) {
- struct ceph_cap_flush *cf;
- cf = list_first_entry(&to_remove,
- struct ceph_cap_flush, i_list);
- list_del_init(&cf->i_list);
- if (!cf->is_capsnap)
- ceph_free_cap_flush(cf);
- }
wake_up_all(&ci->i_cap_wq);
if (invalidate)
ceph_queue_invalidate(inode);
- if (dirty_dropped)
- iput(inode);
- while (capsnap_release--)
+ while (iputs--)
iput(inode);
return 0;
}
@@ -3467,9 +3361,14 @@ static void handle_session(struct ceph_mds_session *session,
if (msg_version >= 3) {
u32 len;
- /* version >= 2, metadata */
- if (__decode_session_metadata(&p, end, &blocklisted) < 0)
+ /* version >= 2 and < 5, decode metadata, skip otherwise
+ * as it's handled via flags.
+ */
+ if (msg_version >= 5)
+ ceph_decode_skip_map(&p, end, string, string, bad);
+ else if (__decode_session_metadata(&p, end, &blocklisted) < 0)
goto bad;
+
/* version >= 3, feature bits */
ceph_decode_32_safe(&p, end, len, bad);
if (len) {
@@ -3478,6 +3377,18 @@ static void handle_session(struct ceph_mds_session *session,
}
}
+ if (msg_version >= 5) {
+ u32 flags;
+ /* version >= 4, struct_v, struct_cv, len, metric_spec */
+ ceph_decode_skip_n(&p, end, 2 + sizeof(u32) * 2, bad);
+ /* version >= 5, flags */
+ ceph_decode_32_safe(&p, end, flags, bad);
+ if (flags & CEPH_SESSION_BLOCKLISTED) {
+ pr_warn("mds%d session blocklisted\n", session->s_mds);
+ blocklisted = true;
+ }
+ }
+
mutex_lock(&mdsc->mutex);
if (op == CEPH_SESSION_CLOSE) {
ceph_get_mds_session(session);
@@ -5072,7 +4983,8 @@ void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
return;
bad:
- pr_err("error decoding fsmap\n");
+ pr_err("error decoding fsmap %d. Shutting down mount.\n", err);
+ ceph_umount_begin(mdsc->fsc->sb);
err_out:
mutex_lock(&mdsc->mutex);
mdsc->mdsmap_err = err;
@@ -5139,7 +5051,8 @@ void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
bad_unlock:
mutex_unlock(&mdsc->mutex);
bad:
- pr_err("error decoding mdsmap %d\n", err);
+ pr_err("error decoding mdsmap %d. Shutting down mount.\n", err);
+ ceph_umount_begin(mdsc->fsc->sb);
return;
}
diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c
index 61d67cbcb36711..30387733765d51 100644
--- a/fs/ceph/mdsmap.c
+++ b/fs/ceph/mdsmap.c
@@ -263,10 +263,6 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2)
goto nomem;
for (j = 0; j < num_export_targets; j++) {
target = ceph_decode_32(&pexport_targets);
- if (target >= m->possible_max_rank) {
- err = -EIO;
- goto corrupt;
- }
info->export_targets[j] = target;
}
} else {
diff --git a/fs/ceph/metric.c b/fs/ceph/metric.c
index 04d5df29bbbfb3..c57699d8408d62 100644
--- a/fs/ceph/metric.c
+++ b/fs/ceph/metric.c
@@ -62,7 +62,7 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
read->header.ver = 1;
read->header.compat = 1;
read->header.data_len = cpu_to_le32(sizeof(*read) - header_len);
- sum = m->read_latency_sum;
+ sum = m->metric[METRIC_READ].latency_sum;
jiffies_to_timespec64(sum, &ts);
read->sec = cpu_to_le32(ts.tv_sec);
read->nsec = cpu_to_le32(ts.tv_nsec);
@@ -74,7 +74,7 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
write->header.ver = 1;
write->header.compat = 1;
write->header.data_len = cpu_to_le32(sizeof(*write) - header_len);
- sum = m->write_latency_sum;
+ sum = m->metric[METRIC_WRITE].latency_sum;
jiffies_to_timespec64(sum, &ts);
write->sec = cpu_to_le32(ts.tv_sec);
write->nsec = cpu_to_le32(ts.tv_nsec);
@@ -86,7 +86,7 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
meta->header.ver = 1;
meta->header.compat = 1;
meta->header.data_len = cpu_to_le32(sizeof(*meta) - header_len);
- sum = m->metadata_latency_sum;
+ sum = m->metric[METRIC_METADATA].latency_sum;
jiffies_to_timespec64(sum, &ts);
meta->sec = cpu_to_le32(ts.tv_sec);
meta->nsec = cpu_to_le32(ts.tv_nsec);
@@ -141,8 +141,8 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
rsize->header.ver = 1;
rsize->header.compat = 1;
rsize->header.data_len = cpu_to_le32(sizeof(*rsize) - header_len);
- rsize->total_ops = cpu_to_le64(m->total_reads);
- rsize->total_size = cpu_to_le64(m->read_size_sum);
+ rsize->total_ops = cpu_to_le64(m->metric[METRIC_READ].total);
+ rsize->total_size = cpu_to_le64(m->metric[METRIC_READ].size_sum);
items++;
/* encode the write io size metric */
@@ -151,8 +151,8 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
wsize->header.ver = 1;
wsize->header.compat = 1;
wsize->header.data_len = cpu_to_le32(sizeof(*wsize) - header_len);
- wsize->total_ops = cpu_to_le64(m->total_writes);
- wsize->total_size = cpu_to_le64(m->write_size_sum);
+ wsize->total_ops = cpu_to_le64(m->metric[METRIC_WRITE].total);
+ wsize->total_size = cpu_to_le64(m->metric[METRIC_WRITE].size_sum);
items++;
put_unaligned_le32(items, &head->num);
@@ -220,7 +220,8 @@ static void metric_delayed_work(struct work_struct *work)
int ceph_metric_init(struct ceph_client_metric *m)
{
- int ret;
+ struct ceph_metric *metric;
+ int ret, i;
if (!m)
return -EINVAL;
@@ -243,32 +244,18 @@ int ceph_metric_init(struct ceph_client_metric *m)
if (ret)
goto err_i_caps_mis;
- spin_lock_init(&m->read_metric_lock);
- m->read_latency_sq_sum = 0;
- m->read_latency_min = KTIME_MAX;
- m->read_latency_max = 0;
- m->total_reads = 0;
- m->read_latency_sum = 0;
- m->read_size_min = U64_MAX;
- m->read_size_max = 0;
- m->read_size_sum = 0;
-
- spin_lock_init(&m->write_metric_lock);
- m->write_latency_sq_sum = 0;
- m->write_latency_min = KTIME_MAX;
- m->write_latency_max = 0;
- m->total_writes = 0;
- m->write_latency_sum = 0;
- m->write_size_min = U64_MAX;
- m->write_size_max = 0;
- m->write_size_sum = 0;
-
- spin_lock_init(&m->metadata_metric_lock);
- m->metadata_latency_sq_sum = 0;
- m->metadata_latency_min = KTIME_MAX;
- m->metadata_latency_max = 0;
- m->total_metadatas = 0;
- m->metadata_latency_sum = 0;
+ for (i = 0; i < METRIC_MAX; i++) {
+ metric = &m->metric[i];
+ spin_lock_init(&metric->lock);
+ metric->size_sum = 0;
+ metric->size_min = U64_MAX;
+ metric->size_max = 0;
+ metric->total = 0;
+ metric->latency_sum = 0;
+ metric->latency_sq_sum = 0;
+ metric->latency_min = KTIME_MAX;
+ metric->latency_max = 0;
+ }
atomic64_set(&m->opened_files, 0);
ret = percpu_counter_init(&m->opened_inodes, 0, GFP_KERNEL);
@@ -338,9 +325,9 @@ static inline void __update_stdev(ktime_t total, ktime_t lsum,
*sq_sump += sq;
}
-void ceph_update_read_metrics(struct ceph_client_metric *m,
- ktime_t r_start, ktime_t r_end,
- unsigned int size, int rc)
+void ceph_update_metrics(struct ceph_metric *m,
+ ktime_t r_start, ktime_t r_end,
+ unsigned int size, int rc)
{
ktime_t lat = ktime_sub(r_end, r_start);
ktime_t total;
@@ -348,63 +335,12 @@ void ceph_update_read_metrics(struct ceph_client_metric *m,
if (unlikely(rc < 0 && rc != -ENOENT && rc != -ETIMEDOUT))
return;
- spin_lock(&m->read_metric_lock);
- total = ++m->total_reads;
- m->read_size_sum += size;
- m->read_latency_sum += lat;
- METRIC_UPDATE_MIN_MAX(m->read_size_min,
- m->read_size_max,
- size);
- METRIC_UPDATE_MIN_MAX(m->read_latency_min,
- m->read_latency_max,
- lat);
- __update_stdev(total, m->read_latency_sum,
- &m->read_latency_sq_sum, lat);
- spin_unlock(&m->read_metric_lock);
-}
-
-void ceph_update_write_metrics(struct ceph_client_metric *m,
- ktime_t r_start, ktime_t r_end,
- unsigned int size, int rc)
-{
- ktime_t lat = ktime_sub(r_end, r_start);
- ktime_t total;
-
- if (unlikely(rc && rc != -ETIMEDOUT))
- return;
-
- spin_lock(&m->write_metric_lock);
- total = ++m->total_writes;
- m->write_size_sum += size;
- m->write_latency_sum += lat;
- METRIC_UPDATE_MIN_MAX(m->write_size_min,
- m->write_size_max,
- size);
- METRIC_UPDATE_MIN_MAX(m->write_latency_min,
- m->write_latency_max,
- lat);
- __update_stdev(total, m->write_latency_sum,
- &m->write_latency_sq_sum, lat);
- spin_unlock(&m->write_metric_lock);
-}
-
-void ceph_update_metadata_metrics(struct ceph_client_metric *m,
- ktime_t r_start, ktime_t r_end,
- int rc)
-{
- ktime_t lat = ktime_sub(r_end, r_start);
- ktime_t total;
-
- if (unlikely(rc && rc != -ENOENT))
- return;
-
- spin_lock(&m->metadata_metric_lock);
- total = ++m->total_metadatas;
- m->metadata_latency_sum += lat;
- METRIC_UPDATE_MIN_MAX(m->metadata_latency_min,
- m->metadata_latency_max,
- lat);
- __update_stdev(total, m->metadata_latency_sum,
- &m->metadata_latency_sq_sum, lat);
- spin_unlock(&m->metadata_metric_lock);
+ spin_lock(&m->lock);
+ total = ++m->total;
+ m->size_sum += size;
+ METRIC_UPDATE_MIN_MAX(m->size_min, m->size_max, size);
+ m->latency_sum += lat;
+ METRIC_UPDATE_MIN_MAX(m->latency_min, m->latency_max, lat);
+ __update_stdev(total, m->latency_sum, &m->latency_sq_sum, lat);
+ spin_unlock(&m->lock);
}
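/*
 * Illustrative sketch, not part of this patch: the consolidated
 * ceph_update_metrics() above keeps, per metric slot, a running count,
 * sum, min/max and a sum of squared deviations for the latency.  The
 * stand-alone program below shows the same accumulation pattern; all
 * names in it are made up for illustration and are not kernel APIs.
 */
#include <stdint.h>
#include <stdio.h>

struct sample_metric {
	uint64_t total;     /* number of samples seen */
	int64_t sum;        /* running latency sum */
	int64_t sq_sum;     /* running sum of squared deviations */
	int64_t min, max;
};

static void sample_update(struct sample_metric *m, int64_t lat)
{
	int64_t avg, dev;

	m->total++;
	m->sum += lat;
	if (m->total == 1 || lat < m->min)
		m->min = lat;
	if (lat > m->max)
		m->max = lat;
	/* same idea as __update_stdev(): accumulate (lat - running avg)^2 */
	avg = m->sum / (int64_t)m->total;
	dev = lat - avg;
	m->sq_sum += dev * dev;
}

int main(void)
{
	struct sample_metric m = { 0 };
	int64_t lats[] = { 100, 120, 90, 110 };
	unsigned int i;

	for (i = 0; i < sizeof(lats) / sizeof(lats[0]); i++)
		sample_update(&m, lats[i]);
	printf("n=%llu sum=%lld min=%lld max=%lld sq_sum=%lld\n",
	       (unsigned long long)m.total, (long long)m.sum,
	       (long long)m.min, (long long)m.max, (long long)m.sq_sum);
	return 0;
}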
diff --git a/fs/ceph/metric.h b/fs/ceph/metric.h
index 0133955a3c6aac..bb45608181e7f2 100644
--- a/fs/ceph/metric.h
+++ b/fs/ceph/metric.h
@@ -125,6 +125,26 @@ struct ceph_metric_head {
__le32 num; /* the number of metrics that will be sent */
} __packed;
+enum metric_type {
+ METRIC_READ,
+ METRIC_WRITE,
+ METRIC_METADATA,
+ METRIC_COPYFROM,
+ METRIC_MAX
+};
+
+struct ceph_metric {
+ spinlock_t lock;
+ u64 total;
+ u64 size_sum;
+ u64 size_min;
+ u64 size_max;
+ ktime_t latency_sum;
+ ktime_t latency_sq_sum;
+ ktime_t latency_min;
+ ktime_t latency_max;
+};
+
/* This is the global metrics structure */
struct ceph_client_metric {
atomic64_t total_dentries;
@@ -135,32 +155,7 @@ struct ceph_client_metric {
struct percpu_counter i_caps_hit;
struct percpu_counter i_caps_mis;
- spinlock_t read_metric_lock;
- u64 total_reads;
- u64 read_size_sum;
- u64 read_size_min;
- u64 read_size_max;
- ktime_t read_latency_sum;
- ktime_t read_latency_sq_sum;
- ktime_t read_latency_min;
- ktime_t read_latency_max;
-
- spinlock_t write_metric_lock;
- u64 total_writes;
- u64 write_size_sum;
- u64 write_size_min;
- u64 write_size_max;
- ktime_t write_latency_sum;
- ktime_t write_latency_sq_sum;
- ktime_t write_latency_min;
- ktime_t write_latency_max;
-
- spinlock_t metadata_metric_lock;
- u64 total_metadatas;
- ktime_t metadata_latency_sum;
- ktime_t metadata_latency_sq_sum;
- ktime_t metadata_latency_min;
- ktime_t metadata_latency_max;
+ struct ceph_metric metric[METRIC_MAX];
/* The total number of directories and files that are opened */
atomic64_t opened_files;
@@ -195,13 +190,36 @@ static inline void ceph_update_cap_mis(struct ceph_client_metric *m)
percpu_counter_inc(&m->i_caps_mis);
}
-extern void ceph_update_read_metrics(struct ceph_client_metric *m,
- ktime_t r_start, ktime_t r_end,
- unsigned int size, int rc);
-extern void ceph_update_write_metrics(struct ceph_client_metric *m,
- ktime_t r_start, ktime_t r_end,
- unsigned int size, int rc);
-extern void ceph_update_metadata_metrics(struct ceph_client_metric *m,
- ktime_t r_start, ktime_t r_end,
- int rc);
+extern void ceph_update_metrics(struct ceph_metric *m,
+ ktime_t r_start, ktime_t r_end,
+ unsigned int size, int rc);
+
+static inline void ceph_update_read_metrics(struct ceph_client_metric *m,
+ ktime_t r_start, ktime_t r_end,
+ unsigned int size, int rc)
+{
+ ceph_update_metrics(&m->metric[METRIC_READ],
+ r_start, r_end, size, rc);
+}
+static inline void ceph_update_write_metrics(struct ceph_client_metric *m,
+ ktime_t r_start, ktime_t r_end,
+ unsigned int size, int rc)
+{
+ ceph_update_metrics(&m->metric[METRIC_WRITE],
+ r_start, r_end, size, rc);
+}
+static inline void ceph_update_metadata_metrics(struct ceph_client_metric *m,
+ ktime_t r_start, ktime_t r_end,
+ int rc)
+{
+ ceph_update_metrics(&m->metric[METRIC_METADATA],
+ r_start, r_end, 0, rc);
+}
+static inline void ceph_update_copyfrom_metrics(struct ceph_client_metric *m,
+ ktime_t r_start, ktime_t r_end,
+ unsigned int size, int rc)
+{
+ ceph_update_metrics(&m->metric[METRIC_COPYFROM],
+ r_start, r_end, size, rc);
+}
#endif /* _FS_CEPH_MDS_METRIC_H */
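/*
 * Illustrative sketch, not part of this patch: the metric.h change above
 * replaces per-type fields with an array indexed by enum metric_type,
 * plus thin inline wrappers, so adding METRIC_COPYFROM only needs a new
 * enum value and wrapper.  The small program below mirrors that dispatch
 * pattern with made-up names; it is not the kernel code.
 */
#include <stdio.h>

enum mtype { M_READ, M_WRITE, M_METADATA, M_COPYFROM, M_MAX };

struct mstat { unsigned long count; unsigned long bytes; };
struct client_stats { struct mstat m[M_MAX]; };

static void stat_update(struct mstat *s, unsigned long bytes)
{
	s->count++;
	s->bytes += bytes;
}

/* thin wrappers, analogous to ceph_update_read_metrics() and friends */
static void stat_update_read(struct client_stats *c, unsigned long n)
{
	stat_update(&c->m[M_READ], n);
}

static void stat_update_copyfrom(struct client_stats *c, unsigned long n)
{
	stat_update(&c->m[M_COPYFROM], n);
}

int main(void)
{
	struct client_stats c = { 0 };

	stat_update_read(&c, 4096);
	stat_update_copyfrom(&c, 1 << 20);
	printf("reads=%lu copyfrom_bytes=%lu\n",
	       c.m[M_READ].count, c.m[M_COPYFROM].bytes);
	return 0;
}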
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index fd8742bae84715..bab61232dc5a05 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -52,8 +52,7 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
struct ceph_fs_client *fsc = ceph_inode_to_client(d_inode(dentry));
struct ceph_mon_client *monc = &fsc->client->monc;
struct ceph_statfs st;
- u64 fsid;
- int err;
+ int i, err;
u64 data_pool;
if (fsc->mdsc->mdsmap->m_num_data_pg_pools == 1) {
@@ -99,12 +98,14 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_namelen = NAME_MAX;
/* Must convert the fsid, for consistent values across arches */
+ buf->f_fsid.val[0] = 0;
mutex_lock(&monc->mutex);
- fsid = le64_to_cpu(*(__le64 *)(&monc->monmap->fsid)) ^
- le64_to_cpu(*((__le64 *)&monc->monmap->fsid + 1));
+ for (i = 0 ; i < sizeof(monc->monmap->fsid) / sizeof(__le32) ; ++i)
+ buf->f_fsid.val[0] ^= le32_to_cpu(((__le32 *)&monc->monmap->fsid)[i]);
mutex_unlock(&monc->mutex);
- buf->f_fsid = u64_to_fsid(fsid);
+ /* fold the fs_cluster_id into the upper bits */
+ buf->f_fsid.val[1] = monc->fs_cluster_id;
return 0;
}
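/*
 * Illustrative sketch, not part of this patch: the ceph_statfs() hunk
 * above folds the 16-byte cluster fsid into f_fsid.val[0] by XOR-ing its
 * four little-endian 32-bit words, so the reported value is the same on
 * every architecture; val[1] then carries the fs_cluster_id.  The helper
 * below shows the folding step in user space (names are illustrative).
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t fold_fsid(const unsigned char fsid[16])
{
	uint32_t out = 0, word;
	int i;

	for (i = 0; i < 16; i += 4) {
		memcpy(&word, fsid + i, sizeof(word));
		/* the kernel uses le32_to_cpu() here; this assumes LE data */
		out ^= word;
	}
	return out;
}

int main(void)
{
	unsigned char fsid[16] = {
		0x01, 0x02, 0x03, 0x04, 0x11, 0x12, 0x13, 0x14,
		0x21, 0x22, 0x23, 0x24, 0x31, 0x32, 0x33, 0x34,
	};

	printf("folded fsid: 0x%08x\n", (unsigned int)fold_fsid(fsid));
	return 0;
}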
@@ -577,8 +578,8 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
if (fsopt->flags & CEPH_MOUNT_OPT_CLEANRECOVER)
seq_show_option(m, "recover_session", "clean");
- if (fsopt->flags & CEPH_MOUNT_OPT_ASYNC_DIROPS)
- seq_puts(m, ",nowsync");
+ if (!(fsopt->flags & CEPH_MOUNT_OPT_ASYNC_DIROPS))
+ seq_puts(m, ",wsync");
if (fsopt->wsize != CEPH_MAX_WRITE_SIZE)
seq_printf(m, ",wsize=%u", fsopt->wsize);
@@ -842,7 +843,7 @@ static void __ceph_umount_begin(struct ceph_fs_client *fsc)
* ceph_umount_begin - initiate forced umount. Tear down the
* mount, skipping steps that may hang while waiting for server(s).
*/
-static void ceph_umount_begin(struct super_block *sb)
+void ceph_umount_begin(struct super_block *sb)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 14f951cd5b61bf..ac331aa07cfa59 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -48,7 +48,8 @@
#define CEPH_MOUNT_OPT_DEFAULT \
(CEPH_MOUNT_OPT_DCACHE | \
- CEPH_MOUNT_OPT_NOCOPYFROM)
+ CEPH_MOUNT_OPT_NOCOPYFROM | \
+ CEPH_MOUNT_OPT_ASYNC_DIROPS)
#define ceph_set_mount_opt(fsc, opt) \
(fsc)->mount_options->flags |= CEPH_MOUNT_OPT_##opt
@@ -128,9 +129,9 @@ struct ceph_fs_client {
struct dentry *debugfs_congestion_kb;
struct dentry *debugfs_bdi;
struct dentry *debugfs_mdsc, *debugfs_mdsmap;
- struct dentry *debugfs_metric;
struct dentry *debugfs_status;
struct dentry *debugfs_mds_sessions;
+ struct dentry *debugfs_metrics_dir;
#endif
#ifdef CONFIG_CEPH_FSCACHE
@@ -580,6 +581,7 @@ static inline struct inode *ceph_find_inode(struct super_block *sb,
#define CEPH_I_ODIRECT (1 << 11) /* inode in direct I/O mode */
#define CEPH_ASYNC_CREATE_BIT (12) /* async create in flight for this */
#define CEPH_I_ASYNC_CREATE (1 << CEPH_ASYNC_CREATE_BIT)
+#define CEPH_I_SHUTDOWN (1 << 13) /* inode is no longer usable */
/*
* Masks of ceph inode work.
@@ -939,6 +941,7 @@ extern void ceph_put_snapid_map(struct ceph_mds_client* mdsc,
struct ceph_snapid_map *sm);
extern void ceph_trim_snapid_map(struct ceph_mds_client *mdsc);
extern void ceph_cleanup_snapid_map(struct ceph_mds_client *mdsc);
+void ceph_umount_begin(struct super_block *sb);
/*
@@ -1027,6 +1030,16 @@ extern int ceph_setattr(struct user_namespace *mnt_userns,
extern int ceph_getattr(struct user_namespace *mnt_userns,
const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int flags);
+void ceph_inode_shutdown(struct inode *inode);
+
+static inline bool ceph_inode_is_shutdown(struct inode *inode)
+{
+ unsigned long flags = READ_ONCE(ceph_inode(inode)->i_ceph_flags);
+ struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ int state = READ_ONCE(fsc->mount_state);
+
+ return (flags & CEPH_I_SHUTDOWN) || state >= CEPH_MOUNT_SHUTDOWN;
+}
/* xattr.c */
int __ceph_setxattr(struct inode *, const char *, const void *, size_t, int);
@@ -1198,6 +1211,7 @@ extern int ceph_mmap(struct file *file, struct vm_area_struct *vma);
extern int ceph_uninline_data(struct file *filp, struct page *locked_page);
extern int ceph_pool_perm_check(struct inode *inode, int need);
extern void ceph_pool_perm_destroy(struct ceph_mds_client* mdsc);
+int ceph_purge_inode_cap(struct inode *inode, struct ceph_cap *cap, bool *invalidate);
/* file.c */
extern const struct file_operations ceph_file_fops;
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index de2c12bcfa4bc6..d282caf9f03721 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -271,7 +271,8 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
c = 0;
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
- if (server->is_channel)
+ /* channel info will be printed as a part of sessions below */
+ if (CIFS_SERVER_IS_CHAN(server))
continue;
c++;
@@ -358,6 +359,8 @@ skip_rdma:
seq_printf(m, " signed");
if (server->posix_ext_supported)
seq_printf(m, " posix");
+ if (server->nosharesock)
+ seq_printf(m, " nosharesock");
if (server->rdma)
seq_printf(m, "\nRDMA ");
@@ -412,12 +415,14 @@ skip_rdma:
from_kuid(&init_user_ns, ses->linux_uid),
from_kuid(&init_user_ns, ses->cred_uid));
+ spin_lock(&ses->chan_lock);
if (ses->chan_count > 1) {
seq_printf(m, "\n\n\tExtra Channels: %zu ",
ses->chan_count-1);
for (j = 1; j < ses->chan_count; j++)
cifs_dump_channel(m, j, &ses->chans[j]);
}
+ spin_unlock(&ses->chan_lock);
seq_puts(m, "\n\n\tShares: ");
j = 0;
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 007427ba75e5fb..b0864da9ef4341 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -307,12 +307,8 @@ static struct vfsmount *cifs_dfs_do_mount(struct dentry *mntpt,
static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
{
struct cifs_sb_info *cifs_sb;
- struct cifs_ses *ses;
- struct cifs_tcon *tcon;
void *page;
- char *full_path, *root_path;
- unsigned int xid;
- int rc;
+ char *full_path;
struct vfsmount *mnt;
cifs_dbg(FYI, "in %s\n", __func__);
@@ -324,8 +320,6 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
* the double backslashes usually used in the UNC. This function
* gives us the latter, so we must adjust the result.
*/
- mnt = ERR_PTR(-ENOMEM);
-
cifs_sb = CIFS_SB(mntpt->d_sb);
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) {
mnt = ERR_PTR(-EREMOTE);
@@ -341,60 +335,11 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
}
convert_delimiter(full_path, '\\');
-
cifs_dbg(FYI, "%s: full_path: %s\n", __func__, full_path);
- if (!cifs_sb_master_tlink(cifs_sb)) {
- cifs_dbg(FYI, "%s: master tlink is NULL\n", __func__);
- goto free_full_path;
- }
-
- tcon = cifs_sb_master_tcon(cifs_sb);
- if (!tcon) {
- cifs_dbg(FYI, "%s: master tcon is NULL\n", __func__);
- goto free_full_path;
- }
-
- root_path = kstrdup(tcon->treeName, GFP_KERNEL);
- if (!root_path) {
- mnt = ERR_PTR(-ENOMEM);
- goto free_full_path;
- }
- cifs_dbg(FYI, "%s: root path: %s\n", __func__, root_path);
-
- ses = tcon->ses;
- xid = get_xid();
-
- /*
- * If DFS root has been expired, then unconditionally fetch it again to
- * refresh DFS referral cache.
- */
- rc = dfs_cache_find(xid, ses, cifs_sb->local_nls, cifs_remap(cifs_sb),
- root_path + 1, NULL, NULL);
- if (!rc) {
- rc = dfs_cache_find(xid, ses, cifs_sb->local_nls,
- cifs_remap(cifs_sb), full_path + 1,
- NULL, NULL);
- }
-
- free_xid(xid);
-
- if (rc) {
- mnt = ERR_PTR(rc);
- goto free_root_path;
- }
- /*
- * OK - we were able to get and cache a referral for @full_path.
- *
- * Now, pass it down to cifs_mount() and it will retry every available
- * node server in case of failures - no need to do it here.
- */
mnt = cifs_dfs_do_mount(mntpt, cifs_sb, full_path);
- cifs_dbg(FYI, "%s: cifs_dfs_do_mount:%s , mnt:%p\n", __func__,
- full_path + 1, mnt);
+ cifs_dbg(FYI, "%s: cifs_dfs_do_mount:%s , mnt:%p\n", __func__, full_path + 1, mnt);
-free_root_path:
- kfree(root_path);
free_full_path:
free_dentry_path(page);
cdda_exit:
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index f97407520ea157..013a4bd65280ce 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -61,11 +61,6 @@ struct cifs_sb_info {
/* only used when CIFS_MOUNT_USE_PREFIX_PATH is set */
char *prepath;
- /*
- * Canonical DFS path initially provided by the mount call. We might connect to something
- * different via DFS but we want to keep it to do failover properly.
- */
- char *origin_fullpath; /* \\HOST\SHARE\[OPTIONAL PATH] */
/* randomly generated 128-bit number for indexing dfs mount groups in referral cache */
uuid_t dfs_mount_id;
/*
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index abff31dcd0050c..be74606724c793 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
+#include <linux/utsname.h>
#include "cifs_fs_sb.h"
#include "cifsacl.h"
#include <crypto/internal/hash.h>
@@ -75,7 +76,8 @@
#define SMB_ECHO_INTERVAL_MAX 600
#define SMB_ECHO_INTERVAL_DEFAULT 60
-/* dns resolution interval in seconds */
+/* dns resolution intervals in seconds */
+#define SMB_DNS_RESOLVE_INTERVAL_MIN 120
#define SMB_DNS_RESOLVE_INTERVAL_DEFAULT 600
/* maximum number of PDUs in one compound */
@@ -99,6 +101,8 @@
#define XATTR_DOS_ATTRIB "user.DOSATTRIB"
#endif
+#define CIFS_MAX_WORKSTATION_LEN (__NEW_UTS_LEN + 1) /* reasonable max for client */
+
/*
* CIFS vfs client Status information (based on what we know.)
*/
@@ -592,6 +596,7 @@ struct TCP_Server_Info {
struct list_head pending_mid_q;
bool noblocksnd; /* use blocking sendmsg */
bool noautotune; /* do not autotune send buf sizes */
+ bool nosharesock;
bool tcp_nodelay;
unsigned int credits; /* send no more requests at once */
unsigned int max_credits; /* can override large 32000 default at mnt */
@@ -685,13 +690,34 @@ struct TCP_Server_Info {
*/
int nr_targets;
bool noblockcnt; /* use non-blocking connect() */
- bool is_channel; /* if a session channel */
+
+ /*
+	 * If this is a session channel, primary_server holds the ref-counted
+	 * pointer to the primary channel connection for the session.
+ */
+#define CIFS_SERVER_IS_CHAN(server) (!!(server)->primary_server)
+ struct TCP_Server_Info *primary_server;
+
#ifdef CONFIG_CIFS_SWN_UPCALL
bool use_swn_dstaddr;
struct sockaddr_storage swn_dstaddr;
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
bool is_dfs_conn; /* if a dfs connection */
+ struct mutex refpath_lock; /* protects leaf_fullpath */
+ /*
+ * Canonical DFS full paths that were used to chase referrals in mount and reconnect.
+ *
+ * origin_fullpath: first or original referral path
+ * leaf_fullpath: last referral path (might be changed due to nested links in reconnect)
+ *
+ * current_fullpath: pointer to either origin_fullpath or leaf_fullpath
+ * NOTE: cannot be accessed outside cifs_reconnect() and smb2_reconnect()
+ *
+ * format: \\HOST\SHARE\[OPTIONAL PATH]
+ */
+ char *origin_fullpath, *leaf_fullpath, *current_fullpath;
#endif
};
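/*
 * Illustrative sketch, not part of this patch: with the hunk above, a
 * secondary channel is identified purely by a non-NULL primary_server
 * pointer (CIFS_SERVER_IS_CHAN()), and later hunks make it take a
 * reference on that primary for its whole lifetime.  The toy refcount
 * below mirrors that ownership rule; all names are made up.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct conn {
	int refcount;
	struct conn *primary;	/* NULL for a primary connection */
};

#define CONN_IS_CHAN(c) (!!(c)->primary)

static struct conn *conn_new(struct conn *primary)
{
	struct conn *c = calloc(1, sizeof(*c));

	assert(c);
	c->refcount = 1;
	if (primary) {
		primary->refcount++;	/* channel pins its primary */
		c->primary = primary;
	}
	return c;
}

static void conn_put(struct conn *c)
{
	if (--c->refcount)
		return;
	if (CONN_IS_CHAN(c))
		conn_put(c->primary);	/* drop the pin taken in conn_new() */
	free(c);
}

int main(void)
{
	struct conn *primary = conn_new(NULL);
	struct conn *chan = conn_new(primary);

	printf("primary refcount=%d, chan is channel: %d\n",
	       primary->refcount, CONN_IS_CHAN(chan));
	conn_put(chan);		/* also releases its reference on the primary */
	conn_put(primary);
	return 0;
}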
@@ -908,6 +934,7 @@ struct cifs_ses {
and after mount option parsing we fill it */
char *domainName;
char *password;
+ char *workstation_name;
struct session_key auth_key;
struct ntlmssp_auth *ntlmssp; /* ciphertext, flags, server challenge */
enum securityEnum sectype; /* what security flavor was specified? */
@@ -933,16 +960,21 @@ struct cifs_ses {
* iface_lock should be taken when accessing any of these fields
*/
spinlock_t iface_lock;
+ /* ========= begin: protected by iface_lock ======== */
struct cifs_server_iface *iface_list;
size_t iface_count;
unsigned long iface_last_update; /* jiffies */
+ /* ========= end: protected by iface_lock ======== */
+ spinlock_t chan_lock;
+ /* ========= begin: protected by chan_lock ======== */
#define CIFS_MAX_CHANNELS 16
struct cifs_chan chans[CIFS_MAX_CHANNELS];
struct cifs_chan *binding_chan;
size_t chan_count;
size_t chan_max;
atomic_t chan_seq; /* round robin state */
+ /* ========= end: protected by chan_lock ======== */
};
/*
@@ -1091,7 +1123,6 @@ struct cifs_tcon {
struct cached_fid crfid; /* Cached root fid */
/* BB add field for back pointer to sb struct(s)? */
#ifdef CONFIG_CIFS_DFS_UPCALL
- char *dfs_path; /* canonical DFS path */
struct list_head ulist; /* cache update list */
#endif
};
@@ -1942,4 +1973,14 @@ static inline bool is_tcon_dfs(struct cifs_tcon *tcon)
tcon->share_flags & (SHI1005_FLAGS_DFS | SHI1005_FLAGS_DFS_ROOT);
}
+static inline bool cifs_is_referral_server(struct cifs_tcon *tcon,
+ const struct dfs_info3_param *ref)
+{
+ /*
+ * Check if all targets are capable of handling DFS referrals as per
+ * MS-DFSC 2.2.4 RESP_GET_DFS_REFERRAL.
+ */
+ return is_tcon_dfs(tcon) || (ref && (ref->flags & DFSREF_REFERRAL_SERVER));
+}
+
#endif /* _CIFS_GLOB_H */
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index d0f85b666662db..f3073a62ce574e 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -269,8 +269,9 @@ extern void cifs_close_all_deferred_files(struct cifs_tcon *cifs_tcon);
extern void cifs_close_deferred_file_under_dentry(struct cifs_tcon *cifs_tcon,
const char *path);
-
-extern struct TCP_Server_Info *cifs_get_tcp_session(struct smb3_fs_context *ctx);
+extern struct TCP_Server_Info *
+cifs_get_tcp_session(struct smb3_fs_context *ctx,
+ struct TCP_Server_Info *primary_server);
extern void cifs_put_tcp_session(struct TCP_Server_Info *server,
int from_reconnect);
extern void cifs_put_tcon(struct cifs_tcon *tcon);
@@ -607,7 +608,7 @@ int smb2_parse_query_directory(struct cifs_tcon *tcon, struct kvec *rsp_iov,
struct super_block *cifs_get_tcp_super(struct TCP_Server_Info *server);
void cifs_put_tcp_super(struct super_block *sb);
-int update_super_prepath(struct cifs_tcon *tcon, char *prefix);
+int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix);
char *extract_hostname(const char *unc);
char *extract_sharename(const char *unc);
@@ -634,4 +635,7 @@ static inline int cifs_create_options(struct cifs_sb_info *cifs_sb, int options)
return options;
}
+struct super_block *cifs_get_tcon_super(struct cifs_tcon *tcon);
+void cifs_put_tcon_super(struct super_block *sb);
+
#endif /* _CIFSPROTO_H */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 0abbff4e4135c6..82577a7a5bb147 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -61,6 +61,20 @@ extern bool disable_legacy_dialects;
/* Drop the connection to not overload the server */
#define NUM_STATUS_IO_TIMEOUT 5
+struct mount_ctx {
+ struct cifs_sb_info *cifs_sb;
+ struct smb3_fs_context *fs_ctx;
+ unsigned int xid;
+ struct TCP_Server_Info *server;
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon;
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ struct cifs_ses *root_ses;
+ uuid_t mount_id;
+ char *origin_fullpath, *leaf_fullpath;
+#endif
+};
+
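/*
 * Illustrative sketch, not part of this patch: struct mount_ctx above
 * bundles the xid/server/ses/tcon out-parameters that the mount helpers
 * previously threaded around individually, so mount_get_conns(),
 * mount_put_conns() and the DFS paths can hand one object back and
 * forth.  The toy version below shows the shape of that refactoring;
 * every name in it is hypothetical.
 */
#include <stdio.h>

struct demo_mount_ctx {
	int xid;
	const char *server;
	const char *session;
	const char *tcon;
};

static int demo_get_conns(struct demo_mount_ctx *ctx)
{
	ctx->xid = 42;
	ctx->server = "tcp-server";
	ctx->session = "smb-session";
	ctx->tcon = "share";
	return 0;
}

static void demo_put_conns(struct demo_mount_ctx *ctx)
{
	/* release whatever was acquired; a real version would drop refs */
	ctx->tcon = NULL;
	ctx->session = NULL;
	ctx->server = NULL;
}

int main(void)
{
	struct demo_mount_ctx ctx = { 0 };

	if (!demo_get_conns(&ctx))
		printf("xid=%d server=%s tcon=%s\n", ctx.xid, ctx.server, ctx.tcon);
	demo_put_conns(&ctx);
	return 0;
}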
static int ip_connect(struct TCP_Server_Info *server);
static int generic_ip_connect(struct TCP_Server_Info *server);
static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
@@ -115,7 +129,7 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
* To make sure we don't use the cached entry, retry 1s
* after expiry.
*/
- ttl = (expiry - now + 1);
+ ttl = max_t(unsigned long, expiry - now, SMB_DNS_RESOLVE_INTERVAL_MIN) + 1;
}
rc = !rc ? -1 : 0;
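/*
 * Illustrative sketch, not part of this patch: the change above clamps
 * the DNS re-resolution delay to SMB_DNS_RESOLVE_INTERVAL_MIN and adds
 * one second so the cached entry has definitely expired.  The helper
 * below reproduces that arithmetic in user space with made-up names.
 */
#include <stdio.h>

#define DNS_RESOLVE_INTERVAL_MIN 120UL	/* seconds, like SMB_DNS_RESOLVE_INTERVAL_MIN */

static unsigned long next_resolve_delay(unsigned long expiry, unsigned long now)
{
	unsigned long ttl = expiry > now ? expiry - now : 0;

	if (ttl < DNS_RESOLVE_INTERVAL_MIN)
		ttl = DNS_RESOLVE_INTERVAL_MIN;
	return ttl + 1;	/* retry 1s after expiry */
}

int main(void)
{
	/* a 5s TTL is stretched to the 120s floor (plus the 1s guard) */
	printf("delay=%lus\n", next_resolve_delay(1005, 1000));
	/* a 10-minute TTL is kept as-is (plus the 1s guard) */
	printf("delay=%lus\n", next_resolve_delay(1600, 1000));
	return 0;
}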
@@ -148,139 +162,38 @@ static void cifs_resolve_server(struct work_struct *work)
mutex_unlock(&server->srv_mutex);
}
-#ifdef CONFIG_CIFS_DFS_UPCALL
-/* These functions must be called with server->srv_mutex held */
-static void reconn_set_next_dfs_target(struct TCP_Server_Info *server,
- struct cifs_sb_info *cifs_sb,
- struct dfs_cache_tgt_list *tgt_list,
- struct dfs_cache_tgt_iterator **tgt_it)
-{
- const char *name;
- int rc;
-
- if (!cifs_sb || !cifs_sb->origin_fullpath)
- return;
-
- if (!*tgt_it) {
- *tgt_it = dfs_cache_get_tgt_iterator(tgt_list);
- } else {
- *tgt_it = dfs_cache_get_next_tgt(tgt_list, *tgt_it);
- if (!*tgt_it)
- *tgt_it = dfs_cache_get_tgt_iterator(tgt_list);
- }
-
- cifs_dbg(FYI, "%s: UNC: %s\n", __func__, cifs_sb->origin_fullpath);
-
- name = dfs_cache_get_tgt_name(*tgt_it);
-
- kfree(server->hostname);
-
- server->hostname = extract_hostname(name);
- if (IS_ERR(server->hostname)) {
- cifs_dbg(FYI,
- "%s: failed to extract hostname from target: %ld\n",
- __func__, PTR_ERR(server->hostname));
- return;
- }
-
- rc = reconn_set_ipaddr_from_hostname(server);
- if (rc) {
- cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n",
- __func__, rc);
- }
-}
-
-static inline int reconn_setup_dfs_targets(struct cifs_sb_info *cifs_sb,
- struct dfs_cache_tgt_list *tl)
-{
- if (!cifs_sb->origin_fullpath)
- return -EOPNOTSUPP;
- return dfs_cache_noreq_find(cifs_sb->origin_fullpath + 1, NULL, tl);
-}
-#endif
-
-/*
- * cifs tcp session reconnection
+/**
+ * Mark all sessions and tcons for reconnect.
*
- * mark tcp session as reconnecting so temporarily locked
- * mark all smb sessions as reconnecting for tcp session
- * reconnect tcp session
- * wake up waiters on reconnection? - (not needed currently)
+ * @server needs to be previously set to CifsNeedReconnect.
*/
-int
-cifs_reconnect(struct TCP_Server_Info *server)
+static void cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server)
{
- int rc = 0;
- struct list_head *tmp, *tmp2;
struct cifs_ses *ses;
struct cifs_tcon *tcon;
- struct mid_q_entry *mid_entry;
+ struct mid_q_entry *mid, *nmid;
struct list_head retry_list;
-#ifdef CONFIG_CIFS_DFS_UPCALL
- struct super_block *sb = NULL;
- struct cifs_sb_info *cifs_sb = NULL;
- struct dfs_cache_tgt_list tgt_list = DFS_CACHE_TGT_LIST_INIT(tgt_list);
- struct dfs_cache_tgt_iterator *tgt_it = NULL;
-#endif
+ struct TCP_Server_Info *pserver;
- spin_lock(&GlobalMid_Lock);
- server->nr_targets = 1;
-#ifdef CONFIG_CIFS_DFS_UPCALL
- spin_unlock(&GlobalMid_Lock);
- sb = cifs_get_tcp_super(server);
- if (IS_ERR(sb)) {
- rc = PTR_ERR(sb);
- cifs_dbg(FYI, "%s: will not do DFS failover: rc = %d\n",
- __func__, rc);
- sb = NULL;
- } else {
- cifs_sb = CIFS_SB(sb);
- rc = reconn_setup_dfs_targets(cifs_sb, &tgt_list);
- if (rc) {
- cifs_sb = NULL;
- if (rc != -EOPNOTSUPP) {
- cifs_server_dbg(VFS, "%s: no target servers for DFS failover\n",
- __func__);
- }
- } else {
- server->nr_targets = dfs_cache_get_nr_tgts(&tgt_list);
- }
- }
- cifs_dbg(FYI, "%s: will retry %d target(s)\n", __func__,
- server->nr_targets);
- spin_lock(&GlobalMid_Lock);
-#endif
- if (server->tcpStatus == CifsExiting) {
- /* the demux thread will exit normally
- next time through the loop */
- spin_unlock(&GlobalMid_Lock);
-#ifdef CONFIG_CIFS_DFS_UPCALL
- dfs_cache_free_tgts(&tgt_list);
- cifs_put_tcp_super(sb);
-#endif
- wake_up(&server->response_q);
- return rc;
- } else
- server->tcpStatus = CifsNeedReconnect;
- spin_unlock(&GlobalMid_Lock);
server->maxBuf = 0;
server->max_read = 0;
cifs_dbg(FYI, "Mark tcp session as need reconnect\n");
trace_smb3_reconnect(server->CurrentMid, server->conn_id, server->hostname);
+ /*
+ * before reconnecting the tcp session, mark the smb session (uid) and the tid bad so they
+ * are not used until reconnected.
+ */
+ cifs_dbg(FYI, "%s: marking sessions and tcons for reconnect\n", __func__);
+
+ /* If server is a channel, select the primary channel */
+ pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
- /* before reconnecting the tcp session, mark the smb session (uid)
- and the tid bad so they are not used until reconnected */
- cifs_dbg(FYI, "%s: marking sessions and tcons for reconnect\n",
- __func__);
spin_lock(&cifs_tcp_ses_lock);
- list_for_each(tmp, &server->smb_ses_list) {
- ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
+ list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
ses->need_reconnect = true;
- list_for_each(tmp2, &ses->tcon_list) {
- tcon = list_entry(tmp2, struct cifs_tcon, tcon_list);
+ list_for_each_entry(tcon, &ses->tcon_list, tcon_list)
tcon->need_reconnect = true;
- }
if (ses->tcon_ipc)
ses->tcon_ipc->need_reconnect = true;
}
@@ -290,11 +203,11 @@ cifs_reconnect(struct TCP_Server_Info *server)
cifs_dbg(FYI, "%s: tearing down socket\n", __func__);
mutex_lock(&server->srv_mutex);
if (server->ssocket) {
- cifs_dbg(FYI, "State: 0x%x Flags: 0x%lx\n",
- server->ssocket->state, server->ssocket->flags);
+ cifs_dbg(FYI, "State: 0x%x Flags: 0x%lx\n", server->ssocket->state,
+ server->ssocket->flags);
kernel_sock_shutdown(server->ssocket, SHUT_WR);
- cifs_dbg(FYI, "Post shutdown state: 0x%x Flags: 0x%lx\n",
- server->ssocket->state, server->ssocket->flags);
+ cifs_dbg(FYI, "Post shutdown state: 0x%x Flags: 0x%lx\n", server->ssocket->state,
+ server->ssocket->flags);
sock_release(server->ssocket);
server->ssocket = NULL;
}
@@ -309,23 +222,21 @@ cifs_reconnect(struct TCP_Server_Info *server)
INIT_LIST_HEAD(&retry_list);
cifs_dbg(FYI, "%s: moving mids to private list\n", __func__);
spin_lock(&GlobalMid_Lock);
- list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
- mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
- kref_get(&mid_entry->refcount);
- if (mid_entry->mid_state == MID_REQUEST_SUBMITTED)
- mid_entry->mid_state = MID_RETRY_NEEDED;
- list_move(&mid_entry->qhead, &retry_list);
- mid_entry->mid_flags |= MID_DELETED;
+ list_for_each_entry_safe(mid, nmid, &server->pending_mid_q, qhead) {
+ kref_get(&mid->refcount);
+ if (mid->mid_state == MID_REQUEST_SUBMITTED)
+ mid->mid_state = MID_RETRY_NEEDED;
+ list_move(&mid->qhead, &retry_list);
+ mid->mid_flags |= MID_DELETED;
}
spin_unlock(&GlobalMid_Lock);
mutex_unlock(&server->srv_mutex);
cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__);
- list_for_each_safe(tmp, tmp2, &retry_list) {
- mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
- list_del_init(&mid_entry->qhead);
- mid_entry->callback(mid_entry);
- cifs_mid_q_entry_release(mid_entry);
+ list_for_each_entry_safe(mid, nmid, &retry_list, qhead) {
+ list_del_init(&mid->qhead);
+ mid->callback(mid);
+ cifs_mid_q_entry_release(mid);
}
if (cifs_rdma_enabled(server)) {
@@ -333,38 +244,48 @@ cifs_reconnect(struct TCP_Server_Info *server)
smbd_destroy(server);
mutex_unlock(&server->srv_mutex);
}
+}
+
+static bool cifs_tcp_ses_needs_reconnect(struct TCP_Server_Info *server, int num_targets)
+{
+ spin_lock(&GlobalMid_Lock);
+ server->nr_targets = num_targets;
+ if (server->tcpStatus == CifsExiting) {
+ /* the demux thread will exit normally next time through the loop */
+ spin_unlock(&GlobalMid_Lock);
+ wake_up(&server->response_q);
+ return false;
+ }
+ server->tcpStatus = CifsNeedReconnect;
+ spin_unlock(&GlobalMid_Lock);
+ return true;
+}
+
+/*
+ * cifs tcp session reconnection
+ *
+ * mark tcp session as reconnecting so temporarily locked
+ * mark all smb sessions as reconnecting for tcp session
+ * reconnect tcp session
+ * wake up waiters on reconnection? - (not needed currently)
+ */
+static int __cifs_reconnect(struct TCP_Server_Info *server)
+{
+ int rc = 0;
+
+ if (!cifs_tcp_ses_needs_reconnect(server, 1))
+ return 0;
+
+ cifs_mark_tcp_ses_conns_for_reconnect(server);
do {
try_to_freeze();
-
mutex_lock(&server->srv_mutex);
-
if (!cifs_swn_set_server_dstaddr(server)) {
-#ifdef CONFIG_CIFS_DFS_UPCALL
- if (cifs_sb && cifs_sb->origin_fullpath)
- /*
- * Set up next DFS target server (if any) for reconnect. If DFS
- * feature is disabled, then we will retry last server we
- * connected to before.
- */
- reconn_set_next_dfs_target(server, cifs_sb, &tgt_list, &tgt_it);
- else {
-#endif
- /*
- * Resolve the hostname again to make sure that IP address is up-to-date.
- */
+ /* resolve the hostname again to make sure that IP address is up-to-date */
rc = reconn_set_ipaddr_from_hostname(server);
- if (rc) {
- cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n",
- __func__, rc);
- }
-
-#ifdef CONFIG_CIFS_DFS_UPCALL
- }
-#endif
-
-
+ cifs_dbg(FYI, "%s: reconn_set_ipaddr_from_hostname: rc=%d\n", __func__, rc);
}
if (cifs_rdma_enabled(server))
@@ -372,8 +293,8 @@ cifs_reconnect(struct TCP_Server_Info *server)
else
rc = generic_ip_connect(server);
if (rc) {
- cifs_dbg(FYI, "reconnect error %d\n", rc);
mutex_unlock(&server->srv_mutex);
+ cifs_dbg(FYI, "%s: reconnect error %d\n", __func__, rc);
msleep(3000);
} else {
atomic_inc(&tcpSesReconnectCount);
@@ -387,19 +308,128 @@ cifs_reconnect(struct TCP_Server_Info *server)
}
} while (server->tcpStatus == CifsNeedReconnect);
+ if (server->tcpStatus == CifsNeedNegotiate)
+ mod_delayed_work(cifsiod_wq, &server->echo, 0);
+
+ wake_up(&server->response_q);
+ return rc;
+}
+
#ifdef CONFIG_CIFS_DFS_UPCALL
- if (tgt_it) {
- rc = dfs_cache_noreq_update_tgthint(cifs_sb->origin_fullpath + 1,
- tgt_it);
- if (rc) {
- cifs_server_dbg(VFS, "%s: failed to update DFS target hint: rc = %d\n",
- __func__, rc);
+static int __reconnect_target_unlocked(struct TCP_Server_Info *server, const char *target)
+{
+ int rc;
+ char *hostname;
+
+ if (!cifs_swn_set_server_dstaddr(server)) {
+ if (server->hostname != target) {
+ hostname = extract_hostname(target);
+ if (!IS_ERR(hostname)) {
+ kfree(server->hostname);
+ server->hostname = hostname;
+ } else {
+ cifs_dbg(FYI, "%s: couldn't extract hostname or address from dfs target: %ld\n",
+ __func__, PTR_ERR(hostname));
+ cifs_dbg(FYI, "%s: default to last target server: %s\n", __func__,
+ server->hostname);
+ }
}
- dfs_cache_free_tgts(&tgt_list);
+ /* resolve the hostname again to make sure that IP address is up-to-date. */
+ rc = reconn_set_ipaddr_from_hostname(server);
+ cifs_dbg(FYI, "%s: reconn_set_ipaddr_from_hostname: rc=%d\n", __func__, rc);
}
+ /* Reconnect the socket */
+ if (cifs_rdma_enabled(server))
+ rc = smbd_reconnect(server);
+ else
+ rc = generic_ip_connect(server);
- cifs_put_tcp_super(sb);
-#endif
+ return rc;
+}
+
+static int reconnect_target_unlocked(struct TCP_Server_Info *server, struct dfs_cache_tgt_list *tl,
+ struct dfs_cache_tgt_iterator **target_hint)
+{
+ int rc;
+ struct dfs_cache_tgt_iterator *tit;
+
+ *target_hint = NULL;
+
+ /* If dfs target list is empty, then reconnect to last server */
+ tit = dfs_cache_get_tgt_iterator(tl);
+ if (!tit)
+ return __reconnect_target_unlocked(server, server->hostname);
+
+ /* Otherwise, try every dfs target in @tl */
+ for (; tit; tit = dfs_cache_get_next_tgt(tl, tit)) {
+ rc = __reconnect_target_unlocked(server, dfs_cache_get_tgt_name(tit));
+ if (!rc) {
+ *target_hint = tit;
+ break;
+ }
+ }
+ return rc;
+}
+
+static int reconnect_dfs_server(struct TCP_Server_Info *server)
+{
+ int rc = 0;
+ const char *refpath = server->current_fullpath + 1;
+ struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
+ struct dfs_cache_tgt_iterator *target_hint = NULL;
+ int num_targets = 0;
+
+ /*
+	 * Determine the number of dfs targets the referral path in @server resolves to.
+ *
+ * smb2_reconnect() needs to know how long it should wait based upon the number of dfs
+ * targets (server->nr_targets). It's also possible that the cached referral was cleared
+ * through /proc/fs/cifs/dfscache or the target list is empty due to server settings after
+ * refreshing the referral, so, in this case, default it to 1.
+ */
+ if (!dfs_cache_noreq_find(refpath, NULL, &tl))
+ num_targets = dfs_cache_get_nr_tgts(&tl);
+ if (!num_targets)
+ num_targets = 1;
+
+ if (!cifs_tcp_ses_needs_reconnect(server, num_targets))
+ return 0;
+
+ cifs_mark_tcp_ses_conns_for_reconnect(server);
+
+ do {
+ try_to_freeze();
+ mutex_lock(&server->srv_mutex);
+
+ rc = reconnect_target_unlocked(server, &tl, &target_hint);
+ if (rc) {
+ /* Failed to reconnect socket */
+ mutex_unlock(&server->srv_mutex);
+ cifs_dbg(FYI, "%s: reconnect error %d\n", __func__, rc);
+ msleep(3000);
+ continue;
+ }
+ /*
+ * Socket was created. Update tcp session status to CifsNeedNegotiate so that a
+ * process waiting for reconnect will know it needs to re-establish session and tcon
+ * through the reconnected target server.
+ */
+ atomic_inc(&tcpSesReconnectCount);
+ set_credits(server, 1);
+ spin_lock(&GlobalMid_Lock);
+ if (server->tcpStatus != CifsExiting)
+ server->tcpStatus = CifsNeedNegotiate;
+ spin_unlock(&GlobalMid_Lock);
+ cifs_swn_reset_server_dstaddr(server);
+ mutex_unlock(&server->srv_mutex);
+ } while (server->tcpStatus == CifsNeedReconnect);
+
+ if (target_hint)
+ dfs_cache_noreq_update_tgthint(refpath, target_hint);
+
+ dfs_cache_free_tgts(&tl);
+
+ /* Need to set up echo worker again once connection has been established */
if (server->tcpStatus == CifsNeedNegotiate)
mod_delayed_work(cifsiod_wq, &server->echo, 0);
@@ -407,6 +437,25 @@ cifs_reconnect(struct TCP_Server_Info *server)
return rc;
}
+int cifs_reconnect(struct TCP_Server_Info *server)
+{
+	/* If tcp session is not a dfs connection, then reconnect to last target server */
+ spin_lock(&cifs_tcp_ses_lock);
+ if (!server->is_dfs_conn || !server->origin_fullpath || !server->leaf_fullpath) {
+ spin_unlock(&cifs_tcp_ses_lock);
+ return __cifs_reconnect(server);
+ }
+ spin_unlock(&cifs_tcp_ses_lock);
+
+ return reconnect_dfs_server(server);
+}
+#else
+int cifs_reconnect(struct TCP_Server_Info *server)
+{
+ return __cifs_reconnect(server);
+}
+#endif
+
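/*
 * Illustrative sketch, not part of this patch: reconnect_dfs_server()
 * and reconnect_target_unlocked() above walk the cached target list,
 * stop at the first target that connects, and remember it as the hint
 * for the next reconnect.  The loop below shows that shape with
 * hypothetical names; it is not the cifs implementation.
 */
#include <stdio.h>
#include <string.h>

static int try_connect(const char *target)
{
	/* pretend only "serverB" is reachable */
	return strcmp(target, "serverB") == 0 ? 0 : -1;
}

static int reconnect_any(const char **targets, int n, int *hint)
{
	int i, rc = -1;

	for (i = 0; i < n; i++) {
		rc = try_connect(targets[i]);
		if (rc == 0) {
			*hint = i;	/* like dfs_cache_noreq_update_tgthint() */
			break;
		}
	}
	return rc;
}

int main(void)
{
	const char *targets[] = { "serverA", "serverB", "serverC" };
	int hint = -1;

	if (reconnect_any(targets, 3, &hint) == 0)
		printf("connected to %s (hint=%d)\n", targets[hint], hint);
	return 0;
}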
static void
cifs_echo_request(struct work_struct *work)
{
@@ -665,13 +714,14 @@ dequeue_mid(struct mid_q_entry *mid, bool malformed)
* Trying to handle/dequeue a mid after the send_recv()
* function has finished processing it is a bug.
*/
- if (mid->mid_flags & MID_DELETED)
+ if (mid->mid_flags & MID_DELETED) {
+ spin_unlock(&GlobalMid_Lock);
pr_warn_once("trying to dequeue a deleted mid\n");
- else {
+ } else {
list_del_init(&mid->qhead);
mid->mid_flags |= MID_DELETED;
+ spin_unlock(&GlobalMid_Lock);
}
- spin_unlock(&GlobalMid_Lock);
}
static unsigned int
@@ -794,6 +844,10 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
*/
}
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ kfree(server->origin_fullpath);
+ kfree(server->leaf_fullpath);
+#endif
kfree(server);
length = atomic_dec_return(&tcpSesAllocCount);
@@ -1217,7 +1271,13 @@ static int match_server(struct TCP_Server_Info *server, struct smb3_fs_context *
{
struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr;
- if (ctx->nosharesock)
+ if (ctx->nosharesock) {
+ server->nosharesock = true;
+ return 0;
+ }
+
+ /* this server does not share socket */
+ if (server->nosharesock)
return 0;
/* If multidialect negotiation see if existing sessions match one */
@@ -1283,7 +1343,7 @@ cifs_find_tcp_session(struct smb3_fs_context *ctx)
* Skip ses channels since they're only handled in lower layers
* (e.g. cifs_send_recv).
*/
- if (server->is_channel || !match_server(server, ctx))
+ if (CIFS_SERVER_IS_CHAN(server) || !match_server(server, ctx))
continue;
++server->srv_count;
@@ -1314,6 +1374,10 @@ cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
list_del_init(&server->tcp_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
+ /* For secondary channels, we pick up ref-count on the primary server */
+ if (CIFS_SERVER_IS_CHAN(server))
+ cifs_put_tcp_session(server->primary_server, from_reconnect);
+
cancel_delayed_work_sync(&server->echo);
cancel_delayed_work_sync(&server->resolve);
@@ -1333,7 +1397,10 @@ cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
spin_unlock(&GlobalMid_Lock);
cifs_crypto_secmech_release(server);
- cifs_fscache_release_client_cookie(server);
+
+ /* fscache server cookies are based on primary channel only */
+ if (!CIFS_SERVER_IS_CHAN(server))
+ cifs_fscache_release_client_cookie(server);
kfree(server->session_key.response);
server->session_key.response = NULL;
@@ -1346,7 +1413,8 @@ cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
}
struct TCP_Server_Info *
-cifs_get_tcp_session(struct smb3_fs_context *ctx)
+cifs_get_tcp_session(struct smb3_fs_context *ctx,
+ struct TCP_Server_Info *primary_server)
{
struct TCP_Server_Info *tcp_ses = NULL;
int rc;
@@ -1383,6 +1451,10 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx)
tcp_ses->in_flight = 0;
tcp_ses->max_in_flight = 0;
tcp_ses->credits = 1;
+ if (primary_server) {
+ ++primary_server->srv_count;
+ tcp_ses->primary_server = primary_server;
+ }
init_waitqueue_head(&tcp_ses->response_q);
init_waitqueue_head(&tcp_ses->request_q);
INIT_LIST_HEAD(&tcp_ses->pending_mid_q);
@@ -1403,6 +1475,9 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx)
INIT_DELAYED_WORK(&tcp_ses->resolve, cifs_resolve_server);
INIT_DELAYED_WORK(&tcp_ses->reconnect, smb2_reconnect_server);
mutex_init(&tcp_ses->reconnect_mutex);
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ mutex_init(&tcp_ses->refpath_lock);
+#endif
memcpy(&tcp_ses->srcaddr, &ctx->srcaddr,
sizeof(tcp_ses->srcaddr));
memcpy(&tcp_ses->dstaddr, &ctx->dstaddr,
@@ -1481,7 +1556,9 @@ smbd_connected:
list_add(&tcp_ses->tcp_ses_list, &cifs_tcp_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
- cifs_fscache_get_client_cookie(tcp_ses);
+ /* fscache server cookies are based on primary channel only */
+ if (!CIFS_SERVER_IS_CHAN(tcp_ses))
+ cifs_fscache_get_client_cookie(tcp_ses);
/* queue echo request delayed work */
queue_delayed_work(cifsiod_wq, &tcp_ses->echo, tcp_ses->echo_interval);
@@ -1501,6 +1578,8 @@ out_err_crypto_release:
out_err:
if (tcp_ses) {
+ if (CIFS_SERVER_IS_CHAN(tcp_ses))
+ cifs_put_tcp_session(tcp_ses->primary_server, false);
kfree(tcp_ses->hostname);
if (tcp_ses->ssocket)
sock_release(tcp_ses->ssocket);
@@ -1519,8 +1598,12 @@ static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx)
* If an existing session is limited to less channels than
* requested, it should not be reused
*/
- if (ses->chan_max < ctx->max_channels)
+ spin_lock(&ses->chan_lock);
+ if (ses->chan_max < ctx->max_channels) {
+ spin_unlock(&ses->chan_lock);
return 0;
+ }
+ spin_unlock(&ses->chan_lock);
switch (ses->sectype) {
case Kerberos:
@@ -1655,6 +1738,7 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
void cifs_put_smb_ses(struct cifs_ses *ses)
{
unsigned int rc, xid;
+ unsigned int chan_count;
struct TCP_Server_Info *server = ses->server;
cifs_dbg(FYI, "%s: ses_count=%d\n", __func__, ses->ses_count);
@@ -1696,12 +1780,24 @@ void cifs_put_smb_ses(struct cifs_ses *ses)
list_del_init(&ses->smb_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
+ spin_lock(&ses->chan_lock);
+ chan_count = ses->chan_count;
+ spin_unlock(&ses->chan_lock);
+
/* close any extra channels */
- if (ses->chan_count > 1) {
+ if (chan_count > 1) {
int i;
- for (i = 1; i < ses->chan_count; i++)
+ for (i = 1; i < chan_count; i++) {
+ /*
+ * note: for now, we're okay accessing ses->chans
+ * without chan_lock. But when chans can go away, we'll
+ * need to introduce ref counting to make sure that chan
+ * is not freed from under us.
+ */
cifs_put_tcp_session(ses->chans[i].server, 0);
+ ses->chans[i].server = NULL;
+ }
}
sesInfoFree(ses);
@@ -1885,16 +1981,18 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
ses->status);
mutex_lock(&ses->session_mutex);
- rc = cifs_negotiate_protocol(xid, ses);
- if (rc) {
- mutex_unlock(&ses->session_mutex);
- /* problem -- put our ses reference */
- cifs_put_smb_ses(ses);
- free_xid(xid);
- return ERR_PTR(rc);
- }
if (ses->need_reconnect) {
cifs_dbg(FYI, "Session needs reconnect\n");
+
+ rc = cifs_negotiate_protocol(xid, ses);
+ if (rc) {
+ mutex_unlock(&ses->session_mutex);
+ /* problem -- put our ses reference */
+ cifs_put_smb_ses(ses);
+ free_xid(xid);
+ return ERR_PTR(rc);
+ }
+
rc = cifs_setup_session(xid, ses,
ctx->local_nls);
if (rc) {
@@ -1942,6 +2040,12 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
if (!ses->domainName)
goto get_ses_fail;
}
+ if (ctx->workstation_name) {
+ ses->workstation_name = kstrdup(ctx->workstation_name,
+ GFP_KERNEL);
+ if (!ses->workstation_name)
+ goto get_ses_fail;
+ }
if (ctx->domainauto)
ses->domainAuto = ctx->domainauto;
ses->cred_uid = ctx->cred_uid;
@@ -1952,9 +2056,11 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
mutex_lock(&ses->session_mutex);
/* add server as first channel */
+ spin_lock(&ses->chan_lock);
ses->chans[0].server = server;
ses->chan_count = 1;
ses->chan_max = ctx->multichannel ? ctx->max_channels:1;
+ spin_unlock(&ses->chan_lock);
rc = cifs_negotiate_protocol(xid, ses);
if (!rc)
@@ -2286,8 +2392,6 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
list_add(&tcon->tcon_list, &ses->tcon_list);
spin_unlock(&cifs_tcp_ses_lock);
- cifs_fscache_get_super_cookie(tcon);
-
return tcon;
out_fail:
@@ -2849,73 +2953,64 @@ int cifs_setup_cifs_sb(struct cifs_sb_info *cifs_sb)
}
/* Release all succeeded connections */
-static inline void mount_put_conns(struct cifs_sb_info *cifs_sb,
- unsigned int xid,
- struct TCP_Server_Info *server,
- struct cifs_ses *ses, struct cifs_tcon *tcon)
+static inline void mount_put_conns(struct mount_ctx *mnt_ctx)
{
int rc = 0;
- if (tcon)
- cifs_put_tcon(tcon);
- else if (ses)
- cifs_put_smb_ses(ses);
- else if (server)
- cifs_put_tcp_session(server, 0);
- cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_POSIX_PATHS;
- free_xid(xid);
+ if (mnt_ctx->tcon)
+ cifs_put_tcon(mnt_ctx->tcon);
+ else if (mnt_ctx->ses)
+ cifs_put_smb_ses(mnt_ctx->ses);
+ else if (mnt_ctx->server)
+ cifs_put_tcp_session(mnt_ctx->server, 0);
+ mnt_ctx->cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_POSIX_PATHS;
+ free_xid(mnt_ctx->xid);
}
/* Get connections for tcp, ses and tcon */
-static int mount_get_conns(struct smb3_fs_context *ctx, struct cifs_sb_info *cifs_sb,
- unsigned int *xid,
- struct TCP_Server_Info **nserver,
- struct cifs_ses **nses, struct cifs_tcon **ntcon)
+static int mount_get_conns(struct mount_ctx *mnt_ctx)
{
int rc = 0;
- struct TCP_Server_Info *server;
- struct cifs_ses *ses;
- struct cifs_tcon *tcon;
-
- *nserver = NULL;
- *nses = NULL;
- *ntcon = NULL;
+ struct TCP_Server_Info *server = NULL;
+ struct cifs_ses *ses = NULL;
+ struct cifs_tcon *tcon = NULL;
+ struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+ struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
+ unsigned int xid;
- *xid = get_xid();
+ xid = get_xid();
/* get a reference to a tcp session */
- server = cifs_get_tcp_session(ctx);
+ server = cifs_get_tcp_session(ctx, NULL);
if (IS_ERR(server)) {
rc = PTR_ERR(server);
- return rc;
+ server = NULL;
+ goto out;
}
- *nserver = server;
-
/* get a reference to a SMB session */
ses = cifs_get_smb_ses(server, ctx);
if (IS_ERR(ses)) {
rc = PTR_ERR(ses);
- return rc;
+ ses = NULL;
+ goto out;
}
- *nses = ses;
-
if ((ctx->persistent == true) && (!(ses->server->capabilities &
SMB2_GLOBAL_CAP_PERSISTENT_HANDLES))) {
cifs_server_dbg(VFS, "persistent handles not supported by server\n");
- return -EOPNOTSUPP;
+ rc = -EOPNOTSUPP;
+ goto out;
}
/* search for existing tcon to this server share */
tcon = cifs_get_tcon(ses, ctx);
if (IS_ERR(tcon)) {
rc = PTR_ERR(tcon);
- return rc;
+ tcon = NULL;
+ goto out;
}
- *ntcon = tcon;
-
/* if new SMB3.11 POSIX extensions are supported do not remap / and \ */
if (tcon->posix_extensions)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS;
@@ -2926,17 +3021,19 @@ static int mount_get_conns(struct smb3_fs_context *ctx, struct cifs_sb_info *cif
* reset of caps checks mount to see if unix extensions disabled
* for just this mount.
*/
- reset_cifs_unix_caps(*xid, tcon, cifs_sb, ctx);
+ reset_cifs_unix_caps(xid, tcon, cifs_sb, ctx);
if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) &&
(le64_to_cpu(tcon->fsUnixInfo.Capability) &
- CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP))
- return -EACCES;
+ CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) {
+ rc = -EACCES;
+ goto out;
+ }
} else
tcon->unix_ext = 0; /* server does not support them */
/* do not care if a following call succeed - informational */
if (!tcon->pipe && server->ops->qfs_tcon) {
- server->ops->qfs_tcon(*xid, tcon, cifs_sb);
+ server->ops->qfs_tcon(xid, tcon, cifs_sb);
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE) {
if (tcon->fsDevInfo.DeviceCharacteristics &
cpu_to_le32(FILE_READ_ONLY_DEVICE))
@@ -2946,6 +3043,12 @@ static int mount_get_conns(struct smb3_fs_context *ctx, struct cifs_sb_info *cif
cifs_dbg(VFS, "read only mount of RW share\n");
/* no need to log a RW mount of a typical RW share */
}
+ /*
+ * The cookie is initialized from volume info returned above.
+ * Inside cifs_fscache_get_super_cookie it checks
+ * that we do not get super cookie twice.
+ */
+ cifs_fscache_get_super_cookie(tcon);
}
/*
@@ -2960,7 +3063,13 @@ static int mount_get_conns(struct smb3_fs_context *ctx, struct cifs_sb_info *cif
(cifs_sb->ctx->rsize > server->ops->negotiate_rsize(tcon, ctx)))
cifs_sb->ctx->rsize = server->ops->negotiate_rsize(tcon, ctx);
- return 0;
+out:
+ mnt_ctx->server = server;
+ mnt_ctx->ses = ses;
+ mnt_ctx->tcon = tcon;
+ mnt_ctx->xid = xid;
+
+ return rc;
}
static int mount_setup_tlink(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
@@ -2990,18 +3099,17 @@ static int mount_setup_tlink(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
}
#ifdef CONFIG_CIFS_DFS_UPCALL
-static int mount_get_dfs_conns(struct smb3_fs_context *ctx, struct cifs_sb_info *cifs_sb,
- unsigned int *xid, struct TCP_Server_Info **nserver,
- struct cifs_ses **nses, struct cifs_tcon **ntcon)
+/* Get unique dfs connections */
+static int mount_get_dfs_conns(struct mount_ctx *mnt_ctx)
{
int rc;
- ctx->nosharesock = true;
- rc = mount_get_conns(ctx, cifs_sb, xid, nserver, nses, ntcon);
- if (*nserver) {
+ mnt_ctx->fs_ctx->nosharesock = true;
+ rc = mount_get_conns(mnt_ctx);
+ if (mnt_ctx->server) {
cifs_dbg(FYI, "%s: marking tcp session as a dfs connection\n", __func__);
spin_lock(&cifs_tcp_ses_lock);
- (*nserver)->is_dfs_conn = true;
+ mnt_ctx->server->is_dfs_conn = true;
spin_unlock(&cifs_tcp_ses_lock);
}
return rc;
@@ -3043,190 +3151,38 @@ build_unc_path_to_root(const struct smb3_fs_context *ctx,
}
/*
- * expand_dfs_referral - Perform a dfs referral query and update the cifs_sb
+ * expand_dfs_referral - Update cifs_sb from dfs referral path
*
- * If a referral is found, cifs_sb->ctx->mount_options will be (re-)allocated
- * to a string containing updated options for the submount. Otherwise it
- * will be left untouched.
- *
- * Returns the rc from get_dfs_path to the caller, which can be used to
- * determine whether there were referrals.
+ * On success, cifs_sb->ctx->mount_options will be (re-)allocated to a string containing updated
+ * options for the submount; otherwise it is left untouched.
*/
-static int
-expand_dfs_referral(const unsigned int xid, struct cifs_ses *ses,
- struct smb3_fs_context *ctx, struct cifs_sb_info *cifs_sb,
- char *ref_path)
-{
- int rc;
- struct dfs_info3_param referral = {0};
- char *full_path = NULL, *mdata = NULL;
-
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
- return -EREMOTE;
-
- full_path = build_unc_path_to_root(ctx, cifs_sb, true);
- if (IS_ERR(full_path))
- return PTR_ERR(full_path);
-
- rc = dfs_cache_find(xid, ses, cifs_sb->local_nls, cifs_remap(cifs_sb),
- ref_path, &referral, NULL);
- if (!rc) {
- char *fake_devname = NULL;
-
- mdata = cifs_compose_mount_options(cifs_sb->ctx->mount_options,
- full_path + 1, &referral,
- &fake_devname);
- free_dfs_info_param(&referral);
-
- if (IS_ERR(mdata)) {
- rc = PTR_ERR(mdata);
- mdata = NULL;
- } else {
- /*
- * We can not clear out the whole structure since we
- * no longer have an explicit function to parse
- * a mount-string. Instead we need to clear out the
- * individual fields that are no longer valid.
- */
- kfree(ctx->prepath);
- ctx->prepath = NULL;
- rc = cifs_setup_volume_info(ctx, mdata, fake_devname);
- }
- kfree(fake_devname);
- kfree(cifs_sb->ctx->mount_options);
- cifs_sb->ctx->mount_options = mdata;
- }
- kfree(full_path);
- return rc;
-}
-
-static int get_next_dfs_tgt(struct dfs_cache_tgt_list *tgt_list,
- struct dfs_cache_tgt_iterator **tgt_it)
-{
- if (!*tgt_it)
- *tgt_it = dfs_cache_get_tgt_iterator(tgt_list);
- else
- *tgt_it = dfs_cache_get_next_tgt(tgt_list, *tgt_it);
- return !*tgt_it ? -EHOSTDOWN : 0;
-}
-
-static int update_vol_info(const struct dfs_cache_tgt_iterator *tgt_it,
- struct smb3_fs_context *fake_ctx, struct smb3_fs_context *ctx)
-{
- const char *tgt = dfs_cache_get_tgt_name(tgt_it);
- int len = strlen(tgt) + 2;
- char *new_unc;
-
- new_unc = kmalloc(len, GFP_KERNEL);
- if (!new_unc)
- return -ENOMEM;
- scnprintf(new_unc, len, "\\%s", tgt);
-
- kfree(ctx->UNC);
- ctx->UNC = new_unc;
-
- if (fake_ctx->prepath) {
- kfree(ctx->prepath);
- ctx->prepath = fake_ctx->prepath;
- fake_ctx->prepath = NULL;
- }
- memcpy(&ctx->dstaddr, &fake_ctx->dstaddr, sizeof(ctx->dstaddr));
-
- return 0;
-}
-
-static int do_dfs_failover(const char *path, const char *full_path, struct cifs_sb_info *cifs_sb,
- struct smb3_fs_context *ctx, struct cifs_ses *root_ses,
- unsigned int *xid, struct TCP_Server_Info **server,
- struct cifs_ses **ses, struct cifs_tcon **tcon)
+static int expand_dfs_referral(struct mount_ctx *mnt_ctx, const char *full_path,
+ struct dfs_info3_param *referral)
{
int rc;
- char *npath = NULL;
- struct dfs_cache_tgt_list tgt_list = DFS_CACHE_TGT_LIST_INIT(tgt_list);
- struct dfs_cache_tgt_iterator *tgt_it = NULL;
- struct smb3_fs_context tmp_ctx = {NULL};
-
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
- return -EOPNOTSUPP;
-
- npath = dfs_cache_canonical_path(path, cifs_sb->local_nls, cifs_remap(cifs_sb));
- if (IS_ERR(npath))
- return PTR_ERR(npath);
-
- cifs_dbg(FYI, "%s: path=%s full_path=%s\n", __func__, npath, full_path);
-
- rc = dfs_cache_noreq_find(npath, NULL, &tgt_list);
- if (rc)
- goto out;
- /*
- * We use a 'tmp_ctx' here because we need pass it down to the mount_{get,put} functions to
- * test connection against new DFS targets.
- */
- rc = smb3_fs_context_dup(&tmp_ctx, ctx);
- if (rc)
- goto out;
-
- for (;;) {
- struct dfs_info3_param ref = {0};
- char *fake_devname = NULL, *mdata = NULL;
-
- /* Get next DFS target server - if any */
- rc = get_next_dfs_tgt(&tgt_list, &tgt_it);
- if (rc)
- break;
-
- rc = dfs_cache_get_tgt_referral(npath, tgt_it, &ref);
- if (rc)
- break;
-
- cifs_dbg(FYI, "%s: old ctx: UNC=%s prepath=%s\n", __func__, tmp_ctx.UNC,
- tmp_ctx.prepath);
-
- mdata = cifs_compose_mount_options(cifs_sb->ctx->mount_options, full_path + 1, &ref,
- &fake_devname);
- free_dfs_info_param(&ref);
-
- if (IS_ERR(mdata)) {
- rc = PTR_ERR(mdata);
- mdata = NULL;
- } else
- rc = cifs_setup_volume_info(&tmp_ctx, mdata, fake_devname);
-
- kfree(mdata);
- kfree(fake_devname);
-
- if (rc)
- break;
-
- cifs_dbg(FYI, "%s: new ctx: UNC=%s prepath=%s\n", __func__, tmp_ctx.UNC,
- tmp_ctx.prepath);
-
- mount_put_conns(cifs_sb, *xid, *server, *ses, *tcon);
- rc = mount_get_dfs_conns(&tmp_ctx, cifs_sb, xid, server, ses, tcon);
- if (!rc || (*server && *ses)) {
- /*
- * We were able to connect to new target server. Update current context with
- * new target server.
- */
- rc = update_vol_info(tgt_it, &tmp_ctx, ctx);
- break;
- }
- }
- if (!rc) {
- cifs_dbg(FYI, "%s: final ctx: UNC=%s prepath=%s\n", __func__, tmp_ctx.UNC,
- tmp_ctx.prepath);
+ struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
+ struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+ char *fake_devname = NULL, *mdata = NULL;
+
+ mdata = cifs_compose_mount_options(cifs_sb->ctx->mount_options, full_path + 1, referral,
+ &fake_devname);
+ if (IS_ERR(mdata)) {
+ rc = PTR_ERR(mdata);
+ mdata = NULL;
+ } else {
/*
- * Update DFS target hint in DFS referral cache with the target server we
- * successfully reconnected to.
+ * We can not clear out the whole structure since we no longer have an explicit
+ * function to parse a mount-string. Instead we need to clear out the individual
+ * fields that are no longer valid.
*/
- rc = dfs_cache_update_tgthint(*xid, root_ses ? root_ses : *ses, cifs_sb->local_nls,
- cifs_remap(cifs_sb), path, tgt_it);
+ kfree(ctx->prepath);
+ ctx->prepath = NULL;
+ rc = cifs_setup_volume_info(ctx, mdata, fake_devname);
}
+ kfree(fake_devname);
+ kfree(cifs_sb->ctx->mount_options);
+ cifs_sb->ctx->mount_options = mdata;
-out:
- kfree(npath);
- smb3_cleanup_fs_context_contents(&tmp_ctx);
- dfs_cache_free_tgts(&tgt_list);
return rc;
}
#endif
@@ -3333,12 +3289,14 @@ cifs_are_all_path_components_accessible(struct TCP_Server_Info *server,
* Check if path is remote (e.g. a DFS share). Return -EREMOTE if it is,
* otherwise 0.
*/
-static int is_path_remote(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx,
- const unsigned int xid,
- struct TCP_Server_Info *server,
- struct cifs_tcon *tcon)
+static int is_path_remote(struct mount_ctx *mnt_ctx)
{
int rc;
+ struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
+ struct TCP_Server_Info *server = mnt_ctx->server;
+ unsigned int xid = mnt_ctx->xid;
+ struct cifs_tcon *tcon = mnt_ctx->tcon;
+ struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
char *full_path;
if (!server->ops->is_path_accessible)
@@ -3376,280 +3334,289 @@ static int is_path_remote(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *
}
#ifdef CONFIG_CIFS_DFS_UPCALL
-static void set_root_ses(struct cifs_sb_info *cifs_sb, const uuid_t *mount_id, struct cifs_ses *ses,
- struct cifs_ses **root_ses)
+static void set_root_ses(struct mount_ctx *mnt_ctx)
{
- if (ses) {
+ if (mnt_ctx->ses) {
spin_lock(&cifs_tcp_ses_lock);
- ses->ses_count++;
+ mnt_ctx->ses->ses_count++;
spin_unlock(&cifs_tcp_ses_lock);
- dfs_cache_add_refsrv_session(mount_id, ses);
+ dfs_cache_add_refsrv_session(&mnt_ctx->mount_id, mnt_ctx->ses);
}
- *root_ses = ses;
+ mnt_ctx->root_ses = mnt_ctx->ses;
}
-/* Set up next dfs prefix path in @dfs_path */
-static int next_dfs_prepath(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx,
- const unsigned int xid, struct TCP_Server_Info *server,
- struct cifs_tcon *tcon, char **dfs_path)
+static int is_dfs_mount(struct mount_ctx *mnt_ctx, bool *isdfs, struct dfs_cache_tgt_list *root_tl)
{
- char *path, *npath;
- int added_treename = is_tcon_dfs(tcon);
int rc;
+ struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
+ struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
- path = cifs_build_path_to_root(ctx, cifs_sb, tcon, added_treename);
- if (!path)
- return -ENOMEM;
+ *isdfs = true;
- rc = is_path_remote(cifs_sb, ctx, xid, server, tcon);
- if (rc == -EREMOTE) {
- struct smb3_fs_context v = {NULL};
- /* if @path contains a tree name, skip it in the prefix path */
- if (added_treename) {
- rc = smb3_parse_devname(path, &v);
- if (rc)
- goto out;
- npath = build_unc_path_to_root(&v, cifs_sb, true);
- smb3_cleanup_fs_context_contents(&v);
- } else {
- v.UNC = ctx->UNC;
- v.prepath = path + 1;
- npath = build_unc_path_to_root(&v, cifs_sb, true);
- }
+ rc = mount_get_conns(mnt_ctx);
+ /*
+	 * If called with the 'nodfs' mount option, then skip DFS resolving. Otherwise unconditionally
+	 * try to get a DFS referral (even cached) to determine whether it is a DFS mount.
+ *
+ * Skip prefix path to provide support for DFS referrals from w2k8 servers which don't seem
+ * to respond with PATH_NOT_COVERED to requests that include the prefix.
+ */
+ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) ||
+ dfs_cache_find(mnt_ctx->xid, mnt_ctx->ses, cifs_sb->local_nls, cifs_remap(cifs_sb),
+ ctx->UNC + 1, NULL, root_tl)) {
+ if (rc)
+ return rc;
+ /* Check if it is fully accessible and then mount it */
+ rc = is_path_remote(mnt_ctx);
+ if (!rc)
+ *isdfs = false;
+ else if (rc != -EREMOTE)
+ return rc;
+ }
+ return 0;
+}
- if (IS_ERR(npath)) {
- rc = PTR_ERR(npath);
- goto out;
- }
+static int connect_dfs_target(struct mount_ctx *mnt_ctx, const char *full_path,
+ const char *ref_path, struct dfs_cache_tgt_iterator *tit)
+{
+ int rc;
+ struct dfs_info3_param ref = {};
+ struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
+ char *oldmnt = cifs_sb->ctx->mount_options;
- kfree(*dfs_path);
- *dfs_path = npath;
- rc = -EREMOTE;
+ rc = dfs_cache_get_tgt_referral(ref_path, tit, &ref);
+ if (rc)
+ goto out;
+
+ rc = expand_dfs_referral(mnt_ctx, full_path, &ref);
+ if (rc)
+ goto out;
+
+ /* Connect to new target only if we were redirected (e.g. mount options changed) */
+ if (oldmnt != cifs_sb->ctx->mount_options) {
+ mount_put_conns(mnt_ctx);
+ rc = mount_get_dfs_conns(mnt_ctx);
+ }
+ if (!rc) {
+ if (cifs_is_referral_server(mnt_ctx->tcon, &ref))
+ set_root_ses(mnt_ctx);
+ rc = dfs_cache_update_tgthint(mnt_ctx->xid, mnt_ctx->root_ses, cifs_sb->local_nls,
+ cifs_remap(cifs_sb), ref_path, tit);
}
out:
- kfree(path);
+ free_dfs_info_param(&ref);
return rc;
}
-/* Check if resolved targets can handle any DFS referrals */
-static int is_referral_server(const char *ref_path, struct cifs_sb_info *cifs_sb,
- struct cifs_tcon *tcon, bool *ref_server)
+static int connect_dfs_root(struct mount_ctx *mnt_ctx, struct dfs_cache_tgt_list *root_tl)
{
int rc;
- struct dfs_info3_param ref = {0};
+ char *full_path;
+ struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
+ struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+ struct dfs_cache_tgt_iterator *tit;
- cifs_dbg(FYI, "%s: ref_path=%s\n", __func__, ref_path);
+ /* Put initial connections as they might be shared with other mounts. We need unique dfs
+	 * connections per mount to fail over properly, so mount_get_dfs_conns() must be used from
+ * now on.
+ */
+ mount_put_conns(mnt_ctx);
+ mount_get_dfs_conns(mnt_ctx);
- if (is_tcon_dfs(tcon)) {
- *ref_server = true;
- } else {
- char *npath;
+ full_path = build_unc_path_to_root(ctx, cifs_sb, true);
+ if (IS_ERR(full_path))
+ return PTR_ERR(full_path);
- npath = dfs_cache_canonical_path(ref_path, cifs_sb->local_nls, cifs_remap(cifs_sb));
- if (IS_ERR(npath))
- return PTR_ERR(npath);
+ mnt_ctx->origin_fullpath = dfs_cache_canonical_path(ctx->UNC, cifs_sb->local_nls,
+ cifs_remap(cifs_sb));
+ if (IS_ERR(mnt_ctx->origin_fullpath)) {
+ rc = PTR_ERR(mnt_ctx->origin_fullpath);
+ mnt_ctx->origin_fullpath = NULL;
+ goto out;
+ }
- rc = dfs_cache_noreq_find(npath, &ref, NULL);
- kfree(npath);
- if (rc) {
- cifs_dbg(VFS, "%s: dfs_cache_noreq_find: failed (rc=%d)\n", __func__, rc);
- return rc;
+ /* Try all dfs root targets */
+ for (rc = -ENOENT, tit = dfs_cache_get_tgt_iterator(root_tl);
+ tit; tit = dfs_cache_get_next_tgt(root_tl, tit)) {
+ rc = connect_dfs_target(mnt_ctx, full_path, mnt_ctx->origin_fullpath + 1, tit);
+ if (!rc) {
+ mnt_ctx->leaf_fullpath = kstrdup(mnt_ctx->origin_fullpath, GFP_KERNEL);
+ if (!mnt_ctx->leaf_fullpath)
+ rc = -ENOMEM;
+ break;
}
- cifs_dbg(FYI, "%s: ref.flags=0x%x\n", __func__, ref.flags);
- /*
- * Check if all targets are capable of handling DFS referrals as per
- * MS-DFSC 2.2.4 RESP_GET_DFS_REFERRAL.
- */
- *ref_server = !!(ref.flags & DFSREF_REFERRAL_SERVER);
- free_dfs_info_param(&ref);
}
- return 0;
+
+out:
+ kfree(full_path);
+ return rc;
}
-int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
+static int __follow_dfs_link(struct mount_ctx *mnt_ctx)
{
- int rc = 0;
- unsigned int xid;
- struct TCP_Server_Info *server = NULL;
- struct cifs_ses *ses = NULL, *root_ses = NULL;
- struct cifs_tcon *tcon = NULL;
- int count = 0;
- uuid_t mount_id = {0};
- char *ref_path = NULL, *full_path = NULL;
- char *oldmnt = NULL;
- bool ref_server = false;
+ int rc;
+ struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
+ struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+ char *full_path;
+ struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
+ struct dfs_cache_tgt_iterator *tit;
- rc = mount_get_conns(ctx, cifs_sb, &xid, &server, &ses, &tcon);
- /*
- * If called with 'nodfs' mount option, then skip DFS resolving. Otherwise unconditionally
- * try to get an DFS referral (even cached) to determine whether it is an DFS mount.
- *
- * Skip prefix path to provide support for DFS referrals from w2k8 servers which don't seem
- * to respond with PATH_NOT_COVERED to requests that include the prefix.
- */
- if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) ||
- dfs_cache_find(xid, ses, cifs_sb->local_nls, cifs_remap(cifs_sb), ctx->UNC + 1, NULL,
- NULL)) {
- if (rc)
- goto error;
- /* Check if it is fully accessible and then mount it */
- rc = is_path_remote(cifs_sb, ctx, xid, server, tcon);
- if (!rc)
- goto out;
- if (rc != -EREMOTE)
- goto error;
+ full_path = build_unc_path_to_root(ctx, cifs_sb, true);
+ if (IS_ERR(full_path))
+ return PTR_ERR(full_path);
+
+ kfree(mnt_ctx->leaf_fullpath);
+ mnt_ctx->leaf_fullpath = dfs_cache_canonical_path(full_path, cifs_sb->local_nls,
+ cifs_remap(cifs_sb));
+ if (IS_ERR(mnt_ctx->leaf_fullpath)) {
+ rc = PTR_ERR(mnt_ctx->leaf_fullpath);
+ mnt_ctx->leaf_fullpath = NULL;
+ goto out;
}
- mount_put_conns(cifs_sb, xid, server, ses, tcon);
- /*
- * Ignore error check here because we may failover to other targets from cached a
- * referral.
- */
- (void)mount_get_dfs_conns(ctx, cifs_sb, &xid, &server, &ses, &tcon);
+ /* Get referral from dfs link */
+ rc = dfs_cache_find(mnt_ctx->xid, mnt_ctx->root_ses, cifs_sb->local_nls,
+ cifs_remap(cifs_sb), mnt_ctx->leaf_fullpath + 1, NULL, &tl);
+ if (rc)
+ goto out;
- /* Get path of DFS root */
- ref_path = build_unc_path_to_root(ctx, cifs_sb, false);
- if (IS_ERR(ref_path)) {
- rc = PTR_ERR(ref_path);
- ref_path = NULL;
- goto error;
+ /* Try all dfs link targets */
+ for (rc = -ENOENT, tit = dfs_cache_get_tgt_iterator(&tl);
+ tit; tit = dfs_cache_get_next_tgt(&tl, tit)) {
+ rc = connect_dfs_target(mnt_ctx, full_path, mnt_ctx->leaf_fullpath + 1, tit);
+ if (!rc) {
+ rc = is_path_remote(mnt_ctx);
+ break;
+ }
+ }
+
+out:
+ kfree(full_path);
+ dfs_cache_free_tgts(&tl);
+ return rc;
+}
+
+static int follow_dfs_link(struct mount_ctx *mnt_ctx)
+{
+ int rc;
+ struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
+ struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+ char *full_path;
+ int num_links = 0;
+
+ full_path = build_unc_path_to_root(ctx, cifs_sb, true);
+ if (IS_ERR(full_path))
+ return PTR_ERR(full_path);
+
+ kfree(mnt_ctx->origin_fullpath);
+ mnt_ctx->origin_fullpath = dfs_cache_canonical_path(full_path, cifs_sb->local_nls,
+ cifs_remap(cifs_sb));
+ kfree(full_path);
+
+ if (IS_ERR(mnt_ctx->origin_fullpath)) {
+ rc = PTR_ERR(mnt_ctx->origin_fullpath);
+ mnt_ctx->origin_fullpath = NULL;
+ return rc;
}
- uuid_gen(&mount_id);
- set_root_ses(cifs_sb, &mount_id, ses, &root_ses);
do {
- /* Save full path of last DFS path we used to resolve final target server */
- kfree(full_path);
- full_path = build_unc_path_to_root(ctx, cifs_sb, !!count);
- if (IS_ERR(full_path)) {
- rc = PTR_ERR(full_path);
- full_path = NULL;
- break;
- }
- /* Chase referral */
- oldmnt = cifs_sb->ctx->mount_options;
- rc = expand_dfs_referral(xid, root_ses, ctx, cifs_sb, ref_path + 1);
- if (rc)
- break;
- /* Connect to new DFS target only if we were redirected */
- if (oldmnt != cifs_sb->ctx->mount_options) {
- mount_put_conns(cifs_sb, xid, server, ses, tcon);
- rc = mount_get_dfs_conns(ctx, cifs_sb, &xid, &server, &ses, &tcon);
- }
- if (rc && !server && !ses) {
- /* Failed to connect. Try to connect to other targets in the referral. */
- rc = do_dfs_failover(ref_path + 1, full_path, cifs_sb, ctx, root_ses, &xid,
- &server, &ses, &tcon);
- }
- if (rc == -EACCES || rc == -EOPNOTSUPP || !server || !ses)
+ rc = __follow_dfs_link(mnt_ctx);
+ if (!rc || rc != -EREMOTE)
break;
- if (!tcon)
- continue;
+ } while (rc = -ELOOP, ++num_links < MAX_NESTED_LINKS);
- /* Make sure that requests go through new root servers */
- rc = is_referral_server(ref_path + 1, cifs_sb, tcon, &ref_server);
- if (rc)
- break;
- if (ref_server)
- set_root_ses(cifs_sb, &mount_id, ses, &root_ses);
+ return rc;
+}
- /* Get next dfs path and then continue chasing them if -EREMOTE */
- rc = next_dfs_prepath(cifs_sb, ctx, xid, server, tcon, &ref_path);
- /* Prevent recursion on broken link referrals */
- if (rc == -EREMOTE && ++count > MAX_NESTED_LINKS)
- rc = -ELOOP;
- } while (rc == -EREMOTE);
+/* Set up DFS referral paths for failover */
+static void setup_server_referral_paths(struct mount_ctx *mnt_ctx)
+{
+ struct TCP_Server_Info *server = mnt_ctx->server;
+
+ server->origin_fullpath = mnt_ctx->origin_fullpath;
+ server->leaf_fullpath = mnt_ctx->leaf_fullpath;
+ server->current_fullpath = mnt_ctx->leaf_fullpath;
+ mnt_ctx->origin_fullpath = mnt_ctx->leaf_fullpath = NULL;
+}
- if (rc || !tcon || !ses)
+int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
+{
+ int rc;
+ struct mount_ctx mnt_ctx = { .cifs_sb = cifs_sb, .fs_ctx = ctx, };
+ struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
+ bool isdfs;
+
+ rc = is_dfs_mount(&mnt_ctx, &isdfs, &tl);
+ if (rc)
goto error;
+ if (!isdfs)
+ goto out;
- kfree(ref_path);
- /*
- * Store DFS full path in both superblock and tree connect structures.
- *
- * For DFS root mounts, the prefix path (cifs_sb->prepath) is preserved during reconnect so
- * only the root path is set in cifs_sb->origin_fullpath and tcon->dfs_path. And for DFS
- * links, the prefix path is included in both and may be changed during reconnect. See
- * cifs_tree_connect().
- */
- ref_path = dfs_cache_canonical_path(full_path, cifs_sb->local_nls, cifs_remap(cifs_sb));
- kfree(full_path);
- full_path = NULL;
+ uuid_gen(&mnt_ctx.mount_id);
+ rc = connect_dfs_root(&mnt_ctx, &tl);
+ dfs_cache_free_tgts(&tl);
- if (IS_ERR(ref_path)) {
- rc = PTR_ERR(ref_path);
- ref_path = NULL;
+ if (rc)
goto error;
- }
- cifs_sb->origin_fullpath = ref_path;
- ref_path = kstrdup(cifs_sb->origin_fullpath, GFP_KERNEL);
- if (!ref_path) {
- rc = -ENOMEM;
+ rc = is_path_remote(&mnt_ctx);
+ if (rc == -EREMOTE)
+ rc = follow_dfs_link(&mnt_ctx);
+ if (rc)
goto error;
- }
- spin_lock(&cifs_tcp_ses_lock);
- tcon->dfs_path = ref_path;
- ref_path = NULL;
- spin_unlock(&cifs_tcp_ses_lock);
+ setup_server_referral_paths(&mnt_ctx);
/*
- * After reconnecting to a different server, unique ids won't
- * match anymore, so we disable serverino. This prevents
- * dentry revalidation to think the dentry are stale (ESTALE).
+ * After reconnecting to a different server, unique ids won't match anymore, so we disable
+	 * serverino. This prevents dentry revalidation from thinking the dentries are stale (ESTALE).
*/
cifs_autodisable_serverino(cifs_sb);
/*
- * Force the use of prefix path to support failover on DFS paths that
- * resolve to targets that have different prefix paths.
+ * Force the use of prefix path to support failover on DFS paths that resolve to targets
+ * that have different prefix paths.
*/
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
kfree(cifs_sb->prepath);
cifs_sb->prepath = ctx->prepath;
ctx->prepath = NULL;
- uuid_copy(&cifs_sb->dfs_mount_id, &mount_id);
+ uuid_copy(&cifs_sb->dfs_mount_id, &mnt_ctx.mount_id);
out:
- free_xid(xid);
- cifs_try_adding_channels(cifs_sb, ses);
- return mount_setup_tlink(cifs_sb, ses, tcon);
+ free_xid(mnt_ctx.xid);
+ cifs_try_adding_channels(cifs_sb, mnt_ctx.ses);
+ return mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon);
error:
- kfree(ref_path);
- kfree(full_path);
- kfree(cifs_sb->origin_fullpath);
- dfs_cache_put_refsrv_sessions(&mount_id);
- mount_put_conns(cifs_sb, xid, server, ses, tcon);
+ dfs_cache_put_refsrv_sessions(&mnt_ctx.mount_id);
+ kfree(mnt_ctx.origin_fullpath);
+ kfree(mnt_ctx.leaf_fullpath);
+ mount_put_conns(&mnt_ctx);
return rc;
}
#else
int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
{
int rc = 0;
- unsigned int xid;
- struct cifs_ses *ses;
- struct cifs_tcon *tcon;
- struct TCP_Server_Info *server;
+ struct mount_ctx mnt_ctx = { .cifs_sb = cifs_sb, .fs_ctx = ctx, };
- rc = mount_get_conns(ctx, cifs_sb, &xid, &server, &ses, &tcon);
+ rc = mount_get_conns(&mnt_ctx);
if (rc)
goto error;
- if (tcon) {
- rc = is_path_remote(cifs_sb, ctx, xid, server, tcon);
+ if (mnt_ctx.tcon) {
+ rc = is_path_remote(&mnt_ctx);
if (rc == -EREMOTE)
rc = -EOPNOTSUPP;
if (rc)
goto error;
}
- free_xid(xid);
-
- return mount_setup_tlink(cifs_sb, ses, tcon);
+ free_xid(mnt_ctx.xid);
+ return mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon);
error:
- mount_put_conns(cifs_sb, xid, server, ses, tcon);
+ mount_put_conns(&mnt_ctx);
return rc;
}
#endif
@@ -3818,7 +3785,6 @@ cifs_umount(struct cifs_sb_info *cifs_sb)
kfree(cifs_sb->prepath);
#ifdef CONFIG_CIFS_DFS_UPCALL
dfs_cache_put_refsrv_sessions(&cifs_sb->dfs_mount_id);
- kfree(cifs_sb->origin_fullpath);
#endif
call_rcu(&cifs_sb->rcu, delayed_free);
}
@@ -4145,104 +4111,246 @@ cifs_prune_tlinks(struct work_struct *work)
}
#ifdef CONFIG_CIFS_DFS_UPCALL
-int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const struct nls_table *nlsc)
+static void mark_tcon_tcp_ses_for_reconnect(struct cifs_tcon *tcon)
+{
+ int i;
+
+ for (i = 0; i < tcon->ses->chan_count; i++) {
+ spin_lock(&GlobalMid_Lock);
+ if (tcon->ses->chans[i].server->tcpStatus != CifsExiting)
+ tcon->ses->chans[i].server->tcpStatus = CifsNeedReconnect;
+ spin_unlock(&GlobalMid_Lock);
+ }
+}
+
+/* Update dfs referral path of superblock */
+static int update_server_fullpath(struct TCP_Server_Info *server, struct cifs_sb_info *cifs_sb,
+ const char *target)
+{
+ int rc = 0;
+ size_t len = strlen(target);
+ char *refpath, *npath;
+
+ if (unlikely(len < 2 || *target != '\\'))
+ return -EINVAL;
+
+ if (target[1] == '\\') {
+ len += 1;
+ refpath = kmalloc(len, GFP_KERNEL);
+ if (!refpath)
+ return -ENOMEM;
+
+ scnprintf(refpath, len, "%s", target);
+ } else {
+ len += sizeof("\\");
+ refpath = kmalloc(len, GFP_KERNEL);
+ if (!refpath)
+ return -ENOMEM;
+
+ scnprintf(refpath, len, "\\%s", target);
+ }
+
+ npath = dfs_cache_canonical_path(refpath, cifs_sb->local_nls, cifs_remap(cifs_sb));
+ kfree(refpath);
+
+ if (IS_ERR(npath)) {
+ rc = PTR_ERR(npath);
+ } else {
+ mutex_lock(&server->refpath_lock);
+ kfree(server->leaf_fullpath);
+ server->leaf_fullpath = npath;
+ mutex_unlock(&server->refpath_lock);
+ server->current_fullpath = server->leaf_fullpath;
+ }
+ return rc;
+}
+
+static int target_share_matches_server(struct TCP_Server_Info *server, const char *tcp_host,
+ size_t tcp_host_len, char *share, bool *target_match)
+{
+ int rc = 0;
+ const char *dfs_host;
+ size_t dfs_host_len;
+
+ *target_match = true;
+ extract_unc_hostname(share, &dfs_host, &dfs_host_len);
+
+ /* Check if hostnames or addresses match */
+ if (dfs_host_len != tcp_host_len || strncasecmp(dfs_host, tcp_host, dfs_host_len) != 0) {
+ cifs_dbg(FYI, "%s: %.*s doesn't match %.*s\n", __func__, (int)dfs_host_len,
+ dfs_host, (int)tcp_host_len, tcp_host);
+ rc = match_target_ip(server, dfs_host, dfs_host_len, target_match);
+ if (rc)
+ cifs_dbg(VFS, "%s: failed to match target ip: %d\n", __func__, rc);
+ }
+ return rc;
+}
+
+static int __tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb, char *tree, bool islink,
+ struct dfs_cache_tgt_list *tl)
{
int rc;
struct TCP_Server_Info *server = tcon->ses->server;
const struct smb_version_operations *ops = server->ops;
- struct dfs_cache_tgt_list tl;
- struct dfs_cache_tgt_iterator *it = NULL;
- char *tree;
+ struct cifs_tcon *ipc = tcon->ses->tcon_ipc;
+ char *share = NULL, *prefix = NULL;
const char *tcp_host;
size_t tcp_host_len;
- const char *dfs_host;
- size_t dfs_host_len;
- char *share = NULL, *prefix = NULL;
- struct dfs_info3_param ref = {0};
- bool isroot;
+ struct dfs_cache_tgt_iterator *tit;
+ bool target_match;
- tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL);
- if (!tree)
- return -ENOMEM;
+ extract_unc_hostname(server->hostname, &tcp_host, &tcp_host_len);
- /* If it is not dfs or there was no cached dfs referral, then reconnect to same share */
- if (!tcon->dfs_path || dfs_cache_noreq_find(tcon->dfs_path + 1, &ref, &tl)) {
- if (tcon->ipc) {
- scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname);
- rc = ops->tree_connect(xid, tcon->ses, tree, tcon, nlsc);
- } else {
- rc = ops->tree_connect(xid, tcon->ses, tcon->treeName, tcon, nlsc);
- }
+ tit = dfs_cache_get_tgt_iterator(tl);
+ if (!tit) {
+ rc = -ENOENT;
goto out;
}
- isroot = ref.server_type == DFS_TYPE_ROOT;
- free_dfs_info_param(&ref);
-
- extract_unc_hostname(server->hostname, &tcp_host, &tcp_host_len);
-
- for (it = dfs_cache_get_tgt_iterator(&tl); it; it = dfs_cache_get_next_tgt(&tl, it)) {
- bool target_match;
+ /* Try to tree connect to all dfs targets */
+ for (; tit; tit = dfs_cache_get_next_tgt(tl, tit)) {
+ const char *target = dfs_cache_get_tgt_name(tit);
+ struct dfs_cache_tgt_list ntl = DFS_CACHE_TGT_LIST_INIT(ntl);
kfree(share);
kfree(prefix);
- share = NULL;
- prefix = NULL;
+ share = prefix = NULL;
- rc = dfs_cache_get_tgt_share(tcon->dfs_path + 1, it, &share, &prefix);
+ /* Check if share matches with tcp ses */
+ rc = dfs_cache_get_tgt_share(server->current_fullpath + 1, tit, &share, &prefix);
if (rc) {
- cifs_dbg(VFS, "%s: failed to parse target share %d\n",
- __func__, rc);
- continue;
+ cifs_dbg(VFS, "%s: failed to parse target share: %d\n", __func__, rc);
+ break;
}
- extract_unc_hostname(share, &dfs_host, &dfs_host_len);
-
- if (dfs_host_len != tcp_host_len
- || strncasecmp(dfs_host, tcp_host, dfs_host_len) != 0) {
- cifs_dbg(FYI, "%s: %.*s doesn't match %.*s\n", __func__, (int)dfs_host_len,
- dfs_host, (int)tcp_host_len, tcp_host);
+ rc = target_share_matches_server(server, tcp_host, tcp_host_len, share,
+ &target_match);
+ if (rc)
+ break;
+ if (!target_match) {
+ rc = -EHOSTUNREACH;
+ continue;
+ }
- rc = match_target_ip(server, dfs_host, dfs_host_len, &target_match);
- if (rc) {
- cifs_dbg(VFS, "%s: failed to match target ip: %d\n", __func__, rc);
+ if (ipc->need_reconnect) {
+ scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname);
+ rc = ops->tree_connect(xid, ipc->ses, tree, ipc, cifs_sb->local_nls);
+ if (rc)
break;
- }
-
- if (!target_match) {
- cifs_dbg(FYI, "%s: skipping target\n", __func__);
- continue;
- }
}
- if (tcon->ipc) {
- scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", share);
- rc = ops->tree_connect(xid, tcon->ses, tree, tcon, nlsc);
+ scnprintf(tree, MAX_TREE_SIZE, "\\%s", share);
+ if (!islink) {
+ rc = ops->tree_connect(xid, tcon->ses, tree, tcon, cifs_sb->local_nls);
+ break;
+ }
+ /*
+		 * If no dfs referrals were returned from the link target, then just do a TREE_CONNECT
+		 * to it. Otherwise, cache the dfs referral and then mark the current tcp ses for
+		 * reconnect so that either the demultiplex thread or the echo worker will reconnect to
+		 * the newly resolved target.
+ */
+ if (dfs_cache_find(xid, tcon->ses, cifs_sb->local_nls, cifs_remap(cifs_sb), target,
+ NULL, &ntl)) {
+ rc = ops->tree_connect(xid, tcon->ses, tree, tcon, cifs_sb->local_nls);
+ if (rc)
+ continue;
+ rc = dfs_cache_noreq_update_tgthint(server->current_fullpath + 1, tit);
+ if (!rc)
+ rc = cifs_update_super_prepath(cifs_sb, prefix);
} else {
- scnprintf(tree, MAX_TREE_SIZE, "\\%s", share);
- rc = ops->tree_connect(xid, tcon->ses, tree, tcon, nlsc);
- /* Only handle prefix paths of DFS link targets */
- if (!rc && !isroot) {
- rc = update_super_prepath(tcon, prefix);
- break;
- }
+ /* Target is another dfs share */
+ rc = update_server_fullpath(server, cifs_sb, target);
+ dfs_cache_free_tgts(tl);
+
+ if (!rc) {
+ rc = -EREMOTE;
+ list_replace_init(&ntl.tl_list, &tl->tl_list);
+ } else
+ dfs_cache_free_tgts(&ntl);
}
- if (rc == -EREMOTE)
- break;
+ break;
}
+out:
kfree(share);
kfree(prefix);
- if (!rc) {
- if (it)
- rc = dfs_cache_noreq_update_tgthint(tcon->dfs_path + 1, it);
- else
- rc = -ENOENT;
+ return rc;
+}
+
+static int tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb, char *tree, bool islink,
+ struct dfs_cache_tgt_list *tl)
+{
+ int rc;
+ int num_links = 0;
+ struct TCP_Server_Info *server = tcon->ses->server;
+
+ do {
+ rc = __tree_connect_dfs_target(xid, tcon, cifs_sb, tree, islink, tl);
+ if (!rc || rc != -EREMOTE)
+ break;
+ } while (rc = -ELOOP, ++num_links < MAX_NESTED_LINKS);
+ /*
+	 * If we couldn't tree connect to any targets from the last referral path, then retry from
+	 * the original referral path.
+ */
+ if (rc && server->current_fullpath != server->origin_fullpath) {
+ server->current_fullpath = server->origin_fullpath;
+ mark_tcon_tcp_ses_for_reconnect(tcon);
}
- dfs_cache_free_tgts(&tl);
+
+ dfs_cache_free_tgts(tl);
+ return rc;
+}
+
+int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const struct nls_table *nlsc)
+{
+ int rc;
+ struct TCP_Server_Info *server = tcon->ses->server;
+ const struct smb_version_operations *ops = server->ops;
+ struct super_block *sb = NULL;
+ struct cifs_sb_info *cifs_sb;
+ struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
+ char *tree;
+ struct dfs_info3_param ref = {0};
+
+ tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL);
+ if (!tree)
+ return -ENOMEM;
+
+ if (tcon->ipc) {
+ scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname);
+ rc = ops->tree_connect(xid, tcon->ses, tree, tcon, nlsc);
+ goto out;
+ }
+
+ sb = cifs_get_tcp_super(server);
+ if (IS_ERR(sb)) {
+ rc = PTR_ERR(sb);
+ cifs_dbg(VFS, "%s: could not find superblock: %d\n", __func__, rc);
+ goto out;
+ }
+
+ cifs_sb = CIFS_SB(sb);
+
+ /* If it is not dfs or there was no cached dfs referral, then reconnect to same share */
+ if (!server->current_fullpath ||
+ dfs_cache_noreq_find(server->current_fullpath + 1, &ref, &tl)) {
+ rc = ops->tree_connect(xid, tcon->ses, tcon->treeName, tcon, cifs_sb->local_nls);
+ goto out;
+ }
+
+ rc = tree_connect_dfs_target(xid, tcon, cifs_sb, tree, ref.server_type == DFS_TYPE_LINK,
+ &tl);
+ free_dfs_info_param(&ref);
+
out:
kfree(tree);
+ cifs_put_tcp_super(sb);
+
return rc;
}
#else
diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
index 28374559284413..5c1259d2eeac21 100644
--- a/fs/cifs/dfs_cache.c
+++ b/fs/cifs/dfs_cache.c
@@ -283,7 +283,7 @@ static int dfscache_proc_show(struct seq_file *m, void *v)
seq_printf(m,
"cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
ce->path, ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
- ce->ttl, ce->etime.tv_nsec, ce->ref_flags, ce->hdr_flags,
+ ce->ttl, ce->etime.tv_nsec, ce->hdr_flags, ce->ref_flags,
IS_DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no",
ce->path_consumed, cache_entry_expired(ce) ? "yes" : "no");
@@ -1364,9 +1364,9 @@ static void mark_for_reconnect_if_needed(struct cifs_tcon *tcon, struct dfs_cach
}
/* Refresh dfs referral of tcon and mark it for reconnect if needed */
-static int refresh_tcon(struct cifs_ses **sessions, struct cifs_tcon *tcon, bool force_refresh)
+static int __refresh_tcon(const char *path, struct cifs_ses **sessions, struct cifs_tcon *tcon,
+ bool force_refresh)
{
- const char *path = tcon->dfs_path + 1;
struct cifs_ses *ses;
struct cache_entry *ce;
struct dfs_info3_param *refs = NULL;
@@ -1422,6 +1422,20 @@ out:
return rc;
}
+static int refresh_tcon(struct cifs_ses **sessions, struct cifs_tcon *tcon, bool force_refresh)
+{
+ struct TCP_Server_Info *server = tcon->ses->server;
+
+ mutex_lock(&server->refpath_lock);
+ if (strcasecmp(server->leaf_fullpath, server->origin_fullpath))
+ __refresh_tcon(server->leaf_fullpath + 1, sessions, tcon, force_refresh);
+ mutex_unlock(&server->refpath_lock);
+
+ __refresh_tcon(server->origin_fullpath + 1, sessions, tcon, force_refresh);
+
+ return 0;
+}
+
/**
* dfs_cache_remount_fs - remount a DFS share
*
@@ -1435,6 +1449,7 @@ out:
int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
{
struct cifs_tcon *tcon;
+ struct TCP_Server_Info *server;
struct mount_group *mg;
struct cifs_ses *sessions[CACHE_MAX_ENTRIES + 1] = {NULL};
int rc;
@@ -1443,13 +1458,15 @@ int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
return -EINVAL;
tcon = cifs_sb_master_tcon(cifs_sb);
- if (!tcon->dfs_path) {
- cifs_dbg(FYI, "%s: not a dfs tcon\n", __func__);
+ server = tcon->ses->server;
+
+ if (!server->origin_fullpath) {
+ cifs_dbg(FYI, "%s: not a dfs mount\n", __func__);
return 0;
}
if (uuid_is_null(&cifs_sb->dfs_mount_id)) {
- cifs_dbg(FYI, "%s: tcon has no dfs mount group id\n", __func__);
+ cifs_dbg(FYI, "%s: no dfs mount group id\n", __func__);
return -EINVAL;
}
@@ -1457,7 +1474,7 @@ int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
mg = find_mount_group_locked(&cifs_sb->dfs_mount_id);
if (IS_ERR(mg)) {
mutex_unlock(&mount_group_list_lock);
- cifs_dbg(FYI, "%s: tcon has ipc session to refresh referral\n", __func__);
+ cifs_dbg(FYI, "%s: no ipc session for refreshing referral\n", __func__);
return PTR_ERR(mg);
}
kref_get(&mg->refcount);
@@ -1498,9 +1515,12 @@ static void refresh_mounts(struct cifs_ses **sessions)
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
+ if (!server->is_dfs_conn)
+ continue;
+
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
- if (tcon->dfs_path) {
+ if (!tcon->ipc && !tcon->need_reconnect) {
tcon->tc_count++;
list_add_tail(&tcon->ulist, &tcons);
}
@@ -1510,8 +1530,16 @@ static void refresh_mounts(struct cifs_ses **sessions)
spin_unlock(&cifs_tcp_ses_lock);
list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
+ struct TCP_Server_Info *server = tcon->ses->server;
+
list_del_init(&tcon->ulist);
- refresh_tcon(sessions, tcon, false);
+
+ mutex_lock(&server->refpath_lock);
+ if (strcasecmp(server->leaf_fullpath, server->origin_fullpath))
+ __refresh_tcon(server->leaf_fullpath + 1, sessions, tcon, false);
+ mutex_unlock(&server->refpath_lock);
+
+ __refresh_tcon(server->origin_fullpath + 1, sessions, tcon, false);
cifs_put_tcon(tcon);
}
}
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 1b855fcb179eb2..9fee3af83a7369 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2692,12 +2692,23 @@ int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
tcon = tlink_tcon(smbfile->tlink);
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
server = tcon->ses->server;
- if (server->ops->flush)
- rc = server->ops->flush(xid, tcon, &smbfile->fid);
- else
+ if (server->ops->flush == NULL) {
rc = -ENOSYS;
+ goto strict_fsync_exit;
+ }
+
+ if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
+ smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
+ if (smbfile) {
+ rc = server->ops->flush(xid, tcon, &smbfile->fid);
+ cifsFileInfo_put(smbfile);
+ } else
+ cifs_dbg(FYI, "ignore fsync for file not open for write\n");
+ } else
+ rc = server->ops->flush(xid, tcon, &smbfile->fid);
}
+strict_fsync_exit:
free_xid(xid);
return rc;
}
@@ -2709,6 +2720,7 @@ int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
struct cifs_tcon *tcon;
struct TCP_Server_Info *server;
struct cifsFileInfo *smbfile = file->private_data;
+ struct inode *inode = file_inode(file);
struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
rc = file_write_and_wait_range(file, start, end);
@@ -2725,12 +2737,23 @@ int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
tcon = tlink_tcon(smbfile->tlink);
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
server = tcon->ses->server;
- if (server->ops->flush)
- rc = server->ops->flush(xid, tcon, &smbfile->fid);
- else
+ if (server->ops->flush == NULL) {
rc = -ENOSYS;
+ goto fsync_exit;
+ }
+
+ if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
+ smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
+ if (smbfile) {
+ rc = server->ops->flush(xid, tcon, &smbfile->fid);
+ cifsFileInfo_put(smbfile);
+ } else
+ cifs_dbg(FYI, "ignore fsync for file not open for write\n");
+ } else
+ rc = server->ops->flush(xid, tcon, &smbfile->fid);
}
+fsync_exit:
free_xid(xid);
return rc;
}
diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c
index 38d96a48074527..6a179ae753c118 100644
--- a/fs/cifs/fs_context.c
+++ b/fs/cifs/fs_context.c
@@ -308,7 +308,9 @@ smb3_fs_context_dup(struct smb3_fs_context *new_ctx, struct smb3_fs_context *ctx
new_ctx->nodename = NULL;
new_ctx->username = NULL;
new_ctx->password = NULL;
+ new_ctx->server_hostname = NULL;
new_ctx->domainname = NULL;
+ new_ctx->workstation_name = NULL;
new_ctx->UNC = NULL;
new_ctx->source = NULL;
new_ctx->iocharset = NULL;
@@ -323,6 +325,7 @@ smb3_fs_context_dup(struct smb3_fs_context *new_ctx, struct smb3_fs_context *ctx
DUP_CTX_STR(UNC);
DUP_CTX_STR(source);
DUP_CTX_STR(domainname);
+ DUP_CTX_STR(workstation_name);
DUP_CTX_STR(nodename);
DUP_CTX_STR(iocharset);
@@ -459,6 +462,7 @@ smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx)
return -EINVAL;
/* record the server hostname */
+ kfree(ctx->server_hostname);
ctx->server_hostname = kstrndup(devname + 2, pos - devname - 2, GFP_KERNEL);
if (!ctx->server_hostname)
return -ENOMEM;
@@ -720,6 +724,11 @@ static int smb3_verify_reconfigure_ctx(struct fs_context *fc,
cifs_errorf(fc, "can not change domainname during remount\n");
return -EINVAL;
}
+ if (new_ctx->workstation_name &&
+ (!old_ctx->workstation_name || strcmp(new_ctx->workstation_name, old_ctx->workstation_name))) {
+ cifs_errorf(fc, "can not change workstation_name during remount\n");
+ return -EINVAL;
+ }
if (new_ctx->nodename &&
(!old_ctx->nodename || strcmp(new_ctx->nodename, old_ctx->nodename))) {
cifs_errorf(fc, "can not change nodename during remount\n");
@@ -753,7 +762,8 @@ static int smb3_reconfigure(struct fs_context *fc)
return rc;
/*
- * We can not change UNC/username/password/domainname/nodename/iocharset
+ * We can not change UNC/username/password/domainname/
+ * workstation_name/nodename/iocharset
* during reconnect so ignore what we have in the new context and
* just use what we already have in cifs_sb->ctx.
*/
@@ -762,6 +772,7 @@ static int smb3_reconfigure(struct fs_context *fc)
STEAL_STRING(cifs_sb, ctx, username);
STEAL_STRING(cifs_sb, ctx, password);
STEAL_STRING(cifs_sb, ctx, domainname);
+ STEAL_STRING(cifs_sb, ctx, workstation_name);
STEAL_STRING(cifs_sb, ctx, nodename);
STEAL_STRING(cifs_sb, ctx, iocharset);
@@ -1414,13 +1425,22 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
int smb3_init_fs_context(struct fs_context *fc)
{
+ int rc;
struct smb3_fs_context *ctx;
char *nodename = utsname()->nodename;
int i;
ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
- if (unlikely(!ctx))
- return -ENOMEM;
+ if (unlikely(!ctx)) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+
+ ctx->workstation_name = kstrdup(nodename, GFP_KERNEL);
+ if (unlikely(!ctx->workstation_name)) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
/*
* does not have to be perfect mapping since field is
@@ -1493,6 +1513,14 @@ int smb3_init_fs_context(struct fs_context *fc)
fc->fs_private = ctx;
fc->ops = &smb3_fs_context_ops;
return 0;
+
+err_exit:
+ if (ctx) {
+ kfree(ctx->workstation_name);
+ kfree(ctx);
+ }
+
+ return rc;
}
void
@@ -1518,6 +1546,8 @@ smb3_cleanup_fs_context_contents(struct smb3_fs_context *ctx)
ctx->source = NULL;
kfree(ctx->domainname);
ctx->domainname = NULL;
+ kfree(ctx->workstation_name);
+ ctx->workstation_name = NULL;
kfree(ctx->nodename);
ctx->nodename = NULL;
kfree(ctx->iocharset);
diff --git a/fs/cifs/fs_context.h b/fs/cifs/fs_context.h
index b2d22cf9cb181b..e54090d9ef368e 100644
--- a/fs/cifs/fs_context.h
+++ b/fs/cifs/fs_context.h
@@ -170,6 +170,7 @@ struct smb3_fs_context {
char *server_hostname;
char *UNC;
char *nodename;
+ char *workstation_name;
char *iocharset; /* local code page for mapping to and from Unicode */
char source_rfc1001_name[RFC1001_NAME_LEN_WITH_NULL]; /* clnt nb name */
char target_rfc1001_name[RFC1001_NAME_LEN_WITH_NULL]; /* srvr nb name */
diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c
index 8eedd20c44abde..7e409a38a2d7c1 100644
--- a/fs/cifs/fscache.c
+++ b/fs/cifs/fscache.c
@@ -87,6 +87,14 @@ void cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
char *sharename;
struct cifs_fscache_super_auxdata auxdata;
+ /*
+	 * Check if the cookie was already initialized so that we don't reinitialize it.
+	 * In the future, as we integrate with newer fscache features,
+	 * we may want to instead check whether the cookie has changed.
+ */
+ if (tcon->fscache == NULL)
+ return;
+
sharename = extract_sharename(tcon->treeName);
if (IS_ERR(sharename)) {
cifs_dbg(FYI, "%s: couldn't extract sharename\n", __func__);
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index ba2c3e897b291a..5148d48d6a357e 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -75,6 +75,7 @@ sesInfoAlloc(void)
INIT_LIST_HEAD(&ret_buf->tcon_list);
mutex_init(&ret_buf->session_mutex);
spin_lock_init(&ret_buf->iface_lock);
+ spin_lock_init(&ret_buf->chan_lock);
}
return ret_buf;
}
@@ -94,6 +95,7 @@ sesInfoFree(struct cifs_ses *buf_to_free)
kfree_sensitive(buf_to_free->password);
kfree(buf_to_free->user_name);
kfree(buf_to_free->domainName);
+ kfree(buf_to_free->workstation_name);
kfree_sensitive(buf_to_free->auth_key.response);
kfree(buf_to_free->iface_list);
kfree_sensitive(buf_to_free);
@@ -138,9 +140,6 @@ tconInfoFree(struct cifs_tcon *buf_to_free)
kfree(buf_to_free->nativeFileSystem);
kfree_sensitive(buf_to_free->password);
kfree(buf_to_free->crfid.fid);
-#ifdef CONFIG_CIFS_DFS_UPCALL
- kfree(buf_to_free->dfs_path);
-#endif
kfree(buf_to_free);
}
@@ -1287,69 +1286,20 @@ out:
return rc;
}
-static void tcon_super_cb(struct super_block *sb, void *arg)
+int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix)
{
- struct super_cb_data *sd = arg;
- struct cifs_tcon *tcon = sd->data;
- struct cifs_sb_info *cifs_sb;
-
- if (sd->sb)
- return;
-
- cifs_sb = CIFS_SB(sb);
- if (tcon->dfs_path && cifs_sb->origin_fullpath &&
- !strcasecmp(tcon->dfs_path, cifs_sb->origin_fullpath))
- sd->sb = sb;
-}
-
-static inline struct super_block *cifs_get_tcon_super(struct cifs_tcon *tcon)
-{
- return __cifs_get_super(tcon_super_cb, tcon);
-}
-
-static inline void cifs_put_tcon_super(struct super_block *sb)
-{
- __cifs_put_super(sb);
-}
-#else
-static inline struct super_block *cifs_get_tcon_super(struct cifs_tcon *tcon)
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
-
-static inline void cifs_put_tcon_super(struct super_block *sb)
-{
-}
-#endif
-
-int update_super_prepath(struct cifs_tcon *tcon, char *prefix)
-{
- struct super_block *sb;
- struct cifs_sb_info *cifs_sb;
- int rc = 0;
-
- sb = cifs_get_tcon_super(tcon);
- if (IS_ERR(sb))
- return PTR_ERR(sb);
-
- cifs_sb = CIFS_SB(sb);
-
kfree(cifs_sb->prepath);
if (prefix && *prefix) {
cifs_sb->prepath = kstrdup(prefix, GFP_ATOMIC);
- if (!cifs_sb->prepath) {
- rc = -ENOMEM;
- goto out;
- }
+ if (!cifs_sb->prepath)
+ return -ENOMEM;
convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb));
} else
cifs_sb->prepath = NULL;
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
-
-out:
- cifs_put_tcon_super(sb);
- return rc;
+ return 0;
}
+#endif
diff --git a/fs/cifs/ntlmssp.h b/fs/cifs/ntlmssp.h
index 25a2b8ef88b99f..fe707f45da8902 100644
--- a/fs/cifs/ntlmssp.h
+++ b/fs/cifs/ntlmssp.h
@@ -119,7 +119,9 @@ typedef struct _AUTHENTICATE_MESSAGE {
*/
int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, struct cifs_ses *ses);
-void build_ntlmssp_negotiate_blob(unsigned char *pbuffer, struct cifs_ses *ses);
+int build_ntlmssp_negotiate_blob(unsigned char **pbuffer, u16 *buflen,
+ struct cifs_ses *ses,
+ const struct nls_table *nls_cp);
int build_ntlmssp_auth_blob(unsigned char **pbuffer, u16 *buflen,
struct cifs_ses *ses,
const struct nls_table *nls_cp);
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 23e02db7923f6b..2c10b186ed6ee6 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -54,41 +54,53 @@ bool is_ses_using_iface(struct cifs_ses *ses, struct cifs_server_iface *iface)
{
int i;
+ spin_lock(&ses->chan_lock);
for (i = 0; i < ses->chan_count; i++) {
- if (is_server_using_iface(ses->chans[i].server, iface))
+ if (is_server_using_iface(ses->chans[i].server, iface)) {
+ spin_unlock(&ses->chan_lock);
return true;
+ }
}
+ spin_unlock(&ses->chan_lock);
return false;
}
/* returns number of channels added */
int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
{
- int old_chan_count = ses->chan_count;
- int left = ses->chan_max - ses->chan_count;
+ int old_chan_count, new_chan_count;
+ int left;
int i = 0;
int rc = 0;
int tries = 0;
struct cifs_server_iface *ifaces = NULL;
size_t iface_count;
+ if (ses->server->dialect < SMB30_PROT_ID) {
+ cifs_dbg(VFS, "multichannel is not supported on this protocol version, use 3.0 or above\n");
+ return 0;
+ }
+
+ spin_lock(&ses->chan_lock);
+
+ new_chan_count = old_chan_count = ses->chan_count;
+ left = ses->chan_max - ses->chan_count;
+
if (left <= 0) {
cifs_dbg(FYI,
"ses already at max_channels (%zu), nothing to open\n",
ses->chan_max);
- return 0;
- }
-
- if (ses->server->dialect < SMB30_PROT_ID) {
- cifs_dbg(VFS, "multichannel is not supported on this protocol version, use 3.0 or above\n");
+ spin_unlock(&ses->chan_lock);
return 0;
}
if (!(ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
cifs_dbg(VFS, "server %s does not support multichannel\n", ses->server->hostname);
ses->chan_max = 1;
+ spin_unlock(&ses->chan_lock);
return 0;
}
+ spin_unlock(&ses->chan_lock);
/*
* Make a copy of the iface list at the time and use that
@@ -142,10 +154,11 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
cifs_dbg(FYI, "successfully opened new channel on iface#%d\n",
i);
left--;
+ new_chan_count++;
}
kfree(ifaces);
- return ses->chan_count - old_chan_count;
+ return new_chan_count - old_chan_count;
}
/*
@@ -157,10 +170,14 @@ cifs_ses_find_chan(struct cifs_ses *ses, struct TCP_Server_Info *server)
{
int i;
+ spin_lock(&ses->chan_lock);
for (i = 0; i < ses->chan_count; i++) {
- if (ses->chans[i].server == server)
+ if (ses->chans[i].server == server) {
+ spin_unlock(&ses->chan_lock);
return &ses->chans[i];
+ }
}
+ spin_unlock(&ses->chan_lock);
return NULL;
}
@@ -168,6 +185,7 @@ static int
cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
struct cifs_server_iface *iface)
{
+ struct TCP_Server_Info *chan_server;
struct cifs_chan *chan;
struct smb3_fs_context ctx = {NULL};
static const char unc_fmt[] = "\\%s\\foo";
@@ -240,18 +258,19 @@ cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
SMB2_CLIENT_GUID_SIZE);
ctx.use_client_guid = true;
- mutex_lock(&ses->session_mutex);
+ chan_server = cifs_get_tcp_session(&ctx, ses->server);
+ mutex_lock(&ses->session_mutex);
+ spin_lock(&ses->chan_lock);
chan = ses->binding_chan = &ses->chans[ses->chan_count];
- chan->server = cifs_get_tcp_session(&ctx);
+ chan->server = chan_server;
if (IS_ERR(chan->server)) {
rc = PTR_ERR(chan->server);
chan->server = NULL;
+ spin_unlock(&ses->chan_lock);
goto out;
}
- spin_lock(&cifs_tcp_ses_lock);
- chan->server->is_channel = true;
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&ses->chan_lock);
/*
* We need to allocate the server crypto now as we will need
@@ -283,8 +302,11 @@ cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
* ses to the new server.
*/
+ spin_lock(&ses->chan_lock);
ses->chan_count++;
atomic_set(&ses->chan_seq, 0);
+ spin_unlock(&ses->chan_lock);
+
out:
ses->binding = false;
ses->binding_chan = NULL;
@@ -599,18 +621,85 @@ int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
return 0;
}
+static int size_of_ntlmssp_blob(struct cifs_ses *ses, int base_size)
+{
+ int sz = base_size + ses->auth_key.len
+ - CIFS_SESS_KEY_SIZE + CIFS_CPHTXT_SIZE + 2;
+
+ if (ses->domainName)
+ sz += sizeof(__le16) * strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
+ else
+ sz += sizeof(__le16);
+
+ if (ses->user_name)
+ sz += sizeof(__le16) * strnlen(ses->user_name, CIFS_MAX_USERNAME_LEN);
+ else
+ sz += sizeof(__le16);
+
+ sz += sizeof(__le16) * strnlen(ses->workstation_name, CIFS_MAX_WORKSTATION_LEN);
+
+ return sz;
+}
+
+static inline void cifs_security_buffer_from_str(SECURITY_BUFFER *pbuf,
+ char *str_value,
+ int str_length,
+ unsigned char *pstart,
+ unsigned char **pcur,
+ const struct nls_table *nls_cp)
+{
+ unsigned char *tmp = pstart;
+ int len;
+
+ if (!pbuf)
+ return;
+
+ if (!pcur)
+ pcur = &tmp;
+
+ if (!str_value) {
+ pbuf->BufferOffset = cpu_to_le32(*pcur - pstart);
+ pbuf->Length = 0;
+ pbuf->MaximumLength = 0;
+ *pcur += sizeof(__le16);
+ } else {
+ len = cifs_strtoUTF16((__le16 *)*pcur,
+ str_value,
+ str_length,
+ nls_cp);
+ len *= sizeof(__le16);
+ pbuf->BufferOffset = cpu_to_le32(*pcur - pstart);
+ pbuf->Length = cpu_to_le16(len);
+ pbuf->MaximumLength = cpu_to_le16(len);
+ *pcur += len;
+ }
+}
+
/* BB Move to ntlmssp.c eventually */
-/* We do not malloc the blob, it is passed in pbuffer, because
- it is fixed size, and small, making this approach cleaner */
-void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
- struct cifs_ses *ses)
+int build_ntlmssp_negotiate_blob(unsigned char **pbuffer,
+ u16 *buflen,
+ struct cifs_ses *ses,
+ const struct nls_table *nls_cp)
{
+ int rc = 0;
struct TCP_Server_Info *server = cifs_ses_server(ses);
- NEGOTIATE_MESSAGE *sec_blob = (NEGOTIATE_MESSAGE *)pbuffer;
+ NEGOTIATE_MESSAGE *sec_blob;
__u32 flags;
+ unsigned char *tmp;
+ int len;
+
+ len = size_of_ntlmssp_blob(ses, sizeof(NEGOTIATE_MESSAGE));
+ *pbuffer = kmalloc(len, GFP_KERNEL);
+ if (!*pbuffer) {
+ rc = -ENOMEM;
+ cifs_dbg(VFS, "Error %d during NTLMSSP allocation\n", rc);
+ *buflen = 0;
+ goto setup_ntlm_neg_ret;
+ }
+ sec_blob = (NEGOTIATE_MESSAGE *)*pbuffer;
- memset(pbuffer, 0, sizeof(NEGOTIATE_MESSAGE));
+ memset(*pbuffer, 0, sizeof(NEGOTIATE_MESSAGE));
memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
sec_blob->MessageType = NtLmNegotiate;
@@ -624,34 +713,25 @@ void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
if (!server->session_estab || ses->ntlmssp->sesskey_per_smbsess)
flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
+ tmp = *pbuffer + sizeof(NEGOTIATE_MESSAGE);
sec_blob->NegotiateFlags = cpu_to_le32(flags);
- sec_blob->WorkstationName.BufferOffset = 0;
- sec_blob->WorkstationName.Length = 0;
- sec_blob->WorkstationName.MaximumLength = 0;
+ /* these fields should be null in negotiate phase MS-NLMP 3.1.5.1.1 */
+ cifs_security_buffer_from_str(&sec_blob->DomainName,
+ NULL,
+ CIFS_MAX_DOMAINNAME_LEN,
+ *pbuffer, &tmp,
+ nls_cp);
- /* Domain name is sent on the Challenge not Negotiate NTLMSSP request */
- sec_blob->DomainName.BufferOffset = 0;
- sec_blob->DomainName.Length = 0;
- sec_blob->DomainName.MaximumLength = 0;
-}
-
-static int size_of_ntlmssp_blob(struct cifs_ses *ses)
-{
- int sz = sizeof(AUTHENTICATE_MESSAGE) + ses->auth_key.len
- - CIFS_SESS_KEY_SIZE + CIFS_CPHTXT_SIZE + 2;
-
- if (ses->domainName)
- sz += 2 * strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
- else
- sz += 2;
-
- if (ses->user_name)
- sz += 2 * strnlen(ses->user_name, CIFS_MAX_USERNAME_LEN);
- else
- sz += 2;
+ cifs_security_buffer_from_str(&sec_blob->WorkstationName,
+ NULL,
+ CIFS_MAX_WORKSTATION_LEN,
+ *pbuffer, &tmp,
+ nls_cp);
- return sz;
+ *buflen = tmp - *pbuffer;
+setup_ntlm_neg_ret:
+ return rc;
}
int build_ntlmssp_auth_blob(unsigned char **pbuffer,
@@ -663,6 +743,7 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer,
AUTHENTICATE_MESSAGE *sec_blob;
__u32 flags;
unsigned char *tmp;
+ int len;
rc = setup_ntlmv2_rsp(ses, nls_cp);
if (rc) {
@@ -670,7 +751,9 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer,
*buflen = 0;
goto setup_ntlmv2_ret;
}
- *pbuffer = kmalloc(size_of_ntlmssp_blob(ses), GFP_KERNEL);
+
+ len = size_of_ntlmssp_blob(ses, sizeof(AUTHENTICATE_MESSAGE));
+ *pbuffer = kmalloc(len, GFP_KERNEL);
if (!*pbuffer) {
rc = -ENOMEM;
cifs_dbg(VFS, "Error %d during NTLMSSP allocation\n", rc);
@@ -686,7 +769,7 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer,
NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_TARGET_INFO |
NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC |
- NTLMSSP_NEGOTIATE_SEAL;
+ NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED;
if (ses->server->sign)
flags |= NTLMSSP_NEGOTIATE_SIGN;
if (!ses->server->session_estab || ses->ntlmssp->sesskey_per_smbsess)
@@ -719,42 +802,23 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer,
sec_blob->NtChallengeResponse.MaximumLength = 0;
}
- if (ses->domainName == NULL) {
- sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
- sec_blob->DomainName.Length = 0;
- sec_blob->DomainName.MaximumLength = 0;
- tmp += 2;
- } else {
- int len;
- len = cifs_strtoUTF16((__le16 *)tmp, ses->domainName,
- CIFS_MAX_DOMAINNAME_LEN, nls_cp);
- len *= 2; /* unicode is 2 bytes each */
- sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
- sec_blob->DomainName.Length = cpu_to_le16(len);
- sec_blob->DomainName.MaximumLength = cpu_to_le16(len);
- tmp += len;
- }
+ cifs_security_buffer_from_str(&sec_blob->DomainName,
+ ses->domainName,
+ CIFS_MAX_DOMAINNAME_LEN,
+ *pbuffer, &tmp,
+ nls_cp);
- if (ses->user_name == NULL) {
- sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
- sec_blob->UserName.Length = 0;
- sec_blob->UserName.MaximumLength = 0;
- tmp += 2;
- } else {
- int len;
- len = cifs_strtoUTF16((__le16 *)tmp, ses->user_name,
- CIFS_MAX_USERNAME_LEN, nls_cp);
- len *= 2; /* unicode is 2 bytes each */
- sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
- sec_blob->UserName.Length = cpu_to_le16(len);
- sec_blob->UserName.MaximumLength = cpu_to_le16(len);
- tmp += len;
- }
+ cifs_security_buffer_from_str(&sec_blob->UserName,
+ ses->user_name,
+ CIFS_MAX_USERNAME_LEN,
+ *pbuffer, &tmp,
+ nls_cp);
- sec_blob->WorkstationName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
- sec_blob->WorkstationName.Length = 0;
- sec_blob->WorkstationName.MaximumLength = 0;
- tmp += 2;
+ cifs_security_buffer_from_str(&sec_blob->WorkstationName,
+ ses->workstation_name,
+ CIFS_MAX_WORKSTATION_LEN,
+ *pbuffer, &tmp,
+ nls_cp);
if (((ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_KEY_XCH) ||
(ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC))
@@ -1230,6 +1294,7 @@ sess_auth_rawntlmssp_negotiate(struct sess_data *sess_data)
struct cifs_ses *ses = sess_data->ses;
__u16 bytes_remaining;
char *bcc_ptr;
+ unsigned char *ntlmsspblob = NULL;
u16 blob_len;
cifs_dbg(FYI, "rawntlmssp session setup negotiate phase\n");
@@ -1253,10 +1318,15 @@ sess_auth_rawntlmssp_negotiate(struct sess_data *sess_data)
pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
/* Build security blob before we assemble the request */
- build_ntlmssp_negotiate_blob(pSMB->req.SecurityBlob, ses);
- sess_data->iov[1].iov_len = sizeof(NEGOTIATE_MESSAGE);
- sess_data->iov[1].iov_base = pSMB->req.SecurityBlob;
- pSMB->req.SecurityBlobLength = cpu_to_le16(sizeof(NEGOTIATE_MESSAGE));
+ rc = build_ntlmssp_negotiate_blob(&ntlmsspblob,
+ &blob_len, ses,
+ sess_data->nls_cp);
+ if (rc)
+ goto out;
+
+ sess_data->iov[1].iov_len = blob_len;
+ sess_data->iov[1].iov_base = ntlmsspblob;
+ pSMB->req.SecurityBlobLength = cpu_to_le16(blob_len);
rc = _sess_auth_rawntlmssp_assemble_req(sess_data);
if (rc)
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index 8297703492eea6..fe5bfa245fa7ef 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -46,6 +46,10 @@ struct cop_vars {
struct smb2_file_link_info link_info;
};
+/*
+ * Note: if cfile is passed, the reference to it is dropped here,
+ * so make sure you do not reuse cfile after returning from this function.
+ */
static int
smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb, const char *full_path,
@@ -536,10 +540,11 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
create_options |= OPEN_REPARSE_POINT;
/* Failed on a symbolic link - query a reparse point info */
+ cifs_get_readable_path(tcon, full_path, &cfile);
rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
FILE_READ_ATTRIBUTES, FILE_OPEN,
create_options, ACL_NO_MODE,
- smb2_data, SMB2_OP_QUERY_INFO, NULL);
+ smb2_data, SMB2_OP_QUERY_INFO, cfile);
}
if (rc)
goto out;
@@ -587,10 +592,11 @@ smb311_posix_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
create_options |= OPEN_REPARSE_POINT;
/* Failed on a symbolic link - query a reparse point info */
+ cifs_get_readable_path(tcon, full_path, &cfile);
rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
FILE_READ_ATTRIBUTES, FILE_OPEN,
create_options, ACL_NO_MODE,
- smb2_data, SMB2_OP_POSIX_QUERY_INFO, NULL);
+ smb2_data, SMB2_OP_POSIX_QUERY_INFO, cfile);
}
if (rc)
goto out;
@@ -707,10 +713,12 @@ smb2_set_path_size(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb, bool set_alloc)
{
__le64 eof = cpu_to_le64(size);
+ struct cifsFileInfo *cfile;
+ cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
return smb2_compound_op(xid, tcon, cifs_sb, full_path,
FILE_WRITE_DATA, FILE_OPEN, 0, ACL_NO_MODE,
- &eof, SMB2_OP_SET_EOF, NULL);
+ &eof, SMB2_OP_SET_EOF, cfile);
}
int
@@ -719,6 +727,8 @@ smb2_set_file_info(struct inode *inode, const char *full_path,
{
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct tcon_link *tlink;
+ struct cifs_tcon *tcon;
+ struct cifsFileInfo *cfile;
int rc;
if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) &&
@@ -729,10 +739,12 @@ smb2_set_file_info(struct inode *inode, const char *full_path,
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink))
return PTR_ERR(tlink);
+ tcon = tlink_tcon(tlink);
- rc = smb2_compound_op(xid, tlink_tcon(tlink), cifs_sb, full_path,
+ cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
+ rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
FILE_WRITE_ATTRIBUTES, FILE_OPEN,
- 0, ACL_NO_MODE, buf, SMB2_OP_SET_INFO, NULL);
+ 0, ACL_NO_MODE, buf, SMB2_OP_SET_INFO, cfile);
cifs_put_tlink(tlink);
return rc;
}
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 7acf71defea79e..c5b1dea54ebcdf 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -2844,6 +2844,7 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
struct fsctl_get_dfs_referral_req *dfs_req = NULL;
struct get_dfs_referral_rsp *dfs_rsp = NULL;
u32 dfs_req_size = 0, dfs_rsp_size = 0;
+ int retry_count = 0;
cifs_dbg(FYI, "%s: path: %s\n", __func__, search_name);
@@ -2895,11 +2896,14 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
true /* is_fsctl */,
(char *)dfs_req, dfs_req_size, CIFSMaxBufSize,
(char **)&dfs_rsp, &dfs_rsp_size);
- } while (rc == -EAGAIN);
+ if (!is_retryable_error(rc))
+ break;
+ usleep_range(512, 2048);
+ } while (++retry_count < 5);
if (rc) {
- if ((rc != -ENOENT) && (rc != -EOPNOTSUPP))
- cifs_tcon_dbg(VFS, "ioctl error in %s rc=%d\n", __func__, rc);
+ if (!is_retryable_error(rc) && rc != -ENOENT && rc != -EOPNOTSUPP)
+ cifs_tcon_dbg(VFS, "%s: ioctl error: rc=%d\n", __func__, rc);
goto out;
}
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index d2ecb2ea37c0df..2f5f2c4c6183c9 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -155,7 +155,11 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
if (tcon == NULL)
return 0;
- if (smb2_command == SMB2_TREE_CONNECT)
+ /*
+ * Need to also skip SMB2_IOCTL because it is used for checking nested dfs links in
+ * cifs_tree_connect().
+ */
+ if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL)
return 0;
if (tcon->tidStatus == CifsExiting) {
@@ -253,7 +257,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
/*
* If we are reconnecting an extra channel, bind
*/
- if (server->is_channel) {
+ if (CIFS_SERVER_IS_CHAN(server)) {
ses->binding = true;
ses->binding_chan = cifs_ses_find_chan(ses, server);
}
@@ -1456,7 +1460,7 @@ SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data)
int rc;
struct cifs_ses *ses = sess_data->ses;
struct smb2_sess_setup_rsp *rsp = NULL;
- char *ntlmssp_blob = NULL;
+ unsigned char *ntlmssp_blob = NULL;
bool use_spnego = false; /* else use raw ntlmssp */
u16 blob_length = 0;
@@ -1475,22 +1479,17 @@ SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data)
if (rc)
goto out_err;
- ntlmssp_blob = kmalloc(sizeof(struct _NEGOTIATE_MESSAGE),
- GFP_KERNEL);
- if (ntlmssp_blob == NULL) {
- rc = -ENOMEM;
- goto out;
- }
+ rc = build_ntlmssp_negotiate_blob(&ntlmssp_blob,
+ &blob_length, ses,
+ sess_data->nls_cp);
+ if (rc)
+ goto out_err;
- build_ntlmssp_negotiate_blob(ntlmssp_blob, ses);
if (use_spnego) {
/* BB eventually need to add this */
cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
rc = -EOPNOTSUPP;
goto out;
- } else {
- blob_length = sizeof(struct _NEGOTIATE_MESSAGE);
- /* with raw NTLMSSP we don't encapsulate in SPNEGO */
}
sess_data->iov[1].iov_base = ntlmssp_blob;
sess_data->iov[1].iov_len = blob_length;
@@ -1841,7 +1840,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
cifs_small_buf_release(req);
rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base;
trace_smb3_tcon(xid, tcon->tid, ses->Suid, tree, rc);
- if (rc != 0) {
+ if ((rc != 0) || (rsp == NULL)) {
cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE);
tcon->need_reconnect = true;
goto tcon_error_exit;
@@ -2669,7 +2668,18 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
goto err_free_rsp_buf;
}
+ /*
+	 * Although it is unlikely for rsp to be NULL without rc being set,
+	 * adding the check below is slightly safer long term (and quiets a
+	 * Coverity warning).
+ */
rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
+ if (rsp == NULL) {
+ rc = -EIO;
+ kfree(pc_buf);
+ goto err_free_req;
+ }
+
trace_smb3_posix_mkdir_done(xid, le64_to_cpu(rsp->PersistentFileId),
tcon->tid,
ses->Suid, CREATE_NOT_FILE,
@@ -2942,7 +2952,9 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
tcon->need_reconnect = true;
}
goto creat_exit;
- } else
+ } else if (rsp == NULL) /* unlikely to happen, but safer to check */
+ goto creat_exit;
+ else
trace_smb3_open_done(xid, le64_to_cpu(rsp->PersistentFileId),
tcon->tid,
ses->Suid, oparms->create_options,
@@ -3163,6 +3175,16 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
if ((plen == NULL) || (out_data == NULL))
goto ioctl_exit;
+ /*
+	 * Although it is unlikely for rsp to be NULL without rc being set,
+	 * adding the check below is slightly safer long term (and quiets a
+	 * Coverity warning).
+ */
+ if (rsp == NULL) {
+ rc = -EIO;
+ goto ioctl_exit;
+ }
+
*plen = le32_to_cpu(rsp->OutputCount);
/* We check for obvious errors in the output buffer length and offset */
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index b7379329b741c9..61ea3d3f95b4a6 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -1044,14 +1044,17 @@ struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
if (!ses)
return NULL;
+ spin_lock(&ses->chan_lock);
if (!ses->binding) {
/* round robin */
if (ses->chan_count > 1) {
index = (uint)atomic_inc_return(&ses->chan_seq);
index %= ses->chan_count;
}
+ spin_unlock(&ses->chan_lock);
return ses->chans[index].server;
} else {
+ spin_unlock(&ses->chan_lock);
return cifs_ses_server(ses);
}
}
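The cifs_pick_channel() hunk above takes ses->chan_lock before reading chan_count and chan_seq, so the round-robin index is computed against a stable channel list. A minimal userspace sketch of the same pattern, with a pthread mutex standing in for the spinlock and invented struct/field names:

#include <pthread.h>
#include <stdio.h>

#define MAX_CHANS 4

struct chan { int id; };

/* Invented stand-in for struct cifs_ses and its channel array. */
static struct {
	pthread_mutex_t chan_lock;	/* plays the role of ses->chan_lock */
	struct chan chans[MAX_CHANS];
	unsigned int chan_count;
	unsigned int chan_seq;		/* round-robin pick counter */
} ses = { .chan_lock = PTHREAD_MUTEX_INITIALIZER, .chan_count = 3 };

/* Pick the next channel round-robin; holding the lock keeps chan_count
 * and chan_seq consistent while other threads add or drop channels. */
static struct chan *pick_channel(void)
{
	unsigned int index = 0;
	struct chan *c;

	pthread_mutex_lock(&ses.chan_lock);
	if (ses.chan_count > 1)
		index = ++ses.chan_seq % ses.chan_count;
	c = &ses.chans[index];
	pthread_mutex_unlock(&ses.chan_lock);
	return c;
}

int main(void)
{
	for (unsigned int i = 0; i < MAX_CHANS; i++)
		ses.chans[i].id = (int)i;
	for (int i = 0; i < 6; i++)
		printf("picked channel %d\n", pick_channel()->id);
	return 0;
}
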
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index bcb1b91b234fb3..9a249bfc277058 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -96,16 +96,9 @@ static void z_erofs_free_pcluster(struct z_erofs_pcluster *pcl)
DBG_BUGON(1);
}
-/*
- * a compressed_pages[] placeholder in order to avoid
- * being filled with file pages for in-place decompression.
- */
-#define PAGE_UNALLOCATED ((void *)0x5F0E4B1D)
-
/* how to allocate cached pages for a pcluster */
enum z_erofs_cache_alloctype {
DONTALLOC, /* don't allocate any cached pages */
- DELAYEDALLOC, /* delayed allocation (at the time of submitting io) */
/*
* try to use cached I/O if page allocation succeeds or fallback
* to in-place I/O instead to avoid any direct reclaim.
@@ -267,10 +260,6 @@ static void preload_compressed_pages(struct z_erofs_collector *clt,
/* I/O is needed, no possible to decompress directly */
standalone = false;
switch (type) {
- case DELAYEDALLOC:
- t = tagptr_init(compressed_page_t,
- PAGE_UNALLOCATED);
- break;
case TRYALLOC:
newpage = erofs_allocpage(pagepool, gfp);
if (!newpage)
@@ -371,8 +360,8 @@ static bool z_erofs_try_inplace_io(struct z_erofs_collector *clt,
/* callers must be with collection lock held */
static int z_erofs_attach_page(struct z_erofs_collector *clt,
- struct page *page,
- enum z_erofs_page_type type)
+ struct page *page, enum z_erofs_page_type type,
+ bool pvec_safereuse)
{
int ret;
@@ -382,9 +371,9 @@ static int z_erofs_attach_page(struct z_erofs_collector *clt,
z_erofs_try_inplace_io(clt, page))
return 0;
- ret = z_erofs_pagevec_enqueue(&clt->vector, page, type);
+ ret = z_erofs_pagevec_enqueue(&clt->vector, page, type,
+ pvec_safereuse);
clt->cl->vcnt += (unsigned int)ret;
-
return ret ? 0 : -EAGAIN;
}
@@ -727,7 +716,8 @@ hitted:
tight &= (clt->mode >= COLLECT_PRIMARY_FOLLOWED);
retry:
- err = z_erofs_attach_page(clt, page, page_type);
+ err = z_erofs_attach_page(clt, page, page_type,
+ clt->mode >= COLLECT_PRIMARY_FOLLOWED);
/* should allocate an additional short-lived page for pagevec */
if (err == -EAGAIN) {
struct page *const newpage =
@@ -735,7 +725,7 @@ retry:
set_page_private(newpage, Z_EROFS_SHORTLIVED_PAGE);
err = z_erofs_attach_page(clt, newpage,
- Z_EROFS_PAGE_TYPE_EXCLUSIVE);
+ Z_EROFS_PAGE_TYPE_EXCLUSIVE, true);
if (!err)
goto retry;
}
@@ -1089,15 +1079,6 @@ repeat:
if (!page)
goto out_allocpage;
- /*
- * the cached page has not been allocated and
- * an placeholder is out there, prepare it now.
- */
- if (page == PAGE_UNALLOCATED) {
- tocache = true;
- goto out_allocpage;
- }
-
/* process the target tagged pointer */
t = tagptr_init(compressed_page_t, page);
justfound = tagptr_unfold_tags(t);
diff --git a/fs/erofs/zdata.h b/fs/erofs/zdata.h
index 879df536277717..4a69515dea7552 100644
--- a/fs/erofs/zdata.h
+++ b/fs/erofs/zdata.h
@@ -179,4 +179,3 @@ static inline void z_erofs_onlinepage_endio(struct page *page)
#define Z_EROFS_VMAP_GLOBAL_PAGES 2048
#endif
-
diff --git a/fs/erofs/zpvec.h b/fs/erofs/zpvec.h
index dfd7fe0503bb11..b05464f4a80831 100644
--- a/fs/erofs/zpvec.h
+++ b/fs/erofs/zpvec.h
@@ -106,11 +106,18 @@ static inline void z_erofs_pagevec_ctor_init(struct z_erofs_pagevec_ctor *ctor,
static inline bool z_erofs_pagevec_enqueue(struct z_erofs_pagevec_ctor *ctor,
struct page *page,
- enum z_erofs_page_type type)
+ enum z_erofs_page_type type,
+ bool pvec_safereuse)
{
- if (!ctor->next && type)
- if (ctor->index + 1 == ctor->nr)
+ if (!ctor->next) {
+ /* some pages cannot be reused as pvec safely without I/O */
+ if (type == Z_EROFS_PAGE_TYPE_EXCLUSIVE && !pvec_safereuse)
+ type = Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED;
+
+ if (type != Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
+ ctor->index + 1 == ctor->nr)
return false;
+ }
if (ctor->index >= ctor->nr)
z_erofs_pagevec_ctor_pagedown(ctor, false);
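The zpvec.h change above demotes an EXCLUSIVE page to TAIL_SHARED when the caller reports it cannot be safely reused as pagevec storage, and only exclusive pages may then take the reserved last slot. A compact sketch of that decision, with enum values and names invented to mirror the diff (the ctor->next handling is intentionally left out):

#include <stdbool.h>
#include <stdio.h>

enum page_type { PAGE_EXCLUSIVE, PAGE_TAIL_SHARED };

/* Decide whether a page may occupy the last pagevec slot; non-exclusive
 * pages must leave one slot free so it can hold the next-page pointer. */
static bool may_enqueue(enum page_type *type, bool pvec_safereuse,
			unsigned int index, unsigned int nr)
{
	if (*type == PAGE_EXCLUSIVE && !pvec_safereuse)
		*type = PAGE_TAIL_SHARED;	/* cannot be reused as pvec storage */

	if (*type != PAGE_EXCLUSIVE && index + 1 == nr)
		return false;			/* keep the last slot reserved */
	return true;
}

int main(void)
{
	enum page_type t = PAGE_EXCLUSIVE;

	printf("%d\n", may_enqueue(&t, false, 15, 16));	/* 0: demoted, last slot */
	t = PAGE_EXCLUSIVE;
	printf("%d\n", may_enqueue(&t, true, 15, 16));	/* 1: exclusive may fill it */
	return 0;
}
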
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 83e9bc0f91ffd7..f1693d45bb7827 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -653,7 +653,7 @@ static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
return PTR_ERR(inode);
}
- err = dquot_initialize(inode);
+ err = f2fs_dquot_initialize(inode);
if (err) {
iput(inode);
goto err_out;
@@ -705,9 +705,6 @@ int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi)
}
#ifdef CONFIG_QUOTA
- /* Needed for iput() to work correctly and not trash data */
- sbi->sb->s_flags |= SB_ACTIVE;
-
/*
* Turn on quotas which were not enabled for read-only mounts if
* filesystem has quota feature, so that they are updated correctly.
@@ -1162,7 +1159,8 @@ static bool __need_flush_quota(struct f2fs_sb_info *sbi)
if (!is_journalled_quota(sbi))
return false;
- down_write(&sbi->quota_sem);
+ if (!down_write_trylock(&sbi->quota_sem))
+ return true;
if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH)) {
ret = false;
} else if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR)) {
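The __need_flush_quota() hunk above swaps a blocking down_write() for down_write_trylock(): when the quota semaphore is contended, the function simply reports that a flush is needed instead of sleeping. A userspace sketch of that trylock-or-assume-dirty pattern using a pthread rwlock (invented names, not the f2fs code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t quota_sem = PTHREAD_RWLOCK_INITIALIZER;
static bool quota_needs_repair;

/* Report whether quota needs flushing; if the lock is busy, err on the
 * side of "yes" rather than blocking the checkpoint path. */
static bool need_flush_quota(void)
{
	bool ret;

	if (pthread_rwlock_trywrlock(&quota_sem) != 0)
		return true;		/* contended: assume a flush is needed */
	ret = quota_needs_repair;
	pthread_rwlock_unlock(&quota_sem);
	return ret;
}

int main(void)
{
	printf("need flush: %d\n", need_flush_quota());	/* 0: lock free, no repair */
	return 0;
}
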
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 20a083dc904231..49121a21f749f8 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -336,8 +336,8 @@ static const struct f2fs_compress_ops f2fs_lz4_ops = {
static int zstd_init_compress_ctx(struct compress_ctx *cc)
{
- ZSTD_parameters params;
- ZSTD_CStream *stream;
+ zstd_parameters params;
+ zstd_cstream *stream;
void *workspace;
unsigned int workspace_size;
unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
@@ -346,17 +346,17 @@ static int zstd_init_compress_ctx(struct compress_ctx *cc)
if (!level)
level = F2FS_ZSTD_DEFAULT_CLEVEL;
- params = ZSTD_getParams(level, cc->rlen, 0);
- workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams);
+ params = zstd_get_params(F2FS_ZSTD_DEFAULT_CLEVEL, cc->rlen);
+ workspace_size = zstd_cstream_workspace_bound(&params.cParams);
workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
workspace_size, GFP_NOFS);
if (!workspace)
return -ENOMEM;
- stream = ZSTD_initCStream(params, 0, workspace, workspace_size);
+ stream = zstd_init_cstream(&params, 0, workspace, workspace_size);
if (!stream) {
- printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initCStream failed\n",
+ printk_ratelimited("%sF2FS-fs (%s): %s zstd_init_cstream failed\n",
KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
__func__);
kvfree(workspace);
@@ -379,9 +379,9 @@ static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
static int zstd_compress_pages(struct compress_ctx *cc)
{
- ZSTD_CStream *stream = cc->private2;
- ZSTD_inBuffer inbuf;
- ZSTD_outBuffer outbuf;
+ zstd_cstream *stream = cc->private2;
+ zstd_in_buffer inbuf;
+ zstd_out_buffer outbuf;
int src_size = cc->rlen;
int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
int ret;
@@ -394,19 +394,19 @@ static int zstd_compress_pages(struct compress_ctx *cc)
outbuf.dst = cc->cbuf->cdata;
outbuf.size = dst_size;
- ret = ZSTD_compressStream(stream, &outbuf, &inbuf);
- if (ZSTD_isError(ret)) {
- printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
+ ret = zstd_compress_stream(stream, &outbuf, &inbuf);
+ if (zstd_is_error(ret)) {
+ printk_ratelimited("%sF2FS-fs (%s): %s zstd_compress_stream failed, ret: %d\n",
KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
- __func__, ZSTD_getErrorCode(ret));
+ __func__, zstd_get_error_code(ret));
return -EIO;
}
- ret = ZSTD_endStream(stream, &outbuf);
- if (ZSTD_isError(ret)) {
- printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_endStream returned %d\n",
+ ret = zstd_end_stream(stream, &outbuf);
+ if (zstd_is_error(ret)) {
+ printk_ratelimited("%sF2FS-fs (%s): %s zstd_end_stream returned %d\n",
KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
- __func__, ZSTD_getErrorCode(ret));
+ __func__, zstd_get_error_code(ret));
return -EIO;
}
@@ -423,22 +423,22 @@ static int zstd_compress_pages(struct compress_ctx *cc)
static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
{
- ZSTD_DStream *stream;
+ zstd_dstream *stream;
void *workspace;
unsigned int workspace_size;
unsigned int max_window_size =
MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);
- workspace_size = ZSTD_DStreamWorkspaceBound(max_window_size);
+ workspace_size = zstd_dstream_workspace_bound(max_window_size);
workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
workspace_size, GFP_NOFS);
if (!workspace)
return -ENOMEM;
- stream = ZSTD_initDStream(max_window_size, workspace, workspace_size);
+ stream = zstd_init_dstream(max_window_size, workspace, workspace_size);
if (!stream) {
- printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initDStream failed\n",
+ printk_ratelimited("%sF2FS-fs (%s): %s zstd_init_dstream failed\n",
KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
__func__);
kvfree(workspace);
@@ -460,9 +460,9 @@ static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
static int zstd_decompress_pages(struct decompress_io_ctx *dic)
{
- ZSTD_DStream *stream = dic->private2;
- ZSTD_inBuffer inbuf;
- ZSTD_outBuffer outbuf;
+ zstd_dstream *stream = dic->private2;
+ zstd_in_buffer inbuf;
+ zstd_out_buffer outbuf;
int ret;
inbuf.pos = 0;
@@ -473,11 +473,11 @@ static int zstd_decompress_pages(struct decompress_io_ctx *dic)
outbuf.dst = dic->rbuf;
outbuf.size = dic->rlen;
- ret = ZSTD_decompressStream(stream, &outbuf, &inbuf);
- if (ZSTD_isError(ret)) {
- printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
+ ret = zstd_decompress_stream(stream, &outbuf, &inbuf);
+ if (zstd_is_error(ret)) {
+ printk_ratelimited("%sF2FS-fs (%s): %s zstd_decompress_stream failed, ret: %d\n",
KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
- __func__, ZSTD_getErrorCode(ret));
+ __func__, zstd_get_error_code(ret));
return -EIO;
}
@@ -882,6 +882,25 @@ bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
return is_page_in_cluster(cc, index);
}
+bool f2fs_all_cluster_page_loaded(struct compress_ctx *cc, struct pagevec *pvec,
+ int index, int nr_pages)
+{
+ unsigned long pgidx;
+ int i;
+
+ if (nr_pages - index < cc->cluster_size)
+ return false;
+
+ pgidx = pvec->pages[index]->index;
+
+ for (i = 1; i < cc->cluster_size; i++) {
+ if (pvec->pages[index + i]->index != pgidx + i)
+ return false;
+ }
+
+ return true;
+}
+
static bool cluster_has_invalid_data(struct compress_ctx *cc)
{
loff_t i_size = i_size_read(cc->inode);
@@ -1531,6 +1550,7 @@ int f2fs_write_multi_pages(struct compress_ctx *cc,
if (cluster_may_compress(cc)) {
err = f2fs_compress_pages(cc);
if (err == -EAGAIN) {
+ add_compr_block_stat(cc->inode, cc->cluster_size);
goto write;
} else if (err) {
f2fs_put_rpages_wbc(cc, wbc, true, 1);
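The new f2fs_all_cluster_page_loaded() helper above treats a compressed-cluster overwrite as safe only when the pagevec holds cluster_size pages with strictly consecutive indices starting at the current slot. A standalone sketch of that contiguity test, using plain arrays instead of struct pagevec (names invented):

#include <stdbool.h>
#include <stdio.h>

/* Return true only if pages[index..index+cluster_size-1] exist and carry
 * strictly consecutive page indices, i.e. the whole compress cluster is
 * present in this batch. */
static bool all_cluster_pages_loaded(const unsigned long *page_index,
				     int nr_pages, int index, int cluster_size)
{
	if (nr_pages - index < cluster_size)
		return false;

	for (int i = 1; i < cluster_size; i++)
		if (page_index[index + i] != page_index[index] + i)
			return false;
	return true;
}

int main(void)
{
	unsigned long batch[] = { 8, 9, 10, 11, 16 };

	printf("%d\n", all_cluster_pages_loaded(batch, 5, 0, 4)); /* 1: 8..11 */
	printf("%d\n", all_cluster_pages_loaded(batch, 5, 1, 4)); /* 0: gap at 16 */
	return 0;
}
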
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index f4fd6c246c9a95..9f754aaef558be 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -1465,10 +1465,15 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
struct extent_info ei = {0, };
block_t blkaddr;
unsigned int start_pgofs;
+ int bidx = 0;
if (!maxblocks)
return 0;
+ map->m_bdev = inode->i_sb->s_bdev;
+ map->m_multidev_dio =
+ f2fs_allow_multi_device_dio(F2FS_I_SB(inode), flag);
+
map->m_len = 0;
map->m_flags = 0;
@@ -1491,6 +1496,21 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
if (flag == F2FS_GET_BLOCK_DIO)
f2fs_wait_on_block_writeback_range(inode,
map->m_pblk, map->m_len);
+
+ if (map->m_multidev_dio) {
+ block_t blk_addr = map->m_pblk;
+
+ bidx = f2fs_target_device_index(sbi, map->m_pblk);
+
+ map->m_bdev = FDEV(bidx).bdev;
+ map->m_pblk -= FDEV(bidx).start_blk;
+ map->m_len = min(map->m_len,
+ FDEV(bidx).end_blk + 1 - map->m_pblk);
+
+ if (map->m_may_create)
+ f2fs_update_device_state(sbi, inode->i_ino,
+ blk_addr, map->m_len);
+ }
goto out;
}
@@ -1609,6 +1629,9 @@ next_block:
if (flag == F2FS_GET_BLOCK_PRE_AIO)
goto skip;
+ if (map->m_multidev_dio)
+ bidx = f2fs_target_device_index(sbi, blkaddr);
+
if (map->m_len == 0) {
/* preallocated unwritten block should be mapped for fiemap. */
if (blkaddr == NEW_ADDR)
@@ -1617,10 +1640,15 @@ next_block:
map->m_pblk = blkaddr;
map->m_len = 1;
+
+ if (map->m_multidev_dio)
+ map->m_bdev = FDEV(bidx).bdev;
} else if ((map->m_pblk != NEW_ADDR &&
blkaddr == (map->m_pblk + ofs)) ||
(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
flag == F2FS_GET_BLOCK_PRE_DIO) {
+ if (map->m_multidev_dio && map->m_bdev != FDEV(bidx).bdev)
+ goto sync_out;
ofs++;
map->m_len++;
} else {
@@ -1673,10 +1701,32 @@ skip:
sync_out:
- /* for hardware encryption, but to avoid potential issue in future */
- if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED)
+ if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED) {
+ /*
+ * for hardware encryption, but to avoid potential issue
+ * in future
+ */
f2fs_wait_on_block_writeback_range(inode,
map->m_pblk, map->m_len);
+ invalidate_mapping_pages(META_MAPPING(sbi),
+ map->m_pblk, map->m_pblk);
+
+ if (map->m_multidev_dio) {
+ block_t blk_addr = map->m_pblk;
+
+ bidx = f2fs_target_device_index(sbi, map->m_pblk);
+
+ map->m_bdev = FDEV(bidx).bdev;
+ map->m_pblk -= FDEV(bidx).start_blk;
+
+ if (map->m_may_create)
+ f2fs_update_device_state(sbi, inode->i_ino,
+ blk_addr, map->m_len);
+
+ f2fs_bug_on(sbi, blk_addr + map->m_len >
+ FDEV(bidx).end_blk + 1);
+ }
+ }
if (flag == F2FS_GET_BLOCK_PRECACHE) {
if (map->m_flags & F2FS_MAP_MAPPED) {
@@ -1696,7 +1746,7 @@ unlock_out:
f2fs_balance_fs(sbi, dn.node_changed);
}
out:
- trace_f2fs_map_blocks(inode, map, err);
+ trace_f2fs_map_blocks(inode, map, create, flag, err);
return err;
}
@@ -1755,6 +1805,9 @@ static int __get_data_block(struct inode *inode, sector_t iblock,
map_bh(bh, inode->i_sb, map.m_pblk);
bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
bh->b_size = blks_to_bytes(inode, map.m_len);
+
+ if (map.m_multidev_dio)
+ bh->b_bdev = map.m_bdev;
}
return err;
}
@@ -2989,6 +3042,10 @@ readd:
need_readd = false;
#ifdef CONFIG_F2FS_FS_COMPRESSION
if (f2fs_compressed_file(inode)) {
+ void *fsdata = NULL;
+ struct page *pagep;
+ int ret2;
+
ret = f2fs_init_compress_ctx(&cc);
if (ret) {
done = 1;
@@ -3007,27 +3064,23 @@ readd:
if (unlikely(f2fs_cp_error(sbi)))
goto lock_page;
- if (f2fs_cluster_is_empty(&cc)) {
- void *fsdata = NULL;
- struct page *pagep;
- int ret2;
+ if (!f2fs_cluster_is_empty(&cc))
+ goto lock_page;
- ret2 = f2fs_prepare_compress_overwrite(
+ ret2 = f2fs_prepare_compress_overwrite(
inode, &pagep,
page->index, &fsdata);
- if (ret2 < 0) {
- ret = ret2;
- done = 1;
- break;
- } else if (ret2 &&
- !f2fs_compress_write_end(inode,
- fsdata, page->index,
- 1)) {
- retry = 1;
- break;
- }
- } else {
- goto lock_page;
+ if (ret2 < 0) {
+ ret = ret2;
+ done = 1;
+ break;
+ } else if (ret2 &&
+ (!f2fs_compress_write_end(inode,
+ fsdata, page->index, 1) ||
+ !f2fs_all_cluster_page_loaded(&cc,
+ &pvec, i, nr_pages))) {
+ retry = 1;
+ break;
}
}
#endif
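Among the data.c changes above, f2fs_map_blocks() now redirects a mapped extent to its owning device for multi-device direct IO: find the device that holds m_pblk, rebase the block address onto that device, and clamp m_len so the extent never crosses a device boundary. A userspace sketch of that remap-and-clamp step with an invented two-device table:

#include <stdio.h>

struct dev_info {
	unsigned int start_blk;	/* first global block on this device */
	unsigned int end_blk;	/* last global block on this device */
};

static const struct dev_info devs[] = {
	{ .start_blk = 0,    .end_blk = 999  },
	{ .start_blk = 1000, .end_blk = 2999 },
};

static int target_device_index(unsigned int blkaddr)
{
	for (unsigned int i = 0; i < sizeof(devs) / sizeof(devs[0]); i++)
		if (blkaddr >= devs[i].start_blk && blkaddr <= devs[i].end_blk)
			return (int)i;
	return -1;
}

/* Rebase a global extent onto its device and clamp it at the device end. */
static void remap_extent(unsigned int *pblk, unsigned int *len, int *bidx)
{
	unsigned int avail;

	*bidx = target_device_index(*pblk);
	*pblk -= devs[*bidx].start_blk;		/* device-relative address */
	avail = devs[*bidx].end_blk + 1 - devs[*bidx].start_blk - *pblk;
	if (*len > avail)
		*len = avail;
}

int main(void)
{
	unsigned int pblk = 900, len = 300;	/* extent straddles the 1000 boundary */
	int bidx;

	remap_extent(&pblk, &len, &bidx);
	printf("dev %d, pblk %u, len %u\n", bidx, pblk, len); /* dev 0, 900, 100 */
	return 0;
}
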
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index b339ae89c1ad18..ce9fc9f1300023 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -55,6 +55,7 @@ enum {
FAULT_DISCARD,
FAULT_WRITE_IO,
FAULT_SLAB_ALLOC,
+ FAULT_DQUOT_INIT,
FAULT_MAX,
};
@@ -561,6 +562,9 @@ enum {
#define MAX_DIR_RA_PAGES 4 /* maximum ra pages of dir */
+/* dirty segments threshold for triggering CP */
+#define DEFAULT_DIRTY_THRESHOLD 4
+
/* for in-memory extent cache entry */
#define F2FS_MIN_EXTENT_LEN 64 /* minimum extent length */
@@ -617,6 +621,7 @@ struct extent_tree {
F2FS_MAP_UNWRITTEN)
struct f2fs_map_blocks {
+ struct block_device *m_bdev; /* for multi-device dio */
block_t m_pblk;
block_t m_lblk;
unsigned int m_len;
@@ -625,6 +630,7 @@ struct f2fs_map_blocks {
pgoff_t *m_next_extent; /* point to next possible extent */
int m_seg_type;
bool m_may_create; /* indicate it is from write path */
+ bool m_multidev_dio; /* indicate it allows multi-device dio */
};
/* for flag in get_data_block */
@@ -1284,8 +1290,10 @@ enum {
};
enum {
- FS_MODE_ADAPTIVE, /* use both lfs/ssr allocation */
- FS_MODE_LFS, /* use lfs allocation only */
+ FS_MODE_ADAPTIVE, /* use both lfs/ssr allocation */
+ FS_MODE_LFS, /* use lfs allocation only */
+ FS_MODE_FRAGMENT_SEG, /* segment fragmentation mode */
+ FS_MODE_FRAGMENT_BLK, /* block fragmentation mode */
};
enum {
@@ -1728,12 +1736,15 @@ struct f2fs_sb_info {
/* For shrinker support */
struct list_head s_list;
+ struct mutex umount_mutex;
+ unsigned int shrinker_run_no;
+
+ /* For multi devices */
int s_ndevs; /* number of devices */
struct f2fs_dev_info *devs; /* for device list */
unsigned int dirty_device; /* for checkpoint data flush */
spinlock_t dev_lock; /* protect dirty_device */
- struct mutex umount_mutex;
- unsigned int shrinker_run_no;
+ bool aligned_blksize; /* all devices has the same logical blksize */
/* For write statistics */
u64 sectors_written_start;
@@ -1756,6 +1767,9 @@ struct f2fs_sb_info {
unsigned long seq_file_ra_mul; /* multiplier for ra_pages of seq. files in fadvise */
+ int max_fragment_chunk; /* max chunk size for block fragmentation mode */
+ int max_fragment_hole; /* max hole size for block fragmentation mode */
+
#ifdef CONFIG_F2FS_FS_COMPRESSION
struct kmem_cache *page_array_slab; /* page array entry */
unsigned int page_array_slab_size; /* default page array slab size */
@@ -3363,6 +3377,7 @@ static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
*/
int f2fs_inode_dirtied(struct inode *inode, bool sync);
void f2fs_inode_synced(struct inode *inode);
+int f2fs_dquot_initialize(struct inode *inode);
int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly);
int f2fs_quota_sync(struct super_block *sb, int type);
loff_t max_file_blocks(struct inode *inode);
@@ -3492,6 +3507,8 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
block_t old_blkaddr, block_t *new_blkaddr,
struct f2fs_summary *sum, int type,
struct f2fs_io_info *fio);
+void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino,
+ block_t blkaddr, unsigned int blkcnt);
void f2fs_wait_on_page_writeback(struct page *page,
enum page_type type, bool ordered, bool locked);
void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr);
@@ -3516,6 +3533,16 @@ unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
unsigned int segno);
+#define DEF_FRAGMENT_SIZE 4
+#define MIN_FRAGMENT_SIZE 1
+#define MAX_FRAGMENT_SIZE 512
+
+static inline bool f2fs_need_rand_seg(struct f2fs_sb_info *sbi)
+{
+ return F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_SEG ||
+ F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK;
+}
+
/*
* checkpoint.c
*/
@@ -4027,6 +4054,8 @@ void f2fs_end_read_compressed_page(struct page *page, bool failed,
block_t blkaddr);
bool f2fs_cluster_is_empty(struct compress_ctx *cc);
bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
+bool f2fs_all_cluster_page_loaded(struct compress_ctx *cc, struct pagevec *pvec,
+ int index, int nr_pages);
bool f2fs_sanity_check_cluster(struct dnode_of_data *dn);
void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
int f2fs_write_multi_pages(struct compress_ctx *cc,
@@ -4152,8 +4181,7 @@ static inline bool f2fs_disable_compressed_file(struct inode *inode)
if (!f2fs_compressed_file(inode))
return true;
- if (S_ISREG(inode->i_mode) &&
- (get_dirty_pages(inode) || atomic_read(&fi->i_compr_blocks)))
+ if (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))
return false;
fi->i_flags &= ~F2FS_COMPR_FL;
@@ -4302,6 +4330,16 @@ static inline int block_unaligned_IO(struct inode *inode,
return align & blocksize_mask;
}
+static inline bool f2fs_allow_multi_device_dio(struct f2fs_sb_info *sbi,
+ int flag)
+{
+ if (!f2fs_is_multi_device(sbi))
+ return false;
+ if (flag != F2FS_GET_BLOCK_DIO)
+ return false;
+ return sbi->aligned_blksize;
+}
+
static inline bool f2fs_force_buffered_io(struct inode *inode,
struct kiocb *iocb, struct iov_iter *iter)
{
@@ -4310,7 +4348,9 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
if (f2fs_post_read_required(inode))
return true;
- if (f2fs_is_multi_device(sbi))
+
+ /* disallow direct IO if any of devices has unaligned blksize */
+ if (f2fs_is_multi_device(sbi) && !sbi->aligned_blksize)
return true;
/*
* for blkzoned device, fallback direct IO to buffered IO, so
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index eb971e1e722768..92ec2699bc8599 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -786,7 +786,7 @@ int f2fs_truncate(struct inode *inode)
return -EIO;
}
- err = dquot_initialize(inode);
+ err = f2fs_dquot_initialize(inode);
if (err)
return err;
@@ -916,7 +916,7 @@ int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
return err;
if (is_quota_modification(inode, attr)) {
- err = dquot_initialize(inode);
+ err = f2fs_dquot_initialize(inode);
if (err)
return err;
}
@@ -3020,7 +3020,7 @@ static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
}
f2fs_put_page(ipage, 1);
- err = dquot_initialize(inode);
+ err = f2fs_dquot_initialize(inode);
if (err)
return err;
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 77391e3b7d68f8..a946ce0ead3417 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -14,6 +14,7 @@
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>
+#include <linux/random.h>
#include "f2fs.h"
#include "node.h"
@@ -257,7 +258,9 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
p->max_search = sbi->max_victim_search;
/* let's select beginning hot/small space first in no_heap mode*/
- if (test_opt(sbi, NOHEAP) &&
+ if (f2fs_need_rand_seg(sbi))
+ p->offset = prandom_u32() % (MAIN_SECS(sbi) * sbi->segs_per_sec);
+ else if (test_opt(sbi, NOHEAP) &&
(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
p->offset = 0;
else
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 56a20d5c15dad0..ea08f0dfa1bdff 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -192,7 +192,7 @@ int f2fs_convert_inline_inode(struct inode *inode)
f2fs_hw_is_readonly(sbi) || f2fs_readonly(sbi->sb))
return 0;
- err = dquot_initialize(inode);
+ err = f2fs_dquot_initialize(inode);
if (err)
return err;
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 9141147b5bb00e..0f8b2df3e1e012 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -527,7 +527,7 @@ make_now:
inode->i_op = &f2fs_dir_inode_operations;
inode->i_fop = &f2fs_dir_operations;
inode->i_mapping->a_ops = &f2fs_dblock_aops;
- inode_nohighmem(inode);
+ mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
} else if (S_ISLNK(inode->i_mode)) {
if (file_is_encrypt(inode))
inode->i_op = &f2fs_encrypted_symlink_inode_operations;
@@ -754,7 +754,7 @@ void f2fs_evict_inode(struct inode *inode)
if (inode->i_nlink || is_bad_inode(inode))
goto no_delete;
- err = dquot_initialize(inode);
+ err = f2fs_dquot_initialize(inode);
if (err) {
err = 0;
set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 9c528e583c9d53..a728a0af9ce0c8 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -74,7 +74,7 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
if (err)
goto fail_drop;
- err = dquot_initialize(inode);
+ err = f2fs_dquot_initialize(inode);
if (err)
goto fail_drop;
@@ -345,7 +345,7 @@ static int f2fs_create(struct user_namespace *mnt_userns, struct inode *dir,
if (!f2fs_is_checkpoint_ready(sbi))
return -ENOSPC;
- err = dquot_initialize(dir);
+ err = f2fs_dquot_initialize(dir);
if (err)
return err;
@@ -404,7 +404,7 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
F2FS_I(old_dentry->d_inode)->i_projid)))
return -EXDEV;
- err = dquot_initialize(dir);
+ err = f2fs_dquot_initialize(dir);
if (err)
return err;
@@ -460,7 +460,7 @@ static int __recover_dot_dentries(struct inode *dir, nid_t pino)
return 0;
}
- err = dquot_initialize(dir);
+ err = f2fs_dquot_initialize(dir);
if (err)
return err;
@@ -598,10 +598,10 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
goto fail;
}
- err = dquot_initialize(dir);
+ err = f2fs_dquot_initialize(dir);
if (err)
goto fail;
- err = dquot_initialize(inode);
+ err = f2fs_dquot_initialize(inode);
if (err)
goto fail;
@@ -675,7 +675,7 @@ static int f2fs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
if (err)
return err;
- err = dquot_initialize(dir);
+ err = f2fs_dquot_initialize(dir);
if (err)
return err;
@@ -746,7 +746,7 @@ static int f2fs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
- err = dquot_initialize(dir);
+ err = f2fs_dquot_initialize(dir);
if (err)
return err;
@@ -757,7 +757,7 @@ static int f2fs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
inode->i_op = &f2fs_dir_inode_operations;
inode->i_fop = &f2fs_dir_operations;
inode->i_mapping->a_ops = &f2fs_dblock_aops;
- inode_nohighmem(inode);
+ mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
set_inode_flag(inode, FI_INC_LINK);
f2fs_lock_op(sbi);
@@ -803,7 +803,7 @@ static int f2fs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
if (!f2fs_is_checkpoint_ready(sbi))
return -ENOSPC;
- err = dquot_initialize(dir);
+ err = f2fs_dquot_initialize(dir);
if (err)
return err;
@@ -841,7 +841,7 @@ static int __f2fs_tmpfile(struct inode *dir, struct dentry *dentry,
struct inode *inode;
int err;
- err = dquot_initialize(dir);
+ err = f2fs_dquot_initialize(dir);
if (err)
return err;
@@ -965,16 +965,16 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
return err;
}
- err = dquot_initialize(old_dir);
+ err = f2fs_dquot_initialize(old_dir);
if (err)
goto out;
- err = dquot_initialize(new_dir);
+ err = f2fs_dquot_initialize(new_dir);
if (err)
goto out;
if (new_inode) {
- err = dquot_initialize(new_inode);
+ err = f2fs_dquot_initialize(new_inode);
if (err)
goto out;
}
@@ -1138,11 +1138,11 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
F2FS_I(new_dentry->d_inode)->i_projid)))
return -EXDEV;
- err = dquot_initialize(old_dir);
+ err = f2fs_dquot_initialize(old_dir);
if (err)
goto out;
- err = dquot_initialize(new_dir);
+ err = f2fs_dquot_initialize(new_dir);
if (err)
goto out;
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index e863136081b47f..556fcd8457f3f2 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1443,6 +1443,7 @@ page_hit:
nid, nid_of_node(page), ino_of_node(page),
ofs_of_node(page), cpver_of_node(page),
next_blkaddr_of_node(page));
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
err = -EINVAL;
out_err:
ClearPageUptodate(page);
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index ff14a6e5ac1c9e..18b98cf0465b84 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -138,11 +138,6 @@ static inline bool excess_cached_nats(struct f2fs_sb_info *sbi)
return NM_I(sbi)->nat_cnt[TOTAL_NAT] >= DEF_NAT_CACHE_THRESHOLD;
}
-static inline bool excess_dirty_nodes(struct f2fs_sb_info *sbi)
-{
- return get_pages(sbi, F2FS_DIRTY_NODES) >= sbi->blocks_per_seg * 8;
-}
-
enum mem_type {
FREE_NIDS, /* indicates the free nid list */
NAT_ENTRIES, /* indicates the cached nat entry */
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 04655511d7f514..6a1b4668d933aa 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -81,7 +81,7 @@ static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
if (IS_ERR(inode))
return ERR_CAST(inode);
- err = dquot_initialize(inode);
+ err = f2fs_dquot_initialize(inode);
if (err)
goto err_out;
@@ -203,7 +203,7 @@ retry:
goto out_put;
}
- err = dquot_initialize(einode);
+ err = f2fs_dquot_initialize(einode);
if (err) {
iput(einode);
goto out_put;
@@ -508,7 +508,7 @@ got_it:
if (IS_ERR(inode))
return PTR_ERR(inode);
- ret = dquot_initialize(inode);
+ ret = f2fs_dquot_initialize(inode);
if (ret) {
iput(inode);
return ret;
@@ -787,8 +787,6 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
}
#ifdef CONFIG_QUOTA
- /* Needed for iput() to work correctly and not trash data */
- sbi->sb->s_flags |= SB_ACTIVE;
/* Turn on quotas so that they are updated correctly */
quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif
@@ -816,10 +814,8 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
err = recover_data(sbi, &inode_list, &tmp_inode_list, &dir_list);
if (!err)
f2fs_bug_on(sbi, !list_empty(&inode_list));
- else {
- /* restore s_flags to let iput() trash data */
- sbi->sb->s_flags = s_flags;
- }
+ else
+ f2fs_bug_on(sbi, sbi->sb->s_flags & SB_ACTIVE);
skip:
fix_curseg_write_pointer = !check_only || list_empty(&inode_list);
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index a135d22474154c..df9ed75f0b7a76 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -15,6 +15,7 @@
#include <linux/timer.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>
+#include <linux/random.h>
#include "f2fs.h"
#include "segment.h"
@@ -529,6 +530,25 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
}
}
+static inline bool excess_dirty_threshold(struct f2fs_sb_info *sbi)
+{
+ int factor = rwsem_is_locked(&sbi->cp_rwsem) ? 3 : 2;
+ unsigned int dents = get_pages(sbi, F2FS_DIRTY_DENTS);
+ unsigned int qdata = get_pages(sbi, F2FS_DIRTY_QDATA);
+ unsigned int nodes = get_pages(sbi, F2FS_DIRTY_NODES);
+ unsigned int meta = get_pages(sbi, F2FS_DIRTY_META);
+ unsigned int imeta = get_pages(sbi, F2FS_DIRTY_IMETA);
+ unsigned int threshold = sbi->blocks_per_seg * factor *
+ DEFAULT_DIRTY_THRESHOLD;
+ unsigned int global_threshold = threshold * 3 / 2;
+
+ if (dents >= threshold || qdata >= threshold ||
+ nodes >= threshold || meta >= threshold ||
+ imeta >= threshold)
+ return true;
+ return dents + qdata + nodes + meta + imeta > global_threshold;
+}
+
void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
{
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
@@ -547,8 +567,8 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
else
f2fs_build_free_nids(sbi, false, false);
- if (excess_dirty_nats(sbi) || excess_dirty_nodes(sbi) ||
- excess_prefree_segs(sbi))
+ if (excess_dirty_nats(sbi) || excess_dirty_threshold(sbi) ||
+ excess_prefree_segs(sbi) || !f2fs_space_for_roll_forward(sbi))
goto do_sync;
/* there is background inflight IO or foreground operation recently */
@@ -561,7 +581,7 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
goto do_sync;
/* checkpoint is the only way to shrink partial cached entries */
- if (f2fs_available_free_memory(sbi, NAT_ENTRIES) ||
+ if (f2fs_available_free_memory(sbi, NAT_ENTRIES) &&
f2fs_available_free_memory(sbi, INO_ENTRIES))
return;
@@ -2630,6 +2650,8 @@ static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
unsigned short seg_type = curseg->seg_type;
sanity_check_seg_type(sbi, seg_type);
+ if (f2fs_need_rand_seg(sbi))
+ return prandom_u32() % (MAIN_SECS(sbi) * sbi->segs_per_sec);
/* if segs_per_sec is large than 1, we need to keep original policy. */
if (__is_large_section(sbi))
@@ -2681,6 +2703,9 @@ static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
curseg->next_segno = segno;
reset_curseg(sbi, type, 1);
curseg->alloc_type = LFS;
+ if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
+ curseg->fragment_remained_chunk =
+ prandom_u32() % sbi->max_fragment_chunk + 1;
}
static int __next_free_blkoff(struct f2fs_sb_info *sbi,
@@ -2707,12 +2732,22 @@ static int __next_free_blkoff(struct f2fs_sb_info *sbi,
static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
struct curseg_info *seg)
{
- if (seg->alloc_type == SSR)
+ if (seg->alloc_type == SSR) {
seg->next_blkoff =
__next_free_blkoff(sbi, seg->segno,
seg->next_blkoff + 1);
- else
+ } else {
seg->next_blkoff++;
+ if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK) {
+ /* To allocate block chunks in different sizes, use random number */
+ if (--seg->fragment_remained_chunk <= 0) {
+ seg->fragment_remained_chunk =
+ prandom_u32() % sbi->max_fragment_chunk + 1;
+ seg->next_blkoff +=
+ prandom_u32() % sbi->max_fragment_hole + 1;
+ }
+ }
+ }
}
bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno)
@@ -3485,24 +3520,30 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
up_read(&SM_I(sbi)->curseg_lock);
}
-static void update_device_state(struct f2fs_io_info *fio)
+void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino,
+ block_t blkaddr, unsigned int blkcnt)
{
- struct f2fs_sb_info *sbi = fio->sbi;
- unsigned int devidx;
-
if (!f2fs_is_multi_device(sbi))
return;
- devidx = f2fs_target_device_index(sbi, fio->new_blkaddr);
+ while (1) {
+ unsigned int devidx = f2fs_target_device_index(sbi, blkaddr);
+ unsigned int blks = FDEV(devidx).end_blk - blkaddr + 1;
- /* update device state for fsync */
- f2fs_set_dirty_device(sbi, fio->ino, devidx, FLUSH_INO);
+ /* update device state for fsync */
+ f2fs_set_dirty_device(sbi, ino, devidx, FLUSH_INO);
- /* update device state for checkpoint */
- if (!f2fs_test_bit(devidx, (char *)&sbi->dirty_device)) {
- spin_lock(&sbi->dev_lock);
- f2fs_set_bit(devidx, (char *)&sbi->dirty_device);
- spin_unlock(&sbi->dev_lock);
+ /* update device state for checkpoint */
+ if (!f2fs_test_bit(devidx, (char *)&sbi->dirty_device)) {
+ spin_lock(&sbi->dev_lock);
+ f2fs_set_bit(devidx, (char *)&sbi->dirty_device);
+ spin_unlock(&sbi->dev_lock);
+ }
+
+ if (blkcnt <= blks)
+ break;
+ blkcnt -= blks;
+ blkaddr += blks;
}
}
@@ -3529,7 +3570,7 @@ reallocate:
goto reallocate;
}
- update_device_state(fio);
+ f2fs_update_device_state(fio->sbi, fio->ino, fio->new_blkaddr, 1);
if (keep_order)
up_read(&fio->sbi->io_order_lock);
@@ -3611,6 +3652,9 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio)
goto drop_bio;
}
+ invalidate_mapping_pages(META_MAPPING(sbi),
+ fio->new_blkaddr, fio->new_blkaddr);
+
stat_inc_inplace_blocks(fio->sbi);
if (fio->bio && !(SM_I(sbi)->ipu_policy & (1 << F2FS_IPU_NOCACHE)))
@@ -3618,7 +3662,8 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio)
else
err = f2fs_submit_page_bio(fio);
if (!err) {
- update_device_state(fio);
+ f2fs_update_device_state(fio->sbi, fio->ino,
+ fio->new_blkaddr, 1);
f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
}
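The FS_MODE_FRAGMENT_BLK handling above deliberately fragments LFS allocation: each log gets a random chunk budget, and when the budget runs out the next write offset skips a random hole before a new chunk starts. A toy sketch of that advance logic, with rand() standing in for prandom_u32() and invented names:

#include <stdio.h>
#include <stdlib.h>

#define MAX_FRAGMENT_CHUNK 4
#define MAX_FRAGMENT_HOLE  4

struct curseg {
	int next_blkoff;		/* next block offset to write */
	int fragment_remained_chunk;	/* blocks left in the current chunk */
};

/* Advance the write pointer; when the chunk budget runs out, skip a random
 * hole and roll a new random chunk size, emulating on-disk fragmentation. */
static void refresh_next_blkoff(struct curseg *seg)
{
	seg->next_blkoff++;
	if (--seg->fragment_remained_chunk <= 0) {
		seg->fragment_remained_chunk = rand() % MAX_FRAGMENT_CHUNK + 1;
		seg->next_blkoff += rand() % MAX_FRAGMENT_HOLE + 1;
	}
}

int main(void)
{
	struct curseg seg = {
		.next_blkoff = 0,
		.fragment_remained_chunk = rand() % MAX_FRAGMENT_CHUNK + 1,
	};

	for (int i = 0; i < 12; i++) {
		refresh_next_blkoff(&seg);
		printf("write at blkoff %d\n", seg.next_blkoff);
	}
	return 0;
}
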
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 89fff258727d12..46fde9f3f28eae 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -314,6 +314,7 @@ struct curseg_info {
unsigned short next_blkoff; /* next block offset to write */
unsigned int zone; /* current zone number */
unsigned int next_segno; /* preallocated segment */
+ int fragment_remained_chunk; /* remained block size in a chunk for block fragmentation mode */
bool inited; /* indicate inmem log is inited */
};
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index cf049a042482b9..040b6d02e1d8a4 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -58,6 +58,7 @@ const char *f2fs_fault_name[FAULT_MAX] = {
[FAULT_DISCARD] = "discard error",
[FAULT_WRITE_IO] = "write IO error",
[FAULT_SLAB_ALLOC] = "slab alloc",
+ [FAULT_DQUOT_INIT] = "dquot initialize",
};
void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
@@ -592,7 +593,7 @@ static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
if (kstrtouint(str + 1, 10, &level))
return -EINVAL;
- if (!level || level > ZSTD_maxCLevel()) {
+ if (!level || level > zstd_max_clevel()) {
f2fs_info(sbi, "invalid zstd compress level: %d", level);
return -EINVAL;
}
@@ -817,6 +818,10 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
} else if (!strcmp(name, "lfs")) {
F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
+ } else if (!strcmp(name, "fragment:segment")) {
+ F2FS_OPTION(sbi).fs_mode = FS_MODE_FRAGMENT_SEG;
+ } else if (!strcmp(name, "fragment:block")) {
+ F2FS_OPTION(sbi).fs_mode = FS_MODE_FRAGMENT_BLK;
} else {
kfree(name);
return -EINVAL;
@@ -1292,7 +1297,7 @@ default_check:
/* Not pass down write hints if the number of active logs is lesser
* than NR_CURSEG_PERSIST_TYPE.
*/
- if (F2FS_OPTION(sbi).active_logs != NR_CURSEG_TYPE)
+ if (F2FS_OPTION(sbi).active_logs != NR_CURSEG_PERSIST_TYPE)
F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
if (f2fs_sb_has_readonly(sbi) && !f2fs_readonly(sbi->sb)) {
@@ -1896,6 +1901,10 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
seq_puts(seq, "adaptive");
else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS)
seq_puts(seq, "lfs");
+ else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_SEG)
+ seq_puts(seq, "fragment:segment");
+ else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
+ seq_puts(seq, "fragment:block");
seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs);
if (test_opt(sbi, RESERVE_ROOT))
seq_printf(seq, ",reserve_root=%u,resuid=%u,resgid=%u",
@@ -2491,6 +2500,16 @@ retry:
return len - towrite;
}
+int f2fs_dquot_initialize(struct inode *inode)
+{
+ if (time_to_inject(F2FS_I_SB(inode), FAULT_DQUOT_INIT)) {
+ f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_DQUOT_INIT);
+ return -ESRCH;
+ }
+
+ return dquot_initialize(inode);
+}
+
static struct dquot **f2fs_get_dquots(struct inode *inode)
{
return F2FS_I(inode)->i_dquot;
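The new f2fs_dquot_initialize() wrapper above gives the FAULT_DQUOT_INIT injection point a chance to fail quota initialization before falling through to dquot_initialize(). A generic userspace sketch of such a fault-injection wrapper, with rand() standing in for time_to_inject() and all names invented:

#include <stdio.h>
#include <stdlib.h>

static unsigned int inject_rate = 4;	/* roughly one failure in four calls */

/* Decide whether to inject a synthetic failure for this call. */
static int time_to_inject(void)
{
	return inject_rate && (rand() % inject_rate == 0);
}

static int real_dquot_initialize(void)
{
	return 0;			/* the real work would happen here */
}

/* Wrapper: report an error early when fault injection fires, otherwise
 * delegate to the real initialization routine. */
static int dquot_initialize_wrapped(void)
{
	if (time_to_inject()) {
		fprintf(stderr, "injected dquot init failure\n");
		return -1;		/* the kernel wrapper returns -ESRCH */
	}
	return real_dquot_initialize();
}

int main(void)
{
	for (int i = 0; i < 8; i++)
		printf("call %d -> %d\n", i, dquot_initialize_wrapped());
	return 0;
}
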
@@ -2875,6 +2894,11 @@ static const struct quotactl_ops f2fs_quotactl_ops = {
.get_nextdqblk = dquot_get_next_dqblk,
};
#else
+int f2fs_dquot_initialize(struct inode *inode)
+{
+ return 0;
+}
+
int f2fs_quota_sync(struct super_block *sb, int type)
{
return 0;
@@ -3486,7 +3510,7 @@ skip_cross:
NR_CURSEG_PERSIST_TYPE + nat_bits_blocks >= blocks_per_seg)) {
f2fs_warn(sbi, "Insane cp_payload: %u, nat_bits_blocks: %u)",
cp_payload, nat_bits_blocks);
- return -EFSCORRUPTED;
+ return 1;
}
if (unlikely(f2fs_cp_error(sbi))) {
@@ -3522,6 +3546,8 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
sbi->migration_granularity = sbi->segs_per_sec;
sbi->seq_file_ra_mul = MIN_RA_MUL;
+ sbi->max_fragment_chunk = DEF_FRAGMENT_SIZE;
+ sbi->max_fragment_hole = DEF_FRAGMENT_SIZE;
sbi->dir_level = DEF_DIR_LEVEL;
sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
@@ -3746,6 +3772,7 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
{
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
unsigned int max_devices = MAX_DEVICES;
+ unsigned int logical_blksize;
int i;
/* Initialize single device information */
@@ -3766,6 +3793,9 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
if (!sbi->devs)
return -ENOMEM;
+ logical_blksize = bdev_logical_block_size(sbi->sb->s_bdev);
+ sbi->aligned_blksize = true;
+
for (i = 0; i < max_devices; i++) {
if (i > 0 && !RDEV(i).path[0])
@@ -3802,6 +3832,9 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
/* to release errored devices */
sbi->s_ndevs = i + 1;
+ if (logical_blksize != bdev_logical_block_size(FDEV(i).bdev))
+ sbi->aligned_blksize = false;
+
#ifdef CONFIG_BLK_DEV_ZONED
if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
!f2fs_sb_has_blkzoned(sbi)) {
@@ -4351,6 +4384,8 @@ free_node_inode:
free_stats:
f2fs_destroy_stats(sbi);
free_nm:
+ /* stop discard thread before destroying node manager */
+ f2fs_stop_discard_thread(sbi);
f2fs_destroy_node_manager(sbi);
free_sm:
f2fs_destroy_segment_manager(sbi);
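The f2fs_scan_devices() hunk above records whether every member device reports the same logical block size; multi-device direct IO is only permitted when they all match. A small sketch of that scan, using fixed sample values instead of bdev_logical_block_size():

#include <stdbool.h>
#include <stdio.h>

/* Return true only if every device reports the same logical block size. */
static bool all_blksizes_aligned(const unsigned int *blksize, int ndevs)
{
	for (int i = 1; i < ndevs; i++)
		if (blksize[i] != blksize[0])
			return false;
	return true;
}

int main(void)
{
	unsigned int sizes_ok[]  = { 512, 512, 512 };
	unsigned int sizes_bad[] = { 512, 4096, 512 };

	printf("aligned: %d\n", all_blksizes_aligned(sizes_ok, 3));  /* 1 */
	printf("aligned: %d\n", all_blksizes_aligned(sizes_bad, 3)); /* 0 */
	return 0;
}
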
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index a32fe31c33b8e4..7d289249cd7ebd 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -196,7 +196,7 @@ static ssize_t encoding_show(struct f2fs_attr *a,
struct super_block *sb = sbi->sb;
if (f2fs_sb_has_casefold(sbi))
- return snprintf(buf, PAGE_SIZE, "%s (%d.%d.%d)\n",
+ return sysfs_emit(buf, "%s (%d.%d.%d)\n",
sb->s_encoding->charset,
(sb->s_encoding->version >> 16) & 0xff,
(sb->s_encoding->version >> 8) & 0xff,
@@ -245,7 +245,7 @@ static ssize_t avg_vblocks_show(struct f2fs_attr *a,
static ssize_t main_blkaddr_show(struct f2fs_attr *a,
struct f2fs_sb_info *sbi, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%llu\n",
+ return sysfs_emit(buf, "%llu\n",
(unsigned long long)MAIN_BLKADDR(sbi));
}
@@ -551,6 +551,22 @@ out:
return count;
}
+ if (!strcmp(a->attr.name, "max_fragment_chunk")) {
+ if (t >= MIN_FRAGMENT_SIZE && t <= MAX_FRAGMENT_SIZE)
+ sbi->max_fragment_chunk = t;
+ else
+ return -EINVAL;
+ return count;
+ }
+
+ if (!strcmp(a->attr.name, "max_fragment_hole")) {
+ if (t >= MIN_FRAGMENT_SIZE && t <= MAX_FRAGMENT_SIZE)
+ sbi->max_fragment_hole = t;
+ else
+ return -EINVAL;
+ return count;
+ }
+
*ui = (unsigned int)t;
return count;
@@ -781,6 +797,8 @@ F2FS_RW_ATTR(ATGC_INFO, atgc_management, atgc_age_threshold, age_threshold);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, seq_file_ra_mul, seq_file_ra_mul);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_segment_mode, gc_segment_mode);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_reclaimed_segments, gc_reclaimed_segs);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_fragment_chunk, max_fragment_chunk);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_fragment_hole, max_fragment_hole);
#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
static struct attribute *f2fs_attrs[] = {
@@ -859,6 +877,8 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(seq_file_ra_mul),
ATTR_LIST(gc_segment_mode),
ATTR_LIST(gc_reclaimed_segments),
+ ATTR_LIST(max_fragment_chunk),
+ ATTR_LIST(max_fragment_hole),
NULL,
};
ATTRIBUTE_GROUPS(f2fs);
diff --git a/fs/f2fs/verity.c b/fs/f2fs/verity.c
index 03549b5ba204ab..fe5acdccaae197 100644
--- a/fs/f2fs/verity.c
+++ b/fs/f2fs/verity.c
@@ -136,7 +136,7 @@ static int f2fs_begin_enable_verity(struct file *filp)
* here and not rely on ->open() doing it. This must be done before
* evicting the inline data.
*/
- err = dquot_initialize(inode);
+ err = f2fs_dquot_initialize(inode);
if (err)
return err;
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 1d2d29dcd41ce0..e348f33bcb2bea 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -773,7 +773,7 @@ int f2fs_setxattr(struct inode *inode, int index, const char *name,
if (!f2fs_is_checkpoint_ready(sbi))
return -ENOSPC;
- err = dquot_initialize(inode);
+ err = f2fs_dquot_initialize(inode);
if (err)
return err;
diff --git a/fs/io-wq.c b/fs/io-wq.c
index afd955d53db90d..88202de519f6d1 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -423,9 +423,10 @@ static inline unsigned int io_get_work_hash(struct io_wq_work *work)
return work->flags >> IO_WQ_HASH_SHIFT;
}
-static void io_wait_on_hash(struct io_wqe *wqe, unsigned int hash)
+static bool io_wait_on_hash(struct io_wqe *wqe, unsigned int hash)
{
struct io_wq *wq = wqe->wq;
+ bool ret = false;
spin_lock_irq(&wq->hash->wait.lock);
if (list_empty(&wqe->wait.entry)) {
@@ -433,9 +434,11 @@ static void io_wait_on_hash(struct io_wqe *wqe, unsigned int hash)
if (!test_bit(hash, &wq->hash->map)) {
__set_current_state(TASK_RUNNING);
list_del_init(&wqe->wait.entry);
+ ret = true;
}
}
spin_unlock_irq(&wq->hash->wait.lock);
+ return ret;
}
static struct io_wq_work *io_get_next_work(struct io_wqe_acct *acct,
@@ -475,14 +478,21 @@ static struct io_wq_work *io_get_next_work(struct io_wqe_acct *acct,
}
if (stall_hash != -1U) {
+ bool unstalled;
+
/*
* Set this before dropping the lock to avoid racing with new
* work being added and clearing the stalled bit.
*/
set_bit(IO_ACCT_STALLED_BIT, &acct->flags);
raw_spin_unlock(&wqe->lock);
- io_wait_on_hash(wqe, stall_hash);
+ unstalled = io_wait_on_hash(wqe, stall_hash);
raw_spin_lock(&wqe->lock);
+ if (unstalled) {
+ clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
+ if (wq_has_sleeper(&wqe->wq->hash->wait))
+ wake_up(&wqe->wq->hash->wait);
+ }
}
return NULL;
@@ -564,8 +574,11 @@ get_next:
io_wqe_enqueue(wqe, linked);
if (hash != -1U && !next_hashed) {
+ /* serialize hash clear with wake_up() */
+ spin_lock_irq(&wq->hash->wait.lock);
clear_bit(hash, &wq->hash->map);
clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
+ spin_unlock_irq(&wq->hash->wait.lock);
if (wq_has_sleeper(&wq->hash->wait))
wake_up(&wq->hash->wait);
raw_spin_lock(&wqe->lock);
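The io-wq change above clears the hash bit while holding wq->hash->wait.lock, so a waiter woken on that queue can never observe the bit still set, and the unstall path re-clears the stalled flag before waking sleepers. A reduced userspace sketch of the clear-under-the-wait-lock-then-wake ordering with a pthread condvar (invented names, not the io-wq code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wait_cv   = PTHREAD_COND_INITIALIZER;
static bool hash_busy = true;

/* Writer side: clear the busy flag while holding the wait lock, then wake,
 * so a sleeper can never miss the transition. */
static void finish_hashed_work(void)
{
	pthread_mutex_lock(&wait_lock);
	hash_busy = false;
	pthread_cond_broadcast(&wait_cv);
	pthread_mutex_unlock(&wait_lock);
}

/* Waiter side: re-check the flag under the same lock before sleeping. */
static void *waiter(void *arg)
{
	pthread_mutex_lock(&wait_lock);
	while (hash_busy)
		pthread_cond_wait(&wait_cv, &wait_lock);
	pthread_mutex_unlock(&wait_lock);
	printf("hash released, worker may proceed\n");
	return arg;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waiter, NULL);
	finish_hashed_work();
	pthread_join(t, NULL);
	return 0;
}
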
diff --git a/fs/ksmbd/Kconfig b/fs/ksmbd/Kconfig
index b83cbd756ae50d..e1fe17747ed69c 100644
--- a/fs/ksmbd/Kconfig
+++ b/fs/ksmbd/Kconfig
@@ -6,7 +6,6 @@ config SMB_SERVER
select NLS
select NLS_UTF8
select CRYPTO
- select CRYPTO_MD4
select CRYPTO_MD5
select CRYPTO_HMAC
select CRYPTO_ECB
@@ -19,6 +18,7 @@ config SMB_SERVER
select CRYPTO_GCM
select ASN1
select OID_REGISTRY
+ select CRC32
default n
help
Choose Y here if you want to allow SMB3 compliant clients
diff --git a/fs/ksmbd/auth.c b/fs/ksmbd/auth.c
index 30a92ddc181747..3503b1c48cb4e8 100644
--- a/fs/ksmbd/auth.c
+++ b/fs/ksmbd/auth.c
@@ -873,9 +873,9 @@ int ksmbd_gen_preauth_integrity_hash(struct ksmbd_conn *conn, char *buf,
__u8 *pi_hash)
{
int rc;
- struct smb2_hdr *rcv_hdr = (struct smb2_hdr *)buf;
+ struct smb2_hdr *rcv_hdr = smb2_get_msg(buf);
char *all_bytes_msg = (char *)&rcv_hdr->ProtocolId;
- int msg_size = be32_to_cpu(rcv_hdr->smb2_buf_length);
+ int msg_size = get_rfc1002_len(buf);
struct ksmbd_crypto_ctx *ctx = NULL;
if (conn->preauth_info->Preauth_HashId !=
@@ -983,7 +983,7 @@ static struct scatterlist *ksmbd_init_sg(struct kvec *iov, unsigned int nvec,
u8 *sign)
{
struct scatterlist *sg;
- unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 24;
+ unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
int i, nr_entries[3] = {0}, total_entries = 0, sg_idx = 0;
if (!nvec)
@@ -1047,9 +1047,8 @@ static struct scatterlist *ksmbd_init_sg(struct kvec *iov, unsigned int nvec,
int ksmbd_crypt_message(struct ksmbd_conn *conn, struct kvec *iov,
unsigned int nvec, int enc)
{
- struct smb2_transform_hdr *tr_hdr =
- (struct smb2_transform_hdr *)iov[0].iov_base;
- unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 24;
+ struct smb2_transform_hdr *tr_hdr = smb2_get_msg(iov[0].iov_base);
+ unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
int rc;
struct scatterlist *sg;
u8 sign[SMB2_SIGNATURE_SIZE] = {};
diff --git a/fs/ksmbd/connection.c b/fs/ksmbd/connection.c
index b57a0d8a392ff5..83a94d0bb480fa 100644
--- a/fs/ksmbd/connection.c
+++ b/fs/ksmbd/connection.c
@@ -158,26 +158,25 @@ void ksmbd_conn_wait_idle(struct ksmbd_conn *conn)
int ksmbd_conn_write(struct ksmbd_work *work)
{
struct ksmbd_conn *conn = work->conn;
- struct smb_hdr *rsp_hdr = work->response_buf;
size_t len = 0;
int sent;
struct kvec iov[3];
int iov_idx = 0;
ksmbd_conn_try_dequeue_request(work);
- if (!rsp_hdr) {
+ if (!work->response_buf) {
pr_err("NULL response header\n");
return -EINVAL;
}
if (work->tr_buf) {
iov[iov_idx] = (struct kvec) { work->tr_buf,
- sizeof(struct smb2_transform_hdr) };
+ sizeof(struct smb2_transform_hdr) + 4 };
len += iov[iov_idx++].iov_len;
}
if (work->aux_payload_sz) {
- iov[iov_idx] = (struct kvec) { rsp_hdr, work->resp_hdr_sz };
+ iov[iov_idx] = (struct kvec) { work->response_buf, work->resp_hdr_sz };
len += iov[iov_idx++].iov_len;
iov[iov_idx] = (struct kvec) { work->aux_payload_buf, work->aux_payload_sz };
len += iov[iov_idx++].iov_len;
@@ -185,8 +184,8 @@ int ksmbd_conn_write(struct ksmbd_work *work)
if (work->tr_buf)
iov[iov_idx].iov_len = work->resp_hdr_sz;
else
- iov[iov_idx].iov_len = get_rfc1002_len(rsp_hdr) + 4;
- iov[iov_idx].iov_base = rsp_hdr;
+ iov[iov_idx].iov_len = get_rfc1002_len(work->response_buf) + 4;
+ iov[iov_idx].iov_base = work->response_buf;
len += iov[iov_idx++].iov_len;
}
diff --git a/fs/ksmbd/ksmbd_work.c b/fs/ksmbd/ksmbd_work.c
index fd58eb4809f6a2..14b9caebf7a4f8 100644
--- a/fs/ksmbd/ksmbd_work.c
+++ b/fs/ksmbd/ksmbd_work.c
@@ -69,7 +69,6 @@ int ksmbd_workqueue_init(void)
void ksmbd_workqueue_destroy(void)
{
- flush_workqueue(ksmbd_wq);
destroy_workqueue(ksmbd_wq);
ksmbd_wq = NULL;
}
diff --git a/fs/ksmbd/ksmbd_work.h b/fs/ksmbd/ksmbd_work.h
index f7156bc500496f..5ece58e40c9797 100644
--- a/fs/ksmbd/ksmbd_work.h
+++ b/fs/ksmbd/ksmbd_work.h
@@ -92,7 +92,7 @@ struct ksmbd_work {
*/
static inline void *ksmbd_resp_buf_next(struct ksmbd_work *work)
{
- return work->response_buf + work->next_smb2_rsp_hdr_off;
+ return work->response_buf + work->next_smb2_rsp_hdr_off + 4;
}
/**
@@ -101,7 +101,7 @@ static inline void *ksmbd_resp_buf_next(struct ksmbd_work *work)
*/
static inline void *ksmbd_req_buf_next(struct ksmbd_work *work)
{
- return work->request_buf + work->next_smb2_rcv_hdr_off;
+ return work->request_buf + work->next_smb2_rcv_hdr_off + 4;
}
struct ksmbd_work *ksmbd_alloc_work_struct(void);
diff --git a/fs/ksmbd/oplock.c b/fs/ksmbd/oplock.c
index f9dae6ef21150a..077b8761d0993f 100644
--- a/fs/ksmbd/oplock.c
+++ b/fs/ksmbd/oplock.c
@@ -629,10 +629,10 @@ static void __smb2_oplock_break_noti(struct work_struct *wk)
return;
}
- rsp_hdr = work->response_buf;
+ rsp_hdr = smb2_get_msg(work->response_buf);
memset(rsp_hdr, 0, sizeof(struct smb2_hdr) + 2);
- rsp_hdr->smb2_buf_length =
- cpu_to_be32(smb2_hdr_size_no_buflen(conn->vals));
+ *(__be32 *)work->response_buf =
+ cpu_to_be32(conn->vals->header_size);
rsp_hdr->ProtocolId = SMB2_PROTO_NUMBER;
rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
rsp_hdr->CreditRequest = cpu_to_le16(0);
@@ -645,7 +645,7 @@ static void __smb2_oplock_break_noti(struct work_struct *wk)
rsp_hdr->SessionId = 0;
memset(rsp_hdr->Signature, 0, 16);
- rsp = work->response_buf;
+ rsp = smb2_get_msg(work->response_buf);
rsp->StructureSize = cpu_to_le16(24);
if (!br_info->open_trunc &&
@@ -659,7 +659,7 @@ static void __smb2_oplock_break_noti(struct work_struct *wk)
rsp->PersistentFid = cpu_to_le64(fp->persistent_id);
rsp->VolatileFid = cpu_to_le64(fp->volatile_id);
- inc_rfc1001_len(rsp, 24);
+ inc_rfc1001_len(work->response_buf, 24);
ksmbd_debug(OPLOCK,
"sending oplock break v_id %llu p_id = %llu lock level = %d\n",
@@ -736,10 +736,10 @@ static void __smb2_lease_break_noti(struct work_struct *wk)
return;
}
- rsp_hdr = work->response_buf;
+ rsp_hdr = smb2_get_msg(work->response_buf);
memset(rsp_hdr, 0, sizeof(struct smb2_hdr) + 2);
- rsp_hdr->smb2_buf_length =
- cpu_to_be32(smb2_hdr_size_no_buflen(conn->vals));
+ *(__be32 *)work->response_buf =
+ cpu_to_be32(conn->vals->header_size);
rsp_hdr->ProtocolId = SMB2_PROTO_NUMBER;
rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
rsp_hdr->CreditRequest = cpu_to_le16(0);
@@ -752,7 +752,7 @@ static void __smb2_lease_break_noti(struct work_struct *wk)
rsp_hdr->SessionId = 0;
memset(rsp_hdr->Signature, 0, 16);
- rsp = work->response_buf;
+ rsp = smb2_get_msg(work->response_buf);
rsp->StructureSize = cpu_to_le16(44);
rsp->Epoch = br_info->epoch;
rsp->Flags = 0;
@@ -768,7 +768,7 @@ static void __smb2_lease_break_noti(struct work_struct *wk)
rsp->AccessMaskHint = 0;
rsp->ShareMaskHint = 0;
- inc_rfc1001_len(rsp, 44);
+ inc_rfc1001_len(work->response_buf, 44);
ksmbd_conn_write(work);
ksmbd_free_work_struct(work);
@@ -1335,19 +1335,16 @@ __u8 smb2_map_lease_to_oplock(__le32 lease_state)
*/
void create_lease_buf(u8 *rbuf, struct lease *lease)
{
- char *LeaseKey = (char *)&lease->lease_key;
-
if (lease->version == 2) {
struct create_lease_v2 *buf = (struct create_lease_v2 *)rbuf;
- char *ParentLeaseKey = (char *)&lease->parent_lease_key;
memset(buf, 0, sizeof(struct create_lease_v2));
- buf->lcontext.LeaseKeyLow = *((__le64 *)LeaseKey);
- buf->lcontext.LeaseKeyHigh = *((__le64 *)(LeaseKey + 8));
+ memcpy(buf->lcontext.LeaseKey, lease->lease_key,
+ SMB2_LEASE_KEY_SIZE);
buf->lcontext.LeaseFlags = lease->flags;
buf->lcontext.LeaseState = lease->state;
- buf->lcontext.ParentLeaseKeyLow = *((__le64 *)ParentLeaseKey);
- buf->lcontext.ParentLeaseKeyHigh = *((__le64 *)(ParentLeaseKey + 8));
+ memcpy(buf->lcontext.ParentLeaseKey, lease->parent_lease_key,
+ SMB2_LEASE_KEY_SIZE);
buf->ccontext.DataOffset = cpu_to_le16(offsetof
(struct create_lease_v2, lcontext));
buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context_v2));
@@ -1362,8 +1359,7 @@ void create_lease_buf(u8 *rbuf, struct lease *lease)
struct create_lease *buf = (struct create_lease *)rbuf;
memset(buf, 0, sizeof(struct create_lease));
- buf->lcontext.LeaseKeyLow = *((__le64 *)LeaseKey);
- buf->lcontext.LeaseKeyHigh = *((__le64 *)(LeaseKey + 8));
+ memcpy(buf->lcontext.LeaseKey, lease->lease_key, SMB2_LEASE_KEY_SIZE);
buf->lcontext.LeaseFlags = lease->flags;
buf->lcontext.LeaseState = lease->state;
buf->ccontext.DataOffset = cpu_to_le16(offsetof
@@ -1398,7 +1394,7 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
if (!lreq)
return NULL;
- data_offset = (char *)req + 4 + le32_to_cpu(req->CreateContextsOffset);
+ data_offset = (char *)req + le32_to_cpu(req->CreateContextsOffset);
cc = (struct create_context *)data_offset;
do {
cc = (struct create_context *)((char *)cc + next);
@@ -1416,19 +1412,17 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
if (sizeof(struct lease_context_v2) == le32_to_cpu(cc->DataLength)) {
struct create_lease_v2 *lc = (struct create_lease_v2 *)cc;
- *((__le64 *)lreq->lease_key) = lc->lcontext.LeaseKeyLow;
- *((__le64 *)(lreq->lease_key + 8)) = lc->lcontext.LeaseKeyHigh;
+ memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
lreq->req_state = lc->lcontext.LeaseState;
lreq->flags = lc->lcontext.LeaseFlags;
lreq->duration = lc->lcontext.LeaseDuration;
- *((__le64 *)lreq->parent_lease_key) = lc->lcontext.ParentLeaseKeyLow;
- *((__le64 *)(lreq->parent_lease_key + 8)) = lc->lcontext.ParentLeaseKeyHigh;
+ memcpy(lreq->parent_lease_key, lc->lcontext.ParentLeaseKey,
+ SMB2_LEASE_KEY_SIZE);
lreq->version = 2;
} else {
struct create_lease *lc = (struct create_lease *)cc;
- *((__le64 *)lreq->lease_key) = lc->lcontext.LeaseKeyLow;
- *((__le64 *)(lreq->lease_key + 8)) = lc->lcontext.LeaseKeyHigh;
+ memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
lreq->req_state = lc->lcontext.LeaseState;
lreq->flags = lc->lcontext.LeaseFlags;
lreq->duration = lc->lcontext.LeaseDuration;
@@ -1462,7 +1456,7 @@ struct create_context *smb2_find_context_vals(void *open_req, const char *tag)
* CreateContextsOffset and CreateContextsLength are guaranteed to
* be valid because of ksmbd_smb2_check_message().
*/
- cc = (struct create_context *)((char *)req + 4 +
+ cc = (struct create_context *)((char *)req +
le32_to_cpu(req->CreateContextsOffset));
remain_len = le32_to_cpu(req->CreateContextsLength);
do {
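The lease-context hunks above replace paired 64-bit LeaseKeyLow/LeaseKeyHigh loads with a plain memcpy of the 16-byte key, which avoids alignment and byte-order assumptions about the wire buffer. A trivial sketch of that byte-wise copy (struct and constant invented here):

#include <stdio.h>
#include <string.h>

#define LEASE_KEY_SIZE 16

struct lease_ctx { unsigned char lease_key[LEASE_KEY_SIZE]; };

/* Copy the opaque 16-byte key as bytes; unlike two 64-bit loads this makes
 * no assumptions about the alignment or endianness of the source buffer. */
static void copy_lease_key(struct lease_ctx *dst, const unsigned char *wire)
{
	memcpy(dst->lease_key, wire, LEASE_KEY_SIZE);
}

int main(void)
{
	unsigned char wire[LEASE_KEY_SIZE] = { 1, 2, 3, 4 };
	struct lease_ctx ctx;

	copy_lease_key(&ctx, wire);
	printf("first key byte: %u\n", ctx.lease_key[0]);
	return 0;
}
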
diff --git a/fs/ksmbd/oplock.h b/fs/ksmbd/oplock.h
index 119b8047cfbd40..0cf7a2b5bbc065 100644
--- a/fs/ksmbd/oplock.h
+++ b/fs/ksmbd/oplock.h
@@ -28,8 +28,6 @@
#define OPLOCK_WRITE_TO_NONE 0x04
#define OPLOCK_READ_TO_NONE 0x08
-#define SMB2_LEASE_KEY_SIZE 16
-
struct lease_ctx_info {
__u8 lease_key[SMB2_LEASE_KEY_SIZE];
__le32 req_state;
diff --git a/fs/ksmbd/server.c b/fs/ksmbd/server.c
index 2a2b2135bfdede..2e12f6d8483bda 100644
--- a/fs/ksmbd/server.c
+++ b/fs/ksmbd/server.c
@@ -622,7 +622,6 @@ MODULE_DESCRIPTION("Linux kernel CIFS/SMB SERVER");
MODULE_LICENSE("GPL");
MODULE_SOFTDEP("pre: ecb");
MODULE_SOFTDEP("pre: hmac");
-MODULE_SOFTDEP("pre: md4");
MODULE_SOFTDEP("pre: md5");
MODULE_SOFTDEP("pre: nls");
MODULE_SOFTDEP("pre: aes");
@@ -632,5 +631,6 @@ MODULE_SOFTDEP("pre: sha512");
MODULE_SOFTDEP("pre: aead2");
MODULE_SOFTDEP("pre: ccm");
MODULE_SOFTDEP("pre: gcm");
+MODULE_SOFTDEP("pre: crc32");
module_init(ksmbd_server_init)
module_exit(ksmbd_server_exit)
diff --git a/fs/ksmbd/smb2misc.c b/fs/ksmbd/smb2misc.c
index 030ca57c378498..50d0b1022289ef 100644
--- a/fs/ksmbd/smb2misc.c
+++ b/fs/ksmbd/smb2misc.c
@@ -6,7 +6,6 @@
#include "glob.h"
#include "nterr.h"
-#include "smb2pdu.h"
#include "smb_common.h"
#include "smbstatus.h"
#include "mgmt/user_session.h"
@@ -347,23 +346,16 @@ static int smb2_validate_credit_charge(struct ksmbd_conn *conn,
int ksmbd_smb2_check_message(struct ksmbd_work *work)
{
- struct smb2_pdu *pdu = work->request_buf;
+ struct smb2_pdu *pdu = ksmbd_req_buf_next(work);
struct smb2_hdr *hdr = &pdu->hdr;
int command;
__u32 clc_len; /* calculated length */
- __u32 len = get_rfc1002_len(pdu);
+ __u32 len = get_rfc1002_len(work->request_buf);
- if (work->next_smb2_rcv_hdr_off) {
- pdu = ksmbd_req_buf_next(work);
- hdr = &pdu->hdr;
- }
-
- if (le32_to_cpu(hdr->NextCommand) > 0) {
+ if (le32_to_cpu(hdr->NextCommand) > 0)
len = le32_to_cpu(hdr->NextCommand);
- } else if (work->next_smb2_rcv_hdr_off) {
+ else if (work->next_smb2_rcv_hdr_off)
len -= work->next_smb2_rcv_hdr_off;
- len = round_up(len, 8);
- }
if (check_smb2_hdr(hdr))
return 1;
diff --git a/fs/ksmbd/smb2ops.c b/fs/ksmbd/smb2ops.c
index fb6a65d231391b..0a5d8450e835fa 100644
--- a/fs/ksmbd/smb2ops.c
+++ b/fs/ksmbd/smb2ops.c
@@ -6,7 +6,6 @@
#include <linux/slab.h>
#include "glob.h"
-#include "smb2pdu.h"
#include "auth.h"
#include "connection.h"
@@ -199,7 +198,7 @@ void init_smb2_1_server(struct ksmbd_conn *conn)
conn->cmds = smb2_0_server_cmds;
conn->max_cmds = ARRAY_SIZE(smb2_0_server_cmds);
conn->max_credits = SMB2_MAX_CREDITS;
- conn->signing_algorithm = SIGNING_ALG_HMAC_SHA256;
+ conn->signing_algorithm = SIGNING_ALG_HMAC_SHA256_LE;
if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING;
@@ -217,7 +216,7 @@ void init_smb3_0_server(struct ksmbd_conn *conn)
conn->cmds = smb2_0_server_cmds;
conn->max_cmds = ARRAY_SIZE(smb2_0_server_cmds);
conn->max_credits = SMB2_MAX_CREDITS;
- conn->signing_algorithm = SIGNING_ALG_AES_CMAC;
+ conn->signing_algorithm = SIGNING_ALG_AES_CMAC_LE;
if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING;
@@ -242,7 +241,7 @@ void init_smb3_02_server(struct ksmbd_conn *conn)
conn->cmds = smb2_0_server_cmds;
conn->max_cmds = ARRAY_SIZE(smb2_0_server_cmds);
conn->max_credits = SMB2_MAX_CREDITS;
- conn->signing_algorithm = SIGNING_ALG_AES_CMAC;
+ conn->signing_algorithm = SIGNING_ALG_AES_CMAC_LE;
if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING;
@@ -267,7 +266,7 @@ int init_smb3_11_server(struct ksmbd_conn *conn)
conn->cmds = smb2_0_server_cmds;
conn->max_cmds = ARRAY_SIZE(smb2_0_server_cmds);
conn->max_credits = SMB2_MAX_CREDITS;
- conn->signing_algorithm = SIGNING_ALG_AES_CMAC;
+ conn->signing_algorithm = SIGNING_ALG_AES_CMAC_LE;
if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING;
diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
index 7e448df3f8474c..121f8e8c70aca1 100644
--- a/fs/ksmbd/smb2pdu.c
+++ b/fs/ksmbd/smb2pdu.c
@@ -13,7 +13,6 @@
#include <linux/falloc.h>
#include "glob.h"
-#include "smb2pdu.h"
#include "smbfsctl.h"
#include "oplock.h"
#include "smbacl.h"
@@ -44,8 +43,8 @@ static void __wbuf(struct ksmbd_work *work, void **req, void **rsp)
*req = ksmbd_req_buf_next(work);
*rsp = ksmbd_resp_buf_next(work);
} else {
- *req = work->request_buf;
- *rsp = work->response_buf;
+ *req = smb2_get_msg(work->request_buf);
+ *rsp = smb2_get_msg(work->response_buf);
}
}
@@ -93,13 +92,14 @@ struct channel *lookup_chann_list(struct ksmbd_session *sess, struct ksmbd_conn
*/
int smb2_get_ksmbd_tcon(struct ksmbd_work *work)
{
- struct smb2_hdr *req_hdr = work->request_buf;
+ struct smb2_hdr *req_hdr = smb2_get_msg(work->request_buf);
+ unsigned int cmd = le16_to_cpu(req_hdr->Command);
int tree_id;
work->tcon = NULL;
- if (work->conn->ops->get_cmd_val(work) == SMB2_TREE_CONNECT_HE ||
- work->conn->ops->get_cmd_val(work) == SMB2_CANCEL_HE ||
- work->conn->ops->get_cmd_val(work) == SMB2_LOGOFF_HE) {
+ if (cmd == SMB2_TREE_CONNECT_HE ||
+ cmd == SMB2_CANCEL_HE ||
+ cmd == SMB2_LOGOFF_HE) {
ksmbd_debug(SMB, "skip to check tree connect request\n");
return 0;
}
@@ -130,7 +130,7 @@ void smb2_set_err_rsp(struct ksmbd_work *work)
if (work->next_smb2_rcv_hdr_off)
err_rsp = ksmbd_resp_buf_next(work);
else
- err_rsp = work->response_buf;
+ err_rsp = smb2_get_msg(work->response_buf);
if (err_rsp->hdr.Status != STATUS_STOPPED_ON_SYMLINK) {
err_rsp->StructureSize = SMB2_ERROR_STRUCTURE_SIZE2_LE;
@@ -150,7 +150,7 @@ void smb2_set_err_rsp(struct ksmbd_work *work)
*/
bool is_smb2_neg_cmd(struct ksmbd_work *work)
{
- struct smb2_hdr *hdr = work->request_buf;
+ struct smb2_hdr *hdr = smb2_get_msg(work->request_buf);
/* is it SMB2 header ? */
if (hdr->ProtocolId != SMB2_PROTO_NUMBER)
@@ -174,7 +174,7 @@ bool is_smb2_neg_cmd(struct ksmbd_work *work)
*/
bool is_smb2_rsp(struct ksmbd_work *work)
{
- struct smb2_hdr *hdr = work->response_buf;
+ struct smb2_hdr *hdr = smb2_get_msg(work->response_buf);
/* is it SMB2 header ? */
if (hdr->ProtocolId != SMB2_PROTO_NUMBER)
@@ -200,7 +200,7 @@ u16 get_smb2_cmd_val(struct ksmbd_work *work)
if (work->next_smb2_rcv_hdr_off)
rcv_hdr = ksmbd_req_buf_next(work);
else
- rcv_hdr = work->request_buf;
+ rcv_hdr = smb2_get_msg(work->request_buf);
return le16_to_cpu(rcv_hdr->Command);
}
@@ -216,7 +216,7 @@ void set_smb2_rsp_status(struct ksmbd_work *work, __le32 err)
if (work->next_smb2_rcv_hdr_off)
rsp_hdr = ksmbd_resp_buf_next(work);
else
- rsp_hdr = work->response_buf;
+ rsp_hdr = smb2_get_msg(work->response_buf);
rsp_hdr->Status = err;
smb2_set_err_rsp(work);
}
@@ -237,13 +237,11 @@ int init_smb2_neg_rsp(struct ksmbd_work *work)
if (conn->need_neg == false)
return -EINVAL;
- rsp_hdr = work->response_buf;
+ *(__be32 *)work->response_buf =
+ cpu_to_be32(conn->vals->header_size);
+ rsp_hdr = smb2_get_msg(work->response_buf);
memset(rsp_hdr, 0, sizeof(struct smb2_hdr) + 2);
-
- rsp_hdr->smb2_buf_length =
- cpu_to_be32(smb2_hdr_size_no_buflen(conn->vals));
-
rsp_hdr->ProtocolId = SMB2_PROTO_NUMBER;
rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
rsp_hdr->CreditRequest = cpu_to_le16(2);
@@ -256,7 +254,7 @@ int init_smb2_neg_rsp(struct ksmbd_work *work)
rsp_hdr->SessionId = 0;
memset(rsp_hdr->Signature, 0, 16);
- rsp = work->response_buf;
+ rsp = smb2_get_msg(work->response_buf);
WARN_ON(ksmbd_conn_good(work));
@@ -277,12 +275,12 @@ int init_smb2_neg_rsp(struct ksmbd_work *work)
rsp->SecurityBufferOffset = cpu_to_le16(128);
rsp->SecurityBufferLength = cpu_to_le16(AUTH_GSS_LENGTH);
- ksmbd_copy_gss_neg_header(((char *)(&rsp->hdr) +
- sizeof(rsp->hdr.smb2_buf_length)) +
+ ksmbd_copy_gss_neg_header((char *)(&rsp->hdr) +
le16_to_cpu(rsp->SecurityBufferOffset));
- inc_rfc1001_len(rsp, sizeof(struct smb2_negotiate_rsp) -
- sizeof(struct smb2_hdr) - sizeof(rsp->Buffer) +
- AUTH_GSS_LENGTH);
+ inc_rfc1001_len(work->response_buf,
+ sizeof(struct smb2_negotiate_rsp) -
+ sizeof(struct smb2_hdr) - sizeof(rsp->Buffer) +
+ AUTH_GSS_LENGTH);
rsp->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED_LE;
if (server_conf.signing == KSMBD_CONFIG_OPT_MANDATORY)
rsp->SecurityMode |= SMB2_NEGOTIATE_SIGNING_REQUIRED_LE;
@@ -387,8 +385,8 @@ static void init_chained_smb2_rsp(struct ksmbd_work *work)
next_hdr_offset = le32_to_cpu(req->NextCommand);
new_len = ALIGN(len, 8);
- inc_rfc1001_len(work->response_buf, ((sizeof(struct smb2_hdr) - 4)
- + new_len - len));
+ inc_rfc1001_len(work->response_buf,
+ sizeof(struct smb2_hdr) + new_len - len);
rsp->NextCommand = cpu_to_le32(new_len);
work->next_smb2_rcv_hdr_off += next_hdr_offset;
@@ -406,7 +404,7 @@ static void init_chained_smb2_rsp(struct ksmbd_work *work)
work->compound_fid = KSMBD_NO_FID;
work->compound_pfid = KSMBD_NO_FID;
}
- memset((char *)rsp_hdr + 4, 0, sizeof(struct smb2_hdr) + 2);
+ memset((char *)rsp_hdr, 0, sizeof(struct smb2_hdr) + 2);
rsp_hdr->ProtocolId = SMB2_PROTO_NUMBER;
rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
rsp_hdr->Command = rcv_hdr->Command;
@@ -432,7 +430,7 @@ static void init_chained_smb2_rsp(struct ksmbd_work *work)
*/
bool is_chained_smb2_message(struct ksmbd_work *work)
{
- struct smb2_hdr *hdr = work->request_buf;
+ struct smb2_hdr *hdr = smb2_get_msg(work->request_buf);
unsigned int len, next_cmd;
if (hdr->ProtocolId != SMB2_PROTO_NUMBER)
@@ -483,13 +481,13 @@ bool is_chained_smb2_message(struct ksmbd_work *work)
*/
int init_smb2_rsp_hdr(struct ksmbd_work *work)
{
- struct smb2_hdr *rsp_hdr = work->response_buf;
- struct smb2_hdr *rcv_hdr = work->request_buf;
+ struct smb2_hdr *rsp_hdr = smb2_get_msg(work->response_buf);
+ struct smb2_hdr *rcv_hdr = smb2_get_msg(work->request_buf);
struct ksmbd_conn *conn = work->conn;
memset(rsp_hdr, 0, sizeof(struct smb2_hdr) + 2);
- rsp_hdr->smb2_buf_length =
- cpu_to_be32(smb2_hdr_size_no_buflen(conn->vals));
+ *(__be32 *)work->response_buf =
+ cpu_to_be32(conn->vals->header_size);
rsp_hdr->ProtocolId = rcv_hdr->ProtocolId;
rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
rsp_hdr->Command = rcv_hdr->Command;
@@ -522,7 +520,7 @@ int init_smb2_rsp_hdr(struct ksmbd_work *work)
*/
int smb2_allocate_rsp_buf(struct ksmbd_work *work)
{
- struct smb2_hdr *hdr = work->request_buf;
+ struct smb2_hdr *hdr = smb2_get_msg(work->request_buf);
size_t small_sz = MAX_CIFS_SMALL_BUFFER_SIZE;
size_t large_sz = small_sz + work->conn->vals->max_trans_size;
size_t sz = small_sz;
@@ -534,7 +532,7 @@ int smb2_allocate_rsp_buf(struct ksmbd_work *work)
if (cmd == SMB2_QUERY_INFO_HE) {
struct smb2_query_info_req *req;
- req = work->request_buf;
+ req = smb2_get_msg(work->request_buf);
if (req->InfoType == SMB2_O_INFO_FILE &&
(req->FileInfoClass == FILE_FULL_EA_INFORMATION ||
req->FileInfoClass == FILE_ALL_INFORMATION))
@@ -561,7 +559,7 @@ int smb2_allocate_rsp_buf(struct ksmbd_work *work)
*/
int smb2_check_user_session(struct ksmbd_work *work)
{
- struct smb2_hdr *req_hdr = work->request_buf;
+ struct smb2_hdr *req_hdr = smb2_get_msg(work->request_buf);
struct ksmbd_conn *conn = work->conn;
unsigned int cmd = conn->ops->get_cmd_val(work);
unsigned long long sess_id;
@@ -642,7 +640,7 @@ int setup_async_work(struct ksmbd_work *work, void (*fn)(void **), void **arg)
struct ksmbd_conn *conn = work->conn;
int id;
- rsp_hdr = work->response_buf;
+ rsp_hdr = smb2_get_msg(work->response_buf);
rsp_hdr->Flags |= SMB2_FLAGS_ASYNC_COMMAND;
id = ksmbd_acquire_async_msg_id(&conn->async_ida);
@@ -674,7 +672,7 @@ void smb2_send_interim_resp(struct ksmbd_work *work, __le32 status)
{
struct smb2_hdr *rsp_hdr;
- rsp_hdr = work->response_buf;
+ rsp_hdr = smb2_get_msg(work->response_buf);
smb2_set_err_rsp(work);
rsp_hdr->Status = status;
@@ -715,17 +713,17 @@ static int smb2_get_dos_mode(struct kstat *stat, int attribute)
int attr = 0;
if (S_ISDIR(stat->mode)) {
- attr = ATTR_DIRECTORY |
- (attribute & (ATTR_HIDDEN | ATTR_SYSTEM));
+ attr = FILE_ATTRIBUTE_DIRECTORY |
+ (attribute & (FILE_ATTRIBUTE_HIDDEN | FILE_ATTRIBUTE_SYSTEM));
} else {
- attr = (attribute & 0x00005137) | ATTR_ARCHIVE;
- attr &= ~(ATTR_DIRECTORY);
+ attr = (attribute & 0x00005137) | FILE_ATTRIBUTE_ARCHIVE;
+ attr &= ~(FILE_ATTRIBUTE_DIRECTORY);
if (S_ISREG(stat->mode) && (server_conf.share_fake_fscaps &
FILE_SUPPORTS_SPARSE_FILES))
- attr |= ATTR_SPARSE;
+ attr |= FILE_ATTRIBUTE_SPARSE_FILE;
if (smb2_get_reparse_tag_special_file(stat->mode))
- attr |= ATTR_REPARSE;
+ attr |= FILE_ATTRIBUTE_REPARSE_POINT;
}
return attr;
@@ -753,16 +751,16 @@ static void build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt,
pneg_ctxt->Ciphers[0] = cipher_type;
}
-static void build_compression_ctxt(struct smb2_compression_ctx *pneg_ctxt,
+static void build_compression_ctxt(struct smb2_compression_capabilities_context *pneg_ctxt,
__le16 comp_algo)
{
pneg_ctxt->ContextType = SMB2_COMPRESSION_CAPABILITIES;
pneg_ctxt->DataLength =
- cpu_to_le16(sizeof(struct smb2_compression_ctx)
+ cpu_to_le16(sizeof(struct smb2_compression_capabilities_context)
- sizeof(struct smb2_neg_context));
pneg_ctxt->Reserved = cpu_to_le32(0);
pneg_ctxt->CompressionAlgorithmCount = cpu_to_le16(1);
- pneg_ctxt->Reserved1 = cpu_to_le32(0);
+ pneg_ctxt->Flags = cpu_to_le32(0);
pneg_ctxt->CompressionAlgorithms[0] = comp_algo;
}
@@ -802,11 +800,11 @@ static void build_posix_ctxt(struct smb2_posix_neg_context *pneg_ctxt)
}
static void assemble_neg_contexts(struct ksmbd_conn *conn,
- struct smb2_negotiate_rsp *rsp)
+ struct smb2_negotiate_rsp *rsp,
+ void *smb2_buf_len)
{
- /* +4 is to account for the RFC1001 len field */
char *pneg_ctxt = (char *)rsp +
- le32_to_cpu(rsp->NegotiateContextOffset) + 4;
+ le32_to_cpu(rsp->NegotiateContextOffset);
int neg_ctxt_cnt = 1;
int ctxt_size;
@@ -815,7 +813,7 @@ static void assemble_neg_contexts(struct ksmbd_conn *conn,
build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt,
conn->preauth_info->Preauth_HashId);
rsp->NegotiateContextCount = cpu_to_le16(neg_ctxt_cnt);
- inc_rfc1001_len(rsp, AUTH_GSS_PADDING);
+ inc_rfc1001_len(smb2_buf_len, AUTH_GSS_PADDING);
ctxt_size = sizeof(struct smb2_preauth_neg_context);
/* Round to 8 byte boundary */
pneg_ctxt += round_up(sizeof(struct smb2_preauth_neg_context), 8);
@@ -839,12 +837,12 @@ static void assemble_neg_contexts(struct ksmbd_conn *conn,
ksmbd_debug(SMB,
"assemble SMB2_COMPRESSION_CAPABILITIES context\n");
/* Temporarily set to SMB3_COMPRESS_NONE */
- build_compression_ctxt((struct smb2_compression_ctx *)pneg_ctxt,
+ build_compression_ctxt((struct smb2_compression_capabilities_context *)pneg_ctxt,
conn->compress_algorithm);
rsp->NegotiateContextCount = cpu_to_le16(++neg_ctxt_cnt);
- ctxt_size += sizeof(struct smb2_compression_ctx) + 2;
+ ctxt_size += sizeof(struct smb2_compression_capabilities_context) + 2;
/* Round to 8 byte boundary */
- pneg_ctxt += round_up(sizeof(struct smb2_compression_ctx) + 2,
+ pneg_ctxt += round_up(sizeof(struct smb2_compression_capabilities_context) + 2,
8);
}
@@ -869,7 +867,7 @@ static void assemble_neg_contexts(struct ksmbd_conn *conn,
ctxt_size += sizeof(struct smb2_signing_capabilities) + 2;
}
- inc_rfc1001_len(rsp, ctxt_size);
+ inc_rfc1001_len(smb2_buf_len, ctxt_size);
}
static __le32 decode_preauth_ctxt(struct ksmbd_conn *conn,
@@ -918,7 +916,7 @@ static void decode_encrypt_ctxt(struct ksmbd_conn *conn,
}
static void decode_compress_ctxt(struct ksmbd_conn *conn,
- struct smb2_compression_ctx *pneg_ctxt)
+ struct smb2_compression_capabilities_context *pneg_ctxt)
{
conn->compress_algorithm = SMB3_COMPRESS_NONE;
}
@@ -939,8 +937,8 @@ static void decode_sign_cap_ctxt(struct ksmbd_conn *conn,
}
for (i = 0; i < sign_algo_cnt; i++) {
- if (pneg_ctxt->SigningAlgorithms[i] == SIGNING_ALG_HMAC_SHA256 ||
- pneg_ctxt->SigningAlgorithms[i] == SIGNING_ALG_AES_CMAC) {
+ if (pneg_ctxt->SigningAlgorithms[i] == SIGNING_ALG_HMAC_SHA256_LE ||
+ pneg_ctxt->SigningAlgorithms[i] == SIGNING_ALG_AES_CMAC_LE) {
ksmbd_debug(SMB, "Signing Algorithm ID = 0x%x\n",
pneg_ctxt->SigningAlgorithms[i]);
conn->signing_negotiated = true;
@@ -952,14 +950,14 @@ static void decode_sign_cap_ctxt(struct ksmbd_conn *conn,
}
static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn,
- struct smb2_negotiate_req *req)
+ struct smb2_negotiate_req *req,
+ int len_of_smb)
{
/* +4 is to account for the RFC1001 len field */
- struct smb2_neg_context *pctx = (struct smb2_neg_context *)((char *)req + 4);
+ struct smb2_neg_context *pctx = (struct smb2_neg_context *)req;
int i = 0, len_of_ctxts;
int offset = le32_to_cpu(req->NegotiateContextOffset);
int neg_ctxt_cnt = le16_to_cpu(req->NegotiateContextCount);
- int len_of_smb = be32_to_cpu(req->hdr.smb2_buf_length);
__le32 status = STATUS_INVALID_PARAMETER;
ksmbd_debug(SMB, "decoding %d negotiate contexts\n", neg_ctxt_cnt);
@@ -1011,7 +1009,7 @@ static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn,
break;
decode_compress_ctxt(conn,
- (struct smb2_compression_ctx *)pctx);
+ (struct smb2_compression_capabilities_context *)pctx);
} else if (pctx->ContextType == SMB2_NETNAME_NEGOTIATE_CONTEXT_ID) {
ksmbd_debug(SMB,
"deassemble SMB2_NETNAME_NEGOTIATE_CONTEXT_ID context\n");
@@ -1044,8 +1042,8 @@ static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn,
int smb2_handle_negotiate(struct ksmbd_work *work)
{
struct ksmbd_conn *conn = work->conn;
- struct smb2_negotiate_req *req = work->request_buf;
- struct smb2_negotiate_rsp *rsp = work->response_buf;
+ struct smb2_negotiate_req *req = smb2_get_msg(work->request_buf);
+ struct smb2_negotiate_rsp *rsp = smb2_get_msg(work->response_buf);
int rc = 0;
unsigned int smb2_buf_len, smb2_neg_size;
__le32 status;
@@ -1066,7 +1064,7 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
}
smb2_buf_len = get_rfc1002_len(work->request_buf);
- smb2_neg_size = offsetof(struct smb2_negotiate_req, Dialects) - 4;
+ smb2_neg_size = offsetof(struct smb2_negotiate_req, Dialects);
if (smb2_neg_size > smb2_buf_len) {
rsp->hdr.Status = STATUS_INVALID_PARAMETER;
rc = -EINVAL;
@@ -1115,7 +1113,8 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
goto err_out;
}
- status = deassemble_neg_contexts(conn, req);
+ status = deassemble_neg_contexts(conn, req,
+ get_rfc1002_len(work->request_buf));
if (status != STATUS_SUCCESS) {
pr_err("deassemble_neg_contexts error(0x%x)\n",
status);
@@ -1135,7 +1134,7 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
conn->preauth_info->Preauth_HashValue);
rsp->NegotiateContextOffset =
cpu_to_le32(OFFSET_OF_NEG_CONTEXT);
- assemble_neg_contexts(conn, rsp);
+ assemble_neg_contexts(conn, rsp, work->response_buf);
break;
case SMB302_PROT_ID:
init_smb3_02_server(conn);
@@ -1183,10 +1182,9 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
rsp->SecurityBufferOffset = cpu_to_le16(128);
rsp->SecurityBufferLength = cpu_to_le16(AUTH_GSS_LENGTH);
- ksmbd_copy_gss_neg_header(((char *)(&rsp->hdr) +
- sizeof(rsp->hdr.smb2_buf_length)) +
- le16_to_cpu(rsp->SecurityBufferOffset));
- inc_rfc1001_len(rsp, sizeof(struct smb2_negotiate_rsp) -
+ ksmbd_copy_gss_neg_header((char *)(&rsp->hdr) +
+ le16_to_cpu(rsp->SecurityBufferOffset));
+ inc_rfc1001_len(work->response_buf, sizeof(struct smb2_negotiate_rsp) -
sizeof(struct smb2_hdr) - sizeof(rsp->Buffer) +
AUTH_GSS_LENGTH);
rsp->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED_LE;
@@ -1278,7 +1276,7 @@ static int ntlm_negotiate(struct ksmbd_work *work,
struct negotiate_message *negblob,
size_t negblob_len)
{
- struct smb2_sess_setup_rsp *rsp = work->response_buf;
+ struct smb2_sess_setup_rsp *rsp = smb2_get_msg(work->response_buf);
struct challenge_message *chgblob;
unsigned char *spnego_blob = NULL;
u16 spnego_blob_len;
@@ -1386,8 +1384,8 @@ static struct ksmbd_user *session_user(struct ksmbd_conn *conn,
static int ntlm_authenticate(struct ksmbd_work *work)
{
- struct smb2_sess_setup_req *req = work->request_buf;
- struct smb2_sess_setup_rsp *rsp = work->response_buf;
+ struct smb2_sess_setup_req *req = smb2_get_msg(work->request_buf);
+ struct smb2_sess_setup_rsp *rsp = smb2_get_msg(work->response_buf);
struct ksmbd_conn *conn = work->conn;
struct ksmbd_session *sess = work->sess;
struct channel *chann = NULL;
@@ -1410,7 +1408,7 @@ static int ntlm_authenticate(struct ksmbd_work *work)
memcpy((char *)&rsp->hdr.ProtocolId + sz, spnego_blob, spnego_blob_len);
rsp->SecurityBufferLength = cpu_to_le16(spnego_blob_len);
kfree(spnego_blob);
- inc_rfc1001_len(rsp, spnego_blob_len - 1);
+ inc_rfc1001_len(work->response_buf, spnego_blob_len - 1);
}
user = session_user(conn, req);
@@ -1522,8 +1520,8 @@ binding_session:
#ifdef CONFIG_SMB_SERVER_KERBEROS5
static int krb5_authenticate(struct ksmbd_work *work)
{
- struct smb2_sess_setup_req *req = work->request_buf;
- struct smb2_sess_setup_rsp *rsp = work->response_buf;
+ struct smb2_sess_setup_req *req = smb2_get_msg(work->request_buf);
+ struct smb2_sess_setup_rsp *rsp = smb2_get_msg(work->response_buf);
struct ksmbd_conn *conn = work->conn;
struct ksmbd_session *sess = work->sess;
char *in_blob, *out_blob;
@@ -1538,8 +1536,7 @@ static int krb5_authenticate(struct ksmbd_work *work)
out_blob = (char *)&rsp->hdr.ProtocolId +
le16_to_cpu(rsp->SecurityBufferOffset);
out_len = work->response_sz -
- offsetof(struct smb2_hdr, smb2_buf_length) -
- le16_to_cpu(rsp->SecurityBufferOffset);
+ (le16_to_cpu(rsp->SecurityBufferOffset) + 4);
/* Check previous session */
prev_sess_id = le64_to_cpu(req->PreviousSessionId);
@@ -1556,7 +1553,7 @@ static int krb5_authenticate(struct ksmbd_work *work)
return -EINVAL;
}
rsp->SecurityBufferLength = cpu_to_le16(out_len);
- inc_rfc1001_len(rsp, out_len - 1);
+ inc_rfc1001_len(work->response_buf, out_len - 1);
if ((conn->sign || server_conf.enforced_signing) ||
(req->SecurityMode & SMB2_NEGOTIATE_SIGNING_REQUIRED))
@@ -1612,8 +1609,8 @@ static int krb5_authenticate(struct ksmbd_work *work)
int smb2_sess_setup(struct ksmbd_work *work)
{
struct ksmbd_conn *conn = work->conn;
- struct smb2_sess_setup_req *req = work->request_buf;
- struct smb2_sess_setup_rsp *rsp = work->response_buf;
+ struct smb2_sess_setup_req *req = smb2_get_msg(work->request_buf);
+ struct smb2_sess_setup_rsp *rsp = smb2_get_msg(work->response_buf);
struct ksmbd_session *sess;
struct negotiate_message *negblob;
unsigned int negblob_len, negblob_off;
@@ -1625,7 +1622,7 @@ int smb2_sess_setup(struct ksmbd_work *work)
rsp->SessionFlags = 0;
rsp->SecurityBufferOffset = cpu_to_le16(72);
rsp->SecurityBufferLength = 0;
- inc_rfc1001_len(rsp, 9);
+ inc_rfc1001_len(work->response_buf, 9);
if (!req->hdr.SessionId) {
sess = ksmbd_smb2_session_create();
@@ -1699,7 +1696,7 @@ int smb2_sess_setup(struct ksmbd_work *work)
negblob_off = le16_to_cpu(req->SecurityBufferOffset);
negblob_len = le16_to_cpu(req->SecurityBufferLength);
- if (negblob_off < (offsetof(struct smb2_sess_setup_req, Buffer) - 4) ||
+ if (negblob_off < offsetof(struct smb2_sess_setup_req, Buffer) ||
negblob_len < offsetof(struct negotiate_message, NegotiateFlags))
return -EINVAL;
@@ -1739,7 +1736,8 @@ int smb2_sess_setup(struct ksmbd_work *work)
* Note: here total size -1 is done as an
* adjustment for 0 size blob
*/
- inc_rfc1001_len(rsp, le16_to_cpu(rsp->SecurityBufferLength) - 1);
+ inc_rfc1001_len(work->response_buf,
+ le16_to_cpu(rsp->SecurityBufferLength) - 1);
} else if (negblob->MessageType == NtLmAuthenticate) {
rc = ntlm_authenticate(work);
@@ -1828,8 +1826,8 @@ out_err:
int smb2_tree_connect(struct ksmbd_work *work)
{
struct ksmbd_conn *conn = work->conn;
- struct smb2_tree_connect_req *req = work->request_buf;
- struct smb2_tree_connect_rsp *rsp = work->response_buf;
+ struct smb2_tree_connect_req *req = smb2_get_msg(work->request_buf);
+ struct smb2_tree_connect_rsp *rsp = smb2_get_msg(work->response_buf);
struct ksmbd_session *sess = work->sess;
char *treename = NULL, *name = NULL;
struct ksmbd_tree_conn_status status;
@@ -1894,7 +1892,7 @@ out_err1:
rsp->Reserved = 0;
/* default manual caching */
rsp->ShareFlags = SMB2_SHAREFLAG_MANUAL_CACHING;
- inc_rfc1001_len(rsp, 16);
+ inc_rfc1001_len(work->response_buf, 16);
if (!IS_ERR(treename))
kfree(treename);
@@ -1999,17 +1997,18 @@ static int smb2_create_open_flags(bool file_present, __le32 access,
*/
int smb2_tree_disconnect(struct ksmbd_work *work)
{
- struct smb2_tree_disconnect_rsp *rsp = work->response_buf;
+ struct smb2_tree_disconnect_rsp *rsp = smb2_get_msg(work->response_buf);
struct ksmbd_session *sess = work->sess;
struct ksmbd_tree_connect *tcon = work->tcon;
rsp->StructureSize = cpu_to_le16(4);
- inc_rfc1001_len(rsp, 4);
+ inc_rfc1001_len(work->response_buf, 4);
ksmbd_debug(SMB, "request\n");
if (!tcon) {
- struct smb2_tree_disconnect_req *req = work->request_buf;
+ struct smb2_tree_disconnect_req *req =
+ smb2_get_msg(work->request_buf);
ksmbd_debug(SMB, "Invalid tid %d\n", req->hdr.Id.SyncId.TreeId);
rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
@@ -2031,11 +2030,11 @@ int smb2_tree_disconnect(struct ksmbd_work *work)
int smb2_session_logoff(struct ksmbd_work *work)
{
struct ksmbd_conn *conn = work->conn;
- struct smb2_logoff_rsp *rsp = work->response_buf;
+ struct smb2_logoff_rsp *rsp = smb2_get_msg(work->response_buf);
struct ksmbd_session *sess = work->sess;
rsp->StructureSize = cpu_to_le16(4);
- inc_rfc1001_len(rsp, 4);
+ inc_rfc1001_len(work->response_buf, 4);
ksmbd_debug(SMB, "request\n");
@@ -2048,7 +2047,7 @@ int smb2_session_logoff(struct ksmbd_work *work)
ksmbd_conn_wait_idle(conn);
if (ksmbd_tree_conn_session_logoff(sess)) {
- struct smb2_logoff_req *req = work->request_buf;
+ struct smb2_logoff_req *req = smb2_get_msg(work->request_buf);
ksmbd_debug(SMB, "Invalid tid %d\n", req->hdr.Id.SyncId.TreeId);
rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
@@ -2075,8 +2074,8 @@ int smb2_session_logoff(struct ksmbd_work *work)
*/
static noinline int create_smb2_pipe(struct ksmbd_work *work)
{
- struct smb2_create_rsp *rsp = work->response_buf;
- struct smb2_create_req *req = work->request_buf;
+ struct smb2_create_rsp *rsp = smb2_get_msg(work->response_buf);
+ struct smb2_create_req *req = smb2_get_msg(work->request_buf);
int id;
int err;
char *name;
@@ -2099,7 +2098,7 @@ static noinline int create_smb2_pipe(struct ksmbd_work *work)
rsp->hdr.Status = STATUS_SUCCESS;
rsp->StructureSize = cpu_to_le16(89);
rsp->OplockLevel = SMB2_OPLOCK_LEVEL_NONE;
- rsp->Reserved = 0;
+ rsp->Flags = 0;
rsp->CreateAction = cpu_to_le32(FILE_OPENED);
rsp->CreationTime = cpu_to_le64(0);
@@ -2107,14 +2106,14 @@ static noinline int create_smb2_pipe(struct ksmbd_work *work)
rsp->ChangeTime = cpu_to_le64(0);
rsp->AllocationSize = cpu_to_le64(0);
rsp->EndofFile = cpu_to_le64(0);
- rsp->FileAttributes = ATTR_NORMAL_LE;
+ rsp->FileAttributes = FILE_ATTRIBUTE_NORMAL_LE;
rsp->Reserved2 = 0;
rsp->VolatileFileId = cpu_to_le64(id);
rsp->PersistentFileId = 0;
rsp->CreateContextsOffset = 0;
rsp->CreateContextsLength = 0;
- inc_rfc1001_len(rsp, 88); /* StructureSize - 1*/
+ inc_rfc1001_len(work->response_buf, 88); /* StructureSize - 1*/
kfree(name);
return 0;
@@ -2353,7 +2352,7 @@ static void smb2_update_xattrs(struct ksmbd_tree_connect *tcon,
struct xattr_dos_attrib da;
int rc;
- fp->f_ci->m_fattr &= ~(ATTR_HIDDEN_LE | ATTR_SYSTEM_LE);
+ fp->f_ci->m_fattr &= ~(FILE_ATTRIBUTE_HIDDEN_LE | FILE_ATTRIBUTE_SYSTEM_LE);
/* get FileAttributes from XATTR_NAME_DOS_ATTRIBUTE */
if (!test_share_config_flag(tcon->share_conf,
@@ -2463,7 +2462,7 @@ int smb2_open(struct ksmbd_work *work)
struct ksmbd_session *sess = work->sess;
struct ksmbd_tree_connect *tcon = work->tcon;
struct smb2_create_req *req;
- struct smb2_create_rsp *rsp, *rsp_org;
+ struct smb2_create_rsp *rsp;
struct path path;
struct ksmbd_share_config *share = tcon->share_conf;
struct ksmbd_file *fp = NULL;
@@ -2489,7 +2488,6 @@ int smb2_open(struct ksmbd_work *work)
umode_t posix_mode = 0;
__le32 daccess, maximal_access = 0;
- rsp_org = work->response_buf;
WORK_BUFFERS(work, req, rsp);
if (req->hdr.NextCommand && !work->next_smb2_rcv_hdr_off &&
@@ -2559,7 +2557,7 @@ int smb2_open(struct ksmbd_work *work)
if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE)
lc = parse_lease_state(req);
- if (le32_to_cpu(req->ImpersonationLevel) > le32_to_cpu(IL_DELEGATE_LE)) {
+ if (le32_to_cpu(req->ImpersonationLevel) > le32_to_cpu(IL_DELEGATE)) {
pr_err("Invalid impersonationlevel : 0x%x\n",
le32_to_cpu(req->ImpersonationLevel));
rc = -EIO;
@@ -2567,7 +2565,7 @@ int smb2_open(struct ksmbd_work *work)
goto err_out1;
}
- if (req->CreateOptions && !(req->CreateOptions & CREATE_OPTIONS_MASK)) {
+ if (req->CreateOptions && !(req->CreateOptions & CREATE_OPTIONS_MASK_LE)) {
pr_err("Invalid create options : 0x%x\n",
le32_to_cpu(req->CreateOptions));
rc = -EINVAL;
@@ -2609,7 +2607,7 @@ int smb2_open(struct ksmbd_work *work)
goto err_out1;
}
- if (req->FileAttributes && !(req->FileAttributes & ATTR_MASK_LE)) {
+ if (req->FileAttributes && !(req->FileAttributes & FILE_ATTRIBUTE_MASK_LE)) {
pr_err("Invalid file attribute : 0x%x\n",
le32_to_cpu(req->FileAttributes));
rc = -EINVAL;
@@ -2740,7 +2738,7 @@ int smb2_open(struct ksmbd_work *work)
}
if (req->CreateOptions & FILE_DIRECTORY_FILE_LE &&
- req->FileAttributes & ATTR_NORMAL_LE) {
+ req->FileAttributes & FILE_ATTRIBUTE_NORMAL_LE) {
rsp->hdr.Status = STATUS_NOT_A_DIRECTORY;
rc = -EIO;
}
@@ -3119,7 +3117,7 @@ int smb2_open(struct ksmbd_work *work)
opinfo = rcu_dereference(fp->f_opinfo);
rsp->OplockLevel = opinfo != NULL ? opinfo->level : 0;
rcu_read_unlock();
- rsp->Reserved = 0;
+ rsp->Flags = 0;
rsp->CreateAction = cpu_to_le32(file_info);
rsp->CreationTime = cpu_to_le64(fp->create_time);
time = ksmbd_UnixTimeToNT(stat.atime);
@@ -3140,7 +3138,7 @@ int smb2_open(struct ksmbd_work *work)
rsp->CreateContextsOffset = 0;
rsp->CreateContextsLength = 0;
- inc_rfc1001_len(rsp_org, 88); /* StructureSize - 1*/
+ inc_rfc1001_len(work->response_buf, 88); /* StructureSize - 1*/
/* If lease is request send lease context response */
if (opinfo && opinfo->is_lease) {
@@ -3155,7 +3153,8 @@ int smb2_open(struct ksmbd_work *work)
create_lease_buf(rsp->Buffer, opinfo->o_lease);
le32_add_cpu(&rsp->CreateContextsLength,
conn->vals->create_lease_size);
- inc_rfc1001_len(rsp_org, conn->vals->create_lease_size);
+ inc_rfc1001_len(work->response_buf,
+ conn->vals->create_lease_size);
next_ptr = &lease_ccontext->Next;
next_off = conn->vals->create_lease_size;
}
@@ -3175,7 +3174,8 @@ int smb2_open(struct ksmbd_work *work)
le32_to_cpu(maximal_access));
le32_add_cpu(&rsp->CreateContextsLength,
conn->vals->create_mxac_size);
- inc_rfc1001_len(rsp_org, conn->vals->create_mxac_size);
+ inc_rfc1001_len(work->response_buf,
+ conn->vals->create_mxac_size);
if (next_ptr)
*next_ptr = cpu_to_le32(next_off);
next_ptr = &mxac_ccontext->Next;
@@ -3193,7 +3193,8 @@ int smb2_open(struct ksmbd_work *work)
stat.ino, tcon->id);
le32_add_cpu(&rsp->CreateContextsLength,
conn->vals->create_disk_id_size);
- inc_rfc1001_len(rsp_org, conn->vals->create_disk_id_size);
+ inc_rfc1001_len(work->response_buf,
+ conn->vals->create_disk_id_size);
if (next_ptr)
*next_ptr = cpu_to_le32(next_off);
next_ptr = &disk_id_ccontext->Next;
@@ -3207,15 +3208,15 @@ int smb2_open(struct ksmbd_work *work)
fp);
le32_add_cpu(&rsp->CreateContextsLength,
conn->vals->create_posix_size);
- inc_rfc1001_len(rsp_org, conn->vals->create_posix_size);
+ inc_rfc1001_len(work->response_buf,
+ conn->vals->create_posix_size);
if (next_ptr)
*next_ptr = cpu_to_le32(next_off);
}
if (contxt_cnt > 0) {
rsp->CreateContextsOffset =
- cpu_to_le32(offsetof(struct smb2_create_rsp, Buffer)
- - 4);
+ cpu_to_le32(offsetof(struct smb2_create_rsp, Buffer));
}
err_out:
@@ -3422,9 +3423,9 @@ static int smb2_populate_readdir_entry(struct ksmbd_conn *conn, int info_level,
ffdinfo->EaSize =
smb2_get_reparse_tag_special_file(ksmbd_kstat->kstat->mode);
if (ffdinfo->EaSize)
- ffdinfo->ExtFileAttributes = ATTR_REPARSE_POINT_LE;
+ ffdinfo->ExtFileAttributes = FILE_ATTRIBUTE_REPARSE_POINT_LE;
if (d_info->hide_dot_file && d_info->name[0] == '.')
- ffdinfo->ExtFileAttributes |= ATTR_HIDDEN_LE;
+ ffdinfo->ExtFileAttributes |= FILE_ATTRIBUTE_HIDDEN_LE;
memcpy(ffdinfo->FileName, conv_name, conv_len);
ffdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
break;
@@ -3438,11 +3439,11 @@ static int smb2_populate_readdir_entry(struct ksmbd_conn *conn, int info_level,
fbdinfo->EaSize =
smb2_get_reparse_tag_special_file(ksmbd_kstat->kstat->mode);
if (fbdinfo->EaSize)
- fbdinfo->ExtFileAttributes = ATTR_REPARSE_POINT_LE;
+ fbdinfo->ExtFileAttributes = FILE_ATTRIBUTE_REPARSE_POINT_LE;
fbdinfo->ShortNameLength = 0;
fbdinfo->Reserved = 0;
if (d_info->hide_dot_file && d_info->name[0] == '.')
- fbdinfo->ExtFileAttributes |= ATTR_HIDDEN_LE;
+ fbdinfo->ExtFileAttributes |= FILE_ATTRIBUTE_HIDDEN_LE;
memcpy(fbdinfo->FileName, conv_name, conv_len);
fbdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
break;
@@ -3454,7 +3455,7 @@ static int smb2_populate_readdir_entry(struct ksmbd_conn *conn, int info_level,
fdinfo = (struct file_directory_info *)kstat;
fdinfo->FileNameLength = cpu_to_le32(conv_len);
if (d_info->hide_dot_file && d_info->name[0] == '.')
- fdinfo->ExtFileAttributes |= ATTR_HIDDEN_LE;
+ fdinfo->ExtFileAttributes |= FILE_ATTRIBUTE_HIDDEN_LE;
memcpy(fdinfo->FileName, conv_name, conv_len);
fdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
break;
@@ -3478,11 +3479,11 @@ static int smb2_populate_readdir_entry(struct ksmbd_conn *conn, int info_level,
dinfo->EaSize =
smb2_get_reparse_tag_special_file(ksmbd_kstat->kstat->mode);
if (dinfo->EaSize)
- dinfo->ExtFileAttributes = ATTR_REPARSE_POINT_LE;
+ dinfo->ExtFileAttributes = FILE_ATTRIBUTE_REPARSE_POINT_LE;
dinfo->Reserved = 0;
dinfo->UniqueId = cpu_to_le64(ksmbd_kstat->kstat->ino);
if (d_info->hide_dot_file && d_info->name[0] == '.')
- dinfo->ExtFileAttributes |= ATTR_HIDDEN_LE;
+ dinfo->ExtFileAttributes |= FILE_ATTRIBUTE_HIDDEN_LE;
memcpy(dinfo->FileName, conv_name, conv_len);
dinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
break;
@@ -3496,13 +3497,13 @@ static int smb2_populate_readdir_entry(struct ksmbd_conn *conn, int info_level,
fibdinfo->EaSize =
smb2_get_reparse_tag_special_file(ksmbd_kstat->kstat->mode);
if (fibdinfo->EaSize)
- fibdinfo->ExtFileAttributes = ATTR_REPARSE_POINT_LE;
+ fibdinfo->ExtFileAttributes = FILE_ATTRIBUTE_REPARSE_POINT_LE;
fibdinfo->UniqueId = cpu_to_le64(ksmbd_kstat->kstat->ino);
fibdinfo->ShortNameLength = 0;
fibdinfo->Reserved = 0;
fibdinfo->Reserved2 = cpu_to_le16(0);
if (d_info->hide_dot_file && d_info->name[0] == '.')
- fibdinfo->ExtFileAttributes |= ATTR_HIDDEN_LE;
+ fibdinfo->ExtFileAttributes |= FILE_ATTRIBUTE_HIDDEN_LE;
memcpy(fibdinfo->FileName, conv_name, conv_len);
fibdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
break;
@@ -3528,9 +3529,10 @@ static int smb2_populate_readdir_entry(struct ksmbd_conn *conn, int info_level,
posix_info->Mode = cpu_to_le32(ksmbd_kstat->kstat->mode);
posix_info->Inode = cpu_to_le64(ksmbd_kstat->kstat->ino);
posix_info->DosAttributes =
- S_ISDIR(ksmbd_kstat->kstat->mode) ? ATTR_DIRECTORY_LE : ATTR_ARCHIVE_LE;
+ S_ISDIR(ksmbd_kstat->kstat->mode) ?
+ FILE_ATTRIBUTE_DIRECTORY_LE : FILE_ATTRIBUTE_ARCHIVE_LE;
if (d_info->hide_dot_file && d_info->name[0] == '.')
- posix_info->DosAttributes |= ATTR_HIDDEN_LE;
+ posix_info->DosAttributes |= FILE_ATTRIBUTE_HIDDEN_LE;
id_to_sid(from_kuid_munged(&init_user_ns, ksmbd_kstat->kstat->uid),
SIDNFS_USER, (struct smb_sid *)&posix_info->SidBuffer[0]);
id_to_sid(from_kgid_munged(&init_user_ns, ksmbd_kstat->kstat->gid),
@@ -3816,7 +3818,7 @@ int smb2_query_dir(struct ksmbd_work *work)
{
struct ksmbd_conn *conn = work->conn;
struct smb2_query_directory_req *req;
- struct smb2_query_directory_rsp *rsp, *rsp_org;
+ struct smb2_query_directory_rsp *rsp;
struct ksmbd_share_config *share = work->tcon->share_conf;
struct ksmbd_file *dir_fp = NULL;
struct ksmbd_dir_info d_info;
@@ -3826,7 +3828,6 @@ int smb2_query_dir(struct ksmbd_work *work)
int buffer_sz;
struct smb2_query_dir_private query_dir_private = {NULL, };
- rsp_org = work->response_buf;
WORK_BUFFERS(work, req, rsp);
if (ksmbd_override_fsids(work)) {
@@ -3947,7 +3948,7 @@ int smb2_query_dir(struct ksmbd_work *work)
rsp->OutputBufferOffset = cpu_to_le16(0);
rsp->OutputBufferLength = cpu_to_le32(0);
rsp->Buffer[0] = 0;
- inc_rfc1001_len(rsp_org, 9);
+ inc_rfc1001_len(work->response_buf, 9);
} else {
((struct file_directory_info *)
((char *)rsp->Buffer + d_info.last_entry_offset))
@@ -3956,7 +3957,7 @@ int smb2_query_dir(struct ksmbd_work *work)
rsp->StructureSize = cpu_to_le16(9);
rsp->OutputBufferOffset = cpu_to_le16(72);
rsp->OutputBufferLength = cpu_to_le32(d_info.data_count);
- inc_rfc1001_len(rsp_org, 8 + d_info.data_count);
+ inc_rfc1001_len(work->response_buf, 8 + d_info.data_count);
}
kfree(srch_ptr);
@@ -3999,26 +4000,28 @@ err_out2:
* Return: 0 on success, otherwise error
*/
static int buffer_check_err(int reqOutputBufferLength,
- struct smb2_query_info_rsp *rsp, int infoclass_size)
+ struct smb2_query_info_rsp *rsp,
+ void *rsp_org, int infoclass_size)
{
if (reqOutputBufferLength < le32_to_cpu(rsp->OutputBufferLength)) {
if (reqOutputBufferLength < infoclass_size) {
pr_err("Invalid Buffer Size Requested\n");
rsp->hdr.Status = STATUS_INFO_LENGTH_MISMATCH;
- rsp->hdr.smb2_buf_length = cpu_to_be32(sizeof(struct smb2_hdr) - 4);
+ *(__be32 *)rsp_org = cpu_to_be32(sizeof(struct smb2_hdr));
return -EINVAL;
}
ksmbd_debug(SMB, "Buffer Overflow\n");
rsp->hdr.Status = STATUS_BUFFER_OVERFLOW;
- rsp->hdr.smb2_buf_length = cpu_to_be32(sizeof(struct smb2_hdr) - 4 +
+ *(__be32 *)rsp_org = cpu_to_be32(sizeof(struct smb2_hdr) +
reqOutputBufferLength);
rsp->OutputBufferLength = cpu_to_le32(reqOutputBufferLength);
}
return 0;
}
-static void get_standard_info_pipe(struct smb2_query_info_rsp *rsp)
+static void get_standard_info_pipe(struct smb2_query_info_rsp *rsp,
+ void *rsp_org)
{
struct smb2_file_standard_info *sinfo;
@@ -4031,10 +4034,11 @@ static void get_standard_info_pipe(struct smb2_query_info_rsp *rsp)
sinfo->Directory = 0;
rsp->OutputBufferLength =
cpu_to_le32(sizeof(struct smb2_file_standard_info));
- inc_rfc1001_len(rsp, sizeof(struct smb2_file_standard_info));
+ inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_standard_info));
}
-static void get_internal_info_pipe(struct smb2_query_info_rsp *rsp, u64 num)
+static void get_internal_info_pipe(struct smb2_query_info_rsp *rsp, u64 num,
+ void *rsp_org)
{
struct smb2_file_internal_info *file_info;
@@ -4044,12 +4048,13 @@ static void get_internal_info_pipe(struct smb2_query_info_rsp *rsp, u64 num)
file_info->IndexNumber = cpu_to_le64(num | (1ULL << 63));
rsp->OutputBufferLength =
cpu_to_le32(sizeof(struct smb2_file_internal_info));
- inc_rfc1001_len(rsp, sizeof(struct smb2_file_internal_info));
+ inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_internal_info));
}
static int smb2_get_info_file_pipe(struct ksmbd_session *sess,
struct smb2_query_info_req *req,
- struct smb2_query_info_rsp *rsp)
+ struct smb2_query_info_rsp *rsp,
+ void *rsp_org)
{
u64 id;
int rc;
@@ -4067,14 +4072,16 @@ static int smb2_get_info_file_pipe(struct ksmbd_session *sess,
switch (req->FileInfoClass) {
case FILE_STANDARD_INFORMATION:
- get_standard_info_pipe(rsp);
+ get_standard_info_pipe(rsp, rsp_org);
rc = buffer_check_err(le32_to_cpu(req->OutputBufferLength),
- rsp, FILE_STANDARD_INFORMATION_SIZE);
+ rsp, rsp_org,
+ FILE_STANDARD_INFORMATION_SIZE);
break;
case FILE_INTERNAL_INFORMATION:
- get_internal_info_pipe(rsp, id);
+ get_internal_info_pipe(rsp, id, rsp_org);
rc = buffer_check_err(le32_to_cpu(req->OutputBufferLength),
- rsp, FILE_INTERNAL_INFORMATION_SIZE);
+ rsp, rsp_org,
+ FILE_INTERNAL_INFORMATION_SIZE);
break;
default:
ksmbd_debug(SMB, "smb2_info_file_pipe for %u not supported\n",
@@ -4688,7 +4695,7 @@ static int find_file_posix_info(struct smb2_query_info_rsp *rsp,
static int smb2_get_info_file(struct ksmbd_work *work,
struct smb2_query_info_req *req,
- struct smb2_query_info_rsp *rsp, void *rsp_org)
+ struct smb2_query_info_rsp *rsp)
{
struct ksmbd_file *fp;
int fileinfoclass = 0;
@@ -4699,7 +4706,8 @@ static int smb2_get_info_file(struct ksmbd_work *work,
if (test_share_config_flag(work->tcon->share_conf,
KSMBD_SHARE_FLAG_PIPE)) {
/* smb2 info file called for pipe */
- return smb2_get_info_file_pipe(work->sess, req, rsp);
+ return smb2_get_info_file_pipe(work->sess, req, rsp,
+ work->response_buf);
}
if (work->next_smb2_rcv_hdr_off) {
@@ -4724,77 +4732,77 @@ static int smb2_get_info_file(struct ksmbd_work *work,
switch (fileinfoclass) {
case FILE_ACCESS_INFORMATION:
- get_file_access_info(rsp, fp, rsp_org);
+ get_file_access_info(rsp, fp, work->response_buf);
file_infoclass_size = FILE_ACCESS_INFORMATION_SIZE;
break;
case FILE_BASIC_INFORMATION:
- rc = get_file_basic_info(rsp, fp, rsp_org);
+ rc = get_file_basic_info(rsp, fp, work->response_buf);
file_infoclass_size = FILE_BASIC_INFORMATION_SIZE;
break;
case FILE_STANDARD_INFORMATION:
- get_file_standard_info(rsp, fp, rsp_org);
+ get_file_standard_info(rsp, fp, work->response_buf);
file_infoclass_size = FILE_STANDARD_INFORMATION_SIZE;
break;
case FILE_ALIGNMENT_INFORMATION:
- get_file_alignment_info(rsp, rsp_org);
+ get_file_alignment_info(rsp, work->response_buf);
file_infoclass_size = FILE_ALIGNMENT_INFORMATION_SIZE;
break;
case FILE_ALL_INFORMATION:
- rc = get_file_all_info(work, rsp, fp, rsp_org);
+ rc = get_file_all_info(work, rsp, fp, work->response_buf);
file_infoclass_size = FILE_ALL_INFORMATION_SIZE;
break;
case FILE_ALTERNATE_NAME_INFORMATION:
- get_file_alternate_info(work, rsp, fp, rsp_org);
+ get_file_alternate_info(work, rsp, fp, work->response_buf);
file_infoclass_size = FILE_ALTERNATE_NAME_INFORMATION_SIZE;
break;
case FILE_STREAM_INFORMATION:
- get_file_stream_info(work, rsp, fp, rsp_org);
+ get_file_stream_info(work, rsp, fp, work->response_buf);
file_infoclass_size = FILE_STREAM_INFORMATION_SIZE;
break;
case FILE_INTERNAL_INFORMATION:
- get_file_internal_info(rsp, fp, rsp_org);
+ get_file_internal_info(rsp, fp, work->response_buf);
file_infoclass_size = FILE_INTERNAL_INFORMATION_SIZE;
break;
case FILE_NETWORK_OPEN_INFORMATION:
- rc = get_file_network_open_info(rsp, fp, rsp_org);
+ rc = get_file_network_open_info(rsp, fp, work->response_buf);
file_infoclass_size = FILE_NETWORK_OPEN_INFORMATION_SIZE;
break;
case FILE_EA_INFORMATION:
- get_file_ea_info(rsp, rsp_org);
+ get_file_ea_info(rsp, work->response_buf);
file_infoclass_size = FILE_EA_INFORMATION_SIZE;
break;
case FILE_FULL_EA_INFORMATION:
- rc = smb2_get_ea(work, fp, req, rsp, rsp_org);
+ rc = smb2_get_ea(work, fp, req, rsp, work->response_buf);
file_infoclass_size = FILE_FULL_EA_INFORMATION_SIZE;
break;
case FILE_POSITION_INFORMATION:
- get_file_position_info(rsp, fp, rsp_org);
+ get_file_position_info(rsp, fp, work->response_buf);
file_infoclass_size = FILE_POSITION_INFORMATION_SIZE;
break;
case FILE_MODE_INFORMATION:
- get_file_mode_info(rsp, fp, rsp_org);
+ get_file_mode_info(rsp, fp, work->response_buf);
file_infoclass_size = FILE_MODE_INFORMATION_SIZE;
break;
case FILE_COMPRESSION_INFORMATION:
- get_file_compression_info(rsp, fp, rsp_org);
+ get_file_compression_info(rsp, fp, work->response_buf);
file_infoclass_size = FILE_COMPRESSION_INFORMATION_SIZE;
break;
case FILE_ATTRIBUTE_TAG_INFORMATION:
- rc = get_file_attribute_tag_info(rsp, fp, rsp_org);
+ rc = get_file_attribute_tag_info(rsp, fp, work->response_buf);
file_infoclass_size = FILE_ATTRIBUTE_TAG_INFORMATION_SIZE;
break;
case SMB_FIND_FILE_POSIX_INFO:
@@ -4802,7 +4810,7 @@ static int smb2_get_info_file(struct ksmbd_work *work,
pr_err("client doesn't negotiate with SMB3.1.1 POSIX Extensions\n");
rc = -EOPNOTSUPP;
} else {
- rc = find_file_posix_info(rsp, fp, rsp_org);
+ rc = find_file_posix_info(rsp, fp, work->response_buf);
file_infoclass_size = sizeof(struct smb311_posix_qinfo);
}
break;
@@ -4813,7 +4821,7 @@ static int smb2_get_info_file(struct ksmbd_work *work,
}
if (!rc)
rc = buffer_check_err(le32_to_cpu(req->OutputBufferLength),
- rsp,
+ rsp, work->response_buf,
file_infoclass_size);
ksmbd_fd_put(work, fp);
return rc;
@@ -4821,7 +4829,7 @@ static int smb2_get_info_file(struct ksmbd_work *work,
static int smb2_get_info_filesystem(struct ksmbd_work *work,
struct smb2_query_info_req *req,
- struct smb2_query_info_rsp *rsp, void *rsp_org)
+ struct smb2_query_info_rsp *rsp)
{
struct ksmbd_session *sess = work->sess;
struct ksmbd_conn *conn = sess->conn;
@@ -4857,7 +4865,7 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
info->DeviceType = cpu_to_le32(stfs.f_type);
info->DeviceCharacteristics = cpu_to_le32(0x00000020);
rsp->OutputBufferLength = cpu_to_le32(8);
- inc_rfc1001_len(rsp_org, 8);
+ inc_rfc1001_len(work->response_buf, 8);
fs_infoclass_size = FS_DEVICE_INFORMATION_SIZE;
break;
}
@@ -4883,7 +4891,7 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
info->FileSystemNameLen = cpu_to_le32(len);
sz = sizeof(struct filesystem_attribute_info) - 2 + len;
rsp->OutputBufferLength = cpu_to_le32(sz);
- inc_rfc1001_len(rsp_org, sz);
+ inc_rfc1001_len(work->response_buf, sz);
fs_infoclass_size = FS_ATTRIBUTE_INFORMATION_SIZE;
break;
}
@@ -4891,11 +4899,18 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
{
struct filesystem_vol_info *info;
size_t sz;
+ unsigned int serial_crc = 0;
info = (struct filesystem_vol_info *)(rsp->Buffer);
info->VolumeCreationTime = 0;
+ serial_crc = crc32_le(serial_crc, share->name,
+ strlen(share->name));
+ serial_crc = crc32_le(serial_crc, share->path,
+ strlen(share->path));
+ serial_crc = crc32_le(serial_crc, ksmbd_netbios_name(),
+ strlen(ksmbd_netbios_name()));
/* Taking dummy value of serial number*/
- info->SerialNumber = cpu_to_le32(0xbc3ac512);
+ info->SerialNumber = cpu_to_le32(serial_crc);
len = smbConvertToUTF16((__le16 *)info->VolumeLabel,
share->name, PATH_MAX,
conn->local_nls, 0);
@@ -4904,7 +4919,7 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
info->Reserved = 0;
sz = sizeof(struct filesystem_vol_info) - 2 + len;
rsp->OutputBufferLength = cpu_to_le32(sz);
- inc_rfc1001_len(rsp_org, sz);
+ inc_rfc1001_len(work->response_buf, sz);
fs_infoclass_size = FS_VOLUME_INFORMATION_SIZE;
break;
}
@@ -4918,7 +4933,7 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
info->SectorsPerAllocationUnit = cpu_to_le32(1);
info->BytesPerSector = cpu_to_le32(stfs.f_bsize);
rsp->OutputBufferLength = cpu_to_le32(24);
- inc_rfc1001_len(rsp_org, 24);
+ inc_rfc1001_len(work->response_buf, 24);
fs_infoclass_size = FS_SIZE_INFORMATION_SIZE;
break;
}
@@ -4935,7 +4950,7 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
info->SectorsPerAllocationUnit = cpu_to_le32(1);
info->BytesPerSector = cpu_to_le32(stfs.f_bsize);
rsp->OutputBufferLength = cpu_to_le32(32);
- inc_rfc1001_len(rsp_org, 32);
+ inc_rfc1001_len(work->response_buf, 32);
fs_infoclass_size = FS_FULL_SIZE_INFORMATION_SIZE;
break;
}
@@ -4956,7 +4971,7 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
info->extended_info.rel_date = 0;
memcpy(info->extended_info.version_string, "1.1.0", strlen("1.1.0"));
rsp->OutputBufferLength = cpu_to_le32(64);
- inc_rfc1001_len(rsp_org, 64);
+ inc_rfc1001_len(work->response_buf, 64);
fs_infoclass_size = FS_OBJECT_ID_INFORMATION_SIZE;
break;
}
@@ -4977,7 +4992,7 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
info->ByteOffsetForSectorAlignment = 0;
info->ByteOffsetForPartitionAlignment = 0;
rsp->OutputBufferLength = cpu_to_le32(28);
- inc_rfc1001_len(rsp_org, 28);
+ inc_rfc1001_len(work->response_buf, 28);
fs_infoclass_size = FS_SECTOR_SIZE_INFORMATION_SIZE;
break;
}
@@ -4999,7 +5014,7 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
info->DefaultQuotaLimit = cpu_to_le64(SMB2_NO_FID);
info->Padding = 0;
rsp->OutputBufferLength = cpu_to_le32(48);
- inc_rfc1001_len(rsp_org, 48);
+ inc_rfc1001_len(work->response_buf, 48);
fs_infoclass_size = FS_CONTROL_INFORMATION_SIZE;
break;
}
@@ -5020,7 +5035,7 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
info->TotalFileNodes = cpu_to_le64(stfs.f_files);
info->FreeFileNodes = cpu_to_le64(stfs.f_ffree);
rsp->OutputBufferLength = cpu_to_le32(56);
- inc_rfc1001_len(rsp_org, 56);
+ inc_rfc1001_len(work->response_buf, 56);
fs_infoclass_size = FS_POSIX_INFORMATION_SIZE;
}
break;
@@ -5030,7 +5045,7 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
return -EOPNOTSUPP;
}
rc = buffer_check_err(le32_to_cpu(req->OutputBufferLength),
- rsp,
+ rsp, work->response_buf,
fs_infoclass_size);
path_put(&path);
return rc;
@@ -5038,7 +5053,7 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
static int smb2_get_info_sec(struct ksmbd_work *work,
struct smb2_query_info_req *req,
- struct smb2_query_info_rsp *rsp, void *rsp_org)
+ struct smb2_query_info_rsp *rsp)
{
struct ksmbd_file *fp;
struct user_namespace *user_ns;
@@ -5065,7 +5080,7 @@ static int smb2_get_info_sec(struct ksmbd_work *work,
secdesclen = sizeof(struct smb_ntsd);
rsp->OutputBufferLength = cpu_to_le32(secdesclen);
- inc_rfc1001_len(rsp_org, secdesclen);
+ inc_rfc1001_len(work->response_buf, secdesclen);
return 0;
}
@@ -5107,7 +5122,7 @@ static int smb2_get_info_sec(struct ksmbd_work *work,
return rc;
rsp->OutputBufferLength = cpu_to_le32(secdesclen);
- inc_rfc1001_len(rsp_org, secdesclen);
+ inc_rfc1001_len(work->response_buf, secdesclen);
return 0;
}
@@ -5120,10 +5135,9 @@ static int smb2_get_info_sec(struct ksmbd_work *work,
int smb2_query_info(struct ksmbd_work *work)
{
struct smb2_query_info_req *req;
- struct smb2_query_info_rsp *rsp, *rsp_org;
+ struct smb2_query_info_rsp *rsp;
int rc = 0;
- rsp_org = work->response_buf;
WORK_BUFFERS(work, req, rsp);
ksmbd_debug(SMB, "GOT query info request\n");
@@ -5131,15 +5145,15 @@ int smb2_query_info(struct ksmbd_work *work)
switch (req->InfoType) {
case SMB2_O_INFO_FILE:
ksmbd_debug(SMB, "GOT SMB2_O_INFO_FILE\n");
- rc = smb2_get_info_file(work, req, rsp, (void *)rsp_org);
+ rc = smb2_get_info_file(work, req, rsp);
break;
case SMB2_O_INFO_FILESYSTEM:
ksmbd_debug(SMB, "GOT SMB2_O_INFO_FILESYSTEM\n");
- rc = smb2_get_info_filesystem(work, req, rsp, (void *)rsp_org);
+ rc = smb2_get_info_filesystem(work, req, rsp);
break;
case SMB2_O_INFO_SECURITY:
ksmbd_debug(SMB, "GOT SMB2_O_INFO_SECURITY\n");
- rc = smb2_get_info_sec(work, req, rsp, (void *)rsp_org);
+ rc = smb2_get_info_sec(work, req, rsp);
break;
default:
ksmbd_debug(SMB, "InfoType %d not supported yet\n",
@@ -5164,7 +5178,7 @@ int smb2_query_info(struct ksmbd_work *work)
}
rsp->StructureSize = cpu_to_le16(9);
rsp->OutputBufferOffset = cpu_to_le16(72);
- inc_rfc1001_len(rsp_org, 8);
+ inc_rfc1001_len(work->response_buf, 8);
return 0;
}
@@ -5177,8 +5191,8 @@ int smb2_query_info(struct ksmbd_work *work)
static noinline int smb2_close_pipe(struct ksmbd_work *work)
{
u64 id;
- struct smb2_close_req *req = work->request_buf;
- struct smb2_close_rsp *rsp = work->response_buf;
+ struct smb2_close_req *req = smb2_get_msg(work->request_buf);
+ struct smb2_close_rsp *rsp = smb2_get_msg(work->response_buf);
id = le64_to_cpu(req->VolatileFileId);
ksmbd_session_rpc_close(work->sess, id);
@@ -5193,7 +5207,7 @@ static noinline int smb2_close_pipe(struct ksmbd_work *work)
rsp->AllocationSize = 0;
rsp->EndOfFile = 0;
rsp->Attributes = 0;
- inc_rfc1001_len(rsp, 60);
+ inc_rfc1001_len(work->response_buf, 60);
return 0;
}
@@ -5209,14 +5223,12 @@ int smb2_close(struct ksmbd_work *work)
u64 sess_id;
struct smb2_close_req *req;
struct smb2_close_rsp *rsp;
- struct smb2_close_rsp *rsp_org;
struct ksmbd_conn *conn = work->conn;
struct ksmbd_file *fp;
struct inode *inode;
u64 time;
int err = 0;
- rsp_org = work->response_buf;
WORK_BUFFERS(work, req, rsp);
if (test_share_config_flag(work->tcon->share_conf,
@@ -5306,7 +5318,7 @@ out:
rsp->hdr.Status = STATUS_FILE_CLOSED;
smb2_set_err_rsp(work);
} else {
- inc_rfc1001_len(rsp_org, 60);
+ inc_rfc1001_len(work->response_buf, 60);
}
return 0;
@@ -5320,11 +5332,11 @@ out:
*/
int smb2_echo(struct ksmbd_work *work)
{
- struct smb2_echo_rsp *rsp = work->response_buf;
+ struct smb2_echo_rsp *rsp = smb2_get_msg(work->response_buf);
rsp->StructureSize = cpu_to_le16(4);
rsp->Reserved = 0;
- inc_rfc1001_len(rsp, 4);
+ inc_rfc1001_len(work->response_buf, 4);
return 0;
}
@@ -5566,14 +5578,14 @@ static int set_file_basic_info(struct ksmbd_file *fp,
if (file_info->Attributes) {
if (!S_ISDIR(inode->i_mode) &&
- file_info->Attributes & ATTR_DIRECTORY_LE) {
+ file_info->Attributes & FILE_ATTRIBUTE_DIRECTORY_LE) {
pr_err("can't change a file to a directory\n");
return -EINVAL;
}
- if (!(S_ISDIR(inode->i_mode) && file_info->Attributes == ATTR_NORMAL_LE))
+ if (!(S_ISDIR(inode->i_mode) && file_info->Attributes == FILE_ATTRIBUTE_NORMAL_LE))
fp->f_ci->m_fattr = file_info->Attributes |
- (fp->f_ci->m_fattr & ATTR_DIRECTORY_LE);
+ (fp->f_ci->m_fattr & FILE_ATTRIBUTE_DIRECTORY_LE);
}
if (test_share_config_flag(share, KSMBD_SHARE_FLAG_STORE_DOS_ATTRS) &&
@@ -5794,9 +5806,7 @@ static int set_file_mode_info(struct ksmbd_file *fp,
mode = file_info->Mode;
- if ((mode & ~FILE_MODE_INFO_MASK) ||
- (mode & FILE_SYNCHRONOUS_IO_ALERT_LE &&
- mode & FILE_SYNCHRONOUS_IO_NONALERT_LE)) {
+ if ((mode & ~FILE_MODE_INFO_MASK)) {
pr_err("Mode is not valid : 0x%x\n", le32_to_cpu(mode));
return -EINVAL;
}
@@ -5943,14 +5953,13 @@ static int smb2_set_info_sec(struct ksmbd_file *fp, int addition_info,
int smb2_set_info(struct ksmbd_work *work)
{
struct smb2_set_info_req *req;
- struct smb2_set_info_rsp *rsp, *rsp_org;
+ struct smb2_set_info_rsp *rsp;
struct ksmbd_file *fp;
int rc = 0;
unsigned int id = KSMBD_NO_FID, pid = KSMBD_NO_FID;
ksmbd_debug(SMB, "Received set info request\n");
- rsp_org = work->response_buf;
if (work->next_smb2_rcv_hdr_off) {
req = ksmbd_req_buf_next(work);
rsp = ksmbd_resp_buf_next(work);
@@ -5961,8 +5970,8 @@ int smb2_set_info(struct ksmbd_work *work)
pid = work->compound_pfid;
}
} else {
- req = work->request_buf;
- rsp = work->response_buf;
+ req = smb2_get_msg(work->request_buf);
+ rsp = smb2_get_msg(work->response_buf);
}
if (!has_file_id(id)) {
@@ -6002,7 +6011,7 @@ int smb2_set_info(struct ksmbd_work *work)
goto err_out;
rsp->StructureSize = cpu_to_le16(2);
- inc_rfc1001_len(rsp_org, 2);
+ inc_rfc1001_len(work->response_buf, 2);
ksmbd_fd_put(work, fp);
return 0;
@@ -6042,12 +6051,12 @@ static noinline int smb2_read_pipe(struct ksmbd_work *work)
int nbytes = 0, err;
u64 id;
struct ksmbd_rpc_command *rpc_resp;
- struct smb2_read_req *req = work->request_buf;
- struct smb2_read_rsp *rsp = work->response_buf;
+ struct smb2_read_req *req = smb2_get_msg(work->request_buf);
+ struct smb2_read_rsp *rsp = smb2_get_msg(work->response_buf);
id = le64_to_cpu(req->VolatileFileId);
- inc_rfc1001_len(rsp, 16);
+ inc_rfc1001_len(work->response_buf, 16);
rpc_resp = ksmbd_rpc_read(work->sess, id);
if (rpc_resp) {
if (rpc_resp->flags != KSMBD_RPC_OK) {
@@ -6066,7 +6075,7 @@ static noinline int smb2_read_pipe(struct ksmbd_work *work)
rpc_resp->payload_sz);
nbytes = rpc_resp->payload_sz;
- work->resp_hdr_sz = get_rfc1002_len(rsp) + 4;
+ work->resp_hdr_sz = get_rfc1002_len(work->response_buf) + 4;
work->aux_payload_sz = nbytes;
kvfree(rpc_resp);
}
@@ -6076,8 +6085,8 @@ static noinline int smb2_read_pipe(struct ksmbd_work *work)
rsp->Reserved = 0;
rsp->DataLength = cpu_to_le32(nbytes);
rsp->DataRemaining = 0;
- rsp->Reserved2 = 0;
- inc_rfc1001_len(rsp, nbytes);
+ rsp->Flags = 0;
+ inc_rfc1001_len(work->response_buf, nbytes);
return 0;
out:
@@ -6127,14 +6136,13 @@ int smb2_read(struct ksmbd_work *work)
{
struct ksmbd_conn *conn = work->conn;
struct smb2_read_req *req;
- struct smb2_read_rsp *rsp, *rsp_org;
+ struct smb2_read_rsp *rsp;
struct ksmbd_file *fp;
loff_t offset;
size_t length, mincount;
ssize_t nbytes = 0, remain_bytes = 0;
int err = 0;
- rsp_org = work->response_buf;
WORK_BUFFERS(work, req, rsp);
if (test_share_config_flag(work->tcon->share_conf,
@@ -6215,11 +6223,11 @@ int smb2_read(struct ksmbd_work *work)
rsp->Reserved = 0;
rsp->DataLength = cpu_to_le32(nbytes);
rsp->DataRemaining = cpu_to_le32(remain_bytes);
- rsp->Reserved2 = 0;
- inc_rfc1001_len(rsp_org, 16);
- work->resp_hdr_sz = get_rfc1002_len(rsp_org) + 4;
+ rsp->Flags = 0;
+ inc_rfc1001_len(work->response_buf, 16);
+ work->resp_hdr_sz = get_rfc1002_len(work->response_buf) + 4;
work->aux_payload_sz = nbytes;
- inc_rfc1001_len(rsp_org, nbytes);
+ inc_rfc1001_len(work->response_buf, nbytes);
ksmbd_fd_put(work, fp);
return 0;
@@ -6254,8 +6262,8 @@ out:
*/
static noinline int smb2_write_pipe(struct ksmbd_work *work)
{
- struct smb2_write_req *req = work->request_buf;
- struct smb2_write_rsp *rsp = work->response_buf;
+ struct smb2_write_req *req = smb2_get_msg(work->request_buf);
+ struct smb2_write_rsp *rsp = smb2_get_msg(work->response_buf);
struct ksmbd_rpc_command *rpc_resp;
u64 id = 0;
int err = 0, ret = 0;
@@ -6266,13 +6274,14 @@ static noinline int smb2_write_pipe(struct ksmbd_work *work)
id = le64_to_cpu(req->VolatileFileId);
if (le16_to_cpu(req->DataOffset) ==
- (offsetof(struct smb2_write_req, Buffer) - 4)) {
+ offsetof(struct smb2_write_req, Buffer)) {
data_buf = (char *)&req->Buffer[0];
} else {
- if ((u64)le16_to_cpu(req->DataOffset) + length > get_rfc1002_len(req)) {
+ if ((u64)le16_to_cpu(req->DataOffset) + length >
+ get_rfc1002_len(work->request_buf)) {
pr_err("invalid write data offset %u, smb_len %u\n",
le16_to_cpu(req->DataOffset),
- get_rfc1002_len(req));
+ get_rfc1002_len(work->request_buf));
err = -EINVAL;
goto out;
}
@@ -6304,7 +6313,7 @@ static noinline int smb2_write_pipe(struct ksmbd_work *work)
rsp->DataLength = cpu_to_le32(length);
rsp->DataRemaining = 0;
rsp->Reserved2 = 0;
- inc_rfc1001_len(rsp, 16);
+ inc_rfc1001_len(work->response_buf, 16);
return 0;
out:
if (err) {
@@ -6372,7 +6381,7 @@ static ssize_t smb2_write_rdma_channel(struct ksmbd_work *work,
int smb2_write(struct ksmbd_work *work)
{
struct smb2_write_req *req;
- struct smb2_write_rsp *rsp, *rsp_org;
+ struct smb2_write_rsp *rsp;
struct ksmbd_file *fp = NULL;
loff_t offset;
size_t length;
@@ -6381,7 +6390,6 @@ int smb2_write(struct ksmbd_work *work)
bool writethrough = false;
int err = 0;
- rsp_org = work->response_buf;
WORK_BUFFERS(work, req, rsp);
if (test_share_config_flag(work->tcon->share_conf, KSMBD_SHARE_FLAG_PIPE)) {
@@ -6424,13 +6432,14 @@ int smb2_write(struct ksmbd_work *work)
if (req->Channel != SMB2_CHANNEL_RDMA_V1 &&
req->Channel != SMB2_CHANNEL_RDMA_V1_INVALIDATE) {
if (le16_to_cpu(req->DataOffset) ==
- (offsetof(struct smb2_write_req, Buffer) - 4)) {
+ offsetof(struct smb2_write_req, Buffer)) {
data_buf = (char *)&req->Buffer[0];
} else {
- if ((u64)le16_to_cpu(req->DataOffset) + length > get_rfc1002_len(req)) {
+ if ((u64)le16_to_cpu(req->DataOffset) + length >
+ get_rfc1002_len(work->request_buf)) {
pr_err("invalid write data offset %u, smb_len %u\n",
le16_to_cpu(req->DataOffset),
- get_rfc1002_len(req));
+ get_rfc1002_len(work->request_buf));
err = -EINVAL;
goto out;
}
@@ -6468,7 +6477,7 @@ int smb2_write(struct ksmbd_work *work)
rsp->DataLength = cpu_to_le32(nbytes);
rsp->DataRemaining = 0;
rsp->Reserved2 = 0;
- inc_rfc1001_len(rsp_org, 16);
+ inc_rfc1001_len(work->response_buf, 16);
ksmbd_fd_put(work, fp);
return 0;
@@ -6502,10 +6511,9 @@ out:
int smb2_flush(struct ksmbd_work *work)
{
struct smb2_flush_req *req;
- struct smb2_flush_rsp *rsp, *rsp_org;
+ struct smb2_flush_rsp *rsp;
int err;
- rsp_org = work->response_buf;
WORK_BUFFERS(work, req, rsp);
ksmbd_debug(SMB, "SMB2_FLUSH called for fid %llu\n",
@@ -6519,7 +6527,7 @@ int smb2_flush(struct ksmbd_work *work)
rsp->StructureSize = cpu_to_le16(4);
rsp->Reserved = 0;
- inc_rfc1001_len(rsp_org, 4);
+ inc_rfc1001_len(work->response_buf, 4);
return 0;
out:
@@ -6540,7 +6548,7 @@ out:
int smb2_cancel(struct ksmbd_work *work)
{
struct ksmbd_conn *conn = work->conn;
- struct smb2_hdr *hdr = work->request_buf;
+ struct smb2_hdr *hdr = smb2_get_msg(work->request_buf);
struct smb2_hdr *chdr;
struct ksmbd_work *cancel_work = NULL;
int canceled = 0;
@@ -6555,7 +6563,7 @@ int smb2_cancel(struct ksmbd_work *work)
spin_lock(&conn->request_lock);
list_for_each_entry(cancel_work, command_list,
async_request_entry) {
- chdr = cancel_work->request_buf;
+ chdr = smb2_get_msg(cancel_work->request_buf);
if (cancel_work->async_id !=
le64_to_cpu(hdr->Id.AsyncId))
@@ -6574,7 +6582,7 @@ int smb2_cancel(struct ksmbd_work *work)
spin_lock(&conn->request_lock);
list_for_each_entry(cancel_work, command_list, request_entry) {
- chdr = cancel_work->request_buf;
+ chdr = smb2_get_msg(cancel_work->request_buf);
if (chdr->MessageId != hdr->MessageId ||
cancel_work == work)
@@ -6709,8 +6717,8 @@ static inline bool lock_defer_pending(struct file_lock *fl)
*/
int smb2_lock(struct ksmbd_work *work)
{
- struct smb2_lock_req *req = work->request_buf;
- struct smb2_lock_rsp *rsp = work->response_buf;
+ struct smb2_lock_req *req = smb2_get_msg(work->request_buf);
+ struct smb2_lock_rsp *rsp = smb2_get_msg(work->response_buf);
struct smb2_lock_element *lock_ele;
struct ksmbd_file *fp = NULL;
struct file_lock *flock = NULL;
@@ -7017,7 +7025,7 @@ skip:
ksmbd_debug(SMB, "successful in taking lock\n");
rsp->hdr.Status = STATUS_SUCCESS;
rsp->Reserved = 0;
- inc_rfc1001_len(rsp, 4);
+ inc_rfc1001_len(work->response_buf, 4);
ksmbd_fd_put(work, fp);
return 0;
@@ -7312,7 +7320,7 @@ static int fsctl_validate_negotiate_info(struct ksmbd_conn *conn,
int ret = 0;
int dialect;
- if (in_buf_len < sizeof(struct validate_negotiate_info_req) +
+ if (in_buf_len < offsetof(struct validate_negotiate_info_req, Dialects) +
le16_to_cpu(neg_req->DialectCount) * sizeof(__le16))
return -EINVAL;
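The negotiate-info check now sizes the fixed part with offsetof(..., Dialects), which stays correct whether the structure declares its tail as Dialects[1] or as a flexible array. A small sketch of the same length test, using a stand-in struct rather than the real validate_negotiate_info_req:

#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the ioctl input (layout is illustrative). */
struct vni_req {
        uint32_t Capabilities;
        uint8_t  Guid[16];
        uint16_t SecurityMode;
        uint16_t DialectCount;
        uint16_t Dialects[];    /* DialectCount entries follow */
};

/*
 * The fixed header plus the variable dialect array must fit inside the
 * ioctl input buffer; offsetof() of the flexible array keeps the
 * arithmetic exact regardless of how the tail is declared.
 */
static bool vni_input_ok(unsigned int in_buf_len, uint16_t dialect_count)
{
        return in_buf_len >= offsetof(struct vni_req, Dialects) +
                             (size_t)dialect_count * sizeof(uint16_t);
}

int main(void)
{
        /* 24 fixed bytes + 2 dialects * 2 bytes = 28 needed. */
        printf("%d\n", vni_input_ok(28, 2));    /* 1 */
        printf("%d\n", vni_input_ok(26, 2));    /* 0 */
        return 0;
}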
@@ -7435,9 +7443,9 @@ static inline int fsctl_set_sparse(struct ksmbd_work *work, u64 id,
old_fattr = fp->f_ci->m_fattr;
if (sparse->SetSparse)
- fp->f_ci->m_fattr |= ATTR_SPARSE_FILE_LE;
+ fp->f_ci->m_fattr |= FILE_ATTRIBUTE_SPARSE_FILE_LE;
else
- fp->f_ci->m_fattr &= ~ATTR_SPARSE_FILE_LE;
+ fp->f_ci->m_fattr &= ~FILE_ATTRIBUTE_SPARSE_FILE_LE;
if (fp->f_ci->m_fattr != old_fattr &&
test_share_config_flag(work->tcon->share_conf,
@@ -7490,13 +7498,12 @@ static int fsctl_request_resume_key(struct ksmbd_work *work,
int smb2_ioctl(struct ksmbd_work *work)
{
struct smb2_ioctl_req *req;
- struct smb2_ioctl_rsp *rsp, *rsp_org;
+ struct smb2_ioctl_rsp *rsp;
unsigned int cnt_code, nbytes = 0, out_buf_len, in_buf_len;
u64 id = KSMBD_NO_FID;
struct ksmbd_conn *conn = work->conn;
int ret = 0;
- rsp_org = work->response_buf;
if (work->next_smb2_rcv_hdr_off) {
req = ksmbd_req_buf_next(work);
rsp = ksmbd_resp_buf_next(work);
@@ -7506,8 +7513,8 @@ int smb2_ioctl(struct ksmbd_work *work)
id = work->compound_fid;
}
} else {
- req = work->request_buf;
- rsp = work->response_buf;
+ req = smb2_get_msg(work->request_buf);
+ rsp = smb2_get_msg(work->response_buf);
}
if (!has_file_id(id))
@@ -7787,7 +7794,7 @@ dup_ext_out:
rsp->Reserved = cpu_to_le16(0);
rsp->Flags = cpu_to_le32(0);
rsp->Reserved2 = cpu_to_le32(0);
- inc_rfc1001_len(rsp_org, 48 + nbytes);
+ inc_rfc1001_len(work->response_buf, 48 + nbytes);
return 0;
@@ -7814,8 +7821,8 @@ out:
*/
static void smb20_oplock_break_ack(struct ksmbd_work *work)
{
- struct smb2_oplock_break *req = work->request_buf;
- struct smb2_oplock_break *rsp = work->response_buf;
+ struct smb2_oplock_break *req = smb2_get_msg(work->request_buf);
+ struct smb2_oplock_break *rsp = smb2_get_msg(work->response_buf);
struct ksmbd_file *fp;
struct oplock_info *opinfo = NULL;
__le32 err = 0;
@@ -7922,7 +7929,7 @@ static void smb20_oplock_break_ack(struct ksmbd_work *work)
rsp->Reserved2 = 0;
rsp->VolatileFid = cpu_to_le64(volatile_id);
rsp->PersistentFid = cpu_to_le64(persistent_id);
- inc_rfc1001_len(rsp, 24);
+ inc_rfc1001_len(work->response_buf, 24);
return;
err_out:
@@ -7958,8 +7965,8 @@ static int check_lease_state(struct lease *lease, __le32 req_state)
static void smb21_lease_break_ack(struct ksmbd_work *work)
{
struct ksmbd_conn *conn = work->conn;
- struct smb2_lease_ack *req = work->request_buf;
- struct smb2_lease_ack *rsp = work->response_buf;
+ struct smb2_lease_ack *req = smb2_get_msg(work->request_buf);
+ struct smb2_lease_ack *rsp = smb2_get_msg(work->response_buf);
struct oplock_info *opinfo;
__le32 err = 0;
int ret = 0;
@@ -8071,7 +8078,7 @@ static void smb21_lease_break_ack(struct ksmbd_work *work)
memcpy(rsp->LeaseKey, req->LeaseKey, 16);
rsp->LeaseState = lease_state;
rsp->LeaseDuration = 0;
- inc_rfc1001_len(rsp, 36);
+ inc_rfc1001_len(work->response_buf, 36);
return;
err_out:
@@ -8092,8 +8099,8 @@ err_out:
*/
int smb2_oplock_break(struct ksmbd_work *work)
{
- struct smb2_oplock_break *req = work->request_buf;
- struct smb2_oplock_break *rsp = work->response_buf;
+ struct smb2_oplock_break *req = smb2_get_msg(work->request_buf);
+ struct smb2_oplock_break *rsp = smb2_get_msg(work->response_buf);
switch (le16_to_cpu(req->StructureSize)) {
case OP_BREAK_STRUCT_SIZE_20:
@@ -8120,8 +8127,8 @@ int smb2_oplock_break(struct ksmbd_work *work)
*/
int smb2_notify(struct ksmbd_work *work)
{
- struct smb2_notify_req *req;
- struct smb2_notify_rsp *rsp;
+ struct smb2_change_notify_req *req;
+ struct smb2_change_notify_rsp *rsp;
WORK_BUFFERS(work, req, rsp);
@@ -8145,7 +8152,7 @@ int smb2_notify(struct ksmbd_work *work)
*/
bool smb2_is_sign_req(struct ksmbd_work *work, unsigned int command)
{
- struct smb2_hdr *rcv_hdr2 = work->request_buf;
+ struct smb2_hdr *rcv_hdr2 = smb2_get_msg(work->request_buf);
if ((rcv_hdr2->Flags & SMB2_FLAGS_SIGNED) &&
command != SMB2_NEGOTIATE_HE &&
@@ -8164,22 +8171,22 @@ bool smb2_is_sign_req(struct ksmbd_work *work, unsigned int command)
*/
int smb2_check_sign_req(struct ksmbd_work *work)
{
- struct smb2_hdr *hdr, *hdr_org;
+ struct smb2_hdr *hdr;
char signature_req[SMB2_SIGNATURE_SIZE];
char signature[SMB2_HMACSHA256_SIZE];
struct kvec iov[1];
size_t len;
- hdr_org = hdr = work->request_buf;
+ hdr = smb2_get_msg(work->request_buf);
if (work->next_smb2_rcv_hdr_off)
hdr = ksmbd_req_buf_next(work);
if (!hdr->NextCommand && !work->next_smb2_rcv_hdr_off)
- len = be32_to_cpu(hdr_org->smb2_buf_length);
+ len = get_rfc1002_len(work->request_buf);
else if (hdr->NextCommand)
len = le32_to_cpu(hdr->NextCommand);
else
- len = be32_to_cpu(hdr_org->smb2_buf_length) -
+ len = get_rfc1002_len(work->request_buf) -
work->next_smb2_rcv_hdr_off;
memcpy(signature_req, hdr->Signature, SMB2_SIGNATURE_SIZE);
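The signed region of a request depends on whether the PDU is compounded: a lone PDU is signed over the full transport length, a chained PDU up to its NextCommand offset, and the final PDU of a chain over whatever remains. A hedged sketch of that length selection follows; the parameter names are illustrative, with total_len standing in for get_rfc1002_len(work->request_buf) and hdr_off for work->next_smb2_rcv_hdr_off.

#include <stdint.h>
#include <stdio.h>

static uint32_t signed_region_len(uint32_t total_len, uint32_t hdr_off,
                                  uint32_t next_command)
{
        if (!next_command && !hdr_off)
                return total_len;               /* single, un-compounded PDU */
        if (next_command)
                return next_command;            /* up to the next chained header */
        return total_len - hdr_off;             /* last PDU of the compound */
}

int main(void)
{
        /* 3-PDU compound: 120 + 96 + 80 bytes. */
        printf("%u\n", signed_region_len(296, 0, 120));         /* first  -> 120 */
        printf("%u\n", signed_region_len(296, 120, 96));        /* middle -> 96  */
        printf("%u\n", signed_region_len(296, 216, 0));         /* last   -> 80  */
        return 0;
}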
@@ -8207,25 +8214,26 @@ int smb2_check_sign_req(struct ksmbd_work *work)
*/
void smb2_set_sign_rsp(struct ksmbd_work *work)
{
- struct smb2_hdr *hdr, *hdr_org;
+ struct smb2_hdr *hdr;
struct smb2_hdr *req_hdr;
char signature[SMB2_HMACSHA256_SIZE];
struct kvec iov[2];
size_t len;
int n_vec = 1;
- hdr_org = hdr = work->response_buf;
+ hdr = smb2_get_msg(work->response_buf);
if (work->next_smb2_rsp_hdr_off)
hdr = ksmbd_resp_buf_next(work);
req_hdr = ksmbd_req_buf_next(work);
if (!work->next_smb2_rsp_hdr_off) {
- len = get_rfc1002_len(hdr_org);
+ len = get_rfc1002_len(work->response_buf);
if (req_hdr->NextCommand)
len = ALIGN(len, 8);
} else {
- len = get_rfc1002_len(hdr_org) - work->next_smb2_rsp_hdr_off;
+ len = get_rfc1002_len(work->response_buf) -
+ work->next_smb2_rsp_hdr_off;
len = ALIGN(len, 8);
}
@@ -8261,23 +8269,23 @@ int smb3_check_sign_req(struct ksmbd_work *work)
{
struct ksmbd_conn *conn = work->conn;
char *signing_key;
- struct smb2_hdr *hdr, *hdr_org;
+ struct smb2_hdr *hdr;
struct channel *chann;
char signature_req[SMB2_SIGNATURE_SIZE];
char signature[SMB2_CMACAES_SIZE];
struct kvec iov[1];
size_t len;
- hdr_org = hdr = work->request_buf;
+ hdr = smb2_get_msg(work->request_buf);
if (work->next_smb2_rcv_hdr_off)
hdr = ksmbd_req_buf_next(work);
if (!hdr->NextCommand && !work->next_smb2_rcv_hdr_off)
- len = be32_to_cpu(hdr_org->smb2_buf_length);
+ len = get_rfc1002_len(work->request_buf);
else if (hdr->NextCommand)
len = le32_to_cpu(hdr->NextCommand);
else
- len = be32_to_cpu(hdr_org->smb2_buf_length) -
+ len = get_rfc1002_len(work->request_buf) -
work->next_smb2_rcv_hdr_off;
if (le16_to_cpu(hdr->Command) == SMB2_SESSION_SETUP_HE) {
@@ -8318,8 +8326,7 @@ int smb3_check_sign_req(struct ksmbd_work *work)
void smb3_set_sign_rsp(struct ksmbd_work *work)
{
struct ksmbd_conn *conn = work->conn;
- struct smb2_hdr *req_hdr;
- struct smb2_hdr *hdr, *hdr_org;
+ struct smb2_hdr *req_hdr, *hdr;
struct channel *chann;
char signature[SMB2_CMACAES_SIZE];
struct kvec iov[2];
@@ -8327,18 +8334,19 @@ void smb3_set_sign_rsp(struct ksmbd_work *work)
size_t len;
char *signing_key;
- hdr_org = hdr = work->response_buf;
+ hdr = smb2_get_msg(work->response_buf);
if (work->next_smb2_rsp_hdr_off)
hdr = ksmbd_resp_buf_next(work);
req_hdr = ksmbd_req_buf_next(work);
if (!work->next_smb2_rsp_hdr_off) {
- len = get_rfc1002_len(hdr_org);
+ len = get_rfc1002_len(work->response_buf);
if (req_hdr->NextCommand)
len = ALIGN(len, 8);
} else {
- len = get_rfc1002_len(hdr_org) - work->next_smb2_rsp_hdr_off;
+ len = get_rfc1002_len(work->response_buf) -
+ work->next_smb2_rsp_hdr_off;
len = ALIGN(len, 8);
}
@@ -8391,7 +8399,7 @@ void smb3_preauth_hash_rsp(struct ksmbd_work *work)
if (le16_to_cpu(req->Command) == SMB2_NEGOTIATE_HE &&
conn->preauth_info)
- ksmbd_gen_preauth_integrity_hash(conn, (char *)rsp,
+ ksmbd_gen_preauth_integrity_hash(conn, work->response_buf,
conn->preauth_info->Preauth_HashValue);
if (le16_to_cpu(rsp->Command) == SMB2_SESSION_SETUP_HE && sess) {
@@ -8409,35 +8417,34 @@ void smb3_preauth_hash_rsp(struct ksmbd_work *work)
if (!hash_value)
return;
}
- ksmbd_gen_preauth_integrity_hash(conn, (char *)rsp,
+ ksmbd_gen_preauth_integrity_hash(conn, work->response_buf,
hash_value);
}
}
-static void fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, char *old_buf,
- __le16 cipher_type)
+static void fill_transform_hdr(void *tr_buf, char *old_buf, __le16 cipher_type)
{
- struct smb2_hdr *hdr = (struct smb2_hdr *)old_buf;
+ struct smb2_transform_hdr *tr_hdr = tr_buf + 4;
+ struct smb2_hdr *hdr = smb2_get_msg(old_buf);
unsigned int orig_len = get_rfc1002_len(old_buf);
- memset(tr_hdr, 0, sizeof(struct smb2_transform_hdr));
+ memset(tr_buf, 0, sizeof(struct smb2_transform_hdr) + 4);
tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM;
tr_hdr->OriginalMessageSize = cpu_to_le32(orig_len);
- tr_hdr->Flags = cpu_to_le16(0x01);
+ tr_hdr->Flags = cpu_to_le16(TRANSFORM_FLAG_ENCRYPTED);
if (cipher_type == SMB2_ENCRYPTION_AES128_GCM ||
cipher_type == SMB2_ENCRYPTION_AES256_GCM)
get_random_bytes(&tr_hdr->Nonce, SMB3_AES_GCM_NONCE);
else
get_random_bytes(&tr_hdr->Nonce, SMB3_AES_CCM_NONCE);
memcpy(&tr_hdr->SessionId, &hdr->SessionId, 8);
- inc_rfc1001_len(tr_hdr, sizeof(struct smb2_transform_hdr) - 4);
- inc_rfc1001_len(tr_hdr, orig_len);
+ inc_rfc1001_len(tr_buf, sizeof(struct smb2_transform_hdr));
+ inc_rfc1001_len(tr_buf, orig_len);
}
int smb3_encrypt_resp(struct ksmbd_work *work)
{
char *buf = work->response_buf;
- struct smb2_transform_hdr *tr_hdr;
struct kvec iov[3];
int rc = -ENOMEM;
int buf_size = 0, rq_nvec = 2 + (work->aux_payload_sz ? 1 : 0);
@@ -8445,15 +8452,15 @@ int smb3_encrypt_resp(struct ksmbd_work *work)
if (ARRAY_SIZE(iov) < rq_nvec)
return -ENOMEM;
- tr_hdr = kzalloc(sizeof(struct smb2_transform_hdr), GFP_KERNEL);
- if (!tr_hdr)
+ work->tr_buf = kzalloc(sizeof(struct smb2_transform_hdr) + 4, GFP_KERNEL);
+ if (!work->tr_buf)
return rc;
/* fill transform header */
- fill_transform_hdr(tr_hdr, buf, work->conn->cipher_type);
+ fill_transform_hdr(work->tr_buf, buf, work->conn->cipher_type);
- iov[0].iov_base = tr_hdr;
- iov[0].iov_len = sizeof(struct smb2_transform_hdr);
+ iov[0].iov_base = work->tr_buf;
+ iov[0].iov_len = sizeof(struct smb2_transform_hdr) + 4;
buf_size += iov[0].iov_len - 4;
iov[1].iov_base = buf + 4;
@@ -8473,15 +8480,14 @@ int smb3_encrypt_resp(struct ksmbd_work *work)
return rc;
memmove(buf, iov[1].iov_base, iov[1].iov_len);
- tr_hdr->smb2_buf_length = cpu_to_be32(buf_size);
- work->tr_buf = tr_hdr;
+ *(__be32 *)work->tr_buf = cpu_to_be32(buf_size);
return rc;
}
bool smb3_is_transform_hdr(void *buf)
{
- struct smb2_transform_hdr *trhdr = buf;
+ struct smb2_transform_hdr *trhdr = smb2_get_msg(buf);
return trhdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM;
}
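Encrypted responses are now assembled from a separately allocated work->tr_buf that carries its own 4-byte transport prefix in front of the transform header, followed by the ciphertext iovecs; the response's original prefix is dropped from the wire (the data iovec starts at buf + 4). The sketch below only models the prefix accounting that fill_transform_hdr() performs; the 52-byte header size is an assumption for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

#define TRANSFORM_HDR_SIZE 52   /* illustrative sizeof(struct smb2_transform_hdr) */

int main(void)
{
        /* work->tr_buf in the sketch: 4-byte prefix + transform header. */
        unsigned char tr_buf[4 + TRANSFORM_HDR_SIZE] = { 0 };
        unsigned int plaintext_len = 180;       /* PDU being encrypted */
        uint32_t wire_len, be;

        /* The prefix advertises the transform header plus the encrypted
         * payload, mirroring the two inc_rfc1001_len() calls above.
         */
        wire_len = TRANSFORM_HDR_SIZE + plaintext_len;
        be = htonl(wire_len);
        memcpy(tr_buf, &be, sizeof(be));

        memcpy(&be, tr_buf, sizeof(be));
        printf("transport prefix advertises %u bytes\n", (unsigned int)ntohl(be));
        return 0;
}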
@@ -8491,12 +8497,10 @@ int smb3_decrypt_req(struct ksmbd_work *work)
struct ksmbd_conn *conn = work->conn;
struct ksmbd_session *sess;
char *buf = work->request_buf;
- struct smb2_hdr *hdr;
unsigned int pdu_length = get_rfc1002_len(buf);
struct kvec iov[2];
- int buf_data_size = pdu_length + 4 -
- sizeof(struct smb2_transform_hdr);
- struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
+ int buf_data_size = pdu_length - sizeof(struct smb2_transform_hdr);
+ struct smb2_transform_hdr *tr_hdr = smb2_get_msg(buf);
int rc = 0;
if (buf_data_size < sizeof(struct smb2_hdr)) {
@@ -8518,16 +8522,15 @@ int smb3_decrypt_req(struct ksmbd_work *work)
}
iov[0].iov_base = buf;
- iov[0].iov_len = sizeof(struct smb2_transform_hdr);
- iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr);
+ iov[0].iov_len = sizeof(struct smb2_transform_hdr) + 4;
+ iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr) + 4;
iov[1].iov_len = buf_data_size;
rc = ksmbd_crypt_message(conn, iov, 2, 0);
if (rc)
return rc;
memmove(buf + 4, iov[1].iov_base, buf_data_size);
- hdr = (struct smb2_hdr *)buf;
- hdr->smb2_buf_length = cpu_to_be32(buf_data_size);
+ *(__be32 *)buf = cpu_to_be32(buf_data_size);
return rc;
}
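On the receive side the advertised PDU length covers the transform header plus ciphertext, so the decrypted payload size is simply the difference, with a sanity check that at least a bare SMB2 header fits; with the prefix no longer counted inside the transform header struct, the old "+ 4" correction disappears. A compact sketch of that arithmetic (both sizes are illustrative):

#include <stdio.h>

#define TRANSFORM_HDR_SIZE 52   /* illustrative sizeof(struct smb2_transform_hdr) */
#define SMB2_HDR_SIZE      64   /* illustrative sizeof(struct smb2_hdr) */

static int decrypt_payload_len(unsigned int pdu_length)
{
        int buf_data_size = (int)pdu_length - TRANSFORM_HDR_SIZE;

        if (buf_data_size < SMB2_HDR_SIZE)
                return -1;      /* too small to hold even a bare SMB2 header */
        return buf_data_size;
}

int main(void)
{
        printf("%d\n", decrypt_payload_len(52 + 180));  /* 180 */
        printf("%d\n", decrypt_payload_len(52 + 16));   /* -1  */
        return 0;
}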
@@ -8535,7 +8538,7 @@ int smb3_decrypt_req(struct ksmbd_work *work)
bool smb3_11_final_sess_setup_resp(struct ksmbd_work *work)
{
struct ksmbd_conn *conn = work->conn;
- struct smb2_hdr *rsp = work->response_buf;
+ struct smb2_hdr *rsp = smb2_get_msg(work->response_buf);
if (conn->dialect < SMB30_PROT_ID)
return false;
diff --git a/fs/ksmbd/smb2pdu.h b/fs/ksmbd/smb2pdu.h
index ff5a2f01d34ae5..4a3e4339d4c4f5 100644
--- a/fs/ksmbd/smb2pdu.h
+++ b/fs/ksmbd/smb2pdu.h
@@ -10,60 +10,6 @@
#include "ntlmssp.h"
#include "smbacl.h"
-/*
- * Note that, due to trying to use names similar to the protocol specifications,
- * there are many mixed case field names in the structures below. Although
- * this does not match typical Linux kernel style, it is necessary to be
- * able to match against the protocol specfication.
- *
- * SMB2 commands
- * Some commands have minimal (wct=0,bcc=0), or uninteresting, responses
- * (ie no useful data other than the SMB error code itself) and are marked such.
- * Knowing this helps avoid response buffer allocations and copy in some cases.
- */
-
-/* List of commands in host endian */
-#define SMB2_NEGOTIATE_HE 0x0000
-#define SMB2_SESSION_SETUP_HE 0x0001
-#define SMB2_LOGOFF_HE 0x0002 /* trivial request/resp */
-#define SMB2_TREE_CONNECT_HE 0x0003
-#define SMB2_TREE_DISCONNECT_HE 0x0004 /* trivial req/resp */
-#define SMB2_CREATE_HE 0x0005
-#define SMB2_CLOSE_HE 0x0006
-#define SMB2_FLUSH_HE 0x0007 /* trivial resp */
-#define SMB2_READ_HE 0x0008
-#define SMB2_WRITE_HE 0x0009
-#define SMB2_LOCK_HE 0x000A
-#define SMB2_IOCTL_HE 0x000B
-#define SMB2_CANCEL_HE 0x000C
-#define SMB2_ECHO_HE 0x000D
-#define SMB2_QUERY_DIRECTORY_HE 0x000E
-#define SMB2_CHANGE_NOTIFY_HE 0x000F
-#define SMB2_QUERY_INFO_HE 0x0010
-#define SMB2_SET_INFO_HE 0x0011
-#define SMB2_OPLOCK_BREAK_HE 0x0012
-
-/* The same list in little endian */
-#define SMB2_NEGOTIATE cpu_to_le16(SMB2_NEGOTIATE_HE)
-#define SMB2_SESSION_SETUP cpu_to_le16(SMB2_SESSION_SETUP_HE)
-#define SMB2_LOGOFF cpu_to_le16(SMB2_LOGOFF_HE)
-#define SMB2_TREE_CONNECT cpu_to_le16(SMB2_TREE_CONNECT_HE)
-#define SMB2_TREE_DISCONNECT cpu_to_le16(SMB2_TREE_DISCONNECT_HE)
-#define SMB2_CREATE cpu_to_le16(SMB2_CREATE_HE)
-#define SMB2_CLOSE cpu_to_le16(SMB2_CLOSE_HE)
-#define SMB2_FLUSH cpu_to_le16(SMB2_FLUSH_HE)
-#define SMB2_READ cpu_to_le16(SMB2_READ_HE)
-#define SMB2_WRITE cpu_to_le16(SMB2_WRITE_HE)
-#define SMB2_LOCK cpu_to_le16(SMB2_LOCK_HE)
-#define SMB2_IOCTL cpu_to_le16(SMB2_IOCTL_HE)
-#define SMB2_CANCEL cpu_to_le16(SMB2_CANCEL_HE)
-#define SMB2_ECHO cpu_to_le16(SMB2_ECHO_HE)
-#define SMB2_QUERY_DIRECTORY cpu_to_le16(SMB2_QUERY_DIRECTORY_HE)
-#define SMB2_CHANGE_NOTIFY cpu_to_le16(SMB2_CHANGE_NOTIFY_HE)
-#define SMB2_QUERY_INFO cpu_to_le16(SMB2_QUERY_INFO_HE)
-#define SMB2_SET_INFO cpu_to_le16(SMB2_SET_INFO_HE)
-#define SMB2_OPLOCK_BREAK cpu_to_le16(SMB2_OPLOCK_BREAK_HE)
-
/*Create Action Flags*/
#define FILE_SUPERSEDED 0x00000000
#define FILE_OPENED 0x00000001
@@ -96,9 +42,6 @@
/* SMB2 Max Credits */
#define SMB2_MAX_CREDITS 8192
-#define SMB2_CLIENT_GUID_SIZE 16
-#define SMB2_CREATE_GUID_SIZE 16
-
/* Maximum buffer size value we can send with 1 credit */
#define SMB2_MAX_BUFFER_SIZE 65536
@@ -107,9 +50,6 @@
/* BB FIXME - analyze following length BB */
#define MAX_SMB2_HDR_SIZE 0x78 /* 4 len + 64 hdr + (2*24 wct) + 2 bct + 2 pad */
-#define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe) /* 'B''M''S' */
-#define SMB2_TRANSFORM_PROTO_NUM cpu_to_le32(0x424d53fd)
-
#define SMB21_DEFAULT_IOSIZE (1024 * 1024)
#define SMB3_DEFAULT_IOSIZE (4 * 1024 * 1024)
#define SMB3_DEFAULT_TRANS_SIZE (1024 * 1024)
@@ -117,78 +57,6 @@
#define SMB3_MAX_IOSIZE (8 * 1024 * 1024)
/*
- * SMB2 Header Definition
- *
- * "MBZ" : Must be Zero
- * "BB" : BugBug, Something to check/review/analyze later
- * "PDU" : "Protocol Data Unit" (ie a network "frame")
- *
- */
-
-#define __SMB2_HEADER_STRUCTURE_SIZE 64
-#define SMB2_HEADER_STRUCTURE_SIZE \
- cpu_to_le16(__SMB2_HEADER_STRUCTURE_SIZE)
-
-struct smb2_hdr {
- __be32 smb2_buf_length; /* big endian on wire */
- /*
- * length is only two or three bytes - with
- * one or two byte type preceding it that MBZ
- */
- __le32 ProtocolId; /* 0xFE 'S' 'M' 'B' */
- __le16 StructureSize; /* 64 */
- __le16 CreditCharge; /* MBZ */
- __le32 Status; /* Error from server */
- __le16 Command;
- __le16 CreditRequest; /* CreditResponse */
- __le32 Flags;
- __le32 NextCommand;
- __le64 MessageId;
- union {
- struct {
- __le32 ProcessId;
- __le32 TreeId;
- } __packed SyncId;
- __le64 AsyncId;
- } __packed Id;
- __le64 SessionId;
- __u8 Signature[16];
-} __packed;
-
-struct smb2_pdu {
- struct smb2_hdr hdr;
- __le16 StructureSize2; /* size of wct area (varies, request specific) */
-} __packed;
-
-#define SMB3_AES_CCM_NONCE 11
-#define SMB3_AES_GCM_NONCE 12
-
-struct smb2_transform_hdr {
- __be32 smb2_buf_length; /* big endian on wire */
- /*
- * length is only two or three bytes - with
- * one or two byte type preceding it that MBZ
- */
- __le32 ProtocolId; /* 0xFD 'S' 'M' 'B' */
- __u8 Signature[16];
- __u8 Nonce[16];
- __le32 OriginalMessageSize;
- __u16 Reserved1;
- __le16 Flags; /* EncryptionAlgorithm */
- __le64 SessionId;
-} __packed;
-
-/*
- * SMB2 flag definitions
- */
-#define SMB2_FLAGS_SERVER_TO_REDIR cpu_to_le32(0x00000001)
-#define SMB2_FLAGS_ASYNC_COMMAND cpu_to_le32(0x00000002)
-#define SMB2_FLAGS_RELATED_OPERATIONS cpu_to_le32(0x00000004)
-#define SMB2_FLAGS_SIGNED cpu_to_le32(0x00000008)
-#define SMB2_FLAGS_DFS_OPERATIONS cpu_to_le32(0x10000000)
-#define SMB2_FLAGS_REPLAY_OPERATIONS cpu_to_le32(0x20000000)
-
-/*
* Definitions for SMB2 Protocol Data Units (network frames)
*
* See MS-SMB2.PDF specification for protocol details.
@@ -209,425 +77,30 @@ struct smb2_err_rsp {
__u8 ErrorData[1]; /* variable length */
} __packed;
-struct smb2_negotiate_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 36 */
- __le16 DialectCount;
- __le16 SecurityMode;
- __le16 Reserved; /* MBZ */
- __le32 Capabilities;
- __u8 ClientGUID[SMB2_CLIENT_GUID_SIZE];
- /* In SMB3.02 and earlier next three were MBZ le64 ClientStartTime */
- __le32 NegotiateContextOffset; /* SMB3.1.1 only. MBZ earlier */
- __le16 NegotiateContextCount; /* SMB3.1.1 only. MBZ earlier */
- __le16 Reserved2;
- __le16 Dialects[1]; /* One dialect (vers=) at a time for now */
-} __packed;
-
-/* SecurityMode flags */
-#define SMB2_NEGOTIATE_SIGNING_ENABLED_LE cpu_to_le16(0x0001)
-#define SMB2_NEGOTIATE_SIGNING_REQUIRED 0x0002
-#define SMB2_NEGOTIATE_SIGNING_REQUIRED_LE cpu_to_le16(0x0002)
-/* Capabilities flags */
-#define SMB2_GLOBAL_CAP_DFS 0x00000001
-#define SMB2_GLOBAL_CAP_LEASING 0x00000002 /* Resp only New to SMB2.1 */
-#define SMB2_GLOBAL_CAP_LARGE_MTU 0X00000004 /* Resp only New to SMB2.1 */
-#define SMB2_GLOBAL_CAP_MULTI_CHANNEL 0x00000008 /* New to SMB3 */
-#define SMB2_GLOBAL_CAP_PERSISTENT_HANDLES 0x00000010 /* New to SMB3 */
-#define SMB2_GLOBAL_CAP_DIRECTORY_LEASING 0x00000020 /* New to SMB3 */
-#define SMB2_GLOBAL_CAP_ENCRYPTION 0x00000040 /* New to SMB3 */
-/* Internal types */
-#define SMB2_NT_FIND 0x00100000
-#define SMB2_LARGE_FILES 0x00200000
-
-#define SMB311_SALT_SIZE 32
-/* Hash Algorithm Types */
-#define SMB2_PREAUTH_INTEGRITY_SHA512 cpu_to_le16(0x0001)
-
-#define PREAUTH_HASHVALUE_SIZE 64
-
struct preauth_integrity_info {
/* PreAuth integrity Hash ID */
__le16 Preauth_HashId;
/* PreAuth integrity Hash Value */
- __u8 Preauth_HashValue[PREAUTH_HASHVALUE_SIZE];
+ __u8 Preauth_HashValue[SMB2_PREAUTH_HASH_SIZE];
};
-/* offset is sizeof smb2_negotiate_rsp - 4 but rounded up to 8 bytes. */
+/* offset is sizeof smb2_negotiate_rsp but rounded up to 8 bytes. */
#ifdef CONFIG_SMB_SERVER_KERBEROS5
-/* sizeof(struct smb2_negotiate_rsp) - 4 =
+/* sizeof(struct smb2_negotiate_rsp) =
* header(64) + response(64) + GSS_LENGTH(96) + GSS_PADDING(0)
*/
#define OFFSET_OF_NEG_CONTEXT 0xe0
#else
-/* sizeof(struct smb2_negotiate_rsp) - 4 =
+/* sizeof(struct smb2_negotiate_rsp) =
* header(64) + response(64) + GSS_LENGTH(74) + GSS_PADDING(6)
*/
#define OFFSET_OF_NEG_CONTEXT 0xd0
#endif
-#define SMB2_PREAUTH_INTEGRITY_CAPABILITIES cpu_to_le16(1)
-#define SMB2_ENCRYPTION_CAPABILITIES cpu_to_le16(2)
-#define SMB2_COMPRESSION_CAPABILITIES cpu_to_le16(3)
-#define SMB2_NETNAME_NEGOTIATE_CONTEXT_ID cpu_to_le16(5)
-#define SMB2_SIGNING_CAPABILITIES cpu_to_le16(8)
-#define SMB2_POSIX_EXTENSIONS_AVAILABLE cpu_to_le16(0x100)
-
-struct smb2_neg_context {
- __le16 ContextType;
- __le16 DataLength;
- __le32 Reserved;
- /* Followed by array of data */
-} __packed;
-
-struct smb2_preauth_neg_context {
- __le16 ContextType; /* 1 */
- __le16 DataLength;
- __le32 Reserved;
- __le16 HashAlgorithmCount; /* 1 */
- __le16 SaltLength;
- __le16 HashAlgorithms; /* HashAlgorithms[0] since only one defined */
- __u8 Salt[SMB311_SALT_SIZE];
-} __packed;
-
-/* Encryption Algorithms Ciphers */
-#define SMB2_ENCRYPTION_AES128_CCM cpu_to_le16(0x0001)
-#define SMB2_ENCRYPTION_AES128_GCM cpu_to_le16(0x0002)
-#define SMB2_ENCRYPTION_AES256_CCM cpu_to_le16(0x0003)
-#define SMB2_ENCRYPTION_AES256_GCM cpu_to_le16(0x0004)
-
-struct smb2_encryption_neg_context {
- __le16 ContextType; /* 2 */
- __le16 DataLength;
- __le32 Reserved;
- /* CipherCount usally 2, but can be 3 when AES256-GCM enabled */
- __le16 CipherCount; /* AES-128-GCM and AES-128-CCM by default */
- __le16 Ciphers[];
-} __packed;
-
-#define SMB3_COMPRESS_NONE cpu_to_le16(0x0000)
-#define SMB3_COMPRESS_LZNT1 cpu_to_le16(0x0001)
-#define SMB3_COMPRESS_LZ77 cpu_to_le16(0x0002)
-#define SMB3_COMPRESS_LZ77_HUFF cpu_to_le16(0x0003)
-
-struct smb2_compression_ctx {
- __le16 ContextType; /* 3 */
- __le16 DataLength;
- __le32 Reserved;
- __le16 CompressionAlgorithmCount;
- __u16 Padding;
- __le32 Reserved1;
- __le16 CompressionAlgorithms[];
-} __packed;
-
-#define POSIX_CTXT_DATA_LEN 16
-struct smb2_posix_neg_context {
- __le16 ContextType; /* 0x100 */
- __le16 DataLength;
- __le32 Reserved;
- __u8 Name[16]; /* POSIX ctxt GUID 93AD25509CB411E7B42383DE968BCD7C */
-} __packed;
-
-struct smb2_netname_neg_context {
- __le16 ContextType; /* 0x100 */
- __le16 DataLength;
- __le32 Reserved;
- __le16 NetName[]; /* hostname of target converted to UCS-2 */
-} __packed;
-
-/* Signing algorithms */
-#define SIGNING_ALG_HMAC_SHA256 cpu_to_le16(0)
-#define SIGNING_ALG_AES_CMAC cpu_to_le16(1)
-#define SIGNING_ALG_AES_GMAC cpu_to_le16(2)
-
-struct smb2_signing_capabilities {
- __le16 ContextType; /* 8 */
- __le16 DataLength;
- __le32 Reserved;
- __le16 SigningAlgorithmCount;
- __le16 SigningAlgorithms[];
-} __packed;
-
-struct smb2_negotiate_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 65 */
- __le16 SecurityMode;
- __le16 DialectRevision;
- __le16 NegotiateContextCount; /* Prior to SMB3.1.1 was Reserved & MBZ */
- __u8 ServerGUID[16];
- __le32 Capabilities;
- __le32 MaxTransactSize;
- __le32 MaxReadSize;
- __le32 MaxWriteSize;
- __le64 SystemTime; /* MBZ */
- __le64 ServerStartTime;
- __le16 SecurityBufferOffset;
- __le16 SecurityBufferLength;
- __le32 NegotiateContextOffset; /* Pre:SMB3.1.1 was reserved/ignored */
- __u8 Buffer[1]; /* variable length GSS security buffer */
-} __packed;
-
-/* Flags */
-#define SMB2_SESSION_REQ_FLAG_BINDING 0x01
-#define SMB2_SESSION_REQ_FLAG_ENCRYPT_DATA 0x04
-
#define SMB2_SESSION_EXPIRED (0)
#define SMB2_SESSION_IN_PROGRESS BIT(0)
#define SMB2_SESSION_VALID BIT(1)
-/* Flags */
-#define SMB2_SESSION_REQ_FLAG_BINDING 0x01
-#define SMB2_SESSION_REQ_FLAG_ENCRYPT_DATA 0x04
-
-struct smb2_sess_setup_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 25 */
- __u8 Flags;
- __u8 SecurityMode;
- __le32 Capabilities;
- __le32 Channel;
- __le16 SecurityBufferOffset;
- __le16 SecurityBufferLength;
- __le64 PreviousSessionId;
- __u8 Buffer[1]; /* variable length GSS security buffer */
-} __packed;
-
-/* Flags/Reserved for SMB3.1.1 */
-#define SMB2_SHAREFLAG_CLUSTER_RECONNECT 0x0001
-
-/* Currently defined SessionFlags */
-#define SMB2_SESSION_FLAG_IS_GUEST_LE cpu_to_le16(0x0001)
-#define SMB2_SESSION_FLAG_IS_NULL_LE cpu_to_le16(0x0002)
-#define SMB2_SESSION_FLAG_ENCRYPT_DATA_LE cpu_to_le16(0x0004)
-struct smb2_sess_setup_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 9 */
- __le16 SessionFlags;
- __le16 SecurityBufferOffset;
- __le16 SecurityBufferLength;
- __u8 Buffer[1]; /* variable length GSS security buffer */
-} __packed;
-
-struct smb2_logoff_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 4 */
- __le16 Reserved;
-} __packed;
-
-struct smb2_logoff_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 4 */
- __le16 Reserved;
-} __packed;
-
-struct smb2_tree_connect_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 9 */
- __le16 Reserved; /* Flags in SMB3.1.1 */
- __le16 PathOffset;
- __le16 PathLength;
- __u8 Buffer[1]; /* variable length */
-} __packed;
-
-struct smb2_tree_connect_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 16 */
- __u8 ShareType; /* see below */
- __u8 Reserved;
- __le32 ShareFlags; /* see below */
- __le32 Capabilities; /* see below */
- __le32 MaximalAccess;
-} __packed;
-
-/* Possible ShareType values */
-#define SMB2_SHARE_TYPE_DISK 0x01
-#define SMB2_SHARE_TYPE_PIPE 0x02
-#define SMB2_SHARE_TYPE_PRINT 0x03
-
-/*
- * Possible ShareFlags - exactly one and only one of the first 4 caching flags
- * must be set (any of the remaining, SHI1005, flags may be set individually
- * or in combination.
- */
-#define SMB2_SHAREFLAG_MANUAL_CACHING 0x00000000
-#define SMB2_SHAREFLAG_AUTO_CACHING 0x00000010
-#define SMB2_SHAREFLAG_VDO_CACHING 0x00000020
-#define SMB2_SHAREFLAG_NO_CACHING 0x00000030
-#define SHI1005_FLAGS_DFS 0x00000001
-#define SHI1005_FLAGS_DFS_ROOT 0x00000002
-#define SHI1005_FLAGS_RESTRICT_EXCLUSIVE_OPENS 0x00000100
-#define SHI1005_FLAGS_FORCE_SHARED_DELETE 0x00000200
-#define SHI1005_FLAGS_ALLOW_NAMESPACE_CACHING 0x00000400
-#define SHI1005_FLAGS_ACCESS_BASED_DIRECTORY_ENUM 0x00000800
-#define SHI1005_FLAGS_FORCE_LEVELII_OPLOCK 0x00001000
-#define SHI1005_FLAGS_ENABLE_HASH 0x00002000
-
-/* Possible share capabilities */
-#define SMB2_SHARE_CAP_DFS cpu_to_le32(0x00000008)
-
-struct smb2_tree_disconnect_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 4 */
- __le16 Reserved;
-} __packed;
-
-struct smb2_tree_disconnect_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 4 */
- __le16 Reserved;
-} __packed;
-
-#define ATTR_READONLY_LE cpu_to_le32(ATTR_READONLY)
-#define ATTR_HIDDEN_LE cpu_to_le32(ATTR_HIDDEN)
-#define ATTR_SYSTEM_LE cpu_to_le32(ATTR_SYSTEM)
-#define ATTR_DIRECTORY_LE cpu_to_le32(ATTR_DIRECTORY)
-#define ATTR_ARCHIVE_LE cpu_to_le32(ATTR_ARCHIVE)
-#define ATTR_NORMAL_LE cpu_to_le32(ATTR_NORMAL)
-#define ATTR_TEMPORARY_LE cpu_to_le32(ATTR_TEMPORARY)
-#define ATTR_SPARSE_FILE_LE cpu_to_le32(ATTR_SPARSE)
-#define ATTR_REPARSE_POINT_LE cpu_to_le32(ATTR_REPARSE)
-#define ATTR_COMPRESSED_LE cpu_to_le32(ATTR_COMPRESSED)
-#define ATTR_OFFLINE_LE cpu_to_le32(ATTR_OFFLINE)
-#define ATTR_NOT_CONTENT_INDEXED_LE cpu_to_le32(ATTR_NOT_CONTENT_INDEXED)
-#define ATTR_ENCRYPTED_LE cpu_to_le32(ATTR_ENCRYPTED)
-#define ATTR_INTEGRITY_STREAML_LE cpu_to_le32(0x00008000)
-#define ATTR_NO_SCRUB_DATA_LE cpu_to_le32(0x00020000)
-#define ATTR_MASK_LE cpu_to_le32(0x00007FB7)
-
-/* Oplock levels */
-#define SMB2_OPLOCK_LEVEL_NONE 0x00
-#define SMB2_OPLOCK_LEVEL_II 0x01
-#define SMB2_OPLOCK_LEVEL_EXCLUSIVE 0x08
-#define SMB2_OPLOCK_LEVEL_BATCH 0x09
-#define SMB2_OPLOCK_LEVEL_LEASE 0xFF
-/* Non-spec internal type */
-#define SMB2_OPLOCK_LEVEL_NOCHANGE 0x99
-
-/* Desired Access Flags */
-#define FILE_READ_DATA_LE cpu_to_le32(0x00000001)
-#define FILE_LIST_DIRECTORY_LE cpu_to_le32(0x00000001)
-#define FILE_WRITE_DATA_LE cpu_to_le32(0x00000002)
-#define FILE_ADD_FILE_LE cpu_to_le32(0x00000002)
-#define FILE_APPEND_DATA_LE cpu_to_le32(0x00000004)
-#define FILE_ADD_SUBDIRECTORY_LE cpu_to_le32(0x00000004)
-#define FILE_READ_EA_LE cpu_to_le32(0x00000008)
-#define FILE_WRITE_EA_LE cpu_to_le32(0x00000010)
-#define FILE_EXECUTE_LE cpu_to_le32(0x00000020)
-#define FILE_TRAVERSE_LE cpu_to_le32(0x00000020)
-#define FILE_DELETE_CHILD_LE cpu_to_le32(0x00000040)
-#define FILE_READ_ATTRIBUTES_LE cpu_to_le32(0x00000080)
-#define FILE_WRITE_ATTRIBUTES_LE cpu_to_le32(0x00000100)
-#define FILE_DELETE_LE cpu_to_le32(0x00010000)
-#define FILE_READ_CONTROL_LE cpu_to_le32(0x00020000)
-#define FILE_WRITE_DAC_LE cpu_to_le32(0x00040000)
-#define FILE_WRITE_OWNER_LE cpu_to_le32(0x00080000)
-#define FILE_SYNCHRONIZE_LE cpu_to_le32(0x00100000)
-#define FILE_ACCESS_SYSTEM_SECURITY_LE cpu_to_le32(0x01000000)
-#define FILE_MAXIMAL_ACCESS_LE cpu_to_le32(0x02000000)
-#define FILE_GENERIC_ALL_LE cpu_to_le32(0x10000000)
-#define FILE_GENERIC_EXECUTE_LE cpu_to_le32(0x20000000)
-#define FILE_GENERIC_WRITE_LE cpu_to_le32(0x40000000)
-#define FILE_GENERIC_READ_LE cpu_to_le32(0x80000000)
-#define DESIRED_ACCESS_MASK cpu_to_le32(0xF21F01FF)
-
-/* ShareAccess Flags */
-#define FILE_SHARE_READ_LE cpu_to_le32(0x00000001)
-#define FILE_SHARE_WRITE_LE cpu_to_le32(0x00000002)
-#define FILE_SHARE_DELETE_LE cpu_to_le32(0x00000004)
-#define FILE_SHARE_ALL_LE cpu_to_le32(0x00000007)
-
-/* CreateDisposition Flags */
-#define FILE_SUPERSEDE_LE cpu_to_le32(0x00000000)
-#define FILE_OPEN_LE cpu_to_le32(0x00000001)
-#define FILE_CREATE_LE cpu_to_le32(0x00000002)
-#define FILE_OPEN_IF_LE cpu_to_le32(0x00000003)
-#define FILE_OVERWRITE_LE cpu_to_le32(0x00000004)
-#define FILE_OVERWRITE_IF_LE cpu_to_le32(0x00000005)
-#define FILE_CREATE_MASK_LE cpu_to_le32(0x00000007)
-
-#define FILE_READ_DESIRED_ACCESS_LE (FILE_READ_DATA_LE | \
- FILE_READ_EA_LE | \
- FILE_GENERIC_READ_LE)
-#define FILE_WRITE_DESIRE_ACCESS_LE (FILE_WRITE_DATA_LE | \
- FILE_APPEND_DATA_LE | \
- FILE_WRITE_EA_LE | \
- FILE_WRITE_ATTRIBUTES_LE | \
- FILE_GENERIC_WRITE_LE)
-
-/* Impersonation Levels */
-#define IL_ANONYMOUS_LE cpu_to_le32(0x00000000)
-#define IL_IDENTIFICATION_LE cpu_to_le32(0x00000001)
-#define IL_IMPERSONATION_LE cpu_to_le32(0x00000002)
-#define IL_DELEGATE_LE cpu_to_le32(0x00000003)
-
-/* Create Context Values */
-#define SMB2_CREATE_EA_BUFFER "ExtA" /* extended attributes */
-#define SMB2_CREATE_SD_BUFFER "SecD" /* security descriptor */
-#define SMB2_CREATE_DURABLE_HANDLE_REQUEST "DHnQ"
-#define SMB2_CREATE_DURABLE_HANDLE_RECONNECT "DHnC"
-#define SMB2_CREATE_ALLOCATION_SIZE "AlSi"
-#define SMB2_CREATE_QUERY_MAXIMAL_ACCESS_REQUEST "MxAc"
-#define SMB2_CREATE_TIMEWARP_REQUEST "TWrp"
-#define SMB2_CREATE_QUERY_ON_DISK_ID "QFid"
-#define SMB2_CREATE_REQUEST_LEASE "RqLs"
-#define SMB2_CREATE_DURABLE_HANDLE_REQUEST_V2 "DH2Q"
-#define SMB2_CREATE_DURABLE_HANDLE_RECONNECT_V2 "DH2C"
-#define SMB2_CREATE_APP_INSTANCE_ID "\x45\xBC\xA6\x6A\xEF\xA7\xF7\x4A\x90\x08\xFA\x46\x2E\x14\x4D\x74"
- #define SMB2_CREATE_APP_INSTANCE_VERSION "\xB9\x82\xD0\xB7\x3B\x56\x07\x4F\xA0\x7B\x52\x4A\x81\x16\xA0\x10"
-#define SVHDX_OPEN_DEVICE_CONTEXT 0x83CE6F1AD851E0986E34401CC9BCFCE9
-#define SMB2_CREATE_TAG_POSIX "\x93\xAD\x25\x50\x9C\xB4\x11\xE7\xB4\x23\x83\xDE\x96\x8B\xCD\x7C"
-
-struct smb2_create_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 57 */
- __u8 SecurityFlags;
- __u8 RequestedOplockLevel;
- __le32 ImpersonationLevel;
- __le64 SmbCreateFlags;
- __le64 Reserved;
- __le32 DesiredAccess;
- __le32 FileAttributes;
- __le32 ShareAccess;
- __le32 CreateDisposition;
- __le32 CreateOptions;
- __le16 NameOffset;
- __le16 NameLength;
- __le32 CreateContextsOffset;
- __le32 CreateContextsLength;
- __u8 Buffer[0];
-} __packed;
-
-struct smb2_create_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 89 */
- __u8 OplockLevel;
- __u8 Reserved;
- __le32 CreateAction;
- __le64 CreationTime;
- __le64 LastAccessTime;
- __le64 LastWriteTime;
- __le64 ChangeTime;
- __le64 AllocationSize;
- __le64 EndofFile;
- __le32 FileAttributes;
- __le32 Reserved2;
- __le64 PersistentFileId;
- __le64 VolatileFileId;
- __le32 CreateContextsOffset;
- __le32 CreateContextsLength;
- __u8 Buffer[1];
-} __packed;
-
-struct create_context {
- __le32 Next;
- __le16 NameOffset;
- __le16 NameLength;
- __le16 Reserved;
- __le16 DataOffset;
- __le32 DataLength;
- __u8 Buffer[0];
-} __packed;
-
struct create_durable_req_v2 {
struct create_context ccontext;
__u8 Name[8];
@@ -743,22 +216,21 @@ struct create_posix_rsp {
#define SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE cpu_to_le32(0x02)
+#define SMB2_LEASE_KEY_SIZE 16
+
struct lease_context {
- __le64 LeaseKeyLow;
- __le64 LeaseKeyHigh;
+ __u8 LeaseKey[SMB2_LEASE_KEY_SIZE];
__le32 LeaseState;
__le32 LeaseFlags;
__le64 LeaseDuration;
} __packed;
struct lease_context_v2 {
- __le64 LeaseKeyLow;
- __le64 LeaseKeyHigh;
+ __u8 LeaseKey[SMB2_LEASE_KEY_SIZE];
__le32 LeaseState;
__le32 LeaseFlags;
__le64 LeaseDuration;
- __le64 ParentLeaseKeyLow;
- __le64 ParentLeaseKeyHigh;
+ __u8 ParentLeaseKey[SMB2_LEASE_KEY_SIZE];
__le16 Epoch;
__le16 Reserved;
} __packed;
@@ -776,114 +248,12 @@ struct create_lease_v2 {
__u8 Pad[4];
} __packed;
-/* Currently defined values for close flags */
-#define SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB cpu_to_le16(0x0001)
-struct smb2_close_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 24 */
- __le16 Flags;
- __le32 Reserved;
- __le64 PersistentFileId;
- __le64 VolatileFileId;
-} __packed;
-
-struct smb2_close_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* 60 */
- __le16 Flags;
- __le32 Reserved;
- __le64 CreationTime;
- __le64 LastAccessTime;
- __le64 LastWriteTime;
- __le64 ChangeTime;
- __le64 AllocationSize; /* Beginning of FILE_STANDARD_INFO equivalent */
- __le64 EndOfFile;
- __le32 Attributes;
-} __packed;
-
-struct smb2_flush_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 24 */
- __le16 Reserved1;
- __le32 Reserved2;
- __le64 PersistentFileId;
- __le64 VolatileFileId;
-} __packed;
-
-struct smb2_flush_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize;
- __le16 Reserved;
-} __packed;
-
struct smb2_buffer_desc_v1 {
__le64 offset;
__le32 token;
__le32 length;
} __packed;
-#define SMB2_CHANNEL_NONE cpu_to_le32(0x00000000)
-#define SMB2_CHANNEL_RDMA_V1 cpu_to_le32(0x00000001)
-#define SMB2_CHANNEL_RDMA_V1_INVALIDATE cpu_to_le32(0x00000002)
-
-struct smb2_read_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 49 */
- __u8 Padding; /* offset from start of SMB2 header to place read */
- __u8 Reserved;
- __le32 Length;
- __le64 Offset;
- __le64 PersistentFileId;
- __le64 VolatileFileId;
- __le32 MinimumCount;
- __le32 Channel; /* Reserved MBZ */
- __le32 RemainingBytes;
- __le16 ReadChannelInfoOffset; /* Reserved MBZ */
- __le16 ReadChannelInfoLength; /* Reserved MBZ */
- __u8 Buffer[1];
-} __packed;
-
-struct smb2_read_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 17 */
- __u8 DataOffset;
- __u8 Reserved;
- __le32 DataLength;
- __le32 DataRemaining;
- __u32 Reserved2;
- __u8 Buffer[1];
-} __packed;
-
-/* For write request Flags field below the following flag is defined: */
-#define SMB2_WRITEFLAG_WRITE_THROUGH 0x00000001
-
-struct smb2_write_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 49 */
- __le16 DataOffset; /* offset from start of SMB2 header to write data */
- __le32 Length;
- __le64 Offset;
- __le64 PersistentFileId;
- __le64 VolatileFileId;
- __le32 Channel; /* Reserved MBZ */
- __le32 RemainingBytes;
- __le16 WriteChannelInfoOffset; /* Reserved MBZ */
- __le16 WriteChannelInfoLength; /* Reserved MBZ */
- __le32 Flags;
- __u8 Buffer[1];
-} __packed;
-
-struct smb2_write_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 17 */
- __u8 DataOffset;
- __u8 Reserved;
- __le32 DataLength;
- __le32 DataRemaining;
- __u32 Reserved2;
- __u8 Buffer[1];
-} __packed;
-
#define SMB2_0_IOCTL_IS_FSCTL 0x00000001
struct duplicate_extents_to_file {
@@ -1033,43 +403,6 @@ struct reparse_data_buffer {
__u8 DataBuffer[]; /* Variable Length */
} __packed;
-/* Completion Filter flags for Notify */
-#define FILE_NOTIFY_CHANGE_FILE_NAME 0x00000001
-#define FILE_NOTIFY_CHANGE_DIR_NAME 0x00000002
-#define FILE_NOTIFY_CHANGE_NAME 0x00000003
-#define FILE_NOTIFY_CHANGE_ATTRIBUTES 0x00000004
-#define FILE_NOTIFY_CHANGE_SIZE 0x00000008
-#define FILE_NOTIFY_CHANGE_LAST_WRITE 0x00000010
-#define FILE_NOTIFY_CHANGE_LAST_ACCESS 0x00000020
-#define FILE_NOTIFY_CHANGE_CREATION 0x00000040
-#define FILE_NOTIFY_CHANGE_EA 0x00000080
-#define FILE_NOTIFY_CHANGE_SECURITY 0x00000100
-#define FILE_NOTIFY_CHANGE_STREAM_NAME 0x00000200
-#define FILE_NOTIFY_CHANGE_STREAM_SIZE 0x00000400
-#define FILE_NOTIFY_CHANGE_STREAM_WRITE 0x00000800
-
-/* Flags */
-#define SMB2_WATCH_TREE 0x0001
-
-struct smb2_notify_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 32 */
- __le16 Flags;
- __le32 OutputBufferLength;
- __le64 PersistentFileId;
- __le64 VolatileFileId;
- __u32 CompletionFileter;
- __u32 Reserved;
-} __packed;
-
-struct smb2_notify_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 9 */
- __le16 OutputBufferOffset;
- __le32 OutputBufferLength;
- __u8 Buffer[1];
-} __packed;
-
/* SMB2 Notify Action Flags */
#define FILE_ACTION_ADDED 0x00000001
#define FILE_ACTION_REMOVED 0x00000002
@@ -1528,7 +861,7 @@ struct smb2_file_pos_info {
__le64 CurrentByteOffset;
} __packed;
-#define FILE_MODE_INFO_MASK cpu_to_le32(0x0000103e)
+#define FILE_MODE_INFO_MASK cpu_to_le32(0x0000100e)
struct smb2_file_mode_info {
__le32 Mode;
@@ -1705,4 +1038,13 @@ int smb2_ioctl(struct ksmbd_work *work);
int smb2_oplock_break(struct ksmbd_work *work);
int smb2_notify(struct ksmbd_work *ksmbd_work);
+/*
+ * Get the body of the smb2 message excluding the 4 byte rfc1002 headers
+ * from request/response buffer.
+ */
+static inline void *smb2_get_msg(void *buf)
+{
+ return buf + 4;
+}
+
#endif /* _SMB2PDU_H */
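A tiny usage sketch of the new helper: given a raw receive buffer, smb2_get_msg() skips the transport prefix and lands on the SMB2 ProtocolId (0xFE 'S' 'M' 'B' on the wire). The buffer contents below are hand-built test data, not taken from the driver.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void *smb2_get_msg(void *buf)
{
        return (char *)buf + 4;
}

int main(void)
{
        /* 4-byte prefix (length 4) followed by the SMB2 magic. */
        unsigned char request_buf[8] = { 0, 0, 0, 4, 0xFE, 'S', 'M', 'B' };
        uint32_t proto;

        memcpy(&proto, smb2_get_msg(request_buf), sizeof(proto));
        printf("SMB2 PDU: %s\n", !memcmp(&proto, "\xFESMB", 4) ? "yes" : "no");
        return 0;
}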
diff --git a/fs/ksmbd/smb_common.c b/fs/ksmbd/smb_common.c
index 707490ab1f4c42..ef7f42b0290a8f 100644
--- a/fs/ksmbd/smb_common.c
+++ b/fs/ksmbd/smb_common.c
@@ -132,7 +132,7 @@ int ksmbd_lookup_protocol_idx(char *str)
*/
int ksmbd_verify_smb_message(struct ksmbd_work *work)
{
- struct smb2_hdr *smb2_hdr = work->request_buf + work->next_smb2_rcv_hdr_off;
+ struct smb2_hdr *smb2_hdr = ksmbd_req_buf_next(work);
struct smb_hdr *hdr;
if (smb2_hdr->ProtocolId == SMB2_PROTO_NUMBER)
@@ -239,14 +239,14 @@ int ksmbd_lookup_dialect_by_id(__le16 *cli_dialects, __le16 dialects_count)
static int ksmbd_negotiate_smb_dialect(void *buf)
{
int smb_buf_length = get_rfc1002_len(buf);
- __le32 proto = ((struct smb2_hdr *)buf)->ProtocolId;
+ __le32 proto = ((struct smb2_hdr *)smb2_get_msg(buf))->ProtocolId;
if (proto == SMB2_PROTO_NUMBER) {
struct smb2_negotiate_req *req;
int smb2_neg_size =
- offsetof(struct smb2_negotiate_req, Dialects) - 4;
+ offsetof(struct smb2_negotiate_req, Dialects);
- req = (struct smb2_negotiate_req *)buf;
+ req = (struct smb2_negotiate_req *)smb2_get_msg(buf);
if (smb2_neg_size > smb_buf_length)
goto err_out;
@@ -445,11 +445,12 @@ int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command)
struct ksmbd_conn *conn = work->conn;
int ret;
- conn->dialect = ksmbd_negotiate_smb_dialect(work->request_buf);
+ conn->dialect =
+ ksmbd_negotiate_smb_dialect(work->request_buf);
ksmbd_debug(SMB, "conn->dialect 0x%x\n", conn->dialect);
if (command == SMB2_NEGOTIATE_HE) {
- struct smb2_hdr *smb2_hdr = work->request_buf;
+ struct smb2_hdr *smb2_hdr = smb2_get_msg(work->request_buf);
if (smb2_hdr->ProtocolId != SMB2_PROTO_NUMBER) {
ksmbd_debug(SMB, "Downgrade to SMB1 negotiation\n");
diff --git a/fs/ksmbd/smb_common.h b/fs/ksmbd/smb_common.h
index 6e79e7577f6b7b..50590842b651e7 100644
--- a/fs/ksmbd/smb_common.h
+++ b/fs/ksmbd/smb_common.h
@@ -10,6 +10,7 @@
#include "glob.h"
#include "nterr.h"
+#include "../smbfs_common/smb2pdu.h"
#include "smb2pdu.h"
/* ksmbd's Specific ERRNO */
@@ -32,17 +33,6 @@
#define SMB302_VERSION_STRING "3.02"
#define SMB311_VERSION_STRING "3.1.1"
-/* Dialects */
-#define SMB10_PROT_ID 0x00
-#define SMB20_PROT_ID 0x0202
-#define SMB21_PROT_ID 0x0210
-/* multi-protocol negotiate request */
-#define SMB2X_PROT_ID 0x02FF
-#define SMB30_PROT_ID 0x0300
-#define SMB302_PROT_ID 0x0302
-#define SMB311_PROT_ID 0x0311
-#define BAD_PROT_ID 0xFFFF
-
#define SMB_ECHO_INTERVAL (60 * HZ)
#define CIFS_DEFAULT_IOSIZE (64 * 1024)
@@ -59,21 +49,6 @@
/*
* File Attribute flags
*/
-#define ATTR_READONLY 0x0001
-#define ATTR_HIDDEN 0x0002
-#define ATTR_SYSTEM 0x0004
-#define ATTR_VOLUME 0x0008
-#define ATTR_DIRECTORY 0x0010
-#define ATTR_ARCHIVE 0x0020
-#define ATTR_DEVICE 0x0040
-#define ATTR_NORMAL 0x0080
-#define ATTR_TEMPORARY 0x0100
-#define ATTR_SPARSE 0x0200
-#define ATTR_REPARSE 0x0400
-#define ATTR_COMPRESSED 0x0800
-#define ATTR_OFFLINE 0x1000
-#define ATTR_NOT_CONTENT_INDEXED 0x2000
-#define ATTR_ENCRYPTED 0x4000
#define ATTR_POSIX_SEMANTICS 0x01000000
#define ATTR_BACKUP_SEMANTICS 0x02000000
#define ATTR_DELETE_ON_CLOSE 0x04000000
@@ -82,23 +57,6 @@
#define ATTR_NO_BUFFERING 0x20000000
#define ATTR_WRITE_THROUGH 0x80000000
-#define ATTR_READONLY_LE cpu_to_le32(ATTR_READONLY)
-#define ATTR_HIDDEN_LE cpu_to_le32(ATTR_HIDDEN)
-#define ATTR_SYSTEM_LE cpu_to_le32(ATTR_SYSTEM)
-#define ATTR_DIRECTORY_LE cpu_to_le32(ATTR_DIRECTORY)
-#define ATTR_ARCHIVE_LE cpu_to_le32(ATTR_ARCHIVE)
-#define ATTR_NORMAL_LE cpu_to_le32(ATTR_NORMAL)
-#define ATTR_TEMPORARY_LE cpu_to_le32(ATTR_TEMPORARY)
-#define ATTR_SPARSE_FILE_LE cpu_to_le32(ATTR_SPARSE)
-#define ATTR_REPARSE_POINT_LE cpu_to_le32(ATTR_REPARSE)
-#define ATTR_COMPRESSED_LE cpu_to_le32(ATTR_COMPRESSED)
-#define ATTR_OFFLINE_LE cpu_to_le32(ATTR_OFFLINE)
-#define ATTR_NOT_CONTENT_INDEXED_LE cpu_to_le32(ATTR_NOT_CONTENT_INDEXED)
-#define ATTR_ENCRYPTED_LE cpu_to_le32(ATTR_ENCRYPTED)
-#define ATTR_INTEGRITY_STREAML_LE cpu_to_le32(0x00008000)
-#define ATTR_NO_SCRUB_DATA_LE cpu_to_le32(0x00020000)
-#define ATTR_MASK_LE cpu_to_le32(0x00007FB7)
-
/* List of FileSystemAttributes - see 2.5.1 of MS-FSCC */
#define FILE_SUPPORTS_SPARSE_VDL 0x10000000 /* faster nonsparse extend */
#define FILE_SUPPORTS_BLOCK_REFCOUNTING 0x08000000 /* allow ioctl dup extents */
@@ -160,11 +118,6 @@
/* file_execute, file_read_attributes*/
/* write_dac, and delete. */
-#define FILE_READ_RIGHTS (FILE_READ_DATA | FILE_READ_EA | FILE_READ_ATTRIBUTES)
-#define FILE_WRITE_RIGHTS (FILE_WRITE_DATA | FILE_APPEND_DATA \
- | FILE_WRITE_EA | FILE_WRITE_ATTRIBUTES)
-#define FILE_EXEC_RIGHTS (FILE_EXECUTE)
-
#define SET_FILE_READ_RIGHTS (FILE_READ_DATA | FILE_READ_EA \
| FILE_READ_ATTRIBUTES \
| DELETE | READ_CONTROL | WRITE_DAC \
@@ -477,12 +430,6 @@ struct smb_version_cmds {
int (*proc)(struct ksmbd_work *swork);
};
-static inline size_t
-smb2_hdr_size_no_buflen(struct smb_version_values *vals)
-{
- return vals->header_size - 4;
-}
-
int ksmbd_min_protocol(void);
int ksmbd_max_protocol(void);
diff --git a/fs/ksmbd/transport_rdma.c b/fs/ksmbd/transport_rdma.c
index a2fd5a4d4cd5e4..7e57cbb0bb356c 100644
--- a/fs/ksmbd/transport_rdma.c
+++ b/fs/ksmbd/transport_rdma.c
@@ -484,7 +484,7 @@ static int smb_direct_check_recvmsg(struct smb_direct_recvmsg *recvmsg)
struct smb_direct_data_transfer *req =
(struct smb_direct_data_transfer *)recvmsg->packet;
struct smb2_hdr *hdr = (struct smb2_hdr *)(recvmsg->packet
- + le32_to_cpu(req->data_offset) - 4);
+ + le32_to_cpu(req->data_offset));
ksmbd_debug(RDMA,
"CreditGranted: %u, CreditRequested: %u, DataLength: %u, RemainingDataLength: %u, SMB: %x, Command: %u\n",
le16_to_cpu(req->credits_granted),
@@ -2043,7 +2043,6 @@ int ksmbd_rdma_destroy(void)
smb_direct_listener.cm_id = NULL;
if (smb_direct_wq) {
- flush_workqueue(smb_direct_wq);
destroy_workqueue(smb_direct_wq);
smb_direct_wq = NULL;
}
diff --git a/fs/ksmbd/vfs.c b/fs/ksmbd/vfs.c
index 835b384b089593..19d36393974ca9 100644
--- a/fs/ksmbd/vfs.c
+++ b/fs/ksmbd/vfs.c
@@ -1013,7 +1013,7 @@ int ksmbd_vfs_zero_data(struct ksmbd_work *work, struct ksmbd_file *fp,
loff_t off, loff_t len)
{
smb_break_all_levII_oplock(work, fp, 1);
- if (fp->f_ci->m_fattr & ATTR_SPARSE_FILE_LE)
+ if (fp->f_ci->m_fattr & FILE_ATTRIBUTE_SPARSE_FILE_LE)
return vfs_fallocate(fp->filp,
FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
off, len);
@@ -1624,7 +1624,7 @@ void *ksmbd_vfs_init_kstat(char **p, struct ksmbd_kstat *ksmbd_kstat)
time = ksmbd_UnixTimeToNT(kstat->ctime);
info->ChangeTime = cpu_to_le64(time);
- if (ksmbd_kstat->file_attributes & ATTR_DIRECTORY_LE) {
+ if (ksmbd_kstat->file_attributes & FILE_ATTRIBUTE_DIRECTORY_LE) {
info->EndOfFile = 0;
info->AllocationSize = 0;
} else {
@@ -1654,9 +1654,9 @@ int ksmbd_vfs_fill_dentry_attrs(struct ksmbd_work *work,
* or that acl is disable in server's filesystem and the config is yes.
*/
if (S_ISDIR(ksmbd_kstat->kstat->mode))
- ksmbd_kstat->file_attributes = ATTR_DIRECTORY_LE;
+ ksmbd_kstat->file_attributes = FILE_ATTRIBUTE_DIRECTORY_LE;
else
- ksmbd_kstat->file_attributes = ATTR_ARCHIVE_LE;
+ ksmbd_kstat->file_attributes = FILE_ATTRIBUTE_ARCHIVE_LE;
if (test_share_config_flag(work->tcon->share_conf,
KSMBD_SHARE_FLAG_STORE_DOS_ATTRS)) {
diff --git a/fs/ksmbd/vfs.h b/fs/ksmbd/vfs.h
index b0d5b8feb4a36b..adf94a4f22fa6d 100644
--- a/fs/ksmbd/vfs.h
+++ b/fs/ksmbd/vfs.h
@@ -25,48 +25,9 @@ enum {
};
/* CreateOptions */
-/* Flag is set, it must not be a file , valid for directory only */
-#define FILE_DIRECTORY_FILE_LE cpu_to_le32(0x00000001)
-#define FILE_WRITE_THROUGH_LE cpu_to_le32(0x00000002)
-#define FILE_SEQUENTIAL_ONLY_LE cpu_to_le32(0x00000004)
-
-/* Should not buffer on server*/
-#define FILE_NO_INTERMEDIATE_BUFFERING_LE cpu_to_le32(0x00000008)
-/* MBZ */
-#define FILE_SYNCHRONOUS_IO_ALERT_LE cpu_to_le32(0x00000010)
-/* MBZ */
-#define FILE_SYNCHRONOUS_IO_NONALERT_LE cpu_to_le32(0x00000020)
-
-/* Flaf must not be set for directory */
-#define FILE_NON_DIRECTORY_FILE_LE cpu_to_le32(0x00000040)
-
-/* Should be zero */
#define CREATE_TREE_CONNECTION cpu_to_le32(0x00000080)
-#define FILE_COMPLETE_IF_OPLOCKED_LE cpu_to_le32(0x00000100)
-#define FILE_NO_EA_KNOWLEDGE_LE cpu_to_le32(0x00000200)
-#define FILE_OPEN_REMOTE_INSTANCE cpu_to_le32(0x00000400)
-
-/**
- * Doc says this is obsolete "open for recovery" flag should be zero
- * in any case.
- */
-#define CREATE_OPEN_FOR_RECOVERY cpu_to_le32(0x00000400)
-#define FILE_RANDOM_ACCESS_LE cpu_to_le32(0x00000800)
-#define FILE_DELETE_ON_CLOSE_LE cpu_to_le32(0x00001000)
-#define FILE_OPEN_BY_FILE_ID_LE cpu_to_le32(0x00002000)
-#define FILE_OPEN_FOR_BACKUP_INTENT_LE cpu_to_le32(0x00004000)
-#define FILE_NO_COMPRESSION_LE cpu_to_le32(0x00008000)
-
-/* Should be zero*/
-#define FILE_OPEN_REQUIRING_OPLOCK cpu_to_le32(0x00010000)
-#define FILE_DISALLOW_EXCLUSIVE cpu_to_le32(0x00020000)
#define FILE_RESERVE_OPFILTER_LE cpu_to_le32(0x00100000)
-#define FILE_OPEN_REPARSE_POINT_LE cpu_to_le32(0x00200000)
-#define FILE_OPEN_NO_RECALL_LE cpu_to_le32(0x00400000)
-/* Should be zero */
-#define FILE_OPEN_FOR_FREE_SPACE_QUERY_LE cpu_to_le32(0x00800000)
-#define CREATE_OPTIONS_MASK cpu_to_le32(0x00FFFFFF)
#define CREATE_OPTION_READONLY 0x10000000
/* system. NB not sent over wire */
#define CREATE_OPTION_SPECIAL 0x20000000
diff --git a/fs/libfs.c b/fs/libfs.c
index 51b4de3b3447fd..ba7438ab93710f 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -448,6 +448,30 @@ int simple_rmdir(struct inode *dir, struct dentry *dentry)
}
EXPORT_SYMBOL(simple_rmdir);
+int simple_rename_exchange(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
+{
+ bool old_is_dir = d_is_dir(old_dentry);
+ bool new_is_dir = d_is_dir(new_dentry);
+
+ if (old_dir != new_dir && old_is_dir != new_is_dir) {
+ if (old_is_dir) {
+ drop_nlink(old_dir);
+ inc_nlink(new_dir);
+ } else {
+ drop_nlink(new_dir);
+ inc_nlink(old_dir);
+ }
+ }
+ old_dir->i_ctime = old_dir->i_mtime =
+ new_dir->i_ctime = new_dir->i_mtime =
+ d_inode(old_dentry)->i_ctime =
+ d_inode(new_dentry)->i_ctime = current_time(old_dir);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(simple_rename_exchange);
+
int simple_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
struct dentry *old_dentry, struct inode *new_dir,
struct dentry *new_dentry, unsigned int flags)
@@ -455,9 +479,12 @@ int simple_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
struct inode *inode = d_inode(old_dentry);
int they_are_dirs = d_is_dir(old_dentry);
- if (flags & ~RENAME_NOREPLACE)
+ if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
return -EINVAL;
+ if (flags & RENAME_EXCHANGE)
+ return simple_rename_exchange(old_dir, old_dentry, new_dir, new_dentry);
+
if (!simple_empty(new_dentry))
return -ENOTEMPTY;
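The libfs hunk teaches simple_rename() about RENAME_EXCHANGE: when a directory and a non-directory swap places across two different parents, the parents' link counts are rebalanced because only a directory contributes a ".." back-link; same-type or same-parent exchanges leave the counts alone. A user-space sketch of just that bookkeeping (types and names are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct dir { int nlink; };

static void exchange_nlink(struct dir *old_dir, bool old_is_dir,
                           struct dir *new_dir, bool new_is_dir)
{
        if (old_dir == new_dir || old_is_dir == new_is_dir)
                return;

        if (old_is_dir) {
                old_dir->nlink--;       /* loses the ".." back-link */
                new_dir->nlink++;       /* gains it */
        } else {
                new_dir->nlink--;
                old_dir->nlink++;
        }
}

int main(void)
{
        struct dir a = { .nlink = 3 }, b = { .nlink = 2 };

        /* Swap a directory under 'a' with a regular file under 'b'. */
        exchange_nlink(&a, true, &b, false);
        printf("a=%d b=%d\n", a.nlink, b.nlink);        /* a=2 b=3 */
        return 0;
}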
diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c
index 994ec22d40402b..9320a42dfaf973 100644
--- a/fs/netfs/read_helper.c
+++ b/fs/netfs/read_helper.c
@@ -230,7 +230,7 @@ static void netfs_rreq_completed(struct netfs_read_request *rreq, bool was_async
/*
* Deal with the completion of writing the data to the cache. We have to clear
- * the PG_fscache bits on the pages involved and release the caller's ref.
+ * the PG_fscache bits on the folios involved and release the caller's ref.
*
* May be called in softirq mode and we inherit a ref from the caller.
*/
@@ -238,7 +238,7 @@ static void netfs_rreq_unmark_after_write(struct netfs_read_request *rreq,
bool was_async)
{
struct netfs_read_subrequest *subreq;
- struct page *page;
+ struct folio *folio;
pgoff_t unlocked = 0;
bool have_unlocked = false;
@@ -247,14 +247,14 @@ static void netfs_rreq_unmark_after_write(struct netfs_read_request *rreq,
list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);
- xas_for_each(&xas, page, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
+ xas_for_each(&xas, folio, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
/* We might have multiple writes from the same huge
- * page, but we mustn't unlock a page more than once.
+ * folio, but we mustn't unlock a folio more than once.
*/
- if (have_unlocked && page->index <= unlocked)
+ if (have_unlocked && folio_index(folio) <= unlocked)
continue;
- unlocked = page->index;
- end_page_fscache(page);
+ unlocked = folio_index(folio);
+ folio_end_fscache(folio);
have_unlocked = true;
}
}
@@ -367,18 +367,17 @@ static void netfs_rreq_write_to_cache(struct netfs_read_request *rreq,
}
/*
- * Unlock the pages in a read operation. We need to set PG_fscache on any
- * pages we're going to write back before we unlock them.
+ * Unlock the folios in a read operation. We need to set PG_fscache on any
+ * folios we're going to write back before we unlock them.
*/
static void netfs_rreq_unlock(struct netfs_read_request *rreq)
{
struct netfs_read_subrequest *subreq;
- struct page *page;
+ struct folio *folio;
unsigned int iopos, account = 0;
pgoff_t start_page = rreq->start / PAGE_SIZE;
pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
bool subreq_failed = false;
- int i;
XA_STATE(xas, &rreq->mapping->i_pages, start_page);
@@ -403,9 +402,9 @@ static void netfs_rreq_unlock(struct netfs_read_request *rreq)
trace_netfs_rreq(rreq, netfs_rreq_trace_unlock);
rcu_read_lock();
- xas_for_each(&xas, page, last_page) {
- unsigned int pgpos = (page->index - start_page) * PAGE_SIZE;
- unsigned int pgend = pgpos + thp_size(page);
+ xas_for_each(&xas, folio, last_page) {
+ unsigned int pgpos = (folio_index(folio) - start_page) * PAGE_SIZE;
+ unsigned int pgend = pgpos + folio_size(folio);
bool pg_failed = false;
for (;;) {
@@ -414,7 +413,7 @@ static void netfs_rreq_unlock(struct netfs_read_request *rreq)
break;
}
if (test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags))
- set_page_fscache(page);
+ folio_start_fscache(folio);
pg_failed |= subreq_failed;
if (pgend < iopos + subreq->len)
break;
@@ -433,17 +432,16 @@ static void netfs_rreq_unlock(struct netfs_read_request *rreq)
}
if (!pg_failed) {
- for (i = 0; i < thp_nr_pages(page); i++)
- flush_dcache_page(page);
- SetPageUptodate(page);
+ flush_dcache_folio(folio);
+ folio_mark_uptodate(folio);
}
- if (!test_bit(NETFS_RREQ_DONT_UNLOCK_PAGES, &rreq->flags)) {
- if (page->index == rreq->no_unlock_page &&
- test_bit(NETFS_RREQ_NO_UNLOCK_PAGE, &rreq->flags))
+ if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
+ if (folio_index(folio) == rreq->no_unlock_folio &&
+ test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
_debug("no unlock");
else
- unlock_page(page);
+ folio_unlock(folio);
}
}
rcu_read_unlock();
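In the folio conversion each folio is handled as a single unit: its byte extent within the read request is derived from folio_index() and folio_size(), and one flush_dcache_folio()/folio_mark_uptodate() pair replaces the old per-subpage loop. A small sketch of the extent arithmetic, assuming a 4 KiB page size and illustrative parameter names:

#include <stdio.h>

#define PAGE_SIZE 4096u

/* [pgpos, pgend) is the folio's byte range relative to the request start,
 * where start_page is the request's first page index.
 */
static void folio_extent(unsigned long folio_index, unsigned int folio_size,
                         unsigned long start_page,
                         unsigned int *pgpos, unsigned int *pgend)
{
        *pgpos = (folio_index - start_page) * PAGE_SIZE;
        *pgend = *pgpos + folio_size;
}

int main(void)
{
        unsigned int pos, end;

        /* A 4-page (16 KiB) folio that starts 2 pages into the request. */
        folio_extent(/*folio_index*/ 12, /*folio_size*/ 4 * PAGE_SIZE,
                     /*start_page*/ 10, &pos, &end);
        printf("pgpos=%u pgend=%u\n", pos, end);        /* 8192 24576 */
        return 0;
}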
@@ -876,7 +874,6 @@ void netfs_readahead(struct readahead_control *ractl,
void *netfs_priv)
{
struct netfs_read_request *rreq;
- struct page *page;
unsigned int debug_index = 0;
int ret;
@@ -911,11 +908,11 @@ void netfs_readahead(struct readahead_control *ractl,
} while (rreq->submitted < rreq->len);
- /* Drop the refs on the pages here rather than in the cache or
+ /* Drop the refs on the folios here rather than in the cache or
* filesystem. The locks will be dropped in netfs_rreq_unlock().
*/
- while ((page = readahead_page(ractl)))
- put_page(page);
+ while (readahead_folio(ractl))
+ ;
/* If we decrement nr_rd_ops to 0, the ref belongs to us. */
if (atomic_dec_and_test(&rreq->nr_rd_ops))
@@ -935,7 +932,7 @@ EXPORT_SYMBOL(netfs_readahead);
/**
* netfs_readpage - Helper to manage a readpage request
* @file: The file to read from
- * @page: The page to read
+ * @folio: The folio to read
* @ops: The network filesystem's operations for the helper to use
* @netfs_priv: Private netfs data to be retained in the request
*
@@ -950,7 +947,7 @@ EXPORT_SYMBOL(netfs_readahead);
* This is usable whether or not caching is enabled.
*/
int netfs_readpage(struct file *file,
- struct page *page,
+ struct folio *folio,
const struct netfs_read_request_ops *ops,
void *netfs_priv)
{
@@ -958,23 +955,23 @@ int netfs_readpage(struct file *file,
unsigned int debug_index = 0;
int ret;
- _enter("%lx", page_index(page));
+ _enter("%lx", folio_index(folio));
rreq = netfs_alloc_read_request(ops, netfs_priv, file);
if (!rreq) {
if (netfs_priv)
- ops->cleanup(netfs_priv, page_file_mapping(page));
- unlock_page(page);
+ ops->cleanup(netfs_priv, folio_file_mapping(folio));
+ folio_unlock(folio);
return -ENOMEM;
}
- rreq->mapping = page_file_mapping(page);
- rreq->start = page_file_offset(page);
- rreq->len = thp_size(page);
+ rreq->mapping = folio_file_mapping(folio);
+ rreq->start = folio_file_pos(folio);
+ rreq->len = folio_size(folio);
if (ops->begin_cache_operation) {
ret = ops->begin_cache_operation(rreq);
if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) {
- unlock_page(page);
+ folio_unlock(folio);
goto out;
}
}
@@ -1012,40 +1009,40 @@ out:
EXPORT_SYMBOL(netfs_readpage);
/**
- * netfs_skip_page_read - prep a page for writing without reading first
- * @page: page being prepared
+ * netfs_skip_folio_read - prep a folio for writing without reading first
+ * @folio: The folio being prepared
* @pos: starting position for the write
* @len: length of write
*
* In some cases, write_begin doesn't need to read at all:
- * - full page write
- * - write that lies in a page that is completely beyond EOF
- * - write that covers the the page from start to EOF or beyond it
+ * - full folio write
+ * - write that lies in a folio that is completely beyond EOF
+ * - write that covers the folio from start to EOF or beyond it
*
* If any of these criteria are met, then zero out the unwritten parts
- * of the page and return true. Otherwise, return false.
+ * of the folio and return true. Otherwise, return false.
*/
-static bool netfs_skip_page_read(struct page *page, loff_t pos, size_t len)
+static bool netfs_skip_folio_read(struct folio *folio, loff_t pos, size_t len)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio_inode(folio);
loff_t i_size = i_size_read(inode);
- size_t offset = offset_in_thp(page, pos);
+ size_t offset = offset_in_folio(folio, pos);
- /* Full page write */
- if (offset == 0 && len >= thp_size(page))
+ /* Full folio write */
+ if (offset == 0 && len >= folio_size(folio))
return true;
- /* pos beyond last page in the file */
+ /* pos beyond last folio in the file */
if (pos - offset >= i_size)
goto zero_out;
- /* Write that covers from the start of the page to EOF or beyond */
+ /* Write that covers from the start of the folio to EOF or beyond */
if (offset == 0 && (pos + len) >= i_size)
goto zero_out;
return false;
zero_out:
- zero_user_segments(page, 0, offset, offset + len, thp_size(page));
+ zero_user_segments(&folio->page, 0, offset, offset + len, folio_size(folio));
return true;
}
@@ -1054,9 +1051,9 @@ zero_out:
* @file: The file to read from
* @mapping: The mapping to read from
* @pos: File position at which the write will begin
- * @len: The length of the write (may extend beyond the end of the page chosen)
- * @flags: AOP_* flags
- * @_page: Where to put the resultant page
+ * @len: The length of the write (may extend beyond the end of the folio chosen)
+ * @aop_flags: AOP_* flags
+ * @_folio: Where to put the resultant folio
* @_fsdata: Place for the netfs to store a cookie
* @ops: The network filesystem's operations for the helper to use
* @netfs_priv: Private netfs data to be retained in the request
@@ -1072,37 +1069,41 @@ zero_out:
* issue_op, is mandatory.
*
* The check_write_begin() operation can be provided to check for and flush
- * conflicting writes once the page is grabbed and locked. It is passed a
+ * conflicting writes once the folio is grabbed and locked. It is passed a
* pointer to the fsdata cookie that gets returned to the VM to be passed to
* write_end. It is permitted to sleep. It should return 0 if the request
- * should go ahead; unlock the page and return -EAGAIN to cause the page to be
- * regot; or return an error.
+ * should go ahead; unlock the folio and return -EAGAIN to cause the folio to
+ * be regot; or return an error.
*
* This is usable whether or not caching is enabled.
*/
int netfs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned int len, unsigned int flags,
- struct page **_page, void **_fsdata,
+ loff_t pos, unsigned int len, unsigned int aop_flags,
+ struct folio **_folio, void **_fsdata,
const struct netfs_read_request_ops *ops,
void *netfs_priv)
{
struct netfs_read_request *rreq;
- struct page *page, *xpage;
+ struct folio *folio;
struct inode *inode = file_inode(file);
- unsigned int debug_index = 0;
+ unsigned int debug_index = 0, fgp_flags;
pgoff_t index = pos >> PAGE_SHIFT;
int ret;
DEFINE_READAHEAD(ractl, file, NULL, mapping, index);
retry:
- page = grab_cache_page_write_begin(mapping, index, flags);
- if (!page)
+ fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;
+ if (aop_flags & AOP_FLAG_NOFS)
+ fgp_flags |= FGP_NOFS;
+ folio = __filemap_get_folio(mapping, index, fgp_flags,
+ mapping_gfp_mask(mapping));
+ if (!folio)
return -ENOMEM;
if (ops->check_write_begin) {
/* Allow the netfs (eg. ceph) to flush conflicts. */
- ret = ops->check_write_begin(file, pos, len, page, _fsdata);
+ ret = ops->check_write_begin(file, pos, len, folio, _fsdata);
if (ret < 0) {
trace_netfs_failure(NULL, NULL, ret, netfs_fail_check_write_begin);
if (ret == -EAGAIN)
@@ -1111,28 +1112,28 @@ retry:
}
}
- if (PageUptodate(page))
- goto have_page;
+ if (folio_test_uptodate(folio))
+ goto have_folio;
/* If the page is beyond the EOF, we want to clear it - unless it's
* within the cache granule containing the EOF, in which case we need
* to preload the granule.
*/
if (!ops->is_cache_enabled(inode) &&
- netfs_skip_page_read(page, pos, len)) {
+ netfs_skip_folio_read(folio, pos, len)) {
netfs_stat(&netfs_n_rh_write_zskip);
- goto have_page_no_wait;
+ goto have_folio_no_wait;
}
ret = -ENOMEM;
rreq = netfs_alloc_read_request(ops, netfs_priv, file);
if (!rreq)
goto error;
- rreq->mapping = page->mapping;
- rreq->start = page_offset(page);
- rreq->len = thp_size(page);
- rreq->no_unlock_page = page->index;
- __set_bit(NETFS_RREQ_NO_UNLOCK_PAGE, &rreq->flags);
+ rreq->mapping = folio_file_mapping(folio);
+ rreq->start = folio_file_pos(folio);
+ rreq->len = folio_size(folio);
+ rreq->no_unlock_folio = folio_index(folio);
+ __set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
netfs_priv = NULL;
if (ops->begin_cache_operation) {
@@ -1147,14 +1148,14 @@ retry:
/* Expand the request to meet caching requirements and download
* preferences.
*/
- ractl._nr_pages = thp_nr_pages(page);
+ ractl._nr_pages = folio_nr_pages(folio);
netfs_rreq_expand(rreq, &ractl);
netfs_get_read_request(rreq);
- /* We hold the page locks, so we can drop the references */
- while ((xpage = readahead_page(&ractl)))
- if (xpage != page)
- put_page(xpage);
+ /* We hold the folio locks, so we can drop the references */
+ folio_get(folio);
+ while (readahead_folio(&ractl))
+ ;
atomic_set(&rreq->nr_rd_ops, 1);
do {
@@ -1184,22 +1185,22 @@ retry:
if (ret < 0)
goto error;
-have_page:
- ret = wait_on_page_fscache_killable(page);
+have_folio:
+ ret = folio_wait_fscache_killable(folio);
if (ret < 0)
goto error;
-have_page_no_wait:
+have_folio_no_wait:
if (netfs_priv)
ops->cleanup(netfs_priv, mapping);
- *_page = page;
+ *_folio = folio;
_leave(" = 0");
return 0;
error_put:
netfs_put_read_request(rreq, false);
error:
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
if (netfs_priv)
ops->cleanup(netfs_priv, mapping);
_leave(" = %d", ret);
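For reference, a minimal sketch (not part of the patch) of how a network filesystem's ->write_begin could sit on top of the folio-based helper converted above. The myfs_* names and the empty ops table are hypothetical; netfs_write_begin() is the function shown above, and folio_file_page() is assumed to be the existing pagemap helper for picking the page that covers @pos.

#include <linux/fs.h>
#include <linux/netfs.h>
#include <linux/pagemap.h>

/* Hypothetical glue; a real filesystem fills in issue_op etc. */
static const struct netfs_read_request_ops myfs_netfs_ops = { };

static int myfs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned int len, unsigned int flags,
			    struct page **pagep, void **fsdata)
{
	struct folio *folio;
	int ret;

	ret = netfs_write_begin(file, mapping, pos, len, flags,
				&folio, fsdata, &myfs_netfs_ops, NULL);
	if (ret < 0)
		return ret;

	/* The VFS still deals in pages; hand back the page covering @pos. */
	*pagep = folio_file_page(folio, pos / PAGE_SIZE);
	return 0;
}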
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index b9614db48b1def..f243cb5e6a4fb5 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -218,7 +218,7 @@ static int zbufsize_842(size_t size)
#if IS_ENABLED(CONFIG_PSTORE_ZSTD_COMPRESS)
static int zbufsize_zstd(size_t size)
{
- return ZSTD_compressBound(size);
+ return zstd_compress_bound(size);
}
#endif
diff --git a/fs/squashfs/zstd_wrapper.c b/fs/squashfs/zstd_wrapper.c
index 0015cf8b55820b..c40445dbf38c77 100644
--- a/fs/squashfs/zstd_wrapper.c
+++ b/fs/squashfs/zstd_wrapper.c
@@ -34,7 +34,7 @@ static void *zstd_init(struct squashfs_sb_info *msblk, void *buff)
goto failed;
wksp->window_size = max_t(size_t,
msblk->block_size, SQUASHFS_METADATA_SIZE);
- wksp->mem_size = ZSTD_DStreamWorkspaceBound(wksp->window_size);
+ wksp->mem_size = zstd_dstream_workspace_bound(wksp->window_size);
wksp->mem = vmalloc(wksp->mem_size);
if (wksp->mem == NULL)
goto failed;
@@ -63,15 +63,15 @@ static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm,
struct squashfs_page_actor *output)
{
struct workspace *wksp = strm;
- ZSTD_DStream *stream;
+ zstd_dstream *stream;
size_t total_out = 0;
int error = 0;
- ZSTD_inBuffer in_buf = { NULL, 0, 0 };
- ZSTD_outBuffer out_buf = { NULL, 0, 0 };
+ zstd_in_buffer in_buf = { NULL, 0, 0 };
+ zstd_out_buffer out_buf = { NULL, 0, 0 };
struct bvec_iter_all iter_all = {};
struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
- stream = ZSTD_initDStream(wksp->window_size, wksp->mem, wksp->mem_size);
+ stream = zstd_init_dstream(wksp->window_size, wksp->mem, wksp->mem_size);
if (!stream) {
ERROR("Failed to initialize zstd decompressor\n");
@@ -116,14 +116,14 @@ static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm,
}
total_out -= out_buf.pos;
- zstd_err = ZSTD_decompressStream(stream, &out_buf, &in_buf);
+ zstd_err = zstd_decompress_stream(stream, &out_buf, &in_buf);
total_out += out_buf.pos; /* add the additional data produced */
if (zstd_err == 0)
break;
- if (ZSTD_isError(zstd_err)) {
+ if (zstd_is_error(zstd_err)) {
ERROR("zstd decompression error: %d\n",
- (int)ZSTD_getErrorCode(zstd_err));
+ (int)zstd_get_error_code(zstd_err));
error = -EIO;
break;
}
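As a reference for the conversion above, a self-contained sketch of the buffer-to-buffer streaming decode pattern squashfs now follows, using only names declared by the new linux/zstd.h; the demo_* wrapper and the chosen error values are illustrative only.

#include <linux/vmalloc.h>
#include <linux/zstd.h>

static int demo_zstd_decompress(const void *src, size_t src_len,
				void *dst, size_t dst_len,
				size_t window_size)
{
	size_t wksp_size = zstd_dstream_workspace_bound(window_size);
	void *wksp = vmalloc(wksp_size);
	zstd_dstream *stream;
	zstd_in_buffer in = { src, src_len, 0 };
	zstd_out_buffer out = { dst, dst_len, 0 };
	size_t ret;
	int err = 0;

	if (!wksp)
		return -ENOMEM;

	stream = zstd_init_dstream(window_size, wksp, wksp_size);
	if (!stream) {
		err = -EINVAL;
		goto out;
	}

	do {
		ret = zstd_decompress_stream(stream, &out, &in);
		if (zstd_is_error(ret)) {
			err = -EIO;
			break;
		}
		if (ret == 0)
			break;		/* frame fully decoded and flushed */
		if (out.pos == out.size || in.pos == in.size) {
			err = -EINVAL;	/* dst too small or src truncated */
			break;
		}
	} while (1);

out:
	vfree(wksp);
	return err;
}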
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
index aafd07388eb7bf..b84693fbd2b504 100644
--- a/include/drm/drm_modeset_lock.h
+++ b/include/drm/drm_modeset_lock.h
@@ -24,6 +24,8 @@
#ifndef DRM_MODESET_LOCK_H_
#define DRM_MODESET_LOCK_H_
+#include <linux/types.h> /* stackdepot.h is not self-contained */
+#include <linux/stackdepot.h>
#include <linux/ww_mutex.h>
struct drm_modeset_lock;
@@ -52,6 +54,12 @@ struct drm_modeset_acquire_ctx {
struct drm_modeset_lock *contended;
/*
+ * Stack depot for debugging when a contended lock was not backed off
+ * from.
+ */
+ depot_stack_handle_t stack_depot;
+
+ /*
* list of held locks (drm_modeset_lock)
*/
struct list_head locked;
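How the new stack_depot field gets populated is not visible in this hunk; the following is only an assumed sketch of the usual pattern for recording and dumping a stack with the depot API. stack_trace_save(), stack_depot_save(), stack_depot_fetch() and stack_trace_print() are existing kernel interfaces; the demo_* wrappers are not.

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t demo_record_stack(void)
{
	unsigned long entries[8];
	unsigned int n;

	/* Skip one frame so the caller, not this helper, heads the trace. */
	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
	return stack_depot_save(entries, n, GFP_NOWAIT);
}

static void demo_print_stack(depot_stack_handle_t handle)
{
	unsigned long *entries;
	unsigned int n;

	n = stack_depot_fetch(handle, &entries);
	stack_trace_print(entries, n, 2);
}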
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 76d7c33884da17..cd785cfa312340 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -351,9 +351,10 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
* @bo: Pointer to a ttm_buffer_object to be initialized.
* @size: Requested size of buffer object.
* @type: Requested type of buffer object.
- * @flags: Initial placement flags.
+ * @placement: Initial placement for buffer object.
* @page_alignment: Data alignment in pages.
* @ctx: TTM operation context for memory allocation.
+ * @sg: Scatter-gather table.
* @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
* @destroy: Destroy function. Use NULL for kfree().
*
@@ -394,7 +395,7 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
* @bo: Pointer to a ttm_buffer_object to be initialized.
* @size: Requested size of buffer object.
* @type: Requested type of buffer object.
- * @flags: Initial placement flags.
+ * @placement: Initial placement for buffer object.
* @page_alignment: Data alignment in pages.
* @interruptible: If needing to sleep to wait for GPU resources,
* sleep interruptible.
@@ -402,6 +403,7 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
* holds a pointer to a persistent shmem object. Typically, this would
* point to the shmem object backing a GEM object if TTM is used to back a
* GEM user interface.
+ * @sg: Scatter-gather table.
* @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
* @destroy: Destroy function. Use NULL for kfree().
*
@@ -582,8 +584,7 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
pgprot_t prot,
- pgoff_t num_prefault,
- pgoff_t fault_page_size);
+ pgoff_t num_prefault);
vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf);
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 2be6dfd68df995..f715e8863f4de9 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -484,6 +484,12 @@ bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
aux->ctx_field_size = size;
}
+static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
+{
+ return insn->code == (BPF_LD | BPF_IMM | BPF_DW) &&
+ insn->src_reg == BPF_PSEUDO_FUNC;
+}
+
struct bpf_prog_ops {
int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr);
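A small, hypothetical use of the new helper: a BPF_PSEUDO_FUNC load is a double-word immediate whose first slot matches bpf_pseudo_func(), so counting the callback functions a program references is a short loop. demo_count_pseudo_funcs() is illustrative; struct bpf_prog and its insnsi[] array are existing kernel definitions.

#include <linux/bpf.h>
#include <linux/filter.h>

static unsigned int demo_count_pseudo_funcs(const struct bpf_prog *prog)
{
	unsigned int i, n = 0;

	for (i = 0; i < prog->len; i++)
		if (bpf_pseudo_func(&prog->insnsi[i]))
			n++;
	return n;
}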
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index bc2699feddbeb7..7ad6c3d0db7da1 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -302,6 +302,8 @@ enum {
CEPH_SESSION_REQUEST_FLUSH_MDLOG,
};
+#define CEPH_SESSION_BLOCKLISTED (1 << 0) /* session blocklisted */
+
extern const char *ceph_session_op_name(int op);
struct ceph_mds_session_head {
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 83fa08a0650710..3431011f364dd4 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -475,6 +475,14 @@ extern void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
u64 expected_object_size,
u64 expected_write_size,
u32 flags);
+extern int osd_req_op_copy_from_init(struct ceph_osd_request *req,
+ u64 src_snapid, u64 src_version,
+ struct ceph_object_id *src_oid,
+ struct ceph_object_locator *src_oloc,
+ u32 src_fadvise_flags,
+ u32 dst_fadvise_flags,
+ u32 truncate_seq, u64 truncate_size,
+ u8 copy_from_flags);
extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
struct ceph_snap_context *snapc,
@@ -515,17 +523,6 @@ int ceph_osdc_call(struct ceph_osd_client *osdc,
struct page *req_page, size_t req_len,
struct page **resp_pages, size_t *resp_len);
-int ceph_osdc_copy_from(struct ceph_osd_client *osdc,
- u64 src_snapid, u64 src_version,
- struct ceph_object_id *src_oid,
- struct ceph_object_locator *src_oloc,
- u32 src_fadvise_flags,
- struct ceph_object_id *dst_oid,
- struct ceph_object_locator *dst_oloc,
- u32 dst_fadvise_flags,
- u32 truncate_seq, u64 truncate_size,
- u8 copy_from_flags);
-
/* watch/notify */
struct ceph_osd_linger_request *
ceph_osdc_watch(struct ceph_osd_client *osdc,
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index 8b6c20636a7925..dbd235ab447f96 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -170,15 +170,20 @@ struct dma_resv_iter {
/** @index: index into the shared fences */
unsigned int index;
- /** @fences: the shared fences */
+ /** @fences: the shared fences; private, *MUST* not dereference */
struct dma_resv_list *fences;
+ /** @shared_count: number of shared fences */
+ unsigned int shared_count;
+
/** @is_restarted: true if this is the first returned fence */
bool is_restarted;
};
struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor);
struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor);
+struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor);
+struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor);
/**
* dma_resv_iter_begin - initialize a dma_resv_iter object
@@ -244,6 +249,24 @@ static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
for (fence = dma_resv_iter_first_unlocked(cursor); \
fence; fence = dma_resv_iter_next_unlocked(cursor))
+/**
+ * dma_resv_for_each_fence - fence iterator
+ * @cursor: a struct dma_resv_iter pointer
+ * @obj: a dma_resv object pointer
+ * @all_fences: true if all fences should be returned
+ * @fence: the current fence
+ *
+ * Iterate over the fences in a struct dma_resv object while holding the
+ * &dma_resv.lock. @all_fences controls if the shared fences are returned as
+ * well. The cursor initialisation is part of the iterator and the fence stays
+ * valid as long as the lock is held and so no extra reference to the fence is
+ * taken.
+ */
+#define dma_resv_for_each_fence(cursor, obj, all_fences, fence) \
+ for (dma_resv_iter_begin(cursor, obj, all_fences), \
+ fence = dma_resv_iter_first(cursor); fence; \
+ fence = dma_resv_iter_next(cursor))
+
#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
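A minimal sketch of the new locked iterator: the dump loop is illustrative, while dma_resv_for_each_fence() and dma_resv_assert_held() come from the header above and struct dma_fence provides the context/seqno fields.

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>
#include <linux/printk.h>

static void demo_dump_fences(struct dma_resv *resv)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	/* The locked iterator requires the reservation lock to be held. */
	dma_resv_assert_held(resv);

	dma_resv_for_each_fence(&cursor, resv, true, fence)
		pr_info("fence %llu:%llu\n", fence->context, fence->seqno);
}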
diff --git a/include/linux/dsa/ocelot.h b/include/linux/dsa/ocelot.h
index d42010cf54682d..7ee708ad7df241 100644
--- a/include/linux/dsa/ocelot.h
+++ b/include/linux/dsa/ocelot.h
@@ -12,6 +12,7 @@
struct ocelot_skb_cb {
struct sk_buff *clone;
unsigned int ptp_class; /* valid only for clones */
+ u32 tstamp_lo;
u8 ptp_cmd;
u8 ts_id;
};
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 6b5d36babfcc44..dbd39b20e03457 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -362,6 +362,7 @@ void efi_native_runtime_setup(void);
/* OEM GUIDs */
#define DELLEMC_EFI_RCI2_TABLE_GUID EFI_GUID(0x2d9f28a2, 0xa886, 0x456a, 0x97, 0xa8, 0xf1, 0x1e, 0xf2, 0x4f, 0xf4, 0x55)
+#define AMD_SEV_MEM_ENCRYPT_GUID EFI_GUID(0x0cf29b71, 0x9e51, 0x433a, 0xa3, 0xb7, 0x81, 0xf3, 0xab, 0x16, 0xb8, 0x75)
typedef struct {
efi_guid_t guid;
diff --git a/include/linux/ethtool_netlink.h b/include/linux/ethtool_netlink.h
index 1e7bf78cb3829c..aba348d58ff61c 100644
--- a/include/linux/ethtool_netlink.h
+++ b/include/linux/ethtool_netlink.h
@@ -10,6 +10,9 @@
#define __ETHTOOL_LINK_MODE_MASK_NWORDS \
DIV_ROUND_UP(__ETHTOOL_LINK_MODE_MASK_NBITS, 32)
+#define ETHTOOL_PAUSE_STAT_CNT (__ETHTOOL_A_PAUSE_STAT_CNT - \
+ ETHTOOL_A_PAUSE_STAT_TX_FRAMES)
+
enum ethtool_multicast_groups {
ETHNL_MCGRP_MONITOR,
};
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 5950f8f5dc74d6..6f3db99ab990a1 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -262,7 +262,7 @@ struct fb_ops {
/* Draws a rectangle */
void (*fb_fillrect) (struct fb_info *info, const struct fb_fillrect *rect);
- /* Copy data from area to another */
+ /* Copy data from area to another. Obsolete. */
void (*fb_copyarea) (struct fb_info *info, const struct fb_copyarea *region);
/* Draws a image to the display */
void (*fb_imageblit) (struct fb_info *info, const struct fb_image *image);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 3afca821df32e5..1cb616fc11053b 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -3385,6 +3385,8 @@ extern int simple_open(struct inode *inode, struct file *file);
extern int simple_link(struct dentry *, struct inode *, struct dentry *);
extern int simple_unlink(struct inode *, struct dentry *);
extern int simple_rmdir(struct inode *, struct dentry *);
+extern int simple_rename_exchange(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry);
extern int simple_rename(struct user_namespace *, struct inode *,
struct dentry *, struct inode *, struct dentry *,
unsigned int);
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 462634b4b48fb4..74c4102631130a 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -205,9 +205,9 @@ static inline dev_t disk_devt(struct gendisk *disk)
void disk_uevent(struct gendisk *disk, enum kobject_action action);
/* block/genhd.c */
-int device_add_disk(struct device *parent, struct gendisk *disk,
- const struct attribute_group **groups);
-static inline int add_disk(struct gendisk *disk)
+int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
+ const struct attribute_group **groups);
+static inline int __must_check add_disk(struct gendisk *disk)
{
return device_add_disk(NULL, disk, NULL);
}
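With add_disk() now marked __must_check, callers have to unwind on failure. A hedged sketch of the expected probe-path pattern, assuming the disk came from blk_alloc_disk(); demo_register_disk() is illustrative.

#include <linux/blkdev.h>
#include <linux/genhd.h>

static int demo_register_disk(struct gendisk *disk)
{
	int err;

	err = add_disk(disk);
	if (err) {
		/* Registration failed: drop the queue and disk reference. */
		blk_cleanup_disk(disk);
		return err;
	}
	return 0;
}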
diff --git a/include/linux/input/cy8ctmg110_pdata.h b/include/linux/input/cy8ctmg110_pdata.h
deleted file mode 100644
index ee1d44545f3041..00000000000000
--- a/include/linux/input/cy8ctmg110_pdata.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_CY8CTMG110_PDATA_H
-#define _LINUX_CY8CTMG110_PDATA_H
-
-struct cy8ctmg110_pdata
-{
- int reset_pin; /* Reset pin is wired to this GPIO (optional) */
-};
-
-#endif
diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
index 9fd0ad80fef6cf..5f5965246877a3 100644
--- a/include/linux/kcsan-checks.h
+++ b/include/linux/kcsan-checks.h
@@ -100,9 +100,12 @@ void kcsan_set_access_mask(unsigned long mask);
/* Scoped access information. */
struct kcsan_scoped_access {
struct list_head list;
+ /* Access information. */
const volatile void *ptr;
size_t size;
int type;
+ /* Location where scoped access was set up. */
+ unsigned long ip;
};
/*
* Automatically call kcsan_end_scoped_access() when kcsan_scoped_access goes
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 968b4c4fe65b7b..77755ac3e189bf 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -85,7 +85,7 @@
struct completion;
struct user;
-#ifdef CONFIG_PREEMPT_VOLUNTARY
+#ifdef CONFIG_PREEMPT_VOLUNTARY_BUILD
extern int __cond_resched(void);
# define might_resched() __cond_resched()
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 60a35d9fe25981..9e0667e3723e91 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -150,7 +150,7 @@ static inline bool is_error_page(struct page *page)
#define KVM_REQ_MMU_RELOAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_UNBLOCK 2
#define KVM_REQ_UNHALT 3
-#define KVM_REQ_VM_BUGGED (4 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_VM_DEAD (4 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQUEST_ARCH_BASE 8
#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
@@ -617,6 +617,7 @@ struct kvm {
unsigned int max_halt_poll_ns;
u32 dirty_ring_size;
bool vm_bugged;
+ bool vm_dead;
#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
struct notifier_block pm_notifier;
@@ -650,12 +651,19 @@ struct kvm {
#define vcpu_err(vcpu, fmt, ...) \
kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
+static inline void kvm_vm_dead(struct kvm *kvm)
+{
+ kvm->vm_dead = true;
+ kvm_make_all_cpus_request(kvm, KVM_REQ_VM_DEAD);
+}
+
static inline void kvm_vm_bugged(struct kvm *kvm)
{
kvm->vm_bugged = true;
- kvm_make_all_cpus_request(kvm, KVM_REQ_VM_BUGGED);
+ kvm_vm_dead(kvm);
}
+
#define KVM_BUG(cond, kvm, fmt...) \
({ \
int __ret = (cond); \
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 2884383f171864..2a8404b26083c0 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -394,7 +394,7 @@ enum {
/* This should match the actual table size of
* ata_eh_cmd_timeout_table in libata-eh.c.
*/
- ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 6,
+ ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 7,
/* Horkage types. May be set by libata or controller on drives
(some horkage may be drive/controller pair dependent */
@@ -427,6 +427,7 @@ enum {
ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */
ATA_HORKAGE_MAX_TRIM_128M = (1 << 26), /* Limit max trim size to 128M */
ATA_HORKAGE_NO_NCQ_ON_ATI = (1 << 27), /* Disable NCQ on ATI chipset */
+ ATA_HORKAGE_NO_ID_DEV_LOG = (1 << 28), /* Identify device log missing */
/* DMA mask for user DMA control: User visible values; DO NOT
renumber */
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index a9ac70ae01abf1..df8de62f4710f5 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -329,11 +329,11 @@ LSM_HOOK(int, 0, tun_dev_create, void)
LSM_HOOK(int, 0, tun_dev_attach_queue, void *security)
LSM_HOOK(int, 0, tun_dev_attach, struct sock *sk, void *security)
LSM_HOOK(int, 0, tun_dev_open, void *security)
-LSM_HOOK(int, 0, sctp_assoc_request, struct sctp_endpoint *ep,
+LSM_HOOK(int, 0, sctp_assoc_request, struct sctp_association *asoc,
struct sk_buff *skb)
LSM_HOOK(int, 0, sctp_bind_connect, struct sock *sk, int optname,
struct sockaddr *address, int addrlen)
-LSM_HOOK(void, LSM_RET_VOID, sctp_sk_clone, struct sctp_endpoint *ep,
+LSM_HOOK(void, LSM_RET_VOID, sctp_sk_clone, struct sctp_association *asoc,
struct sock *sk, struct sock *newsk)
#endif /* CONFIG_SECURITY_NETWORK */
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 0bada4df23fce3..d45b6f6e27fdaf 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -1027,9 +1027,9 @@
* Security hooks for SCTP
*
* @sctp_assoc_request:
- * Passes the @ep and @chunk->skb of the association INIT packet to
+ * Passes the @asoc and @chunk->skb of the association INIT packet to
* the security module.
- * @ep pointer to sctp endpoint structure.
+ * @asoc pointer to sctp association structure.
* @skb pointer to skbuff of association packet.
* Return 0 on success, error on failure.
* @sctp_bind_connect:
@@ -1047,9 +1047,9 @@
* Called whenever a new socket is created by accept(2) (i.e. a TCP
* style socket) or when a socket is 'peeled off' e.g userspace
* calls sctp_peeloff(3).
- * @ep pointer to current sctp endpoint structure.
+ * @asoc pointer to current sctp association structure.
* @sk pointer to current sock structure.
- * @sk pointer to new sock structure.
+ * @newsk pointer to new sock structure.
*
* Security hooks for Infiniband
*
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index eeb818c4fc7820..4850cc5bf81386 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -110,7 +110,6 @@ static inline int migrate_misplaced_page(struct page *page,
*/
#define MIGRATE_PFN_VALID (1UL << 0)
#define MIGRATE_PFN_MIGRATE (1UL << 1)
-#define MIGRATE_PFN_LOCKED (1UL << 2)
#define MIGRATE_PFN_WRITE (1UL << 3)
#define MIGRATE_PFN_SHIFT 6
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index 12c4177f7703d8..ca0683b9e3d11c 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -166,13 +166,13 @@ struct netfs_read_request {
short error; /* 0 or error that occurred */
loff_t i_size; /* Size of the file */
loff_t start; /* Start position */
- pgoff_t no_unlock_page; /* Don't unlock this page after read */
+ pgoff_t no_unlock_folio; /* Don't unlock this folio after read */
refcount_t usage;
unsigned long flags;
#define NETFS_RREQ_INCOMPLETE_IO 0 /* Some ioreqs terminated short or with error */
#define NETFS_RREQ_WRITE_TO_CACHE 1 /* Need to write to the cache */
-#define NETFS_RREQ_NO_UNLOCK_PAGE 2 /* Don't unlock no_unlock_page on completion */
-#define NETFS_RREQ_DONT_UNLOCK_PAGES 3 /* Don't unlock the pages on completion */
+#define NETFS_RREQ_NO_UNLOCK_FOLIO 2 /* Don't unlock no_unlock_folio on completion */
+#define NETFS_RREQ_DONT_UNLOCK_FOLIOS 3 /* Don't unlock the folios on completion */
#define NETFS_RREQ_FAILED 4 /* The request failed */
#define NETFS_RREQ_IN_PROGRESS 5 /* Unlocked when the request completes */
const struct netfs_read_request_ops *netfs_ops;
@@ -190,7 +190,7 @@ struct netfs_read_request_ops {
void (*issue_op)(struct netfs_read_subrequest *subreq);
bool (*is_still_valid)(struct netfs_read_request *rreq);
int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
- struct page *page, void **_fsdata);
+ struct folio *folio, void **_fsdata);
void (*done)(struct netfs_read_request *rreq);
void (*cleanup)(struct address_space *mapping, void *netfs_priv);
};
@@ -240,11 +240,11 @@ extern void netfs_readahead(struct readahead_control *,
const struct netfs_read_request_ops *,
void *);
extern int netfs_readpage(struct file *,
- struct page *,
+ struct folio *,
const struct netfs_read_request_ops *,
void *);
extern int netfs_write_begin(struct file *, struct address_space *,
- loff_t, unsigned int, unsigned int, struct page **,
+ loff_t, unsigned int, unsigned int, struct folio **,
void **,
const struct netfs_read_request_ops *,
void *);
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
index 43c638c51c1f58..119a0c9d2a8b50 100644
--- a/include/linux/page_owner.h
+++ b/include/linux/page_owner.h
@@ -8,9 +8,9 @@
extern struct static_key_false page_owner_inited;
extern struct page_ext_operations page_owner_ops;
-extern void __reset_page_owner(struct page *page, unsigned int order);
+extern void __reset_page_owner(struct page *page, unsigned short order);
extern void __set_page_owner(struct page *page,
- unsigned int order, gfp_t gfp_mask);
+ unsigned short order, gfp_t gfp_mask);
extern void __split_page_owner(struct page *page, unsigned int nr);
extern void __folio_copy_owner(struct folio *newfolio, struct folio *old);
extern void __set_page_owner_migrate_reason(struct page *page, int reason);
@@ -18,14 +18,14 @@ extern void __dump_page_owner(const struct page *page);
extern void pagetypeinfo_showmixedcount_print(struct seq_file *m,
pg_data_t *pgdat, struct zone *zone);
-static inline void reset_page_owner(struct page *page, unsigned int order)
+static inline void reset_page_owner(struct page *page, unsigned short order)
{
if (static_branch_unlikely(&page_owner_inited))
__reset_page_owner(page, order);
}
static inline void set_page_owner(struct page *page,
- unsigned int order, gfp_t gfp_mask)
+ unsigned short order, gfp_t gfp_mask)
{
if (static_branch_unlikely(&page_owner_inited))
__set_page_owner(page, order, gfp_mask);
@@ -52,7 +52,7 @@ static inline void dump_page_owner(const struct page *page)
__dump_page_owner(page);
}
#else
-static inline void reset_page_owner(struct page *page, unsigned int order)
+static inline void reset_page_owner(struct page *page, unsigned short order)
{
}
static inline void set_page_owner(struct page *page,
@@ -60,7 +60,7 @@ static inline void set_page_owner(struct page *page,
{
}
static inline void split_page_owner(struct page *page,
- unsigned int order)
+ unsigned short order)
{
}
static inline void folio_copy_owner(struct folio *newfolio, struct folio *folio)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 6a30916b76e541..1a0c646eb6ff7c 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -253,6 +253,20 @@ static inline struct address_space *page_mapping_file(struct page *page)
return folio_mapping(folio);
}
+/**
+ * folio_inode - Get the host inode for this folio.
+ * @folio: The folio.
+ *
+ * For folios which are in the page cache, return the inode that this folio
+ * belongs to.
+ *
+ * Do not call this for folios which aren't in the page cache.
+ */
+static inline struct inode *folio_inode(struct folio *folio)
+{
+ return folio->mapping->host;
+}
+
static inline bool page_cache_add_speculative(struct page *page, int count)
{
VM_BUG_ON_PAGE(PageTail(page), page);
@@ -280,6 +294,25 @@ static inline void folio_attach_private(struct folio *folio, void *data)
}
/**
+ * folio_change_private - Change private data on a folio.
+ * @folio: Folio to change the data on.
+ * @data: Data to set on the folio.
+ *
+ * Change the private data attached to a folio and return the old
+ * data. The page must previously have had data attached and the data
+ * must be detached before the folio will be freed.
+ *
+ * Return: Data that was previously attached to the folio.
+ */
+static inline void *folio_change_private(struct folio *folio, void *data)
+{
+ void *old = folio_get_private(folio);
+
+ folio->private = data;
+ return old;
+}
+
+/**
* folio_detach_private - Detach private data from a folio.
* @folio: Folio to detach data from.
*
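A small assumed example of what folio_inode() is for: computing how much of a folio lies below EOF without open-coding folio->mapping->host. demo_folio_end_pos() is illustrative; folio_pos(), folio_size() and i_size_read() are existing helpers.

#include <linux/fs.h>
#include <linux/minmax.h>
#include <linux/pagemap.h>

static loff_t demo_folio_end_pos(struct folio *folio)
{
	/* Only valid for folios that are in the page cache. */
	return min_t(loff_t, folio_pos(folio) + folio_size(folio),
		     i_size_read(folio_inode(folio)));
}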
diff --git a/include/linux/pci.h b/include/linux/pci.h
index d0dba7fd98481f..18a75c8e615cdb 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -344,6 +344,7 @@ struct pci_dev {
u16 pcie_flags_reg; /* Cached PCIe Capabilities Register */
unsigned long *dma_alias_mask;/* Mask of enabled devfn aliases */
+ struct pci_driver *driver; /* Driver bound to this device */
u64 dma_mask; /* Mask of the bits of bus address this
device implements. Normally this is
0xffffffff. You only need to change
@@ -1667,6 +1668,7 @@ void pci_cfg_access_lock(struct pci_dev *dev);
bool pci_cfg_access_trylock(struct pci_dev *dev);
void pci_cfg_access_unlock(struct pci_dev *dev);
+void pci_dev_lock(struct pci_dev *dev);
int pci_dev_trylock(struct pci_dev *dev);
void pci_dev_unlock(struct pci_dev *dev);
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 725c9b784e609d..e6dac95e496092 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -429,16 +429,19 @@ struct pwm_device *devm_fwnode_pwm_get(struct device *dev,
#else
static inline struct pwm_device *pwm_request(int pwm_id, const char *label)
{
+ might_sleep();
return ERR_PTR(-ENODEV);
}
static inline void pwm_free(struct pwm_device *pwm)
{
+ might_sleep();
}
static inline int pwm_apply_state(struct pwm_device *pwm,
const struct pwm_state *state)
{
+ might_sleep();
return -ENOTSUPP;
}
@@ -450,6 +453,7 @@ static inline int pwm_adjust_config(struct pwm_device *pwm)
static inline int pwm_config(struct pwm_device *pwm, int duty_ns,
int period_ns)
{
+ might_sleep();
return -EINVAL;
}
@@ -462,11 +466,13 @@ static inline int pwm_capture(struct pwm_device *pwm,
static inline int pwm_enable(struct pwm_device *pwm)
{
+ might_sleep();
return -EINVAL;
}
static inline void pwm_disable(struct pwm_device *pwm)
{
+ might_sleep();
}
static inline int pwm_set_chip_data(struct pwm_device *pwm, void *data)
@@ -493,12 +499,14 @@ static inline struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
unsigned int index,
const char *label)
{
+ might_sleep();
return ERR_PTR(-ENODEV);
}
static inline struct pwm_device *pwm_get(struct device *dev,
const char *consumer)
{
+ might_sleep();
return ERR_PTR(-ENODEV);
}
@@ -506,16 +514,19 @@ static inline struct pwm_device *of_pwm_get(struct device *dev,
struct device_node *np,
const char *con_id)
{
+ might_sleep();
return ERR_PTR(-ENODEV);
}
static inline void pwm_put(struct pwm_device *pwm)
{
+ might_sleep();
}
static inline struct pwm_device *devm_pwm_get(struct device *dev,
const char *consumer)
{
+ might_sleep();
return ERR_PTR(-ENODEV);
}
@@ -523,6 +534,7 @@ static inline struct pwm_device *devm_of_pwm_get(struct device *dev,
struct device_node *np,
const char *con_id)
{
+ might_sleep();
return ERR_PTR(-ENODEV);
}
@@ -530,6 +542,7 @@ static inline struct pwm_device *
devm_fwnode_pwm_get(struct device *dev, struct fwnode_handle *fwnode,
const char *con_id)
{
+ might_sleep();
return ERR_PTR(-ENODEV);
}
#endif
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index bd611e26291d45..47fd1c2d3a579f 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -66,6 +66,8 @@ struct rtc_class_ops {
int (*alarm_irq_enable)(struct device *, unsigned int enabled);
int (*read_offset)(struct device *, long *offset);
int (*set_offset)(struct device *, long offset);
+ int (*param_get)(struct device *, struct rtc_param *param);
+ int (*param_set)(struct device *, struct rtc_param *param);
};
struct rtc_device;
@@ -80,6 +82,7 @@ struct rtc_timer {
/* flags */
#define RTC_DEV_BUSY 0
+#define RTC_NO_CDEV 1
struct rtc_device {
struct device dev;
diff --git a/include/linux/security.h b/include/linux/security.h
index 7e0ba63b5dde27..bbf44a46683262 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -179,7 +179,7 @@ struct xfrm_policy;
struct xfrm_state;
struct xfrm_user_sec_ctx;
struct seq_file;
-struct sctp_endpoint;
+struct sctp_association;
#ifdef CONFIG_MMU
extern unsigned long mmap_min_addr;
@@ -1425,10 +1425,10 @@ int security_tun_dev_create(void);
int security_tun_dev_attach_queue(void *security);
int security_tun_dev_attach(struct sock *sk, void *security);
int security_tun_dev_open(void *security);
-int security_sctp_assoc_request(struct sctp_endpoint *ep, struct sk_buff *skb);
+int security_sctp_assoc_request(struct sctp_association *asoc, struct sk_buff *skb);
int security_sctp_bind_connect(struct sock *sk, int optname,
struct sockaddr *address, int addrlen);
-void security_sctp_sk_clone(struct sctp_endpoint *ep, struct sock *sk,
+void security_sctp_sk_clone(struct sctp_association *asoc, struct sock *sk,
struct sock *newsk);
#else /* CONFIG_SECURITY_NETWORK */
@@ -1631,7 +1631,7 @@ static inline int security_tun_dev_open(void *security)
return 0;
}
-static inline int security_sctp_assoc_request(struct sctp_endpoint *ep,
+static inline int security_sctp_assoc_request(struct sctp_association *asoc,
struct sk_buff *skb)
{
return 0;
@@ -1644,7 +1644,7 @@ static inline int security_sctp_bind_connect(struct sock *sk, int optname,
return 0;
}
-static inline void security_sctp_sk_clone(struct sctp_endpoint *ep,
+static inline void security_sctp_sk_clone(struct sctp_association *asoc,
struct sock *sk,
struct sock *newsk)
{
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 0bd6520329f6fd..686a666d073d51 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -454,9 +454,15 @@ enum {
* all frags to avoid possible bad checksum
*/
SKBFL_SHARED_FRAG = BIT(1),
+
+ /* segment contains only zerocopy data and should not be
+ * charged to the kernel memory.
+ */
+ SKBFL_PURE_ZEROCOPY = BIT(2),
};
#define SKBFL_ZEROCOPY_FRAG (SKBFL_ZEROCOPY_ENABLE | SKBFL_SHARED_FRAG)
+#define SKBFL_ALL_ZEROCOPY (SKBFL_ZEROCOPY_FRAG | SKBFL_PURE_ZEROCOPY)
/*
* The callback notifies userspace to release buffers when skb DMA is done in
@@ -1464,6 +1470,17 @@ static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb)
return is_zcopy ? skb_uarg(skb) : NULL;
}
+static inline bool skb_zcopy_pure(const struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->flags & SKBFL_PURE_ZEROCOPY;
+}
+
+static inline bool skb_pure_zcopy_same(const struct sk_buff *skb1,
+ const struct sk_buff *skb2)
+{
+ return skb_zcopy_pure(skb1) == skb_zcopy_pure(skb2);
+}
+
static inline void net_zcopy_get(struct ubuf_info *uarg)
{
refcount_inc(&uarg->refcnt);
@@ -1528,7 +1545,7 @@ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy_success)
if (!skb_zcopy_is_nouarg(skb))
uarg->callback(skb, uarg, zerocopy_success);
- skb_shinfo(skb)->flags &= ~SKBFL_ZEROCOPY_FRAG;
+ skb_shinfo(skb)->flags &= ~SKBFL_ALL_ZEROCOPY;
}
}
@@ -1675,6 +1692,22 @@ static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
return 0;
}
+/* This variant of skb_unclone() makes sure skb->truesize is not changed */
+static inline int skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri)
+{
+ might_sleep_if(gfpflags_allow_blocking(pri));
+
+ if (skb_cloned(skb)) {
+ unsigned int save = skb->truesize;
+ int res;
+
+ res = pskb_expand_head(skb, 0, 0, pri);
+ skb->truesize = save;
+ return res;
+ }
+ return 0;
+}
+
/**
* skb_header_cloned - is the header a clone
* @skb: buffer to check
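A one-line illustration of where the keeptruesize variant is meant to be used: making a possibly-cloned skb writable on a path where the socket's memory accounting must not move. demo_make_skb_writable() is illustrative only.

#include <linux/gfp.h>
#include <linux/skbuff.h>

static int demo_make_skb_writable(struct sk_buff *skb)
{
	/* Unshare the data without letting pskb_expand_head() grow truesize. */
	return skb_unclone_keeptruesize(skb, GFP_ATOMIC);
}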
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index b4256847c70791..584d94be9c8b06 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -507,6 +507,18 @@ static inline bool sk_psock_strp_enabled(struct sk_psock *psock)
return !!psock->saved_data_ready;
}
+static inline bool sk_is_tcp(const struct sock *sk)
+{
+ return sk->sk_type == SOCK_STREAM &&
+ sk->sk_protocol == IPPROTO_TCP;
+}
+
+static inline bool sk_is_udp(const struct sock *sk)
+{
+ return sk->sk_type == SOCK_DGRAM &&
+ sk->sk_protocol == IPPROTO_UDP;
+}
+
#if IS_ENABLED(CONFIG_NET_SOCK_MSG)
#define BPF_F_STRPARSER (1UL << 1)
diff --git a/include/linux/spi/ads7846.h b/include/linux/spi/ads7846.h
index 1a5eaef3b7f272..d424c1aadf3821 100644
--- a/include/linux/spi/ads7846.h
+++ b/include/linux/spi/ads7846.h
@@ -1,17 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* linux/spi/ads7846.h */
-/* Touchscreen characteristics vary between boards and models. The
- * platform_data for the device's "struct device" holds this information.
- *
- * It's OK if the min/max values are zero.
- */
-enum ads7846_filter {
- ADS7846_FILTER_OK,
- ADS7846_FILTER_REPEAT,
- ADS7846_FILTER_IGNORE,
-};
-
struct ads7846_platform_data {
u16 model; /* 7843, 7845, 7846, 7873. */
u16 vref_delay_usecs; /* 0 for external vref; etc */
@@ -51,10 +40,6 @@ struct ads7846_platform_data {
int gpio_pendown_debounce; /* platform specific debounce time for
* the gpio_pendown */
int (*get_pendown_state)(void);
- int (*filter_init) (const struct ads7846_platform_data *pdata,
- void **filter_data);
- int (*filter) (void *filter_data, int data_idx, int *val);
- void (*filter_cleanup)(void *filter_data);
void (*wait_for_sync)(void);
bool wakeup;
unsigned long irq_flags;
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 2c1fc9212cf287..548a028f2dabb7 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -124,7 +124,6 @@ struct usb_hcd {
#define HCD_FLAG_RH_RUNNING 5 /* root hub is running? */
#define HCD_FLAG_DEAD 6 /* controller has died? */
#define HCD_FLAG_INTF_AUTHORIZED 7 /* authorize interfaces? */
-#define HCD_FLAG_DEFER_RH_REGISTER 8 /* Defer roothub registration */
/* The flags can be tested using these macros; they are likely to
* be slightly faster than test_bit().
@@ -135,7 +134,6 @@ struct usb_hcd {
#define HCD_WAKEUP_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_WAKEUP_PENDING))
#define HCD_RH_RUNNING(hcd) ((hcd)->flags & (1U << HCD_FLAG_RH_RUNNING))
#define HCD_DEAD(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEAD))
-#define HCD_DEFER_RH_REGISTER(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEFER_RH_REGISTER))
/*
* Specifies if interfaces are authorized by default
diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
index 1eaaa93c37bff4..329d63babaeb80 100644
--- a/include/linux/vermagic.h
+++ b/include/linux/vermagic.h
@@ -15,7 +15,7 @@
#else
#define MODULE_VERMAGIC_SMP ""
#endif
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_BUILD
#define MODULE_VERMAGIC_PREEMPT "preempt "
#elif defined(CONFIG_PREEMPT_RT)
#define MODULE_VERMAGIC_PREEMPT "preempt_rt "
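The next hunk replaces the old ZSTD_* interface in include/linux/zstd.h with kernel-style wrappers. As orientation, a minimal single-pass compression sketch using only the names declared below; the demo_* wrapper and its error values are illustrative.

#include <linux/vmalloc.h>
#include <linux/zstd.h>

static ssize_t demo_zstd_compress(const void *src, size_t src_size,
				  void *dst, size_t dst_capacity, int level)
{
	zstd_parameters params = zstd_get_params(level, src_size);
	size_t wksp_size = zstd_cctx_workspace_bound(&params.cParams);
	void *wksp = vmalloc(wksp_size);
	zstd_cctx *cctx;
	size_t out;

	if (!wksp)
		return -ENOMEM;

	cctx = zstd_init_cctx(wksp, wksp_size);
	if (!cctx) {
		vfree(wksp);
		return -EINVAL;
	}

	/* dst should be at least zstd_compress_bound(src_size) bytes. */
	out = zstd_compress_cctx(cctx, dst, dst_capacity, src, src_size, &params);
	vfree(wksp);

	return zstd_is_error(out) ? -EIO : out;
}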
diff --git a/include/linux/zstd.h b/include/linux/zstd.h
index e87f78c9b19ccf..113408eef6ecef 100644
--- a/include/linux/zstd.h
+++ b/include/linux/zstd.h
@@ -1,138 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of https://github.com/facebook/zstd.
- * An additional grant of patent rights can be found in the PATENTS file in the
- * same directory.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation. This program is dual-licensed; you may select
- * either version 2 of the GNU General Public License ("GPL") or BSD license
- * ("BSD").
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of https://github.com/facebook/zstd) and
+ * the GPLv2 (found in the COPYING file in the root directory of
+ * https://github.com/facebook/zstd). You may select, at your option, one of the
+ * above-listed licenses.
*/
-#ifndef ZSTD_H
-#define ZSTD_H
+#ifndef LINUX_ZSTD_H
+#define LINUX_ZSTD_H
-/* ====== Dependency ======*/
-#include <linux/types.h> /* size_t */
+/**
+ * This is a kernel-style API that wraps the upstream zstd API, which cannot be
+ * used directly because the symbols aren't exported. It exposes the minimal
+ * functionality which is currently required by users of zstd in the kernel.
+ * Expose extra functions from lib/zstd/zstd.h as needed.
+ */
+/* ====== Dependency ====== */
+#include <linux/types.h>
+#include <linux/zstd_errors.h>
+#include <linux/zstd_lib.h>
-/*-*****************************************************************************
- * Introduction
+/* ====== Helper Functions ====== */
+/**
+ * zstd_compress_bound() - maximum compressed size in worst case scenario
+ * @src_size: The size of the data to compress.
*
- * zstd, short for Zstandard, is a fast lossless compression algorithm,
- * targeting real-time compression scenarios at zlib-level and better
- * compression ratios. The zstd compression library provides in-memory
- * compression and decompression functions. The library supports compression
- * levels from 1 up to ZSTD_maxCLevel() which is 22. Levels >= 20, labeled
- * ultra, should be used with caution, as they require more memory.
- * Compression can be done in:
- * - a single step, reusing a context (described as Explicit memory management)
- * - unbounded multiple steps (described as Streaming compression)
- * The compression ratio achievable on small data can be highly improved using
- * compression with a dictionary in:
- * - a single step (described as Simple dictionary API)
- * - a single step, reusing a dictionary (described as Fast dictionary API)
- ******************************************************************************/
-
-/*====== Helper functions ======*/
+ * Return: The maximum compressed size in the worst case scenario.
+ */
+size_t zstd_compress_bound(size_t src_size);
/**
- * enum ZSTD_ErrorCode - zstd error codes
+ * zstd_is_error() - tells if a size_t function result is an error code
+ * @code: The function result to check for error.
*
- * Functions that return size_t can be checked for errors using ZSTD_isError()
- * and the ZSTD_ErrorCode can be extracted using ZSTD_getErrorCode().
+ * Return: Non-zero iff the code is an error.
+ */
+unsigned int zstd_is_error(size_t code);
+
+/**
+ * enum zstd_error_code - zstd error codes
*/
-typedef enum {
- ZSTD_error_no_error,
- ZSTD_error_GENERIC,
- ZSTD_error_prefix_unknown,
- ZSTD_error_version_unsupported,
- ZSTD_error_parameter_unknown,
- ZSTD_error_frameParameter_unsupported,
- ZSTD_error_frameParameter_unsupportedBy32bits,
- ZSTD_error_frameParameter_windowTooLarge,
- ZSTD_error_compressionParameter_unsupported,
- ZSTD_error_init_missing,
- ZSTD_error_memory_allocation,
- ZSTD_error_stage_wrong,
- ZSTD_error_dstSize_tooSmall,
- ZSTD_error_srcSize_wrong,
- ZSTD_error_corruption_detected,
- ZSTD_error_checksum_wrong,
- ZSTD_error_tableLog_tooLarge,
- ZSTD_error_maxSymbolValue_tooLarge,
- ZSTD_error_maxSymbolValue_tooSmall,
- ZSTD_error_dictionary_corrupted,
- ZSTD_error_dictionary_wrong,
- ZSTD_error_dictionaryCreation_failed,
- ZSTD_error_maxCode
-} ZSTD_ErrorCode;
+typedef ZSTD_ErrorCode zstd_error_code;
/**
- * ZSTD_maxCLevel() - maximum compression level available
+ * zstd_get_error_code() - translates an error function result to an error code
+ * @code: The function result for which zstd_is_error(code) is true.
*
- * Return: Maximum compression level available.
+ * Return: A unique error code for this error.
*/
-int ZSTD_maxCLevel(void);
+zstd_error_code zstd_get_error_code(size_t code);
+
/**
- * ZSTD_compressBound() - maximum compressed size in worst case scenario
- * @srcSize: The size of the data to compress.
+ * zstd_get_error_name() - translates an error function result to a string
+ * @code: The function result for which zstd_is_error(code) is true.
*
- * Return: The maximum compressed size in the worst case scenario.
+ * Return: An error string corresponding to the error code.
*/
-size_t ZSTD_compressBound(size_t srcSize);
+const char *zstd_get_error_name(size_t code);
+
/**
- * ZSTD_isError() - tells if a size_t function result is an error code
- * @code: The function result to check for error.
+ * zstd_min_clevel() - minimum allowed compression level
*
- * Return: Non-zero iff the code is an error.
+ * Return: The minimum allowed compression level.
*/
-static __attribute__((unused)) unsigned int ZSTD_isError(size_t code)
-{
- return code > (size_t)-ZSTD_error_maxCode;
-}
+int zstd_min_clevel(void);
+
/**
- * ZSTD_getErrorCode() - translates an error function result to a ZSTD_ErrorCode
- * @functionResult: The result of a function for which ZSTD_isError() is true.
+ * zstd_max_clevel() - maximum allowed compression level
*
- * Return: The ZSTD_ErrorCode corresponding to the functionResult or 0
- * if the functionResult isn't an error.
+ * Return: The maximum allowed compression level.
*/
-static __attribute__((unused)) ZSTD_ErrorCode ZSTD_getErrorCode(
- size_t functionResult)
-{
- if (!ZSTD_isError(functionResult))
- return (ZSTD_ErrorCode)0;
- return (ZSTD_ErrorCode)(0 - functionResult);
-}
+int zstd_max_clevel(void);
+
+/* ====== Parameter Selection ====== */
/**
- * enum ZSTD_strategy - zstd compression search strategy
+ * enum zstd_strategy - zstd compression search strategy
*
- * From faster to stronger.
+ * From faster to stronger. See zstd_lib.h.
*/
-typedef enum {
- ZSTD_fast,
- ZSTD_dfast,
- ZSTD_greedy,
- ZSTD_lazy,
- ZSTD_lazy2,
- ZSTD_btlazy2,
- ZSTD_btopt,
- ZSTD_btopt2
-} ZSTD_strategy;
+typedef ZSTD_strategy zstd_strategy;
/**
- * struct ZSTD_compressionParameters - zstd compression parameters
+ * struct zstd_compression_parameters - zstd compression parameters
* @windowLog: Log of the largest match distance. Larger means more
* compression, and more memory needed during decompression.
- * @chainLog: Fully searched segment. Larger means more compression, slower,
- * and more memory (useless for fast).
+ * @chainLog: Fully searched segment. Larger means more compression,
+ * slower, and more memory (useless for fast).
* @hashLog: Dispatch table. Larger means more compression,
* slower, and more memory.
* @searchLog: Number of searches. Larger means more compression and slower.
@@ -141,1017 +100,348 @@ typedef enum {
* @targetLength: Acceptable match size for optimal parser (only). Larger means
* more compression, and slower.
* @strategy: The zstd compression strategy.
+ *
+ * See zstd_lib.h.
*/
-typedef struct {
- unsigned int windowLog;
- unsigned int chainLog;
- unsigned int hashLog;
- unsigned int searchLog;
- unsigned int searchLength;
- unsigned int targetLength;
- ZSTD_strategy strategy;
-} ZSTD_compressionParameters;
+typedef ZSTD_compressionParameters zstd_compression_parameters;
/**
- * struct ZSTD_frameParameters - zstd frame parameters
- * @contentSizeFlag: Controls whether content size will be present in the frame
- * header (when known).
- * @checksumFlag: Controls whether a 32-bit checksum is generated at the end
- * of the frame for error detection.
- * @noDictIDFlag: Controls whether dictID will be saved into the frame header
- * when using dictionary compression.
+ * struct zstd_frame_parameters - zstd frame parameters
+ * @contentSizeFlag: Controls whether content size will be present in the
+ * frame header (when known).
+ * @checksumFlag: Controls whether a 32-bit checksum is generated at the
+ * end of the frame for error detection.
+ * @noDictIDFlag: Controls whether dictID will be saved into the frame
+ * header when using dictionary compression.
*
- * The default value is all fields set to 0.
+ * The default value is all fields set to 0. See zstd_lib.h.
*/
-typedef struct {
- unsigned int contentSizeFlag;
- unsigned int checksumFlag;
- unsigned int noDictIDFlag;
-} ZSTD_frameParameters;
+typedef ZSTD_frameParameters zstd_frame_parameters;
/**
- * struct ZSTD_parameters - zstd parameters
+ * struct zstd_parameters - zstd parameters
* @cParams: The compression parameters.
* @fParams: The frame parameters.
*/
-typedef struct {
- ZSTD_compressionParameters cParams;
- ZSTD_frameParameters fParams;
-} ZSTD_parameters;
+typedef ZSTD_parameters zstd_parameters;
/**
- * ZSTD_getCParams() - returns ZSTD_compressionParameters for selected level
- * @compressionLevel: The compression level from 1 to ZSTD_maxCLevel().
- * @estimatedSrcSize: The estimated source size to compress or 0 if unknown.
- * @dictSize: The dictionary size or 0 if a dictionary isn't being used.
+ * zstd_get_params() - returns zstd_parameters for selected level
+ * @level: The compression level
+ * @estimated_src_size: The estimated source size to compress or 0
+ * if unknown.
*
- * Return: The selected ZSTD_compressionParameters.
+ * Return: The selected zstd_parameters.
*/
-ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel,
- unsigned long long estimatedSrcSize, size_t dictSize);
+zstd_parameters zstd_get_params(int level,
+ unsigned long long estimated_src_size);
-/**
- * ZSTD_getParams() - returns ZSTD_parameters for selected level
- * @compressionLevel: The compression level from 1 to ZSTD_maxCLevel().
- * @estimatedSrcSize: The estimated source size to compress or 0 if unknown.
- * @dictSize: The dictionary size or 0 if a dictionary isn't being used.
- *
- * The same as ZSTD_getCParams() except also selects the default frame
- * parameters (all zero).
- *
- * Return: The selected ZSTD_parameters.
- */
-ZSTD_parameters ZSTD_getParams(int compressionLevel,
- unsigned long long estimatedSrcSize, size_t dictSize);
+/* ====== Single-pass Compression ====== */
-/*-*************************************
- * Explicit memory management
- **************************************/
+typedef ZSTD_CCtx zstd_cctx;
/**
- * ZSTD_CCtxWorkspaceBound() - amount of memory needed to initialize a ZSTD_CCtx
- * @cParams: The compression parameters to be used for compression.
+ * zstd_cctx_workspace_bound() - max memory needed to initialize a zstd_cctx
+ * @parameters: The compression parameters to be used.
*
* If multiple compression parameters might be used, the caller must call
- * ZSTD_CCtxWorkspaceBound() for each set of parameters and use the maximum
+ * zstd_cctx_workspace_bound() for each set of parameters and use the maximum
* size.
*
- * Return: A lower bound on the size of the workspace that is passed to
- * ZSTD_initCCtx().
+ * Return: A lower bound on the size of the workspace that is passed to
+ * zstd_init_cctx().
*/
-size_t ZSTD_CCtxWorkspaceBound(ZSTD_compressionParameters cParams);
+size_t zstd_cctx_workspace_bound(const zstd_compression_parameters *parameters);
/**
- * struct ZSTD_CCtx - the zstd compression context
- *
- * When compressing many times it is recommended to allocate a context just once
- * and reuse it for each successive compression operation.
- */
-typedef struct ZSTD_CCtx_s ZSTD_CCtx;
-/**
- * ZSTD_initCCtx() - initialize a zstd compression context
- * @workspace: The workspace to emplace the context into. It must outlive
- * the returned context.
- * @workspaceSize: The size of workspace. Use ZSTD_CCtxWorkspaceBound() to
- * determine how large the workspace must be.
- *
- * Return: A compression context emplaced into workspace.
- */
-ZSTD_CCtx *ZSTD_initCCtx(void *workspace, size_t workspaceSize);
-
-/**
- * ZSTD_compressCCtx() - compress src into dst
- * @ctx: The context. Must have been initialized with a workspace at
- * least as large as ZSTD_CCtxWorkspaceBound(params.cParams).
- * @dst: The buffer to compress src into.
- * @dstCapacity: The size of the destination buffer. May be any size, but
- * ZSTD_compressBound(srcSize) is guaranteed to be large enough.
- * @src: The data to compress.
- * @srcSize: The size of the data to compress.
- * @params: The parameters to use for compression. See ZSTD_getParams().
- *
- * Return: The compressed size or an error, which can be checked using
- * ZSTD_isError().
- */
-size_t ZSTD_compressCCtx(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize, ZSTD_parameters params);
-
-/**
- * ZSTD_DCtxWorkspaceBound() - amount of memory needed to initialize a ZSTD_DCtx
- *
- * Return: A lower bound on the size of the workspace that is passed to
- * ZSTD_initDCtx().
- */
-size_t ZSTD_DCtxWorkspaceBound(void);
-
-/**
- * struct ZSTD_DCtx - the zstd decompression context
- *
- * When decompressing many times it is recommended to allocate a context just
- * once and reuse it for each successive decompression operation.
- */
-typedef struct ZSTD_DCtx_s ZSTD_DCtx;
-/**
- * ZSTD_initDCtx() - initialize a zstd decompression context
- * @workspace: The workspace to emplace the context into. It must outlive
- * the returned context.
- * @workspaceSize: The size of workspace. Use ZSTD_DCtxWorkspaceBound() to
- * determine how large the workspace must be.
- *
- * Return: A decompression context emplaced into workspace.
- */
-ZSTD_DCtx *ZSTD_initDCtx(void *workspace, size_t workspaceSize);
-
-/**
- * ZSTD_decompressDCtx() - decompress zstd compressed src into dst
- * @ctx: The decompression context.
- * @dst: The buffer to decompress src into.
- * @dstCapacity: The size of the destination buffer. Must be at least as large
- * as the decompressed size. If the caller cannot upper bound the
- * decompressed size, then it's better to use the streaming API.
- * @src: The zstd compressed data to decompress. Multiple concatenated
- * frames and skippable frames are allowed.
- * @srcSize: The exact size of the data to decompress.
- *
- * Return: The decompressed size or an error, which can be checked using
- * ZSTD_isError().
- */
-size_t ZSTD_decompressDCtx(ZSTD_DCtx *ctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize);
-
-/*-************************
- * Simple dictionary API
- **************************/
-
-/**
- * ZSTD_compress_usingDict() - compress src into dst using a dictionary
- * @ctx: The context. Must have been initialized with a workspace at
- * least as large as ZSTD_CCtxWorkspaceBound(params.cParams).
- * @dst: The buffer to compress src into.
- * @dstCapacity: The size of the destination buffer. May be any size, but
- * ZSTD_compressBound(srcSize) is guaranteed to be large enough.
- * @src: The data to compress.
- * @srcSize: The size of the data to compress.
- * @dict: The dictionary to use for compression.
- * @dictSize: The size of the dictionary.
- * @params: The parameters to use for compression. See ZSTD_getParams().
- *
- * Compression using a predefined dictionary. The same dictionary must be used
- * during decompression.
- *
- * Return: The compressed size or an error, which can be checked using
- * ZSTD_isError().
- */
-size_t ZSTD_compress_usingDict(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize, const void *dict, size_t dictSize,
- ZSTD_parameters params);
-
-/**
- * ZSTD_decompress_usingDict() - decompress src into dst using a dictionary
- * @ctx: The decompression context.
- * @dst: The buffer to decompress src into.
- * @dstCapacity: The size of the destination buffer. Must be at least as large
- * as the decompressed size. If the caller cannot upper bound the
- * decompressed size, then it's better to use the streaming API.
- * @src: The zstd compressed data to decompress. Multiple concatenated
- * frames and skippable frames are allowed.
- * @srcSize: The exact size of the data to decompress.
- * @dict: The dictionary to use for decompression. The same dictionary
- * must've been used to compress the data.
- * @dictSize: The size of the dictionary.
- *
- * Return: The decompressed size or an error, which can be checked using
- * ZSTD_isError().
- */
-size_t ZSTD_decompress_usingDict(ZSTD_DCtx *ctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize, const void *dict, size_t dictSize);
-
-/*-**************************
- * Fast dictionary API
- ***************************/
-
-/**
- * ZSTD_CDictWorkspaceBound() - memory needed to initialize a ZSTD_CDict
- * @cParams: The compression parameters to be used for compression.
+ * zstd_init_cctx() - initialize a zstd compression context
+ * @workspace: The workspace to emplace the context into. It must outlive
+ * the returned context.
+ * @workspace_size: The size of workspace. Use zstd_cctx_workspace_bound() to
+ * determine how large the workspace must be.
*
- * Return: A lower bound on the size of the workspace that is passed to
- * ZSTD_initCDict().
- */
-size_t ZSTD_CDictWorkspaceBound(ZSTD_compressionParameters cParams);
-
-/**
- * struct ZSTD_CDict - a digested dictionary to be used for compression
+ * Return: A zstd compression context or NULL on error.
*/
-typedef struct ZSTD_CDict_s ZSTD_CDict;
+zstd_cctx *zstd_init_cctx(void *workspace, size_t workspace_size);
/**
- * ZSTD_initCDict() - initialize a digested dictionary for compression
- * @dictBuffer: The dictionary to digest. The buffer is referenced by the
- * ZSTD_CDict so it must outlive the returned ZSTD_CDict.
- * @dictSize: The size of the dictionary.
- * @params: The parameters to use for compression. See ZSTD_getParams().
- * @workspace: The workspace. It must outlive the returned ZSTD_CDict.
- * @workspaceSize: The workspace size. Must be at least
- * ZSTD_CDictWorkspaceBound(params.cParams).
+ * zstd_compress_cctx() - compress src into dst with the initialized parameters
+ * @cctx: The context. Must have been initialized with zstd_init_cctx().
+ * @dst: The buffer to compress src into.
+ * @dst_capacity: The size of the destination buffer. May be any size, but
+ * ZSTD_compressBound(srcSize) is guaranteed to be large enough.
+ * @src: The data to compress.
+ * @src_size: The size of the data to compress.
+ * @parameters: The compression parameters to be used.
*
- * When compressing multiple messages / blocks with the same dictionary it is
- * recommended to load it just once. The ZSTD_CDict merely references the
- * dictBuffer, so it must outlive the returned ZSTD_CDict.
- *
- * Return: The digested dictionary emplaced into workspace.
+ * Return: The compressed size or an error, which can be checked using
+ * zstd_is_error().
*/
-ZSTD_CDict *ZSTD_initCDict(const void *dictBuffer, size_t dictSize,
- ZSTD_parameters params, void *workspace, size_t workspaceSize);
+size_t zstd_compress_cctx(zstd_cctx *cctx, void *dst, size_t dst_capacity,
+ const void *src, size_t src_size, const zstd_parameters *parameters);
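For orientation, a minimal sketch of how a caller might drive this single-pass path; it is not part of the diff. It assumes zstd_get_params() and zstd_cctx_workspace_bound() (declared earlier in this header, outside this hunk) and zstd_is_error(); error handling is abbreviated, and dst should be ZSTD_compressBound(src_size) bytes to guarantee success.

#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/zstd.h>

static ssize_t example_compress_once(void *dst, size_t dst_capacity,
                                     const void *src, size_t src_size)
{
        /* level-3 parameters; zstd_get_params() is assumed from earlier in the header */
        zstd_parameters params = zstd_get_params(3, src_size);
        size_t ws_size = zstd_cctx_workspace_bound(&params.cParams);
        void *ws = vmalloc(ws_size);
        zstd_cctx *cctx;
        size_t ret;

        if (!ws)
                return -ENOMEM;
        cctx = zstd_init_cctx(ws, ws_size);
        if (!cctx) {
                vfree(ws);
                return -EINVAL;
        }
        ret = zstd_compress_cctx(cctx, dst, dst_capacity, src, src_size, &params);
        vfree(ws);
        return zstd_is_error(ret) ? -EINVAL : (ssize_t)ret;
}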
-/**
- * ZSTD_compress_usingCDict() - compress src into dst using a ZSTD_CDict
- * @ctx: The context. Must have been initialized with a workspace at
- * least as large as ZSTD_CCtxWorkspaceBound(cParams) where
- * cParams are the compression parameters used to initialize the
- * cdict.
- * @dst: The buffer to compress src into.
- * @dstCapacity: The size of the destination buffer. May be any size, but
- * ZSTD_compressBound(srcSize) is guaranteed to be large enough.
- * @src: The data to compress.
- * @srcSize: The size of the data to compress.
- * @cdict: The digested dictionary to use for compression.
- * @params: The parameters to use for compression. See ZSTD_getParams().
- *
- * Compression using a digested dictionary. The same dictionary must be used
- * during decompression.
- *
- * Return: The compressed size or an error, which can be checked using
- * ZSTD_isError().
- */
-size_t ZSTD_compress_usingCDict(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize, const ZSTD_CDict *cdict);
+/* ====== Single-pass Decompression ====== */
+typedef ZSTD_DCtx zstd_dctx;
/**
- * ZSTD_DDictWorkspaceBound() - memory needed to initialize a ZSTD_DDict
+ * zstd_dctx_workspace_bound() - max memory needed to initialize a zstd_dctx
*
- * Return: A lower bound on the size of the workspace that is passed to
- * ZSTD_initDDict().
- */
-size_t ZSTD_DDictWorkspaceBound(void);
-
-/**
- * struct ZSTD_DDict - a digested dictionary to be used for decompression
+ * Return: A lower bound on the size of the workspace that is passed to
+ * zstd_init_dctx().
*/
-typedef struct ZSTD_DDict_s ZSTD_DDict;
+size_t zstd_dctx_workspace_bound(void);
/**
- * ZSTD_initDDict() - initialize a digested dictionary for decompression
- * @dictBuffer: The dictionary to digest. The buffer is referenced by the
- * ZSTD_DDict so it must outlive the returned ZSTD_DDict.
- * @dictSize: The size of the dictionary.
- * @workspace: The workspace. It must outlive the returned ZSTD_DDict.
- * @workspaceSize: The workspace size. Must be at least
- * ZSTD_DDictWorkspaceBound().
- *
- * When decompressing multiple messages / blocks with the same dictionary it is
- * recommended to load it just once. The ZSTD_DDict merely references the
- * dictBuffer, so it must outlive the returned ZSTD_DDict.
+ * zstd_init_dctx() - initialize a zstd decompression context
+ * @workspace: The workspace to emplace the context into. It must outlive
+ * the returned context.
+ * @workspace_size: The size of workspace. Use zstd_dctx_workspace_bound() to
+ * determine how large the workspace must be.
*
- * Return: The digested dictionary emplaced into workspace.
+ * Return: A zstd decompression context or NULL on error.
*/
-ZSTD_DDict *ZSTD_initDDict(const void *dictBuffer, size_t dictSize,
- void *workspace, size_t workspaceSize);
+zstd_dctx *zstd_init_dctx(void *workspace, size_t workspace_size);
/**
- * ZSTD_decompress_usingDDict() - decompress src into dst using a ZSTD_DDict
- * @ctx: The decompression context.
- * @dst: The buffer to decompress src into.
- * @dstCapacity: The size of the destination buffer. Must be at least as large
- * as the decompressed size. If the caller cannot upper bound the
- * decompressed size, then it's better to use the streaming API.
- * @src: The zstd compressed data to decompress. Multiple concatenated
- * frames and skippable frames are allowed.
- * @srcSize: The exact size of the data to decompress.
- * @ddict: The digested dictionary to use for decompression. The same
- * dictionary must've been used to compress the data.
+ * zstd_decompress_dctx() - decompress zstd compressed src into dst
+ * @dctx: The decompression context.
+ * @dst: The buffer to decompress src into.
+ * @dst_capacity: The size of the destination buffer. Must be at least as large
+ * as the decompressed size. If the caller cannot upper bound the
+ * decompressed size, then it's better to use the streaming API.
+ * @src: The zstd compressed data to decompress. Multiple concatenated
+ * frames and skippable frames are allowed.
+ * @src_size: The exact size of the data to decompress.
*
- * Return: The decompressed size or an error, which can be checked using
- * ZSTD_isError().
+ * Return: The decompressed size or an error, which can be checked using
+ * zstd_is_error().
*/
-size_t ZSTD_decompress_usingDDict(ZSTD_DCtx *dctx, void *dst,
- size_t dstCapacity, const void *src, size_t srcSize,
- const ZSTD_DDict *ddict);
+size_t zstd_decompress_dctx(zstd_dctx *dctx, void *dst, size_t dst_capacity,
+ const void *src, size_t src_size);
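A matching sketch for the single-pass decompression path, built only from the declarations above plus zstd_is_error() from earlier in the header; again illustrative rather than part of the diff. The caller must know an upper bound on the decompressed size, otherwise the streaming API further down is the right tool.

static ssize_t example_decompress_once(void *dst, size_t dst_capacity,
                                       const void *src, size_t src_size)
{
        size_t ws_size = zstd_dctx_workspace_bound();
        void *ws = vmalloc(ws_size);
        zstd_dctx *dctx;
        size_t ret;

        if (!ws)
                return -ENOMEM;
        dctx = zstd_init_dctx(ws, ws_size);
        if (!dctx) {
                vfree(ws);
                return -EINVAL;
        }
        ret = zstd_decompress_dctx(dctx, dst, dst_capacity, src, src_size);
        vfree(ws);
        return zstd_is_error(ret) ? -EIO : (ssize_t)ret;
}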
-
-/*-**************************
- * Streaming
- ***************************/
+/* ====== Streaming Buffers ====== */
/**
- * struct ZSTD_inBuffer - input buffer for streaming
+ * struct zstd_in_buffer - input buffer for streaming
* @src: Start of the input buffer.
* @size: Size of the input buffer.
* @pos: Position where reading stopped. Will be updated.
* Necessarily 0 <= pos <= size.
+ *
+ * See zstd_lib.h.
*/
-typedef struct ZSTD_inBuffer_s {
- const void *src;
- size_t size;
- size_t pos;
-} ZSTD_inBuffer;
+typedef ZSTD_inBuffer zstd_in_buffer;
/**
- * struct ZSTD_outBuffer - output buffer for streaming
+ * struct zstd_out_buffer - output buffer for streaming
* @dst: Start of the output buffer.
* @size: Size of the output buffer.
* @pos: Position where writing stopped. Will be updated.
* Necessarily 0 <= pos <= size.
+ *
+ * See zstd_lib.h.
*/
-typedef struct ZSTD_outBuffer_s {
- void *dst;
- size_t size;
- size_t pos;
-} ZSTD_outBuffer;
+typedef ZSTD_outBuffer zstd_out_buffer;
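For concreteness, a hypothetical initialization of the two descriptors used by the streaming calls below (src_page, dst_page, src_len and dst_len are made-up names); `pos` always starts at 0 and is advanced by the library:

        zstd_in_buffer in = {
                .src  = src_page,       /* bytes to consume */
                .size = src_len,
                .pos  = 0,              /* advanced by the zstd_*_stream() calls */
        };
        zstd_out_buffer out = {
                .dst  = dst_page,       /* where produced bytes land */
                .size = dst_len,
                .pos  = 0,              /* bytes produced so far */
        };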
+/* ====== Streaming Compression ====== */
-
-/*-*****************************************************************************
- * Streaming compression - HowTo
- *
- * A ZSTD_CStream object is required to track streaming operation.
- * Use ZSTD_initCStream() to initialize a ZSTD_CStream object.
- * ZSTD_CStream objects can be reused multiple times on consecutive compression
- * operations. It is recommended to re-use ZSTD_CStream in situations where many
- * streaming operations will be achieved consecutively. Use one separate
- * ZSTD_CStream per thread for parallel execution.
- *
- * Use ZSTD_compressStream() repetitively to consume input stream.
- * The function will automatically update both `pos` fields.
- * Note that it may not consume the entire input, in which case `pos < size`,
- * and it's up to the caller to present again remaining data.
- * It returns a hint for the preferred number of bytes to use as an input for
- * the next function call.
- *
- * At any moment, it's possible to flush whatever data remains within internal
- * buffer, using ZSTD_flushStream(). `output->pos` will be updated. There might
- * still be some content left within the internal buffer if `output->size` is
- * too small. It returns the number of bytes left in the internal buffer and
- * must be called until it returns 0.
- *
- * ZSTD_endStream() instructs to finish a frame. It will perform a flush and
- * write frame epilogue. The epilogue is required for decoders to consider a
- * frame completed. Similar to ZSTD_flushStream(), it may not be able to flush
- * the full content if `output->size` is too small. In which case, call again
- * ZSTD_endStream() to complete the flush. It returns the number of bytes left
- * in the internal buffer and must be called until it returns 0.
- ******************************************************************************/
+typedef ZSTD_CStream zstd_cstream;
/**
- * ZSTD_CStreamWorkspaceBound() - memory needed to initialize a ZSTD_CStream
- * @cParams: The compression parameters to be used for compression.
+ * zstd_cstream_workspace_bound() - memory needed to initialize a zstd_cstream
+ * @cparams: The compression parameters to be used for compression.
*
* Return: A lower bound on the size of the workspace that is passed to
- * ZSTD_initCStream() and ZSTD_initCStream_usingCDict().
- */
-size_t ZSTD_CStreamWorkspaceBound(ZSTD_compressionParameters cParams);
-
-/**
- * struct ZSTD_CStream - the zstd streaming compression context
- */
-typedef struct ZSTD_CStream_s ZSTD_CStream;
-
-/*===== ZSTD_CStream management functions =====*/
-/**
- * ZSTD_initCStream() - initialize a zstd streaming compression context
- * @params: The zstd compression parameters.
- * @pledgedSrcSize: If params.fParams.contentSizeFlag == 1 then the caller must
- * pass the source size (zero means empty source). Otherwise,
- * the caller may optionally pass the source size, or zero if
- * unknown.
- * @workspace: The workspace to emplace the context into. It must outlive
- * the returned context.
- * @workspaceSize: The size of workspace.
- * Use ZSTD_CStreamWorkspaceBound(params.cParams) to determine
- * how large the workspace must be.
- *
- * Return: The zstd streaming compression context.
+ * zstd_init_cstream().
*/
-ZSTD_CStream *ZSTD_initCStream(ZSTD_parameters params,
- unsigned long long pledgedSrcSize, void *workspace,
- size_t workspaceSize);
+size_t zstd_cstream_workspace_bound(const zstd_compression_parameters *cparams);
/**
- * ZSTD_initCStream_usingCDict() - initialize a streaming compression context
- * @cdict: The digested dictionary to use for compression.
- * @pledgedSrcSize: Optionally the source size, or zero if unknown.
- * @workspace: The workspace to emplace the context into. It must outlive
- * the returned context.
- * @workspaceSize: The size of workspace. Call ZSTD_CStreamWorkspaceBound()
- * with the cParams used to initialize the cdict to determine
- * how large the workspace must be.
+ * zstd_init_cstream() - initialize a zstd streaming compression context
+ * @parameters: The zstd parameters to use for compression.
+ * @pledged_src_size: If parameters->fParams.contentSizeFlag == 1 then the caller
+ * must pass the source size (zero means empty source).
+ * Otherwise, the caller may optionally pass the source
+ * size, or zero if unknown.
+ * @workspace: The workspace to emplace the context into. It must outlive
+ * the returned context.
+ * @workspace_size: The size of workspace.
+ * Use zstd_cstream_workspace_bound(&parameters->cParams) to
+ * determine how large the workspace must be.
*
- * Return: The zstd streaming compression context.
+ * Return: The zstd streaming compression context or NULL on error.
*/
-ZSTD_CStream *ZSTD_initCStream_usingCDict(const ZSTD_CDict *cdict,
- unsigned long long pledgedSrcSize, void *workspace,
- size_t workspaceSize);
+zstd_cstream *zstd_init_cstream(const zstd_parameters *parameters,
+ unsigned long long pledged_src_size, void *workspace, size_t workspace_size);
-/*===== Streaming compression functions =====*/
/**
- * ZSTD_resetCStream() - reset the context using parameters from creation
- * @zcs: The zstd streaming compression context to reset.
- * @pledgedSrcSize: Optionally the source size, or zero if unknown.
+ * zstd_reset_cstream() - reset the context using parameters from creation
+ * @cstream: The zstd streaming compression context to reset.
+ * @pledged_src_size: Optionally the source size, or zero if unknown.
*
* Resets the context using the parameters from creation. Skips dictionary
- * loading, since it can be reused. If `pledgedSrcSize` is non-zero the frame
+ * loading, since it can be reused. If `pledged_src_size` is non-zero the frame
* content size is always written into the frame header.
*
- * Return: Zero or an error, which can be checked using ZSTD_isError().
+ * Return: Zero or an error, which can be checked using
+ * zstd_is_error().
*/
-size_t ZSTD_resetCStream(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize);
+size_t zstd_reset_cstream(zstd_cstream *cstream,
+ unsigned long long pledged_src_size);
+
/**
- * ZSTD_compressStream() - streaming compress some of input into output
- * @zcs: The zstd streaming compression context.
- * @output: Destination buffer. `output->pos` is updated to indicate how much
- * compressed data was written.
- * @input: Source buffer. `input->pos` is updated to indicate how much data was
- * read. Note that it may not consume the entire input, in which case
- * `input->pos < input->size`, and it's up to the caller to present
- * remaining data again.
+ * zstd_compress_stream() - streaming compress some of input into output
+ * @cstream: The zstd streaming compression context.
+ * @output: Destination buffer. `output->pos` is updated to indicate how much
+ * compressed data was written.
+ * @input: Source buffer. `input->pos` is updated to indicate how much data
+ * was read. Note that it may not consume the entire input, in which
+ * case `input->pos < input->size`, and it's up to the caller to
+ * present remaining data again.
*
* The `input` and `output` buffers may be any size. Guaranteed to make some
* forward progress if `input` and `output` are not empty.
*
- * Return: A hint for the number of bytes to use as the input for the next
- * function call or an error, which can be checked using
- * ZSTD_isError().
+ * Return: A hint for the number of bytes to use as the input for the next
+ * function call or an error, which can be checked using
+ * zstd_is_error().
*/
-size_t ZSTD_compressStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output,
- ZSTD_inBuffer *input);
+size_t zstd_compress_stream(zstd_cstream *cstream, zstd_out_buffer *output,
+ zstd_in_buffer *input);
+
/**
- * ZSTD_flushStream() - flush internal buffers into output
- * @zcs: The zstd streaming compression context.
- * @output: Destination buffer. `output->pos` is updated to indicate how much
- * compressed data was written.
+ * zstd_flush_stream() - flush internal buffers into output
+ * @cstream: The zstd streaming compression context.
+ * @output: Destination buffer. `output->pos` is updated to indicate how much
+ * compressed data was written.
*
- * ZSTD_flushStream() must be called until it returns 0, meaning all the data
- * has been flushed. Since ZSTD_flushStream() causes a block to be ended,
+ * zstd_flush_stream() must be called until it returns 0, meaning all the data
+ * has been flushed. Since zstd_flush_stream() causes a block to be ended,
* calling it too often will degrade the compression ratio.
*
- * Return: The number of bytes still present within internal buffers or an
- * error, which can be checked using ZSTD_isError().
+ * Return: The number of bytes still present within internal buffers or an
+ * error, which can be checked using zstd_is_error().
*/
-size_t ZSTD_flushStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output);
-/**
- * ZSTD_endStream() - flush internal buffers into output and end the frame
- * @zcs: The zstd streaming compression context.
- * @output: Destination buffer. `output->pos` is updated to indicate how much
- * compressed data was written.
- *
- * ZSTD_endStream() must be called until it returns 0, meaning all the data has
- * been flushed and the frame epilogue has been written.
- *
- * Return: The number of bytes still present within internal buffers or an
- * error, which can be checked using ZSTD_isError().
- */
-size_t ZSTD_endStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output);
+size_t zstd_flush_stream(zstd_cstream *cstream, zstd_out_buffer *output);
/**
- * ZSTD_CStreamInSize() - recommended size for the input buffer
- *
- * Return: The recommended size for the input buffer.
- */
-size_t ZSTD_CStreamInSize(void);
-/**
- * ZSTD_CStreamOutSize() - recommended size for the output buffer
+ * zstd_end_stream() - flush internal buffers into output and end the frame
+ * @cstream: The zstd streaming compression context.
+ * @output: Destination buffer. `output->pos` is updated to indicate how much
+ * compressed data was written.
*
- * When the output buffer is at least this large, it is guaranteed to be large
- * enough to flush at least one complete compressed block.
+ * zstd_end_stream() must be called until it returns 0, meaning all the data has
+ * been flushed and the frame epilogue has been written.
*
- * Return: The recommended size for the output buffer.
+ * Return: The number of bytes still present within internal buffers or an
+ * error, which can be checked using zstd_is_error().
*/
-size_t ZSTD_CStreamOutSize(void);
+size_t zstd_end_stream(zstd_cstream *cstream, zstd_out_buffer *output);
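Putting the streaming compression calls together, a sketch (not part of the diff): `cs` is assumed to come from zstd_init_cstream() over a zstd_cstream_workspace_bound()-sized workspace, input is fed through zstd_compress_stream() until consumed, and zstd_end_stream() is looped until it returns 0 so the frame epilogue is written. The `sink` callback is a stand-in for wherever the compressed bytes go.

static int example_stream_compress(zstd_cstream *cs,
                                   const void *src, size_t src_size,
                                   void *obuf, size_t obuf_size,
                                   void (*sink)(const void *data, size_t len))
{
        zstd_in_buffer in = { .src = src, .size = src_size, .pos = 0 };
        size_t ret;

        while (in.pos < in.size) {
                zstd_out_buffer out = { .dst = obuf, .size = obuf_size, .pos = 0 };

                ret = zstd_compress_stream(cs, &out, &in);
                if (zstd_is_error(ret))
                        return -EIO;
                sink(obuf, out.pos);            /* hand off compressed bytes */
        }
        do {    /* write the frame epilogue; loop until fully flushed */
                zstd_out_buffer out = { .dst = obuf, .size = obuf_size, .pos = 0 };

                ret = zstd_end_stream(cs, &out);
                if (zstd_is_error(ret))
                        return -EIO;
                sink(obuf, out.pos);
        } while (ret != 0);
        return 0;
}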
+/* ====== Streaming Decompression ====== */
-
-/*-*****************************************************************************
- * Streaming decompression - HowTo
- *
- * A ZSTD_DStream object is required to track streaming operations.
- * Use ZSTD_initDStream() to initialize a ZSTD_DStream object.
- * ZSTD_DStream objects can be re-used multiple times.
- *
- * Use ZSTD_decompressStream() repetitively to consume your input.
- * The function will update both `pos` fields.
- * If `input->pos < input->size`, some input has not been consumed.
- * It's up to the caller to present again remaining data.
- * If `output->pos < output->size`, decoder has flushed everything it could.
- * Returns 0 iff a frame is completely decoded and fully flushed.
- * Otherwise it returns a suggested next input size that will never load more
- * than the current frame.
- ******************************************************************************/
+typedef ZSTD_DStream zstd_dstream;
/**
- * ZSTD_DStreamWorkspaceBound() - memory needed to initialize a ZSTD_DStream
- * @maxWindowSize: The maximum window size allowed for compressed frames.
+ * zstd_dstream_workspace_bound() - memory needed to initialize a zstd_dstream
+ * @max_window_size: The maximum window size allowed for compressed frames.
*
- * Return: A lower bound on the size of the workspace that is passed to
- * ZSTD_initDStream() and ZSTD_initDStream_usingDDict().
+ * Return: A lower bound on the size of the workspace that is passed
+ * to zstd_init_dstream().
*/
-size_t ZSTD_DStreamWorkspaceBound(size_t maxWindowSize);
+size_t zstd_dstream_workspace_bound(size_t max_window_size);
/**
- * struct ZSTD_DStream - the zstd streaming decompression context
- */
-typedef struct ZSTD_DStream_s ZSTD_DStream;
-/*===== ZSTD_DStream management functions =====*/
-/**
- * ZSTD_initDStream() - initialize a zstd streaming decompression context
- * @maxWindowSize: The maximum window size allowed for compressed frames.
- * @workspace: The workspace to emplace the context into. It must outlive
- * the returned context.
- * @workspaceSize: The size of workspace.
- * Use ZSTD_DStreamWorkspaceBound(maxWindowSize) to determine
- * how large the workspace must be.
- *
- * Return: The zstd streaming decompression context.
- */
-ZSTD_DStream *ZSTD_initDStream(size_t maxWindowSize, void *workspace,
- size_t workspaceSize);
-/**
- * ZSTD_initDStream_usingDDict() - initialize streaming decompression context
- * @maxWindowSize: The maximum window size allowed for compressed frames.
- * @ddict: The digested dictionary to use for decompression.
- * @workspace: The workspace to emplace the context into. It must outlive
- * the returned context.
- * @workspaceSize: The size of workspace.
- * Use ZSTD_DStreamWorkspaceBound(maxWindowSize) to determine
- * how large the workspace must be.
+ * zstd_init_dstream() - initialize a zstd streaming decompression context
+ * @max_window_size: The maximum window size allowed for compressed frames.
+ * @workspace: The workspace to emplace the context into. It must outlive
+ * the returned context.
+ * @workspace_size: The size of workspace.
+ * Use zstd_dstream_workspace_bound(max_window_size) to
+ * determine how large the workspace must be.
*
- * Return: The zstd streaming decompression context.
+ * Return: The zstd streaming decompression context.
*/
-ZSTD_DStream *ZSTD_initDStream_usingDDict(size_t maxWindowSize,
- const ZSTD_DDict *ddict, void *workspace, size_t workspaceSize);
+zstd_dstream *zstd_init_dstream(size_t max_window_size, void *workspace,
+ size_t workspace_size);
-/*===== Streaming decompression functions =====*/
/**
- * ZSTD_resetDStream() - reset the context using parameters from creation
- * @zds: The zstd streaming decompression context to reset.
+ * zstd_reset_dstream() - reset the context using parameters from creation
+ * @dstream: The zstd streaming decompression context to reset.
*
* Resets the context using the parameters from creation. Skips dictionary
* loading, since it can be reused.
*
- * Return: Zero or an error, which can be checked using ZSTD_isError().
+ * Return: Zero or an error, which can be checked using zstd_is_error().
*/
-size_t ZSTD_resetDStream(ZSTD_DStream *zds);
+size_t zstd_reset_dstream(zstd_dstream *dstream);
+
/**
- * ZSTD_decompressStream() - streaming decompress some of input into output
- * @zds: The zstd streaming decompression context.
- * @output: Destination buffer. `output.pos` is updated to indicate how much
- * decompressed data was written.
- * @input: Source buffer. `input.pos` is updated to indicate how much data was
- * read. Note that it may not consume the entire input, in which case
- * `input.pos < input.size`, and it's up to the caller to present
- * remaining data again.
+ * zstd_decompress_stream() - streaming decompress some of input into output
+ * @dstream: The zstd streaming decompression context.
+ * @output: Destination buffer. `output.pos` is updated to indicate how much
+ * decompressed data was written.
+ * @input: Source buffer. `input.pos` is updated to indicate how much data was
+ * read. Note that it may not consume the entire input, in which case
+ * `input.pos < input.size`, and it's up to the caller to present
+ * remaining data again.
*
* The `input` and `output` buffers may be any size. Guaranteed to make some
* forward progress if `input` and `output` are not empty.
- * ZSTD_decompressStream() will not consume the last byte of the frame until
+ * zstd_decompress_stream() will not consume the last byte of the frame until
* the entire frame is flushed.
*
- * Return: Returns 0 iff a frame is completely decoded and fully flushed.
- * Otherwise returns a hint for the number of bytes to use as the input
- * for the next function call or an error, which can be checked using
- * ZSTD_isError(). The size hint will never load more than the frame.
+ * Return: Returns 0 iff a frame is completely decoded and fully flushed.
+ * Otherwise returns a hint for the number of bytes to use as the
+ * input for the next function call or an error, which can be checked
+ * using zstd_is_error(). The size hint will never load more than the
+ * frame.
*/
-size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output,
- ZSTD_inBuffer *input);
+size_t zstd_decompress_stream(zstd_dstream *dstream, zstd_out_buffer *output,
+ zstd_in_buffer *input);
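And the decompression counterpart, again as a sketch: zstd_decompress_stream() is called until it returns 0 for a fully decoded and flushed frame; a non-zero, non-error return is only a size hint for the next input. `ds` is assumed to come from zstd_init_dstream(), and `sink` is a hypothetical consumer of the output.

static int example_stream_decompress(zstd_dstream *ds,
                                     const void *src, size_t src_size,
                                     void *obuf, size_t obuf_size,
                                     void (*sink)(const void *data, size_t len))
{
        zstd_in_buffer in = { .src = src, .size = src_size, .pos = 0 };
        size_t ret;

        do {
                zstd_out_buffer out = { .dst = obuf, .size = obuf_size, .pos = 0 };

                ret = zstd_decompress_stream(ds, &out, &in);
                if (zstd_is_error(ret))
                        return -EIO;
                sink(obuf, out.pos);            /* hand off decompressed bytes */
                if (ret != 0 && in.pos == in.size)
                        return -EAGAIN;         /* frame needs more input */
        } while (ret != 0);                     /* 0: frame decoded and flushed */
        return 0;
}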
-/**
- * ZSTD_DStreamInSize() - recommended size for the input buffer
- *
- * Return: The recommended size for the input buffer.
- */
-size_t ZSTD_DStreamInSize(void);
-/**
- * ZSTD_DStreamOutSize() - recommended size for the output buffer
- *
- * When the output buffer is at least this large, it is guaranteed to be large
- * enough to flush at least one complete decompressed block.
- *
- * Return: The recommended size for the output buffer.
- */
-size_t ZSTD_DStreamOutSize(void);
-
-
-/* --- Constants ---*/
-#define ZSTD_MAGICNUMBER 0xFD2FB528 /* >= v0.8.0 */
-#define ZSTD_MAGIC_SKIPPABLE_START 0x184D2A50U
-
-#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
-#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2)
-
-#define ZSTD_WINDOWLOG_MAX_32 27
-#define ZSTD_WINDOWLOG_MAX_64 27
-#define ZSTD_WINDOWLOG_MAX \
- ((unsigned int)(sizeof(size_t) == 4 \
- ? ZSTD_WINDOWLOG_MAX_32 \
- : ZSTD_WINDOWLOG_MAX_64))
-#define ZSTD_WINDOWLOG_MIN 10
-#define ZSTD_HASHLOG_MAX ZSTD_WINDOWLOG_MAX
-#define ZSTD_HASHLOG_MIN 6
-#define ZSTD_CHAINLOG_MAX (ZSTD_WINDOWLOG_MAX+1)
-#define ZSTD_CHAINLOG_MIN ZSTD_HASHLOG_MIN
-#define ZSTD_HASHLOG3_MAX 17
-#define ZSTD_SEARCHLOG_MAX (ZSTD_WINDOWLOG_MAX-1)
-#define ZSTD_SEARCHLOG_MIN 1
-/* only for ZSTD_fast, other strategies are limited to 6 */
-#define ZSTD_SEARCHLENGTH_MAX 7
-/* only for ZSTD_btopt, other strategies are limited to 4 */
-#define ZSTD_SEARCHLENGTH_MIN 3
-#define ZSTD_TARGETLENGTH_MIN 4
-#define ZSTD_TARGETLENGTH_MAX 999
-
-/* for static allocation */
-#define ZSTD_FRAMEHEADERSIZE_MAX 18
-#define ZSTD_FRAMEHEADERSIZE_MIN 6
-#define ZSTD_frameHeaderSize_prefix 5
-#define ZSTD_frameHeaderSize_min ZSTD_FRAMEHEADERSIZE_MIN
-#define ZSTD_frameHeaderSize_max ZSTD_FRAMEHEADERSIZE_MAX
-/* magic number + skippable frame length */
-#define ZSTD_skippableHeaderSize 8
-
-
-/*-*************************************
- * Compressed size functions
- **************************************/
-
-/**
- * ZSTD_findFrameCompressedSize() - returns the size of a compressed frame
- * @src: Source buffer. It should point to the start of a zstd encoded frame
- * or a skippable frame.
- * @srcSize: The size of the source buffer. It must be at least as large as the
- * size of the frame.
- *
- * Return: The compressed size of the frame pointed to by `src` or an error,
- * which can be check with ZSTD_isError().
- * Suitable to pass to ZSTD_decompress() or similar functions.
- */
-size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize);
-
-/*-*************************************
- * Decompressed size functions
- **************************************/
-/**
- * ZSTD_getFrameContentSize() - returns the content size in a zstd frame header
- * @src: It should point to the start of a zstd encoded frame.
- * @srcSize: The size of the source buffer. It must be at least as large as the
- * frame header. `ZSTD_frameHeaderSize_max` is always large enough.
- *
- * Return: The frame content size stored in the frame header if known.
- * `ZSTD_CONTENTSIZE_UNKNOWN` if the content size isn't stored in the
- * frame header. `ZSTD_CONTENTSIZE_ERROR` on invalid input.
- */
-unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
+/* ====== Frame Inspection Functions ====== */
/**
- * ZSTD_findDecompressedSize() - returns decompressed size of a series of frames
- * @src: It should point to the start of a series of zstd encoded and/or
- * skippable frames.
- * @srcSize: The exact size of the series of frames.
+ * zstd_find_frame_compressed_size() - returns the size of a compressed frame
+ * @src: Source buffer. It should point to the start of a zstd encoded
+ * frame or a skippable frame.
+ * @src_size: The size of the source buffer. It must be at least as large as the
+ * size of the frame.
*
- * If any zstd encoded frame in the series doesn't have the frame content size
- * set, `ZSTD_CONTENTSIZE_UNKNOWN` is returned. But frame content size is always
- * set when using ZSTD_compress(). The decompressed size can be very large.
- * If the source is untrusted, the decompressed size could be wrong or
- * intentionally modified. Always ensure the result fits within the
- * application's authorized limits. ZSTD_findDecompressedSize() handles multiple
- * frames, and so it must traverse the input to read each frame header. This is
- * efficient as most of the data is skipped, however it does mean that all frame
- * data must be present and valid.
- *
- * Return: Decompressed size of all the data contained in the frames if known.
- * `ZSTD_CONTENTSIZE_UNKNOWN` if the decompressed size is unknown.
- * `ZSTD_CONTENTSIZE_ERROR` if an error occurred.
- */
-unsigned long long ZSTD_findDecompressedSize(const void *src, size_t srcSize);
-
-/*-*************************************
- * Advanced compression functions
- **************************************/
-/**
- * ZSTD_checkCParams() - ensure parameter values remain within authorized range
- * @cParams: The zstd compression parameters.
- *
- * Return: Zero or an error, which can be checked using ZSTD_isError().
+ * Return: The compressed size of the frame pointed to by `src` or an error,
+ * which can be checked with zstd_is_error().
+ * Suitable to pass to ZSTD_decompress() or similar functions.
*/
-size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams);
+size_t zstd_find_frame_compressed_size(const void *src, size_t src_size);
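A small sketch of the typical use: advancing through a buffer of concatenated zstd and/or skippable frames, one frame per iteration (`buf` and `buf_size` are hypothetical).

static int example_walk_frames(const u8 *buf, size_t buf_size)
{
        size_t pos = 0;

        while (pos < buf_size) {
                size_t frame_size =
                        zstd_find_frame_compressed_size(buf + pos, buf_size - pos);

                if (zstd_is_error(frame_size))
                        return -EINVAL;         /* truncated or corrupt input */
                /* decompress or skip the frame at buf + pos (frame_size bytes) */
                pos += frame_size;
        }
        return 0;
}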
/**
- * ZSTD_adjustCParams() - optimize parameters for a given srcSize and dictSize
- * @srcSize: Optionally the estimated source size, or zero if unknown.
- * @dictSize: Optionally the estimated dictionary size, or zero if unknown.
- *
- * Return: The optimized parameters.
- */
-ZSTD_compressionParameters ZSTD_adjustCParams(
- ZSTD_compressionParameters cParams, unsigned long long srcSize,
- size_t dictSize);
-
-/*--- Advanced decompression functions ---*/
-
-/**
- * ZSTD_isFrame() - returns true iff the buffer starts with a valid frame
- * @buffer: The source buffer to check.
- * @size: The size of the source buffer, must be at least 4 bytes.
- *
- * Return: True iff the buffer starts with a zstd or skippable frame identifier.
- */
-unsigned int ZSTD_isFrame(const void *buffer, size_t size);
-
-/**
- * ZSTD_getDictID_fromDict() - returns the dictionary id stored in a dictionary
- * @dict: The dictionary buffer.
- * @dictSize: The size of the dictionary buffer.
- *
- * Return: The dictionary id stored within the dictionary or 0 if the
- * dictionary is not a zstd dictionary. If it returns 0 the
- * dictionary can still be loaded as a content-only dictionary.
- */
-unsigned int ZSTD_getDictID_fromDict(const void *dict, size_t dictSize);
-
-/**
- * ZSTD_getDictID_fromDDict() - returns the dictionary id stored in a ZSTD_DDict
- * @ddict: The ddict to find the id of.
- *
- * Return: The dictionary id stored within `ddict` or 0 if the dictionary is not
- * a zstd dictionary. If it returns 0 `ddict` will be loaded as a
- * content-only dictionary.
- */
-unsigned int ZSTD_getDictID_fromDDict(const ZSTD_DDict *ddict);
-
-/**
- * ZSTD_getDictID_fromFrame() - returns the dictionary id stored in a zstd frame
- * @src: Source buffer. It must be a zstd encoded frame.
- * @srcSize: The size of the source buffer. It must be at least as large as the
- * frame header. `ZSTD_frameHeaderSize_max` is always large enough.
- *
- * Return: The dictionary id required to decompress the frame stored within
- * `src` or 0 if the dictionary id could not be decoded. It can return
- * 0 if the frame does not require a dictionary, the dictionary id
- * wasn't stored in the frame, `src` is not a zstd frame, or `srcSize`
- * is too small.
- */
-unsigned int ZSTD_getDictID_fromFrame(const void *src, size_t srcSize);
-
-/**
- * struct ZSTD_frameParams - zstd frame parameters stored in the frame header
- * @frameContentSize: The frame content size, or 0 if not present.
+ * struct zstd_frame_params - zstd frame parameters stored in the frame header
+ * @frameContentSize: The frame content size, or ZSTD_CONTENTSIZE_UNKNOWN if not
+ * present.
* @windowSize: The window size, or 0 if the frame is a skippable frame.
+ * @blockSizeMax: The maximum block size.
+ * @frameType: The frame type (zstd or skippable)
+ * @headerSize: The size of the frame header.
* @dictID: The dictionary id, or 0 if not present.
* @checksumFlag: Whether a checksum was used.
+ *
+ * See zstd_lib.h.
*/
-typedef struct {
- unsigned long long frameContentSize;
- unsigned int windowSize;
- unsigned int dictID;
- unsigned int checksumFlag;
-} ZSTD_frameParams;
+typedef ZSTD_frameHeader zstd_frame_header;
/**
- * ZSTD_getFrameParams() - extracts parameters from a zstd or skippable frame
- * @fparamsPtr: On success the frame parameters are written here.
- * @src: The source buffer. It must point to a zstd or skippable frame.
- * @srcSize: The size of the source buffer. `ZSTD_frameHeaderSize_max` is
- * always large enough to succeed.
+ * zstd_get_frame_header() - extracts parameters from a zstd or skippable frame
+ * @params: On success the frame parameters are written here.
+ * @src: The source buffer. It must point to a zstd or skippable frame.
+ * @src_size: The size of the source buffer.
*
- * Return: 0 on success. If more data is required it returns how many bytes
- * must be provided to make forward progress. Otherwise it returns
- * an error, which can be checked using ZSTD_isError().
+ * Return: 0 on success. If more data is required it returns how many bytes
+ * must be provided to make forward progress. Otherwise it returns
+ * an error, which can be checked using zstd_is_error().
*/
-size_t ZSTD_getFrameParams(ZSTD_frameParams *fparamsPtr, const void *src,
- size_t srcSize);
-
-/*-*****************************************************************************
- * Buffer-less and synchronous inner streaming functions
- *
- * This is an advanced API, giving full control over buffer management, for
- * users which need direct control over memory.
- * But it's also a complex one, with many restrictions (documented below).
- * Prefer using normal streaming API for an easier experience
- ******************************************************************************/
-
-/*-*****************************************************************************
- * Buffer-less streaming compression (synchronous mode)
- *
- * A ZSTD_CCtx object is required to track streaming operations.
- * Use ZSTD_initCCtx() to initialize a context.
- * ZSTD_CCtx object can be re-used multiple times within successive compression
- * operations.
- *
- * Start by initializing a context.
- * Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary
- * compression,
- * or ZSTD_compressBegin_advanced(), for finer parameter control.
- * It's also possible to duplicate a reference context which has already been
- * initialized, using ZSTD_copyCCtx()
- *
- * Then, consume your input using ZSTD_compressContinue().
- * There are some important considerations to keep in mind when using this
- * advanced function :
- * - ZSTD_compressContinue() has no internal buffer. It uses externally provided
- * buffer only.
- * - Interface is synchronous : input is consumed entirely and produce 1+
- * (or more) compressed blocks.
- * - Caller must ensure there is enough space in `dst` to store compressed data
- * under worst case scenario. Worst case evaluation is provided by
- * ZSTD_compressBound().
- * ZSTD_compressContinue() doesn't guarantee recover after a failed
- * compression.
- * - ZSTD_compressContinue() presumes prior input ***is still accessible and
- * unmodified*** (up to maximum distance size, see WindowLog).
- * It remembers all previous contiguous blocks, plus one separated memory
- * segment (which can itself consists of multiple contiguous blocks)
- * - ZSTD_compressContinue() detects that prior input has been overwritten when
- * `src` buffer overlaps. In which case, it will "discard" the relevant memory
- * section from its history.
- *
- * Finish a frame with ZSTD_compressEnd(), which will write the last block(s)
- * and optional checksum. It's possible to use srcSize==0, in which case, it
- * will write a final empty block to end the frame. Without last block mark,
- * frames will be considered unfinished (corrupted) by decoders.
- *
- * `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress some new
- * frame.
- ******************************************************************************/
-
-/*===== Buffer-less streaming compression functions =====*/
-size_t ZSTD_compressBegin(ZSTD_CCtx *cctx, int compressionLevel);
-size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx *cctx, const void *dict,
- size_t dictSize, int compressionLevel);
-size_t ZSTD_compressBegin_advanced(ZSTD_CCtx *cctx, const void *dict,
- size_t dictSize, ZSTD_parameters params,
- unsigned long long pledgedSrcSize);
-size_t ZSTD_copyCCtx(ZSTD_CCtx *cctx, const ZSTD_CCtx *preparedCCtx,
- unsigned long long pledgedSrcSize);
-size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx *cctx, const ZSTD_CDict *cdict,
- unsigned long long pledgedSrcSize);
-size_t ZSTD_compressContinue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize);
-size_t ZSTD_compressEnd(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize);
-
-
-
-/*-*****************************************************************************
- * Buffer-less streaming decompression (synchronous mode)
- *
- * A ZSTD_DCtx object is required to track streaming operations.
- * Use ZSTD_initDCtx() to initialize a context.
- * A ZSTD_DCtx object can be re-used multiple times.
- *
- * First typical operation is to retrieve frame parameters, using
- * ZSTD_getFrameParams(). It fills a ZSTD_frameParams structure which provide
- * important information to correctly decode the frame, such as the minimum
- * rolling buffer size to allocate to decompress data (`windowSize`), and the
- * dictionary ID used.
- * Note: content size is optional, it may not be present. 0 means unknown.
- * Note that these values could be wrong, either because of data malformation,
- * or because an attacker is spoofing deliberate false information. As a
- * consequence, check that values remain within valid application range,
- * especially `windowSize`, before allocation. Each application can set its own
- * limit, depending on local restrictions. For extended interoperability, it is
- * recommended to support at least 8 MB.
- * Frame parameters are extracted from the beginning of the compressed frame.
- * Data fragment must be large enough to ensure successful decoding, typically
- * `ZSTD_frameHeaderSize_max` bytes.
- * Result: 0: successful decoding, the `ZSTD_frameParams` structure is filled.
- * >0: `srcSize` is too small, provide at least this many bytes.
- * errorCode, which can be tested using ZSTD_isError().
- *
- * Start decompression, with ZSTD_decompressBegin() or
- * ZSTD_decompressBegin_usingDict(). Alternatively, you can copy a prepared
- * context, using ZSTD_copyDCtx().
- *
- * Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue()
- * alternatively.
- * ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize'
- * to ZSTD_decompressContinue().
- * ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will
- * fail.
- *
- * The result of ZSTD_decompressContinue() is the number of bytes regenerated
- * within 'dst' (necessarily <= dstCapacity). It can be zero, which is not an
- * error; it just means ZSTD_decompressContinue() has decoded some metadata
- * item. It can also be an error code, which can be tested with ZSTD_isError().
- *
- * ZSTD_decompressContinue() needs previous data blocks during decompression, up
- * to `windowSize`. They should preferably be located contiguously, prior to
- * current block. Alternatively, a round buffer of sufficient size is also
- * possible. Sufficient size is determined by frame parameters.
- * ZSTD_decompressContinue() is very sensitive to contiguity, if 2 blocks don't
- * follow each other, make sure that either the compressor breaks contiguity at
- * the same place, or that previous contiguous segment is large enough to
- * properly handle maximum back-reference.
- *
- * A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
- * Context can then be reset to start a new decompression.
- *
- * Note: it's possible to know if next input to present is a header or a block,
- * using ZSTD_nextInputType(). This information is not required to properly
- * decode a frame.
- *
- * == Special case: skippable frames ==
- *
- * Skippable frames allow integration of user-defined data into a flow of
- * concatenated frames. Skippable frames will be ignored (skipped) by a
- * decompressor. The format of skippable frames is as follows:
- * a) Skippable frame ID - 4 Bytes, Little endian format, any value from
- * 0x184D2A50 to 0x184D2A5F
- * b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits
- * c) Frame Content - any content (User Data) of length equal to Frame Size
- * For skippable frames ZSTD_decompressContinue() always returns 0.
- * For skippable frames ZSTD_getFrameParams() returns fparamsPtr->windowLog==0
- * what means that a frame is skippable.
- * Note: If fparamsPtr->frameContentSize==0, it is ambiguous: the frame might
- * actually be a zstd encoded frame with no content. For purposes of
- * decompression, it is valid in both cases to skip the frame using
- * ZSTD_findFrameCompressedSize() to find its size in bytes.
- * It also returns frame size as fparamsPtr->frameContentSize.
- ******************************************************************************/
-
-/*===== Buffer-less streaming decompression functions =====*/
-size_t ZSTD_decompressBegin(ZSTD_DCtx *dctx);
-size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx *dctx, const void *dict,
- size_t dictSize);
-void ZSTD_copyDCtx(ZSTD_DCtx *dctx, const ZSTD_DCtx *preparedDCtx);
-size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx *dctx);
-size_t ZSTD_decompressContinue(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize);
-typedef enum {
- ZSTDnit_frameHeader,
- ZSTDnit_blockHeader,
- ZSTDnit_block,
- ZSTDnit_lastBlock,
- ZSTDnit_checksum,
- ZSTDnit_skippableFrame
-} ZSTD_nextInputType_e;
-ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx *dctx);
-
-/*-*****************************************************************************
- * Block functions
- *
- * Block functions produce and decode raw zstd blocks, without frame metadata.
- * Frame metadata cost is typically ~18 bytes, which can be non-negligible for
- * very small blocks (< 100 bytes). User will have to take in charge required
- * information to regenerate data, such as compressed and content sizes.
- *
- * A few rules to respect:
- * - Compressing and decompressing require a context structure
- * + Use ZSTD_initCCtx() and ZSTD_initDCtx()
- * - It is necessary to init context before starting
- * + compression : ZSTD_compressBegin()
- * + decompression : ZSTD_decompressBegin()
- * + variants _usingDict() are also allowed
- * + copyCCtx() and copyDCtx() work too
- * - Block size is limited, it must be <= ZSTD_getBlockSizeMax()
- * + If you need to compress more, cut data into multiple blocks
- * + Consider using the regular ZSTD_compress() instead, as frame metadata
- * costs become negligible when source size is large.
- * - When a block is considered not compressible enough, ZSTD_compressBlock()
- * result will be zero. In which case, nothing is produced into `dst`.
- * + User must test for such outcome and deal directly with uncompressed data
- * + ZSTD_decompressBlock() doesn't accept uncompressed data as input!!!
- * + In case of multiple successive blocks, decoder must be informed of
- * uncompressed block existence to follow proper history. Use
- * ZSTD_insertBlock() in such a case.
- ******************************************************************************/
-
-/* Define for static allocation */
-#define ZSTD_BLOCKSIZE_ABSOLUTEMAX (128 * 1024)
-/*===== Raw zstd block functions =====*/
-size_t ZSTD_getBlockSizeMax(ZSTD_CCtx *cctx);
-size_t ZSTD_compressBlock(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize);
-size_t ZSTD_decompressBlock(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity,
- const void *src, size_t srcSize);
-size_t ZSTD_insertBlock(ZSTD_DCtx *dctx, const void *blockStart,
- size_t blockSize);
+size_t zstd_get_frame_header(zstd_frame_header *params, const void *src,
+ size_t src_size);
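A sketch of the pattern the removed HowTo text described: read the frame header first, sanity-check the window size against a local limit (the header is attacker-controlled), and only then size the streaming workspace. EXAMPLE_WINDOW_MAX is a made-up policy limit, not anything defined by this header.

#define EXAMPLE_WINDOW_MAX (8 * 1024 * 1024)    /* hypothetical local limit */

static ssize_t example_dstream_workspace_size(const void *src, size_t src_size)
{
        zstd_frame_header fh;
        size_t ret = zstd_get_frame_header(&fh, src, src_size);

        if (zstd_is_error(ret))
                return -EINVAL;
        if (ret != 0)
                return -EAGAIN;                 /* need at least `ret` header bytes */
        if (fh.windowSize > EXAMPLE_WINDOW_MAX)
                return -E2BIG;                  /* refuse oversized windows */
        return zstd_dstream_workspace_bound(fh.windowSize);
}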
-#endif /* ZSTD_H */
+#endif /* LINUX_ZSTD_H */
diff --git a/include/linux/zstd_errors.h b/include/linux/zstd_errors.h
new file mode 100644
index 00000000000000..58b6dd45a969f7
--- /dev/null
+++ b/include/linux/zstd_errors.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_ERRORS_H_398273423
+#define ZSTD_ERRORS_H_398273423
+
+
+/*===== dependency =====*/
+#include <linux/types.h> /* size_t */
+
+
+/* ===== ZSTDERRORLIB_API : control library symbols visibility ===== */
+#define ZSTDERRORLIB_VISIBILITY
+#define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY
+
+/*-*********************************************
+ * Error codes list
+ *-*********************************************
+ * Error codes _values_ are pinned down since v1.3.1 only.
+ * Therefore, don't rely on values if you may link to any version < v1.3.1.
+ *
+ * Only values < 100 are considered stable.
+ *
+ * note 1 : this API shall be used with static linking only.
+ * dynamic linking is not yet officially supported.
+ * note 2 : Prefer relying on the enum than on its value whenever possible
+ * This is the only supported way to use the error list < v1.3.1
+ * note 3 : ZSTD_isError() is always correct, whatever the library version.
+ **********************************************/
+typedef enum {
+ ZSTD_error_no_error = 0,
+ ZSTD_error_GENERIC = 1,
+ ZSTD_error_prefix_unknown = 10,
+ ZSTD_error_version_unsupported = 12,
+ ZSTD_error_frameParameter_unsupported = 14,
+ ZSTD_error_frameParameter_windowTooLarge = 16,
+ ZSTD_error_corruption_detected = 20,
+ ZSTD_error_checksum_wrong = 22,
+ ZSTD_error_dictionary_corrupted = 30,
+ ZSTD_error_dictionary_wrong = 32,
+ ZSTD_error_dictionaryCreation_failed = 34,
+ ZSTD_error_parameter_unsupported = 40,
+ ZSTD_error_parameter_outOfBound = 42,
+ ZSTD_error_tableLog_tooLarge = 44,
+ ZSTD_error_maxSymbolValue_tooLarge = 46,
+ ZSTD_error_maxSymbolValue_tooSmall = 48,
+ ZSTD_error_stage_wrong = 60,
+ ZSTD_error_init_missing = 62,
+ ZSTD_error_memory_allocation = 64,
+ ZSTD_error_workSpace_tooSmall= 66,
+ ZSTD_error_dstSize_tooSmall = 70,
+ ZSTD_error_srcSize_wrong = 72,
+ ZSTD_error_dstBuffer_null = 74,
+ /* following error codes are __NOT STABLE__, they can be removed or changed in future versions */
+ ZSTD_error_frameIndex_tooLarge = 100,
+ ZSTD_error_seekableIO = 102,
+ ZSTD_error_dstBuffer_wrong = 104,
+ ZSTD_error_srcBuffer_wrong = 105,
+ ZSTD_error_maxCode = 120 /* never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead */
+} ZSTD_ErrorCode;
+
+/*! ZSTD_getErrorCode() :
+ convert a `size_t` function result into a `ZSTD_ErrorCode` enum type,
+ which can be used to compare with enum list published above */
+ZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult);
+ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code); /*< Same as ZSTD_getErrorName, but using a `ZSTD_ErrorCode` enum argument */
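For completeness, a hedged sketch of turning a size_t result into a readable diagnostic with the two declarations above; ZSTD_isError() comes from zstd_lib.h and pr_warn() from linux/printk.h. `what` and `ret` are whatever the caller has on hand.

static void example_report_error(const char *what, size_t ret)
{
        if (ZSTD_isError(ret))
                pr_warn("zstd %s failed: %s\n", what,
                        ZSTD_getErrorString(ZSTD_getErrorCode(ret)));
}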
+
+
+
+#endif /* ZSTD_ERRORS_H_398273423 */
diff --git a/include/linux/zstd_lib.h b/include/linux/zstd_lib.h
new file mode 100644
index 00000000000000..b8c7dbf98390fb
--- /dev/null
+++ b/include/linux/zstd_lib.h
@@ -0,0 +1,2432 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_H_235446
+#define ZSTD_H_235446
+
+/* ====== Dependency ======*/
+#include <linux/limits.h> /* INT_MAX */
+#include <linux/types.h> /* size_t */
+
+
+/* ===== ZSTDLIB_API : control library symbols visibility ===== */
+#define ZSTDLIB_VISIBILITY
+#define ZSTDLIB_API ZSTDLIB_VISIBILITY
+
+
+/* *****************************************************************************
+ Introduction
+
+ zstd, short for Zstandard, is a fast lossless compression algorithm, targeting
+ real-time compression scenarios at zlib-level and better compression ratios.
+ The zstd compression library provides in-memory compression and decompression
+ functions.
+
+ The library supports regular compression levels from 1 up to ZSTD_maxCLevel(),
+ which is currently 22. Levels >= 20, labeled `--ultra`, should be used with
+ caution, as they require more memory. The library also offers negative
+ compression levels, which extend the range of speed vs. ratio preferences.
+ The lower the level, the faster the speed (at the cost of compression).
+
+ Compression can be done in:
+ - a single step (described as Simple API)
+ - a single step, reusing a context (described as Explicit context)
+ - unbounded multiple steps (described as Streaming compression)
+
+ The compression ratio achievable on small data can be highly improved using
+ a dictionary. Dictionary compression can be performed in:
+ - a single step (described as Simple dictionary API)
+ - a single step, reusing a dictionary (described as Bulk-processing
+ dictionary API)
+
+ Advanced experimental functions can be accessed using
+ `#define ZSTD_STATIC_LINKING_ONLY` before including zstd.h.
+
+ Advanced experimental APIs should never be used with a dynamically-linked
+ library. They are not "stable"; their definitions or signatures may change in
+ the future. Only static linking is allowed.
+*******************************************************************************/
+
+/*------ Version ------*/
+#define ZSTD_VERSION_MAJOR 1
+#define ZSTD_VERSION_MINOR 4
+#define ZSTD_VERSION_RELEASE 10
+#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
+
+/*! ZSTD_versionNumber() :
+ * Return runtime library version, the value is (MAJOR*100*100 + MINOR*100 + RELEASE). */
+ZSTDLIB_API unsigned ZSTD_versionNumber(void);
+
+#define ZSTD_LIB_VERSION ZSTD_VERSION_MAJOR.ZSTD_VERSION_MINOR.ZSTD_VERSION_RELEASE
+#define ZSTD_QUOTE(str) #str
+#define ZSTD_EXPAND_AND_QUOTE(str) ZSTD_QUOTE(str)
+#define ZSTD_VERSION_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_LIB_VERSION)
+
+/*! ZSTD_versionString() :
+ * Return runtime library version, like "1.4.5". Requires v1.3.0+. */
+ZSTDLIB_API const char* ZSTD_versionString(void);
+
+/* *************************************
+ * Default constant
+ ***************************************/
+#ifndef ZSTD_CLEVEL_DEFAULT
+# define ZSTD_CLEVEL_DEFAULT 3
+#endif
+
+/* *************************************
+ * Constants
+ ***************************************/
+
+/* All magic numbers are supposed read/written to/from files/memory using little-endian convention */
+#define ZSTD_MAGICNUMBER 0xFD2FB528 /* valid since v0.8.0 */
+#define ZSTD_MAGIC_DICTIONARY 0xEC30A437 /* valid since v0.7.0 */
+#define ZSTD_MAGIC_SKIPPABLE_START 0x184D2A50 /* all 16 values, from 0x184D2A50 to 0x184D2A5F, signal the beginning of a skippable frame */
+#define ZSTD_MAGIC_SKIPPABLE_MASK 0xFFFFFFF0
+
+#define ZSTD_BLOCKSIZELOG_MAX 17
+#define ZSTD_BLOCKSIZE_MAX (1<<ZSTD_BLOCKSIZELOG_MAX)
+
+
+
+/* *************************************
+* Simple API
+***************************************/
+/*! ZSTD_compress() :
+ * Compresses `src` content as a single zstd compressed frame into already allocated `dst`.
+ * Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`.
+ * @return : compressed size written into `dst` (<= `dstCapacity`),
+ * or an error code if it fails (which can be tested using ZSTD_isError()). */
+ZSTDLIB_API size_t ZSTD_compress( void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ int compressionLevel);
+
+/*! ZSTD_decompress() :
+ * `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames.
+ * `dstCapacity` is an upper bound of originalSize to regenerate.
+ * If user cannot imply a maximum upper bound, it's better to use streaming mode to decompress data.
+ * @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
+ * or an errorCode if it fails (which can be tested using ZSTD_isError()). */
+ZSTDLIB_API size_t ZSTD_decompress( void* dst, size_t dstCapacity,
+ const void* src, size_t compressedSize);
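An illustrative round trip with the two calls above, using the upstream-style API; in-kernel users normally reach this functionality through the zstd_* wrappers in linux/zstd.h, so treat this purely as a sketch. It assumes `cbuf` holds ZSTD_compressBound(src_size) bytes and `dbuf` holds src_size bytes; ZSTD_compressBound() and ZSTD_isError() are declared further down in this header.

static int example_round_trip(void *cbuf, void *dbuf,
                              const void *src, size_t src_size)
{
        size_t csize = ZSTD_compress(cbuf, ZSTD_compressBound(src_size),
                                     src, src_size, 3 /* compression level */);
        size_t dsize;

        if (ZSTD_isError(csize))
                return -EIO;
        dsize = ZSTD_decompress(dbuf, src_size, cbuf, csize);
        if (ZSTD_isError(dsize) || dsize != src_size)
                return -EIO;            /* round trip must reproduce src exactly */
        return 0;
}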
+
+/*! ZSTD_getFrameContentSize() : requires v1.3.0+
+ * `src` should point to the start of a ZSTD encoded frame.
+ * `srcSize` must be at least as large as the frame header.
+ * hint : any size >= `ZSTD_frameHeaderSize_max` is large enough.
+ * @return : - decompressed size of `src` frame content, if known
+ * - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
+ * - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small)
+ * note 1 : a 0 return value means the frame is valid but "empty".
+ * note 2 : decompressed size is an optional field, it may not be present, typically in streaming mode.
+ * When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
+ * In which case, it's necessary to use streaming mode to decompress data.
+ * Optionally, application can rely on some implicit limit,
+ * as ZSTD_decompress() only needs an upper bound of decompressed size.
+ * (For example, data could be necessarily cut into blocks <= 16 KB).
+ * note 3 : decompressed size is always present when compression is completed using single-pass functions,
+ * such as ZSTD_compress(), ZSTD_compressCCtx() ZSTD_compress_usingDict() or ZSTD_compress_usingCDict().
+ * note 4 : decompressed size can be very large (64-bits value),
+ * potentially larger than what local system can handle as a single memory segment.
+ * In which case, it's necessary to use streaming mode to decompress data.
+ * note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified.
+ * Always ensure return value fits within application's authorized limits.
+ * Each application can set its own limits.
+ * note 6 : This function replaces ZSTD_getDecompressedSize() */
+#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
+#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2)
+ZSTDLIB_API unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
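A sketch of the recommended check before trusting the header field: treat UNKNOWN and ERROR separately and cap the value against a caller-chosen limit, since the stored size may be absent, wrong, or deliberately falsified.

static ssize_t example_dst_size(const void *src, size_t src_size, size_t limit)
{
        unsigned long long csize = ZSTD_getFrameContentSize(src, src_size);

        if (csize == ZSTD_CONTENTSIZE_ERROR)
                return -EINVAL;         /* not a valid zstd frame */
        if (csize == ZSTD_CONTENTSIZE_UNKNOWN)
                return -EOPNOTSUPP;     /* size not stored; fall back to streaming */
        if (csize > limit)
                return -E2BIG;          /* attacker-controlled value; enforce a cap */
        return csize;                   /* safe size for the destination buffer */
}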
+
+/*! ZSTD_getDecompressedSize() :
+ * NOTE: This function is now obsolete, in favor of ZSTD_getFrameContentSize().
+ * Both functions work the same way, but ZSTD_getDecompressedSize() blends
+ * "empty", "unknown" and "error" results to the same return value (0),
+ * while ZSTD_getFrameContentSize() gives them separate return values.
+ * @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise. */
+ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
+
+/*! ZSTD_findFrameCompressedSize() :
+ * `src` should point to the start of a ZSTD frame or skippable frame.
+ * `srcSize` must be >= first frame size
+ * @return : the compressed size of the first frame starting at `src`,
+ * suitable to pass as `srcSize` to `ZSTD_decompress` or similar,
+ * or an error code if input is invalid */
+ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize);
+
+
+/*====== Helper functions ======*/
+#define ZSTD_COMPRESSBOUND(srcSize) ((srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0)) /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
+ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */
+ZSTDLIB_API unsigned ZSTD_isError(size_t code); /*!< tells if a `size_t` function result is an error code */
+ZSTDLIB_API const char* ZSTD_getErrorName(size_t code); /*!< provides readable string from an error code */
+ZSTDLIB_API int ZSTD_minCLevel(void); /*!< minimum negative compression level allowed */
+ZSTDLIB_API int ZSTD_maxCLevel(void); /*!< maximum compression level available */
+
+
+/* *************************************
+* Explicit context
+***************************************/
+/*= Compression context
+ * When compressing many times,
+ * it is recommended to allocate a context just once,
+ * and re-use it for each successive compression operation.
+ * This will make workload friendlier for system's memory.
+ * Note : re-using context is just a speed / resource optimization.
+ * It doesn't change the compression ratio, which remains identical.
+ * Note 2 : In multi-threaded environments,
+ * use one different context per thread for parallel execution.
+ */
+typedef struct ZSTD_CCtx_s ZSTD_CCtx;
+ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx(void);
+ZSTDLIB_API size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx); /* accept NULL pointer */
+
+/*! ZSTD_compressCCtx() :
+ * Same as ZSTD_compress(), using an explicit ZSTD_CCtx.
+ * Important : in order to behave similarly to `ZSTD_compress()`,
+ * this function compresses at requested compression level,
+ * __ignoring any other parameter__ .
+ * If any advanced parameter was set using the advanced API,
+ * they will all be reset. Only `compressionLevel` remains.
+ */
+ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ int compressionLevel);
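+
+/* Usage sketch (illustrative only) :
+ * re-using a single ZSTD_CCtx across many independent compressions, as recommended above.
+ * The buffer arrays, counts and loop variable are hypothetical; error handling is abbreviated.
+ *
+ * ZSTD_CCtx* const cctx = ZSTD_createCCtx();
+ * size_t i;
+ * for (i = 0; cctx != NULL && i < nbBuffers; i++) {
+ * size_t const cSize = ZSTD_compressCCtx(cctx, dstBuf[i], dstCap[i],
+ * srcBuf[i], srcSize[i], 3);
+ * if (ZSTD_isError(cSize)) break; -- handle error
+ * }
+ * ZSTD_freeCCtx(cctx); -- accepts NULL
+ */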
+
+/*= Decompression context
+ * When decompressing many times,
+ * it is recommended to allocate a context only once,
+ * and re-use it for each successive decompression operation.
+ * This will make workload friendlier for system's memory.
+ * Use one context per thread for parallel execution. */
+typedef struct ZSTD_DCtx_s ZSTD_DCtx;
+ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx(void);
+ZSTDLIB_API size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx); /* accept NULL pointer */
+
+/*! ZSTD_decompressDCtx() :
+ * Same as ZSTD_decompress(),
+ * requires an allocated ZSTD_DCtx.
+ * Compatible with sticky parameters.
+ */
+ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize);
+
+
+/* *************************************
+* Advanced compression API
+***************************************/
+
+/* API design :
+ * Parameters are pushed one by one into an existing context,
+ * using ZSTD_CCtx_set*() functions.
+ * Pushed parameters are sticky : they are valid for next compressed frame, and any subsequent frame.
+ * "sticky" parameters are applicable to `ZSTD_compress2()` and `ZSTD_compressStream*()` !
+ * __They do not apply to "simple" one-shot variants such as ZSTD_compressCCtx()__ .
+ *
+ * It's possible to reset all parameters to "default" using ZSTD_CCtx_reset().
+ *
+ * This API supersedes all other "advanced" API entry points in the experimental section.
+ * In the future, we expect to remove from experimental API entry points which are redundant with this API.
+ */
+
+
+/* Compression strategies, listed from fastest to strongest */
+typedef enum { ZSTD_fast=1,
+ ZSTD_dfast=2,
+ ZSTD_greedy=3,
+ ZSTD_lazy=4,
+ ZSTD_lazy2=5,
+ ZSTD_btlazy2=6,
+ ZSTD_btopt=7,
+ ZSTD_btultra=8,
+ ZSTD_btultra2=9
+ /* note : new strategies _might_ be added in the future.
+ Only the order (from fast to strong) is guaranteed */
+} ZSTD_strategy;
+
+
+typedef enum {
+
+ /* compression parameters
+ * Note: When compressing with a ZSTD_CDict these parameters are superseded
+ * by the parameters used to construct the ZSTD_CDict.
+ * See ZSTD_CCtx_refCDict() for more info (superseded-by-cdict). */
+ ZSTD_c_compressionLevel=100, /* Set compression parameters according to pre-defined cLevel table.
+ * Note that exact compression parameters are dynamically determined,
+ * depending on both compression level and srcSize (when known).
+ * Default level is ZSTD_CLEVEL_DEFAULT==3.
+ * Special: value 0 means default, which is controlled by ZSTD_CLEVEL_DEFAULT.
+ * Note 1 : it's possible to pass a negative compression level.
+ * Note 2 : setting a level does not automatically set all other compression parameters
+ * to default. Setting this will however eventually dynamically impact the compression
+ * parameters which have not been manually set. The manually set
+ * ones will 'stick'. */
+ /* Advanced compression parameters :
+ * It's possible to pin down compression parameters to some specific values.
+ * In which case, these values are no longer dynamically selected by the compressor */
+ ZSTD_c_windowLog=101, /* Maximum allowed back-reference distance, expressed as power of 2.
+ * This will set a memory budget for streaming decompression,
+ * with larger values requiring more memory
+ * and typically compressing more.
+ * Must be clamped between ZSTD_WINDOWLOG_MIN and ZSTD_WINDOWLOG_MAX.
+ * Special: value 0 means "use default windowLog".
+ * Note: Using a windowLog greater than ZSTD_WINDOWLOG_LIMIT_DEFAULT
+ * requires explicitly allowing such size at streaming decompression stage. */
+ ZSTD_c_hashLog=102, /* Size of the initial probe table, as a power of 2.
+ * Resulting memory usage is (1 << (hashLog+2)).
+ * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX.
+ * Larger tables improve compression ratio of strategies <= dFast,
+ * and improve speed of strategies > dFast.
+ * Special: value 0 means "use default hashLog". */
+ ZSTD_c_chainLog=103, /* Size of the multi-probe search table, as a power of 2.
+ * Resulting memory usage is (1 << (chainLog+2)).
+ * Must be clamped between ZSTD_CHAINLOG_MIN and ZSTD_CHAINLOG_MAX.
+ * Larger tables result in better and slower compression.
+ * This parameter is useless for "fast" strategy.
+ * It's still useful when using "dfast" strategy,
+ * in which case it defines a secondary probe table.
+ * Special: value 0 means "use default chainLog". */
+ ZSTD_c_searchLog=104, /* Number of search attempts, as a power of 2.
+ * More attempts result in better and slower compression.
+ * This parameter is useless for "fast" and "dFast" strategies.
+ * Special: value 0 means "use default searchLog". */
+ ZSTD_c_minMatch=105, /* Minimum size of searched matches.
+ * Note that Zstandard can still find matches of smaller size,
+ * it just tweaks its search algorithm to look for this size and larger.
+ * Larger values increase compression and decompression speed, but decrease ratio.
+ * Must be clamped between ZSTD_MINMATCH_MIN and ZSTD_MINMATCH_MAX.
+ * Note that currently, for all strategies < btopt, effective minimum is 4,
+ * and for all strategies > fast, effective maximum is 6.
+ * Special: value 0 means "use default minMatchLength". */
+ ZSTD_c_targetLength=106, /* Impact of this field depends on strategy.
+ * For strategies btopt, btultra & btultra2:
+ * Length of Match considered "good enough" to stop search.
+ * Larger values make compression stronger, and slower.
+ * For strategy fast:
+ * Distance between match sampling.
+ * Larger values make compression faster, and weaker.
+ * Special: value 0 means "use default targetLength". */
+ ZSTD_c_strategy=107, /* See ZSTD_strategy enum definition.
+ * The higher the value of selected strategy, the more complex it is,
+ * resulting in stronger and slower compression.
+ * Special: value 0 means "use default strategy". */
+
+ /* LDM mode parameters */
+ ZSTD_c_enableLongDistanceMatching=160, /* Enable long distance matching.
+ * This parameter is designed to improve compression ratio
+ * for large inputs, by finding large matches at long distance.
+ * It increases memory usage and window size.
+ * Note: enabling this parameter increases default ZSTD_c_windowLog to 128 MB
+ * except when expressly set to a different value.
+ * Note: will be enabled by default if ZSTD_c_windowLog >= 128 MB and
+ * compression strategy >= ZSTD_btopt (== compression level 16+) */
+ ZSTD_c_ldmHashLog=161, /* Size of the table for long distance matching, as a power of 2.
+ * Larger values increase memory usage and compression ratio,
+ * but decrease compression speed.
+ * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX
+ * default: windowlog - 7.
+ * Special: value 0 means "automatically determine hashlog". */
+ ZSTD_c_ldmMinMatch=162, /* Minimum match size for long distance matcher.
+ * Larger/too small values usually decrease compression ratio.
+ * Must be clamped between ZSTD_LDM_MINMATCH_MIN and ZSTD_LDM_MINMATCH_MAX.
+ * Special: value 0 means "use default value" (default: 64). */
+ ZSTD_c_ldmBucketSizeLog=163, /* Log size of each bucket in the LDM hash table for collision resolution.
+ * Larger values improve collision resolution but decrease compression speed.
+ * The maximum value is ZSTD_LDM_BUCKETSIZELOG_MAX.
+ * Special: value 0 means "use default value" (default: 3). */
+ ZSTD_c_ldmHashRateLog=164, /* Frequency of inserting/looking up entries into the LDM hash table.
+ * Must be clamped between 0 and (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN).
+ * Default is MAX(0, (windowLog - ldmHashLog)), optimizing hash table usage.
+ * Larger values improve compression speed.
+ * Deviating far from default value will likely result in a compression ratio decrease.
+ * Special: value 0 means "automatically determine hashRateLog". */
+
+ /* frame parameters */
+ ZSTD_c_contentSizeFlag=200, /* Content size will be written into frame header _whenever known_ (default:1)
+ * Content size must be known at the beginning of compression.
+ * This is automatically the case when using ZSTD_compress2(),
+ * For streaming scenarios, content size must be provided with ZSTD_CCtx_setPledgedSrcSize() */
+ ZSTD_c_checksumFlag=201, /* A 32-bits checksum of content is written at end of frame (default:0) */
+ ZSTD_c_dictIDFlag=202, /* When applicable, dictionary's ID is written into frame header (default:1) */
+
+ /* multi-threading parameters */
+ /* These parameters are only active if multi-threading is enabled (compiled with build macro ZSTD_MULTITHREAD).
+ * Otherwise, trying to set any other value than default (0) will be a no-op and return an error.
+ * In a situation where it's unknown if the linked library supports multi-threading or not,
+ * setting ZSTD_c_nbWorkers to any value >= 1 and consulting the return value provides a quick way to check this property.
+ */
+ ZSTD_c_nbWorkers=400, /* Select how many threads will be spawned to compress in parallel.
+ * When nbWorkers >= 1, triggers asynchronous mode when invoking ZSTD_compressStream*() :
+ * ZSTD_compressStream*() consumes input and flush output if possible, but immediately gives back control to caller,
+ * while compression is performed in parallel, within worker thread(s).
+ * (note : a strong exception to this rule is when first invocation of ZSTD_compressStream2() sets ZSTD_e_end :
+ * in which case, ZSTD_compressStream2() delegates to ZSTD_compress2(), which is always a blocking call).
+ * More workers improve speed, but also increase memory usage.
+ * Default value is `0`, aka "single-threaded mode" : no worker is spawned,
+ * compression is performed inside Caller's thread, and all invocations are blocking */
+ ZSTD_c_jobSize=401, /* Size of a compression job. This value is enforced only when nbWorkers >= 1.
+ * Each compression job is completed in parallel, so this value can indirectly impact the nb of active threads.
+ * 0 means default, which is dynamically determined based on compression parameters.
+ * Job size must be a minimum of overlap size, or 1 MB, whichever is largest.
+ * The minimum size is automatically and transparently enforced. */
+ ZSTD_c_overlapLog=402, /* Control the overlap size, as a fraction of window size.
+ * The overlap size is an amount of data reloaded from previous job at the beginning of a new job.
+ * It helps preserve compression ratio, while each job is compressed in parallel.
+ * This value is enforced only when nbWorkers >= 1.
+ * Larger values increase compression ratio, but decrease speed.
+ * Possible values range from 0 to 9 :
+ * - 0 means "default" : value will be determined by the library, depending on strategy
+ * - 1 means "no overlap"
+ * - 9 means "full overlap", using a full window size.
+ * Each intermediate rank increases/decreases load size by a factor 2 :
+ * 9: full window; 8: w/2; 7: w/4; 6: w/8; 5: w/16; 4: w/32; 3: w/64; 2: w/128; 1: no overlap; 0: default
+ * default value varies between 6 and 9, depending on strategy */
+
+ /* note : additional experimental parameters are also available
+ * within the experimental section of the API.
+ * At the time of this writing, they include :
+ * ZSTD_c_rsyncable
+ * ZSTD_c_format
+ * ZSTD_c_forceMaxWindow
+ * ZSTD_c_forceAttachDict
+ * ZSTD_c_literalCompressionMode
+ * ZSTD_c_targetCBlockSize
+ * ZSTD_c_srcSizeHint
+ * ZSTD_c_enableDedicatedDictSearch
+ * ZSTD_c_stableInBuffer
+ * ZSTD_c_stableOutBuffer
+ * ZSTD_c_blockDelimiters
+ * ZSTD_c_validateSequences
+ * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
+ * note : never ever use experimentalParam? names directly;
+ * also, the enums values themselves are unstable and can still change.
+ */
+ ZSTD_c_experimentalParam1=500,
+ ZSTD_c_experimentalParam2=10,
+ ZSTD_c_experimentalParam3=1000,
+ ZSTD_c_experimentalParam4=1001,
+ ZSTD_c_experimentalParam5=1002,
+ ZSTD_c_experimentalParam6=1003,
+ ZSTD_c_experimentalParam7=1004,
+ ZSTD_c_experimentalParam8=1005,
+ ZSTD_c_experimentalParam9=1006,
+ ZSTD_c_experimentalParam10=1007,
+ ZSTD_c_experimentalParam11=1008,
+ ZSTD_c_experimentalParam12=1009
+} ZSTD_cParameter;
+
+typedef struct {
+ size_t error;
+ int lowerBound;
+ int upperBound;
+} ZSTD_bounds;
+
+/*! ZSTD_cParam_getBounds() :
+ * All parameters must belong to an interval with lower and upper bounds,
+ * otherwise they will either trigger an error or be automatically clamped.
+ * @return : a structure, ZSTD_bounds, which contains
+ * - an error status field, which must be tested using ZSTD_isError()
+ * - lower and upper bounds, both inclusive
+ */
+ZSTDLIB_API ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter cParam);
+
+/*! ZSTD_CCtx_setParameter() :
+ * Set one compression parameter, selected by enum ZSTD_cParameter.
+ * All parameters have valid bounds. Bounds can be queried using ZSTD_cParam_getBounds().
+ * Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter).
+ * Setting a parameter is generally only possible during frame initialization (before starting compression).
+ * Exception : when using multi-threading mode (nbWorkers >= 1),
+ * the following parameters can be updated _during_ compression (within same frame):
+ * => compressionLevel, hashLog, chainLog, searchLog, minMatch, targetLength and strategy.
+ * new parameters will be active for next job only (after a flush()).
+ * @return : an error code (which can be tested using ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value);
+
+/*! ZSTD_CCtx_setPledgedSrcSize() :
+ * Total input data size to be compressed as a single frame.
+ * Value will be written in frame header, unless if explicitly forbidden using ZSTD_c_contentSizeFlag.
+ * This value will also be controlled at end of frame, and trigger an error if not respected.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Note 1 : pledgedSrcSize==0 actually means zero, aka an empty frame.
+ * In order to mean "unknown content size", pass constant ZSTD_CONTENTSIZE_UNKNOWN.
+ * ZSTD_CONTENTSIZE_UNKNOWN is default value for any new frame.
+ * Note 2 : pledgedSrcSize is only valid once, for the next frame.
+ * It's discarded at the end of the frame, and replaced by ZSTD_CONTENTSIZE_UNKNOWN.
+ * Note 3 : Whenever all input data is provided and consumed in a single round,
+ * for example with ZSTD_compress2(),
+ * or invoking immediately ZSTD_compressStream2(,,,ZSTD_e_end),
+ * this value is automatically overridden by srcSize instead.
+ */
+ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize);
+
+typedef enum {
+ ZSTD_reset_session_only = 1,
+ ZSTD_reset_parameters = 2,
+ ZSTD_reset_session_and_parameters = 3
+} ZSTD_ResetDirective;
+
+/*! ZSTD_CCtx_reset() :
+ * There are 2 different things that can be reset, independently or jointly :
+ * - The session : will stop compressing current frame, and make CCtx ready to start a new one.
+ * Useful after an error, or to interrupt any ongoing compression.
+ * Any internal data not yet flushed is cancelled.
+ * Compression parameters and dictionary remain unchanged.
+ * They will be used to compress next frame.
+ * Resetting session never fails.
+ * - The parameters : changes all parameters back to "default".
+ * This removes any reference to any dictionary too.
+ * Parameters can only be changed between 2 sessions (i.e. no compression is currently ongoing)
+ * otherwise the reset fails, and function returns an error value (which can be tested using ZSTD_isError())
+ * - Both : similar to resetting the session, followed by resetting parameters.
+ */
+ZSTDLIB_API size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset);
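+
+/* Usage sketch (illustrative only) :
+ * starting a fresh frame on an existing `cctx` while keeping its sticky parameters,
+ * and pledging the total size that will be streamed in. `totalSize` is hypothetical.
+ *
+ * ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only); -- never fails
+ * { size_t const ret = ZSTD_CCtx_setPledgedSrcSize(cctx, totalSize);
+ * if (ZSTD_isError(ret)) { ... } }
+ * -- a mismatch between totalSize and the data actually provided
+ * -- is reported as an error when the frame is finished
+ */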
+
+/*! ZSTD_compress2() :
+ * Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API.
+ * ZSTD_compress2() always starts a new frame.
+ * Should cctx hold data from a previously unfinished frame, everything about it is forgotten.
+ * - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
+ * - The function is always blocking, returns when compression is completed.
+ * Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`.
+ * @return : compressed size written into `dst` (<= `dstCapacity`),
+ * or an error code if it fails (which can be tested using ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_compress2( ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize);
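+
+/* Usage sketch (illustrative only) :
+ * setting a few sticky parameters, then compressing with ZSTD_compress2().
+ * The parameter values are arbitrary examples; `cctx` and the buffers are assumed
+ * to exist, and each setter's return code should also be tested with ZSTD_isError().
+ *
+ * ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
+ * ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
+ * ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 4); -- fails on single-threaded builds
+ * { size_t const cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
+ * if (ZSTD_isError(cSize)) { ... } }
+ * -- these parameters remain in effect for later frames,
+ * -- until ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters) is called
+ */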
+
+
+/* *************************************
+* Advanced decompression API
+***************************************/
+
+/* The advanced API pushes parameters one by one into an existing DCtx context.
+ * Parameters are sticky, and remain valid for all following frames
+ * using the same DCtx context.
+ * It's possible to reset parameters to default values using ZSTD_DCtx_reset().
+ * Note : This API is compatible with existing ZSTD_decompressDCtx() and ZSTD_decompressStream().
+ * Therefore, no new decompression function is necessary.
+ */
+
+typedef enum {
+
+ ZSTD_d_windowLogMax=100, /* Select a size limit (in power of 2) beyond which
+ * the streaming API will refuse to allocate memory buffer
+ * in order to protect the host from unreasonable memory requirements.
+ * This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode.
+ * By default, a decompression context accepts window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT).
+ * Special: value 0 means "use default maximum windowLog". */
+
+ /* note : additional experimental parameters are also available
+ * within the experimental section of the API.
+ * At the time of this writing, they include :
+ * ZSTD_d_format
+ * ZSTD_d_stableOutBuffer
+ * ZSTD_d_forceIgnoreChecksum
+ * ZSTD_d_refMultipleDDicts
+ * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
+ * note : never ever use experimentalParam? names directly
+ */
+ ZSTD_d_experimentalParam1=1000,
+ ZSTD_d_experimentalParam2=1001,
+ ZSTD_d_experimentalParam3=1002,
+ ZSTD_d_experimentalParam4=1003
+
+} ZSTD_dParameter;
+
+/*! ZSTD_dParam_getBounds() :
+ * All parameters must belong to an interval with lower and upper bounds,
+ * otherwise they will either trigger an error or be automatically clamped.
+ * @return : a structure, ZSTD_bounds, which contains
+ * - an error status field, which must be tested using ZSTD_isError()
+ * - both lower and upper bounds, inclusive
+ */
+ZSTDLIB_API ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam);
+
+/*! ZSTD_DCtx_setParameter() :
+ * Set one decompression parameter, selected by enum ZSTD_dParameter.
+ * All parameters have valid bounds. Bounds can be queried using ZSTD_dParam_getBounds().
+ * Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter).
+ * Setting a parameter is only possible during frame initialization (before starting decompression).
+ * @return : 0, or an error code (which can be tested using ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int value);
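+
+/* Usage sketch (illustrative only) :
+ * raising the streaming decoder's window limit for frames produced with a large
+ * ZSTD_c_windowLog. The value 30 is an arbitrary example; `dctx` is assumed to exist.
+ *
+ * size_t const ret = ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, 30);
+ * if (ZSTD_isError(ret)) { ... } -- e.g. value out of bounds
+ */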
+
+/*! ZSTD_DCtx_reset() :
+ * Return a DCtx to clean state.
+ * Session and parameters can be reset jointly or separately.
+ * Parameters can only be reset when no active frame is being decompressed.
+ * @return : 0, or an error code, which can be tested with ZSTD_isError()
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset);
+
+
+/* **************************
+* Streaming
+****************************/
+
+typedef struct ZSTD_inBuffer_s {
+ const void* src; /*< start of input buffer */
+ size_t size; /*< size of input buffer */
+ size_t pos; /*< position where reading stopped. Will be updated. Necessarily 0 <= pos <= size */
+} ZSTD_inBuffer;
+
+typedef struct ZSTD_outBuffer_s {
+ void* dst; /*< start of output buffer */
+ size_t size; /*< size of output buffer */
+ size_t pos; /*< position where writing stopped. Will be updated. Necessarily 0 <= pos <= size */
+} ZSTD_outBuffer;
+
+
+
+/*-***********************************************************************
+* Streaming compression - HowTo
+*
+* A ZSTD_CStream object is required to track streaming operation.
+* Use ZSTD_createCStream() and ZSTD_freeCStream() to create/release resources.
+* ZSTD_CStream objects can be reused multiple times on consecutive compression operations.
+* It is recommended to re-use ZSTD_CStream since it will play nicer with system's memory, by re-using already allocated memory.
+*
+* For parallel execution, use one separate ZSTD_CStream per thread.
+*
+* note : since v1.3.0, ZSTD_CStream and ZSTD_CCtx are the same thing.
+*
+* Parameters are sticky : when starting a new compression on the same context,
+* it will re-use the same sticky parameters as previous compression session.
+* When in doubt, it's recommended to fully initialize the context before usage.
+* Use ZSTD_CCtx_reset() to reset the context and ZSTD_CCtx_setParameter(),
+* ZSTD_CCtx_setPledgedSrcSize(), or ZSTD_CCtx_loadDictionary() and friends to
+* set more specific parameters, the pledged source size, or load a dictionary.
+*
+* Use ZSTD_compressStream2() with ZSTD_e_continue as many times as necessary to
+* consume input stream. The function will automatically update both `pos`
+* fields within `input` and `output`.
+* Note that the function may not consume the entire input, for example, because
+* the output buffer is already full, in which case `input.pos < input.size`.
+* The caller must check if input has been entirely consumed.
+* If not, the caller must make some room to receive more compressed data,
+* and then present again remaining input data.
+* note: ZSTD_e_continue is guaranteed to make some forward progress when called,
+* but doesn't guarantee maximal forward progress. This is especially relevant
+* when compressing with multiple threads. The call won't block if it can
+* consume some input, but if it can't it will wait for some, but not all,
+* output to be flushed.
+* @return : provides a minimum amount of data remaining to be flushed from internal buffers
+* or an error code, which can be tested using ZSTD_isError().
+*
+* At any moment, it's possible to flush whatever data might remain stuck within internal buffer,
+* using ZSTD_compressStream2() with ZSTD_e_flush. `output->pos` will be updated.
+* Note that, if `output->size` is too small, a single invocation with ZSTD_e_flush might not be enough (return code > 0).
+* In which case, make some room to receive more compressed data, and call again ZSTD_compressStream2() with ZSTD_e_flush.
+* You must continue calling ZSTD_compressStream2() with ZSTD_e_flush until it returns 0, at which point you can change the
+* operation.
+* note: ZSTD_e_flush will flush as much output as possible, meaning when compressing with multiple threads, it will
+* block until the flush is complete or the output buffer is full.
+* @return : 0 if internal buffers are entirely flushed,
+* >0 if some data still present within internal buffer (the value is minimal estimation of remaining size),
+* or an error code, which can be tested using ZSTD_isError().
+*
+* Calling ZSTD_compressStream2() with ZSTD_e_end instructs to finish a frame.
+* It will perform a flush and write frame epilogue.
+* The epilogue is required for decoders to consider a frame completed.
+* flush operation is the same, and follows same rules as calling ZSTD_compressStream2() with ZSTD_e_flush.
+* You must continue calling ZSTD_compressStream2() with ZSTD_e_end until it returns 0, at which point you are free to
+* start a new frame.
+* note: ZSTD_e_end will flush as much output as possible, meaning when compressing with multiple threads, it will
+* block until the flush is complete or the output buffer is full.
+* @return : 0 if frame fully completed and fully flushed,
+* >0 if some data still present within internal buffer (the value is minimal estimation of remaining size),
+* or an error code, which can be tested using ZSTD_isError().
+*
+* *******************************************************************/
+
+typedef ZSTD_CCtx ZSTD_CStream; /*< CCtx and CStream are now effectively same object (>= v1.3.0) */
+ /* Continue to distinguish them for compatibility with older versions <= v1.2.0 */
+/*===== ZSTD_CStream management functions =====*/
+ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream(void);
+ZSTDLIB_API size_t ZSTD_freeCStream(ZSTD_CStream* zcs); /* accept NULL pointer */
+
+/*===== Streaming compression functions =====*/
+typedef enum {
+ ZSTD_e_continue=0, /* collect more data, encoder decides when to output compressed result, for optimal compression ratio */
+ ZSTD_e_flush=1, /* flush any data provided so far,
+ * it creates (at least) one new block, that can be decoded immediately on reception;
+ * frame will continue: any future data can still reference previously compressed data, improving compression.
+ * note : multithreaded compression will block to flush as much output as possible. */
+ ZSTD_e_end=2 /* flush any remaining data _and_ close current frame.
+ * note that frame is only closed after compressed data is fully flushed (return value == 0).
+ * After that point, any additional data starts a new frame.
+ * note : each frame is independent (does not reference any content from previous frame).
+ * note : multithreaded compression will block to flush as much output as possible. */
+} ZSTD_EndDirective;
+
+/*! ZSTD_compressStream2() :
+ * Behaves about the same as ZSTD_compressStream, with additional control on end directive.
+ * - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
+ * - Compression parameters cannot be changed once compression is started (except for a list of exceptions in multi-threading mode)
+ * - output->pos must be <= dstCapacity, input->pos must be <= srcSize
+ * - output->pos and input->pos will be updated. They are guaranteed to remain below their respective limit.
+ * - endOp must be a valid directive
+ * - When nbWorkers==0 (default), function is blocking : it completes its job before returning to caller.
+ * - When nbWorkers>=1, function is non-blocking : it copies a portion of input, distributes jobs to internal worker threads, flushes to output whatever is available,
+ * and then immediately returns, just indicating that there is some data remaining to be flushed.
+ * The function nonetheless guarantees forward progress : it will return only after it has read or written at least one byte.
+ * - Exception : if the first call requests a ZSTD_e_end directive and provides enough dstCapacity, the function delegates to ZSTD_compress2() which is always blocking.
+ * - @return provides a minimum amount of data remaining to be flushed from internal buffers
+ * or an error code, which can be tested using ZSTD_isError().
+ * if @return != 0, flush is not fully completed, there is still some data left within internal buffers.
+ * This is useful for ZSTD_e_flush, since in this case more flushes are necessary to empty all buffers.
+ * For ZSTD_e_end, @return == 0 when internal buffers are fully flushed and frame is completed.
+ * - after a ZSTD_e_end directive, if internal buffer is not fully flushed (@return != 0),
+ * only ZSTD_e_end or ZSTD_e_flush operations are allowed.
+ * Before starting a new compression job, or changing compression parameters,
+ * it is required to fully flush internal buffers.
+ */
+ZSTDLIB_API size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
+ ZSTD_outBuffer* output,
+ ZSTD_inBuffer* input,
+ ZSTD_EndDirective endOp);
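+
+/* Usage sketch (illustrative only) :
+ * a typical compression loop over one input chunk, following the HowTo above.
+ * `cctx`, the chunk/output buffers, `lastChunk` and write_out() are hypothetical
+ * caller-side names; error handling is abbreviated.
+ *
+ * ZSTD_EndDirective const mode = lastChunk ? ZSTD_e_end : ZSTD_e_continue;
+ * ZSTD_inBuffer input = { srcChunk, srcChunkSize, 0 };
+ * int finished;
+ * do {
+ * ZSTD_outBuffer output = { outBuf, outBufSize, 0 };
+ * size_t const remaining = ZSTD_compressStream2(cctx, &output, &input, mode);
+ * if (ZSTD_isError(remaining)) break; -- handle error
+ * write_out(outBuf, output.pos);
+ * finished = lastChunk ? (remaining == 0) : (input.pos == input.size);
+ * } while (!finished);
+ */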
+
+
+/* These buffer sizes are softly recommended.
+ * They are not required : ZSTD_compressStream*() happily accepts any buffer size, for both input and output.
+ * Respecting the recommended size just makes it a bit easier for ZSTD_compressStream*(),
+ * reducing the amount of memory shuffling and buffering, resulting in minor performance savings.
+ *
+ * However, note that these recommendations are from the perspective of a C caller program.
+ * If the streaming interface is invoked from some other language,
+ * especially managed ones such as Java or Go, through a foreign function interface such as jni or cgo,
+ * a major performance rule is to reduce crossing such interface to an absolute minimum.
+ * It's not rare for more time to be spent crossing the interface than on compression itself.
+ * In such cases, prefer using buffers as large as practical,
+ * for both input and output, to reduce the number of round trips.
+ */
+ZSTDLIB_API size_t ZSTD_CStreamInSize(void); /*< recommended size for input buffer */
+ZSTDLIB_API size_t ZSTD_CStreamOutSize(void); /*< recommended size for output buffer. Guarantee to successfully flush at least one complete compressed block. */
+
+
+/* *****************************************************************************
+ * This following is a legacy streaming API.
+ * It can be replaced by ZSTD_CCtx_reset() and ZSTD_compressStream2().
+ * It is redundant, but remains fully supported.
+ * Advanced parameters and dictionary compression can only be used through the
+ * new API.
+ ******************************************************************************/
+
+/*!
+ * Equivalent to:
+ *
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
+ * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
+ */
+ZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel);
+/*!
+ * Alternative for ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue).
+ * NOTE: The return value is different. ZSTD_compressStream() returns a hint for
+ * the next read size (if non-zero and not an error). ZSTD_compressStream2()
+ * returns the minimum nb of bytes left to flush (if non-zero and not an error).
+ */
+ZSTDLIB_API size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
+/*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_flush). */
+ZSTDLIB_API size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
+/*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_end). */
+ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
+
+
+/*-***************************************************************************
+* Streaming decompression - HowTo
+*
+* A ZSTD_DStream object is required to track streaming operations.
+* Use ZSTD_createDStream() and ZSTD_freeDStream() to create/release resources.
+* ZSTD_DStream objects can be re-used multiple times.
+*
+* Use ZSTD_initDStream() to start a new decompression operation.
+* @return : recommended first input size
+* Alternatively, use advanced API to set specific properties.
+*
+* Use ZSTD_decompressStream() repetitively to consume your input.
+* The function will update both `pos` fields.
+* If `input.pos < input.size`, some input has not been consumed.
+* It's up to the caller to present again remaining data.
+* The function tries to flush all data decoded immediately, respecting output buffer size.
+* If `output.pos < output.size`, decoder has flushed everything it could.
+ * But if `output.pos == output.size`, there might be some data left within internal buffers.
+* In which case, call ZSTD_decompressStream() again to flush whatever remains in the buffer.
+* Note : with no additional input provided, amount of data flushed is necessarily <= ZSTD_BLOCKSIZE_MAX.
+* @return : 0 when a frame is completely decoded and fully flushed,
+* or an error code, which can be tested using ZSTD_isError(),
+* or any other value > 0, which means there is still some decoding or flushing to do to complete current frame :
+* the return value is a suggested next input size (just a hint for better latency)
+* that will never request more than the remaining frame size.
+* *******************************************************************************/
+
+typedef ZSTD_DCtx ZSTD_DStream; /*< DCtx and DStream are now effectively same object (>= v1.3.0) */
+ /* For compatibility with versions <= v1.2.0, prefer differentiating them. */
+/*===== ZSTD_DStream management functions =====*/
+ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream(void);
+ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds); /* accept NULL pointer */
+
+/*===== Streaming decompression functions =====*/
+
+/* This function is redundant with the advanced API and equivalent to:
+ *
+ * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+ * ZSTD_DCtx_refDDict(zds, NULL);
+ */
+ZSTDLIB_API size_t ZSTD_initDStream(ZSTD_DStream* zds);
+
+ZSTDLIB_API size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
+
+ZSTDLIB_API size_t ZSTD_DStreamInSize(void); /*!< recommended size for input buffer */
+ZSTDLIB_API size_t ZSTD_DStreamOutSize(void); /*!< recommended size for output buffer. Guarantee to successfully flush at least one complete block in all circumstances. */
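+
+/* Usage sketch (illustrative only) :
+ * a typical streaming decompression loop, following the HowTo above.
+ * `dctx`, read_chunk(), write_out() and the buffers are hypothetical caller-side names;
+ * ZSTD_DStreamInSize()/ZSTD_DStreamOutSize() give convenient buffer sizes.
+ *
+ * size_t readBytes;
+ * while ((readBytes = read_chunk(inBuf, ZSTD_DStreamInSize())) != 0) {
+ * ZSTD_inBuffer input = { inBuf, readBytes, 0 };
+ * while (input.pos < input.size) {
+ * ZSTD_outBuffer output = { outBuf, ZSTD_DStreamOutSize(), 0 };
+ * size_t const ret = ZSTD_decompressStream(dctx, &output, &input);
+ * if (ZSTD_isError(ret)) break; -- handle error
+ * write_out(outBuf, output.pos); -- ret == 0 means a frame just completed
+ * }
+ * }
+ */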
+
+
+/* ************************
+* Simple dictionary API
+***************************/
+/*! ZSTD_compress_usingDict() :
+ * Compression at an explicit compression level using a Dictionary.
+ * A dictionary can be any arbitrary data segment (also called a prefix),
+ * or a buffer with specified information (see dictBuilder/zdict.h).
+ * Note : This function loads the dictionary, resulting in significant startup delay.
+ * It's intended for a dictionary used only once.
+ * Note 2 : When `dict == NULL || dictSize < 8` no dictionary is used. */
+ZSTDLIB_API size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize,
+ int compressionLevel);
+
+/*! ZSTD_decompress_usingDict() :
+ * Decompression using a known Dictionary.
+ * Dictionary must be identical to the one used during compression.
+ * Note : This function loads the dictionary, resulting in significant startup delay.
+ * It's intended for a dictionary used only once.
+ * Note : When `dict == NULL || dictSize < 8` no dictionary is used. */
+ZSTDLIB_API size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize);
+
+
+/* *********************************
+ * Bulk processing dictionary API
+ **********************************/
+typedef struct ZSTD_CDict_s ZSTD_CDict;
+
+/*! ZSTD_createCDict() :
+ * When compressing multiple messages or blocks using the same dictionary,
+ * it's recommended to digest the dictionary only once, since it's a costly operation.
+ * ZSTD_createCDict() will create a state from digesting a dictionary.
+ * The resulting state can be used for future compression operations with very limited startup cost.
+ * ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
+ * @dictBuffer can be released after ZSTD_CDict creation, because its content is copied within CDict.
+ * Note 1 : Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate @dictBuffer content.
+ * Note 2 : A ZSTD_CDict can be created from an empty @dictBuffer,
+ * in which case the only thing that it transports is the @compressionLevel.
+ * This can be useful in a pipeline featuring ZSTD_compress_usingCDict() exclusively,
+ * expecting a ZSTD_CDict parameter with any data, including those without a known dictionary. */
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize,
+ int compressionLevel);
+
+/*! ZSTD_freeCDict() :
+ * Function frees memory allocated by ZSTD_createCDict().
+ * If a NULL pointer is passed, no operation is performed. */
+ZSTDLIB_API size_t ZSTD_freeCDict(ZSTD_CDict* CDict);
+
+/*! ZSTD_compress_usingCDict() :
+ * Compression using a digested Dictionary.
+ * Recommended when same dictionary is used multiple times.
+ * Note : compression level is _decided at dictionary creation time_,
+ * and frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no) */
+ZSTDLIB_API size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_CDict* cdict);
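+
+/* Usage sketch (illustrative only) :
+ * digesting a dictionary once, then compressing many small messages with it.
+ * `cctx`, the dictionary and message buffers, and the counts are hypothetical;
+ * error handling is abbreviated.
+ *
+ * ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuf, dictSize, 3);
+ * size_t i;
+ * for (i = 0; cdict != NULL && i < nbMsgs; i++) {
+ * size_t const cSize = ZSTD_compress_usingCDict(cctx, dst[i], dstCap[i],
+ * msg[i], msgSize[i], cdict);
+ * if (ZSTD_isError(cSize)) break; -- handle error
+ * }
+ * ZSTD_freeCDict(cdict); -- accepts NULL
+ */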
+
+
+typedef struct ZSTD_DDict_s ZSTD_DDict;
+
+/*! ZSTD_createDDict() :
+ * Create a digested dictionary, ready to start decompression operation without startup delay.
+ * dictBuffer can be released after DDict creation, as its content is copied inside DDict. */
+ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict(const void* dictBuffer, size_t dictSize);
+
+/*! ZSTD_freeDDict() :
+ * Function frees memory allocated with ZSTD_createDDict()
+ * If a NULL pointer is passed, no operation is performed. */
+ZSTDLIB_API size_t ZSTD_freeDDict(ZSTD_DDict* ddict);
+
+/*! ZSTD_decompress_usingDDict() :
+ * Decompression using a digested Dictionary.
+ * Recommended when same dictionary is used multiple times. */
+ZSTDLIB_API size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_DDict* ddict);
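+
+/* Usage sketch (illustrative only) :
+ * the decompression-side counterpart : one digested dictionary, re-used across frames.
+ * `dctx`, `dictBuf`, `cSrc` and the destination buffer are hypothetical; the dictionary
+ * must be the same one that was used for compression.
+ *
+ * ZSTD_DDict* const ddict = ZSTD_createDDict(dictBuf, dictSize);
+ * size_t const dSize = ZSTD_decompress_usingDDict(dctx, dst, dstCapacity,
+ * cSrc, cSrcSize, ddict);
+ * if (ZSTD_isError(dSize)) { ... } -- handle error
+ * ZSTD_freeDDict(ddict); -- accepts NULL
+ */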
+
+
+/* ******************************
+ * Dictionary helper functions
+ *******************************/
+
+/*! ZSTD_getDictID_fromDict() :
+ * Provides the dictID stored within dictionary.
+ * if @return == 0, the dictionary is not conformant with Zstandard specification.
+ * It can still be loaded, but as a content-only dictionary. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize);
+
+/*! ZSTD_getDictID_fromDDict() :
+ * Provides the dictID of the dictionary loaded into `ddict`.
+ * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
+ * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);
+
+/*! ZSTD_getDictID_fromFrame() :
+ * Provides the dictID required to decompress the frame stored within `src`.
+ * If @return == 0, the dictID could not be decoded.
+ * This could be for one of the following reasons :
+ * - The frame does not require a dictionary to be decoded (most common case).
+ * - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is hidden information.
+ * Note : this use case also happens when using a non-conformant dictionary.
+ * - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
+ * - This is not a Zstandard frame.
+ * When identifying the exact failure cause, it's possible to use ZSTD_getFrameHeader(), which will provide a more precise error code. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
+
+
+/* *****************************************************************************
+ * Advanced dictionary and prefix API
+ *
+ * This API allows dictionaries to be used with ZSTD_compress2(),
+ * ZSTD_compressStream2(), and ZSTD_decompress(). Dictionaries are sticky, and
+ * only reset when the context is reset with ZSTD_reset_parameters or
+ * ZSTD_reset_session_and_parameters. Prefixes are single-use.
+ ******************************************************************************/
+
+
+/*! ZSTD_CCtx_loadDictionary() :
+ * Create an internal CDict from `dict` buffer.
+ * Decompression will have to use same dictionary.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary,
+ * meaning "return to no-dictionary mode".
+ * Note 1 : Dictionary is sticky, it will be used for all future compressed frames.
+ * To return to "no-dictionary" situation, load a NULL dictionary (or reset parameters).
+ * Note 2 : Loading a dictionary involves building tables.
+ * It's also a CPU consuming operation, with non-negligible impact on latency.
+ * Tables are dependent on compression parameters, and for this reason,
+ * compression parameters can no longer be changed after loading a dictionary.
+ * Note 3 :`dict` content will be copied internally.
+ * Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead.
+ * In such a case, dictionary buffer must outlive its users.
+ * Note 4 : Use ZSTD_CCtx_loadDictionary_advanced()
+ * to precisely select how dictionary content must be interpreted. */
+ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
+
+/*! ZSTD_CCtx_refCDict() :
+ * Reference a prepared dictionary, to be used for all next compressed frames.
+ * Note that compression parameters are enforced from within CDict,
+ * and supersede any compression parameter previously set within CCtx.
+ * The parameters ignored are labelled as "superseded-by-cdict" in the ZSTD_cParameter enum docs.
+ * The ignored parameters will be used again if the CCtx is returned to no-dictionary mode.
+ * The dictionary will remain valid for future compressed frames using same CCtx.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Special : Referencing a NULL CDict means "return to no-dictionary mode".
+ * Note 1 : Currently, only one dictionary can be managed.
+ * Referencing a new dictionary effectively "discards" any previous one.
+ * Note 2 : CDict is just referenced, its lifetime must outlive its usage within CCtx. */
+ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
+
+/*! ZSTD_CCtx_refPrefix() :
+ * Reference a prefix (single-usage dictionary) for next compressed frame.
+ * A prefix is **only used once**. Tables are discarded at end of frame (ZSTD_e_end).
+ * Decompression will need same prefix to properly regenerate data.
+ * Compressing with a prefix is similar in outcome as performing a diff and compressing it,
+ * but performs much faster, especially during decompression (compression speed is tunable with compression level).
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary
+ * Note 1 : Prefix buffer is referenced. It **must** outlive compression.
+ * Its content must remain unmodified during compression.
+ * Note 2 : If the intention is to diff some large src data blob with some prior version of itself,
+ * ensure that the window size is large enough to contain the entire source.
+ * See ZSTD_c_windowLog.
+ * Note 3 : Referencing a prefix involves building tables, which are dependent on compression parameters.
+ * It's a CPU consuming operation, with non-negligible impact on latency.
+ * If there is a need to use the same prefix multiple times, consider loadDictionary instead.
+ * Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dct_rawContent).
+ * Use experimental ZSTD_CCtx_refPrefix_advanced() to alter dictionary interpretation. */
+ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx,
+ const void* prefix, size_t prefixSize);
+
+/*! ZSTD_DCtx_loadDictionary() :
+ * Create an internal DDict from dict buffer,
+ * to be used to decompress next frames.
+ * The dictionary remains valid for all future frames, until explicitly invalidated.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary,
+ * meaning "return to no-dictionary mode".
+ * Note 1 : Loading a dictionary involves building tables,
+ * which has a non-negligible impact on CPU usage and latency.
+ * It's recommended to "load once, use many times", to amortize the cost
+ * Note 2 :`dict` content will be copied internally, so `dict` can be released after loading.
+ * Use ZSTD_DCtx_loadDictionary_byReference() to reference dictionary content instead.
+ * Note 3 : Use ZSTD_DCtx_loadDictionary_advanced() to take control of
+ * how dictionary content is loaded and interpreted.
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
+
+/*! ZSTD_DCtx_refDDict() :
+ * Reference a prepared dictionary, to be used to decompress next frames.
+ * The dictionary remains active for decompression of future frames using same DCtx.
+ *
+ * If called with ZSTD_d_refMultipleDDicts enabled, repeated calls of this function
+ * will store the DDict references in a table, and the DDict used for decompression
+ * will be determined at decompression time, as per the dict ID in the frame.
+ * The memory for the table is allocated on the first call to refDDict, and can be
+ * freed with ZSTD_freeDCtx().
+ *
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Note 1 : Currently, only one dictionary can be managed.
+ * Referencing a new dictionary effectively "discards" any previous one.
+ * Special: referencing a NULL DDict means "return to no-dictionary mode".
+ * Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx.
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
+
+/*! ZSTD_DCtx_refPrefix() :
+ * Reference a prefix (single-usage dictionary) to decompress next frame.
+ * This is the reverse operation of ZSTD_CCtx_refPrefix(),
+ * and must use the same prefix as the one used during compression.
+ * Prefix is **only used once**. Reference is discarded at end of frame.
+ * End of frame is reached when ZSTD_decompressStream() returns 0.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Note 1 : Adding any prefix (including NULL) invalidates any previously set prefix or dictionary
+ * Note 2 : Prefix buffer is referenced. It **must** outlive decompression.
+ * Prefix buffer must remain unmodified up to the end of frame,
+ * reached when ZSTD_decompressStream() returns 0.
+ * Note 3 : By default, the prefix is treated as raw content (ZSTD_dct_rawContent).
+ * Use ZSTD_CCtx_refPrefix_advanced() to alter dictMode (Experimental section)
+ * Note 4 : Referencing a raw content prefix has almost no cpu nor memory cost.
+ * A full dictionary is more costly, as it requires building tables.
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx,
+ const void* prefix, size_t prefixSize);
+
+/* === Memory management === */
+
+/*! ZSTD_sizeof_*() :
+ * These functions give the _current_ memory usage of selected object.
+ * Note that object memory usage can evolve (increase or decrease) over time. */
+ZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);
+ZSTDLIB_API size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx);
+ZSTDLIB_API size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs);
+ZSTDLIB_API size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds);
+ZSTDLIB_API size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict);
+ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
+
+#endif /* ZSTD_H_235446 */
+
+
+/* **************************************************************************************
+ * ADVANCED AND EXPERIMENTAL FUNCTIONS
+ ****************************************************************************************
+ * The definitions in the following section are considered experimental.
+ * They are provided for advanced scenarios.
+ * They should never be used with a dynamic library, as prototypes may change in the future.
+ * Use them only in association with static linking.
+ * ***************************************************************************************/
+
+#if !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY)
+#define ZSTD_H_ZSTD_STATIC_LINKING_ONLY
+
+/* **************************************************************************************
+ * experimental API (static linking only)
+ ****************************************************************************************
+ * The following symbols and constants
+ * are not planned to join "stable API" status in the near future.
+ * They can still change in future versions.
+ * Some of them are planned to remain in the static_only section indefinitely.
+ * Some of them might be removed in the future (especially when redundant with existing stable functions)
+ * ***************************************************************************************/
+
+#define ZSTD_FRAMEHEADERSIZE_PREFIX(format) ((format) == ZSTD_f_zstd1 ? 5 : 1) /* minimum input size required to query frame header size */
+#define ZSTD_FRAMEHEADERSIZE_MIN(format) ((format) == ZSTD_f_zstd1 ? 6 : 2)
+#define ZSTD_FRAMEHEADERSIZE_MAX 18 /* can be useful for static allocation */
+#define ZSTD_SKIPPABLEHEADERSIZE 8
+
+/* compression parameter bounds */
+#define ZSTD_WINDOWLOG_MAX_32 30
+#define ZSTD_WINDOWLOG_MAX_64 31
+#define ZSTD_WINDOWLOG_MAX ((int)(sizeof(size_t) == 4 ? ZSTD_WINDOWLOG_MAX_32 : ZSTD_WINDOWLOG_MAX_64))
+#define ZSTD_WINDOWLOG_MIN 10
+#define ZSTD_HASHLOG_MAX ((ZSTD_WINDOWLOG_MAX < 30) ? ZSTD_WINDOWLOG_MAX : 30)
+#define ZSTD_HASHLOG_MIN 6
+#define ZSTD_CHAINLOG_MAX_32 29
+#define ZSTD_CHAINLOG_MAX_64 30
+#define ZSTD_CHAINLOG_MAX ((int)(sizeof(size_t) == 4 ? ZSTD_CHAINLOG_MAX_32 : ZSTD_CHAINLOG_MAX_64))
+#define ZSTD_CHAINLOG_MIN ZSTD_HASHLOG_MIN
+#define ZSTD_SEARCHLOG_MAX (ZSTD_WINDOWLOG_MAX-1)
+#define ZSTD_SEARCHLOG_MIN 1
+#define ZSTD_MINMATCH_MAX 7 /* only for ZSTD_fast, other strategies are limited to 6 */
+#define ZSTD_MINMATCH_MIN 3 /* only for ZSTD_btopt+, faster strategies are limited to 4 */
+#define ZSTD_TARGETLENGTH_MAX ZSTD_BLOCKSIZE_MAX
+#define ZSTD_TARGETLENGTH_MIN 0 /* note : comparing this constant to an unsigned results in a tautological test */
+#define ZSTD_STRATEGY_MIN ZSTD_fast
+#define ZSTD_STRATEGY_MAX ZSTD_btultra2
+
+
+#define ZSTD_OVERLAPLOG_MIN 0
+#define ZSTD_OVERLAPLOG_MAX 9
+
+#define ZSTD_WINDOWLOG_LIMIT_DEFAULT 27 /* by default, the streaming decoder will refuse any frame
+ * requiring larger than (1<<ZSTD_WINDOWLOG_LIMIT_DEFAULT) window size,
+ * to preserve host's memory from unreasonable requirements.
+ * This limit can be overridden using ZSTD_DCtx_setParameter(,ZSTD_d_windowLogMax,).
+ * The limit does not apply for one-pass decoders (such as ZSTD_decompress()), since no additional memory is allocated */
+
+
+/* LDM parameter bounds */
+#define ZSTD_LDM_HASHLOG_MIN ZSTD_HASHLOG_MIN
+#define ZSTD_LDM_HASHLOG_MAX ZSTD_HASHLOG_MAX
+#define ZSTD_LDM_MINMATCH_MIN 4
+#define ZSTD_LDM_MINMATCH_MAX 4096
+#define ZSTD_LDM_BUCKETSIZELOG_MIN 1
+#define ZSTD_LDM_BUCKETSIZELOG_MAX 8
+#define ZSTD_LDM_HASHRATELOG_MIN 0
+#define ZSTD_LDM_HASHRATELOG_MAX (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN)
+
+/* Advanced parameter bounds */
+#define ZSTD_TARGETCBLOCKSIZE_MIN 64
+#define ZSTD_TARGETCBLOCKSIZE_MAX ZSTD_BLOCKSIZE_MAX
+#define ZSTD_SRCSIZEHINT_MIN 0
+#define ZSTD_SRCSIZEHINT_MAX INT_MAX
+
+/* internal */
+#define ZSTD_HASHLOG3_MAX 17
+
+
+/* --- Advanced types --- */
+
+typedef struct ZSTD_CCtx_params_s ZSTD_CCtx_params;
+
+typedef struct {
+ unsigned int offset; /* The offset of the match. (NOT the same as the offset code)
+ * If offset == 0 and matchLength == 0, this sequence represents the last
+ * literals in the block of litLength size.
+ */
+
+ unsigned int litLength; /* Literal length of the sequence. */
+ unsigned int matchLength; /* Match length of the sequence. */
+
+ /* Note: Users of this API may provide a sequence with matchLength == litLength == offset == 0.
+ * In this case, we will treat the sequence as a marker for a block boundary.
+ */
+
+ unsigned int rep; /* Represents which repeat offset is represented by the field 'offset'.
+ * Ranges from [0, 3].
+ *
+ * Repeat offsets are essentially previous offsets from previous sequences sorted in
+ * recency order. For more detail, see doc/zstd_compression_format.md
+ *
+ * If rep == 0, then 'offset' does not contain a repeat offset.
+ * If rep > 0:
+ * If litLength != 0:
+ * rep == 1 --> offset == repeat_offset_1
+ * rep == 2 --> offset == repeat_offset_2
+ * rep == 3 --> offset == repeat_offset_3
+ * If litLength == 0:
+ * rep == 1 --> offset == repeat_offset_2
+ * rep == 2 --> offset == repeat_offset_3
+ * rep == 3 --> offset == repeat_offset_1 - 1
+ *
+ * Note: This field is optional. ZSTD_generateSequences() will calculate the value of
+ * 'rep', but repeat offsets do not necessarily need to be calculated from an external
+ * sequence provider's perspective. For example, ZSTD_compressSequences() does not
+ * use this 'rep' field at all (as of now).
+ */
+} ZSTD_Sequence;
+
+typedef struct {
+ unsigned windowLog; /*< largest match distance : larger == more compression, more memory needed during decompression */
+ unsigned chainLog; /*< fully searched segment : larger == more compression, slower, more memory (useless for fast) */
+ unsigned hashLog; /*< dispatch table : larger == faster, more memory */
+ unsigned searchLog; /*< nb of searches : larger == more compression, slower */
+ unsigned minMatch; /*< match length searched : larger == faster decompression, sometimes less compression */
+ unsigned targetLength; /*< acceptable match size for optimal parser (only) : larger == more compression, slower */
+ ZSTD_strategy strategy; /*< see ZSTD_strategy definition above */
+} ZSTD_compressionParameters;
+
+typedef struct {
+ int contentSizeFlag; /*< 1: content size will be in frame header (when known) */
+ int checksumFlag; /*< 1: generate a 32-bits checksum using XXH64 algorithm at end of frame, for error detection */
+ int noDictIDFlag; /*< 1: no dictID will be saved into frame header (dictID is only useful for dictionary compression) */
+} ZSTD_frameParameters;
+
+typedef struct {
+ ZSTD_compressionParameters cParams;
+ ZSTD_frameParameters fParams;
+} ZSTD_parameters;
+
+typedef enum {
+ ZSTD_dct_auto = 0, /* dictionary is "full" when starting with ZSTD_MAGIC_DICTIONARY, otherwise it is "rawContent" */
+ ZSTD_dct_rawContent = 1, /* ensures dictionary is always loaded as rawContent, even if it starts with ZSTD_MAGIC_DICTIONARY */
+ ZSTD_dct_fullDict = 2 /* refuses to load a dictionary if it does not respect Zstandard's specification, starting with ZSTD_MAGIC_DICTIONARY */
+} ZSTD_dictContentType_e;
+
+typedef enum {
+ ZSTD_dlm_byCopy = 0, /*< Copy dictionary content internally */
+ ZSTD_dlm_byRef = 1 /*< Reference dictionary content -- the dictionary buffer must outlive its users. */
+} ZSTD_dictLoadMethod_e;
+
+typedef enum {
+ ZSTD_f_zstd1 = 0, /* zstd frame format, specified in zstd_compression_format.md (default) */
+ ZSTD_f_zstd1_magicless = 1 /* Variant of zstd frame format, without initial 4-bytes magic number.
+ * Useful to save 4 bytes per generated frame.
+ * Decoder cannot automatically recognise this format, so it must be explicitly instructed to expect it. */
+} ZSTD_format_e;
+
+typedef enum {
+ /* Note: this enum controls ZSTD_d_forceIgnoreChecksum */
+ ZSTD_d_validateChecksum = 0,
+ ZSTD_d_ignoreChecksum = 1
+} ZSTD_forceIgnoreChecksum_e;
+
+typedef enum {
+ /* Note: this enum controls ZSTD_d_refMultipleDDicts */
+ ZSTD_rmd_refSingleDDict = 0,
+ ZSTD_rmd_refMultipleDDicts = 1
+} ZSTD_refMultipleDDicts_e;
+
+typedef enum {
+ /* Note: this enum and the behavior it controls are effectively internal
+ * implementation details of the compressor. They are expected to continue
+ * to evolve and should be considered only in the context of extremely
+ * advanced performance tuning.
+ *
+ * Zstd currently supports the use of a CDict in three ways:
+ *
+ * - The contents of the CDict can be copied into the working context. This
+ * means that the compression can search both the dictionary and input
+ * while operating on a single set of internal tables. This makes
+ * the compression faster per-byte of input. However, the initial copy of
+ * the CDict's tables incurs a fixed cost at the beginning of the
+ * compression. For small compressions (< 8 KB), that copy can dominate
+ * the cost of the compression.
+ *
+ * - The CDict's tables can be used in-place. In this model, compression is
+ * slower per input byte, because the compressor has to search two sets of
+ * tables. However, this model incurs no start-up cost (as long as the
+ * working context's tables can be reused). For small inputs, this can be
+ * faster than copying the CDict's tables.
+ *
+ * - The CDict's tables are not used at all, and instead we use the working
+ * context alone to reload the dictionary and use params based on the source
+ * size. See ZSTD_compress_insertDictionary() and ZSTD_compress_usingDict().
+ * This method is effective when the dictionary sizes are very small relative
+ * to the input size, and the input size is fairly large to begin with.
+ *
+ * Zstd has a simple internal heuristic that selects which strategy to use
+ * at the beginning of a compression. However, if experimentation shows that
+ * Zstd is making poor choices, it is possible to override that choice with
+ * this enum.
+ */
+ ZSTD_dictDefaultAttach = 0, /* Use the default heuristic. */
+ ZSTD_dictForceAttach = 1, /* Never copy the dictionary. */
+ ZSTD_dictForceCopy = 2, /* Always copy the dictionary. */
+ ZSTD_dictForceLoad = 3 /* Always reload the dictionary */
+} ZSTD_dictAttachPref_e;
+
+typedef enum {
+ ZSTD_lcm_auto = 0, /*< Automatically determine the literals compression mode based on the compression level.
+ * Negative compression levels emit uncompressed literals; positive compression
+ * levels attempt to compress the literals. */
+ ZSTD_lcm_huffman = 1, /*< Always attempt Huffman compression. Uncompressed literals will still be
+ * emitted if Huffman compression is not profitable. */
+ ZSTD_lcm_uncompressed = 2 /*< Always emit uncompressed literals. */
+} ZSTD_literalCompressionMode_e;
+
+
+/* *************************************
+* Frame size functions
+***************************************/
+
+/*! ZSTD_findDecompressedSize() :
+ * `src` should point to the start of a series of ZSTD encoded and/or skippable frames
+ * `srcSize` must be the _exact_ size of this series
+ * (i.e. there should be a frame boundary at `src + srcSize`)
+ * @return : - decompressed size of all data in all successive frames
+ * - if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN
+ * - if an error occurred: ZSTD_CONTENTSIZE_ERROR
+ *
+ * note 1 : decompressed size is an optional field that may not be present, especially in streaming mode.
+ * When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
+ * In which case, it's necessary to use streaming mode to decompress data.
+ * note 2 : decompressed size is always present when compression is done with ZSTD_compress()
+ * note 3 : decompressed size can be very large (64-bits value),
+ * potentially larger than what local system can handle as a single memory segment.
+ * In which case, it's necessary to use streaming mode to decompress data.
+ * note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified.
+ * Always ensure result fits within application's authorized limits.
+ * Each application can set its own limits.
+ * note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to
+ * read each contained frame header. This is fast as most of the data is skipped,
+ * however it does mean that all frame data must be present and valid. */
+ZSTDLIB_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize);
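+
+/* Illustrative sketch : validate the reported size against an application-chosen
+ * limit before allocating, since the field comes from potentially untrusted input.
+ * MY_MAX_OUTPUT and useStreaming() are hypothetical application-side names :
+ *
+ *   unsigned long long const dSize = ZSTD_findDecompressedSize(src, srcSize);
+ *   if (dSize == ZSTD_CONTENTSIZE_ERROR) return -1;              // not valid zstd data
+ *   if (dSize == ZSTD_CONTENTSIZE_UNKNOWN) return useStreaming(); // size not recorded
+ *   if (dSize > MY_MAX_OUTPUT) return -1;                        // refuse oversized output
+ */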
+
+/*! ZSTD_decompressBound() :
+ * `src` should point to the start of a series of ZSTD encoded and/or skippable frames
+ * `srcSize` must be the _exact_ size of this series
+ * (i.e. there should be a frame boundary at `src + srcSize`)
+ * @return : - upper-bound for the decompressed size of all data in all successive frames
+ * - if an error occurred: ZSTD_CONTENTSIZE_ERROR
+ *
+ * note 1 : an error can occur if `src` contains an invalid or incorrectly formatted frame.
+ * note 2 : the upper-bound is exact when the decompressed size field is available in every ZSTD encoded frame of `src`.
+ * in this case, `ZSTD_findDecompressedSize` and `ZSTD_decompressBound` return the same value.
+ * note 3 : when the decompressed size field isn't available, the upper-bound for that frame is calculated by:
+ * upper-bound = # blocks * min(128 KB, Window_Size)
+ */
+ZSTDLIB_API unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize);
+
+/*! ZSTD_frameHeaderSize() :
+ * srcSize must be >= ZSTD_FRAMEHEADERSIZE_PREFIX.
+ * @return : size of the Frame Header,
+ * or an error code (if srcSize is too small) */
+ZSTDLIB_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
+
+typedef enum {
+ ZSTD_sf_noBlockDelimiters = 0, /* Representation of ZSTD_Sequence has no block delimiters, sequences only */
+ ZSTD_sf_explicitBlockDelimiters = 1 /* Representation of ZSTD_Sequence contains explicit block delimiters */
+} ZSTD_sequenceFormat_e;
+
+/*! ZSTD_generateSequences() :
+ * Generate sequences using ZSTD_compress2, given a source buffer.
+ *
+ * Each block will end with a dummy sequence
+ * with offset == 0, matchLength == 0, and litLength == length of last literals.
+ * litLength may be == 0, and if so, the sequence (of: 0, ml: 0, ll: 0)
+ * simply acts as a block delimiter.
+ *
+ * zc can be used to insert custom compression params.
+ * This function invokes ZSTD_compress2().
+ *
+ * The output of this function can be fed into ZSTD_compressSequences() with CCtx
+ * setting of ZSTD_c_blockDelimiters as ZSTD_sf_explicitBlockDelimiters
+ * @return : number of sequences generated
+ */
+
+ZSTDLIB_API size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
+ size_t outSeqsSize, const void* src, size_t srcSize);
+
+/*! ZSTD_mergeBlockDelimiters() :
+ * Given an array of ZSTD_Sequence, remove all sequences that represent block delimiters/last literals
+ * by merging them into the literals of the next sequence.
+ *
+ * As such, the final generated result has no explicit representation of block boundaries,
+ * and the final last literals segment is not represented in the sequences.
+ *
+ * The output of this function can be fed into ZSTD_compressSequences() with CCtx
+ * setting of ZSTD_c_blockDelimiters as ZSTD_sf_noBlockDelimiters
+ * @return : number of sequences left after merging
+ */
+ZSTDLIB_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize);
+
+/*! ZSTD_compressSequences() :
+ * Compress an array of ZSTD_Sequence, generated from the original source buffer, into dst.
+ * If a dictionary is included, then the cctx should reference the dict. (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.)
+ * The entire source is compressed into a single frame.
+ *
+ * The compression behavior changes based on cctx params. In particular:
+ * If ZSTD_c_blockDelimiters == ZSTD_sf_noBlockDelimiters, the array of ZSTD_Sequence is expected to contain
+ * no block delimiters (defined in ZSTD_Sequence). Block boundaries are roughly determined based on
+ * the block size derived from the cctx, and sequences may be split. This is the default setting.
+ *
+ * If ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, the array of ZSTD_Sequence is expected to contain
+ * block delimiters (defined in ZSTD_Sequence). Behavior is undefined if no block delimiters are provided.
+ *
+ * If ZSTD_c_validateSequences == 0, this function will blindly accept the sequences provided. Invalid sequences cause undefined
+ * behavior. If ZSTD_c_validateSequences == 1, then if a sequence is invalid (see doc/zstd_compression_format.md for
+ * specifics regarding offset/matchlength requirements) the function will bail out and return an error.
+ *
+ * In addition to the two adjustable experimental params, there are other important cctx params.
+ * - ZSTD_c_minMatch MUST be set to a value less than or equal to the smallest match generated by the match finder. It has a minimum value of ZSTD_MINMATCH_MIN.
+ * - ZSTD_c_compressionLevel accordingly adjusts the strength of the entropy coder, as it would in typical compression.
+ * - ZSTD_c_windowLog affects offset validation: this function will return an error at higher debug levels if a provided offset
+ * is larger than what the spec allows for a given window log and dictionary (if present). See: doc/zstd_compression_format.md
+ *
+ * Note: Repcodes are, as of now, always re-calculated within this function, so ZSTD_Sequence::rep is unused.
+ * Note 2: Once we integrate ability to ingest repcodes, the explicit block delims mode must respect those repcodes exactly,
+ * and cannot emit an RLE block that disagrees with the repcode history
+ * @return : final compressed size or a ZSTD error.
+ */
+ZSTDLIB_API size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstSize,
+ const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
+ const void* src, size_t srcSize);
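+
+/* Illustrative sketch of the generate-then-compress pipeline (error handling and
+ * buffer sizing elided; `seqs` and `maxSeqs` are a hypothetical caller-provided
+ * array and its capacity) :
+ *
+ *   size_t const nbSeqs = ZSTD_generateSequences(cctx, seqs, maxSeqs, src, srcSize);
+ *   ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, ZSTD_sf_explicitBlockDelimiters);
+ *   ZSTD_CCtx_setParameter(cctx, ZSTD_c_validateSequences, 1);
+ *   size_t const cSize = ZSTD_compressSequences(cctx, dst, dstCapacity,
+ *                                               seqs, nbSeqs, src, srcSize);
+ *   if (ZSTD_isError(cSize)) { ... }
+ */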
+
+
+/*! ZSTD_writeSkippableFrame() :
+ * Generates a zstd skippable frame containing data given by src, and writes it to dst buffer.
+ *
+ * Skippable frames begin with a 4-byte magic number. There are 16 possible choices of magic number,
+ * ranging from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15.
+ * As such, the parameter magicVariant controls the exact skippable frame magic number variant used, so
+ * the magic number used will be ZSTD_MAGIC_SKIPPABLE_START + magicVariant.
+ *
+ * Returns an error if destination buffer is not large enough, if the source size is not representable
+ * with a 4-byte unsigned int, or if the parameter magicVariant is greater than 15 (and therefore invalid).
+ *
+ * @return : number of bytes written or a ZSTD error.
+ */
+ZSTDLIB_API size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize, unsigned magicVariant);
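+
+/* Illustrative sketch : embed an application-defined metadata blob as a skippable
+ * frame, using variant 0 (magic number ZSTD_MAGIC_SKIPPABLE_START).
+ * `meta` / `metaSize` are hypothetical application data :
+ *
+ *   size_t const written = ZSTD_writeSkippableFrame(dst, dstCapacity, meta, metaSize, 0);
+ *   if (ZSTD_isError(written)) { ... }   // dst too small, metaSize not representable, ...
+ */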
+
+
+/* *************************************
+* Memory management
+***************************************/
+
+/*! ZSTD_estimate*() :
+ * These functions make it possible to estimate memory usage
+ * of a future {D,C}Ctx, before its creation.
+ *
+ * ZSTD_estimateCCtxSize() will provide a memory budget large enough
+ * for any compression level up to the selected one.
+ * Note : Unlike ZSTD_estimateCStreamSize*(), this estimate
+ * does not include space for a window buffer.
+ * Therefore, the estimation is only guaranteed for single-shot compressions, not streaming.
+ * The estimate will assume the input may be arbitrarily large,
+ * which is the worst case.
+ *
+ * When srcSize can be bound by a known and rather "small" value,
+ * this fact can be used to provide a tighter estimation
+ * because the CCtx compression context will need less memory.
+ * This tighter estimation can be provided by more advanced functions
+ * ZSTD_estimateCCtxSize_usingCParams(), which can be used in tandem with ZSTD_getCParams(),
+ * and ZSTD_estimateCCtxSize_usingCCtxParams(), which can be used in tandem with ZSTD_CCtxParams_setParameter().
+ * Both can be used to estimate memory using custom compression parameters and arbitrary srcSize limits.
+ *
+ * Note 2 : only single-threaded compression is supported.
+ * ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1.
+ */
+ZSTDLIB_API size_t ZSTD_estimateCCtxSize(int compressionLevel);
+ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
+ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params);
+ZSTDLIB_API size_t ZSTD_estimateDCtxSize(void);
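+
+/* Illustrative sketch : derive a tighter budget for a known-small input by pairing
+ * ZSTD_getCParams() (declared further below) with ZSTD_estimateCCtxSize_usingCParams().
+ * `knownSrcSize` is a hypothetical bound on the input size :
+ *
+ *   ZSTD_compressionParameters const cParams = ZSTD_getCParams(3, knownSrcSize, 0);
+ *   size_t const budget = ZSTD_estimateCCtxSize_usingCParams(cParams);
+ */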
+
+/*! ZSTD_estimateCStreamSize() :
+ * ZSTD_estimateCStreamSize() will provide a budget large enough for any compression level up to the selected one.
+ * It will also consider src size to be arbitrarily "large", which is worst case.
+ * If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation.
+ * ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
+ * ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter().
+ * Only single-threaded compression is supported; this function will return an error code if ZSTD_c_nbWorkers is >= 1.
+ * Note : CStream size estimation is only correct for single-threaded compression.
+ * ZSTD_DStream memory budget depends on window Size.
+ * This information can be passed manually, using ZSTD_estimateDStreamSize,
+ * or deduced from a valid frame header, using ZSTD_estimateDStreamSize_fromFrame();
+ * Note : if a stream is initialized with ZSTD_init?Stream_usingDict(),
+ * an internal ?Dict will be created, whose additional size is not estimated here.
+ * In this case, get total size by adding ZSTD_estimate?DictSize */
+ZSTDLIB_API size_t ZSTD_estimateCStreamSize(int compressionLevel);
+ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams);
+ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params);
+ZSTDLIB_API size_t ZSTD_estimateDStreamSize(size_t windowSize);
+ZSTDLIB_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize);
+
+/*! ZSTD_estimate?DictSize() :
+ * ZSTD_estimateCDictSize() will bet that src size is relatively "small", and content is copied, like ZSTD_createCDict().
+ * ZSTD_estimateCDictSize_advanced() makes it possible to control compression parameters precisely, like ZSTD_createCDict_advanced().
+ * Note : dictionaries created by reference (`ZSTD_dlm_byRef`) are logically smaller.
+ */
+ZSTDLIB_API size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel);
+ZSTDLIB_API size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod);
+ZSTDLIB_API size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod);
+
+/*! ZSTD_initStatic*() :
+ * Initialize an object using a pre-allocated fixed-size buffer.
+ * workspace: The memory area to emplace the object into.
+ * Provided pointer *must be 8-byte aligned*.
+ * Buffer must outlive object.
+ * workspaceSize: Use ZSTD_estimate*Size() to determine
+ * how large workspace must be to support target scenario.
+ * @return : pointer to object (same address as workspace, just different type),
+ * or NULL if error (size too small, incorrect alignment, etc.)
+ * Note : zstd will never resize nor malloc() when using a static buffer.
+ * If the object requires more memory than available,
+ * zstd will just error out (typically ZSTD_error_memory_allocation).
+ * Note 2 : there is no corresponding "free" function.
+ * Since workspace is allocated externally, it must be freed externally too.
+ * Note 3 : cParams : use ZSTD_getCParams() to convert a compression level
+ * into its associated cParams.
+ * Limitation 1 : currently not compatible with internal dictionary creation, triggered by
+ * ZSTD_CCtx_loadDictionary(), ZSTD_initCStream_usingDict() or ZSTD_initDStream_usingDict().
+ * Limitation 2 : static cctx currently not compatible with multi-threading.
+ * Limitation 3 : static dctx is incompatible with legacy support.
+ */
+ZSTDLIB_API ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize);
+ZSTDLIB_API ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize); /*< same as ZSTD_initStaticCCtx() */
+
+ZSTDLIB_API ZSTD_DCtx* ZSTD_initStaticDCtx(void* workspace, size_t workspaceSize);
+ZSTDLIB_API ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize); /*< same as ZSTD_initStaticDCtx() */
+
+ZSTDLIB_API const ZSTD_CDict* ZSTD_initStaticCDict(
+ void* workspace, size_t workspaceSize,
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_compressionParameters cParams);
+
+ZSTDLIB_API const ZSTD_DDict* ZSTD_initStaticDDict(
+ void* workspace, size_t workspaceSize,
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType);
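+
+/* Illustrative sketch of the static-allocation workflow : estimate, provide an
+ * 8-byte-aligned workspace, then compress as usual. The fixed workspace size below
+ * is hypothetical; a real application would size it with ZSTD_estimate*Size() :
+ *
+ *   static char workspace[200 * 1024] __attribute__((aligned(8)));
+ *   ZSTD_CCtx* const cctx = ZSTD_initStaticCCtx(workspace, sizeof(workspace));
+ *   if (cctx == NULL) { ... }            // workspace too small or misaligned
+ *   size_t const cSize = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, 3);
+ */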
+
+
+/*! Custom memory allocation :
+ * These prototypes make it possible to pass your own allocation/free functions.
+ * ZSTD_customMem is provided at creation time, using ZSTD_create*_advanced() variants listed below.
+ * All allocation/free operations will be completed using these custom variants instead of regular <stdlib.h> ones.
+ */
+typedef void* (*ZSTD_allocFunction) (void* opaque, size_t size);
+typedef void (*ZSTD_freeFunction) (void* opaque, void* address);
+typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void* opaque; } ZSTD_customMem;
+static
+__attribute__((__unused__))
+ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL }; /*< this constant defers to stdlib's functions */
+
+ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem);
+ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
+ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem);
+ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem);
+
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_compressionParameters cParams,
+ ZSTD_customMem customMem);
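+
+/* Illustrative sketch : route all internal allocations through caller-provided
+ * functions. myAlloc(), myFree() and myState are hypothetical; they only need to
+ * match the ZSTD_allocFunction / ZSTD_freeFunction signatures above :
+ *
+ *   ZSTD_customMem const cmem = { myAlloc, myFree, myState };
+ *   ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(cmem);
+ *   ...
+ *   ZSTD_freeCCtx(cctx);   // releases its memory through myFree()
+ */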
+
+/* ! Thread pool :
+ * These prototypes make it possible to share a thread pool among multiple compression contexts.
+ * This can limit resources for applications with multiple threads where each one uses
+ * a threaded compression mode (via ZSTD_c_nbWorkers parameter).
+ * ZSTD_createThreadPool creates a new thread pool with a given number of threads.
+ * Note that the pool must remain alive for as long as it is being used.
+ * ZSTD_CCtx_refThreadPool assigns a thread pool to a context (use NULL argument value
+ * to use an internal thread pool).
+ * ZSTD_freeThreadPool frees a thread pool, accepts NULL pointer.
+ */
+typedef struct POOL_ctx_s ZSTD_threadPool;
+ZSTDLIB_API ZSTD_threadPool* ZSTD_createThreadPool(size_t numThreads);
+ZSTDLIB_API void ZSTD_freeThreadPool (ZSTD_threadPool* pool); /* accept NULL pointer */
+ZSTDLIB_API size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool);
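+
+/* Illustrative sketch : share one pool between two multi-threaded contexts.
+ * cctx1 / cctx2 are hypothetical contexts created elsewhere; the pool must
+ * outlive every context that references it :
+ *
+ *   ZSTD_threadPool* const pool = ZSTD_createThreadPool(4);
+ *   ZSTD_CCtx_setParameter(cctx1, ZSTD_c_nbWorkers, 4);
+ *   ZSTD_CCtx_setParameter(cctx2, ZSTD_c_nbWorkers, 4);
+ *   ZSTD_CCtx_refThreadPool(cctx1, pool);
+ *   ZSTD_CCtx_refThreadPool(cctx2, pool);
+ *   ...                                   // compress on both contexts
+ *   ZSTD_freeThreadPool(pool);
+ */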
+
+
+/*
+ * This API is temporary and is expected to change or disappear in the future!
+ */
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced2(
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ const ZSTD_CCtx_params* cctxParams,
+ ZSTD_customMem customMem);
+
+ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced(
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_customMem customMem);
+
+
+/* *************************************
+* Advanced compression functions
+***************************************/
+
+/*! ZSTD_createCDict_byReference() :
+ * Create a digested dictionary for compression
+ * Dictionary content is just referenced, not duplicated.
+ * As a consequence, `dictBuffer` **must** outlive CDict,
+ * and its content must remain unmodified throughout the lifetime of CDict.
+ * note: equivalent to ZSTD_createCDict_advanced(), with dictLoadMethod==ZSTD_dlm_byRef */
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);
+
+/*! ZSTD_getDictID_fromCDict() :
+ * Provides the dictID of the dictionary loaded into `cdict`.
+ * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
+ * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromCDict(const ZSTD_CDict* cdict);
+
+/*! ZSTD_getCParams() :
+ * @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize.
+ * `estimatedSrcSize` value is optional, select 0 if not known */
+ZSTDLIB_API ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
+
+/*! ZSTD_getParams() :
+ * same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of sub-component `ZSTD_compressionParameters`.
+ * All fields of `ZSTD_frameParameters` are set to default : contentSize=1, checksum=0, noDictID=0 */
+ZSTDLIB_API ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
+
+/*! ZSTD_checkCParams() :
+ * Ensure param values remain within authorized range.
+ * @return 0 on success, or an error code (can be checked with ZSTD_isError()) */
+ZSTDLIB_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params);
+
+/*! ZSTD_adjustCParams() :
+ * optimize params for a given `srcSize` and `dictSize`.
+ * `srcSize` can be unknown, in which case use ZSTD_CONTENTSIZE_UNKNOWN.
+ * `dictSize` must be `0` when there is no dictionary.
+ * cPar can be invalid : all parameters will be clamped within valid range in the @return struct.
+ * This function never fails (wide contract) */
+ZSTDLIB_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize);
+
+/*! ZSTD_compress_advanced() :
+ * Note : this function is now DEPRECATED.
+ * It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters.
+ * This prototype will be marked as deprecated and generate compilation warning on reaching v1.5.x */
+ZSTDLIB_API size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize,
+ ZSTD_parameters params);
+
+/*! ZSTD_compress_usingCDict_advanced() :
+ * Note : this function is now REDUNDANT.
+ * It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_loadDictionary() and other parameter setters.
+ * This prototype will be marked as deprecated and generate compilation warning in some future version */
+ZSTDLIB_API size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_CDict* cdict,
+ ZSTD_frameParameters fParams);
+
+
+/*! ZSTD_CCtx_loadDictionary_byReference() :
+ * Same as ZSTD_CCtx_loadDictionary(), but dictionary content is referenced, instead of being copied into CCtx.
+ * It saves some memory, but also requires that `dict` outlives its usage within `cctx` */
+ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
+
+/*! ZSTD_CCtx_loadDictionary_advanced() :
+ * Same as ZSTD_CCtx_loadDictionary(), but gives finer control over
+ * how to load the dictionary (by copy ? by reference ?)
+ * and how to interpret it (automatic ? force raw mode ? full mode only ?) */
+ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
+
+/*! ZSTD_CCtx_refPrefix_advanced() :
+ * Same as ZSTD_CCtx_refPrefix(), but gives finer control over
+ * how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */
+ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
+
+/* === experimental parameters === */
+/* these parameters can be used with ZSTD_setParameter()
+ * they are not guaranteed to remain supported in the future */
+
+ /* Enables rsyncable mode,
+ * which makes compressed files more rsync friendly
+ * by adding periodic synchronization points to the compressed data.
+ * The target average block size is ZSTD_c_jobSize / 2.
+ * It's possible to modify the job size to increase or decrease
+ * the granularity of the synchronization point.
+ * Once the jobSize is smaller than the window size,
+ * it will result in compression ratio degradation.
+ * NOTE 1: rsyncable mode only works when multithreading is enabled.
+ * NOTE 2: rsyncable performs poorly in combination with long range mode,
+ * since it will decrease the effectiveness of synchronization points,
+ * though mileage may vary.
+ * NOTE 3: Rsyncable mode limits maximum compression speed to ~400 MB/s.
+ * If the selected compression level is already running significantly slower,
+ * the overall speed won't be significantly impacted.
+ */
+ #define ZSTD_c_rsyncable ZSTD_c_experimentalParam1
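+
+ /* Illustrative sketch : rsyncable output requires multithreading, so enable
+  * workers first. `cctx` is a hypothetical context created elsewhere :
+  *
+  *   ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 4);
+  *   ZSTD_CCtx_setParameter(cctx, ZSTD_c_rsyncable, 1);
+  */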
+
+/* Select a compression format.
+ * The value must be of type ZSTD_format_e.
+ * See ZSTD_format_e enum definition for details */
+#define ZSTD_c_format ZSTD_c_experimentalParam2
+
+/* Force back-reference distances to remain < windowSize,
+ * even when referencing into Dictionary content (default:0) */
+#define ZSTD_c_forceMaxWindow ZSTD_c_experimentalParam3
+
+/* Controls whether the contents of a CDict
+ * are used in place, or copied into the working context.
+ * Accepts values from the ZSTD_dictAttachPref_e enum.
+ * See the comments on that enum for an explanation of the feature. */
+#define ZSTD_c_forceAttachDict ZSTD_c_experimentalParam4
+
+/* Controls how the literals are compressed (default is auto).
+ * The value must be of type ZSTD_literalCompressionMode_e.
+ * See ZSTD_literalCompressionMode_e enum definition for details.
+ */
+#define ZSTD_c_literalCompressionMode ZSTD_c_experimentalParam5
+
+/* Tries to fit compressed block size to be around targetCBlockSize.
+ * No target when targetCBlockSize == 0.
+ * There is no guarantee on compressed block size (default:0) */
+#define ZSTD_c_targetCBlockSize ZSTD_c_experimentalParam6
+
+/* User's best guess of source size.
+ * Hint is not valid when srcSizeHint == 0.
+ * There is no guarantee that hint is close to actual source size,
+ * but compression ratio may regress significantly if the guess considerably underestimates it. */
+#define ZSTD_c_srcSizeHint ZSTD_c_experimentalParam7
+
+/* Controls whether the new and experimental "dedicated dictionary search
+ * structure" can be used. This feature is still rough around the edges, be
+ * prepared for surprising behavior!
+ *
+ * How to use it:
+ *
+ * When using a CDict, whether to use this feature or not is controlled at
+ * CDict creation, and it must be set in a CCtxParams set passed into that
+ * construction (via ZSTD_createCDict_advanced2()). A compression will then
+ * use the feature or not based on how the CDict was constructed; the value of
+ * this param, set in the CCtx, will have no effect.
+ *
+ * However, when a dictionary buffer is passed into a CCtx, such as via
+ * ZSTD_CCtx_loadDictionary(), this param can be set on the CCtx to control
+ * whether the CDict that is created internally can use the feature or not.
+ *
+ * What it does:
+ *
+ * Normally, the internal data structures of the CDict are analogous to what
+ * would be stored in a CCtx after compressing the contents of a dictionary.
+ * To an approximation, a compression using a dictionary can then use those
+ * data structures to simply continue what is effectively a streaming
+ * compression where the simulated compression of the dictionary left off.
+ * Which is to say, the search structures in the CDict are normally the same
+ * format as in the CCtx.
+ *
+ * It is possible to do better, since the CDict is not like a CCtx: the search
+ * structures are written once during CDict creation, and then are only read
+ * after that, while the search structures in the CCtx are both read and
+ * written as the compression goes along. This means we can choose a search
+ * structure for the dictionary that is read-optimized.
+ *
+ * This feature enables the use of that different structure.
+ *
+ * Note that some of the members of the ZSTD_compressionParameters struct have
+ * different semantics and constraints in the dedicated search structure. It is
+ * highly recommended that you simply set a compression level in the CCtxParams
+ * you pass into the CDict creation call, and avoid messing with the cParams
+ * directly.
+ *
+ * Effects:
+ *
+ * This will only have any effect when the selected ZSTD_strategy
+ * implementation supports this feature. Currently, that's limited to
+ * ZSTD_greedy, ZSTD_lazy, and ZSTD_lazy2.
+ *
+ * Note that this means that the CDict tables can no longer be copied into the
+ * CCtx, so the dict attachment mode ZSTD_dictForceCopy will no longer be
+ * usable. The dictionary can only be attached or reloaded.
+ *
+ * In general, you should expect compression to be faster--sometimes very much
+ * so--and CDict creation to be slightly slower. Eventually, we will probably
+ * make this mode the default.
+ */
+#define ZSTD_c_enableDedicatedDictSearch ZSTD_c_experimentalParam8
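+
+/* Illustrative sketch : build a CDict with the dedicated search structure enabled,
+ * by passing a CCtxParams object to ZSTD_createCDict_advanced2() (declared above).
+ * `dictBuf` / `dictSize` are hypothetical; the level only matters insofar as it
+ * selects a supported strategy :
+ *
+ *   ZSTD_CCtx_params* const cp = ZSTD_createCCtxParams();
+ *   ZSTD_CCtxParams_setParameter(cp, ZSTD_c_compressionLevel, 10);
+ *   ZSTD_CCtxParams_setParameter(cp, ZSTD_c_enableDedicatedDictSearch, 1);
+ *   ZSTD_CDict* const cdict = ZSTD_createCDict_advanced2(dictBuf, dictSize,
+ *                   ZSTD_dlm_byCopy, ZSTD_dct_auto, cp, ZSTD_defaultCMem);
+ *   ZSTD_freeCCtxParams(cp);
+ */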
+
+/* ZSTD_c_stableInBuffer
+ * Experimental parameter.
+ * Default is 0 == disabled. Set to 1 to enable.
+ *
+ * Tells the compressor that the ZSTD_inBuffer will ALWAYS be the same
+ * between calls, except for the modifications that zstd makes to pos (the
+ * caller must not modify pos). This is checked by the compressor, and
+ * compression will fail if it ever changes. This means the only flush
+ * mode that makes sense is ZSTD_e_end, so zstd will error if ZSTD_e_end
+ * is not used. The data in the ZSTD_inBuffer in the range [src, src + pos)
+ * MUST not be modified during compression or you will get data corruption.
+ *
+ * When this flag is enabled zstd won't allocate an input window buffer,
+ * because the user guarantees it can reference the ZSTD_inBuffer until
+ * the frame is complete. But, it will still allocate an output buffer
+ * large enough to fit a block (see ZSTD_c_stableOutBuffer). This will also
+ * avoid the memcpy() from the input buffer to the input window buffer.
+ *
+ * NOTE: ZSTD_compressStream2() will error if ZSTD_e_end is not used.
+ * That means this flag cannot be used with ZSTD_compressStream().
+ *
+ * NOTE: So long as the ZSTD_inBuffer always points to valid memory, using
+ * this flag is ALWAYS memory safe, and will never access out-of-bounds
+ * memory. However, compression WILL fail if you violate the preconditions.
+ *
+ * WARNING: The data in the ZSTD_inBuffer in the range [src, src + pos) MUST
+ * not be modified during compression or you will get data corruption. This
+ * is because zstd needs to reference data in the ZSTD_inBuffer to find
+ * matches. Normally zstd maintains its own window buffer for this purpose,
+ * but passing this flag tells zstd to use the user provided buffer.
+ */
+#define ZSTD_c_stableInBuffer ZSTD_c_experimentalParam9
+
+/* ZSTD_c_stableOutBuffer
+ * Experimental parameter.
+ * Default is 0 == disabled. Set to 1 to enable.
+ *
+ * Tells the compressor that the ZSTD_outBuffer will not be resized between
+ * calls. Specifically: (out.size - out.pos) will never grow. This gives the
+ * compressor the freedom to say: If the compressed data doesn't fit in the
+ * output buffer then return ZSTD_error_dstSizeTooSmall. This allows us to
+ * always compress directly into the output buffer, instead of compressing
+ * into an internal buffer and copying to the output buffer.
+ *
+ * When this flag is enabled zstd won't allocate an output buffer, because
+ * it can write directly to the ZSTD_outBuffer. It will still allocate the
+ * input window buffer (see ZSTD_c_stableInBuffer).
+ *
+ * Zstd will check that (out.size - out.pos) never grows and return an error
+ * if it does. While not strictly necessary, this should prevent surprises.
+ */
+#define ZSTD_c_stableOutBuffer ZSTD_c_experimentalParam10
+
+/* ZSTD_c_blockDelimiters
+ * Default is 0 == ZSTD_sf_noBlockDelimiters.
+ *
+ * For use with sequence compression API: ZSTD_compressSequences().
+ *
+ * Designates whether or not the given array of ZSTD_Sequence contains block delimiters
+ * and last literals, which are defined as sequences with offset == 0 and matchLength == 0.
+ * See the definition of ZSTD_Sequence for more specifics.
+ */
+#define ZSTD_c_blockDelimiters ZSTD_c_experimentalParam11
+
+/* ZSTD_c_validateSequences
+ * Default is 0 == disabled. Set to 1 to enable sequence validation.
+ *
+ * For use with sequence compression API: ZSTD_compressSequences().
+ * Designates whether or not we validate sequences provided to ZSTD_compressSequences()
+ * during function execution.
+ *
+ * Without validation, providing a sequence that does not conform to the zstd spec will cause
+ * undefined behavior, and may produce a corrupted block.
+ *
+ * With validation enabled, if a sequence is invalid (see doc/zstd_compression_format.md for
+ * specifics regarding offset/matchlength requirements) then the function will bail out and
+ * return an error.
+ *
+ */
+#define ZSTD_c_validateSequences ZSTD_c_experimentalParam12
+
+/*! ZSTD_CCtx_getParameter() :
+ * Get the requested compression parameter value, selected by enum ZSTD_cParameter,
+ * and store it into int* value.
+ * @return : 0, or an error code (which can be tested with ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_CCtx_getParameter(const ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value);
+
+
+/*! ZSTD_CCtx_params :
+ * Quick howto :
+ * - ZSTD_createCCtxParams() : Create a ZSTD_CCtx_params structure
+ * - ZSTD_CCtxParams_setParameter() : Push parameters one by one into
+ * an existing ZSTD_CCtx_params structure.
+ * This is similar to
+ * ZSTD_CCtx_setParameter().
+ * - ZSTD_CCtx_setParametersUsingCCtxParams() : Apply parameters to
+ * an existing CCtx.
+ * These parameters will be applied to
+ * all subsequent frames.
+ * - ZSTD_compressStream2() : Do compression using the CCtx.
+ * - ZSTD_freeCCtxParams() : Free the memory, accept NULL pointer.
+ *
+ * This can be used with ZSTD_estimateCCtxSize_advanced_usingCCtxParams()
+ * for static allocation of CCtx for single-threaded compression.
+ */
+ZSTDLIB_API ZSTD_CCtx_params* ZSTD_createCCtxParams(void);
+ZSTDLIB_API size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params); /* accept NULL pointer */
+
+/*! ZSTD_CCtxParams_reset() :
+ * Reset params to default values.
+ */
+ZSTDLIB_API size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params);
+
+/*! ZSTD_CCtxParams_init() :
+ * Initializes the compression parameters of cctxParams according to
+ * compression level. All other parameters are reset to their default values.
+ */
+ZSTDLIB_API size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel);
+
+/*! ZSTD_CCtxParams_init_advanced() :
+ * Initializes the compression and frame parameters of cctxParams according to
+ * params. All other parameters are reset to their default values.
+ */
+ZSTDLIB_API size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params);
+
+/*! ZSTD_CCtxParams_setParameter() :
+ * Similar to ZSTD_CCtx_setParameter.
+ * Set one compression parameter, selected by enum ZSTD_cParameter.
+ * Parameters must be applied to a ZSTD_CCtx using
+ * ZSTD_CCtx_setParametersUsingCCtxParams().
+ * @result : a code representing success or failure (which can be tested with
+ * ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int value);
+
+/*! ZSTD_CCtxParams_getParameter() :
+ * Similar to ZSTD_CCtx_getParameter.
+ * Get the requested value of one compression parameter, selected by enum ZSTD_cParameter.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_CCtxParams_getParameter(const ZSTD_CCtx_params* params, ZSTD_cParameter param, int* value);
+
+/*! ZSTD_CCtx_setParametersUsingCCtxParams() :
+ * Apply a set of ZSTD_CCtx_params to the compression context.
+ * This can be done even after compression is started :
+ * if nbWorkers==0, this will have no impact until a new compression is started;
+ * if nbWorkers>=1, new parameters will be picked up at the next job,
+ * with a few restrictions (windowLog, pledgedSrcSize, nbWorkers, jobSize, and overlapLog are not updated).
+ */
+ZSTDLIB_API size_t ZSTD_CCtx_setParametersUsingCCtxParams(
+ ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params);
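+
+/* Illustrative sketch of the Quick howto above (error checks elided; `cctx` is a
+ * hypothetical context created elsewhere) :
+ *
+ *   ZSTD_CCtx_params* const cp = ZSTD_createCCtxParams();
+ *   ZSTD_CCtxParams_setParameter(cp, ZSTD_c_compressionLevel, 19);
+ *   ZSTD_CCtxParams_setParameter(cp, ZSTD_c_checksumFlag, 1);
+ *   ZSTD_CCtx_setParametersUsingCCtxParams(cctx, cp);
+ *   ZSTD_freeCCtxParams(cp);
+ *   // all subsequent frames on cctx use level 19 and emit a checksum
+ */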
+
+/*! ZSTD_compressStream2_simpleArgs() :
+ * Same as ZSTD_compressStream2(),
+ * but using only integral types as arguments.
+ * This variant might be helpful for binders from dynamic languages
+ * which have troubles handling structures containing memory pointers.
+ */
+ZSTDLIB_API size_t ZSTD_compressStream2_simpleArgs (
+ ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity, size_t* dstPos,
+ const void* src, size_t srcSize, size_t* srcPos,
+ ZSTD_EndDirective endOp);
+
+
+/* *************************************
+* Advanced decompression functions
+***************************************/
+
+/*! ZSTD_isFrame() :
+ * Tells if the content of `buffer` starts with a valid Frame Identifier.
+ * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
+ * Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
+ * Note 3 : Skippable Frame Identifiers are considered valid. */
+ZSTDLIB_API unsigned ZSTD_isFrame(const void* buffer, size_t size);
+
+/*! ZSTD_createDDict_byReference() :
+ * Create a digested dictionary, ready to start decompression operation without startup delay.
+ * Dictionary content is referenced, and therefore stays in dictBuffer.
+ * It is important that dictBuffer outlives DDict,
+ * it must remain read accessible throughout the lifetime of DDict */
+ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize);
+
+/*! ZSTD_DCtx_loadDictionary_byReference() :
+ * Same as ZSTD_DCtx_loadDictionary(),
+ * but references `dict` content instead of copying it into `dctx`.
+ * This saves memory if `dict` remains around.
+ * However, it's imperative that `dict` remains accessible (and unmodified) while being used, so it must outlive decompression. */
+ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
+
+/*! ZSTD_DCtx_loadDictionary_advanced() :
+ * Same as ZSTD_DCtx_loadDictionary(),
+ * but gives direct control over
+ * how to load the dictionary (by copy ? by reference ?)
+ * and how to interpret it (automatic ? force raw mode ? full mode only ?). */
+ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
+
+/*! ZSTD_DCtx_refPrefix_advanced() :
+ * Same as ZSTD_DCtx_refPrefix(), but gives finer control over
+ * how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */
+ZSTDLIB_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
+
+/*! ZSTD_DCtx_setMaxWindowSize() :
+ * Refuses allocating internal buffers for frames requiring a window size larger than provided limit.
+ * This protects a decoder context from reserving too much memory for itself (potential attack scenario).
+ * This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode.
+ * By default, a decompression context accepts all window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT)
+ * @return : 0, or an error code (which can be tested using ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize);
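+
+/* Illustrative sketch : cap the window a streaming decoder will accept at 8 MB,
+ * so frames declaring a larger windowSize fail instead of forcing a huge allocation :
+ *
+ *   size_t const err = ZSTD_DCtx_setMaxWindowSize(dctx, 1 << 23);   // 8 MB
+ *   if (ZSTD_isError(err)) { ... }
+ */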
+
+/*! ZSTD_DCtx_getParameter() :
+ * Get the requested decompression parameter value, selected by enum ZSTD_dParameter,
+ * and store it into int* value.
+ * @return : 0, or an error code (which can be tested with ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value);
+
+/* ZSTD_d_format
+ * experimental parameter,
+ * allowing selection between ZSTD_format_e input compression formats
+ */
+#define ZSTD_d_format ZSTD_d_experimentalParam1
+/* ZSTD_d_stableOutBuffer
+ * Experimental parameter.
+ * Default is 0 == disabled. Set to 1 to enable.
+ *
+ * Tells the decompressor that the ZSTD_outBuffer will ALWAYS be the same
+ * between calls, except for the modifications that zstd makes to pos (the
+ * caller must not modify pos). This is checked by the decompressor, and
+ * decompression will fail if it ever changes. Therefore the ZSTD_outBuffer
+ * MUST be large enough to fit the entire decompressed frame. This will be
+ * checked when the frame content size is known. The data in the ZSTD_outBuffer
+ * in the range [dst, dst + pos) MUST not be modified during decompression
+ * or you will get data corruption.
+ *
+ * When this flag is enabled zstd won't allocate an output buffer, because
+ * it can write directly to the ZSTD_outBuffer, but it will still allocate
+ * an input buffer large enough to fit any compressed block. This will also
+ * avoid the memcpy() from the internal output buffer to the ZSTD_outBuffer.
+ * If you need to avoid the input buffer allocation use the buffer-less
+ * streaming API.
+ *
+ * NOTE: So long as the ZSTD_outBuffer always points to valid memory, using
+ * this flag is ALWAYS memory safe, and will never access out-of-bounds
+ * memory. However, decompression WILL fail if you violate the preconditions.
+ *
+ * WARNING: The data in the ZSTD_outBuffer in the range [dst, dst + pos) MUST
+ * not be modified during decompression or you will get data corruption. This
+ * is because zstd needs to reference data in the ZSTD_outBuffer to regenerate
+ * matches. Normally zstd maintains its own buffer for this purpose, but passing
+ * this flag tells zstd to use the user provided buffer.
+ */
+#define ZSTD_d_stableOutBuffer ZSTD_d_experimentalParam2
+
+/* ZSTD_d_forceIgnoreChecksum
+ * Experimental parameter.
+ * Default is 0 == disabled. Set to 1 to enable
+ *
+ * Tells the decompressor to skip checksum validation during decompression, regardless
+ * of whether checksumming was specified during compression. This offers some
+ * slight performance benefits, and may be useful for debugging.
+ * Param has values of type ZSTD_forceIgnoreChecksum_e
+ */
+#define ZSTD_d_forceIgnoreChecksum ZSTD_d_experimentalParam3
+
+/* ZSTD_d_refMultipleDDicts
+ * Experimental parameter.
+ * Default is 0 == disabled. Set to 1 to enable
+ *
+ * If enabled and dctx is allocated on the heap, then additional memory will be allocated
+ * to store references to multiple ZSTD_DDict. That is, multiple calls of ZSTD_DCtx_refDDict()
+ * using a given ZSTD_DCtx, rather than overwriting the previous DDict reference, will instead
+ * store all references. At decompression time, the appropriate dictID is selected
+ * from the set of DDicts based on the dictID in the frame.
+ *
+ * Usage is simply calling ZSTD_DCtx_refDDict() on multiple DDicts.
+ *
+ * Param has values of type ZSTD_refMultipleDDicts_e
+ *
+ * WARNING: Enabling this parameter and calling ZSTD_DCtx_refDDict(), will trigger memory
+ * allocation for the hash table. ZSTD_freeDCtx() also frees this memory.
+ * Memory is allocated as per ZSTD_DCtx::customMem.
+ *
+ * Although this function allocates memory for the table, the user is still responsible for
+ * memory management of the underlying ZSTD_DDict* themselves.
+ */
+#define ZSTD_d_refMultipleDDicts ZSTD_d_experimentalParam4
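+
+/* Illustrative sketch : pre-reference several digested dictionaries and let the
+ * decoder pick the right one from each frame's dictID. `ddict1` / `ddict2` are
+ * hypothetical DDicts created beforehand :
+ *
+ *   ZSTD_DCtx_setParameter(dctx, ZSTD_d_refMultipleDDicts, ZSTD_rmd_refMultipleDDicts);
+ *   ZSTD_DCtx_refDDict(dctx, ddict1);
+ *   ZSTD_DCtx_refDDict(dctx, ddict2);     // both references are retained
+ */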
+
+
+/*! ZSTD_DCtx_setFormat() :
+ * Instruct the decoder context about what kind of data to decode next.
+ * This instruction is mandatory to decode data without a fully-formed header,
+ * such as ZSTD_f_zstd1_magicless for example.
+ * @return : 0, or an error code (which can be tested using ZSTD_isError()). */
+ZSTDLIB_API size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format);
+
+/*! ZSTD_decompressStream_simpleArgs() :
+ * Same as ZSTD_decompressStream(),
+ * but using only integral types as arguments.
+ * This can be helpful for binders from dynamic languages
+ * which have troubles handling structures containing memory pointers.
+ */
+ZSTDLIB_API size_t ZSTD_decompressStream_simpleArgs (
+ ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity, size_t* dstPos,
+ const void* src, size_t srcSize, size_t* srcPos);
+
+
+/* ******************************************************************
+* Advanced streaming functions
+* Warning : most of these functions are now redundant with the Advanced API.
+* Once Advanced API reaches "stable" status,
+* redundant functions will be deprecated, and then at some point removed.
+********************************************************************/
+
+/*===== Advanced Streaming compression functions =====*/
+
+/*! ZSTD_initCStream_srcSize() :
+ * This function is deprecated, and equivalent to:
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
+ * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
+ * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
+ *
+ * pledgedSrcSize must be correct. If it is not known at init time, use
+ * ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs,
+ * "0" also disables frame content size field. It may be enabled in the future.
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ */
+ZSTDLIB_API size_t
+ZSTD_initCStream_srcSize(ZSTD_CStream* zcs,
+ int compressionLevel,
+ unsigned long long pledgedSrcSize);
+
+/*! ZSTD_initCStream_usingDict() :
+ * This function is deprecated, and is equivalent to:
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
+ * ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
+ *
+ * Creates an internal CDict (incompatible with static CCtx), except if
+ * dict == NULL or dictSize < 8, in which case no dict is used.
+ * Note: dict is loaded with ZSTD_dct_auto (treated as a full zstd dictionary if
+ * it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy.
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ */
+ZSTDLIB_API size_t
+ZSTD_initCStream_usingDict(ZSTD_CStream* zcs,
+ const void* dict, size_t dictSize,
+ int compressionLevel);
+
+/*! ZSTD_initCStream_advanced() :
+ * This function is deprecated, and is approximately equivalent to:
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * // Pseudocode: Set each zstd parameter and leave the rest as-is.
+ * for ((param, value) : params) {
+ * ZSTD_CCtx_setParameter(zcs, param, value);
+ * }
+ * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
+ * ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
+ *
+ * dict is loaded with ZSTD_dct_auto and ZSTD_dlm_byCopy.
+ * pledgedSrcSize must be correct.
+ * If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ */
+ZSTDLIB_API size_t
+ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
+ const void* dict, size_t dictSize,
+ ZSTD_parameters params,
+ unsigned long long pledgedSrcSize);
+
+/*! ZSTD_initCStream_usingCDict() :
+ * This function is deprecated, and equivalent to:
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * ZSTD_CCtx_refCDict(zcs, cdict);
+ *
+ * note : cdict will just be referenced, and must outlive compression session
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ */
+ZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);
+
+/*! ZSTD_initCStream_usingCDict_advanced() :
+ * This function is DEPRECATED, and is approximately equivalent to:
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * // Pseudocode: Set each zstd frame parameter and leave the rest as-is.
+ * for ((fParam, value) : fParams) {
+ * ZSTD_CCtx_setParameter(zcs, fParam, value);
+ * }
+ * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
+ * ZSTD_CCtx_refCDict(zcs, cdict);
+ *
+ * same as ZSTD_initCStream_usingCDict(), with control over frame parameters.
+ * pledgedSrcSize must be correct. If srcSize is not known at init time, use
+ * value ZSTD_CONTENTSIZE_UNKNOWN.
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ */
+ZSTDLIB_API size_t
+ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
+ const ZSTD_CDict* cdict,
+ ZSTD_frameParameters fParams,
+ unsigned long long pledgedSrcSize);
+
+/*! ZSTD_resetCStream() :
+ * This function is deprecated, and is equivalent to:
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
+ *
+ * start a new frame, using same parameters from previous frame.
+ * This is typically useful to skip the dictionary loading stage, since the dictionary will be re-used in-place.
+ * Note that zcs must be init at least once before using ZSTD_resetCStream().
+ * If pledgedSrcSize is not known at reset time, use macro ZSTD_CONTENTSIZE_UNKNOWN.
+ * If pledgedSrcSize > 0, its value must be correct, as it will be written in header, and controlled at the end.
+ * For the time being, pledgedSrcSize==0 is interpreted as "srcSize unknown" for compatibility with older programs,
+ * but it will change to mean "empty" in future version, so use macro ZSTD_CONTENTSIZE_UNKNOWN instead.
+ * @return : 0, or an error code (which can be tested using ZSTD_isError())
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ */
+ZSTDLIB_API size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);
+
+
+typedef struct {
+ unsigned long long ingested; /* nb input bytes read and buffered */
+ unsigned long long consumed; /* nb input bytes actually compressed */
+ unsigned long long produced; /* nb of compressed bytes generated and buffered */
+ unsigned long long flushed; /* nb of compressed bytes flushed : not provided; can be tracked from caller side */
+ unsigned currentJobID; /* MT only : latest started job nb */
+ unsigned nbActiveWorkers; /* MT only : nb of workers actively compressing at probe time */
+} ZSTD_frameProgression;
+
+/* ZSTD_getFrameProgression() :
+ * tells how much data has been ingested (read from input),
+ * consumed (input actually compressed) and produced (output) for the current frame.
+ * Note : (ingested - consumed) is amount of input data buffered internally, not yet compressed.
+ * Aggregates progression inside active worker threads.
+ */
+ZSTDLIB_API ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx);
+
+/*! ZSTD_toFlushNow() :
+ * Tell how many bytes are ready to be flushed immediately.
+ * Useful for multithreading scenarios (nbWorkers >= 1).
+ * Probe the oldest active job, defined as oldest job not yet entirely flushed,
+ * and check its output buffer.
+ * @return : amount of data stored in oldest job and ready to be flushed immediately.
+ * if @return == 0, it means either :
+ * + there is no active job (could be checked with ZSTD_frameProgression()), or
+ * + oldest job is still actively compressing data,
+ * but everything it has produced has also been flushed so far,
+ * therefore flush speed is limited by production speed of oldest job
+ * irrespective of the speed of concurrent (and newer) jobs.
+ */
+ZSTDLIB_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx);
+
+
+/*===== Advanced Streaming decompression functions =====*/
+
+/*!
+ * This function is deprecated, and is equivalent to:
+ *
+ * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+ * ZSTD_DCtx_loadDictionary(zds, dict, dictSize);
+ *
+ * note: no dictionary will be used if dict == NULL or dictSize < 8
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ */
+ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);
+
+/*!
+ * This function is deprecated, and is equivalent to:
+ *
+ * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+ * ZSTD_DCtx_refDDict(zds, ddict);
+ *
+ * note : ddict is referenced, it must outlive decompression session
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ */
+ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);
+
+/*!
+ * This function is deprecated, and is equivalent to:
+ *
+ * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+ *
+ * re-use decompression parameters from previous init; saves dictionary loading
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ */
+ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
+
+
+/* *******************************************************************
+* Buffer-less and synchronous inner streaming functions
+*
+* This is an advanced API, giving full control over buffer management, for users which need direct control over memory.
+* But it's also a complex one, with several restrictions, documented below.
+* Prefer normal streaming API for an easier experience.
+********************************************************************* */
+
+/*
+ Buffer-less streaming compression (synchronous mode)
+
+ A ZSTD_CCtx object is required to track streaming operations.
+ Use ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resource.
+ ZSTD_CCtx object can be re-used multiple times within successive compression operations.
+
+ Start by initializing a context.
+ Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression,
+ or ZSTD_compressBegin_advanced(), for finer parameter control.
+ It's also possible to duplicate a reference context which has already been initialized, using ZSTD_copyCCtx()
+
+ Then, consume your input using ZSTD_compressContinue().
+ There are some important considerations to keep in mind when using this advanced function :
+ - ZSTD_compressContinue() has no internal buffer. It uses externally provided buffers only.
+ - Interface is synchronous : input is consumed entirely and produces 1+ compressed blocks.
+ - Caller must ensure there is enough space in `dst` to store compressed data under worst case scenario.
+ Worst case evaluation is provided by ZSTD_compressBound().
+ ZSTD_compressContinue() doesn't guarantee recovery after a failed compression.
+ - ZSTD_compressContinue() presumes prior input ***is still accessible and unmodified*** (up to maximum distance size, see WindowLog).
+ It remembers all previous contiguous blocks, plus one separated memory segment (which can itself consist of multiple contiguous blocks)
+ - ZSTD_compressContinue() detects that prior input has been overwritten when `src` buffer overlaps.
+ In which case, it will "discard" the relevant memory section from its history.
+
+ Finish a frame with ZSTD_compressEnd(), which will write the last block(s) and optional checksum.
+ It's possible to use srcSize==0, in which case, it will write a final empty block to end the frame.
+ Without last block mark, frames are considered unfinished (hence corrupted) by compliant decoders.
+
+ `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress again.
+*/
+
+/*===== Buffer-less streaming compression functions =====*/
+ZSTDLIB_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);
+ZSTDLIB_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
+ZSTDLIB_API size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /*< pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN */
+ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /*< note: fails if cdict==NULL */
+ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize); /* compression parameters are already set within cdict. pledgedSrcSize must be correct. If srcSize is not known, use macro ZSTD_CONTENTSIZE_UNKNOWN */
+ZSTDLIB_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /*< note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */
+
+ZSTDLIB_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+
+
+/*
+ Buffer-less streaming decompression (synchronous mode)
+
+ A ZSTD_DCtx object is required to track streaming operations.
+ Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.
+ A ZSTD_DCtx object can be re-used multiple times.
+
+ First typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader().
+ The frame header is extracted from the beginning of the compressed frame, so providing only the frame's beginning is enough.
+ The data fragment must be large enough to ensure successful decoding.
+ `ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough.
+ @result : 0  : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.
+           >0 : `srcSize` is too small, please provide at least @result bytes on the next attempt.
+           or an error code, which can be tested using ZSTD_isError().
+
+ It fills a ZSTD_frameHeader structure with important information to correctly decode the frame,
+ such as the dictionary ID, content size, or maximum back-reference distance (`windowSize`).
+ Note that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information.
+ As a consequence, check that values remain within a valid application range.
+ For example, do not allocate memory blindly; check that `windowSize` is within expectation.
+ Each application can set its own limits, depending on local restrictions.
+ For extended interoperability, it is recommended to support `windowSize` of at least 8 MB.
+
+ ZSTD_decompressContinue() needs previous data blocks during decompression, up to `windowSize` bytes.
+ ZSTD_decompressContinue() is very sensitive to contiguity :
+ if 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place,
+ or that the previous contiguous segment is large enough to properly handle the maximum back-reference distance.
+ There are multiple ways to guarantee this condition.
+
+ The most memory-efficient way is to use a round buffer of sufficient size.
+ Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(),
+ which can @return an error code if the required value is too large for the current system (in 32-bit mode).
+ In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to the previous one,
+ up to the moment there is not enough room left in the buffer to guarantee decoding another full block,
+ whose maximum size is provided in the `ZSTD_frameHeader` structure, field `blockSizeMax`.
+ At that point, decoding can resume from the beginning of the buffer.
+ Note that already decoded data stored in the buffer should be flushed before being overwritten.
+
+ There are alternatives possible, for example using two or more buffers of size `windowSize` each, though they consume more memory.
+
+ Finally, if you control the compression process, you can also ignore all buffer size rules,
+ as long as the encoder and decoder progress in "lock-step",
+ aka use exactly the same buffer sizes, break contiguity at the same place, etc.
+
+ Once buffers are set up, start decompression with ZSTD_decompressBegin().
+ If decompression requires a dictionary, use ZSTD_decompressBegin_usingDict() or ZSTD_decompressBegin_usingDDict().
+
+ Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternately.
+ ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
+ ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail.
+
+ @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
+ It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item.
+ It can also be an error code, which can be tested with ZSTD_isError().
+
+ A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
+ Context can then be reset to start a new decompression.
+
+ Note : it's possible to know whether the next input to present is a header or a block, using ZSTD_nextInputType().
+ This information is not required to properly decode a frame.
+
+ == Special case : skippable frames ==
+
+ Skippable frames allow integration of user-defined data into a flow of concatenated frames.
+ Skippable frames will be ignored (skipped) by the decompressor.
+ The format of skippable frames is as follows :
+ a) Skippable frame ID - 4 Bytes, Little endian format, any value from 0x184D2A50 to 0x184D2A5F
+ b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits
+ c) Frame Content - any content (User Data) of length equal to Frame Size
+ For skippable frames ZSTD_getFrameHeader() returns zfhPtr->frameType==ZSTD_skippableFrame.
+ For skippable frames ZSTD_decompressContinue() always returns 0 : it only skips the content.
+*/
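+
+/* Illustrative sketch : a hypothetical decode loop, assuming the whole frame is in
+ * `src`/`srcSize` and that `dst`/`dstCapacity` can hold the full regenerated content
+ * (no round buffer); error handling is abbreviated. For a round-buffer setup, size the
+ * buffer with ZSTD_decodingBufferSize_min(zfh.windowSize, zfh.frameContentSize) instead.
+ *
+ *   ZSTD_frameHeader zfh;
+ *   if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0) goto fail;   // needs more data, or error
+ *   ZSTD_DCtx* const dctx = ZSTD_createDCtx();
+ *   if (ZSTD_isError(ZSTD_decompressBegin(dctx))) goto fail;
+ *   const char* ip = src;
+ *   char* op = dst;
+ *   char* const oend = op + dstCapacity;
+ *   for (;;) {
+ *       size_t const toRead = ZSTD_nextSrcSizeToDecompress(dctx);
+ *       if (toRead == 0) break;                                    // frame fully decoded
+ *       size_t const out = ZSTD_decompressContinue(dctx, op, (size_t)(oend - op), ip, toRead);
+ *       if (ZSTD_isError(out)) goto fail;
+ *       ip += toRead;                                              // exact amount required
+ *       op += out;                                                 // out == 0 for metadata items
+ *   }
+ *   ZSTD_freeDCtx(dctx);
+ */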
+
+/*===== Buffer-less streaming decompression functions =====*/
+typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e;
+typedef struct {
+ unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */
+ unsigned long long windowSize; /* can be very large, up to <= frameContentSize */
+ unsigned blockSizeMax;
+ ZSTD_frameType_e frameType; /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */
+ unsigned headerSize;
+ unsigned dictID;
+ unsigned checksumFlag;
+} ZSTD_frameHeader;
+
+/*! ZSTD_getFrameHeader() :
+ * decode Frame Header, or requires larger `srcSize`.
+ * @return : 0, `zfhPtr` is correctly filled,
+ * >0, `srcSize` is too small, value is wanted `srcSize` amount,
+ * or an error code, which can be tested using ZSTD_isError() */
+ZSTDLIB_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize); /*< doesn't consume input */
+/*! ZSTD_getFrameHeader_advanced() :
+ * same as ZSTD_getFrameHeader(),
+ * with added capability to select a format (like ZSTD_f_zstd1_magicless) */
+ZSTDLIB_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format);
+ZSTDLIB_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize); /*< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */
+
+ZSTDLIB_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx);
+ZSTDLIB_API size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
+ZSTDLIB_API size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
+
+ZSTDLIB_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx);
+ZSTDLIB_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+
+/* misc */
+ZSTDLIB_API void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
+typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e;
+ZSTDLIB_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
+
+
+
+
+/* ============================ */
+/* Block level API */
+/* ============================ */
+
+/*!
+ Block functions produce and decode raw zstd blocks, without frame metadata.
+ Frame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes).
+ But users will have to take charge of the metadata needed to regenerate the data, such as compressed and content sizes.
+
+ A few rules to respect :
+ - Compressing and decompressing require a context structure
+ + Use ZSTD_createCCtx() and ZSTD_createDCtx()
+ - It is necessary to init context before starting
+ + compression : any ZSTD_compressBegin*() variant, including with dictionary
+ + decompression : any ZSTD_decompressBegin*() variant, including with dictionary
+ + copyCCtx() and copyDCtx() can be used too
+ - Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB
+ + If input is larger than a block size, it's necessary to split input data into multiple blocks
+ + For inputs larger than a single block, consider using regular ZSTD_compress() instead.
+ Frame metadata is not that costly, and quickly becomes negligible as source size grows larger than a block.
+ - When a block is considered not compressible enough, the ZSTD_compressBlock() result will be 0 (zero) !
+ ===> In which case, nothing is produced into `dst` !
+ + User __must__ test for such an outcome and deal directly with the uncompressed data
+ + A block cannot be declared incompressible if the ZSTD_compressBlock() return value was != 0.
+ Doing so would mess up the statistics history, leading to potential data corruption.
+ + ZSTD_decompressBlock() _doesn't accept uncompressed data as input_ !!
+ + In case of multiple successive blocks, should some of them be uncompressed,
+ the decoder must be informed of their existence in order to follow proper history.
+ Use ZSTD_insertBlock() for such a case.
+*/
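+
+/* Illustrative sketch : a hypothetical single-block round trip, assuming `cctx`/`dctx`
+ * were already initialized with ZSTD_compressBegin() / ZSTD_decompressBegin(),
+ * srcSize <= ZSTD_getBlockSize(cctx), and `cbuf`/`out` are caller-provided buffers;
+ * error handling is abbreviated.
+ *
+ *   size_t const cSize = ZSTD_compressBlock(cctx, cbuf, cbufCapacity, src, srcSize);
+ *   if (ZSTD_isError(cSize)) goto fail;
+ *   if (cSize == 0) {
+ *       // block judged incompressible : nothing was written into cbuf.
+ *       // Store/transmit the raw bytes yourself, and keep the decoder's
+ *       // history consistent by inserting them explicitly :
+ *       memcpy(out, src, srcSize);
+ *       if (ZSTD_isError(ZSTD_insertBlock(dctx, out, srcSize))) goto fail;
+ *   } else {
+ *       size_t const dSize = ZSTD_decompressBlock(dctx, out, outCapacity, cbuf, cSize);
+ *       if (ZSTD_isError(dSize)) goto fail;
+ *   }
+ */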
+
+/*===== Raw zstd block functions =====*/
+ZSTDLIB_API size_t ZSTD_getBlockSize (const ZSTD_CCtx* cctx);
+ZSTDLIB_API size_t ZSTD_compressBlock (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTDLIB_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTDLIB_API size_t ZSTD_insertBlock (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize); /*< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */
+
+
+#endif /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */
+
diff --git a/include/net/llc.h b/include/net/llc.h
index fd1f9a3fd8dda4..e250dca03963bf 100644
--- a/include/net/llc.h
+++ b/include/net/llc.h
@@ -72,7 +72,9 @@ struct llc_sap {
static inline
struct hlist_head *llc_sk_dev_hash(struct llc_sap *sap, int ifindex)
{
- return &sap->sk_dev_hash[ifindex % LLC_SK_DEV_HASH_ENTRIES];
+ u32 bucket = hash_32(ifindex, LLC_SK_DEV_HASH_BITS);
+
+ return &sap->sk_dev_hash[bucket];
}
static inline
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 651bba654d77db..899c29c326ba13 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1355,16 +1355,6 @@ struct sctp_endpoint {
reconf_enable:1;
__u8 strreset_enable;
-
- /* Security identifiers from incoming (INIT). These are set by
- * security_sctp_assoc_request(). These will only be used by
- * SCTP TCP type sockets and peeled off connections as they
- * cause a new socket to be generated. security_sctp_sk_clone()
- * will then plug these into the new socket.
- */
-
- u32 secid;
- u32 peer_secid;
};
/* Recover the outer endpoint structure. */
@@ -2104,6 +2094,16 @@ struct sctp_association {
__u64 abandoned_unsent[SCTP_PR_INDEX(MAX) + 1];
__u64 abandoned_sent[SCTP_PR_INDEX(MAX) + 1];
+ /* Security identifiers from incoming (INIT). These are set by
+ * security_sctp_assoc_request(). These will only be used by
+ * SCTP TCP type sockets and peeled off connections as they
+ * cause a new socket to be generated. security_sctp_sk_clone()
+ * will then plug these into the new socket.
+ */
+
+ u32 secid;
+ u32 peer_secid;
+
struct rcu_head rcu;
};
diff --git a/include/net/strparser.h b/include/net/strparser.h
index 1d20b98493a103..732b7097d78e41 100644
--- a/include/net/strparser.h
+++ b/include/net/strparser.h
@@ -54,10 +54,28 @@ struct strp_msg {
int offset;
};
+struct _strp_msg {
+ /* Internal cb structure. struct strp_msg must be first for passing
+ * to upper layer.
+ */
+ struct strp_msg strp;
+ int accum_len;
+};
+
+struct sk_skb_cb {
+#define SK_SKB_CB_PRIV_LEN 20
+ unsigned char data[SK_SKB_CB_PRIV_LEN];
+ struct _strp_msg strp;
+ /* temp_reg is a temporary register used for bpf_convert_data_end_access
+ * when dst_reg == src_reg.
+ */
+ u64 temp_reg;
+};
+
static inline struct strp_msg *strp_msg(struct sk_buff *skb)
{
return (struct strp_msg *)((void *)skb->cb +
- offsetof(struct qdisc_skb_cb, data));
+ offsetof(struct sk_skb_cb, strp));
}
/* Structure for an attached lower socket */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 70972f3ac8fa39..4da22b41bde688 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -293,7 +293,10 @@ static inline bool tcp_out_of_memory(struct sock *sk)
static inline void tcp_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
sk_wmem_queued_add(sk, -skb->truesize);
- sk_mem_uncharge(sk, skb->truesize);
+ if (!skb_zcopy_pure(skb))
+ sk_mem_uncharge(sk, skb->truesize);
+ else
+ sk_mem_uncharge(sk, SKB_TRUESIZE(skb_end_offset(skb)));
__kfree_skb(skb);
}
@@ -974,7 +977,8 @@ static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
const struct sk_buff *from)
{
return likely(tcp_skb_can_collapse_to(to) &&
- mptcp_skb_can_collapse(to, from));
+ mptcp_skb_can_collapse(to, from) &&
+ skb_pure_zcopy_same(to, from));
}
/* Events passed to congestion control interface */
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index cd9eaa83432bf0..477a800a954338 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -73,7 +73,7 @@ enum scsi_cmnd_submitter {
struct scsi_cmnd {
struct scsi_request req;
struct scsi_device *device;
- struct list_head eh_entry; /* entry for the host eh_cmd_q */
+ struct list_head eh_entry; /* entry for the host eh_abort_list/eh_cmd_q */
struct delayed_work abort_work;
struct rcu_head rcu;
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 83a7890f1479f8..d1c6fc83b1e386 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -226,12 +226,6 @@ struct scsi_device {
struct device sdev_gendev,
sdev_dev;
- /*
- * The array size 6 provides space for one attribute group for the
- * SCSI core, four attribute groups defined by SCSI LLDs and one
- * terminating NULL pointer.
- */
- const struct attribute_group *gendev_attr_groups[6];
struct execute_work ew; /* used to get process context on put */
struct work_struct requeue_work;
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index ae715959f886ab..ebe059badba0e5 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -551,6 +551,7 @@ struct Scsi_Host {
struct mutex scan_mutex;/* serialize scanning activity */
+ struct list_head eh_abort_list;
struct list_head eh_cmd_q;
struct task_struct * ehandler; /* Error recovery thread. */
struct completion * eh_action; /* Wait for specific actions on the
diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
index 653dfffb3ac845..1051b84e85798c 100644
--- a/include/sound/memalloc.h
+++ b/include/sound/memalloc.h
@@ -36,6 +36,13 @@ struct snd_dma_device {
#define SNDRV_DMA_TYPE_CONTINUOUS 1 /* continuous no-DMA memory */
#define SNDRV_DMA_TYPE_DEV 2 /* generic device continuous */
#define SNDRV_DMA_TYPE_DEV_WC 5 /* continuous write-combined */
+#ifdef CONFIG_SND_DMA_SGBUF
+#define SNDRV_DMA_TYPE_DEV_SG 3 /* generic device SG-buffer */
+#define SNDRV_DMA_TYPE_DEV_WC_SG 6 /* SG write-combined */
+#else
+#define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_DEV /* no SG-buf support */
+#define SNDRV_DMA_TYPE_DEV_WC_SG SNDRV_DMA_TYPE_DEV_WC
+#endif
#ifdef CONFIG_GENERIC_ALLOCATOR
#define SNDRV_DMA_TYPE_DEV_IRAM 4 /* generic device iram-buffer */
#else
@@ -44,13 +51,6 @@ struct snd_dma_device {
#define SNDRV_DMA_TYPE_VMALLOC 7 /* vmalloc'ed buffer */
#define SNDRV_DMA_TYPE_NONCONTIG 8 /* non-coherent SG buffer */
#define SNDRV_DMA_TYPE_NONCOHERENT 9 /* non-coherent buffer */
-#ifdef CONFIG_SND_DMA_SGBUF
-#define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_NONCONTIG
-#define SNDRV_DMA_TYPE_DEV_WC_SG 6 /* SG write-combined */
-#else
-#define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_DEV /* no SG-buf support */
-#define SNDRV_DMA_TYPE_DEV_WC_SG SNDRV_DMA_TYPE_DEV_WC
-#endif
/*
* info for buffer allocation
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
index bca73e8c8cdec8..499f5fabd20f7e 100644
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -1016,31 +1016,32 @@ TRACE_EVENT(afs_dir_check_failed,
__entry->vnode, __entry->off, __entry->i_size)
);
-TRACE_EVENT(afs_page_dirty,
- TP_PROTO(struct afs_vnode *vnode, const char *where, struct page *page),
+TRACE_EVENT(afs_folio_dirty,
+ TP_PROTO(struct afs_vnode *vnode, const char *where, struct folio *folio),
- TP_ARGS(vnode, where, page),
+ TP_ARGS(vnode, where, folio),
TP_STRUCT__entry(
__field(struct afs_vnode *, vnode )
__field(const char *, where )
- __field(pgoff_t, page )
+ __field(pgoff_t, index )
__field(unsigned long, from )
__field(unsigned long, to )
),
TP_fast_assign(
+ unsigned long priv = (unsigned long)folio_get_private(folio);
__entry->vnode = vnode;
__entry->where = where;
- __entry->page = page->index;
- __entry->from = afs_page_dirty_from(page, page->private);
- __entry->to = afs_page_dirty_to(page, page->private);
- __entry->to |= (afs_is_page_dirty_mmapped(page->private) ?
- (1UL << (BITS_PER_LONG - 1)) : 0);
+ __entry->index = folio_index(folio);
+ __entry->from = afs_folio_dirty_from(folio, priv);
+ __entry->to = afs_folio_dirty_to(folio, priv);
+ __entry->to |= (afs_is_folio_dirty_mmapped(priv) ?
+ (1UL << (BITS_PER_LONG - 1)) : 0);
),
TP_printk("vn=%p %lx %s %lx-%lx%s",
- __entry->vnode, __entry->page, __entry->where,
+ __entry->vnode, __entry->index, __entry->where,
__entry->from,
__entry->to & ~(1UL << (BITS_PER_LONG - 1)),
__entry->to & (1UL << (BITS_PER_LONG - 1)) ? " M" : "")
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 4e881d91c8744d..f8cb916f359588 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -570,9 +570,10 @@ TRACE_EVENT(f2fs_file_write_iter,
);
TRACE_EVENT(f2fs_map_blocks,
- TP_PROTO(struct inode *inode, struct f2fs_map_blocks *map, int ret),
+ TP_PROTO(struct inode *inode, struct f2fs_map_blocks *map,
+ int create, int flag, int ret),
- TP_ARGS(inode, map, ret),
+ TP_ARGS(inode, map, create, flag, ret),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -583,11 +584,14 @@ TRACE_EVENT(f2fs_map_blocks,
__field(unsigned int, m_flags)
__field(int, m_seg_type)
__field(bool, m_may_create)
+ __field(bool, m_multidev_dio)
+ __field(int, create)
+ __field(int, flag)
__field(int, ret)
),
TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
+ __entry->dev = map->m_bdev->bd_dev;
__entry->ino = inode->i_ino;
__entry->m_lblk = map->m_lblk;
__entry->m_pblk = map->m_pblk;
@@ -595,12 +599,16 @@ TRACE_EVENT(f2fs_map_blocks,
__entry->m_flags = map->m_flags;
__entry->m_seg_type = map->m_seg_type;
__entry->m_may_create = map->m_may_create;
+ __entry->m_multidev_dio = map->m_multidev_dio;
+ __entry->create = create;
+ __entry->flag = flag;
__entry->ret = ret;
),
TP_printk("dev = (%d,%d), ino = %lu, file offset = %llu, "
- "start blkaddr = 0x%llx, len = 0x%llx, flags = %u,"
- "seg_type = %d, may_create = %d, err = %d",
+ "start blkaddr = 0x%llx, len = 0x%llx, flags = %u, "
+ "seg_type = %d, may_create = %d, multidevice = %d, "
+ "create = %d, flag = %d, err = %d",
show_dev_ino(__entry),
(unsigned long long)__entry->m_lblk,
(unsigned long long)__entry->m_pblk,
@@ -608,6 +616,9 @@ TRACE_EVENT(f2fs_map_blocks,
__entry->m_flags,
__entry->m_seg_type,
__entry->m_may_create,
+ __entry->m_multidev_dio,
+ __entry->create,
+ __entry->flag,
__entry->ret)
);
@@ -807,20 +818,20 @@ TRACE_EVENT(f2fs_lookup_start,
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
- __field(const char *, name)
+ __string(name, dentry->d_name.name)
__field(unsigned int, flags)
),
TP_fast_assign(
__entry->dev = dir->i_sb->s_dev;
__entry->ino = dir->i_ino;
- __entry->name = dentry->d_name.name;
+ __assign_str(name, dentry->d_name.name);
__entry->flags = flags;
),
TP_printk("dev = (%d,%d), pino = %lu, name:%s, flags:%u",
show_dev_ino(__entry),
- __entry->name,
+ __get_str(name),
__entry->flags)
);
@@ -834,7 +845,7 @@ TRACE_EVENT(f2fs_lookup_end,
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
- __field(const char *, name)
+ __string(name, dentry->d_name.name)
__field(nid_t, cino)
__field(int, err)
),
@@ -842,14 +853,14 @@ TRACE_EVENT(f2fs_lookup_end,
TP_fast_assign(
__entry->dev = dir->i_sb->s_dev;
__entry->ino = dir->i_ino;
- __entry->name = dentry->d_name.name;
+ __assign_str(name, dentry->d_name.name);
__entry->cino = ino;
__entry->err = err;
),
TP_printk("dev = (%d,%d), pino = %lu, name:%s, ino:%u, err:%d",
show_dev_ino(__entry),
- __entry->name,
+ __get_str(name),
__entry->cino,
__entry->err)
);
diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h
index ca5fbb59fa4212..999777d32dcfdc 100644
--- a/include/uapi/linux/ethtool_netlink.h
+++ b/include/uapi/linux/ethtool_netlink.h
@@ -411,7 +411,9 @@ enum {
ETHTOOL_A_PAUSE_STAT_TX_FRAMES,
ETHTOOL_A_PAUSE_STAT_RX_FRAMES,
- /* add new constants above here */
+ /* add new constants above here
+ * adjust ETHTOOL_PAUSE_STAT_CNT if adding non-stats!
+ */
__ETHTOOL_A_PAUSE_STAT_CNT,
ETHTOOL_A_PAUSE_STAT_MAX = (__ETHTOOL_A_PAUSE_STAT_CNT - 1)
};
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 78f0719cc2a3a2..1daa45268de266 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1130,6 +1130,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_BINARY_STATS_FD 203
#define KVM_CAP_EXIT_ON_EMULATION_FAILURE 204
#define KVM_CAP_ARM_MTE 205
+#define KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM 206
#ifdef KVM_CAP_IRQ_ROUTING
diff --git a/include/uapi/linux/rtc.h b/include/uapi/linux/rtc.h
index f950bff75e97ef..03e5b776e59764 100644
--- a/include/uapi/linux/rtc.h
+++ b/include/uapi/linux/rtc.h
@@ -14,6 +14,7 @@
#include <linux/const.h>
#include <linux/ioctl.h>
+#include <linux/types.h>
/*
* The struct used to pass data via the following ioctl. Similar to the
@@ -66,6 +67,17 @@ struct rtc_pll_info {
long pll_clock; /* base PLL frequency */
};
+struct rtc_param {
+ __u64 param;
+ union {
+ __u64 uvalue;
+ __s64 svalue;
+ __u64 ptr;
+ };
+ __u32 index;
+ __u32 __pad;
+};
+
/*
* ioctl calls that are permitted to the /dev/rtc interface, if
* any of the RTC drivers are enabled.
@@ -95,6 +107,9 @@ struct rtc_pll_info {
#define RTC_PLL_GET _IOR('p', 0x11, struct rtc_pll_info) /* Get PLL correction */
#define RTC_PLL_SET _IOW('p', 0x12, struct rtc_pll_info) /* Set PLL correction */
+#define RTC_PARAM_GET _IOW('p', 0x13, struct rtc_param) /* Get parameter */
+#define RTC_PARAM_SET _IOW('p', 0x14, struct rtc_param) /* Set parameter */
+
#define RTC_VL_DATA_INVALID _BITUL(0) /* Voltage too low, RTC data is invalid */
#define RTC_VL_BACKUP_LOW _BITUL(1) /* Backup voltage is low */
#define RTC_VL_BACKUP_EMPTY _BITUL(2) /* Backup empty or not present */
@@ -114,7 +129,21 @@ struct rtc_pll_info {
#define RTC_FEATURE_ALARM 0
#define RTC_FEATURE_ALARM_RES_MINUTE 1
#define RTC_FEATURE_NEED_WEEK_DAY 2
-#define RTC_FEATURE_CNT 3
+#define RTC_FEATURE_ALARM_RES_2S 3
+#define RTC_FEATURE_UPDATE_INTERRUPT 4
+#define RTC_FEATURE_CORRECTION 5
+#define RTC_FEATURE_BACKUP_SWITCH_MODE 6
+#define RTC_FEATURE_CNT 7
+
+/* parameter list */
+#define RTC_PARAM_FEATURES 0
+#define RTC_PARAM_CORRECTION 1
+#define RTC_PARAM_BACKUP_SWITCH_MODE 2
+
+#define RTC_BSM_DISABLED 0
+#define RTC_BSM_DIRECT 1
+#define RTC_BSM_LEVEL 2
+#define RTC_BSM_STANDBY 3
#define RTC_MAX_FREQ 8192
diff --git a/include/uapi/linux/virtio_mem.h b/include/uapi/linux/virtio_mem.h
index 70e01c687d5eb8..e9122f1d0e0cb2 100644
--- a/include/uapi/linux/virtio_mem.h
+++ b/include/uapi/linux/virtio_mem.h
@@ -68,9 +68,10 @@
* explicitly triggered (VIRTIO_MEM_REQ_UNPLUG).
*
* There are no guarantees what will happen if unplugged memory is
- * read/written. Such memory should, in general, not be touched. E.g.,
- * even writing might succeed, but the values will simply be discarded at
- * random points in time.
+ * read/written. In general, unplugged memory should not be touched, because
+ * the resulting action is undefined. There is one exception: without
+ * VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE, unplugged memory inside the usable
+ * region can be read, to simplify creation of memory dumps.
*
* It can happen that the device cannot process a request, because it is
* busy. The device driver has to retry later.
@@ -87,6 +88,8 @@
/* node_id is an ACPI PXM and is valid */
#define VIRTIO_MEM_F_ACPI_PXM 0
+/* unplugged memory must not be accessed */
+#define VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE 1
/* --- virtio-mem: guest -> host requests --- */
diff --git a/init/Kconfig b/init/Kconfig
index 21b1f4870c807f..45bcaa8e7481d7 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1896,6 +1896,7 @@ choice
config SLAB
bool "SLAB"
+ depends on !PREEMPT_RT
select HAVE_HARDENED_USERCOPY_ALLOCATOR
help
The regular slab allocator that is established and known to work
@@ -1916,6 +1917,7 @@ config SLUB
config SLOB
depends on EXPERT
bool "SLOB (Simple Allocator)"
+ depends on !PREEMPT_RT
help
SLOB replaces the stock allocator with a drastically simpler
allocator. SLOB is generally more space efficient but
diff --git a/init/Makefile b/init/Makefile
index 2846113677ee62..04eeee12c076d1 100644
--- a/init/Makefile
+++ b/init/Makefile
@@ -30,7 +30,7 @@ $(obj)/version.o: include/generated/compile.h
quiet_cmd_compile.h = CHK $@
cmd_compile.h = \
$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \
- "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" \
+ "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT_BUILD)" \
"$(CONFIG_PREEMPT_RT)" $(CONFIG_CC_VERSION_TEXT) "$(LD)"
include/generated/compile.h: FORCE
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index 60f1bfc3c7b246..ce77f02656603d 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -1,12 +1,23 @@
# SPDX-License-Identifier: GPL-2.0-only
+config PREEMPT_NONE_BUILD
+ bool
+
+config PREEMPT_VOLUNTARY_BUILD
+ bool
+
+config PREEMPT_BUILD
+ bool
+ select PREEMPTION
+ select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK
+
choice
prompt "Preemption Model"
- default PREEMPT_NONE_BEHAVIOUR
+ default PREEMPT_NONE
-config PREEMPT_NONE_BEHAVIOUR
+config PREEMPT_NONE
bool "No Forced Preemption (Server)"
- select PREEMPT_NONE if !PREEMPT_DYNAMIC
+ select PREEMPT_NONE_BUILD if !PREEMPT_DYNAMIC
help
This is the traditional Linux preemption model, geared towards
throughput. It will still provide good latencies most of the
@@ -18,10 +29,10 @@ config PREEMPT_NONE_BEHAVIOUR
raw processing power of the kernel, irrespective of scheduling
latencies.
-config PREEMPT_VOLUNTARY_BEHAVIOUR
+config PREEMPT_VOLUNTARY
bool "Voluntary Kernel Preemption (Desktop)"
depends on !ARCH_NO_PREEMPT
- select PREEMPT_VOLUNTARY if !PREEMPT_DYNAMIC
+ select PREEMPT_VOLUNTARY_BUILD if !PREEMPT_DYNAMIC
help
This option reduces the latency of the kernel by adding more
"explicit preemption points" to the kernel code. These new
@@ -37,10 +48,10 @@ config PREEMPT_VOLUNTARY_BEHAVIOUR
Select this if you are building a kernel for a desktop system.
-config PREEMPT_BEHAVIOUR
+config PREEMPT
bool "Preemptible Kernel (Low-Latency Desktop)"
depends on !ARCH_NO_PREEMPT
- select PREEMPT
+ select PREEMPT_BUILD
help
This option reduces the latency of the kernel by making
all kernel code (that is not executing in a critical section)
@@ -58,7 +69,7 @@ config PREEMPT_BEHAVIOUR
config PREEMPT_RT
bool "Fully Preemptible Kernel (Real-Time)"
- depends on EXPERT && ARCH_SUPPORTS_RT && !PREEMPT_DYNAMIC
+ depends on EXPERT && ARCH_SUPPORTS_RT
select PREEMPTION
help
This option turns the kernel into a real-time kernel by replacing
@@ -75,17 +86,6 @@ config PREEMPT_RT
endchoice
-config PREEMPT_NONE
- bool
-
-config PREEMPT_VOLUNTARY
- bool
-
-config PREEMPT
- bool
- select PREEMPTION
- select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK
-
config PREEMPT_COUNT
bool
@@ -95,8 +95,8 @@ config PREEMPTION
config PREEMPT_DYNAMIC
bool "Preemption behaviour defined on boot"
- depends on HAVE_PREEMPT_DYNAMIC
- select PREEMPT
+ depends on HAVE_PREEMPT_DYNAMIC && !PREEMPT_RT
+ select PREEMPT_BUILD
default y
help
This option allows to define the preemption model on the kernel
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 327e3996eadbd9..2405e39d800fe5 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -390,6 +390,13 @@ static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
i = end_new;
insn = prog->insnsi + end_old;
}
+ if (bpf_pseudo_func(insn)) {
+ ret = bpf_adj_delta_to_imm(insn, pos, end_old,
+ end_new, i, probe_pass);
+ if (ret)
+ return ret;
+ continue;
+ }
code = insn->code;
if ((BPF_CLASS(code) != BPF_JMP &&
BPF_CLASS(code) != BPF_JMP32) ||
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index f0dca726ebfde5..890b3ec375a394 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -240,12 +240,6 @@ static bool bpf_pseudo_kfunc_call(const struct bpf_insn *insn)
insn->src_reg == BPF_PSEUDO_KFUNC_CALL;
}
-static bool bpf_pseudo_func(const struct bpf_insn *insn)
-{
- return insn->code == (BPF_LD | BPF_IMM | BPF_DW) &&
- insn->src_reg == BPF_PSEUDO_FUNC;
-}
-
struct bpf_call_arg_meta {
struct bpf_map *map_ptr;
bool raw_mode;
@@ -1960,16 +1954,10 @@ static int add_subprog_and_kfunc(struct bpf_verifier_env *env)
return -EPERM;
}
- if (bpf_pseudo_func(insn)) {
- ret = add_subprog(env, i + insn->imm + 1);
- if (ret >= 0)
- /* remember subprog */
- insn[1].imm = ret;
- } else if (bpf_pseudo_call(insn)) {
+ if (bpf_pseudo_func(insn) || bpf_pseudo_call(insn))
ret = add_subprog(env, i + insn->imm + 1);
- } else {
+ else
ret = add_kfunc_call(env, insn->imm, insn->off);
- }
if (ret < 0)
return ret;
@@ -3088,9 +3076,12 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
reg = &reg_state->stack[spi].spilled_ptr;
if (is_spilled_reg(&reg_state->stack[spi])) {
- if (size != BPF_REG_SIZE) {
- u8 scalar_size = 0;
+ u8 spill_size = 1;
+
+ for (i = BPF_REG_SIZE - 1; i > 0 && stype[i - 1] == STACK_SPILL; i--)
+ spill_size++;
+ if (size != BPF_REG_SIZE || spill_size != BPF_REG_SIZE) {
if (reg->type != SCALAR_VALUE) {
verbose_linfo(env, env->insn_idx, "; ");
verbose(env, "invalid size of register fill\n");
@@ -3101,10 +3092,7 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
if (dst_regno < 0)
return 0;
- for (i = BPF_REG_SIZE; i > 0 && stype[i - 1] == STACK_SPILL; i--)
- scalar_size++;
-
- if (!(off % BPF_REG_SIZE) && size == scalar_size) {
+ if (!(off % BPF_REG_SIZE) && size == spill_size) {
/* The earlier check_reg_arg() has decided the
* subreg_def for this insn. Save it first.
*/
@@ -3128,12 +3116,6 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
return 0;
}
- for (i = 1; i < BPF_REG_SIZE; i++) {
- if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) {
- verbose(env, "corrupted spill memory\n");
- return -EACCES;
- }
- }
if (dst_regno >= 0) {
/* restore register state from stack */
@@ -9393,7 +9375,8 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
if (insn->src_reg == BPF_PSEUDO_FUNC) {
struct bpf_prog_aux *aux = env->prog->aux;
- u32 subprogno = insn[1].imm;
+ u32 subprogno = find_subprog(env,
+ env->insn_idx + insn->imm + 1);
if (!aux->func_info) {
verbose(env, "missing btf func_info\n");
@@ -12563,14 +12546,9 @@ static int jit_subprogs(struct bpf_verifier_env *env)
return 0;
for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
- if (bpf_pseudo_func(insn)) {
- env->insn_aux_data[i].call_imm = insn->imm;
- /* subprog is encoded in insn[1].imm */
+ if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn))
continue;
- }
- if (!bpf_pseudo_call(insn))
- continue;
/* Upon error here we cannot fall back to interpreter but
* need a hard reject of the program. Thus -EFAULT is
* propagated in any case.
@@ -12591,6 +12569,12 @@ static int jit_subprogs(struct bpf_verifier_env *env)
env->insn_aux_data[i].call_imm = insn->imm;
/* point imm to __bpf_call_base+1 from JITs point of view */
insn->imm = 1;
+ if (bpf_pseudo_func(insn))
+ /* jit (e.g. x86_64) may emit fewer instructions
+ * if it learns a u32 imm is the same as a u64 imm.
+ * Force a non zero here.
+ */
+ insn[1].imm = 1;
}
err = bpf_prog_alloc_jited_linfo(prog);
@@ -12675,7 +12659,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
insn = func[i]->insnsi;
for (j = 0; j < func[i]->len; j++, insn++) {
if (bpf_pseudo_func(insn)) {
- subprog = insn[1].imm;
+ subprog = insn->off;
insn[0].imm = (u32)(long)func[subprog]->bpf_func;
insn[1].imm = ((u64)(long)func[subprog]->bpf_func) >> 32;
continue;
@@ -12726,7 +12710,8 @@ static int jit_subprogs(struct bpf_verifier_env *env)
for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
if (bpf_pseudo_func(insn)) {
insn[0].imm = env->insn_aux_data[i].call_imm;
- insn[1].imm = find_subprog(env, i + insn[0].imm + 1);
+ insn[1].imm = insn->off;
+ insn->off = 0;
continue;
}
if (!bpf_pseudo_call(insn))
diff --git a/kernel/events/core.c b/kernel/events/core.c
index f2253ea729a273..523106a506eed0 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7154,7 +7154,6 @@ void perf_output_sample(struct perf_output_handle *handle,
static u64 perf_virt_to_phys(u64 virt)
{
u64 phys_addr = 0;
- struct page *p = NULL;
if (!virt)
return 0;
@@ -7173,14 +7172,15 @@ static u64 perf_virt_to_phys(u64 virt)
* If failed, leave phys_addr as 0.
*/
if (current->mm != NULL) {
+ struct page *p;
+
pagefault_disable();
- if (get_user_page_fast_only(virt, 0, &p))
+ if (get_user_page_fast_only(virt, 0, &p)) {
phys_addr = page_to_phys(p) + virt % PAGE_SIZE;
+ put_page(p);
+ }
pagefault_enable();
}
-
- if (p)
- put_page(p);
}
return phys_addr;
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 76e67d1e02d48b..4b84c8e7884b48 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -202,6 +202,9 @@ static __always_inline struct kcsan_ctx *get_ctx(void)
return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
}
+static __always_inline void
+check_access(const volatile void *ptr, size_t size, int type, unsigned long ip);
+
/* Check scoped accesses; never inline because this is a slow-path! */
static noinline void kcsan_check_scoped_accesses(void)
{
@@ -210,14 +213,16 @@ static noinline void kcsan_check_scoped_accesses(void)
struct kcsan_scoped_access *scoped_access;
ctx->scoped_accesses.prev = NULL; /* Avoid recursion. */
- list_for_each_entry(scoped_access, &ctx->scoped_accesses, list)
- __kcsan_check_access(scoped_access->ptr, scoped_access->size, scoped_access->type);
+ list_for_each_entry(scoped_access, &ctx->scoped_accesses, list) {
+ check_access(scoped_access->ptr, scoped_access->size,
+ scoped_access->type, scoped_access->ip);
+ }
ctx->scoped_accesses.prev = prev_save;
}
/* Rules for generic atomic accesses. Called from fast-path. */
static __always_inline bool
-is_atomic(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)
+is_atomic(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, int type)
{
if (type & KCSAN_ACCESS_ATOMIC)
return true;
@@ -254,7 +259,7 @@ is_atomic(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx
}
static __always_inline bool
-should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)
+should_watch(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, int type)
{
/*
* Never set up watchpoints when memory operations are atomic.
@@ -263,7 +268,7 @@ should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *
* should not count towards skipped instructions, and (2) to actually
* decrement kcsan_atomic_next for consecutive instruction stream.
*/
- if (is_atomic(ptr, size, type, ctx))
+ if (is_atomic(ctx, ptr, size, type))
return false;
if (this_cpu_dec_return(kcsan_skip) >= 0)
@@ -350,6 +355,7 @@ void kcsan_restore_irqtrace(struct task_struct *task)
static noinline void kcsan_found_watchpoint(const volatile void *ptr,
size_t size,
int type,
+ unsigned long ip,
atomic_long_t *watchpoint,
long encoded_watchpoint)
{
@@ -396,7 +402,7 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr,
if (consumed) {
kcsan_save_irqtrace(current);
- kcsan_report_set_info(ptr, size, type, watchpoint - watchpoints);
+ kcsan_report_set_info(ptr, size, type, ip, watchpoint - watchpoints);
kcsan_restore_irqtrace(current);
} else {
/*
@@ -416,7 +422,7 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr,
}
static noinline void
-kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
+kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type, unsigned long ip)
{
const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0;
@@ -568,8 +574,8 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE)
atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
- kcsan_report_known_origin(ptr, size, type, value_change,
- watchpoint - watchpoints,
+ kcsan_report_known_origin(ptr, size, type, ip,
+ value_change, watchpoint - watchpoints,
old, new, access_mask);
} else if (value_change == KCSAN_VALUE_CHANGE_TRUE) {
/* Inferring a race, since the value should not have changed. */
@@ -578,8 +584,10 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
if (is_assert)
atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
- if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert)
- kcsan_report_unknown_origin(ptr, size, type, old, new, access_mask);
+ if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert) {
+ kcsan_report_unknown_origin(ptr, size, type, ip,
+ old, new, access_mask);
+ }
}
/*
@@ -596,8 +604,8 @@ out:
user_access_restore(ua_flags);
}
-static __always_inline void check_access(const volatile void *ptr, size_t size,
- int type)
+static __always_inline void
+check_access(const volatile void *ptr, size_t size, int type, unsigned long ip)
{
const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
atomic_long_t *watchpoint;
@@ -625,13 +633,12 @@ static __always_inline void check_access(const volatile void *ptr, size_t size,
*/
if (unlikely(watchpoint != NULL))
- kcsan_found_watchpoint(ptr, size, type, watchpoint,
- encoded_watchpoint);
+ kcsan_found_watchpoint(ptr, size, type, ip, watchpoint, encoded_watchpoint);
else {
struct kcsan_ctx *ctx = get_ctx(); /* Call only once in fast-path. */
- if (unlikely(should_watch(ptr, size, type, ctx)))
- kcsan_setup_watchpoint(ptr, size, type);
+ if (unlikely(should_watch(ctx, ptr, size, type)))
+ kcsan_setup_watchpoint(ptr, size, type, ip);
else if (unlikely(ctx->scoped_accesses.prev))
kcsan_check_scoped_accesses();
}
@@ -757,7 +764,7 @@ kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
{
struct kcsan_ctx *ctx = get_ctx();
- __kcsan_check_access(ptr, size, type);
+ check_access(ptr, size, type, _RET_IP_);
ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */
@@ -765,6 +772,7 @@ kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
sa->ptr = ptr;
sa->size = size;
sa->type = type;
+ sa->ip = _RET_IP_;
if (!ctx->scoped_accesses.prev) /* Lazy initialize list head. */
INIT_LIST_HEAD(&ctx->scoped_accesses);
@@ -796,13 +804,13 @@ void kcsan_end_scoped_access(struct kcsan_scoped_access *sa)
ctx->disable_count--;
- __kcsan_check_access(sa->ptr, sa->size, sa->type);
+ check_access(sa->ptr, sa->size, sa->type, sa->ip);
}
EXPORT_SYMBOL(kcsan_end_scoped_access);
void __kcsan_check_access(const volatile void *ptr, size_t size, int type)
{
- check_access(ptr, size, type);
+ check_access(ptr, size, type, _RET_IP_);
}
EXPORT_SYMBOL(__kcsan_check_access);
@@ -823,7 +831,7 @@ EXPORT_SYMBOL(__kcsan_check_access);
void __tsan_read##size(void *ptr); \
void __tsan_read##size(void *ptr) \
{ \
- check_access(ptr, size, 0); \
+ check_access(ptr, size, 0, _RET_IP_); \
} \
EXPORT_SYMBOL(__tsan_read##size); \
void __tsan_unaligned_read##size(void *ptr) \
@@ -832,7 +840,7 @@ EXPORT_SYMBOL(__kcsan_check_access);
void __tsan_write##size(void *ptr); \
void __tsan_write##size(void *ptr) \
{ \
- check_access(ptr, size, KCSAN_ACCESS_WRITE); \
+ check_access(ptr, size, KCSAN_ACCESS_WRITE, _RET_IP_); \
} \
EXPORT_SYMBOL(__tsan_write##size); \
void __tsan_unaligned_write##size(void *ptr) \
@@ -842,7 +850,8 @@ EXPORT_SYMBOL(__kcsan_check_access);
void __tsan_read_write##size(void *ptr) \
{ \
check_access(ptr, size, \
- KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE); \
+ KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE, \
+ _RET_IP_); \
} \
EXPORT_SYMBOL(__tsan_read_write##size); \
void __tsan_unaligned_read_write##size(void *ptr) \
@@ -858,14 +867,14 @@ DEFINE_TSAN_READ_WRITE(16);
void __tsan_read_range(void *ptr, size_t size);
void __tsan_read_range(void *ptr, size_t size)
{
- check_access(ptr, size, 0);
+ check_access(ptr, size, 0, _RET_IP_);
}
EXPORT_SYMBOL(__tsan_read_range);
void __tsan_write_range(void *ptr, size_t size);
void __tsan_write_range(void *ptr, size_t size)
{
- check_access(ptr, size, KCSAN_ACCESS_WRITE);
+ check_access(ptr, size, KCSAN_ACCESS_WRITE, _RET_IP_);
}
EXPORT_SYMBOL(__tsan_write_range);
@@ -886,7 +895,8 @@ EXPORT_SYMBOL(__tsan_write_range);
IS_ALIGNED((unsigned long)ptr, size); \
if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic) \
return; \
- check_access(ptr, size, is_atomic ? KCSAN_ACCESS_ATOMIC : 0); \
+ check_access(ptr, size, is_atomic ? KCSAN_ACCESS_ATOMIC : 0, \
+ _RET_IP_); \
} \
EXPORT_SYMBOL(__tsan_volatile_read##size); \
void __tsan_unaligned_volatile_read##size(void *ptr) \
@@ -901,7 +911,8 @@ EXPORT_SYMBOL(__tsan_write_range);
return; \
check_access(ptr, size, \
KCSAN_ACCESS_WRITE | \
- (is_atomic ? KCSAN_ACCESS_ATOMIC : 0)); \
+ (is_atomic ? KCSAN_ACCESS_ATOMIC : 0), \
+ _RET_IP_); \
} \
EXPORT_SYMBOL(__tsan_volatile_write##size); \
void __tsan_unaligned_volatile_write##size(void *ptr) \
@@ -955,7 +966,7 @@ EXPORT_SYMBOL(__tsan_init);
u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder) \
{ \
if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
- check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC); \
+ check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC, _RET_IP_); \
} \
return __atomic_load_n(ptr, memorder); \
} \
@@ -965,7 +976,7 @@ EXPORT_SYMBOL(__tsan_init);
{ \
if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
check_access(ptr, bits / BITS_PER_BYTE, \
- KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC); \
+ KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC, _RET_IP_); \
} \
__atomic_store_n(ptr, v, memorder); \
} \
@@ -978,7 +989,7 @@ EXPORT_SYMBOL(__tsan_init);
if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
check_access(ptr, bits / BITS_PER_BYTE, \
KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
- KCSAN_ACCESS_ATOMIC); \
+ KCSAN_ACCESS_ATOMIC, _RET_IP_); \
} \
return __atomic_##op##suffix(ptr, v, memorder); \
} \
@@ -1010,7 +1021,7 @@ EXPORT_SYMBOL(__tsan_init);
if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
check_access(ptr, bits / BITS_PER_BYTE, \
KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
- KCSAN_ACCESS_ATOMIC); \
+ KCSAN_ACCESS_ATOMIC, _RET_IP_); \
} \
return __atomic_compare_exchange_n(ptr, exp, val, weak, mo, fail_mo); \
} \
@@ -1025,7 +1036,7 @@ EXPORT_SYMBOL(__tsan_init);
if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
check_access(ptr, bits / BITS_PER_BYTE, \
KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
- KCSAN_ACCESS_ATOMIC); \
+ KCSAN_ACCESS_ATOMIC, _RET_IP_); \
} \
__atomic_compare_exchange_n(ptr, &exp, val, 0, mo, fail_mo); \
return exp; \
diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h
index f36e25c497ed80..ae33c2a7f07e3e 100644
--- a/kernel/kcsan/kcsan.h
+++ b/kernel/kcsan/kcsan.h
@@ -121,7 +121,7 @@ enum kcsan_value_change {
* to be consumed by the reporting thread. No report is printed yet.
*/
void kcsan_report_set_info(const volatile void *ptr, size_t size, int access_type,
- int watchpoint_idx);
+ unsigned long ip, int watchpoint_idx);
/*
* The calling thread observed that the watchpoint it set up was hit and
@@ -129,14 +129,14 @@ void kcsan_report_set_info(const volatile void *ptr, size_t size, int access_typ
* thread.
*/
void kcsan_report_known_origin(const volatile void *ptr, size_t size, int access_type,
- enum kcsan_value_change value_change, int watchpoint_idx,
- u64 old, u64 new, u64 mask);
+ unsigned long ip, enum kcsan_value_change value_change,
+ int watchpoint_idx, u64 old, u64 new, u64 mask);
/*
* No other thread was observed to race with the access, but the data value
* before and after the stall differs. Reports a race of "unknown origin".
*/
void kcsan_report_unknown_origin(const volatile void *ptr, size_t size, int access_type,
- u64 old, u64 new, u64 mask);
+ unsigned long ip, u64 old, u64 new, u64 mask);
#endif /* _KERNEL_KCSAN_KCSAN_H */
diff --git a/kernel/kcsan/kcsan_test.c b/kernel/kcsan/kcsan_test.c
index dc55fd5a36fcce..66072923858806 100644
--- a/kernel/kcsan/kcsan_test.c
+++ b/kernel/kcsan/kcsan_test.c
@@ -29,6 +29,11 @@
#include <linux/types.h>
#include <trace/events/printk.h>
+#define KCSAN_TEST_REQUIRES(test, cond) do { \
+ if (!(cond)) \
+ kunit_skip((test), "Test requires: " #cond); \
+} while (0)
+
#ifdef CONFIG_CC_HAS_TSAN_COMPOUND_READ_BEFORE_WRITE
#define __KCSAN_ACCESS_RW(alt) (KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)
#else
@@ -205,10 +210,12 @@ static bool report_matches(const struct expect_report *r)
"read-write" :
"write") :
"read");
+ const bool is_atomic = (ty & KCSAN_ACCESS_ATOMIC);
+ const bool is_scoped = (ty & KCSAN_ACCESS_SCOPED);
const char *const access_type_aux =
- (ty & KCSAN_ACCESS_ATOMIC) ?
- " (marked)" :
- ((ty & KCSAN_ACCESS_SCOPED) ? " (scoped)" : "");
+ (is_atomic && is_scoped) ? " (marked, scoped)"
+ : (is_atomic ? " (marked)"
+ : (is_scoped ? " (scoped)" : ""));
if (i == 1) {
/* Access 2 */
@@ -333,7 +340,10 @@ static noinline void test_kernel_assert_bits_nochange(void)
ASSERT_EXCLUSIVE_BITS(test_var, ~TEST_CHANGE_BITS);
}
-/* To check that scoped assertions do trigger anywhere in scope. */
+/*
+ * Scoped assertions do trigger anywhere in scope. However, the report should
+ * still only point at the start of the scope.
+ */
static noinline void test_enter_scope(void)
{
int x = 0;
@@ -488,17 +498,24 @@ static void test_concurrent_races(struct kunit *test)
__no_kcsan
static void test_novalue_change(struct kunit *test)
{
- const struct expect_report expect = {
+ const struct expect_report expect_rw = {
.access = {
{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
{ test_kernel_read, &test_var, sizeof(test_var), 0 },
},
};
+ const struct expect_report expect_ww = {
+ .access = {
+ { test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
+ { test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
+ },
+ };
bool match_expect = false;
+ test_kernel_write_nochange(); /* Reset value. */
begin_test_checks(test_kernel_write_nochange, test_kernel_read);
do {
- match_expect = report_matches(&expect);
+ match_expect = report_matches(&expect_rw) || report_matches(&expect_ww);
} while (!end_test_checks(match_expect));
if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY))
KUNIT_EXPECT_FALSE(test, match_expect);
@@ -513,17 +530,24 @@ static void test_novalue_change(struct kunit *test)
__no_kcsan
static void test_novalue_change_exception(struct kunit *test)
{
- const struct expect_report expect = {
+ const struct expect_report expect_rw = {
.access = {
{ test_kernel_write_nochange_rcu, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
{ test_kernel_read, &test_var, sizeof(test_var), 0 },
},
};
+ const struct expect_report expect_ww = {
+ .access = {
+ { test_kernel_write_nochange_rcu, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
+ { test_kernel_write_nochange_rcu, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
+ },
+ };
bool match_expect = false;
+ test_kernel_write_nochange_rcu(); /* Reset value. */
begin_test_checks(test_kernel_write_nochange_rcu, test_kernel_read);
do {
- match_expect = report_matches(&expect);
+ match_expect = report_matches(&expect_rw) || report_matches(&expect_ww);
} while (!end_test_checks(match_expect));
KUNIT_EXPECT_TRUE(test, match_expect);
}
@@ -642,8 +666,7 @@ static void test_read_plain_atomic_write(struct kunit *test)
};
bool match_expect = false;
- if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS))
- return;
+ KCSAN_TEST_REQUIRES(test, !IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS));
begin_test_checks(test_kernel_read, test_kernel_write_atomic);
do {
@@ -665,8 +688,7 @@ static void test_read_plain_atomic_rmw(struct kunit *test)
};
bool match_expect = false;
- if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS))
- return;
+ KCSAN_TEST_REQUIRES(test, !IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS));
begin_test_checks(test_kernel_read, test_kernel_atomic_rmw);
do {
@@ -828,22 +850,22 @@ static void test_assert_exclusive_writer_scoped(struct kunit *test)
{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
},
};
- const struct expect_report expect_anywhere = {
+ const struct expect_report expect_inscope = {
.access = {
{ test_enter_scope, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_SCOPED },
{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
},
};
bool match_expect_start = false;
- bool match_expect_anywhere = false;
+ bool match_expect_inscope = false;
begin_test_checks(test_kernel_assert_writer_scoped, test_kernel_write_nochange);
do {
match_expect_start |= report_matches(&expect_start);
- match_expect_anywhere |= report_matches(&expect_anywhere);
- } while (!end_test_checks(match_expect_start && match_expect_anywhere));
+ match_expect_inscope |= report_matches(&expect_inscope);
+ } while (!end_test_checks(match_expect_inscope));
KUNIT_EXPECT_TRUE(test, match_expect_start);
- KUNIT_EXPECT_TRUE(test, match_expect_anywhere);
+ KUNIT_EXPECT_FALSE(test, match_expect_inscope);
}
__no_kcsan
@@ -872,9 +894,9 @@ static void test_assert_exclusive_access_scoped(struct kunit *test)
do {
match_expect_start |= report_matches(&expect_start1) || report_matches(&expect_start2);
match_expect_inscope |= report_matches(&expect_inscope);
- } while (!end_test_checks(match_expect_start && match_expect_inscope));
+ } while (!end_test_checks(match_expect_inscope));
KUNIT_EXPECT_TRUE(test, match_expect_start);
- KUNIT_EXPECT_TRUE(test, match_expect_inscope);
+ KUNIT_EXPECT_FALSE(test, match_expect_inscope);
}
/*
@@ -1224,7 +1246,7 @@ static void kcsan_test_exit(void)
tracepoint_synchronize_unregister();
}
-late_initcall(kcsan_test_init);
+late_initcall_sync(kcsan_test_init);
module_exit(kcsan_test_exit);
MODULE_LICENSE("GPL v2");
diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c
index 21137929d42831..fc15077991c479 100644
--- a/kernel/kcsan/report.c
+++ b/kernel/kcsan/report.c
@@ -8,6 +8,7 @@
#include <linux/debug_locks.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
+#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/preempt.h>
@@ -31,6 +32,7 @@ struct access_info {
int access_type;
int task_pid;
int cpu_id;
+ unsigned long ip;
};
/*
@@ -245,6 +247,10 @@ static const char *get_access_type(int type)
return "write (scoped)";
case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
return "write (marked, scoped)";
+ case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE:
+ return "read-write (scoped)";
+ case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
+ return "read-write (marked, scoped)";
default:
BUG();
}
@@ -300,6 +306,48 @@ static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries
return skip;
}
+/*
+ * Skips to the first entry that matches the function of @ip, and then replaces
+ * that entry with @ip, returning the entries to skip.
+ */
+static int
+replace_stack_entry(unsigned long stack_entries[], int num_entries, unsigned long ip)
+{
+ unsigned long symbolsize, offset;
+ unsigned long target_func;
+ int skip;
+
+ if (kallsyms_lookup_size_offset(ip, &symbolsize, &offset))
+ target_func = ip - offset;
+ else
+ goto fallback;
+
+ for (skip = 0; skip < num_entries; ++skip) {
+ unsigned long func = stack_entries[skip];
+
+ if (!kallsyms_lookup_size_offset(func, &symbolsize, &offset))
+ goto fallback;
+ func -= offset;
+
+ if (func == target_func) {
+ stack_entries[skip] = ip;
+ return skip;
+ }
+ }
+
+fallback:
+ /* Should not happen; the resulting stack trace is likely misleading. */
+ WARN_ONCE(1, "Cannot find frame for %pS in stack trace", (void *)ip);
+ return get_stack_skipnr(stack_entries, num_entries);
+}
+
+static int
+sanitize_stack_entries(unsigned long stack_entries[], int num_entries, unsigned long ip)
+{
+ return ip ? replace_stack_entry(stack_entries, num_entries, ip) :
+ get_stack_skipnr(stack_entries, num_entries);
+}
+
/* Compares symbolized strings of addr1 and addr2. */
static int sym_strcmp(void *addr1, void *addr2)
{
@@ -327,12 +375,12 @@ static void print_verbose_info(struct task_struct *task)
static void print_report(enum kcsan_value_change value_change,
const struct access_info *ai,
- const struct other_info *other_info,
+ struct other_info *other_info,
u64 old, u64 new, u64 mask)
{
unsigned long stack_entries[NUM_STACK_ENTRIES] = { 0 };
int num_stack_entries = stack_trace_save(stack_entries, NUM_STACK_ENTRIES, 1);
- int skipnr = get_stack_skipnr(stack_entries, num_stack_entries);
+ int skipnr = sanitize_stack_entries(stack_entries, num_stack_entries, ai->ip);
unsigned long this_frame = stack_entries[skipnr];
unsigned long other_frame = 0;
int other_skipnr = 0; /* silence uninit warnings */
@@ -344,8 +392,9 @@ static void print_report(enum kcsan_value_change value_change,
return;
if (other_info) {
- other_skipnr = get_stack_skipnr(other_info->stack_entries,
- other_info->num_stack_entries);
+ other_skipnr = sanitize_stack_entries(other_info->stack_entries,
+ other_info->num_stack_entries,
+ other_info->ai.ip);
other_frame = other_info->stack_entries[other_skipnr];
/* @value_change is only known for the other thread */
@@ -576,21 +625,23 @@ discard:
}
static struct access_info prepare_access_info(const volatile void *ptr, size_t size,
- int access_type)
+ int access_type, unsigned long ip)
{
return (struct access_info) {
.ptr = ptr,
.size = size,
.access_type = access_type,
.task_pid = in_task() ? task_pid_nr(current) : -1,
- .cpu_id = raw_smp_processor_id()
+ .cpu_id = raw_smp_processor_id(),
+ /* Only replace stack entry with @ip if scoped access. */
+ .ip = (access_type & KCSAN_ACCESS_SCOPED) ? ip : 0,
};
}
void kcsan_report_set_info(const volatile void *ptr, size_t size, int access_type,
- int watchpoint_idx)
+ unsigned long ip, int watchpoint_idx)
{
- const struct access_info ai = prepare_access_info(ptr, size, access_type);
+ const struct access_info ai = prepare_access_info(ptr, size, access_type, ip);
unsigned long flags;
kcsan_disable_current();
@@ -603,10 +654,10 @@ void kcsan_report_set_info(const volatile void *ptr, size_t size, int access_typ
}
void kcsan_report_known_origin(const volatile void *ptr, size_t size, int access_type,
- enum kcsan_value_change value_change, int watchpoint_idx,
- u64 old, u64 new, u64 mask)
+ unsigned long ip, enum kcsan_value_change value_change,
+ int watchpoint_idx, u64 old, u64 new, u64 mask)
{
- const struct access_info ai = prepare_access_info(ptr, size, access_type);
+ const struct access_info ai = prepare_access_info(ptr, size, access_type, ip);
struct other_info *other_info = &other_infos[watchpoint_idx];
unsigned long flags = 0;
@@ -637,9 +688,9 @@ out:
}
void kcsan_report_unknown_origin(const volatile void *ptr, size_t size, int access_type,
- u64 old, u64 new, u64 mask)
+ unsigned long ip, u64 old, u64 new, u64 mask)
{
- const struct access_info ai = prepare_access_info(ptr, size, access_type);
+ const struct access_info ai = prepare_access_info(ptr, size, access_type, ip);
unsigned long flags;
kcsan_disable_current();
diff --git a/kernel/kcsan/selftest.c b/kernel/kcsan/selftest.c
index 7f29cb0f5e6341..b4295a3892b7c0 100644
--- a/kernel/kcsan/selftest.c
+++ b/kernel/kcsan/selftest.c
@@ -18,7 +18,7 @@
#define ITERS_PER_TEST 2000
/* Test requirements. */
-static bool test_requires(void)
+static bool __init test_requires(void)
{
/* random should be initialized for the below tests */
return prandom_u32() + prandom_u32() != 0;
@@ -28,14 +28,18 @@ static bool test_requires(void)
* Test watchpoint encode and decode: check that encoding some access's info,
* and then subsequent decode preserves the access's info.
*/
-static bool test_encode_decode(void)
+static bool __init test_encode_decode(void)
{
int i;
for (i = 0; i < ITERS_PER_TEST; ++i) {
size_t size = prandom_u32_max(MAX_ENCODABLE_SIZE) + 1;
bool is_write = !!prandom_u32_max(2);
+ unsigned long verif_masked_addr;
+ long encoded_watchpoint;
+ bool verif_is_write;
unsigned long addr;
+ size_t verif_size;
prandom_bytes(&addr, sizeof(addr));
if (addr < PAGE_SIZE)
@@ -44,53 +48,37 @@ static bool test_encode_decode(void)
if (WARN_ON(!check_encodable(addr, size)))
return false;
- /* Encode and decode */
- {
- const long encoded_watchpoint =
- encode_watchpoint(addr, size, is_write);
- unsigned long verif_masked_addr;
- size_t verif_size;
- bool verif_is_write;
-
- /* Check special watchpoints */
- if (WARN_ON(decode_watchpoint(
- INVALID_WATCHPOINT, &verif_masked_addr,
- &verif_size, &verif_is_write)))
- return false;
- if (WARN_ON(decode_watchpoint(
- CONSUMED_WATCHPOINT, &verif_masked_addr,
- &verif_size, &verif_is_write)))
- return false;
-
- /* Check decoding watchpoint returns same data */
- if (WARN_ON(!decode_watchpoint(
- encoded_watchpoint, &verif_masked_addr,
- &verif_size, &verif_is_write)))
- return false;
- if (WARN_ON(verif_masked_addr !=
- (addr & WATCHPOINT_ADDR_MASK)))
- goto fail;
- if (WARN_ON(verif_size != size))
- goto fail;
- if (WARN_ON(is_write != verif_is_write))
- goto fail;
-
- continue;
-fail:
- pr_err("%s fail: %s %zu bytes @ %lx -> encoded: %lx -> %s %zu bytes @ %lx\n",
- __func__, is_write ? "write" : "read", size,
- addr, encoded_watchpoint,
- verif_is_write ? "write" : "read", verif_size,
- verif_masked_addr);
+ encoded_watchpoint = encode_watchpoint(addr, size, is_write);
+
+ /* Check special watchpoints */
+ if (WARN_ON(decode_watchpoint(INVALID_WATCHPOINT, &verif_masked_addr, &verif_size, &verif_is_write)))
return false;
- }
+ if (WARN_ON(decode_watchpoint(CONSUMED_WATCHPOINT, &verif_masked_addr, &verif_size, &verif_is_write)))
+ return false;
+
+ /* Check decoding watchpoint returns same data */
+ if (WARN_ON(!decode_watchpoint(encoded_watchpoint, &verif_masked_addr, &verif_size, &verif_is_write)))
+ return false;
+ if (WARN_ON(verif_masked_addr != (addr & WATCHPOINT_ADDR_MASK)))
+ goto fail;
+ if (WARN_ON(verif_size != size))
+ goto fail;
+ if (WARN_ON(is_write != verif_is_write))
+ goto fail;
+
+ continue;
+fail:
+ pr_err("%s fail: %s %zu bytes @ %lx -> encoded: %lx -> %s %zu bytes @ %lx\n",
+ __func__, is_write ? "write" : "read", size, addr, encoded_watchpoint,
+ verif_is_write ? "write" : "read", verif_size, verif_masked_addr);
+ return false;
}
return true;
}
/* Test access matching function. */
-static bool test_matching_access(void)
+static bool __init test_matching_access(void)
{
if (WARN_ON(!matching_access(10, 1, 10, 1)))
return false;
diff --git a/kernel/reboot.c b/kernel/reboot.c
index d6e0f9fb7f04db..6bcc5d6a6572b7 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -33,6 +33,7 @@ EXPORT_SYMBOL(cad_pid);
#define DEFAULT_REBOOT_MODE
#endif
enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE;
+EXPORT_SYMBOL_GPL(reboot_mode);
enum reboot_mode panic_reboot_mode = REBOOT_UNDEFINED;
/*
diff --git a/kernel/sched/autogroup.c b/kernel/sched/autogroup.c
index 2067080bb2358a..8629b37d118e75 100644
--- a/kernel/sched/autogroup.c
+++ b/kernel/sched/autogroup.c
@@ -31,7 +31,7 @@ static inline void autogroup_destroy(struct kref *kref)
ag->tg->rt_se = NULL;
ag->tg->rt_rq = NULL;
#endif
- sched_offline_group(ag->tg);
+ sched_release_group(ag->tg);
sched_destroy_group(ag->tg);
}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 523fd602ea90cd..3c9b0fda64ac08 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3726,6 +3726,9 @@ out:
bool cpus_share_cache(int this_cpu, int that_cpu)
{
+ if (this_cpu == that_cpu)
+ return true;
+
return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
}
@@ -6625,13 +6628,13 @@ __setup("preempt=", setup_preempt_mode);
static void __init preempt_dynamic_init(void)
{
if (preempt_dynamic_mode == preempt_dynamic_undefined) {
- if (IS_ENABLED(CONFIG_PREEMPT_NONE_BEHAVIOUR)) {
+ if (IS_ENABLED(CONFIG_PREEMPT_NONE)) {
sched_dynamic_update(preempt_dynamic_none);
- } else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY_BEHAVIOUR)) {
+ } else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
sched_dynamic_update(preempt_dynamic_voluntary);
} else {
/* Default static call setting, nothing to do */
- WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_BEHAVIOUR));
+ WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
preempt_dynamic_mode = preempt_dynamic_full;
pr_info("Dynamic Preempt: full\n");
}
@@ -9716,6 +9719,22 @@ static void sched_free_group(struct task_group *tg)
kmem_cache_free(task_group_cache, tg);
}
+static void sched_free_group_rcu(struct rcu_head *rcu)
+{
+ sched_free_group(container_of(rcu, struct task_group, rcu));
+}
+
+static void sched_unregister_group(struct task_group *tg)
+{
+ unregister_fair_sched_group(tg);
+ unregister_rt_sched_group(tg);
+ /*
+ * We have to wait for yet another RCU grace period to expire, as
+ * print_cfs_stats() might run concurrently.
+ */
+ call_rcu(&tg->rcu, sched_free_group_rcu);
+}
+
/* allocate runqueue etc for a new task group */
struct task_group *sched_create_group(struct task_group *parent)
{
@@ -9759,25 +9778,35 @@ void sched_online_group(struct task_group *tg, struct task_group *parent)
}
/* rcu callback to free various structures associated with a task group */
-static void sched_free_group_rcu(struct rcu_head *rhp)
+static void sched_unregister_group_rcu(struct rcu_head *rhp)
{
/* Now it should be safe to free those cfs_rqs: */
- sched_free_group(container_of(rhp, struct task_group, rcu));
+ sched_unregister_group(container_of(rhp, struct task_group, rcu));
}
void sched_destroy_group(struct task_group *tg)
{
/* Wait for possible concurrent references to cfs_rqs complete: */
- call_rcu(&tg->rcu, sched_free_group_rcu);
+ call_rcu(&tg->rcu, sched_unregister_group_rcu);
}
-void sched_offline_group(struct task_group *tg)
+void sched_release_group(struct task_group *tg)
{
unsigned long flags;
- /* End participation in shares distribution: */
- unregister_fair_sched_group(tg);
-
+ /*
+ * Unlink first, to keep walk_tg_tree_from() from finding us (via
+ * sched_cfs_period_timer()).
+ *
+ * For this to be effective, we have to wait for all pending users of
+ * this task group to leave their RCU critical section to ensure no new
+ * user will see our dying task group any more. Specifically ensure
+ * that tg_unthrottle_up() won't add decayed cfs_rq's to it.
+ *
+ * We therefore defer calling unregister_fair_sched_group() to
+ * sched_unregister_group(), which is guaranteed to be called only after the
+ * current RCU grace period has expired.
+ */
spin_lock_irqsave(&task_group_lock, flags);
list_del_rcu(&tg->list);
list_del_rcu(&tg->siblings);
@@ -9896,7 +9925,7 @@ static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
{
struct task_group *tg = css_tg(css);
- sched_offline_group(tg);
+ sched_release_group(tg);
}
static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
@@ -9906,7 +9935,7 @@ static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
/*
* Relies on the RCU grace period between css_released() and this.
*/
- sched_free_group(tg);
+ sched_unregister_group(tg);
}
/*
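Taken together, the core.c hunks split task-group teardown into two RCU-deferred stages: sched_release_group() only unlinks the group, while unregistration and the final free happen in later grace periods. A condensed sketch of the resulting ordering (the outline and example_teardown() are illustrative, not part of the patch):

/*
 * cpu_cgroup_css_released() -> sched_release_group(tg)    unlink under lock
 *   ... RCU grace period ...
 * cpu_cgroup_css_free()     -> sched_unregister_group(tg) unregister fair/rt
 *   ... RCU grace period (call_rcu) ...
 *                              sched_free_group(tg)       free cfs_rq/rt_rq
 */
static void example_teardown(struct task_group *tg)
{
	sched_release_group(tg);	/* stop tree walks from finding tg */
	sched_destroy_group(tg);	/* defers unregister + free via RCU */
}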
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 13950beb01a251..6e476f6d94351b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -11456,8 +11456,6 @@ void free_fair_sched_group(struct task_group *tg)
{
int i;
- destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
-
for_each_possible_cpu(i) {
if (tg->cfs_rq)
kfree(tg->cfs_rq[i]);
@@ -11534,6 +11532,8 @@ void unregister_fair_sched_group(struct task_group *tg)
struct rq *rq;
int cpu;
+ destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
+
for_each_possible_cpu(cpu) {
if (tg->se[cpu])
remove_entity_load_avg(tg->se[cpu]);
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index bb945f8faecac7..b48baaba2fc2ec 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -137,13 +137,17 @@ static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
return rt_rq->rq;
}
-void free_rt_sched_group(struct task_group *tg)
+void unregister_rt_sched_group(struct task_group *tg)
{
- int i;
-
if (tg->rt_se)
destroy_rt_bandwidth(&tg->rt_bandwidth);
+}
+
+void free_rt_sched_group(struct task_group *tg)
+{
+ int i;
+
for_each_possible_cpu(i) {
if (tg->rt_rq)
kfree(tg->rt_rq[i]);
@@ -250,6 +254,8 @@ static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
return &rq->rt;
}
+void unregister_rt_sched_group(struct task_group *tg) { }
+
void free_rt_sched_group(struct task_group *tg) { }
int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 7f1612d26c18cd..0e66749486e755 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -488,6 +488,7 @@ extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
+extern void unregister_rt_sched_group(struct task_group *tg);
extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
@@ -503,7 +504,7 @@ extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_online_group(struct task_group *tg,
struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
-extern void sched_offline_group(struct task_group *tg);
+extern void sched_release_group(struct task_group *tg);
extern void sched_move_task(struct task_struct *tsk);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 007a3ded0358ff..30bc880c3849cb 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -5602,10 +5602,11 @@ int modify_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
}
}
+ mutex_unlock(&ftrace_lock);
+
/* Removing the tmp_ops will add the updated direct callers to the functions */
unregister_ftrace_function(&tmp_ops);
- mutex_unlock(&ftrace_lock);
out_direct:
mutex_unlock(&direct_mutex);
return err;
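The reordering above drops ftrace_lock before calling unregister_ftrace_function(), which acquires ftrace_lock itself; calling it with the lock still held would self-deadlock on the non-recursive mutex. A minimal userspace analogue of that rule (pthreads; not part of the patch):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for unregister_ftrace_function(): takes the same lock. */
static void callee_taking_lock(void)
{
	pthread_mutex_lock(&lock);
	puts("callee ran");
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_mutex_lock(&lock);
	/* ... critical section ... */
	pthread_mutex_unlock(&lock);	/* release first, as the fix does */
	callee_taking_lock();		/* doing this while still holding "lock" would hang */
	return 0;
}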
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index f6520d0a4c8c87..2699e9e562b1da 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -5228,6 +5228,9 @@ void ring_buffer_reset(struct trace_buffer *buffer)
struct ring_buffer_per_cpu *cpu_buffer;
int cpu;
+ /* prevent another thread from changing buffer sizes */
+ mutex_lock(&buffer->mutex);
+
for_each_buffer_cpu(buffer, cpu) {
cpu_buffer = buffer->buffers[cpu];
@@ -5246,6 +5249,8 @@ void ring_buffer_reset(struct trace_buffer *buffer)
atomic_dec(&cpu_buffer->record_disabled);
atomic_dec(&cpu_buffer->resize_disabled);
}
+
+ mutex_unlock(&buffer->mutex);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset);
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index 0abc9a413b4dd4..8a10046c775fdb 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -1953,9 +1953,10 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
if (!hist_field->type)
goto free;
- if (field->filter_type == FILTER_STATIC_STRING)
+ if (field->filter_type == FILTER_STATIC_STRING) {
hist_field->fn = hist_field_string;
- else if (field->filter_type == FILTER_DYN_STRING)
+ hist_field->size = field->size;
+ } else if (field->filter_type == FILTER_DYN_STRING)
hist_field->fn = hist_field_dynstring;
else
hist_field->fn = hist_field_pstring;
@@ -2580,7 +2581,8 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
operand1_str = str;
str = sep+1;
- if (!operand1_str || !str)
+ /* Binary operator requires both operands */
+ if (*operand1_str == '\0' || *str == '\0')
goto free;
operand_flags = 0;
@@ -3025,7 +3027,7 @@ static inline void __update_field_vars(struct tracing_map_elt *elt,
char *str = elt_data->field_var_str[j++];
char *val_str = (char *)(uintptr_t)var_val;
- strscpy(str, val_str, STR_VAR_LEN_MAX);
+ strscpy(str, val_str, val->size);
var_val = (u64)(uintptr_t)str;
}
tracing_map_set_var(elt, var_idx, var_val);
@@ -4920,7 +4922,7 @@ static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
str = elt_data->field_var_str[idx];
val_str = (char *)(uintptr_t)hist_val;
- strscpy(str, val_str, STR_VAR_LEN_MAX);
+ strscpy(str, val_str, hist_field->size);
hist_val = (u64)(uintptr_t)str;
}
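Both strscpy() call sites above now bound the copy by the destination field's recorded size rather than the STR_VAR_LEN_MAX constant, so an overlong source string can no longer spill past the field. A small userspace analogue of that bound (bounded_copy() is a hypothetical stand-in for strscpy()):

#include <stdio.h>

/* Hypothetical stand-in for strscpy(): copies at most size - 1 bytes plus NUL. */
static void bounded_copy(char *dst, const char *src, size_t size)
{
	snprintf(dst, size, "%s", src);
}

int main(void)
{
	struct { char field[8]; char neighbour[8]; } rec = { "", "intact" };

	/* Bounding by the field's real size leaves "neighbour" untouched. */
	bounded_copy(rec.field, "far-too-long-value", sizeof(rec.field));
	printf("%s / %s\n", rec.field, rec.neighbour);	/* far-too / intact */
	return 0;
}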
diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
index 3e4a1651e32919..7520d43aed5540 100644
--- a/kernel/trace/trace_osnoise.c
+++ b/kernel/trace/trace_osnoise.c
@@ -55,7 +55,8 @@ struct osnoise_instance {
struct list_head list;
struct trace_array *tr;
};
-struct list_head osnoise_instances;
+
+static struct list_head osnoise_instances;
static bool osnoise_has_registered_instances(void)
{
diff --git a/lib/decompress_unzstd.c b/lib/decompress_unzstd.c
index 6b629ab31c1eb7..a512b99ae16a93 100644
--- a/lib/decompress_unzstd.c
+++ b/lib/decompress_unzstd.c
@@ -68,11 +68,7 @@
#ifdef STATIC
# define UNZSTD_PREBOOT
# include "xxhash.c"
-# include "zstd/entropy_common.c"
-# include "zstd/fse_decompress.c"
-# include "zstd/huf_decompress.c"
-# include "zstd/zstd_common.c"
-# include "zstd/decompress.c"
+# include "zstd/decompress_sources.h"
#endif
#include <linux/decompress/mm.h>
@@ -91,11 +87,15 @@
static int INIT handle_zstd_error(size_t ret, void (*error)(char *x))
{
- const int err = ZSTD_getErrorCode(ret);
+ const zstd_error_code err = zstd_get_error_code(ret);
- if (!ZSTD_isError(ret))
+ if (!zstd_is_error(ret))
return 0;
+ /*
+ * zstd_get_error_name() cannot be used because error takes a char *
+ * not a const char *
+ */
switch (err) {
case ZSTD_error_memory_allocation:
error("ZSTD decompressor ran out of memory");
@@ -124,28 +124,28 @@ static int INIT decompress_single(const u8 *in_buf, long in_len, u8 *out_buf,
long out_len, long *in_pos,
void (*error)(char *x))
{
- const size_t wksp_size = ZSTD_DCtxWorkspaceBound();
+ const size_t wksp_size = zstd_dctx_workspace_bound();
void *wksp = large_malloc(wksp_size);
- ZSTD_DCtx *dctx = ZSTD_initDCtx(wksp, wksp_size);
+ zstd_dctx *dctx = zstd_init_dctx(wksp, wksp_size);
int err;
size_t ret;
if (dctx == NULL) {
- error("Out of memory while allocating ZSTD_DCtx");
+ error("Out of memory while allocating zstd_dctx");
err = -1;
goto out;
}
/*
* Find out how large the frame actually is; there may be junk at
- * the end of the frame that ZSTD_decompressDCtx() can't handle.
+ * the end of the frame that zstd_decompress_dctx() can't handle.
*/
- ret = ZSTD_findFrameCompressedSize(in_buf, in_len);
+ ret = zstd_find_frame_compressed_size(in_buf, in_len);
err = handle_zstd_error(ret, error);
if (err)
goto out;
in_len = (long)ret;
- ret = ZSTD_decompressDCtx(dctx, out_buf, out_len, in_buf, in_len);
+ ret = zstd_decompress_dctx(dctx, out_buf, out_len, in_buf, in_len);
err = handle_zstd_error(ret, error);
if (err)
goto out;
@@ -167,14 +167,14 @@ static int INIT __unzstd(unsigned char *in_buf, long in_len,
long *in_pos,
void (*error)(char *x))
{
- ZSTD_inBuffer in;
- ZSTD_outBuffer out;
- ZSTD_frameParams params;
+ zstd_in_buffer in;
+ zstd_out_buffer out;
+ zstd_frame_header header;
void *in_allocated = NULL;
void *out_allocated = NULL;
void *wksp = NULL;
size_t wksp_size;
- ZSTD_DStream *dstream;
+ zstd_dstream *dstream;
int err;
size_t ret;
@@ -238,13 +238,13 @@ static int INIT __unzstd(unsigned char *in_buf, long in_len,
out.size = out_len;
/*
- * We need to know the window size to allocate the ZSTD_DStream.
+ * We need to know the window size to allocate the zstd_dstream.
* Since we are streaming, we need to allocate a buffer for the sliding
* window. The window size varies from 1 KB to ZSTD_WINDOWSIZE_MAX
* (8 MB), so it is important to use the actual value so as not to
* waste memory when it is smaller.
*/
- ret = ZSTD_getFrameParams(&params, in.src, in.size);
+ ret = zstd_get_frame_header(&header, in.src, in.size);
err = handle_zstd_error(ret, error);
if (err)
goto out;
@@ -253,19 +253,19 @@ static int INIT __unzstd(unsigned char *in_buf, long in_len,
err = -1;
goto out;
}
- if (params.windowSize > ZSTD_WINDOWSIZE_MAX) {
+ if (header.windowSize > ZSTD_WINDOWSIZE_MAX) {
error("ZSTD-compressed data has too large a window size");
err = -1;
goto out;
}
/*
- * Allocate the ZSTD_DStream now that we know how much memory is
+ * Allocate the zstd_dstream now that we know how much memory is
* required.
*/
- wksp_size = ZSTD_DStreamWorkspaceBound(params.windowSize);
+ wksp_size = zstd_dstream_workspace_bound(header.windowSize);
wksp = large_malloc(wksp_size);
- dstream = ZSTD_initDStream(params.windowSize, wksp, wksp_size);
+ dstream = zstd_init_dstream(header.windowSize, wksp, wksp_size);
if (dstream == NULL) {
error("Out of memory while allocating ZSTD_DStream");
err = -1;
@@ -298,7 +298,7 @@ static int INIT __unzstd(unsigned char *in_buf, long in_len,
in.size = in_len;
}
/* Returns zero when the frame is complete. */
- ret = ZSTD_decompressStream(dstream, &out, &in);
+ ret = zstd_decompress_stream(dstream, &out, &in);
err = handle_zstd_error(ret, error);
if (err)
goto out;
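The renamed calls above follow the kernel's updated zstd interface; a condensed sketch of the streaming sequence __unzstd() performs (names taken from the hunks; error handling, input refill and the flush path are omitted, so this is not a complete decompressor):

/* Sketch only: single pass over one frame, no error handling. */
static void example_unzstd(const u8 *src, size_t src_len, u8 *dst, size_t dst_len)
{
	zstd_in_buffer in = { .src = src, .size = src_len, .pos = 0 };
	zstd_out_buffer out = { .dst = dst, .size = dst_len, .pos = 0 };
	zstd_frame_header header;
	zstd_dstream *dstream;
	size_t wksp_size, ret;
	void *wksp;

	zstd_get_frame_header(&header, in.src, in.size);
	wksp_size = zstd_dstream_workspace_bound(header.windowSize);
	wksp = large_malloc(wksp_size);
	dstream = zstd_init_dstream(header.windowSize, wksp, wksp_size);

	ret = zstd_decompress_stream(dstream, &out, &in);
	/* ret == 0: frame complete; otherwise refill "in", retry, or handle the error. */

	large_free(wksp);
}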
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index c259842f6d443c..e2ce8f9b7605e3 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -613,8 +613,7 @@ static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args,
*/
rpage->zone_device_data = dmirror;
- *dst = migrate_pfn(page_to_pfn(dpage)) |
- MIGRATE_PFN_LOCKED;
+ *dst = migrate_pfn(page_to_pfn(dpage));
if ((*src & MIGRATE_PFN_WRITE) ||
(!spage && args->vma->vm_flags & VM_WRITE))
*dst |= MIGRATE_PFN_WRITE;
@@ -1137,7 +1136,7 @@ static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
lock_page(dpage);
xa_erase(&dmirror->pt, addr >> PAGE_SHIFT);
copy_highpage(dpage, spage);
- *dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+ *dst = migrate_pfn(page_to_pfn(dpage));
if (*src & MIGRATE_PFN_WRITE)
*dst |= MIGRATE_PFN_WRITE;
}
diff --git a/lib/zstd/Makefile b/lib/zstd/Makefile
index f5d778e7e5c72d..65218ec5b8f2a4 100644
--- a/lib/zstd/Makefile
+++ b/lib/zstd/Makefile
@@ -1,10 +1,46 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
+# ################################################################
+# Copyright (c) Facebook, Inc.
+# All rights reserved.
+#
+# This source code is licensed under both the BSD-style license (found in the
+# LICENSE file in the root directory of this source tree) and the GPLv2 (found
+# in the COPYING file in the root directory of this source tree).
+# You may select, at your option, one of the above-listed licenses.
+# ################################################################
obj-$(CONFIG_ZSTD_COMPRESS) += zstd_compress.o
obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd_decompress.o
ccflags-y += -O3
-zstd_compress-y := fse_compress.o huf_compress.o compress.o \
- entropy_common.o fse_decompress.o zstd_common.o
-zstd_decompress-y := huf_decompress.o decompress.o \
- entropy_common.o fse_decompress.o zstd_common.o
+zstd_compress-y := \
+ zstd_compress_module.o \
+ common/debug.o \
+ common/entropy_common.o \
+ common/error_private.o \
+ common/fse_decompress.o \
+ common/zstd_common.o \
+ compress/fse_compress.o \
+ compress/hist.o \
+ compress/huf_compress.o \
+ compress/zstd_compress.o \
+ compress/zstd_compress_literals.o \
+ compress/zstd_compress_sequences.o \
+ compress/zstd_compress_superblock.o \
+ compress/zstd_double_fast.o \
+ compress/zstd_fast.o \
+ compress/zstd_lazy.o \
+ compress/zstd_ldm.o \
+ compress/zstd_opt.o \
+
+zstd_decompress-y := \
+ zstd_decompress_module.o \
+ common/debug.o \
+ common/entropy_common.o \
+ common/error_private.o \
+ common/fse_decompress.o \
+ common/zstd_common.o \
+ decompress/huf_decompress.o \
+ decompress/zstd_ddict.o \
+ decompress/zstd_decompress.o \
+ decompress/zstd_decompress_block.o \
diff --git a/lib/zstd/bitstream.h b/lib/zstd/bitstream.h
deleted file mode 100644
index 5d6343c1a909ed..00000000000000
--- a/lib/zstd/bitstream.h
+++ /dev/null
@@ -1,380 +0,0 @@
-/*
- * bitstream
- * Part of FSE library
- * header file (to include)
- * Copyright (C) 2013-2016, Yann Collet.
- *
- * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation. This program is dual-licensed; you may select
- * either version 2 of the GNU General Public License ("GPL") or BSD license
- * ("BSD").
- *
- * You can contact the author at :
- * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
- */
-#ifndef BITSTREAM_H_MODULE
-#define BITSTREAM_H_MODULE
-
-/*
-* This API consists of small unitary functions, which must be inlined for best performance.
-* Since link-time-optimization is not available for all compilers,
-* these functions are defined into a .h to be included.
-*/
-
-/*-****************************************
-* Dependencies
-******************************************/
-#include "error_private.h" /* error codes and messages */
-#include "mem.h" /* unaligned access routines */
-
-/*=========================================
-* Target specific
-=========================================*/
-#define STREAM_ACCUMULATOR_MIN_32 25
-#define STREAM_ACCUMULATOR_MIN_64 57
-#define STREAM_ACCUMULATOR_MIN ((U32)(ZSTD_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64))
-
-/*-******************************************
-* bitStream encoding API (write forward)
-********************************************/
-/* bitStream can mix input from multiple sources.
-* A critical property of these streams is that they encode and decode in **reverse** direction.
-* So the first bit sequence you add will be the last to be read, like a LIFO stack.
-*/
-typedef struct {
- size_t bitContainer;
- int bitPos;
- char *startPtr;
- char *ptr;
- char *endPtr;
-} BIT_CStream_t;
-
-ZSTD_STATIC size_t BIT_initCStream(BIT_CStream_t *bitC, void *dstBuffer, size_t dstCapacity);
-ZSTD_STATIC void BIT_addBits(BIT_CStream_t *bitC, size_t value, unsigned nbBits);
-ZSTD_STATIC void BIT_flushBits(BIT_CStream_t *bitC);
-ZSTD_STATIC size_t BIT_closeCStream(BIT_CStream_t *bitC);
-
-/* Start with initCStream, providing the size of buffer to write into.
-* bitStream will never write outside of this buffer.
-* `dstCapacity` must be >= sizeof(bitD->bitContainer), otherwise @return will be an error code.
-*
-* bits are first added to a local register.
-* Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems.
-* Writing data into memory is an explicit operation, performed by the flushBits function.
-* Hence keep track how many bits are potentially stored into local register to avoid register overflow.
-* After a flushBits, a maximum of 7 bits might still be stored into local register.
-*
-* Avoid storing elements of more than 24 bits if you want compatibility with 32-bits bitstream readers.
-*
-* Last operation is to close the bitStream.
-* The function returns the final size of CStream in bytes.
-* If data couldn't fit into `dstBuffer`, it will return a 0 ( == not storable)
-*/
-
-/*-********************************************
-* bitStream decoding API (read backward)
-**********************************************/
-typedef struct {
- size_t bitContainer;
- unsigned bitsConsumed;
- const char *ptr;
- const char *start;
-} BIT_DStream_t;
-
-typedef enum {
- BIT_DStream_unfinished = 0,
- BIT_DStream_endOfBuffer = 1,
- BIT_DStream_completed = 2,
- BIT_DStream_overflow = 3
-} BIT_DStream_status; /* result of BIT_reloadDStream() */
-/* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
-
-ZSTD_STATIC size_t BIT_initDStream(BIT_DStream_t *bitD, const void *srcBuffer, size_t srcSize);
-ZSTD_STATIC size_t BIT_readBits(BIT_DStream_t *bitD, unsigned nbBits);
-ZSTD_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t *bitD);
-ZSTD_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t *bitD);
-
-/* Start by invoking BIT_initDStream().
-* A chunk of the bitStream is then stored into a local register.
-* Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
-* You can then retrieve bitFields stored into the local register, **in reverse order**.
-* Local register is explicitly reloaded from memory by the BIT_reloadDStream() method.
-* A reload guarantee a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished.
-* Otherwise, it can be less than that, so proceed accordingly.
-* Checking if DStream has reached its end can be performed with BIT_endOfDStream().
-*/
-
-/*-****************************************
-* unsafe API
-******************************************/
-ZSTD_STATIC void BIT_addBitsFast(BIT_CStream_t *bitC, size_t value, unsigned nbBits);
-/* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */
-
-ZSTD_STATIC void BIT_flushBitsFast(BIT_CStream_t *bitC);
-/* unsafe version; does not check buffer overflow */
-
-ZSTD_STATIC size_t BIT_readBitsFast(BIT_DStream_t *bitD, unsigned nbBits);
-/* faster, but works only if nbBits >= 1 */
-
-/*-**************************************************************
-* Internal functions
-****************************************************************/
-ZSTD_STATIC unsigned BIT_highbit32(register U32 val) { return 31 - __builtin_clz(val); }
-
-/*===== Local Constants =====*/
-static const unsigned BIT_mask[] = {0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF,
- 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0x1FFFF,
- 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF, 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF}; /* up to 26 bits */
-
-/*-**************************************************************
-* bitStream encoding
-****************************************************************/
-/*! BIT_initCStream() :
- * `dstCapacity` must be > sizeof(void*)
- * @return : 0 if success,
- otherwise an error code (can be tested using ERR_isError() ) */
-ZSTD_STATIC size_t BIT_initCStream(BIT_CStream_t *bitC, void *startPtr, size_t dstCapacity)
-{
- bitC->bitContainer = 0;
- bitC->bitPos = 0;
- bitC->startPtr = (char *)startPtr;
- bitC->ptr = bitC->startPtr;
- bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->ptr);
- if (dstCapacity <= sizeof(bitC->ptr))
- return ERROR(dstSize_tooSmall);
- return 0;
-}
-
-/*! BIT_addBits() :
- can add up to 26 bits into `bitC`.
- Does not check for register overflow ! */
-ZSTD_STATIC void BIT_addBits(BIT_CStream_t *bitC, size_t value, unsigned nbBits)
-{
- bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos;
- bitC->bitPos += nbBits;
-}
-
-/*! BIT_addBitsFast() :
- * works only if `value` is _clean_, meaning all high bits above nbBits are 0 */
-ZSTD_STATIC void BIT_addBitsFast(BIT_CStream_t *bitC, size_t value, unsigned nbBits)
-{
- bitC->bitContainer |= value << bitC->bitPos;
- bitC->bitPos += nbBits;
-}
-
-/*! BIT_flushBitsFast() :
- * unsafe version; does not check buffer overflow */
-ZSTD_STATIC void BIT_flushBitsFast(BIT_CStream_t *bitC)
-{
- size_t const nbBytes = bitC->bitPos >> 3;
- ZSTD_writeLEST(bitC->ptr, bitC->bitContainer);
- bitC->ptr += nbBytes;
- bitC->bitPos &= 7;
- bitC->bitContainer >>= nbBytes * 8; /* if bitPos >= sizeof(bitContainer)*8 --> undefined behavior */
-}
-
-/*! BIT_flushBits() :
- * safe version; check for buffer overflow, and prevents it.
- * note : does not signal buffer overflow. This will be revealed later on using BIT_closeCStream() */
-ZSTD_STATIC void BIT_flushBits(BIT_CStream_t *bitC)
-{
- size_t const nbBytes = bitC->bitPos >> 3;
- ZSTD_writeLEST(bitC->ptr, bitC->bitContainer);
- bitC->ptr += nbBytes;
- if (bitC->ptr > bitC->endPtr)
- bitC->ptr = bitC->endPtr;
- bitC->bitPos &= 7;
- bitC->bitContainer >>= nbBytes * 8; /* if bitPos >= sizeof(bitContainer)*8 --> undefined behavior */
-}
-
-/*! BIT_closeCStream() :
- * @return : size of CStream, in bytes,
- or 0 if it could not fit into dstBuffer */
-ZSTD_STATIC size_t BIT_closeCStream(BIT_CStream_t *bitC)
-{
- BIT_addBitsFast(bitC, 1, 1); /* endMark */
- BIT_flushBits(bitC);
-
- if (bitC->ptr >= bitC->endPtr)
- return 0; /* doesn't fit within authorized budget : cancel */
-
- return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0);
-}
-
-/*-********************************************************
-* bitStream decoding
-**********************************************************/
-/*! BIT_initDStream() :
-* Initialize a BIT_DStream_t.
-* `bitD` : a pointer to an already allocated BIT_DStream_t structure.
-* `srcSize` must be the *exact* size of the bitStream, in bytes.
-* @return : size of stream (== srcSize) or an errorCode if a problem is detected
-*/
-ZSTD_STATIC size_t BIT_initDStream(BIT_DStream_t *bitD, const void *srcBuffer, size_t srcSize)
-{
- if (srcSize < 1) {
- memset(bitD, 0, sizeof(*bitD));
- return ERROR(srcSize_wrong);
- }
-
- if (srcSize >= sizeof(bitD->bitContainer)) { /* normal case */
- bitD->start = (const char *)srcBuffer;
- bitD->ptr = (const char *)srcBuffer + srcSize - sizeof(bitD->bitContainer);
- bitD->bitContainer = ZSTD_readLEST(bitD->ptr);
- {
- BYTE const lastByte = ((const BYTE *)srcBuffer)[srcSize - 1];
- bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; /* ensures bitsConsumed is always set */
- if (lastByte == 0)
- return ERROR(GENERIC); /* endMark not present */
- }
- } else {
- bitD->start = (const char *)srcBuffer;
- bitD->ptr = bitD->start;
- bitD->bitContainer = *(const BYTE *)(bitD->start);
- switch (srcSize) {
- case 7: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[6]) << (sizeof(bitD->bitContainer) * 8 - 16);
- fallthrough;
- case 6: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[5]) << (sizeof(bitD->bitContainer) * 8 - 24);
- fallthrough;
- case 5: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[4]) << (sizeof(bitD->bitContainer) * 8 - 32);
- fallthrough;
- case 4: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[3]) << 24;
- fallthrough;
- case 3: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[2]) << 16;
- fallthrough;
- case 2: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[1]) << 8;
- fallthrough;
- default:;
- }
- {
- BYTE const lastByte = ((const BYTE *)srcBuffer)[srcSize - 1];
- bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
- if (lastByte == 0)
- return ERROR(GENERIC); /* endMark not present */
- }
- bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize) * 8;
- }
-
- return srcSize;
-}
-
-ZSTD_STATIC size_t BIT_getUpperBits(size_t bitContainer, U32 const start) { return bitContainer >> start; }
-
-ZSTD_STATIC size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits) { return (bitContainer >> start) & BIT_mask[nbBits]; }
-
-ZSTD_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits) { return bitContainer & BIT_mask[nbBits]; }
-
-/*! BIT_lookBits() :
- * Provides next n bits from local register.
- * local register is not modified.
- * On 32-bits, maxNbBits==24.
- * On 64-bits, maxNbBits==56.
- * @return : value extracted
- */
-ZSTD_STATIC size_t BIT_lookBits(const BIT_DStream_t *bitD, U32 nbBits)
-{
- U32 const bitMask = sizeof(bitD->bitContainer) * 8 - 1;
- return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask - nbBits) & bitMask);
-}
-
-/*! BIT_lookBitsFast() :
-* unsafe version; only works only if nbBits >= 1 */
-ZSTD_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t *bitD, U32 nbBits)
-{
- U32 const bitMask = sizeof(bitD->bitContainer) * 8 - 1;
- return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask + 1) - nbBits) & bitMask);
-}
-
-ZSTD_STATIC void BIT_skipBits(BIT_DStream_t *bitD, U32 nbBits) { bitD->bitsConsumed += nbBits; }
-
-/*! BIT_readBits() :
- * Read (consume) next n bits from local register and update.
- * Pay attention to not read more than nbBits contained into local register.
- * @return : extracted value.
- */
-ZSTD_STATIC size_t BIT_readBits(BIT_DStream_t *bitD, U32 nbBits)
-{
- size_t const value = BIT_lookBits(bitD, nbBits);
- BIT_skipBits(bitD, nbBits);
- return value;
-}
-
-/*! BIT_readBitsFast() :
-* unsafe version; only works only if nbBits >= 1 */
-ZSTD_STATIC size_t BIT_readBitsFast(BIT_DStream_t *bitD, U32 nbBits)
-{
- size_t const value = BIT_lookBitsFast(bitD, nbBits);
- BIT_skipBits(bitD, nbBits);
- return value;
-}
-
-/*! BIT_reloadDStream() :
-* Refill `bitD` from buffer previously set in BIT_initDStream() .
-* This function is safe, it guarantees it will not read beyond src buffer.
-* @return : status of `BIT_DStream_t` internal register.
- if status == BIT_DStream_unfinished, internal register is filled with >= (sizeof(bitD->bitContainer)*8 - 7) bits */
-ZSTD_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t *bitD)
-{
- if (bitD->bitsConsumed > (sizeof(bitD->bitContainer) * 8)) /* should not happen => corruption detected */
- return BIT_DStream_overflow;
-
- if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) {
- bitD->ptr -= bitD->bitsConsumed >> 3;
- bitD->bitsConsumed &= 7;
- bitD->bitContainer = ZSTD_readLEST(bitD->ptr);
- return BIT_DStream_unfinished;
- }
- if (bitD->ptr == bitD->start) {
- if (bitD->bitsConsumed < sizeof(bitD->bitContainer) * 8)
- return BIT_DStream_endOfBuffer;
- return BIT_DStream_completed;
- }
- {
- U32 nbBytes = bitD->bitsConsumed >> 3;
- BIT_DStream_status result = BIT_DStream_unfinished;
- if (bitD->ptr - nbBytes < bitD->start) {
- nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */
- result = BIT_DStream_endOfBuffer;
- }
- bitD->ptr -= nbBytes;
- bitD->bitsConsumed -= nbBytes * 8;
- bitD->bitContainer = ZSTD_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */
- return result;
- }
-}
-
-/*! BIT_endOfDStream() :
-* @return Tells if DStream has exactly reached its end (all bits consumed).
-*/
-ZSTD_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t *DStream)
-{
- return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer) * 8));
-}
-
-#endif /* BITSTREAM_H_MODULE */
diff --git a/lib/zstd/common/bitstream.h b/lib/zstd/common/bitstream.h
new file mode 100644
index 00000000000000..28248abe8612a9
--- /dev/null
+++ b/lib/zstd/common/bitstream.h
@@ -0,0 +1,437 @@
+/* ******************************************************************
+ * bitstream
+ * Part of FSE library
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ *
+ * You can contact the author at :
+ * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */
+#ifndef BITSTREAM_H_MODULE
+#define BITSTREAM_H_MODULE
+
+/*
+* This API consists of small unitary functions, which must be inlined for best performance.
+* Since link-time-optimization is not available for all compilers,
+* these functions are defined into a .h to be included.
+*/
+
+/*-****************************************
+* Dependencies
+******************************************/
+#include "mem.h" /* unaligned access routines */
+#include "compiler.h" /* UNLIKELY() */
+#include "debug.h" /* assert(), DEBUGLOG(), RAWLOG() */
+#include "error_private.h" /* error codes and messages */
+
+
+/*=========================================
+* Target specific
+=========================================*/
+
+#define STREAM_ACCUMULATOR_MIN_32 25
+#define STREAM_ACCUMULATOR_MIN_64 57
+#define STREAM_ACCUMULATOR_MIN ((U32)(MEM_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64))
+
+
+/*-******************************************
+* bitStream encoding API (write forward)
+********************************************/
+/* bitStream can mix input from multiple sources.
+ * A critical property of these streams is that they encode and decode in **reverse** direction.
+ * So the first bit sequence you add will be the last to be read, like a LIFO stack.
+ */
+typedef struct {
+ size_t bitContainer;
+ unsigned bitPos;
+ char* startPtr;
+ char* ptr;
+ char* endPtr;
+} BIT_CStream_t;
+
+MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* dstBuffer, size_t dstCapacity);
+MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
+MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC);
+MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC);
+
+/* Start with initCStream, providing the size of buffer to write into.
+* bitStream will never write outside of this buffer.
+* `dstCapacity` must be >= sizeof(bitD->bitContainer), otherwise @return will be an error code.
+*
+* bits are first added to a local register.
+* Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems.
+* Writing data into memory is an explicit operation, performed by the flushBits function.
+* Hence keep track of how many bits are potentially stored into local register to avoid register overflow.
+* After a flushBits, a maximum of 7 bits might still be stored into local register.
+*
+* Avoid storing elements of more than 24 bits if you want compatibility with 32-bits bitstream readers.
+*
+* Last operation is to close the bitStream.
+* The function returns the final size of CStream in bytes.
+* If data couldn't fit into `dstBuffer`, it will return a 0 ( == not storable)
+*/
+
+
+/*-********************************************
+* bitStream decoding API (read backward)
+**********************************************/
+typedef struct {
+ size_t bitContainer;
+ unsigned bitsConsumed;
+ const char* ptr;
+ const char* start;
+ const char* limitPtr;
+} BIT_DStream_t;
+
+typedef enum { BIT_DStream_unfinished = 0,
+ BIT_DStream_endOfBuffer = 1,
+ BIT_DStream_completed = 2,
+ BIT_DStream_overflow = 3 } BIT_DStream_status; /* result of BIT_reloadDStream() */
+ /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
+
+MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
+MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
+MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);
+MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);
+
+
+/* Start by invoking BIT_initDStream().
+* A chunk of the bitStream is then stored into a local register.
+* Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
+* You can then retrieve bitFields stored into the local register, **in reverse order**.
+* Local register is explicitly reloaded from memory by the BIT_reloadDStream() method.
+* A reload guarantees a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished.
+* Otherwise, it can be less than that, so proceed accordingly.
+* Checking if DStream has reached its end can be performed with BIT_endOfDStream().
+*/
+
+
+/*-****************************************
+* unsafe API
+******************************************/
+MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
+/* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */
+
+MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC);
+/* unsafe version; does not check buffer overflow */
+
+MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
+/* faster, but works only if nbBits >= 1 */
+
+
+
+/*-**************************************************************
+* Internal functions
+****************************************************************/
+MEM_STATIC unsigned BIT_highbit32 (U32 val)
+{
+ assert(val != 0);
+ {
+# if (__GNUC__ >= 3) /* Use GCC Intrinsic */
+ return __builtin_clz (val) ^ 31;
+# else /* Software version */
+ static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29,
+ 11, 14, 16, 18, 22, 25, 3, 30,
+ 8, 12, 20, 28, 15, 17, 24, 7,
+ 19, 27, 23, 6, 26, 5, 4, 31 };
+ U32 v = val;
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
+# endif
+ }
+}
+
+/*===== Local Constants =====*/
+static const unsigned BIT_mask[] = {
+ 0, 1, 3, 7, 0xF, 0x1F,
+ 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF,
+ 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0x1FFFF,
+ 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF,
+ 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF, 0x7FFFFFF, 0xFFFFFFF, 0x1FFFFFFF,
+ 0x3FFFFFFF, 0x7FFFFFFF}; /* up to 31 bits */
+#define BIT_MASK_SIZE (sizeof(BIT_mask) / sizeof(BIT_mask[0]))
+
+/*-**************************************************************
+* bitStream encoding
+****************************************************************/
+/*! BIT_initCStream() :
+ * `dstCapacity` must be > sizeof(size_t)
+ * @return : 0 if success,
+ * otherwise an error code (can be tested using ERR_isError()) */
+MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC,
+ void* startPtr, size_t dstCapacity)
+{
+ bitC->bitContainer = 0;
+ bitC->bitPos = 0;
+ bitC->startPtr = (char*)startPtr;
+ bitC->ptr = bitC->startPtr;
+ bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer);
+ if (dstCapacity <= sizeof(bitC->bitContainer)) return ERROR(dstSize_tooSmall);
+ return 0;
+}
+
+/*! BIT_addBits() :
+ * can add up to 31 bits into `bitC`.
+ * Note : does not check for register overflow ! */
+MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC,
+ size_t value, unsigned nbBits)
+{
+ DEBUG_STATIC_ASSERT(BIT_MASK_SIZE == 32);
+ assert(nbBits < BIT_MASK_SIZE);
+ assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
+ bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos;
+ bitC->bitPos += nbBits;
+}
+
+/*! BIT_addBitsFast() :
+ * works only if `value` is _clean_,
+ * meaning all high bits above nbBits are 0 */
+MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC,
+ size_t value, unsigned nbBits)
+{
+ assert((value>>nbBits) == 0);
+ assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
+ bitC->bitContainer |= value << bitC->bitPos;
+ bitC->bitPos += nbBits;
+}
+
+/*! BIT_flushBitsFast() :
+ * assumption : bitContainer has not overflowed
+ * unsafe version; does not check buffer overflow */
+MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC)
+{
+ size_t const nbBytes = bitC->bitPos >> 3;
+ assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
+ assert(bitC->ptr <= bitC->endPtr);
+ MEM_writeLEST(bitC->ptr, bitC->bitContainer);
+ bitC->ptr += nbBytes;
+ bitC->bitPos &= 7;
+ bitC->bitContainer >>= nbBytes*8;
+}
+
+/*! BIT_flushBits() :
+ * assumption : bitContainer has not overflowed
+ * safe version; check for buffer overflow, and prevents it.
+ * note : does not signal buffer overflow.
+ * overflow will be revealed later on using BIT_closeCStream() */
+MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC)
+{
+ size_t const nbBytes = bitC->bitPos >> 3;
+ assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
+ assert(bitC->ptr <= bitC->endPtr);
+ MEM_writeLEST(bitC->ptr, bitC->bitContainer);
+ bitC->ptr += nbBytes;
+ if (bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr;
+ bitC->bitPos &= 7;
+ bitC->bitContainer >>= nbBytes*8;
+}
+
+/*! BIT_closeCStream() :
+ * @return : size of CStream, in bytes,
+ * or 0 if it could not fit into dstBuffer */
+MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC)
+{
+ BIT_addBitsFast(bitC, 1, 1); /* endMark */
+ BIT_flushBits(bitC);
+ if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */
+ return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0);
+}
+
+
+/*-********************************************************
+* bitStream decoding
+**********************************************************/
+/*! BIT_initDStream() :
+ * Initialize a BIT_DStream_t.
+ * `bitD` : a pointer to an already allocated BIT_DStream_t structure.
+ * `srcSize` must be the *exact* size of the bitStream, in bytes.
+ * @return : size of stream (== srcSize), or an errorCode if a problem is detected
+ */
+MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
+{
+ if (srcSize < 1) { ZSTD_memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
+
+ bitD->start = (const char*)srcBuffer;
+ bitD->limitPtr = bitD->start + sizeof(bitD->bitContainer);
+
+ if (srcSize >= sizeof(bitD->bitContainer)) { /* normal case */
+ bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer);
+ bitD->bitContainer = MEM_readLEST(bitD->ptr);
+ { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
+ bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; /* ensures bitsConsumed is always set */
+ if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }
+ } else {
+ bitD->ptr = bitD->start;
+ bitD->bitContainer = *(const BYTE*)(bitD->start);
+ switch(srcSize)
+ {
+ case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);
+ ZSTD_FALLTHROUGH;
+
+ case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);
+ ZSTD_FALLTHROUGH;
+
+ case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);
+ ZSTD_FALLTHROUGH;
+
+ case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24;
+ ZSTD_FALLTHROUGH;
+
+ case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16;
+ ZSTD_FALLTHROUGH;
+
+ case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8;
+ ZSTD_FALLTHROUGH;
+
+ default: break;
+ }
+ { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
+ bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
+ if (lastByte == 0) return ERROR(corruption_detected); /* endMark not present */
+ }
+ bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8;
+ }
+
+ return srcSize;
+}
+
+MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getUpperBits(size_t bitContainer, U32 const start)
+{
+ return bitContainer >> start;
+}
+
+MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits)
+{
+ U32 const regMask = sizeof(bitContainer)*8 - 1;
+ /* if start > regMask, bitstream is corrupted, and result is undefined */
+ assert(nbBits < BIT_MASK_SIZE);
+ return (bitContainer >> (start & regMask)) & BIT_mask[nbBits];
+}
+
+MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits)
+{
+ assert(nbBits < BIT_MASK_SIZE);
+ return bitContainer & BIT_mask[nbBits];
+}
+
+/*! BIT_lookBits() :
+ * Provides next n bits from local register.
+ * local register is not modified.
+ * On 32-bits, maxNbBits==24.
+ * On 64-bits, maxNbBits==56.
+ * @return : value extracted */
+MEM_STATIC FORCE_INLINE_ATTR size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits)
+{
+ /* arbitrate between double-shift and shift+mask */
+#if 1
+ /* if bitD->bitsConsumed + nbBits > sizeof(bitD->bitContainer)*8,
+ * bitstream is likely corrupted, and result is undefined */
+ return BIT_getMiddleBits(bitD->bitContainer, (sizeof(bitD->bitContainer)*8) - bitD->bitsConsumed - nbBits, nbBits);
+#else
+ /* this code path is slower on my os-x laptop */
+ U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
+ return ((bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> 1) >> ((regMask-nbBits) & regMask);
+#endif
+}
+
+/*! BIT_lookBitsFast() :
+ * unsafe version; only works if nbBits >= 1 */
+MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits)
+{
+ U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
+ assert(nbBits >= 1);
+ return (bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> (((regMask+1)-nbBits) & regMask);
+}
+
+MEM_STATIC FORCE_INLINE_ATTR void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
+{
+ bitD->bitsConsumed += nbBits;
+}
+
+/*! BIT_readBits() :
+ * Read (consume) next n bits from local register and update.
+ * Pay attention to not read more than nbBits contained into local register.
+ * @return : extracted value. */
+MEM_STATIC FORCE_INLINE_ATTR size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits)
+{
+ size_t const value = BIT_lookBits(bitD, nbBits);
+ BIT_skipBits(bitD, nbBits);
+ return value;
+}
+
+/*! BIT_readBitsFast() :
+ * unsafe version; only works if nbBits >= 1 */
+MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits)
+{
+ size_t const value = BIT_lookBitsFast(bitD, nbBits);
+ assert(nbBits >= 1);
+ BIT_skipBits(bitD, nbBits);
+ return value;
+}
+
+/*! BIT_reloadDStreamFast() :
+ * Similar to BIT_reloadDStream(), but with two differences:
+ * 1. bitsConsumed <= sizeof(bitD->bitContainer)*8 must hold!
+ * 2. Returns BIT_DStream_overflow when bitD->ptr < bitD->limitPtr, at this
+ * point you must use BIT_reloadDStream() to reload.
+ */
+MEM_STATIC BIT_DStream_status BIT_reloadDStreamFast(BIT_DStream_t* bitD)
+{
+ if (UNLIKELY(bitD->ptr < bitD->limitPtr))
+ return BIT_DStream_overflow;
+ assert(bitD->bitsConsumed <= sizeof(bitD->bitContainer)*8);
+ bitD->ptr -= bitD->bitsConsumed >> 3;
+ bitD->bitsConsumed &= 7;
+ bitD->bitContainer = MEM_readLEST(bitD->ptr);
+ return BIT_DStream_unfinished;
+}
+
+/*! BIT_reloadDStream() :
+ * Refill `bitD` from buffer previously set in BIT_initDStream() .
+ * This function is safe; it guarantees it will not read beyond src buffer.
+ * @return : status of `BIT_DStream_t` internal register.
+ * when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */
+MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
+{
+ if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* overflow detected, like end of stream */
+ return BIT_DStream_overflow;
+
+ if (bitD->ptr >= bitD->limitPtr) {
+ return BIT_reloadDStreamFast(bitD);
+ }
+ if (bitD->ptr == bitD->start) {
+ if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
+ return BIT_DStream_completed;
+ }
+ /* start < ptr < limitPtr */
+ { U32 nbBytes = bitD->bitsConsumed >> 3;
+ BIT_DStream_status result = BIT_DStream_unfinished;
+ if (bitD->ptr - nbBytes < bitD->start) {
+ nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */
+ result = BIT_DStream_endOfBuffer;
+ }
+ bitD->ptr -= nbBytes;
+ bitD->bitsConsumed -= nbBytes*8;
+ bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD->bitContainer), otherwise bitD->ptr == bitD->start */
+ return result;
+ }
+}
+
+/*! BIT_endOfDStream() :
+ * @return : 1 if DStream has _exactly_ reached its end (all bits consumed).
+ */
+MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
+{
+ return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
+}
+
+
+#endif /* BITSTREAM_H_MODULE */
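As the header's comments describe, fields are appended forward by the writer and consumed in reverse (LIFO) order by the reader. A short round-trip sketch using the API above (illustrative; assumes this header's dependencies are included, no error checking):

/* Sketch only: write two fields, read them back in reverse order. */
static void example_bit_roundtrip(void)
{
	char buf[16];
	BIT_CStream_t c;
	BIT_DStream_t d;
	size_t nbBytes;

	BIT_initCStream(&c, buf, sizeof(buf));
	BIT_addBits(&c, 0x5, 3);	/* first field, 3 bits */
	BIT_addBits(&c, 0x21, 6);	/* second field, 6 bits */
	BIT_flushBits(&c);
	nbBytes = BIT_closeCStream(&c);	/* 0 would mean "did not fit" */

	BIT_initDStream(&d, buf, nbBytes);
	(void)BIT_readBits(&d, 6);	/* reads 0x21: last written, first read */
	(void)BIT_readBits(&d, 3);	/* reads 0x5 */
	/* BIT_endOfDStream(&d) now reports that all bits were consumed. */
}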
diff --git a/lib/zstd/common/compiler.h b/lib/zstd/common/compiler.h
new file mode 100644
index 00000000000000..a1a051e4bce66c
--- /dev/null
+++ b/lib/zstd/common/compiler.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_COMPILER_H
+#define ZSTD_COMPILER_H
+
+/*-*******************************************************
+* Compiler specifics
+*********************************************************/
+/* force inlining */
+
+#if (defined(__GNUC__) && !defined(__STRICT_ANSI__)) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
+# define INLINE_KEYWORD inline
+#else
+# define INLINE_KEYWORD
+#endif
+
+#define FORCE_INLINE_ATTR __attribute__((always_inline))
+
+
+/*
+ On MSVC qsort requires that functions passed into it use the __cdecl calling convention (CC).
+ This explicitly marks such functions as __cdecl so that the code will still compile
+ if a CC other than __cdecl has been made the default.
+*/
+#define WIN_CDECL
+
+/*
+ * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant
+ * parameters. They must be inlined for the compiler to eliminate the constant
+ * branches.
+ */
+#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR
+/*
+ * HINT_INLINE is used to help the compiler generate better code. It is *not*
+ * used for "templates", so it can be tweaked based on the compiler's
+ * performance.
+ *
+ * gcc-4.8 and gcc-4.9 have been shown to benefit from leaving off the
+ * always_inline attribute.
+ *
+ * clang up to 5.0.0 (trunk) benefit tremendously from the always_inline
+ * attribute.
+ */
+#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 8 && __GNUC__ < 5
+# define HINT_INLINE static INLINE_KEYWORD
+#else
+# define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR
+#endif
+
+/* UNUSED_ATTR tells the compiler it is okay if the function is unused. */
+#define UNUSED_ATTR __attribute__((unused))
+
+/* force no inlining */
+#define FORCE_NOINLINE static __attribute__((__noinline__))
+
+
+/* target attribute */
+#ifndef __has_attribute
+ #define __has_attribute(x) 0 /* Compatibility with non-clang compilers. */
+#endif
+#define TARGET_ATTRIBUTE(target) __attribute__((__target__(target)))
+
+/* Enable runtime BMI2 dispatch based on the CPU.
+ * Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default.
+ */
+#ifndef DYNAMIC_BMI2
+ #if ((defined(__clang__) && __has_attribute(__target__)) \
+ || (defined(__GNUC__) \
+ && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \
+ && (defined(__x86_64__) || defined(_M_X86)) \
+ && !defined(__BMI2__)
+ # define DYNAMIC_BMI2 1
+ #else
+ # define DYNAMIC_BMI2 0
+ #endif
+#endif
+
+/* prefetch
+ * can be disabled, by declaring NO_PREFETCH build macro */
+#if ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
+# define PREFETCH_L1(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
+# define PREFETCH_L2(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 2 /* locality */)
+#elif defined(__aarch64__)
+# define PREFETCH_L1(ptr) __asm__ __volatile__("prfm pldl1keep, %0" ::"Q"(*(ptr)))
+# define PREFETCH_L2(ptr) __asm__ __volatile__("prfm pldl2keep, %0" ::"Q"(*(ptr)))
+#else
+# define PREFETCH_L1(ptr) (void)(ptr) /* disabled */
+# define PREFETCH_L2(ptr) (void)(ptr) /* disabled */
+#endif /* NO_PREFETCH */
+
+#define CACHELINE_SIZE 64
+
+#define PREFETCH_AREA(p, s) { \
+ const char* const _ptr = (const char*)(p); \
+ size_t const _size = (size_t)(s); \
+ size_t _pos; \
+ for (_pos=0; _pos<_size; _pos+=CACHELINE_SIZE) { \
+ PREFETCH_L2(_ptr + _pos); \
+ } \
+}
+
+/* vectorization
+ * older GCC (pre gcc-4.3 picked as the cutoff) uses a different syntax */
+#if !defined(__INTEL_COMPILER) && !defined(__clang__) && defined(__GNUC__)
+# if (__GNUC__ == 4 && __GNUC_MINOR__ > 3) || (__GNUC__ >= 5)
+# define DONT_VECTORIZE __attribute__((optimize("no-tree-vectorize")))
+# else
+# define DONT_VECTORIZE _Pragma("GCC optimize(\"no-tree-vectorize\")")
+# endif
+#else
+# define DONT_VECTORIZE
+#endif
+
+/* Tell the compiler that a branch is likely or unlikely.
+ * Only use these macros if it causes the compiler to generate better code.
+ * If you can remove a LIKELY/UNLIKELY annotation without speed changes in gcc
+ * and clang, please do.
+ */
+#define LIKELY(x) (__builtin_expect((x), 1))
+#define UNLIKELY(x) (__builtin_expect((x), 0))
+
+/* disable warnings */
+
+/* Like DYNAMIC_BMI2, but for compile-time determination of BMI2 support */
+
+
+/* compat. with non-clang compilers */
+#ifndef __has_builtin
+# define __has_builtin(x) 0
+#endif
+
+/* compat. with non-clang compilers */
+#ifndef __has_feature
+# define __has_feature(x) 0
+#endif
+
+/* C-language Attributes are added in C23. */
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ > 201710L) && defined(__has_c_attribute)
+# define ZSTD_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
+#else
+# define ZSTD_HAS_C_ATTRIBUTE(x) 0
+#endif
+
+/* Only use C++ attributes in C++. Some compilers report support for C++
+ * attributes when compiling with C.
+ */
+#define ZSTD_HAS_CPP_ATTRIBUTE(x) 0
+
+/* Define ZSTD_FALLTHROUGH macro for annotating switch case with the 'fallthrough' attribute.
+ * - C23: https://en.cppreference.com/w/c/language/attributes/fallthrough
+ * - CPP17: https://en.cppreference.com/w/cpp/language/attributes/fallthrough
+ * - Else: __attribute__((__fallthrough__))
+ */
+#define ZSTD_FALLTHROUGH fallthrough
+
+/* detects whether we are being compiled under msan */
+
+
+/* detects whether we are being compiled under asan */
+
+
+#endif /* ZSTD_COMPILER_H */
diff --git a/lib/zstd/common/cpu.h b/lib/zstd/common/cpu.h
new file mode 100644
index 00000000000000..0db7b42407eea2
--- /dev/null
+++ b/lib/zstd/common/cpu.h
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_COMMON_CPU_H
+#define ZSTD_COMMON_CPU_H
+
+/*
+ * Implementation taken from folly/CpuId.h
+ * https://github.com/facebook/folly/blob/master/folly/CpuId.h
+ */
+
+#include "mem.h"
+
+
+typedef struct {
+ U32 f1c;
+ U32 f1d;
+ U32 f7b;
+ U32 f7c;
+} ZSTD_cpuid_t;
+
+MEM_STATIC ZSTD_cpuid_t ZSTD_cpuid(void) {
+ U32 f1c = 0;
+ U32 f1d = 0;
+ U32 f7b = 0;
+ U32 f7c = 0;
+#if defined(__i386__) && defined(__PIC__) && !defined(__clang__) && defined(__GNUC__)
+  /* The following block is like the normal cpuid branch below, but gcc
+   * reserves ebx for use as its PIC register, so we must specially
+   * handle the save and restore to avoid clobbering the register.
+   */
+ U32 n;
+ __asm__(
+ "pushl %%ebx\n\t"
+ "cpuid\n\t"
+ "popl %%ebx\n\t"
+ : "=a"(n)
+ : "a"(0)
+ : "ecx", "edx");
+ if (n >= 1) {
+ U32 f1a;
+ __asm__(
+ "pushl %%ebx\n\t"
+ "cpuid\n\t"
+ "popl %%ebx\n\t"
+ : "=a"(f1a), "=c"(f1c), "=d"(f1d)
+ : "a"(1));
+ }
+ if (n >= 7) {
+ __asm__(
+ "pushl %%ebx\n\t"
+ "cpuid\n\t"
+ "movl %%ebx, %%eax\n\t"
+ "popl %%ebx"
+ : "=a"(f7b), "=c"(f7c)
+ : "a"(7), "c"(0)
+ : "edx");
+ }
+#elif defined(__x86_64__) || defined(_M_X64) || defined(__i386__)
+ U32 n;
+ __asm__("cpuid" : "=a"(n) : "a"(0) : "ebx", "ecx", "edx");
+ if (n >= 1) {
+ U32 f1a;
+ __asm__("cpuid" : "=a"(f1a), "=c"(f1c), "=d"(f1d) : "a"(1) : "ebx");
+ }
+ if (n >= 7) {
+ U32 f7a;
+ __asm__("cpuid"
+ : "=a"(f7a), "=b"(f7b), "=c"(f7c)
+ : "a"(7), "c"(0)
+ : "edx");
+ }
+#endif
+ {
+ ZSTD_cpuid_t cpuid;
+ cpuid.f1c = f1c;
+ cpuid.f1d = f1d;
+ cpuid.f7b = f7b;
+ cpuid.f7c = f7c;
+ return cpuid;
+ }
+}
+
+#define X(name, r, bit) \
+ MEM_STATIC int ZSTD_cpuid_##name(ZSTD_cpuid_t const cpuid) { \
+ return ((cpuid.r) & (1U << bit)) != 0; \
+ }
+
+/* cpuid(1): Processor Info and Feature Bits. */
+#define C(name, bit) X(name, f1c, bit)
+ C(sse3, 0)
+ C(pclmuldq, 1)
+ C(dtes64, 2)
+ C(monitor, 3)
+ C(dscpl, 4)
+ C(vmx, 5)
+ C(smx, 6)
+ C(eist, 7)
+ C(tm2, 8)
+ C(ssse3, 9)
+ C(cnxtid, 10)
+ C(fma, 12)
+ C(cx16, 13)
+ C(xtpr, 14)
+ C(pdcm, 15)
+ C(pcid, 17)
+ C(dca, 18)
+ C(sse41, 19)
+ C(sse42, 20)
+ C(x2apic, 21)
+ C(movbe, 22)
+ C(popcnt, 23)
+ C(tscdeadline, 24)
+ C(aes, 25)
+ C(xsave, 26)
+ C(osxsave, 27)
+ C(avx, 28)
+ C(f16c, 29)
+ C(rdrand, 30)
+#undef C
+#define D(name, bit) X(name, f1d, bit)
+ D(fpu, 0)
+ D(vme, 1)
+ D(de, 2)
+ D(pse, 3)
+ D(tsc, 4)
+ D(msr, 5)
+ D(pae, 6)
+ D(mce, 7)
+ D(cx8, 8)
+ D(apic, 9)
+ D(sep, 11)
+ D(mtrr, 12)
+ D(pge, 13)
+ D(mca, 14)
+ D(cmov, 15)
+ D(pat, 16)
+ D(pse36, 17)
+ D(psn, 18)
+ D(clfsh, 19)
+ D(ds, 21)
+ D(acpi, 22)
+ D(mmx, 23)
+ D(fxsr, 24)
+ D(sse, 25)
+ D(sse2, 26)
+ D(ss, 27)
+ D(htt, 28)
+ D(tm, 29)
+ D(pbe, 31)
+#undef D
+
+/* cpuid(7): Extended Features. */
+#define B(name, bit) X(name, f7b, bit)
+ B(bmi1, 3)
+ B(hle, 4)
+ B(avx2, 5)
+ B(smep, 7)
+ B(bmi2, 8)
+ B(erms, 9)
+ B(invpcid, 10)
+ B(rtm, 11)
+ B(mpx, 14)
+ B(avx512f, 16)
+ B(avx512dq, 17)
+ B(rdseed, 18)
+ B(adx, 19)
+ B(smap, 20)
+ B(avx512ifma, 21)
+ B(pcommit, 22)
+ B(clflushopt, 23)
+ B(clwb, 24)
+ B(avx512pf, 26)
+ B(avx512er, 27)
+ B(avx512cd, 28)
+ B(sha, 29)
+ B(avx512bw, 30)
+ B(avx512vl, 31)
+#undef B
+#define C(name, bit) X(name, f7c, bit)
+ C(prefetchwt1, 0)
+ C(avx512vbmi, 1)
+#undef C
+
+#undef X
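+
+/* Usage sketch (illustrative only): probe the CPU once, then query the
+ * feature predicates generated above, e.g. to drive DYNAMIC_BMI2-style
+ * dispatch:
+ *
+ *   ZSTD_cpuid_t const cpuid = ZSTD_cpuid();
+ *   int const hasBmi2 = ZSTD_cpuid_bmi2(cpuid);   // cpuid(7).EBX bit 8
+ *   int const hasAvx2 = ZSTD_cpuid_avx2(cpuid);   // cpuid(7).EBX bit 5
+ */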
+
+#endif /* ZSTD_COMMON_CPU_H */
diff --git a/lib/zstd/common/debug.c b/lib/zstd/common/debug.c
new file mode 100644
index 00000000000000..bb863c9ea61648
--- /dev/null
+++ b/lib/zstd/common/debug.c
@@ -0,0 +1,24 @@
+/* ******************************************************************
+ * debug
+ * Part of FSE library
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ *
+ * You can contact the author at :
+ * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */
+
+
+/*
+ * This module only hosts one global variable
+ * which can be used to dynamically influence the verbosity of traces,
+ * such as DEBUGLOG and RAWLOG
+ */
+
+#include "debug.h"
+
+int g_debuglevel = DEBUGLEVEL;
diff --git a/lib/zstd/common/debug.h b/lib/zstd/common/debug.h
new file mode 100644
index 00000000000000..6dd88d1fbd02ca
--- /dev/null
+++ b/lib/zstd/common/debug.h
@@ -0,0 +1,101 @@
+/* ******************************************************************
+ * debug
+ * Part of FSE library
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ *
+ * You can contact the author at :
+ * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */
+
+
+/*
+ * The purpose of this header is to enable debug functions.
+ * They regroup assert(), DEBUGLOG() and RAWLOG() for run-time,
+ * and DEBUG_STATIC_ASSERT() for compile-time.
+ *
+ * By default, DEBUGLEVEL==0, which means run-time debug is disabled.
+ *
+ * Level 1 enables assert() only.
+ * Starting level 2, traces can be generated and pushed to stderr.
+ * The higher the level, the more verbose the traces.
+ *
+ * It's possible to dynamically adjust the level using the variable g_debuglevel,
+ * which is only declared if DEBUGLEVEL>=2,
+ * and is a global variable, not multi-thread protected (use with care)
+ */
+
+#ifndef DEBUG_H_12987983217
+#define DEBUG_H_12987983217
+
+
+
+/* static assert is triggered at compile time, leaving no runtime artefact.
+ * static assert only works with compile-time constants.
+ * Also, this variant can only be used inside a function. */
+#define DEBUG_STATIC_ASSERT(c) (void)sizeof(char[(c) ? 1 : -1])
+
+
+/* DEBUGLEVEL is expected to be defined externally,
+ * typically through compiler command line.
+ * Value must be a number. */
+#ifndef DEBUGLEVEL
+# define DEBUGLEVEL 0
+#endif
+
+
+/* recommended values for DEBUGLEVEL :
+ * 0 : release mode, no debug, all run-time checks disabled
+ * 1 : enables assert() only, no display
+ * 2 : reserved, for currently active debug path
+ * 3 : events once per object lifetime (CCtx, CDict, etc.)
+ * 4 : events once per frame
+ * 5 : events once per block
+ * 6 : events once per sequence (verbose)
+ * 7+: events at every position (*very* verbose)
+ *
+ * It's generally inconvenient to output traces > 5.
+ * In that case, it's possible to selectively trigger high-verbosity levels
+ * by modifying g_debuglevel.
+ */
+
+#if (DEBUGLEVEL>=1)
+# define ZSTD_DEPS_NEED_ASSERT
+# include "zstd_deps.h"
+#else
+# ifndef assert /* assert may be already defined, due to prior #include <assert.h> */
+# define assert(condition) ((void)0) /* disable assert (default) */
+# endif
+#endif
+
+#if (DEBUGLEVEL>=2)
+# define ZSTD_DEPS_NEED_IO
+# include "zstd_deps.h"
+extern int g_debuglevel; /* the variable is only declared,
+ it actually lives in debug.c,
+ and is shared by the whole process.
+ It's not thread-safe.
+ It's useful when enabling very verbose levels
+ on selective conditions (such as position in src) */
+
+# define RAWLOG(l, ...) { \
+ if (l<=g_debuglevel) { \
+ ZSTD_DEBUG_PRINT(__VA_ARGS__); \
+ } }
+# define DEBUGLOG(l, ...) { \
+ if (l<=g_debuglevel) { \
+ ZSTD_DEBUG_PRINT(__FILE__ ": " __VA_ARGS__); \
+ ZSTD_DEBUG_PRINT(" \n"); \
+ } }
+#else
+# define RAWLOG(l, ...) {} /* disabled */
+# define DEBUGLOG(l, ...) {} /* disabled */
+#endif
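+
+/* Usage sketch (illustrative only; srcSize and blockNb are hypothetical
+ * caller variables). The first argument is the verbosity level at which the
+ * trace becomes active, so with DEBUGLEVEL=4 the per-frame message prints
+ * while the per-block one stays silent:
+ *
+ *   DEBUG_STATIC_ASSERT(sizeof(size_t) >= 4);                  // compile-time check
+ *   DEBUGLOG(4, "decoding frame of size %u", (unsigned)srcSize);
+ *   DEBUGLOG(5, "decoding block %u", blockNb);                  // silent at DEBUGLEVEL==4
+ *   RAWLOG(4, "%02X ", someByte);                               // raw trace: no file prefix, no newline
+ */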
+
+
+
+#endif /* DEBUG_H_12987983217 */
diff --git a/lib/zstd/common/entropy_common.c b/lib/zstd/common/entropy_common.c
new file mode 100644
index 00000000000000..53b47a2b52ff21
--- /dev/null
+++ b/lib/zstd/common/entropy_common.c
@@ -0,0 +1,357 @@
+/* ******************************************************************
+ * Common functions of New Generation Entropy library
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ *
+ * You can contact the author at :
+ * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ * - Public forum : https://groups.google.com/forum/#!forum/lz4c
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */
+
+/* *************************************
+* Dependencies
+***************************************/
+#include "mem.h"
+#include "error_private.h" /* ERR_*, ERROR */
+#define FSE_STATIC_LINKING_ONLY /* FSE_MIN_TABLELOG */
+#include "fse.h"
+#define HUF_STATIC_LINKING_ONLY /* HUF_TABLELOG_ABSOLUTEMAX */
+#include "huf.h"
+
+
+/*=== Version ===*/
+unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; }
+
+
+/*=== Error Management ===*/
+unsigned FSE_isError(size_t code) { return ERR_isError(code); }
+const char* FSE_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+unsigned HUF_isError(size_t code) { return ERR_isError(code); }
+const char* HUF_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+
+/*-**************************************************************
+* FSE NCount encoding-decoding
+****************************************************************/
+static U32 FSE_ctz(U32 val)
+{
+ assert(val != 0);
+ {
+# if (__GNUC__ >= 3) /* GCC Intrinsic */
+ return __builtin_ctz(val);
+# else /* Software version */
+ U32 count = 0;
+ while ((val & 1) == 0) {
+ val >>= 1;
+ ++count;
+ }
+ return count;
+# endif
+ }
+}
+
+FORCE_INLINE_TEMPLATE
+size_t FSE_readNCount_body(short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+ const void* headerBuffer, size_t hbSize)
+{
+ const BYTE* const istart = (const BYTE*) headerBuffer;
+ const BYTE* const iend = istart + hbSize;
+ const BYTE* ip = istart;
+ int nbBits;
+ int remaining;
+ int threshold;
+ U32 bitStream;
+ int bitCount;
+ unsigned charnum = 0;
+ unsigned const maxSV1 = *maxSVPtr + 1;
+ int previous0 = 0;
+
+ if (hbSize < 8) {
+ /* This function only works when hbSize >= 8 */
+ char buffer[8] = {0};
+ ZSTD_memcpy(buffer, headerBuffer, hbSize);
+ { size_t const countSize = FSE_readNCount(normalizedCounter, maxSVPtr, tableLogPtr,
+ buffer, sizeof(buffer));
+ if (FSE_isError(countSize)) return countSize;
+ if (countSize > hbSize) return ERROR(corruption_detected);
+ return countSize;
+ } }
+ assert(hbSize >= 8);
+
+ /* init */
+ ZSTD_memset(normalizedCounter, 0, (*maxSVPtr+1) * sizeof(normalizedCounter[0])); /* all symbols not present in NCount have a frequency of 0 */
+ bitStream = MEM_readLE32(ip);
+ nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */
+ if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
+ bitStream >>= 4;
+ bitCount = 4;
+ *tableLogPtr = nbBits;
+ remaining = (1<<nbBits)+1;
+ threshold = 1<<nbBits;
+ nbBits++;
+
+ for (;;) {
+ if (previous0) {
+ /* Count the number of repeats. Each time the
+ * 2-bit repeat code is 0b11 there is another
+ * repeat.
+ * Avoid UB by setting the high bit to 1.
+ */
+ int repeats = FSE_ctz(~bitStream | 0x80000000) >> 1;
+ while (repeats >= 12) {
+ charnum += 3 * 12;
+ if (LIKELY(ip <= iend-7)) {
+ ip += 3;
+ } else {
+ bitCount -= (int)(8 * (iend - 7 - ip));
+ bitCount &= 31;
+ ip = iend - 4;
+ }
+ bitStream = MEM_readLE32(ip) >> bitCount;
+ repeats = FSE_ctz(~bitStream | 0x80000000) >> 1;
+ }
+ charnum += 3 * repeats;
+ bitStream >>= 2 * repeats;
+ bitCount += 2 * repeats;
+
+ /* Add the final repeat which isn't 0b11. */
+ assert((bitStream & 3) < 3);
+ charnum += bitStream & 3;
+ bitCount += 2;
+
+ /* This is an error, but break and return an error
+ * at the end, because returning out of a loop makes
+ * it harder for the compiler to optimize.
+ */
+ if (charnum >= maxSV1) break;
+
+ /* We don't need to set the normalized count to 0
+ * because we already memset the whole buffer to 0.
+ */
+
+ if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
+ assert((bitCount >> 3) <= 3); /* For first condition to work */
+ ip += bitCount>>3;
+ bitCount &= 7;
+ } else {
+ bitCount -= (int)(8 * (iend - 4 - ip));
+ bitCount &= 31;
+ ip = iend - 4;
+ }
+ bitStream = MEM_readLE32(ip) >> bitCount;
+ }
+ {
+ int const max = (2*threshold-1) - remaining;
+ int count;
+
+ if ((bitStream & (threshold-1)) < (U32)max) {
+ count = bitStream & (threshold-1);
+ bitCount += nbBits-1;
+ } else {
+ count = bitStream & (2*threshold-1);
+ if (count >= threshold) count -= max;
+ bitCount += nbBits;
+ }
+
+ count--; /* extra accuracy */
+ /* When it matters (small blocks), this is a
+ * predictable branch, because we don't use -1.
+ */
+ if (count >= 0) {
+ remaining -= count;
+ } else {
+ assert(count == -1);
+ remaining += count;
+ }
+ normalizedCounter[charnum++] = (short)count;
+ previous0 = !count;
+
+ assert(threshold > 1);
+ if (remaining < threshold) {
+ /* This branch can be folded into the
+ * threshold update condition because we
+ * know that threshold > 1.
+ */
+ if (remaining <= 1) break;
+ nbBits = BIT_highbit32(remaining) + 1;
+ threshold = 1 << (nbBits - 1);
+ }
+ if (charnum >= maxSV1) break;
+
+ if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
+ ip += bitCount>>3;
+ bitCount &= 7;
+ } else {
+ bitCount -= (int)(8 * (iend - 4 - ip));
+ bitCount &= 31;
+ ip = iend - 4;
+ }
+ bitStream = MEM_readLE32(ip) >> bitCount;
+ } }
+ if (remaining != 1) return ERROR(corruption_detected);
+ /* Only possible when there are too many zeros. */
+ if (charnum > maxSV1) return ERROR(maxSymbolValue_tooSmall);
+ if (bitCount > 32) return ERROR(corruption_detected);
+ *maxSVPtr = charnum-1;
+
+ ip += (bitCount+7)>>3;
+ return ip-istart;
+}
+
+/* Avoids the FORCE_INLINE of the _body() function. */
+static size_t FSE_readNCount_body_default(
+ short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+ const void* headerBuffer, size_t hbSize)
+{
+ return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
+}
+
+#if DYNAMIC_BMI2
+TARGET_ATTRIBUTE("bmi2") static size_t FSE_readNCount_body_bmi2(
+ short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+ const void* headerBuffer, size_t hbSize)
+{
+ return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
+}
+#endif
+
+size_t FSE_readNCount_bmi2(
+ short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+ const void* headerBuffer, size_t hbSize, int bmi2)
+{
+#if DYNAMIC_BMI2
+ if (bmi2) {
+ return FSE_readNCount_body_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
+ }
+#endif
+ (void)bmi2;
+ return FSE_readNCount_body_default(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
+}
+
+size_t FSE_readNCount(
+ short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+ const void* headerBuffer, size_t hbSize)
+{
+ return FSE_readNCount_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize, /* bmi2 */ 0);
+}
+
+
+/*! HUF_readStats() :
+ Read compact Huffman tree, saved by HUF_writeCTable().
+ `huffWeight` is destination buffer.
+ `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32.
+    @return : size read from `src`, or an error code.
+    Note : needed by HUF_readCTable() and HUF_readDTableX?().
+*/
+size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+ U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize)
+{
+ U32 wksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
+ return HUF_readStats_wksp(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, wksp, sizeof(wksp), /* bmi2 */ 0);
+}
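+
+/* Caller-side sketch (illustrative only; src/srcSize are assumed to point at
+ * a Huffman table header, and the buffer sizes rely on the HUF_SYMBOLVALUE_MAX
+ * and HUF_TABLELOG_MAX limits declared in huf.h):
+ *
+ *   BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];
+ *   U32  rankStats[HUF_TABLELOG_MAX + 1];
+ *   U32  nbSymbols, tableLog;
+ *   size_t const hSize = HUF_readStats(huffWeight, sizeof(huffWeight),
+ *                                      rankStats, &nbSymbols, &tableLog,
+ *                                      src, srcSize);
+ *   if (HUF_isError(hSize)) return hSize;   // truncated or corrupted header
+ */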
+
+FORCE_INLINE_TEMPLATE size_t
+HUF_readStats_body(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+ U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize,
+ void* workSpace, size_t wkspSize,
+ int bmi2)
+{
+ U32 weightTotal;
+ const BYTE* ip = (const BYTE*) src;
+ size_t iSize;
+ size_t oSize;
+
+ if (!srcSize) return ERROR(srcSize_wrong);
+ iSize = ip[0];
+    /* ZSTD_memset(huffWeight, 0, hwSize); */ /* is not necessary, even though some analyzers complain ... */
+
+ if (iSize >= 128) { /* special header */
+ oSize = iSize - 127;
+ iSize = ((oSize+1)/2);
+ if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+ if (oSize >= hwSize) return ERROR(corruption_detected);
+ ip += 1;
+ { U32 n;
+ for (n=0; n<oSize; n+=2) {
+ huffWeight[n] = ip[n/2] >> 4;
+ huffWeight[n+1] = ip[n/2] & 15;
+ } } }
+ else { /* header compressed with FSE (normal case) */
+ if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+ /* max (hwSize-1) values decoded, as last one is implied */
+ oSize = FSE_decompress_wksp_bmi2(huffWeight, hwSize-1, ip+1, iSize, 6, workSpace, wkspSize, bmi2);
+ if (FSE_isError(oSize)) return oSize;
+ }
+
+ /* collect weight stats */
+ ZSTD_memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
+ weightTotal = 0;
+ { U32 n; for (n=0; n<oSize; n++) {
+ if (huffWeight[n] >= HUF_TABLELOG_MAX) return ERROR(corruption_detected);
+ rankStats[huffWeight[n]]++;
+ weightTotal += (1 << huffWeight[n]) >> 1;
+ } }
+ if (weightTotal == 0) return ERROR(corruption_detected);
+
+ /* get last non-null symbol weight (implied, total must be 2^n) */
+ { U32 const tableLog = BIT_highbit32(weightTotal) + 1;
+ if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
+ *tableLogPtr = tableLog;
+ /* determine last weight */
+ { U32 const total = 1 << tableLog;
+ U32 const rest = total - weightTotal;
+ U32 const verif = 1 << BIT_highbit32(rest);
+ U32 const lastWeight = BIT_highbit32(rest) + 1;
+ if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */
+ huffWeight[oSize] = (BYTE)lastWeight;
+ rankStats[lastWeight]++;
+ } }
+
+ /* check tree construction validity */
+ if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */
+
+ /* results */
+ *nbSymbolsPtr = (U32)(oSize+1);
+ return iSize+1;
+}
+
+/* Avoids the FORCE_INLINE of the _body() function. */
+static size_t HUF_readStats_body_default(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+ U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize,
+ void* workSpace, size_t wkspSize)
+{
+ return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 0);
+}
+
+#if DYNAMIC_BMI2
+static TARGET_ATTRIBUTE("bmi2") size_t HUF_readStats_body_bmi2(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+ U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize,
+ void* workSpace, size_t wkspSize)
+{
+ return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 1);
+}
+#endif
+
+size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+ U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize,
+ void* workSpace, size_t wkspSize,
+ int bmi2)
+{
+#if DYNAMIC_BMI2
+ if (bmi2) {
+ return HUF_readStats_body_bmi2(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
+ }
+#endif
+ (void)bmi2;
+ return HUF_readStats_body_default(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
+}
diff --git a/lib/zstd/common/error_private.c b/lib/zstd/common/error_private.c
new file mode 100644
index 00000000000000..6d1135f8c37330
--- /dev/null
+++ b/lib/zstd/common/error_private.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* The purpose of this file is to have a single list of error strings embedded in binary */
+
+#include "error_private.h"
+
+const char* ERR_getErrorString(ERR_enum code)
+{
+#ifdef ZSTD_STRIP_ERROR_STRINGS
+ (void)code;
+ return "Error strings stripped";
+#else
+ static const char* const notErrorCode = "Unspecified error code";
+ switch( code )
+ {
+ case PREFIX(no_error): return "No error detected";
+ case PREFIX(GENERIC): return "Error (generic)";
+ case PREFIX(prefix_unknown): return "Unknown frame descriptor";
+ case PREFIX(version_unsupported): return "Version not supported";
+ case PREFIX(frameParameter_unsupported): return "Unsupported frame parameter";
+ case PREFIX(frameParameter_windowTooLarge): return "Frame requires too much memory for decoding";
+ case PREFIX(corruption_detected): return "Corrupted block detected";
+ case PREFIX(checksum_wrong): return "Restored data doesn't match checksum";
+ case PREFIX(parameter_unsupported): return "Unsupported parameter";
+ case PREFIX(parameter_outOfBound): return "Parameter is out of bound";
+ case PREFIX(init_missing): return "Context should be init first";
+ case PREFIX(memory_allocation): return "Allocation error : not enough memory";
+ case PREFIX(workSpace_tooSmall): return "workSpace buffer is not large enough";
+ case PREFIX(stage_wrong): return "Operation not authorized at current processing stage";
+ case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported";
+ case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large";
+ case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small";
+ case PREFIX(dictionary_corrupted): return "Dictionary is corrupted";
+ case PREFIX(dictionary_wrong): return "Dictionary mismatch";
+ case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples";
+ case PREFIX(dstSize_tooSmall): return "Destination buffer is too small";
+ case PREFIX(srcSize_wrong): return "Src size is incorrect";
+ case PREFIX(dstBuffer_null): return "Operation on NULL destination buffer";
+ /* following error codes are not stable and may be removed or changed in a future version */
+ case PREFIX(frameIndex_tooLarge): return "Frame index is too large";
+ case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking";
+ case PREFIX(dstBuffer_wrong): return "Destination buffer is wrong";
+ case PREFIX(srcBuffer_wrong): return "Source buffer is wrong";
+ case PREFIX(maxCode):
+ default: return notErrorCode;
+ }
+#endif
+}
diff --git a/lib/zstd/common/error_private.h b/lib/zstd/common/error_private.h
new file mode 100644
index 00000000000000..d14e686adf9514
--- /dev/null
+++ b/lib/zstd/common/error_private.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* Note : this module is expected to remain private, do not expose it */
+
+#ifndef ERROR_H_MODULE
+#define ERROR_H_MODULE
+
+
+
+/* ****************************************
+* Dependencies
+******************************************/
+#include "zstd_deps.h" /* size_t */
+#include <linux/zstd_errors.h> /* enum list */
+
+
+/* ****************************************
+* Compiler-specific
+******************************************/
+#define ERR_STATIC static __attribute__((unused))
+
+
+/*-****************************************
+* Customization (error_public.h)
+******************************************/
+typedef ZSTD_ErrorCode ERR_enum;
+#define PREFIX(name) ZSTD_error_##name
+
+
+/*-****************************************
+* Error codes handling
+******************************************/
+#undef ERROR /* already defined on Visual Studio */
+#define ERROR(name) ZSTD_ERROR(name)
+#define ZSTD_ERROR(name) ((size_t)-PREFIX(name))
+
+ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }
+
+ERR_STATIC ERR_enum ERR_getErrorCode(size_t code) { if (!ERR_isError(code)) return (ERR_enum)0; return (ERR_enum) (0-code); }
+
+/* check and forward error code */
+#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
+#define CHECK_F(f) { CHECK_V_F(_var_err__, f); }
+
+
+/*-****************************************
+* Error Strings
+******************************************/
+
+const char* ERR_getErrorString(ERR_enum code); /* error_private.c */
+
+ERR_STATIC const char* ERR_getErrorName(size_t code)
+{
+ return ERR_getErrorString(ERR_getErrorCode(code));
+}
+
+
+#endif /* ERROR_H_MODULE */
diff --git a/lib/zstd/common/fse.h b/lib/zstd/common/fse.h
new file mode 100644
index 00000000000000..0bb174c2c36775
--- /dev/null
+++ b/lib/zstd/common/fse.h
@@ -0,0 +1,710 @@
+/* ******************************************************************
+ * FSE : Finite State Entropy codec
+ * Public Prototypes declaration
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ *
+ * You can contact the author at :
+ * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */
+
+
+#ifndef FSE_H
+#define FSE_H
+
+
+/*-*****************************************
+* Dependencies
+******************************************/
+#include "zstd_deps.h" /* size_t, ptrdiff_t */
+
+
+/*-*****************************************
+* FSE_PUBLIC_API : control library symbols visibility
+******************************************/
+#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
+# define FSE_PUBLIC_API __attribute__ ((visibility ("default")))
+#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) /* Visual expected */
+# define FSE_PUBLIC_API __declspec(dllexport)
+#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
+#  define FSE_PUBLIC_API __declspec(dllimport) /* It isn't required but allows generating better code, saving a function pointer load from the IAT and an indirect jump. */
+#else
+# define FSE_PUBLIC_API
+#endif
+
+/*------ Version ------*/
+#define FSE_VERSION_MAJOR 0
+#define FSE_VERSION_MINOR 9
+#define FSE_VERSION_RELEASE 0
+
+#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE
+#define FSE_QUOTE(str) #str
+#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str)
+#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION)
+
+#define FSE_VERSION_NUMBER (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE)
+FSE_PUBLIC_API unsigned FSE_versionNumber(void); /*< library version number; to be used when checking dll version */
+
+
+/*-****************************************
+* FSE simple functions
+******************************************/
+/*! FSE_compress() :
+ Compress content of buffer 'src', of size 'srcSize', into destination buffer 'dst'.
+    'dst' buffer must be already allocated. Compression runs faster if dstCapacity >= FSE_compressBound(srcSize).
+ @return : size of compressed data (<= dstCapacity).
+ Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
+ if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression instead.
+ if FSE_isError(return), compression failed (more details using FSE_getErrorName())
+*/
+FSE_PUBLIC_API size_t FSE_compress(void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize);
+
+/*! FSE_decompress():
+ Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',
+ into already allocated destination buffer 'dst', of size 'dstCapacity'.
+ @return : size of regenerated data (<= maxDstSize),
+ or an error code, which can be tested using FSE_isError() .
+
+ ** Important ** : FSE_decompress() does not decompress non-compressible nor RLE data !!!
+ Why ? : making this distinction requires a header.
+ Header management is intentionally delegated to the user layer, which can better manage special cases.
+*/
+FSE_PUBLIC_API size_t FSE_decompress(void* dst, size_t dstCapacity,
+ const void* cSrc, size_t cSrcSize);
+
+
+/*-*****************************************
+* Tool functions
+******************************************/
+FSE_PUBLIC_API size_t FSE_compressBound(size_t size); /* maximum compressed size */
+
+/* Error Management */
+FSE_PUBLIC_API unsigned FSE_isError(size_t code); /* tells if a return value is an error code */
+FSE_PUBLIC_API const char* FSE_getErrorName(size_t code); /* provides error code string (useful for debugging) */
+
+
+/*-*****************************************
+* FSE advanced functions
+******************************************/
+/*! FSE_compress2() :
+ Same as FSE_compress(), but allows the selection of 'maxSymbolValue' and 'tableLog'
+ Both parameters can be defined as '0' to mean : use default value
+ @return : size of compressed data
+ Special values : if return == 0, srcData is not compressible => Nothing is stored within cSrc !!!
+ if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression.
+ if FSE_isError(return), it's an error code.
+*/
+FSE_PUBLIC_API size_t FSE_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
+
+
+/*-*****************************************
+* FSE detailed API
+******************************************/
+/*!
+FSE_compress() does the following:
+1. count symbol occurrences from source[] into table count[] (see hist.h)
+2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog)
+3. save normalized counters to memory buffer using writeNCount()
+4. build encoding table 'CTable' from normalized counters
+5. encode the data stream using encoding table 'CTable'
+
+FSE_decompress() does the following:
+1. read normalized counters with readNCount()
+2. build decoding table 'DTable' from normalized counters
+3. decode the data stream using decoding table 'DTable'
+
+The following API allows targeting specific sub-functions for advanced tasks.
+For example, it's possible to compress several blocks using the same 'CTable',
+or to save and provide normalized distribution using external method.
+*/
+
+/* *** COMPRESSION *** */
+
+/*! FSE_optimalTableLog():
+ dynamically downsize 'tableLog' when conditions are met.
+ It saves CPU time, by using smaller tables, while preserving or even improving compression ratio.
+ @return : recommended tableLog (necessarily <= 'maxTableLog') */
+FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
+
+/*! FSE_normalizeCount():
+ normalize counts so that sum(count[]) == Power_of_2 (2^tableLog)
+ 'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1).
+ useLowProbCount is a boolean parameter which trades off compressed size for
+ faster header decoding. When it is set to 1, the compressed data will be slightly
+ smaller. And when it is set to 0, FSE_readNCount() and FSE_buildDTable() will be
+ faster. If you are compressing a small amount of data (< 2 KB) then useLowProbCount=0
+ is a good default, since header deserialization makes a big speed difference.
+ Otherwise, useLowProbCount=1 is a good default, since the speed difference is small.
+ @return : tableLog,
+ or an errorCode, which can be tested using FSE_isError() */
+FSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog,
+ const unsigned* count, size_t srcSize, unsigned maxSymbolValue, unsigned useLowProbCount);
+
+/*! FSE_NCountWriteBound():
+ Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'.
+ Typically useful for allocation purpose. */
+FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog);
+
+/*! FSE_writeNCount():
+ Compactly save 'normalizedCounter' into 'buffer'.
+ @return : size of the compressed table,
+ or an errorCode, which can be tested using FSE_isError(). */
+FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize,
+ const short* normalizedCounter,
+ unsigned maxSymbolValue, unsigned tableLog);
+
+/*! Constructor and Destructor of FSE_CTable.
+ Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */
+typedef unsigned FSE_CTable; /* don't allocate that. It's only meant to be more restrictive than void* */
+FSE_PUBLIC_API FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog);
+FSE_PUBLIC_API void FSE_freeCTable (FSE_CTable* ct);
+
+/*! FSE_buildCTable():
+ Builds `ct`, which must be already allocated, using FSE_createCTable().
+ @return : 0, or an errorCode, which can be tested using FSE_isError() */
+FSE_PUBLIC_API size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
+
+/*! FSE_compress_usingCTable():
+ Compress `src` using `ct` into `dst` which must be already allocated.
+ @return : size of compressed data (<= `dstCapacity`),
+ or 0 if compressed data could not fit into `dst`,
+ or an errorCode, which can be tested using FSE_isError() */
+FSE_PUBLIC_API size_t FSE_compress_usingCTable (void* dst, size_t dstCapacity, const void* src, size_t srcSize, const FSE_CTable* ct);
+
+/*!
+Tutorial :
+----------
+The first step is to count all symbols. FSE_count() does this job very fast.
+Result will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells.
+'src' is a table of bytes of size 'srcSize'. All values within 'src' MUST be <= maxSymbolValuePtr[0]
+maxSymbolValuePtr[0] will be updated, with its real value (necessarily <= original value)
+FSE_count() will return the number of occurrences of the most frequent symbol.
+This can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility.
+If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
+
+The next step is to normalize the frequencies.
+FSE_normalizeCount() will ensure that sum of frequencies is == 2 ^'tableLog'.
+It also guarantees a minimum of 1 to any Symbol with frequency >= 1.
+You can use 'tableLog'==0 to mean "use default tableLog value".
+If you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(),
+which will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means "default").
+
+The result of FSE_normalizeCount() will be saved into a table,
+called 'normalizedCounter', which is a table of signed short.
+'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells.
+The return value is tableLog if everything proceeded as expected.
+It is 0 if there is a single symbol within distribution.
+If there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()).
+
+'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount().
+'buffer' must be already allocated.
+For guaranteed success, buffer size must be at least FSE_NCountWriteBound().
+The result of the function is the number of bytes written into 'buffer'.
+If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small).
+
+'normalizedCounter' can then be used to create the compression table 'CTable'.
+The space required by 'CTable' must be already allocated, using FSE_createCTable().
+You can then use FSE_buildCTable() to fill 'CTable'.
+If there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()).
+
+'CTable' can then be used to compress 'src', with FSE_compress_usingCTable().
+Similar to FSE_count(), the convention is that 'src' is assumed to be a table of char of size 'srcSize'.
+The function returns the size of compressed data (without header), necessarily <= `dstCapacity`.
+If it returns '0', compressed data could not fit into 'dst'.
+If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
+*/
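+
+/*
+Condensed form of the tutorial above as code (illustrative sketch : `count`,
+`maxSymbolValue`, `src`, `srcSize`, `dst`, `dstCapacity` and `ct` are assumed
+to be provided by the caller, with count[] already filled, see hist.h) :
+
+    unsigned tableLog = FSE_optimalTableLog(0, srcSize, maxSymbolValue);
+    short norm[256];   // needs maxSymbolValue+1 cells; 256 covers the worst case
+    size_t r = FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue, 1);
+    if (FSE_isError(r)) return r;
+    tableLog = (unsigned)r;
+
+    size_t const hSize = FSE_writeNCount(dst, dstCapacity, norm, maxSymbolValue, tableLog);
+    if (FSE_isError(hSize)) return hSize;
+
+    r = FSE_buildCTable(ct, norm, maxSymbolValue, tableLog);   // ct from FSE_createCTable()
+    if (FSE_isError(r)) return r;
+    size_t const cSize = FSE_compress_usingCTable((char*)dst + hSize, dstCapacity - hSize,
+                                                  src, srcSize, ct);
+*/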
+
+
+/* *** DECOMPRESSION *** */
+
+/*! FSE_readNCount():
+ Read compactly saved 'normalizedCounter' from 'rBuffer'.
+ @return : size read from 'rBuffer',
+ or an errorCode, which can be tested using FSE_isError().
+ maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
+FSE_PUBLIC_API size_t FSE_readNCount (short* normalizedCounter,
+ unsigned* maxSymbolValuePtr, unsigned* tableLogPtr,
+ const void* rBuffer, size_t rBuffSize);
+
+/*! FSE_readNCount_bmi2():
+ * Same as FSE_readNCount() but pass bmi2=1 when your CPU supports BMI2 and 0 otherwise.
+ */
+FSE_PUBLIC_API size_t FSE_readNCount_bmi2(short* normalizedCounter,
+ unsigned* maxSymbolValuePtr, unsigned* tableLogPtr,
+ const void* rBuffer, size_t rBuffSize, int bmi2);
+
+/*! Constructor and Destructor of FSE_DTable.
+ Note that its size depends on 'tableLog' */
+typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */
+FSE_PUBLIC_API FSE_DTable* FSE_createDTable(unsigned tableLog);
+FSE_PUBLIC_API void FSE_freeDTable(FSE_DTable* dt);
+
+/*! FSE_buildDTable():
+ Builds 'dt', which must be already allocated, using FSE_createDTable().
+ return : 0, or an errorCode, which can be tested using FSE_isError() */
+FSE_PUBLIC_API size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
+
+/*! FSE_decompress_usingDTable():
+ Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
+ into `dst` which must be already allocated.
+ @return : size of regenerated data (necessarily <= `dstCapacity`),
+ or an errorCode, which can be tested using FSE_isError() */
+FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt);
+
+/*!
+Tutorial :
+----------
+(Note : these functions only decompress FSE-compressed blocks.
+ If block is uncompressed, use memcpy() instead
+ If block is a single repeated byte, use memset() instead )
+
+The first step is to obtain the normalized frequencies of symbols.
+This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount().
+'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
+In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
+or size the table to handle worst case situations (typically 256).
+FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
+The result of FSE_readNCount() is the number of bytes read from 'rBuffer'.
+Note that 'rBufferSize' must be at least 4 bytes, even if useful information is less than that.
+If there is an error, the function will return an error code, which can be tested using FSE_isError().
+
+The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'.
+This is performed by the function FSE_buildDTable().
+The space required by 'FSE_DTable' must be already allocated using FSE_createDTable().
+If there is an error, the function will return an error code, which can be tested using FSE_isError().
+
+`FSE_DTable` can then be used to decompress `cSrc`, with FSE_decompress_usingDTable().
+`cSrcSize` must be strictly correct, otherwise decompression will fail.
+FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`).
+If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small)
+*/
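+
+/*
+Condensed decompression flow as code (illustrative sketch : `cSrc`, `cSrcSize`,
+`dst`, `dstCapacity` are assumed to be provided by the caller, and `dt` to have
+been allocated beforehand with FSE_createDTable() for the worst-case tableLog) :
+
+    unsigned tableLog, maxSymbolValue = 255;
+    short norm[256];
+    size_t const hSize = FSE_readNCount(norm, &maxSymbolValue, &tableLog, cSrc, cSrcSize);
+    if (FSE_isError(hSize)) return hSize;
+
+    size_t r = FSE_buildDTable(dt, norm, maxSymbolValue, tableLog);
+    if (FSE_isError(r)) return r;
+
+    size_t const dSize = FSE_decompress_usingDTable(dst, dstCapacity,
+                                                    (const char*)cSrc + hSize,
+                                                    cSrcSize - hSize, dt);
+    if (FSE_isError(dSize)) return dSize;
+*/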
+
+#endif /* FSE_H */
+
+#if !defined(FSE_H_FSE_STATIC_LINKING_ONLY)
+#define FSE_H_FSE_STATIC_LINKING_ONLY
+
+/* *** Dependency *** */
+#include "bitstream.h"
+
+
+/* *****************************************
+* Static allocation
+*******************************************/
+/* FSE buffer bounds */
+#define FSE_NCOUNTBOUND 512
+#define FSE_BLOCKBOUND(size) ((size) + ((size)>>7) + 4 /* fse states */ + sizeof(size_t) /* bitContainer */)
+#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
+
+/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using below macros */
+#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1<<((maxTableLog)-1)) + (((maxSymbolValue)+1)*2))
+#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<<(maxTableLog)))
+
+/* or use the size to malloc() space directly. Pay attention to alignment restrictions though */
+#define FSE_CTABLE_SIZE(maxTableLog, maxSymbolValue) (FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(FSE_CTable))
+#define FSE_DTABLE_SIZE(maxTableLog) (FSE_DTABLE_SIZE_U32(maxTableLog) * sizeof(FSE_DTable))
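+
+/* e.g. (illustrative) fully static allocation for tableLog=12, maxSymbolValue=255 :
+ *   FSE_CTable ct[FSE_CTABLE_SIZE_U32(12, 255)];
+ *   FSE_DTable dt[FSE_DTABLE_SIZE_U32(12)];
+ */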
+
+
+/* *****************************************
+ * FSE advanced API
+ ***************************************** */
+
+unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus);
+/*< same as FSE_optimalTableLog(), which uses `minus==2` */
+
+/* FSE_compress_wksp() :
+ * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).
+ * FSE_COMPRESS_WKSP_SIZE_U32() provides the minimum size required for `workSpace` as a table of FSE_CTable.
+ */
+#define FSE_COMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) ( FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) + ((maxTableLog > 12) ? (1 << (maxTableLog - 2)) : 1024) )
+size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
+
+size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits);
+/*< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */
+
+size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue);
+/*< build a fake FSE_CTable, designed to compress always the same symbolValue */
+
+/* FSE_buildCTable_wksp() :
+ * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
+ * `wkspSize` must be >= `FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog)` of `unsigned`.
+ */
+#define FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog) (maxSymbolValue + 2 + (1ull << (tableLog - 2)))
+#define FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) (sizeof(unsigned) * FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog))
+size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
+
+#define FSE_BUILD_DTABLE_WKSP_SIZE(maxTableLog, maxSymbolValue) (sizeof(short) * (maxSymbolValue + 1) + (1ULL << maxTableLog) + 8)
+#define FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) ((FSE_BUILD_DTABLE_WKSP_SIZE(maxTableLog, maxSymbolValue) + sizeof(unsigned) - 1) / sizeof(unsigned))
+FSE_PUBLIC_API size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
+/*< Same as FSE_buildDTable(), using an externally allocated `workspace` produced with `FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue)` */
+
+size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);
+/*< build a fake FSE_DTable, designed to read a flat distribution where each symbol uses nbBits */
+
+size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
+/*< build a fake FSE_DTable, designed to always generate the same symbolValue */
+
+#define FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) (FSE_DTABLE_SIZE_U32(maxTableLog) + FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) + (FSE_MAX_SYMBOL_VALUE + 1) / 2 + 1)
+#define FSE_DECOMPRESS_WKSP_SIZE(maxTableLog, maxSymbolValue) (FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(unsigned))
+size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize);
+/*< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DECOMPRESS_WKSP_SIZE_U32(maxLog, maxSymbolValue)` */
+
+size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2);
+/*< Same as FSE_decompress_wksp() but with dynamic BMI2 support. Pass 1 if your CPU supports BMI2 or 0 if it doesn't. */
+
+typedef enum {
+ FSE_repeat_none, /*< Cannot use the previous table */
+ FSE_repeat_check, /*< Can use the previous table but it must be checked */
+ FSE_repeat_valid /*< Can use the previous table and it is assumed to be valid */
+ } FSE_repeat;
+
+/* *****************************************
+* FSE symbol compression API
+*******************************************/
+/*!
+ This API consists of small unitary functions, which highly benefit from being inlined.
+  Hence their bodies are included in the next section.
+*/
+typedef struct {
+ ptrdiff_t value;
+ const void* stateTable;
+ const void* symbolTT;
+ unsigned stateLog;
+} FSE_CState_t;
+
+static void FSE_initCState(FSE_CState_t* CStatePtr, const FSE_CTable* ct);
+
+static void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* CStatePtr, unsigned symbol);
+
+static void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* CStatePtr);
+
+/*<
+These functions are inner components of FSE_compress_usingCTable().
+They allow the creation of custom streams, mixing multiple tables and bit sources.
+
+A key property to keep in mind is that encoding and decoding are done **in reverse direction**.
+So the first symbol you will encode is the last you will decode, like a LIFO stack.
+
+You will need a few variables to track your CStream. They are :
+
+FSE_CTable ct; // Provided by FSE_buildCTable()
+BIT_CStream_t bitStream; // bitStream tracking structure
+FSE_CState_t state; // State tracking structure (can have several)
+
+
+The first thing to do is to init bitStream and state.
+ size_t errorCode = BIT_initCStream(&bitStream, dstBuffer, maxDstSize);
+ FSE_initCState(&state, ct);
+
+Note that BIT_initCStream() can produce an error code, so its result should be tested, using FSE_isError();
+You can then encode your input data, byte after byte.
+FSE_encodeSymbol() outputs a maximum of 'tableLog' bits at a time.
+Remember decoding will be done in reverse direction.
+    FSE_encodeSymbol(&bitStream, &state, symbol);
+
+At any time, you can also add any bit sequence.
+Note : maximum allowed nbBits is 25, for compatibility with 32-bits decoders
+ BIT_addBits(&bitStream, bitField, nbBits);
+
+The above methods don't commit data to memory, they just store it into local register, for speed.
+Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
+Writing data to memory is a manual operation, performed by the flushBits function.
+ BIT_flushBits(&bitStream);
+
+Your last FSE encoding operation shall be to flush your last state value(s).
+  FSE_flushCState(&bitStream, &state);
+
+Finally, you must close the bitStream.
+The function returns the size of CStream in bytes.
+If data couldn't fit into dstBuffer, it will return a 0 ( == not compressible)
+If there is an error, it returns an errorCode (which can be tested using FSE_isError()).
+ size_t size = BIT_closeCStream(&bitStream);
+*/
+
+
+/* *****************************************
+* FSE symbol decompression API
+*******************************************/
+typedef struct {
+ size_t state;
+ const void* table; /* precise table may vary, depending on U16 */
+} FSE_DState_t;
+
+
+static void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt);
+
+static unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
+
+static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr);
+
+/*<
+Let's now decompose FSE_decompress_usingDTable() into its unitary components.
+You will decode FSE-encoded symbols from the bitStream,
+and also any other bitFields you put in, **in reverse order**.
+
+You will need a few variables to track your bitStream. They are :
+
+BIT_DStream_t DStream; // Stream context
+FSE_DState_t DState; // State context. Multiple ones are possible
+FSE_DTable* DTablePtr; // Decoding table, provided by FSE_buildDTable()
+
+The first thing to do is to init the bitStream.
+ errorCode = BIT_initDStream(&DStream, srcBuffer, srcSize);
+
+You should then retrieve your initial state(s)
+(in reverse flushing order if you have several ones) :
+ errorCode = FSE_initDState(&DState, &DStream, DTablePtr);
+
+You can then decode your data, symbol after symbol.
+For information, the maximum number of bits read by FSE_decodeSymbol() is 'tableLog'.
+Keep in mind that symbols are decoded in reverse order, like a LIFO stack (last in, first out).
+ unsigned char symbol = FSE_decodeSymbol(&DState, &DStream);
+
+You can retrieve any bitfield you eventually stored into the bitStream (in reverse order)
+Note : maximum allowed nbBits is 25, for 32-bits compatibility
+ size_t bitField = BIT_readBits(&DStream, nbBits);
+
+All above operations only read from the local register (whose size depends on size_t).
+Refueling the register from memory is manually performed by the reload method.
+  endSignal = BIT_reloadDStream(&DStream);
+
+BIT_reloadDStream() result tells if there is still some more data to read from DStream.
+BIT_DStream_unfinished : there is still some data left into the DStream.
+BIT_DStream_endOfBuffer : Dstream reached end of buffer. Its container may no longer be completely filled.
+BIT_DStream_completed : Dstream reached its exact end, corresponding in general to decompression completed.
+BIT_DStream_tooFar : Dstream went too far. Decompression result is corrupted.
+
+When reaching end of buffer (BIT_DStream_endOfBuffer), progress slowly, notably if you decode multiple symbols per loop,
+to properly detect the exact end of stream.
+After each decoded symbol, check if DStream is fully consumed using this simple test :
+ BIT_reloadDStream(&DStream) >= BIT_DStream_completed
+
+When it's done, verify decompression is fully completed, by checking both DStream and the relevant states.
+Checking if DStream has reached its end is performed by :
+ BIT_endOfDStream(&DStream);
+Check also the states. There might be some symbols left there, if some high probability ones (>50%) are possible.
+ FSE_endOfDState(&DState);
+*/
+
+
+/* *****************************************
+* FSE unsafe API
+*******************************************/
+static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
+/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
+
+
+/* *****************************************
+* Implementation of inlined functions
+*******************************************/
+typedef struct {
+ int deltaFindState;
+ U32 deltaNbBits;
+} FSE_symbolCompressionTransform; /* total 8 bytes */
+
+MEM_STATIC void FSE_initCState(FSE_CState_t* statePtr, const FSE_CTable* ct)
+{
+ const void* ptr = ct;
+ const U16* u16ptr = (const U16*) ptr;
+ const U32 tableLog = MEM_read16(ptr);
+ statePtr->value = (ptrdiff_t)1<<tableLog;
+ statePtr->stateTable = u16ptr+2;
+ statePtr->symbolTT = ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1);
+ statePtr->stateLog = tableLog;
+}
+
+
+/*! FSE_initCState2() :
+* Same as FSE_initCState(), but the first symbol to include (which will be the last to be read)
+* uses the smallest state value possible, saving the cost of this symbol */
+MEM_STATIC void FSE_initCState2(FSE_CState_t* statePtr, const FSE_CTable* ct, U32 symbol)
+{
+ FSE_initCState(statePtr, ct);
+ { const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
+ const U16* stateTable = (const U16*)(statePtr->stateTable);
+ U32 nbBitsOut = (U32)((symbolTT.deltaNbBits + (1<<15)) >> 16);
+ statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits;
+ statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
+ }
+}
+
+MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, unsigned symbol)
+{
+ FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
+ const U16* const stateTable = (const U16*)(statePtr->stateTable);
+ U32 const nbBitsOut = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16);
+ BIT_addBits(bitC, statePtr->value, nbBitsOut);
+ statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
+}
+
+MEM_STATIC void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePtr)
+{
+ BIT_addBits(bitC, statePtr->value, statePtr->stateLog);
+ BIT_flushBits(bitC);
+}
+
+
+/* FSE_getMaxNbBits() :
+ * Approximate maximum cost of a symbol, in bits.
+ * Fractional costs get rounded up (i.e. : a symbol with a normalized frequency of 3 gives the same result as a frequency of 2)
+ * note 1 : assume symbolValue is valid (<= maxSymbolValue)
+ * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
+MEM_STATIC U32 FSE_getMaxNbBits(const void* symbolTTPtr, U32 symbolValue)
+{
+ const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr;
+ return (symbolTT[symbolValue].deltaNbBits + ((1<<16)-1)) >> 16;
+}
+
+/* FSE_bitCost() :
+ * Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits)
+ * note 1 : assume symbolValue is valid (<= maxSymbolValue)
+ * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
+MEM_STATIC U32 FSE_bitCost(const void* symbolTTPtr, U32 tableLog, U32 symbolValue, U32 accuracyLog)
+{
+ const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr;
+ U32 const minNbBits = symbolTT[symbolValue].deltaNbBits >> 16;
+ U32 const threshold = (minNbBits+1) << 16;
+ assert(tableLog < 16);
+ assert(accuracyLog < 31-tableLog); /* ensure enough room for renormalization double shift */
+ { U32 const tableSize = 1 << tableLog;
+ U32 const deltaFromThreshold = threshold - (symbolTT[symbolValue].deltaNbBits + tableSize);
+ U32 const normalizedDeltaFromThreshold = (deltaFromThreshold << accuracyLog) >> tableLog; /* linear interpolation (very approximate) */
+ U32 const bitMultiplier = 1 << accuracyLog;
+ assert(symbolTT[symbolValue].deltaNbBits + tableSize <= threshold);
+ assert(normalizedDeltaFromThreshold <= bitMultiplier);
+ return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold;
+ }
+}
+
+
+/* ====== Decompression ====== */
+
+typedef struct {
+ U16 tableLog;
+ U16 fastMode;
+} FSE_DTableHeader; /* sizeof U32 */
+
+typedef struct
+{
+ unsigned short newState;
+ unsigned char symbol;
+ unsigned char nbBits;
+} FSE_decode_t; /* size == U32 */
+
+MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)
+{
+ const void* ptr = dt;
+ const FSE_DTableHeader* const DTableH = (const FSE_DTableHeader*)ptr;
+ DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
+ BIT_reloadDStream(bitD);
+ DStatePtr->table = dt + 1;
+}
+
+MEM_STATIC BYTE FSE_peekSymbol(const FSE_DState_t* DStatePtr)
+{
+ FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ return DInfo.symbol;
+}
+
+MEM_STATIC void FSE_updateState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
+{
+ FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ U32 const nbBits = DInfo.nbBits;
+ size_t const lowBits = BIT_readBits(bitD, nbBits);
+ DStatePtr->state = DInfo.newState + lowBits;
+}
+
+MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
+{
+ FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ U32 const nbBits = DInfo.nbBits;
+ BYTE const symbol = DInfo.symbol;
+ size_t const lowBits = BIT_readBits(bitD, nbBits);
+
+ DStatePtr->state = DInfo.newState + lowBits;
+ return symbol;
+}
+
+/*! FSE_decodeSymbolFast() :
+ unsafe, only works if no symbol has a probability > 50% */
+MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
+{
+ FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ U32 const nbBits = DInfo.nbBits;
+ BYTE const symbol = DInfo.symbol;
+ size_t const lowBits = BIT_readBitsFast(bitD, nbBits);
+
+ DStatePtr->state = DInfo.newState + lowBits;
+ return symbol;
+}
+
+MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
+{
+ return DStatePtr->state == 0;
+}
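+
+/* A minimal single-state decoding sketch for the functions above (assumes `dt`
+ * was built with FSE_buildDTable() and that `op`/`oend` delimit the output
+ * buffer; the production decoder in fse_decompress.c interleaves two states
+ * and handles the tail more carefully) :
+ *     BIT_DStream_t bitD;
+ *     FSE_DState_t  dstate;
+ *     (void)BIT_initDStream(&bitD, cSrc, cSrcSize);       // error check omitted
+ *     FSE_initDState(&dstate, &bitD, dt);
+ *     while ( (BIT_reloadDStream(&bitD) == BIT_DStream_unfinished) && (op < oend) )
+ *         *op++ = FSE_decodeSymbol(&dstate, &bitD);
+ *     // then drain remaining symbols until FSE_endOfDState(&dstate) reports completion
+ */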
+
+
+
+#ifndef FSE_COMMONDEFS_ONLY
+
+/* **************************************************************
+* Tuning parameters
+****************************************************************/
+/*!MEMORY_USAGE :
+* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+* Increasing memory usage improves compression ratio
+* Reduced memory usage can improve speed, due to cache effect
+* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
+#ifndef FSE_MAX_MEMORY_USAGE
+# define FSE_MAX_MEMORY_USAGE 14
+#endif
+#ifndef FSE_DEFAULT_MEMORY_USAGE
+# define FSE_DEFAULT_MEMORY_USAGE 13
+#endif
+#if (FSE_DEFAULT_MEMORY_USAGE > FSE_MAX_MEMORY_USAGE)
+# error "FSE_DEFAULT_MEMORY_USAGE must be <= FSE_MAX_MEMORY_USAGE"
+#endif
+
+/*!FSE_MAX_SYMBOL_VALUE :
+* Maximum symbol value authorized.
+* Required for proper stack allocation */
+#ifndef FSE_MAX_SYMBOL_VALUE
+# define FSE_MAX_SYMBOL_VALUE 255
+#endif
+
+/* **************************************************************
+* template functions type & suffix
+****************************************************************/
+#define FSE_FUNCTION_TYPE BYTE
+#define FSE_FUNCTION_EXTENSION
+#define FSE_DECODE_TYPE FSE_decode_t
+
+
+#endif /* !FSE_COMMONDEFS_ONLY */
+
+
+/* ***************************************************************
+* Constants
+*****************************************************************/
+#define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE-2)
+#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
+#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
+#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
+#define FSE_MIN_TABLELOG 5
+
+#define FSE_TABLELOG_ABSOLUTE_MAX 15
+#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
+# error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
+#endif
+
+#define FSE_TABLESTEP(tableSize) (((tableSize)>>1) + ((tableSize)>>3) + 3)
+
+
+#endif /* FSE_STATIC_LINKING_ONLY */
+
+
diff --git a/lib/zstd/common/fse_decompress.c b/lib/zstd/common/fse_decompress.c
new file mode 100644
index 00000000000000..2c8bbe3e4c1489
--- /dev/null
+++ b/lib/zstd/common/fse_decompress.c
@@ -0,0 +1,390 @@
+/* ******************************************************************
+ * FSE : Finite State Entropy decoder
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ *
+ * You can contact the author at :
+ * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ * - Public forum : https://groups.google.com/forum/#!forum/lz4c
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */
+
+
+/* **************************************************************
+* Includes
+****************************************************************/
+#include "debug.h" /* assert */
+#include "bitstream.h"
+#include "compiler.h"
+#define FSE_STATIC_LINKING_ONLY
+#include "fse.h"
+#include "error_private.h"
+#define ZSTD_DEPS_NEED_MALLOC
+#include "zstd_deps.h"
+
+
+/* **************************************************************
+* Error Management
+****************************************************************/
+#define FSE_isError ERR_isError
+#define FSE_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */
+
+
+/* **************************************************************
+* Templates
+****************************************************************/
+/*
+ designed to be included
+ for type-specific functions (template emulation in C)
+ Objective is to write these functions only once, for improved maintenance
+*/
+
+/* safety checks */
+#ifndef FSE_FUNCTION_EXTENSION
+# error "FSE_FUNCTION_EXTENSION must be defined"
+#endif
+#ifndef FSE_FUNCTION_TYPE
+# error "FSE_FUNCTION_TYPE must be defined"
+#endif
+
+/* Function names */
+#define FSE_CAT(X,Y) X##Y
+#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
+#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
+
+
+/* Function templates */
+FSE_DTable* FSE_createDTable (unsigned tableLog)
+{
+ if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
+ return (FSE_DTable*)ZSTD_malloc( FSE_DTABLE_SIZE_U32(tableLog) * sizeof (U32) );
+}
+
+void FSE_freeDTable (FSE_DTable* dt)
+{
+ ZSTD_free(dt);
+}
+
+static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
+{
+ void* const tdPtr = dt+1; /* because *dt is unsigned, 32-bits aligned on 32-bits */
+ FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr);
+ U16* symbolNext = (U16*)workSpace;
+ BYTE* spread = (BYTE*)(symbolNext + maxSymbolValue + 1);
+
+ U32 const maxSV1 = maxSymbolValue + 1;
+ U32 const tableSize = 1 << tableLog;
+ U32 highThreshold = tableSize-1;
+
+ /* Sanity Checks */
+ if (FSE_BUILD_DTABLE_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(maxSymbolValue_tooLarge);
+ if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
+ if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
+
+ /* Init, lay down lowprob symbols */
+ { FSE_DTableHeader DTableH;
+ DTableH.tableLog = (U16)tableLog;
+ DTableH.fastMode = 1;
+ { S16 const largeLimit= (S16)(1 << (tableLog-1));
+ U32 s;
+ for (s=0; s<maxSV1; s++) {
+ if (normalizedCounter[s]==-1) {
+ tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
+ symbolNext[s] = 1;
+ } else {
+ if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
+ symbolNext[s] = normalizedCounter[s];
+ } } }
+ ZSTD_memcpy(dt, &DTableH, sizeof(DTableH));
+ }
+
+ /* Spread symbols */
+ if (highThreshold == tableSize - 1) {
+ size_t const tableMask = tableSize-1;
+ size_t const step = FSE_TABLESTEP(tableSize);
+ /* First lay down the symbols in order.
+ * We use a uint64_t to lay down 8 bytes at a time. This reduces branch
+ * misses since small blocks generally have small table logs, so nearly
+ * all symbols have counts <= 8. We ensure we have 8 bytes at the end of
+ * our buffer to handle the over-write.
+ */
+ {
+ U64 const add = 0x0101010101010101ull;
+ size_t pos = 0;
+ U64 sv = 0;
+ U32 s;
+ for (s=0; s<maxSV1; ++s, sv += add) {
+ int i;
+ int const n = normalizedCounter[s];
+ MEM_write64(spread + pos, sv);
+ for (i = 8; i < n; i += 8) {
+ MEM_write64(spread + pos + i, sv);
+ }
+ pos += n;
+ }
+ }
+ /* Now we spread those positions across the table.
+ * The benefit of doing it in two stages is that we avoid the
+ * variable-size inner loop, which caused lots of branch misses.
+ * Now we can run through all the positions without any branch misses.
+ * We unroll the loop twice, since that is what empirically worked best.
+ */
+ {
+ size_t position = 0;
+ size_t s;
+ size_t const unroll = 2;
+ assert(tableSize % unroll == 0); /* FSE_MIN_TABLELOG is 5 */
+ for (s = 0; s < (size_t)tableSize; s += unroll) {
+ size_t u;
+ for (u = 0; u < unroll; ++u) {
+ size_t const uPosition = (position + (u * step)) & tableMask;
+ tableDecode[uPosition].symbol = spread[s + u];
+ }
+ position = (position + (unroll * step)) & tableMask;
+ }
+ assert(position == 0);
+ }
+ } else {
+ U32 const tableMask = tableSize-1;
+ U32 const step = FSE_TABLESTEP(tableSize);
+ U32 s, position = 0;
+ for (s=0; s<maxSV1; s++) {
+ int i;
+ for (i=0; i<normalizedCounter[s]; i++) {
+ tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
+ position = (position + step) & tableMask;
+ while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */
+ } }
+ if (position!=0) return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
+ }
+
+ /* Build Decoding table */
+ { U32 u;
+ for (u=0; u<tableSize; u++) {
+ FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol);
+ U32 const nextState = symbolNext[symbol]++;
+ tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
+ tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
+ } }
+
+ return 0;
+}
+
+size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
+{
+ return FSE_buildDTable_internal(dt, normalizedCounter, maxSymbolValue, tableLog, workSpace, wkspSize);
+}
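+
+/* A stack-allocated usage sketch (table and workspace sizes come from the
+ * FSE_DTABLE_SIZE_U32() and FSE_BUILD_DTABLE_WKSP_SIZE_U32() macros expected
+ * from fse.h; `normalizedCounter`, `maxSymbolValue` and `tableLog` are assumed
+ * to have been produced by FSE_readNCount()) :
+ *     FSE_DTable dt[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
+ *     U32 wksp[FSE_BUILD_DTABLE_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)];
+ *     CHECK_F( FSE_buildDTable_wksp(dt, normalizedCounter, maxSymbolValue, tableLog,
+ *                                   wksp, sizeof(wksp)) );
+ */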
+
+
+#ifndef FSE_COMMONDEFS_ONLY
+
+/*-*******************************************************
+* Decompression (Byte symbols)
+*********************************************************/
+size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
+{
+ void* ptr = dt;
+ FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
+ void* dPtr = dt + 1;
+ FSE_decode_t* const cell = (FSE_decode_t*)dPtr;
+
+ DTableH->tableLog = 0;
+ DTableH->fastMode = 0;
+
+ cell->newState = 0;
+ cell->symbol = symbolValue;
+ cell->nbBits = 0;
+
+ return 0;
+}
+
+
+size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
+{
+ void* ptr = dt;
+ FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
+ void* dPtr = dt + 1;
+ FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr;
+ const unsigned tableSize = 1 << nbBits;
+ const unsigned tableMask = tableSize - 1;
+ const unsigned maxSV1 = tableMask+1;
+ unsigned s;
+
+ /* Sanity checks */
+ if (nbBits < 1) return ERROR(GENERIC); /* min size */
+
+ /* Build Decoding Table */
+ DTableH->tableLog = (U16)nbBits;
+ DTableH->fastMode = 1;
+ for (s=0; s<maxSV1; s++) {
+ dinfo[s].newState = 0;
+ dinfo[s].symbol = (BYTE)s;
+ dinfo[s].nbBits = (BYTE)nbBits;
+ }
+
+ return 0;
+}
+
+FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic(
+ void* dst, size_t maxDstSize,
+ const void* cSrc, size_t cSrcSize,
+ const FSE_DTable* dt, const unsigned fast)
+{
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* op = ostart;
+ BYTE* const omax = op + maxDstSize;
+ BYTE* const olimit = omax-3;
+
+ BIT_DStream_t bitD;
+ FSE_DState_t state1;
+ FSE_DState_t state2;
+
+ /* Init */
+ CHECK_F(BIT_initDStream(&bitD, cSrc, cSrcSize));
+
+ FSE_initDState(&state1, &bitD, dt);
+ FSE_initDState(&state2, &bitD, dt);
+
+#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
+
+ /* 4 symbols per loop */
+ for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) & (op<olimit) ; op+=4) {
+ op[0] = FSE_GETSYMBOL(&state1);
+
+ if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ BIT_reloadDStream(&bitD);
+
+ op[1] = FSE_GETSYMBOL(&state2);
+
+ if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }
+
+ op[2] = FSE_GETSYMBOL(&state1);
+
+ if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ BIT_reloadDStream(&bitD);
+
+ op[3] = FSE_GETSYMBOL(&state2);
+ }
+
+ /* tail */
+ /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */
+ while (1) {
+ if (op>(omax-2)) return ERROR(dstSize_tooSmall);
+ *op++ = FSE_GETSYMBOL(&state1);
+ if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
+ *op++ = FSE_GETSYMBOL(&state2);
+ break;
+ }
+
+ if (op>(omax-2)) return ERROR(dstSize_tooSmall);
+ *op++ = FSE_GETSYMBOL(&state2);
+ if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
+ *op++ = FSE_GETSYMBOL(&state1);
+ break;
+ } }
+
+ return op-ostart;
+}
+
+
+size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
+ const void* cSrc, size_t cSrcSize,
+ const FSE_DTable* dt)
+{
+ const void* ptr = dt;
+ const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;
+ const U32 fastMode = DTableH->fastMode;
+
+ /* select fast mode (static) */
+ if (fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
+ return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
+}
+
+
+size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
+{
+ return FSE_decompress_wksp_bmi2(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, /* bmi2 */ 0);
+}
+
+typedef struct {
+ short ncount[FSE_MAX_SYMBOL_VALUE + 1];
+ FSE_DTable dtable[1]; /* Dynamically sized */
+} FSE_DecompressWksp;
+
+
+FORCE_INLINE_TEMPLATE size_t FSE_decompress_wksp_body(
+ void* dst, size_t dstCapacity,
+ const void* cSrc, size_t cSrcSize,
+ unsigned maxLog, void* workSpace, size_t wkspSize,
+ int bmi2)
+{
+ const BYTE* const istart = (const BYTE*)cSrc;
+ const BYTE* ip = istart;
+ unsigned tableLog;
+ unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
+ FSE_DecompressWksp* const wksp = (FSE_DecompressWksp*)workSpace;
+
+ DEBUG_STATIC_ASSERT((FSE_MAX_SYMBOL_VALUE + 1) % 2 == 0);
+ if (wkspSize < sizeof(*wksp)) return ERROR(GENERIC);
+
+ /* normal FSE decoding mode */
+ {
+ size_t const NCountLength = FSE_readNCount_bmi2(wksp->ncount, &maxSymbolValue, &tableLog, istart, cSrcSize, bmi2);
+ if (FSE_isError(NCountLength)) return NCountLength;
+ if (tableLog > maxLog) return ERROR(tableLog_tooLarge);
+ assert(NCountLength <= cSrcSize);
+ ip += NCountLength;
+ cSrcSize -= NCountLength;
+ }
+
+ if (FSE_DECOMPRESS_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(tableLog_tooLarge);
+ workSpace = wksp->dtable + FSE_DTABLE_SIZE_U32(tableLog);
+ wkspSize -= sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog);
+
+ CHECK_F( FSE_buildDTable_internal(wksp->dtable, wksp->ncount, maxSymbolValue, tableLog, workSpace, wkspSize) );
+
+ {
+ const void* ptr = wksp->dtable;
+ const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;
+ const U32 fastMode = DTableH->fastMode;
+
+ /* select fast mode (static) */
+ if (fastMode) return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, wksp->dtable, 1);
+ return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, wksp->dtable, 0);
+ }
+}
+
+/* Avoids the FORCE_INLINE of the _body() function. */
+static size_t FSE_decompress_wksp_body_default(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
+{
+ return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 0);
+}
+
+#if DYNAMIC_BMI2
+TARGET_ATTRIBUTE("bmi2") static size_t FSE_decompress_wksp_body_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
+{
+ return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 1);
+}
+#endif
+
+size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2)
+{
+#if DYNAMIC_BMI2
+ if (bmi2) {
+ return FSE_decompress_wksp_body_bmi2(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize);
+ }
+#endif
+ (void)bmi2;
+ return FSE_decompress_wksp_body_default(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize);
+}
+
+
+typedef FSE_DTable DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
+
+
+
+#endif /* FSE_COMMONDEFS_ONLY */
diff --git a/lib/zstd/common/huf.h b/lib/zstd/common/huf.h
new file mode 100644
index 00000000000000..88c5586646aa51
--- /dev/null
+++ b/lib/zstd/common/huf.h
@@ -0,0 +1,356 @@
+/* ******************************************************************
+ * huff0 huffman codec,
+ * part of Finite State Entropy library
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ *
+ * You can contact the author at :
+ * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */
+
+
+#ifndef HUF_H_298734234
+#define HUF_H_298734234
+
+/* *** Dependencies *** */
+#include "zstd_deps.h" /* size_t */
+
+
+/* *** library symbols visibility *** */
+/* Note : when linking with -fvisibility=hidden on gcc, or by default on Visual,
+ * HUF symbols remain "private" (internal symbols for library only).
+ * Set macro FSE_DLL_EXPORT to 1 if you want HUF symbols visible on DLL interface */
+#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
+# define HUF_PUBLIC_API __attribute__ ((visibility ("default")))
+#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) /* Visual expected */
+# define HUF_PUBLIC_API __declspec(dllexport)
+#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
+# define HUF_PUBLIC_API __declspec(dllimport) /* not required, just to generate faster code (saves a function pointer load from IAT and an indirect jump) */
+#else
+# define HUF_PUBLIC_API
+#endif
+
+
+/* ========================== */
+/* *** simple functions *** */
+/* ========================== */
+
+/* HUF_compress() :
+ * Compress content from buffer 'src', of size 'srcSize', into buffer 'dst'.
+ * 'dst' buffer must be already allocated.
+ * Compression runs faster if `dstCapacity` >= HUF_compressBound(srcSize).
+ * `srcSize` must be <= `HUF_BLOCKSIZE_MAX` == 128 KB.
+ * @return : size of compressed data (<= `dstCapacity`).
+ * Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
+ * if HUF_isError(return), compression failed (more details using HUF_getErrorName())
+ */
+HUF_PUBLIC_API size_t HUF_compress(void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize);
+
+/* HUF_decompress() :
+ * Decompress HUF data from buffer 'cSrc', of size 'cSrcSize',
+ * into already allocated buffer 'dst', of minimum size 'dstSize'.
+ * `originalSize` : **must** be the ***exact*** size of original (uncompressed) data.
+ * Note : in contrast with FSE, HUF_decompress can regenerate
+ * RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,
+ * because it knows the size to regenerate (originalSize).
+ * @return : size of regenerated data (== originalSize),
+ * or an error code, which can be tested using HUF_isError()
+ */
+HUF_PUBLIC_API size_t HUF_decompress(void* dst, size_t originalSize,
+ const void* cSrc, size_t cSrcSize);
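+/* A minimal round-trip sketch for the two functions above (buffers are assumed
+ * to be sized by the caller; error handling abbreviated) :
+ *     size_t const cSize = HUF_compress(cBuf, HUF_compressBound(srcSize), src, srcSize);
+ *     if (HUF_isError(cSize)) { ... }    // compression failed
+ *     if (cSize == 0)         { ... }    // not compressible : store src as-is
+ *     size_t const rSize = HUF_decompress(rBuf, srcSize, cBuf, cSize);  // srcSize must be exact
+ */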
+
+
+/* *** Tool functions *** */
+#define HUF_BLOCKSIZE_MAX (128 * 1024) /*< maximum input size for a single block compressed with HUF_compress */
+HUF_PUBLIC_API size_t HUF_compressBound(size_t size); /*< maximum compressed size (worst case) */
+
+/* Error Management */
+HUF_PUBLIC_API unsigned HUF_isError(size_t code); /*< tells if a return value is an error code */
+HUF_PUBLIC_API const char* HUF_getErrorName(size_t code); /*< provides error code string (useful for debugging) */
+
+
+/* *** Advanced function *** */
+
+/* HUF_compress2() :
+ * Same as HUF_compress(), but offers control over `maxSymbolValue` and `tableLog`.
+ * `maxSymbolValue` must be <= HUF_SYMBOLVALUE_MAX .
+ * `tableLog` must be `<= HUF_TABLELOG_MAX` . */
+HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned tableLog);
+
+/* HUF_compress4X_wksp() :
+ * Same as HUF_compress2(), but uses externally allocated `workSpace`.
+ * `workspace` must have minimum alignment of 4, and be at least as large as HUF_WORKSPACE_SIZE */
+#define HUF_WORKSPACE_SIZE ((6 << 10) + 256)
+#define HUF_WORKSPACE_SIZE_U32 (HUF_WORKSPACE_SIZE / sizeof(U32))
+HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned tableLog,
+ void* workSpace, size_t wkspSize);
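+/* e.g., with a caller-provided scratch area (sketch; 255 and 11 stand in for
+ * maxSymbolValue and tableLog, see HUF_compress2() above) :
+ *     unsigned wksp[HUF_WORKSPACE_SIZE_U32];
+ *     size_t const cSize = HUF_compress4X_wksp(dst, dstCapacity, src, srcSize,
+ *                                              255, 11,   // maxSymbolValue, tableLog
+ *                                              wksp, sizeof(wksp));
+ */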
+
+#endif /* HUF_H_298734234 */
+
+/* ******************************************************************
+ * WARNING !!
+ * The following section contains advanced and experimental definitions
+ * which shall never be used in the context of a dynamic library,
+ * because they are not guaranteed to remain stable in the future.
+ * Only consider them in association with static linking.
+ * *****************************************************************/
+#if !defined(HUF_H_HUF_STATIC_LINKING_ONLY)
+#define HUF_H_HUF_STATIC_LINKING_ONLY
+
+/* *** Dependencies *** */
+#include "mem.h" /* U32 */
+#define FSE_STATIC_LINKING_ONLY
+#include "fse.h"
+
+
+/* *** Constants *** */
+#define HUF_TABLELOG_MAX 12 /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
+#define HUF_TABLELOG_DEFAULT 11 /* default tableLog value when none specified */
+#define HUF_SYMBOLVALUE_MAX 255
+
+#define HUF_TABLELOG_ABSOLUTEMAX 15 /* absolute limit of HUF_TABLELOG_MAX. Beyond that value, code does not work */
+#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX)
+# error "HUF_TABLELOG_MAX is too large !"
+#endif
+
+
+/* ****************************************
+* Static allocation
+******************************************/
+/* HUF buffer bounds */
+#define HUF_CTABLEBOUND 129
+#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true when incompressible data is pre-filtered with a fast heuristic */
+#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
+
+/* static allocation of HUF's Compression Table */
+/* this is a private definition, just exposed for allocation and strict aliasing purpose. never EVER access its members directly */
+struct HUF_CElt_s {
+ U16 val;
+ BYTE nbBits;
+}; /* typedef'd to HUF_CElt */
+typedef struct HUF_CElt_s HUF_CElt; /* consider it an incomplete type */
+#define HUF_CTABLE_SIZE_U32(maxSymbolValue) ((maxSymbolValue)+1) /* Use tables of U32, for proper alignment */
+#define HUF_CTABLE_SIZE(maxSymbolValue) (HUF_CTABLE_SIZE_U32(maxSymbolValue) * sizeof(U32))
+#define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \
+ HUF_CElt name[HUF_CTABLE_SIZE_U32(maxSymbolValue)] /* no final ; */
+
+/* static allocation of HUF's DTable */
+typedef U32 HUF_DTable;
+#define HUF_DTABLE_SIZE(maxTableLog) (1 + (1<<(maxTableLog)))
+#define HUF_CREATE_STATIC_DTABLEX1(DTable, maxTableLog) \
+ HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1) * 0x01000001) }
+#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
+ HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog) * 0x01000001) }
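+
+/* e.g., a statically allocated table for the single-symbol decoder, sized for
+ * the largest supported tableLog (sketch; pairs with HUF_decompress4X1_DCtx()
+ * declared below) :
+ *     HUF_CREATE_STATIC_DTABLEX1(hufTable, HUF_TABLELOG_MAX);
+ *     ... HUF_decompress4X1_DCtx(hufTable, dst, dstSize, cSrc, cSrcSize) ...
+ */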
+
+
+/* ****************************************
+* Advanced decompression functions
+******************************************/
+size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< single-symbol decoder */
+#ifndef HUF_FORCE_DECOMPRESS_X1
+size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< double-symbols decoder */
+#endif
+
+size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< decodes RLE and uncompressed */
+size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< considers RLE and uncompressed as errors */
+size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /*< considers RLE and uncompressed as errors */
+size_t HUF_decompress4X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< single-symbol decoder */
+size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /*< single-symbol decoder */
+#ifndef HUF_FORCE_DECOMPRESS_X1
+size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< double-symbols decoder */
+size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /*< double-symbols decoder */
+#endif
+
+
+/* ****************************************
+ * HUF detailed API
+ * ****************************************/
+
+/*! HUF_compress() does the following:
+ * 1. count symbol occurrence from source[] into table count[] using FSE_count() (exposed within "fse.h")
+ * 2. (optional) refine tableLog using HUF_optimalTableLog()
+ * 3. build Huffman table from count using HUF_buildCTable()
+ * 4. save Huffman table to memory buffer using HUF_writeCTable()
+ * 5. encode the data stream using HUF_compress4X_usingCTable()
+ *
+ * The following API allows targeting specific sub-functions for advanced tasks.
+ * For example, it's possible to compress several blocks using the same 'CTable',
+ * or to save and regenerate 'CTable' using external methods.
+ */
+unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
+size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits); /* @return : maxNbBits; CTable and count can overlap. In which case, CTable will overwrite count content */
+size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog);
+size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog, void* workspace, size_t workspaceSize);
+size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
+size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);
+int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);
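+
+/* A sketch of this advanced path, following steps 2-5 of the outline above
+ * (count[], srcSize and maxSymbolValue are assumed to come from the caller's
+ * histogram step; error checks omitted) :
+ *     unsigned const huffLog = HUF_optimalTableLog(HUF_TABLELOG_DEFAULT, srcSize, maxSymbolValue);
+ *     HUF_CREATE_STATIC_CTABLE(ct, HUF_SYMBOLVALUE_MAX);
+ *     size_t const maxNbBits = HUF_buildCTable(ct, count, maxSymbolValue, huffLog);
+ *     size_t const hSize     = HUF_writeCTable(dst, dstCapacity, ct, maxSymbolValue, (unsigned)maxNbBits);
+ *     size_t const cSize     = HUF_compress4X_usingCTable(dst + hSize, dstCapacity - hSize, src, srcSize, ct);
+ */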
+
+typedef enum {
+ HUF_repeat_none, /*< Cannot use the previous table */
+ HUF_repeat_check, /*< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */
+ HUF_repeat_valid /*< Can use the previous table and it is assumed to be valid */
+ } HUF_repeat;
+/* HUF_compress4X_repeat() :
+ * Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
+ * If it uses hufTable it does not modify hufTable or repeat.
+ * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
+ * If preferRepeat then the old table will always be used if valid. */
+size_t HUF_compress4X_repeat(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned tableLog,
+ void* workSpace, size_t wkspSize, /*< `workSpace` must be aligned on 4-byte boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
+ HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2);
+
+/* HUF_buildCTable_wksp() :
+ * Same as HUF_buildCTable(), but using externally allocated scratch buffer.
+ * `workSpace` must be aligned on 4-byte boundaries, and its size must be >= HUF_CTABLE_WORKSPACE_SIZE.
+ */
+#define HUF_CTABLE_WORKSPACE_SIZE_U32 (2*HUF_SYMBOLVALUE_MAX +1 +1)
+#define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned))
+size_t HUF_buildCTable_wksp (HUF_CElt* tree,
+ const unsigned* count, U32 maxSymbolValue, U32 maxNbBits,
+ void* workSpace, size_t wkspSize);
+
+/*! HUF_readStats() :
+ * Read compact Huffman tree, saved by HUF_writeCTable().
+ * `huffWeight` is destination buffer.
+ * @return : size read from `src`, or an error code.
+ * Note : Needed by HUF_readCTable() and HUF_readDTableXn().
+size_t HUF_readStats(BYTE* huffWeight, size_t hwSize,
+ U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize);
+
+/*! HUF_readStats_wksp() :
+ * Same as HUF_readStats() but takes an external workspace which must be
+ * 4-byte aligned and its size must be >= HUF_READ_STATS_WORKSPACE_SIZE.
+ * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
+ */
+#define HUF_READ_STATS_WORKSPACE_SIZE_U32 FSE_DECOMPRESS_WKSP_SIZE_U32(6, HUF_TABLELOG_MAX-1)
+#define HUF_READ_STATS_WORKSPACE_SIZE (HUF_READ_STATS_WORKSPACE_SIZE_U32 * sizeof(unsigned))
+size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize,
+ U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize,
+ void* workspace, size_t wkspSize,
+ int bmi2);
+
+/* HUF_readCTable() :
+ * Loading a CTable saved with HUF_writeCTable() */
+size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned *hasZeroWeights);
+
+/* HUF_getNbBits() :
+ * Read nbBits from CTable symbolTable, for symbol `symbolValue` presumed <= HUF_SYMBOLVALUE_MAX
+ * Note 1 : is not inlined, as HUF_CElt definition is private
+ * Note 2 : const void* used, so that it can provide a statically allocated table as argument (which uses type U32) */
+U32 HUF_getNbBits(const void* symbolTable, U32 symbolValue);
+
+/*
+ * HUF_decompress() does the following:
+ * 1. select the decompression algorithm (X1, X2) based on pre-computed heuristics
+ * 2. build Huffman table from the saved description, using HUF_readDTableX?()
+ * 3. decode 1 or 4 segments in parallel using HUF_decompress?X?_usingDTable()
+ */
+
+/* HUF_selectDecoder() :
+ * Tells which decoder is likely to decode faster,
+ * based on a set of pre-computed metrics.
+ * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 .
+ * Assumption : 0 < dstSize <= 128 KB */
+U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize);
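+
+/* Dispatch sketch, mirroring the selection done inside HUF_decompress()
+ * (assumes neither HUF_FORCE_DECOMPRESS_X1 nor HUF_FORCE_DECOMPRESS_X2 is defined) :
+ *     U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+ *     return algoNb ? HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize)
+ *                   : HUF_decompress4X1(dst, dstSize, cSrc, cSrcSize);
+ */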
+
+/*
+ * The minimum workspace size for the `workSpace` used in
+ * HUF_readDTableX1_wksp() and HUF_readDTableX2_wksp().
+ *
+ * The space used depends on HUF_TABLELOG_MAX, ranging from ~1500 bytes when
+ * HUF_TABLELOG_MAX=12 to ~1850 bytes when HUF_TABLELOG_MAX=15.
+ * Buffer overflow errors may potentially occur if code modifications result in
+ * a required workspace size greater than that specified in the following
+ * macro.
+ */
+#define HUF_DECOMPRESS_WORKSPACE_SIZE ((2 << 10) + (1 << 9))
+#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))
+
+#ifndef HUF_FORCE_DECOMPRESS_X2
+size_t HUF_readDTableX1 (HUF_DTable* DTable, const void* src, size_t srcSize);
+size_t HUF_readDTableX1_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
+#endif
+#ifndef HUF_FORCE_DECOMPRESS_X1
+size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize);
+size_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
+#endif
+
+size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+#ifndef HUF_FORCE_DECOMPRESS_X2
+size_t HUF_decompress4X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+#endif
+#ifndef HUF_FORCE_DECOMPRESS_X1
+size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+#endif
+
+
+/* ====================== */
+/* single stream variants */
+/* ====================== */
+
+size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
+size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /*< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */
+size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
+/* HUF_compress1X_repeat() :
+ * Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
+ * If it uses hufTable it does not modify hufTable or repeat.
+ * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
+ * If preferRepeat then the old table will always be used if valid. */
+size_t HUF_compress1X_repeat(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned tableLog,
+ void* workSpace, size_t wkspSize, /*< `workSpace` must be aligned on 4-byte boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
+ HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2);
+
+size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */
+#ifndef HUF_FORCE_DECOMPRESS_X1
+size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbol decoder */
+#endif
+
+size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
+size_t HUF_decompress1X_DCtx_wksp (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);
+#ifndef HUF_FORCE_DECOMPRESS_X2
+size_t HUF_decompress1X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< single-symbol decoder */
+size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /*< single-symbol decoder */
+#endif
+#ifndef HUF_FORCE_DECOMPRESS_X1
+size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< double-symbols decoder */
+size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /*< double-symbols decoder */
+#endif
+
+size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); /*< automatic selection of single or double symbol decoder, based on DTable */
+#ifndef HUF_FORCE_DECOMPRESS_X2
+size_t HUF_decompress1X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+#endif
+#ifndef HUF_FORCE_DECOMPRESS_X1
+size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+#endif
+
+/* BMI2 variants.
+ * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
+ */
+size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
+#ifndef HUF_FORCE_DECOMPRESS_X2
+size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
+#endif
+size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
+size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
+#ifndef HUF_FORCE_DECOMPRESS_X2
+size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2);
+#endif
+
+#endif /* HUF_H_HUF_STATIC_LINKING_ONLY */
+
diff --git a/lib/zstd/common/mem.h b/lib/zstd/common/mem.h
new file mode 100644
index 00000000000000..dcdd586a9fd915
--- /dev/null
+++ b/lib/zstd/common/mem.h
@@ -0,0 +1,259 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef MEM_H_MODULE
+#define MEM_H_MODULE
+
+/*-****************************************
+* Dependencies
+******************************************/
+#include <asm/unaligned.h> /* get_unaligned, put_unaligned* */
+#include <linux/compiler.h> /* inline */
+#include <linux/swab.h> /* swab32, swab64 */
+#include <linux/types.h> /* size_t, ptrdiff_t */
+#include "debug.h" /* DEBUG_STATIC_ASSERT */
+
+/*-****************************************
+* Compiler specifics
+******************************************/
+#define MEM_STATIC static inline
+
+/*-**************************************************************
+* Basic Types
+*****************************************************************/
+typedef uint8_t BYTE;
+typedef uint16_t U16;
+typedef int16_t S16;
+typedef uint32_t U32;
+typedef int32_t S32;
+typedef uint64_t U64;
+typedef int64_t S64;
+
+/*-**************************************************************
+* Memory I/O API
+*****************************************************************/
+/*=== Static platform detection ===*/
+MEM_STATIC unsigned MEM_32bits(void);
+MEM_STATIC unsigned MEM_64bits(void);
+MEM_STATIC unsigned MEM_isLittleEndian(void);
+
+/*=== Native unaligned read/write ===*/
+MEM_STATIC U16 MEM_read16(const void* memPtr);
+MEM_STATIC U32 MEM_read32(const void* memPtr);
+MEM_STATIC U64 MEM_read64(const void* memPtr);
+MEM_STATIC size_t MEM_readST(const void* memPtr);
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value);
+MEM_STATIC void MEM_write32(void* memPtr, U32 value);
+MEM_STATIC void MEM_write64(void* memPtr, U64 value);
+
+/*=== Little endian unaligned read/write ===*/
+MEM_STATIC U16 MEM_readLE16(const void* memPtr);
+MEM_STATIC U32 MEM_readLE24(const void* memPtr);
+MEM_STATIC U32 MEM_readLE32(const void* memPtr);
+MEM_STATIC U64 MEM_readLE64(const void* memPtr);
+MEM_STATIC size_t MEM_readLEST(const void* memPtr);
+
+MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val);
+MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val);
+MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32);
+MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64);
+MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val);
+
+/*=== Big endian unaligned read/write ===*/
+MEM_STATIC U32 MEM_readBE32(const void* memPtr);
+MEM_STATIC U64 MEM_readBE64(const void* memPtr);
+MEM_STATIC size_t MEM_readBEST(const void* memPtr);
+
+MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32);
+MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64);
+MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val);
+
+/*=== Byteswap ===*/
+MEM_STATIC U32 MEM_swap32(U32 in);
+MEM_STATIC U64 MEM_swap64(U64 in);
+MEM_STATIC size_t MEM_swapST(size_t in);
+
+/*-**************************************************************
+* Memory I/O Implementation
+*****************************************************************/
+MEM_STATIC unsigned MEM_32bits(void)
+{
+ return sizeof(size_t) == 4;
+}
+
+MEM_STATIC unsigned MEM_64bits(void)
+{
+ return sizeof(size_t) == 8;
+}
+
+#if defined(__LITTLE_ENDIAN)
+#define MEM_LITTLE_ENDIAN 1
+#else
+#define MEM_LITTLE_ENDIAN 0
+#endif
+
+MEM_STATIC unsigned MEM_isLittleEndian(void)
+{
+ return MEM_LITTLE_ENDIAN;
+}
+
+MEM_STATIC U16 MEM_read16(const void *memPtr)
+{
+ return get_unaligned((const U16 *)memPtr);
+}
+
+MEM_STATIC U32 MEM_read32(const void *memPtr)
+{
+ return get_unaligned((const U32 *)memPtr);
+}
+
+MEM_STATIC U64 MEM_read64(const void *memPtr)
+{
+ return get_unaligned((const U64 *)memPtr);
+}
+
+MEM_STATIC size_t MEM_readST(const void *memPtr)
+{
+ return get_unaligned((const size_t *)memPtr);
+}
+
+MEM_STATIC void MEM_write16(void *memPtr, U16 value)
+{
+ put_unaligned(value, (U16 *)memPtr);
+}
+
+MEM_STATIC void MEM_write32(void *memPtr, U32 value)
+{
+ put_unaligned(value, (U32 *)memPtr);
+}
+
+MEM_STATIC void MEM_write64(void *memPtr, U64 value)
+{
+ put_unaligned(value, (U64 *)memPtr);
+}
+
+/*=== Little endian r/w ===*/
+
+MEM_STATIC U16 MEM_readLE16(const void *memPtr)
+{
+ return get_unaligned_le16(memPtr);
+}
+
+MEM_STATIC void MEM_writeLE16(void *memPtr, U16 val)
+{
+ put_unaligned_le16(val, memPtr);
+}
+
+MEM_STATIC U32 MEM_readLE24(const void *memPtr)
+{
+ return MEM_readLE16(memPtr) + (((const BYTE *)memPtr)[2] << 16);
+}
+
+MEM_STATIC void MEM_writeLE24(void *memPtr, U32 val)
+{
+ MEM_writeLE16(memPtr, (U16)val);
+ ((BYTE *)memPtr)[2] = (BYTE)(val >> 16);
+}
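+
+/* e.g. (sketch) : round-tripping a 24-bit value through an unaligned buffer :
+ *     MEM_writeLE24(p, v & 0xFFFFFF);
+ *     assert(MEM_readLE24(p) == (v & 0xFFFFFF));
+ */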
+
+MEM_STATIC U32 MEM_readLE32(const void *memPtr)
+{
+ return get_unaligned_le32(memPtr);
+}
+
+MEM_STATIC void MEM_writeLE32(void *memPtr, U32 val32)
+{
+ put_unaligned_le32(val32, memPtr);
+}
+
+MEM_STATIC U64 MEM_readLE64(const void *memPtr)
+{
+ return get_unaligned_le64(memPtr);
+}
+
+MEM_STATIC void MEM_writeLE64(void *memPtr, U64 val64)
+{
+ put_unaligned_le64(val64, memPtr);
+}
+
+MEM_STATIC size_t MEM_readLEST(const void *memPtr)
+{
+ if (MEM_32bits())
+ return (size_t)MEM_readLE32(memPtr);
+ else
+ return (size_t)MEM_readLE64(memPtr);
+}
+
+MEM_STATIC void MEM_writeLEST(void *memPtr, size_t val)
+{
+ if (MEM_32bits())
+ MEM_writeLE32(memPtr, (U32)val);
+ else
+ MEM_writeLE64(memPtr, (U64)val);
+}
+
+/*=== Big endian r/w ===*/
+
+MEM_STATIC U32 MEM_readBE32(const void *memPtr)
+{
+ return get_unaligned_be32(memPtr);
+}
+
+MEM_STATIC void MEM_writeBE32(void *memPtr, U32 val32)
+{
+ put_unaligned_be32(val32, memPtr);
+}
+
+MEM_STATIC U64 MEM_readBE64(const void *memPtr)
+{
+ return get_unaligned_be64(memPtr);
+}
+
+MEM_STATIC void MEM_writeBE64(void *memPtr, U64 val64)
+{
+ put_unaligned_be64(val64, memPtr);
+}
+
+MEM_STATIC size_t MEM_readBEST(const void *memPtr)
+{
+ if (MEM_32bits())
+ return (size_t)MEM_readBE32(memPtr);
+ else
+ return (size_t)MEM_readBE64(memPtr);
+}
+
+MEM_STATIC void MEM_writeBEST(void *memPtr, size_t val)
+{
+ if (MEM_32bits())
+ MEM_writeBE32(memPtr, (U32)val);
+ else
+ MEM_writeBE64(memPtr, (U64)val);
+}
+
+MEM_STATIC U32 MEM_swap32(U32 in)
+{
+ return swab32(in);
+}
+
+MEM_STATIC U64 MEM_swap64(U64 in)
+{
+ return swab64(in);
+}
+
+MEM_STATIC size_t MEM_swapST(size_t in)
+{
+ if (MEM_32bits())
+ return (size_t)MEM_swap32((U32)in);
+ else
+ return (size_t)MEM_swap64((U64)in);
+}
+
+#endif /* MEM_H_MODULE */
diff --git a/lib/zstd/common/zstd_common.c b/lib/zstd/common/zstd_common.c
new file mode 100644
index 00000000000000..3d7e35b309b5d1
--- /dev/null
+++ b/lib/zstd/common/zstd_common.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+
+/*-*************************************
+* Dependencies
+***************************************/
+#define ZSTD_DEPS_NEED_MALLOC
+#include "zstd_deps.h" /* ZSTD_malloc, ZSTD_calloc, ZSTD_free, ZSTD_memset */
+#include "error_private.h"
+#include "zstd_internal.h"
+
+
+/*-****************************************
+* Version
+******************************************/
+unsigned ZSTD_versionNumber(void) { return ZSTD_VERSION_NUMBER; }
+
+const char* ZSTD_versionString(void) { return ZSTD_VERSION_STRING; }
+
+
+/*-****************************************
+* ZSTD Error Management
+******************************************/
+#undef ZSTD_isError /* defined within zstd_internal.h */
+/*! ZSTD_isError() :
+ * tells if a return value is an error code
+ * symbol is required for external callers */
+unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }
+
+/*! ZSTD_getErrorName() :
+ * provides error code string from function result (useful for debugging) */
+const char* ZSTD_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+/*! ZSTD_getError() :
+ * convert a `size_t` function result into a proper ZSTD_errorCode enum */
+ZSTD_ErrorCode ZSTD_getErrorCode(size_t code) { return ERR_getErrorCode(code); }
+
+/*! ZSTD_getErrorString() :
+ * provides error code string from enum */
+const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); }
+
+
+
+/*=**************************************************************
+* Custom allocator
+****************************************************************/
+void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem)
+{
+ if (customMem.customAlloc)
+ return customMem.customAlloc(customMem.opaque, size);
+ return ZSTD_malloc(size);
+}
+
+void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem)
+{
+ if (customMem.customAlloc) {
+ /* calloc implemented as malloc+memset;
+ * not as efficient as calloc, but next best guess for custom malloc */
+ void* const ptr = customMem.customAlloc(customMem.opaque, size);
+ ZSTD_memset(ptr, 0, size);
+ return ptr;
+ }
+ return ZSTD_calloc(1, size);
+}
+
+void ZSTD_customFree(void* ptr, ZSTD_customMem customMem)
+{
+ if (ptr!=NULL) {
+ if (customMem.customFree)
+ customMem.customFree(customMem.opaque, ptr);
+ else
+ ZSTD_free(ptr);
+ }
+}
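+
+/* A minimal ZSTD_customMem sketch; `myAlloc`, `myFree`, `my_pool_take`,
+ * `my_pool_give` and `myPool` are purely illustrative names, any pair of
+ * functions matching the customAlloc/customFree signatures can be plugged in :
+ *     static void* myAlloc(void* opaque, size_t size) { return my_pool_take(opaque, size); }
+ *     static void  myFree (void* opaque, void* addr)  { my_pool_give(opaque, addr); }
+ *     ZSTD_customMem const cmem = { myAlloc, myFree, myPool };
+ *     void* const p = ZSTD_customMalloc(64, cmem);
+ *     ZSTD_customFree(p, cmem);
+ */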
diff --git a/lib/zstd/common/zstd_deps.h b/lib/zstd/common/zstd_deps.h
new file mode 100644
index 00000000000000..7a5bf44839c9ce
--- /dev/null
+++ b/lib/zstd/common/zstd_deps.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
+/*
+ * Copyright (c) Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/*
+ * This file provides common libc dependencies that zstd requires.
+ * The purpose is to allow replacing this file with a custom implementation
+ * to compile zstd without libc support.
+ */
+
+/* Need:
+ * NULL
+ * INT_MAX
+ * UINT_MAX
+ * ZSTD_memcpy()
+ * ZSTD_memset()
+ * ZSTD_memmove()
+ */
+#ifndef ZSTD_DEPS_COMMON
+#define ZSTD_DEPS_COMMON
+
+#include <linux/limits.h>
+#include <linux/stddef.h>
+
+#define ZSTD_memcpy(d,s,n) __builtin_memcpy((d),(s),(n))
+#define ZSTD_memmove(d,s,n) __builtin_memmove((d),(s),(n))
+#define ZSTD_memset(d,s,n) __builtin_memset((d),(s),(n))
+
+#endif /* ZSTD_DEPS_COMMON */
+
+/*
+ * Define malloc as always failing. That means the user must
+ * either use ZSTD_customMem or statically allocate memory.
+ * Need:
+ * ZSTD_malloc()
+ * ZSTD_free()
+ * ZSTD_calloc()
+ */
+#ifdef ZSTD_DEPS_NEED_MALLOC
+#ifndef ZSTD_DEPS_MALLOC
+#define ZSTD_DEPS_MALLOC
+
+#define ZSTD_malloc(s) ({ (void)(s); NULL; })
+#define ZSTD_free(p) ((void)(p))
+#define ZSTD_calloc(n,s) ({ (void)(n); (void)(s); NULL; })
+
+#endif /* ZSTD_DEPS_MALLOC */
+#endif /* ZSTD_DEPS_NEED_MALLOC */
+
+/*
+ * Provides 64-bit math support.
+ * Need:
+ * U64 ZSTD_div64(U64 dividend, U32 divisor)
+ */
+#ifdef ZSTD_DEPS_NEED_MATH64
+#ifndef ZSTD_DEPS_MATH64
+#define ZSTD_DEPS_MATH64
+
+#include <linux/math64.h>
+
+static uint64_t ZSTD_div64(uint64_t dividend, uint32_t divisor) {
+ return div_u64(dividend, divisor);
+}
+
+#endif /* ZSTD_DEPS_MATH64 */
+#endif /* ZSTD_DEPS_NEED_MATH64 */
+
+/*
+ * This is only requested when DEBUGLEVEL >= 1, meaning
+ * it is disabled in production.
+ * Need:
+ * assert()
+ */
+#ifdef ZSTD_DEPS_NEED_ASSERT
+#ifndef ZSTD_DEPS_ASSERT
+#define ZSTD_DEPS_ASSERT
+
+#include <linux/kernel.h>
+
+#define assert(x) WARN_ON(!(x)) /* warn when the asserted condition is false */
+
+#endif /* ZSTD_DEPS_ASSERT */
+#endif /* ZSTD_DEPS_NEED_ASSERT */
+
+/*
+ * This is only requested when DEBUGLEVEL >= 2, meaning
+ * it is disabled in production.
+ * Need:
+ * ZSTD_DEBUG_PRINT()
+ */
+#ifdef ZSTD_DEPS_NEED_IO
+#ifndef ZSTD_DEPS_IO
+#define ZSTD_DEPS_IO
+
+#include <linux/printk.h>
+
+#define ZSTD_DEBUG_PRINT(...) pr_debug(__VA_ARGS__)
+
+#endif /* ZSTD_DEPS_IO */
+#endif /* ZSTD_DEPS_NEED_IO */
+
+/*
+ * Only requested when MSAN is enabled.
+ * Need:
+ * intptr_t
+ */
+#ifdef ZSTD_DEPS_NEED_STDINT
+#ifndef ZSTD_DEPS_STDINT
+#define ZSTD_DEPS_STDINT
+
+/*
+ * The Linux Kernel doesn't provide intptr_t, only uintptr_t, which
+ * is an unsigned long.
+ */
+typedef long intptr_t;
+
+#endif /* ZSTD_DEPS_STDINT */
+#endif /* ZSTD_DEPS_NEED_STDINT */
diff --git a/lib/zstd/common/zstd_internal.h b/lib/zstd/common/zstd_internal.h
new file mode 100644
index 00000000000000..fc6f3a9b40c075
--- /dev/null
+++ b/lib/zstd/common/zstd_internal.h
@@ -0,0 +1,450 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_CCOMMON_H_MODULE
+#define ZSTD_CCOMMON_H_MODULE
+
+/* this module contains definitions which must be identical
+ * across compression, decompression and dictBuilder.
+ * It also contains a few functions useful to at least 2 of them
+ * and which benefit from being inlined */
+
+/*-*************************************
+* Dependencies
+***************************************/
+#include "compiler.h"
+#include "mem.h"
+#include "debug.h" /* assert, DEBUGLOG, RAWLOG, g_debuglevel */
+#include "error_private.h"
+#define ZSTD_STATIC_LINKING_ONLY
+#include <linux/zstd.h>
+#define FSE_STATIC_LINKING_ONLY
+#include "fse.h"
+#define HUF_STATIC_LINKING_ONLY
+#include "huf.h"
+#include <linux/xxhash.h> /* XXH_reset, update, digest */
+#define ZSTD_TRACE 0
+
+
+/* ---- static assert (debug) --- */
+#define ZSTD_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)
+#define ZSTD_isError ERR_isError /* for inlining */
+#define FSE_isError ERR_isError
+#define HUF_isError ERR_isError
+
+
+/*-*************************************
+* shared macros
+***************************************/
+#undef MIN
+#undef MAX
+#define MIN(a,b) ((a)<(b) ? (a) : (b))
+#define MAX(a,b) ((a)>(b) ? (a) : (b))
+
+/*
+ * Ignore: this is an internal helper.
+ *
+ * This is a helper function used to force C99-correctness during compilation.
+ * Under strict compilation modes, variadic macro arguments can't be empty.
+ * However, variadic function arguments can be. Using a function therefore lets
+ * us statically check that at least one (string) argument was passed,
+ * independent of the compilation flags.
+ */
+static INLINE_KEYWORD UNUSED_ATTR
+void _force_has_format_string(const char *format, ...) {
+ (void)format;
+}
+
+/*
+ * Ignore: this is an internal helper.
+ *
+ * We want to force this function invocation to be syntactically correct, but
+ * we don't want to force runtime evaluation of its arguments.
+ */
+#define _FORCE_HAS_FORMAT_STRING(...) \
+ if (0) { \
+ _force_has_format_string(__VA_ARGS__); \
+ }
+
+/*
+ * Return the specified error if the condition evaluates to true.
+ *
+ * In debug modes, prints additional information.
+ * In order to do that (particularly, printing the conditional that failed),
+ * this can't just wrap RETURN_ERROR().
+ */
+#define RETURN_ERROR_IF(cond, err, ...) \
+ if (cond) { \
+ RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", \
+ __FILE__, __LINE__, ZSTD_QUOTE(cond), ZSTD_QUOTE(ERROR(err))); \
+ _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
+ RAWLOG(3, ": " __VA_ARGS__); \
+ RAWLOG(3, "\n"); \
+ return ERROR(err); \
+ }
+
+/*
+ * Unconditionally return the specified error.
+ *
+ * In debug modes, prints additional information.
+ */
+#define RETURN_ERROR(err, ...) \
+ do { \
+ RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", \
+ __FILE__, __LINE__, ZSTD_QUOTE(ERROR(err))); \
+ _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
+ RAWLOG(3, ": " __VA_ARGS__); \
+ RAWLOG(3, "\n"); \
+ return ERROR(err); \
+ } while(0);
+
+/*
+ * If the provided expression evaluates to an error code, returns that error code.
+ *
+ * In debug modes, prints additional information.
+ */
+#define FORWARD_IF_ERROR(err, ...) \
+ do { \
+ size_t const err_code = (err); \
+ if (ERR_isError(err_code)) { \
+ RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", \
+ __FILE__, __LINE__, ZSTD_QUOTE(err), ERR_getErrorName(err_code)); \
+ _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
+ RAWLOG(3, ": " __VA_ARGS__); \
+ RAWLOG(3, "\n"); \
+ return err_code; \
+ } \
+ } while(0);
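+
+/*
+ * Typical use inside a size_t-returning function (sketch; `decodeLiterals` is a
+ * stand-in name for any helper that itself returns a size_t error code) :
+ *     RETURN_ERROR_IF(dstCapacity < srcSize, dstSize_tooSmall, "need %u bytes", (unsigned)srcSize);
+ *     FORWARD_IF_ERROR(decodeLiterals(dctx, src, srcSize), "literals block failed");
+ */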
+
+
+/*-*************************************
+* Common constants
+***************************************/
+#define ZSTD_OPT_NUM (1<<12)
+
+#define ZSTD_REP_NUM 3 /* number of repcodes */
+#define ZSTD_REP_MOVE (ZSTD_REP_NUM-1)
+static UNUSED_ATTR const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 };
+
+#define KB *(1 <<10)
+#define MB *(1 <<20)
+#define GB *(1U<<30)
+
+#define BIT7 128
+#define BIT6 64
+#define BIT5 32
+#define BIT4 16
+#define BIT1 2
+#define BIT0 1
+
+#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10
+static UNUSED_ATTR const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 };
+static UNUSED_ATTR const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 };
+
+#define ZSTD_FRAMEIDSIZE 4 /* magic number size */
+
+#define ZSTD_BLOCKHEADERSIZE 3 /* C standard doesn't allow a `static const` variable to be initialized using another `static const` variable */
+static UNUSED_ATTR const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
+typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;
+
+#define ZSTD_FRAMECHECKSUMSIZE 4
+
+#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
+#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */
+
+#define HufLog 12
+typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e;
+
+#define LONGNBSEQ 0x7F00
+
+#define MINMATCH 3
+
+#define Litbits 8
+#define MaxLit ((1<<Litbits) - 1)
+#define MaxML 52
+#define MaxLL 35
+#define DefaultMaxOff 28
+#define MaxOff 31
+#define MaxSeq MAX(MaxLL, MaxML) /* Assumption : MaxOff < MaxLL,MaxML */
+#define MLFSELog 9
+#define LLFSELog 9
+#define OffFSELog 8
+#define MaxFSELog MAX(MAX(MLFSELog, LLFSELog), OffFSELog)
+
+#define ZSTD_MAX_HUF_HEADER_SIZE 128 /* header + <= 127 byte tree description */
+/* Each table cannot take more than #symbols * FSELog bits */
+#define ZSTD_MAX_FSE_HEADERS_SIZE (((MaxML + 1) * MLFSELog + (MaxLL + 1) * LLFSELog + (MaxOff + 1) * OffFSELog + 7) / 8)
+
+static UNUSED_ATTR const U32 LL_bits[MaxLL+1] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 2, 2, 3, 3,
+ 4, 6, 7, 8, 9,10,11,12,
+ 13,14,15,16
+};
+static UNUSED_ATTR const S16 LL_defaultNorm[MaxLL+1] = {
+ 4, 3, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 3, 2, 1, 1, 1, 1, 1,
+ -1,-1,-1,-1
+};
+#define LL_DEFAULTNORMLOG 6 /* for static allocation */
+static UNUSED_ATTR const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG;
+
+static UNUSED_ATTR const U32 ML_bits[MaxML+1] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 2, 2, 3, 3,
+ 4, 4, 5, 7, 8, 9,10,11,
+ 12,13,14,15,16
+};
+static UNUSED_ATTR const S16 ML_defaultNorm[MaxML+1] = {
+ 1, 4, 3, 2, 2, 2, 2, 2,
+ 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1,-1,-1,
+ -1,-1,-1,-1,-1
+};
+#define ML_DEFAULTNORMLOG 6 /* for static allocation */
+static UNUSED_ATTR const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG;
+
+static UNUSED_ATTR const S16 OF_defaultNorm[DefaultMaxOff+1] = {
+ 1, 1, 1, 1, 1, 1, 2, 2,
+ 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ -1,-1,-1,-1,-1
+};
+#define OF_DEFAULTNORMLOG 5 /* for static allocation */
+static UNUSED_ATTR const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;
+
+
+/*-*******************************************
+* Shared functions to include for inlining
+*********************************************/
+static void ZSTD_copy8(void* dst, const void* src) {
+ ZSTD_memcpy(dst, src, 8);
+}
+
+#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
+static void ZSTD_copy16(void* dst, const void* src) {
+ ZSTD_memcpy(dst, src, 16);
+}
+#define COPY16(d,s) { ZSTD_copy16(d,s); d+=16; s+=16; }
+
+#define WILDCOPY_OVERLENGTH 32
+#define WILDCOPY_VECLEN 16
+
+typedef enum {
+ ZSTD_no_overlap,
+ ZSTD_overlap_src_before_dst
+ /* ZSTD_overlap_dst_before_src, */
+} ZSTD_overlap_e;
+
+/*! ZSTD_wildcopy() :
+ * Custom version of ZSTD_memcpy(), which can over-read/write up to WILDCOPY_OVERLENGTH bytes (even if length==0)
+ * @param ovtype controls the overlap detection
+ * - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
+ * - ZSTD_overlap_src_before_dst: The src and dst may overlap, but they MUST be at least 8 bytes apart.
+ * The src buffer must be before the dst buffer.
+ */
+MEM_STATIC FORCE_INLINE_ATTR
+void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e const ovtype)
+{
+ ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src;
+ const BYTE* ip = (const BYTE*)src;
+ BYTE* op = (BYTE*)dst;
+ BYTE* const oend = op + length;
+
+ assert(diff >= 8 || (ovtype == ZSTD_no_overlap && diff <= -WILDCOPY_VECLEN));
+
+ if (ovtype == ZSTD_overlap_src_before_dst && diff < WILDCOPY_VECLEN) {
+ /* Handle short offset copies. */
+ do {
+ COPY8(op, ip)
+ } while (op < oend);
+ } else {
+ assert(diff >= WILDCOPY_VECLEN || diff <= -WILDCOPY_VECLEN);
+ /* Separate out the first COPY16() call because the copy length is
+ * almost certain to be short, so the branches have different
+ * probabilities. Since it is almost certain to be short, only do
+ * one COPY16() in the first call. Then, do two calls per loop since
+ * at that point it is more likely to have a high trip count.
+ */
+#ifdef __aarch64__
+ do {
+ COPY16(op, ip);
+ }
+ while (op < oend);
+#else
+ ZSTD_copy16(op, ip);
+ if (16 >= length) return;
+ op += 16;
+ ip += 16;
+ do {
+ COPY16(op, ip);
+ COPY16(op, ip);
+ }
+ while (op < oend);
+#endif
+ }
+}
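Editorial sketch (not part of this patch) of the caller-side contract implied by the comment above: the destination needs WILDCOPY_OVERLENGTH bytes of slack, so callers typically fall back to an exact copy near the end of the output buffer. The `op`/`ip`/`oend` names are hypothetical, and the exact-copy fallback assumes the ZSTD_memmove() wrapper from zstd_deps.h.

BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;   /* last position where an over-writing copy is safe */
if (op + length <= oend_w) {
    /* Fast path: may write up to WILDCOPY_OVERLENGTH bytes past op+length. */
    ZSTD_wildcopy(op, ip, (ptrdiff_t)length, ZSTD_no_overlap);
} else {
    /* Close to the end of the buffer: do an exact, bounds-respecting copy instead. */
    ZSTD_memmove(op, ip, length);
}
op += length;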
+
+MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ size_t const length = MIN(dstCapacity, srcSize);
+ if (length > 0) {
+ ZSTD_memcpy(dst, src, length);
+ }
+ return length;
+}
+
+/* define "workspace is too large" as this number of times larger than needed */
+#define ZSTD_WORKSPACETOOLARGE_FACTOR 3
+
+/* when the workspace is continuously too large
+ * for at least this number of invocations,
+ * the context's memory usage is considered wasteful,
+ * because it's sized to handle a worst-case scenario which rarely happens.
+ * In that case, resize it down to free some memory */
+#define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128
+
+/* Controls whether the input/output buffer is buffered or stable. */
+typedef enum {
+ ZSTD_bm_buffered = 0, /* Buffer the input/output */
+ ZSTD_bm_stable = 1 /* ZSTD_inBuffer/ZSTD_outBuffer is stable */
+} ZSTD_bufferMode_e;
+
+
+/*-*******************************************
+* Private declarations
+*********************************************/
+typedef struct seqDef_s {
+ U32 offset; /* Offset code of the sequence */
+ U16 litLength;
+ U16 matchLength;
+} seqDef;
+
+typedef struct {
+ seqDef* sequencesStart;
+ seqDef* sequences; /* ptr to end of sequences */
+ BYTE* litStart;
+ BYTE* lit; /* ptr to end of literals */
+ BYTE* llCode;
+ BYTE* mlCode;
+ BYTE* ofCode;
+ size_t maxNbSeq;
+ size_t maxNbLit;
+
+ /* longLengthPos and longLengthID allow us to represent either a single litLength or matchLength
+ * in the seqStore that has a value larger than U16 (if one exists). To do so, we increment
+ * the existing value of the litLength or matchLength by 0x10000.
+ */
+ U32 longLengthID; /* 0 == no longLength; 1 == Represent the long literal; 2 == Represent the long match; */
+ U32 longLengthPos; /* Index of the sequence to apply long length modification to */
+} seqStore_t;
+
+typedef struct {
+ U32 litLength;
+ U32 matchLength;
+} ZSTD_sequenceLength;
+
+/*
+ * Returns the ZSTD_sequenceLength for the given sequence. It handles the decoding of long sequences
+ * indicated by longLengthPos and longLengthID, and adds MINMATCH back to matchLength.
+ */
+MEM_STATIC ZSTD_sequenceLength ZSTD_getSequenceLength(seqStore_t const* seqStore, seqDef const* seq)
+{
+ ZSTD_sequenceLength seqLen;
+ seqLen.litLength = seq->litLength;
+ seqLen.matchLength = seq->matchLength + MINMATCH;
+ if (seqStore->longLengthPos == (U32)(seq - seqStore->sequencesStart)) {
+ if (seqStore->longLengthID == 1) {
+ seqLen.litLength += 0xFFFF;
+ }
+ if (seqStore->longLengthID == 2) {
+ seqLen.matchLength += 0xFFFF;
+ }
+ }
+ return seqLen;
+}
+
+/*
+ * Contains the compressed frame size and an upper bound for the decompressed frame size.
+ * Note: before using `compressedSize`, check for errors using ZSTD_isError().
+ * Similarly, before using `decompressedBound`, check for errors using:
+ * `decompressedBound != ZSTD_CONTENTSIZE_ERROR`
+ */
+typedef struct {
+ size_t compressedSize;
+ unsigned long long decompressedBound;
+} ZSTD_frameSizeInfo; /* decompress & legacy */
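A tiny editorial illustration of the two checks the comment above asks for; how `info` gets filled in is left out, and ZSTD_isError()/ZSTD_CONTENTSIZE_ERROR are the public zstd API names.

ZSTD_frameSizeInfo info;
/* ... info filled in by a frame-size query ... */
if (ZSTD_isError(info.compressedSize)) {
    /* compressedSize carries an error code; do not use it as a size */
}
if (info.decompressedBound == ZSTD_CONTENTSIZE_ERROR) {
    /* no valid upper bound could be computed */
}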
+
+const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx); /* compress & dictBuilder */
+void ZSTD_seqToCodes(const seqStore_t* seqStorePtr); /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */
+
+/* custom memory allocation functions */
+void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem);
+void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem);
+void ZSTD_customFree(void* ptr, ZSTD_customMem customMem);
+
+
+MEM_STATIC U32 ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus */
+{
+ assert(val != 0);
+ {
+# if (__GNUC__ >= 3) /* GCC Intrinsic */
+ return __builtin_clz (val) ^ 31;
+# else /* Software version */
+ static const U32 DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
+ U32 v = val;
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ return DeBruijnClz[(v * 0x07C4ACDDU) >> 27];
+# endif
+ }
+}
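Editorial note: ZSTD_highbit32() returns the position of the highest set bit, i.e. floor(log2(val)); a few sample values for reference:

assert(ZSTD_highbit32(1)    == 0);
assert(ZSTD_highbit32(2)    == 1);
assert(ZSTD_highbit32(1000) == 9);   /* 512 <= 1000 < 1024 */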
+
+
+/* ZSTD_invalidateRepCodes() :
+ * ensures next compression will not use repcodes from previous block.
+ * Note : only works with regular variant;
+ * do not use with extDict variant ! */
+void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx); /* zstdmt, adaptive_compression (shouldn't get this definition from here) */
+
+
+typedef struct {
+ blockType_e blockType;
+ U32 lastBlock;
+ U32 origSize;
+} blockProperties_t; /* declared here for decompress and fullbench */
+
+/*! ZSTD_getcBlockSize() :
+ * Provides the size of compressed block from block header `src` */
+/* Used by: decompress, fullbench (does not get its definition from here) */
+size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
+ blockProperties_t* bpPtr);
+
+/*! ZSTD_decodeSeqHeaders() :
+ * decode sequence header from src */
+/* Used by: decompress, fullbench (does not get its definition from here) */
+size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
+ const void* src, size_t srcSize);
+
+
+
+#endif /* ZSTD_CCOMMON_H_MODULE */
diff --git a/lib/zstd/compress.c b/lib/zstd/compress.c
deleted file mode 100644
index b080264ed3adf3..00000000000000
--- a/lib/zstd/compress.c
+++ /dev/null
@@ -1,3485 +0,0 @@
-/**
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of https://github.com/facebook/zstd.
- * An additional grant of patent rights can be found in the PATENTS file in the
- * same directory.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation. This program is dual-licensed; you may select
- * either version 2 of the GNU General Public License ("GPL") or BSD license
- * ("BSD").
- */
-
-/*-*************************************
-* Dependencies
-***************************************/
-#include "fse.h"
-#include "huf.h"
-#include "mem.h"
-#include "zstd_internal.h" /* includes zstd.h */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/string.h> /* memset */
-
-/*-*************************************
-* Constants
-***************************************/
-static const U32 g_searchStrength = 8; /* control skip over incompressible data */
-#define HASH_READ_SIZE 8
-typedef enum { ZSTDcs_created = 0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;
-
-/*-*************************************
-* Helper functions
-***************************************/
-size_t ZSTD_compressBound(size_t srcSize) { return FSE_compressBound(srcSize) + 12; }
-
-/*-*************************************
-* Sequence storage
-***************************************/
-static void ZSTD_resetSeqStore(seqStore_t *ssPtr)
-{
- ssPtr->lit = ssPtr->litStart;
- ssPtr->sequences = ssPtr->sequencesStart;
- ssPtr->longLengthID = 0;
-}
-
-/*-*************************************
-* Context memory management
-***************************************/
-struct ZSTD_CCtx_s {
- const BYTE *nextSrc; /* next block here to continue on curr prefix */
- const BYTE *base; /* All regular indexes relative to this position */
- const BYTE *dictBase; /* extDict indexes relative to this position */
- U32 dictLimit; /* below that point, need extDict */
- U32 lowLimit; /* below that point, no more data */
- U32 nextToUpdate; /* index from which to continue dictionary update */
- U32 nextToUpdate3; /* index from which to continue dictionary update */
- U32 hashLog3; /* dispatch table : larger == faster, more memory */
- U32 loadedDictEnd; /* index of end of dictionary */
- U32 forceWindow; /* force back-references to respect limit of 1<<wLog, even for dictionary */
- U32 forceRawDict; /* Force loading dictionary in "content-only" mode (no header analysis) */
- ZSTD_compressionStage_e stage;
- U32 rep[ZSTD_REP_NUM];
- U32 repToConfirm[ZSTD_REP_NUM];
- U32 dictID;
- ZSTD_parameters params;
- void *workSpace;
- size_t workSpaceSize;
- size_t blockSize;
- U64 frameContentSize;
- struct xxh64_state xxhState;
- ZSTD_customMem customMem;
-
- seqStore_t seqStore; /* sequences storage ptrs */
- U32 *hashTable;
- U32 *hashTable3;
- U32 *chainTable;
- HUF_CElt *hufTable;
- U32 flagStaticTables;
- HUF_repeat flagStaticHufTable;
- FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
- FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
- FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
- unsigned tmpCounters[HUF_COMPRESS_WORKSPACE_SIZE_U32];
-};
-
-size_t ZSTD_CCtxWorkspaceBound(ZSTD_compressionParameters cParams)
-{
- size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << cParams.windowLog);
- U32 const divider = (cParams.searchLength == 3) ? 3 : 4;
- size_t const maxNbSeq = blockSize / divider;
- size_t const tokenSpace = blockSize + 11 * maxNbSeq;
- size_t const chainSize = (cParams.strategy == ZSTD_fast) ? 0 : (1 << cParams.chainLog);
- size_t const hSize = ((size_t)1) << cParams.hashLog;
- U32 const hashLog3 = (cParams.searchLength > 3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, cParams.windowLog);
- size_t const h3Size = ((size_t)1) << hashLog3;
- size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
- size_t const optSpace =
- ((MaxML + 1) + (MaxLL + 1) + (MaxOff + 1) + (1 << Litbits)) * sizeof(U32) + (ZSTD_OPT_NUM + 1) * (sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t));
- size_t const workspaceSize = tableSpace + (256 * sizeof(U32)) /* huffTable */ + tokenSpace +
- (((cParams.strategy == ZSTD_btopt) || (cParams.strategy == ZSTD_btopt2)) ? optSpace : 0);
-
- return ZSTD_ALIGN(sizeof(ZSTD_stack)) + ZSTD_ALIGN(sizeof(ZSTD_CCtx)) + ZSTD_ALIGN(workspaceSize);
-}
-
-static ZSTD_CCtx *ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
-{
- ZSTD_CCtx *cctx;
- if (!customMem.customAlloc || !customMem.customFree)
- return NULL;
- cctx = (ZSTD_CCtx *)ZSTD_malloc(sizeof(ZSTD_CCtx), customMem);
- if (!cctx)
- return NULL;
- memset(cctx, 0, sizeof(ZSTD_CCtx));
- cctx->customMem = customMem;
- return cctx;
-}
-
-ZSTD_CCtx *ZSTD_initCCtx(void *workspace, size_t workspaceSize)
-{
- ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
- ZSTD_CCtx *cctx = ZSTD_createCCtx_advanced(stackMem);
- if (cctx) {
- cctx->workSpace = ZSTD_stackAllocAll(cctx->customMem.opaque, &cctx->workSpaceSize);
- }
- return cctx;
-}
-
-size_t ZSTD_freeCCtx(ZSTD_CCtx *cctx)
-{
- if (cctx == NULL)
- return 0; /* support free on NULL */
- ZSTD_free(cctx->workSpace, cctx->customMem);
- ZSTD_free(cctx, cctx->customMem);
- return 0; /* reserved as a potential error code in the future */
-}
-
-const seqStore_t *ZSTD_getSeqStore(const ZSTD_CCtx *ctx) /* hidden interface */ { return &(ctx->seqStore); }
-
-static ZSTD_parameters ZSTD_getParamsFromCCtx(const ZSTD_CCtx *cctx) { return cctx->params; }
-
-/** ZSTD_checkParams() :
- ensure param values remain within authorized range.
- @return : 0, or an error code if one value is beyond authorized range */
-size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
-{
-#define CLAMPCHECK(val, min, max) \
- { \
- if ((val < min) | (val > max)) \
- return ERROR(compressionParameter_unsupported); \
- }
- CLAMPCHECK(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX);
- CLAMPCHECK(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX);
- CLAMPCHECK(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
- CLAMPCHECK(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
- CLAMPCHECK(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX);
- CLAMPCHECK(cParams.targetLength, ZSTD_TARGETLENGTH_MIN, ZSTD_TARGETLENGTH_MAX);
- if ((U32)(cParams.strategy) > (U32)ZSTD_btopt2)
- return ERROR(compressionParameter_unsupported);
- return 0;
-}
-
-/** ZSTD_cycleLog() :
- * condition for correct operation : hashLog > 1 */
-static U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
-{
- U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
- return hashLog - btScale;
-}
-
-/** ZSTD_adjustCParams() :
- optimize `cPar` for a given input (`srcSize` and `dictSize`).
- mostly downsizing to reduce memory consumption and initialization.
- Both `srcSize` and `dictSize` are optional (use 0 if unknown),
- but if both are 0, no optimization can be done.
- Note : cPar is considered validated at this stage. Use ZSTD_checkParams() to ensure that. */
-ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize)
-{
- if (srcSize + dictSize == 0)
- return cPar; /* no size information available : no adjustment */
-
- /* resize params, to use less memory when necessary */
- {
- U32 const minSrcSize = (srcSize == 0) ? 500 : 0;
- U64 const rSize = srcSize + dictSize + minSrcSize;
- if (rSize < ((U64)1 << ZSTD_WINDOWLOG_MAX)) {
- U32 const srcLog = MAX(ZSTD_HASHLOG_MIN, ZSTD_highbit32((U32)(rSize)-1) + 1);
- if (cPar.windowLog > srcLog)
- cPar.windowLog = srcLog;
- }
- }
- if (cPar.hashLog > cPar.windowLog)
- cPar.hashLog = cPar.windowLog;
- {
- U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
- if (cycleLog > cPar.windowLog)
- cPar.chainLog -= (cycleLog - cPar.windowLog);
- }
-
- if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
- cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* required for frame header */
-
- return cPar;
-}
-
-static U32 ZSTD_equivalentParams(ZSTD_parameters param1, ZSTD_parameters param2)
-{
- return (param1.cParams.hashLog == param2.cParams.hashLog) & (param1.cParams.chainLog == param2.cParams.chainLog) &
- (param1.cParams.strategy == param2.cParams.strategy) & ((param1.cParams.searchLength == 3) == (param2.cParams.searchLength == 3));
-}
-
-/*! ZSTD_continueCCtx() :
- reuse CCtx without reset (note : requires no dictionary) */
-static size_t ZSTD_continueCCtx(ZSTD_CCtx *cctx, ZSTD_parameters params, U64 frameContentSize)
-{
- U32 const end = (U32)(cctx->nextSrc - cctx->base);
- cctx->params = params;
- cctx->frameContentSize = frameContentSize;
- cctx->lowLimit = end;
- cctx->dictLimit = end;
- cctx->nextToUpdate = end + 1;
- cctx->stage = ZSTDcs_init;
- cctx->dictID = 0;
- cctx->loadedDictEnd = 0;
- {
- int i;
- for (i = 0; i < ZSTD_REP_NUM; i++)
- cctx->rep[i] = repStartValue[i];
- }
- cctx->seqStore.litLengthSum = 0; /* force reset of btopt stats */
- xxh64_reset(&cctx->xxhState, 0);
- return 0;
-}
-
-typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset, ZSTDcrp_fullReset } ZSTD_compResetPolicy_e;
-
-/*! ZSTD_resetCCtx_advanced() :
- note : `params` must be validated */
-static size_t ZSTD_resetCCtx_advanced(ZSTD_CCtx *zc, ZSTD_parameters params, U64 frameContentSize, ZSTD_compResetPolicy_e const crp)
-{
- if (crp == ZSTDcrp_continue)
- if (ZSTD_equivalentParams(params, zc->params)) {
- zc->flagStaticTables = 0;
- zc->flagStaticHufTable = HUF_repeat_none;
- return ZSTD_continueCCtx(zc, params, frameContentSize);
- }
-
- {
- size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << params.cParams.windowLog);
- U32 const divider = (params.cParams.searchLength == 3) ? 3 : 4;
- size_t const maxNbSeq = blockSize / divider;
- size_t const tokenSpace = blockSize + 11 * maxNbSeq;
- size_t const chainSize = (params.cParams.strategy == ZSTD_fast) ? 0 : (1 << params.cParams.chainLog);
- size_t const hSize = ((size_t)1) << params.cParams.hashLog;
- U32 const hashLog3 = (params.cParams.searchLength > 3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, params.cParams.windowLog);
- size_t const h3Size = ((size_t)1) << hashLog3;
- size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
- void *ptr;
-
- /* Check if workSpace is large enough, alloc a new one if needed */
- {
- size_t const optSpace = ((MaxML + 1) + (MaxLL + 1) + (MaxOff + 1) + (1 << Litbits)) * sizeof(U32) +
- (ZSTD_OPT_NUM + 1) * (sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t));
- size_t const neededSpace = tableSpace + (256 * sizeof(U32)) /* huffTable */ + tokenSpace +
- (((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btopt2)) ? optSpace : 0);
- if (zc->workSpaceSize < neededSpace) {
- ZSTD_free(zc->workSpace, zc->customMem);
- zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem);
- if (zc->workSpace == NULL)
- return ERROR(memory_allocation);
- zc->workSpaceSize = neededSpace;
- }
- }
-
- if (crp != ZSTDcrp_noMemset)
- memset(zc->workSpace, 0, tableSpace); /* reset tables only */
- xxh64_reset(&zc->xxhState, 0);
- zc->hashLog3 = hashLog3;
- zc->hashTable = (U32 *)(zc->workSpace);
- zc->chainTable = zc->hashTable + hSize;
- zc->hashTable3 = zc->chainTable + chainSize;
- ptr = zc->hashTable3 + h3Size;
- zc->hufTable = (HUF_CElt *)ptr;
- zc->flagStaticTables = 0;
- zc->flagStaticHufTable = HUF_repeat_none;
- ptr = ((U32 *)ptr) + 256; /* note : HUF_CElt* is incomplete type, size is simulated using U32 */
-
- zc->nextToUpdate = 1;
- zc->nextSrc = NULL;
- zc->base = NULL;
- zc->dictBase = NULL;
- zc->dictLimit = 0;
- zc->lowLimit = 0;
- zc->params = params;
- zc->blockSize = blockSize;
- zc->frameContentSize = frameContentSize;
- {
- int i;
- for (i = 0; i < ZSTD_REP_NUM; i++)
- zc->rep[i] = repStartValue[i];
- }
-
- if ((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btopt2)) {
- zc->seqStore.litFreq = (U32 *)ptr;
- zc->seqStore.litLengthFreq = zc->seqStore.litFreq + (1 << Litbits);
- zc->seqStore.matchLengthFreq = zc->seqStore.litLengthFreq + (MaxLL + 1);
- zc->seqStore.offCodeFreq = zc->seqStore.matchLengthFreq + (MaxML + 1);
- ptr = zc->seqStore.offCodeFreq + (MaxOff + 1);
- zc->seqStore.matchTable = (ZSTD_match_t *)ptr;
- ptr = zc->seqStore.matchTable + ZSTD_OPT_NUM + 1;
- zc->seqStore.priceTable = (ZSTD_optimal_t *)ptr;
- ptr = zc->seqStore.priceTable + ZSTD_OPT_NUM + 1;
- zc->seqStore.litLengthSum = 0;
- }
- zc->seqStore.sequencesStart = (seqDef *)ptr;
- ptr = zc->seqStore.sequencesStart + maxNbSeq;
- zc->seqStore.llCode = (BYTE *)ptr;
- zc->seqStore.mlCode = zc->seqStore.llCode + maxNbSeq;
- zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq;
- zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq;
-
- zc->stage = ZSTDcs_init;
- zc->dictID = 0;
- zc->loadedDictEnd = 0;
-
- return 0;
- }
-}
-
-/* ZSTD_invalidateRepCodes() :
- * ensures next compression will not use repcodes from previous block.
- * Note : only works with regular variant;
- * do not use with extDict variant ! */
-void ZSTD_invalidateRepCodes(ZSTD_CCtx *cctx)
-{
- int i;
- for (i = 0; i < ZSTD_REP_NUM; i++)
- cctx->rep[i] = 0;
-}
-
-/*! ZSTD_copyCCtx() :
-* Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
-* Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
-* @return : 0, or an error code */
-size_t ZSTD_copyCCtx(ZSTD_CCtx *dstCCtx, const ZSTD_CCtx *srcCCtx, unsigned long long pledgedSrcSize)
-{
- if (srcCCtx->stage != ZSTDcs_init)
- return ERROR(stage_wrong);
-
- memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
- {
- ZSTD_parameters params = srcCCtx->params;
- params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
- ZSTD_resetCCtx_advanced(dstCCtx, params, pledgedSrcSize, ZSTDcrp_noMemset);
- }
-
- /* copy tables */
- {
- size_t const chainSize = (srcCCtx->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << srcCCtx->params.cParams.chainLog);
- size_t const hSize = ((size_t)1) << srcCCtx->params.cParams.hashLog;
- size_t const h3Size = (size_t)1 << srcCCtx->hashLog3;
- size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
- memcpy(dstCCtx->workSpace, srcCCtx->workSpace, tableSpace);
- }
-
- /* copy dictionary offsets */
- dstCCtx->nextToUpdate = srcCCtx->nextToUpdate;
- dstCCtx->nextToUpdate3 = srcCCtx->nextToUpdate3;
- dstCCtx->nextSrc = srcCCtx->nextSrc;
- dstCCtx->base = srcCCtx->base;
- dstCCtx->dictBase = srcCCtx->dictBase;
- dstCCtx->dictLimit = srcCCtx->dictLimit;
- dstCCtx->lowLimit = srcCCtx->lowLimit;
- dstCCtx->loadedDictEnd = srcCCtx->loadedDictEnd;
- dstCCtx->dictID = srcCCtx->dictID;
-
- /* copy entropy tables */
- dstCCtx->flagStaticTables = srcCCtx->flagStaticTables;
- dstCCtx->flagStaticHufTable = srcCCtx->flagStaticHufTable;
- if (srcCCtx->flagStaticTables) {
- memcpy(dstCCtx->litlengthCTable, srcCCtx->litlengthCTable, sizeof(dstCCtx->litlengthCTable));
- memcpy(dstCCtx->matchlengthCTable, srcCCtx->matchlengthCTable, sizeof(dstCCtx->matchlengthCTable));
- memcpy(dstCCtx->offcodeCTable, srcCCtx->offcodeCTable, sizeof(dstCCtx->offcodeCTable));
- }
- if (srcCCtx->flagStaticHufTable) {
- memcpy(dstCCtx->hufTable, srcCCtx->hufTable, 256 * 4);
- }
-
- return 0;
-}
-
-/*! ZSTD_reduceTable() :
-* reduce table indexes by `reducerValue` */
-static void ZSTD_reduceTable(U32 *const table, U32 const size, U32 const reducerValue)
-{
- U32 u;
- for (u = 0; u < size; u++) {
- if (table[u] < reducerValue)
- table[u] = 0;
- else
- table[u] -= reducerValue;
- }
-}
-
-/*! ZSTD_reduceIndex() :
-* rescale all indexes to avoid future overflow (indexes are U32) */
-static void ZSTD_reduceIndex(ZSTD_CCtx *zc, const U32 reducerValue)
-{
- {
- U32 const hSize = 1 << zc->params.cParams.hashLog;
- ZSTD_reduceTable(zc->hashTable, hSize, reducerValue);
- }
-
- {
- U32 const chainSize = (zc->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << zc->params.cParams.chainLog);
- ZSTD_reduceTable(zc->chainTable, chainSize, reducerValue);
- }
-
- {
- U32 const h3Size = (zc->hashLog3) ? 1 << zc->hashLog3 : 0;
- ZSTD_reduceTable(zc->hashTable3, h3Size, reducerValue);
- }
-}
-
-/*-*******************************************************
-* Block entropic compression
-*********************************************************/
-
-/* See doc/zstd_compression_format.md for detailed format description */
-
-size_t ZSTD_noCompressBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
-{
- if (srcSize + ZSTD_blockHeaderSize > dstCapacity)
- return ERROR(dstSize_tooSmall);
- memcpy((BYTE *)dst + ZSTD_blockHeaderSize, src, srcSize);
- ZSTD_writeLE24(dst, (U32)(srcSize << 2) + (U32)bt_raw);
- return ZSTD_blockHeaderSize + srcSize;
-}
-
-static size_t ZSTD_noCompressLiterals(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
-{
- BYTE *const ostart = (BYTE * const)dst;
- U32 const flSize = 1 + (srcSize > 31) + (srcSize > 4095);
-
- if (srcSize + flSize > dstCapacity)
- return ERROR(dstSize_tooSmall);
-
- switch (flSize) {
- case 1: /* 2 - 1 - 5 */ ostart[0] = (BYTE)((U32)set_basic + (srcSize << 3)); break;
- case 2: /* 2 - 2 - 12 */ ZSTD_writeLE16(ostart, (U16)((U32)set_basic + (1 << 2) + (srcSize << 4))); break;
- default: /*note : should not be necessary : flSize is within {1,2,3} */
- case 3: /* 2 - 2 - 20 */ ZSTD_writeLE32(ostart, (U32)((U32)set_basic + (3 << 2) + (srcSize << 4))); break;
- }
-
- memcpy(ostart + flSize, src, srcSize);
- return srcSize + flSize;
-}
-
-static size_t ZSTD_compressRleLiteralsBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
-{
- BYTE *const ostart = (BYTE * const)dst;
- U32 const flSize = 1 + (srcSize > 31) + (srcSize > 4095);
-
- (void)dstCapacity; /* dstCapacity already guaranteed to be >=4, hence large enough */
-
- switch (flSize) {
- case 1: /* 2 - 1 - 5 */ ostart[0] = (BYTE)((U32)set_rle + (srcSize << 3)); break;
- case 2: /* 2 - 2 - 12 */ ZSTD_writeLE16(ostart, (U16)((U32)set_rle + (1 << 2) + (srcSize << 4))); break;
- default: /*note : should not be necessary : flSize is necessarily within {1,2,3} */
- case 3: /* 2 - 2 - 20 */ ZSTD_writeLE32(ostart, (U32)((U32)set_rle + (3 << 2) + (srcSize << 4))); break;
- }
-
- ostart[flSize] = *(const BYTE *)src;
- return flSize + 1;
-}
-
-static size_t ZSTD_minGain(size_t srcSize) { return (srcSize >> 6) + 2; }
-
-static size_t ZSTD_compressLiterals(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
-{
- size_t const minGain = ZSTD_minGain(srcSize);
- size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
- BYTE *const ostart = (BYTE *)dst;
- U32 singleStream = srcSize < 256;
- symbolEncodingType_e hType = set_compressed;
- size_t cLitSize;
-
-/* small ? don't even attempt compression (speed opt) */
-#define LITERAL_NOENTROPY 63
- {
- size_t const minLitSize = zc->flagStaticHufTable == HUF_repeat_valid ? 6 : LITERAL_NOENTROPY;
- if (srcSize <= minLitSize)
- return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
- }
-
- if (dstCapacity < lhSize + 1)
- return ERROR(dstSize_tooSmall); /* not enough space for compression */
- {
- HUF_repeat repeat = zc->flagStaticHufTable;
- int const preferRepeat = zc->params.cParams.strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
- if (repeat == HUF_repeat_valid && lhSize == 3)
- singleStream = 1;
- cLitSize = singleStream ? HUF_compress1X_repeat(ostart + lhSize, dstCapacity - lhSize, src, srcSize, 255, 11, zc->tmpCounters,
- sizeof(zc->tmpCounters), zc->hufTable, &repeat, preferRepeat)
- : HUF_compress4X_repeat(ostart + lhSize, dstCapacity - lhSize, src, srcSize, 255, 11, zc->tmpCounters,
- sizeof(zc->tmpCounters), zc->hufTable, &repeat, preferRepeat);
- if (repeat != HUF_repeat_none) {
- hType = set_repeat;
- } /* reused the existing table */
- else {
- zc->flagStaticHufTable = HUF_repeat_check;
- } /* now have a table to reuse */
- }
-
- if ((cLitSize == 0) | (cLitSize >= srcSize - minGain)) {
- zc->flagStaticHufTable = HUF_repeat_none;
- return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
- }
- if (cLitSize == 1) {
- zc->flagStaticHufTable = HUF_repeat_none;
- return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
- }
-
- /* Build header */
- switch (lhSize) {
- case 3: /* 2 - 2 - 10 - 10 */
- {
- U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 14);
- ZSTD_writeLE24(ostart, lhc);
- break;
- }
- case 4: /* 2 - 2 - 14 - 14 */
- {
- U32 const lhc = hType + (2 << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 18);
- ZSTD_writeLE32(ostart, lhc);
- break;
- }
- default: /* should not be necessary, lhSize is only {3,4,5} */
- case 5: /* 2 - 2 - 18 - 18 */
- {
- U32 const lhc = hType + (3 << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 22);
- ZSTD_writeLE32(ostart, lhc);
- ostart[4] = (BYTE)(cLitSize >> 10);
- break;
- }
- }
- return lhSize + cLitSize;
-}
-
-static const BYTE LL_Code[64] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 16, 17, 17, 18, 18,
- 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23,
- 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24};
-
-static const BYTE ML_Code[128] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
- 26, 27, 28, 29, 30, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, 38, 38, 38, 38,
- 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
- 40, 40, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 42, 42, 42, 42, 42, 42, 42, 42,
- 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42};
-
-void ZSTD_seqToCodes(const seqStore_t *seqStorePtr)
-{
- BYTE const LL_deltaCode = 19;
- BYTE const ML_deltaCode = 36;
- const seqDef *const sequences = seqStorePtr->sequencesStart;
- BYTE *const llCodeTable = seqStorePtr->llCode;
- BYTE *const ofCodeTable = seqStorePtr->ofCode;
- BYTE *const mlCodeTable = seqStorePtr->mlCode;
- U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
- U32 u;
- for (u = 0; u < nbSeq; u++) {
- U32 const llv = sequences[u].litLength;
- U32 const mlv = sequences[u].matchLength;
- llCodeTable[u] = (llv > 63) ? (BYTE)ZSTD_highbit32(llv) + LL_deltaCode : LL_Code[llv];
- ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset);
- mlCodeTable[u] = (mlv > 127) ? (BYTE)ZSTD_highbit32(mlv) + ML_deltaCode : ML_Code[mlv];
- }
- if (seqStorePtr->longLengthID == 1)
- llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
- if (seqStorePtr->longLengthID == 2)
- mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
-}
-
-ZSTD_STATIC size_t ZSTD_compressSequences_internal(ZSTD_CCtx *zc, void *dst, size_t dstCapacity)
-{
- const int longOffsets = zc->params.cParams.windowLog > STREAM_ACCUMULATOR_MIN;
- const seqStore_t *seqStorePtr = &(zc->seqStore);
- FSE_CTable *CTable_LitLength = zc->litlengthCTable;
- FSE_CTable *CTable_OffsetBits = zc->offcodeCTable;
- FSE_CTable *CTable_MatchLength = zc->matchlengthCTable;
- U32 LLtype, Offtype, MLtype; /* compressed, raw or rle */
- const seqDef *const sequences = seqStorePtr->sequencesStart;
- const BYTE *const ofCodeTable = seqStorePtr->ofCode;
- const BYTE *const llCodeTable = seqStorePtr->llCode;
- const BYTE *const mlCodeTable = seqStorePtr->mlCode;
- BYTE *const ostart = (BYTE *)dst;
- BYTE *const oend = ostart + dstCapacity;
- BYTE *op = ostart;
- size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
- BYTE *seqHead;
-
- U32 *count;
- S16 *norm;
- U32 *workspace;
- size_t workspaceSize = sizeof(zc->tmpCounters);
- {
- size_t spaceUsed32 = 0;
- count = (U32 *)zc->tmpCounters + spaceUsed32;
- spaceUsed32 += MaxSeq + 1;
- norm = (S16 *)((U32 *)zc->tmpCounters + spaceUsed32);
- spaceUsed32 += ALIGN(sizeof(S16) * (MaxSeq + 1), sizeof(U32)) >> 2;
-
- workspace = (U32 *)zc->tmpCounters + spaceUsed32;
- workspaceSize -= (spaceUsed32 << 2);
- }
-
- /* Compress literals */
- {
- const BYTE *const literals = seqStorePtr->litStart;
- size_t const litSize = seqStorePtr->lit - literals;
- size_t const cSize = ZSTD_compressLiterals(zc, op, dstCapacity, literals, litSize);
- if (ZSTD_isError(cSize))
- return cSize;
- op += cSize;
- }
-
- /* Sequences Header */
- if ((oend - op) < 3 /*max nbSeq Size*/ + 1 /*seqHead */)
- return ERROR(dstSize_tooSmall);
- if (nbSeq < 0x7F)
- *op++ = (BYTE)nbSeq;
- else if (nbSeq < LONGNBSEQ)
- op[0] = (BYTE)((nbSeq >> 8) + 0x80), op[1] = (BYTE)nbSeq, op += 2;
- else
- op[0] = 0xFF, ZSTD_writeLE16(op + 1, (U16)(nbSeq - LONGNBSEQ)), op += 3;
- if (nbSeq == 0)
- return op - ostart;
-
- /* seqHead : flags for FSE encoding type */
- seqHead = op++;
-
-#define MIN_SEQ_FOR_DYNAMIC_FSE 64
-#define MAX_SEQ_FOR_STATIC_FSE 1000
-
- /* convert length/distances into codes */
- ZSTD_seqToCodes(seqStorePtr);
-
- /* CTable for Literal Lengths */
- {
- U32 max = MaxLL;
- size_t const mostFrequent = FSE_countFast_wksp(count, &max, llCodeTable, nbSeq, workspace);
- if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
- *op++ = llCodeTable[0];
- FSE_buildCTable_rle(CTable_LitLength, (BYTE)max);
- LLtype = set_rle;
- } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
- LLtype = set_repeat;
- } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (LL_defaultNormLog - 1)))) {
- FSE_buildCTable_wksp(CTable_LitLength, LL_defaultNorm, MaxLL, LL_defaultNormLog, workspace, workspaceSize);
- LLtype = set_basic;
- } else {
- size_t nbSeq_1 = nbSeq;
- const U32 tableLog = FSE_optimalTableLog(LLFSELog, nbSeq, max);
- if (count[llCodeTable[nbSeq - 1]] > 1) {
- count[llCodeTable[nbSeq - 1]]--;
- nbSeq_1--;
- }
- FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
- {
- size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
- if (FSE_isError(NCountSize))
- return NCountSize;
- op += NCountSize;
- }
- FSE_buildCTable_wksp(CTable_LitLength, norm, max, tableLog, workspace, workspaceSize);
- LLtype = set_compressed;
- }
- }
-
- /* CTable for Offsets */
- {
- U32 max = MaxOff;
- size_t const mostFrequent = FSE_countFast_wksp(count, &max, ofCodeTable, nbSeq, workspace);
- if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
- *op++ = ofCodeTable[0];
- FSE_buildCTable_rle(CTable_OffsetBits, (BYTE)max);
- Offtype = set_rle;
- } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
- Offtype = set_repeat;
- } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (OF_defaultNormLog - 1)))) {
- FSE_buildCTable_wksp(CTable_OffsetBits, OF_defaultNorm, MaxOff, OF_defaultNormLog, workspace, workspaceSize);
- Offtype = set_basic;
- } else {
- size_t nbSeq_1 = nbSeq;
- const U32 tableLog = FSE_optimalTableLog(OffFSELog, nbSeq, max);
- if (count[ofCodeTable[nbSeq - 1]] > 1) {
- count[ofCodeTable[nbSeq - 1]]--;
- nbSeq_1--;
- }
- FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
- {
- size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
- if (FSE_isError(NCountSize))
- return NCountSize;
- op += NCountSize;
- }
- FSE_buildCTable_wksp(CTable_OffsetBits, norm, max, tableLog, workspace, workspaceSize);
- Offtype = set_compressed;
- }
- }
-
- /* CTable for MatchLengths */
- {
- U32 max = MaxML;
- size_t const mostFrequent = FSE_countFast_wksp(count, &max, mlCodeTable, nbSeq, workspace);
- if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
- *op++ = *mlCodeTable;
- FSE_buildCTable_rle(CTable_MatchLength, (BYTE)max);
- MLtype = set_rle;
- } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
- MLtype = set_repeat;
- } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (ML_defaultNormLog - 1)))) {
- FSE_buildCTable_wksp(CTable_MatchLength, ML_defaultNorm, MaxML, ML_defaultNormLog, workspace, workspaceSize);
- MLtype = set_basic;
- } else {
- size_t nbSeq_1 = nbSeq;
- const U32 tableLog = FSE_optimalTableLog(MLFSELog, nbSeq, max);
- if (count[mlCodeTable[nbSeq - 1]] > 1) {
- count[mlCodeTable[nbSeq - 1]]--;
- nbSeq_1--;
- }
- FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
- {
- size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
- if (FSE_isError(NCountSize))
- return NCountSize;
- op += NCountSize;
- }
- FSE_buildCTable_wksp(CTable_MatchLength, norm, max, tableLog, workspace, workspaceSize);
- MLtype = set_compressed;
- }
- }
-
- *seqHead = (BYTE)((LLtype << 6) + (Offtype << 4) + (MLtype << 2));
- zc->flagStaticTables = 0;
-
- /* Encoding Sequences */
- {
- BIT_CStream_t blockStream;
- FSE_CState_t stateMatchLength;
- FSE_CState_t stateOffsetBits;
- FSE_CState_t stateLitLength;
-
- CHECK_E(BIT_initCStream(&blockStream, op, oend - op), dstSize_tooSmall); /* not enough space remaining */
-
- /* first symbols */
- FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq - 1]);
- FSE_initCState2(&stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq - 1]);
- FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq - 1]);
- BIT_addBits(&blockStream, sequences[nbSeq - 1].litLength, LL_bits[llCodeTable[nbSeq - 1]]);
- if (ZSTD_32bits())
- BIT_flushBits(&blockStream);
- BIT_addBits(&blockStream, sequences[nbSeq - 1].matchLength, ML_bits[mlCodeTable[nbSeq - 1]]);
- if (ZSTD_32bits())
- BIT_flushBits(&blockStream);
- if (longOffsets) {
- U32 const ofBits = ofCodeTable[nbSeq - 1];
- int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN - 1);
- if (extraBits) {
- BIT_addBits(&blockStream, sequences[nbSeq - 1].offset, extraBits);
- BIT_flushBits(&blockStream);
- }
- BIT_addBits(&blockStream, sequences[nbSeq - 1].offset >> extraBits, ofBits - extraBits);
- } else {
- BIT_addBits(&blockStream, sequences[nbSeq - 1].offset, ofCodeTable[nbSeq - 1]);
- }
- BIT_flushBits(&blockStream);
-
- {
- size_t n;
- for (n = nbSeq - 2; n < nbSeq; n--) { /* intentional underflow */
- BYTE const llCode = llCodeTable[n];
- BYTE const ofCode = ofCodeTable[n];
- BYTE const mlCode = mlCodeTable[n];
- U32 const llBits = LL_bits[llCode];
- U32 const ofBits = ofCode; /* 32b*/ /* 64b*/
- U32 const mlBits = ML_bits[mlCode];
- /* (7)*/ /* (7)*/
- FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode); /* 15 */ /* 15 */
- FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode); /* 24 */ /* 24 */
- if (ZSTD_32bits())
- BIT_flushBits(&blockStream); /* (7)*/
- FSE_encodeSymbol(&blockStream, &stateLitLength, llCode); /* 16 */ /* 33 */
- if (ZSTD_32bits() || (ofBits + mlBits + llBits >= 64 - 7 - (LLFSELog + MLFSELog + OffFSELog)))
- BIT_flushBits(&blockStream); /* (7)*/
- BIT_addBits(&blockStream, sequences[n].litLength, llBits);
- if (ZSTD_32bits() && ((llBits + mlBits) > 24))
- BIT_flushBits(&blockStream);
- BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
- if (ZSTD_32bits())
- BIT_flushBits(&blockStream); /* (7)*/
- if (longOffsets) {
- int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN - 1);
- if (extraBits) {
- BIT_addBits(&blockStream, sequences[n].offset, extraBits);
- BIT_flushBits(&blockStream); /* (7)*/
- }
- BIT_addBits(&blockStream, sequences[n].offset >> extraBits, ofBits - extraBits); /* 31 */
- } else {
- BIT_addBits(&blockStream, sequences[n].offset, ofBits); /* 31 */
- }
- BIT_flushBits(&blockStream); /* (7)*/
- }
- }
-
- FSE_flushCState(&blockStream, &stateMatchLength);
- FSE_flushCState(&blockStream, &stateOffsetBits);
- FSE_flushCState(&blockStream, &stateLitLength);
-
- {
- size_t const streamSize = BIT_closeCStream(&blockStream);
- if (streamSize == 0)
- return ERROR(dstSize_tooSmall); /* not enough space */
- op += streamSize;
- }
- }
- return op - ostart;
-}
-
-ZSTD_STATIC size_t ZSTD_compressSequences(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, size_t srcSize)
-{
- size_t const cSize = ZSTD_compressSequences_internal(zc, dst, dstCapacity);
- size_t const minGain = ZSTD_minGain(srcSize);
- size_t const maxCSize = srcSize - minGain;
- /* If the srcSize <= dstCapacity, then there is enough space to write a
- * raw uncompressed block. Since we ran out of space, the block must not
- * be compressible, so fall back to a raw uncompressed block.
- */
- int const uncompressibleError = cSize == ERROR(dstSize_tooSmall) && srcSize <= dstCapacity;
- int i;
-
- if (ZSTD_isError(cSize) && !uncompressibleError)
- return cSize;
- if (cSize >= maxCSize || uncompressibleError) {
- zc->flagStaticHufTable = HUF_repeat_none;
- return 0;
- }
- /* confirm repcodes */
- for (i = 0; i < ZSTD_REP_NUM; i++)
- zc->rep[i] = zc->repToConfirm[i];
- return cSize;
-}
-
-/*! ZSTD_storeSeq() :
- Store a sequence (literal length, literals, offset code and match length code) into seqStore_t.
- `offsetCode` : distance to match, or 0 == repCode.
- `matchCode` : matchLength - MINMATCH
-*/
-ZSTD_STATIC void ZSTD_storeSeq(seqStore_t *seqStorePtr, size_t litLength, const void *literals, U32 offsetCode, size_t matchCode)
-{
- /* copy Literals */
- ZSTD_wildcopy(seqStorePtr->lit, literals, litLength);
- seqStorePtr->lit += litLength;
-
- /* literal Length */
- if (litLength > 0xFFFF) {
- seqStorePtr->longLengthID = 1;
- seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
- }
- seqStorePtr->sequences[0].litLength = (U16)litLength;
-
- /* match offset */
- seqStorePtr->sequences[0].offset = offsetCode + 1;
-
- /* match Length */
- if (matchCode > 0xFFFF) {
- seqStorePtr->longLengthID = 2;
- seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
- }
- seqStorePtr->sequences[0].matchLength = (U16)matchCode;
-
- seqStorePtr->sequences++;
-}
-
-/*-*************************************
-* Match length counter
-***************************************/
-static unsigned ZSTD_NbCommonBytes(register size_t val)
-{
- if (ZSTD_isLittleEndian()) {
- if (ZSTD_64bits()) {
- return (__builtin_ctzll((U64)val) >> 3);
- } else { /* 32 bits */
- return (__builtin_ctz((U32)val) >> 3);
- }
- } else { /* Big Endian CPU */
- if (ZSTD_64bits()) {
- return (__builtin_clzll(val) >> 3);
- } else { /* 32 bits */
- return (__builtin_clz((U32)val) >> 3);
- }
- }
-}
-
-static size_t ZSTD_count(const BYTE *pIn, const BYTE *pMatch, const BYTE *const pInLimit)
-{
- const BYTE *const pStart = pIn;
- const BYTE *const pInLoopLimit = pInLimit - (sizeof(size_t) - 1);
-
- while (pIn < pInLoopLimit) {
- size_t const diff = ZSTD_readST(pMatch) ^ ZSTD_readST(pIn);
- if (!diff) {
- pIn += sizeof(size_t);
- pMatch += sizeof(size_t);
- continue;
- }
- pIn += ZSTD_NbCommonBytes(diff);
- return (size_t)(pIn - pStart);
- }
- if (ZSTD_64bits())
- if ((pIn < (pInLimit - 3)) && (ZSTD_read32(pMatch) == ZSTD_read32(pIn))) {
- pIn += 4;
- pMatch += 4;
- }
- if ((pIn < (pInLimit - 1)) && (ZSTD_read16(pMatch) == ZSTD_read16(pIn))) {
- pIn += 2;
- pMatch += 2;
- }
- if ((pIn < pInLimit) && (*pMatch == *pIn))
- pIn++;
- return (size_t)(pIn - pStart);
-}
-
-/** ZSTD_count_2segments() :
-* can count match length with `ip` & `match` in 2 different segments.
-* convention : on reaching mEnd, match count continue starting from iStart
-*/
-static size_t ZSTD_count_2segments(const BYTE *ip, const BYTE *match, const BYTE *iEnd, const BYTE *mEnd, const BYTE *iStart)
-{
- const BYTE *const vEnd = MIN(ip + (mEnd - match), iEnd);
- size_t const matchLength = ZSTD_count(ip, match, vEnd);
- if (match + matchLength != mEnd)
- return matchLength;
- return matchLength + ZSTD_count(ip + matchLength, iStart, iEnd);
-}
-
-/*-*************************************
-* Hashes
-***************************************/
-static const U32 prime3bytes = 506832829U;
-static U32 ZSTD_hash3(U32 u, U32 h) { return ((u << (32 - 24)) * prime3bytes) >> (32 - h); }
-ZSTD_STATIC size_t ZSTD_hash3Ptr(const void *ptr, U32 h) { return ZSTD_hash3(ZSTD_readLE32(ptr), h); } /* only in zstd_opt.h */
-
-static const U32 prime4bytes = 2654435761U;
-static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32 - h); }
-static size_t ZSTD_hash4Ptr(const void *ptr, U32 h) { return ZSTD_hash4(ZSTD_read32(ptr), h); }
-
-static const U64 prime5bytes = 889523592379ULL;
-static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64 - 40)) * prime5bytes) >> (64 - h)); }
-static size_t ZSTD_hash5Ptr(const void *p, U32 h) { return ZSTD_hash5(ZSTD_readLE64(p), h); }
-
-static const U64 prime6bytes = 227718039650203ULL;
-static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64 - 48)) * prime6bytes) >> (64 - h)); }
-static size_t ZSTD_hash6Ptr(const void *p, U32 h) { return ZSTD_hash6(ZSTD_readLE64(p), h); }
-
-static const U64 prime7bytes = 58295818150454627ULL;
-static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64 - 56)) * prime7bytes) >> (64 - h)); }
-static size_t ZSTD_hash7Ptr(const void *p, U32 h) { return ZSTD_hash7(ZSTD_readLE64(p), h); }
-
-static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
-static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u)*prime8bytes) >> (64 - h)); }
-static size_t ZSTD_hash8Ptr(const void *p, U32 h) { return ZSTD_hash8(ZSTD_readLE64(p), h); }
-
-static size_t ZSTD_hashPtr(const void *p, U32 hBits, U32 mls)
-{
- switch (mls) {
- // case 3: return ZSTD_hash3Ptr(p, hBits);
- default:
- case 4: return ZSTD_hash4Ptr(p, hBits);
- case 5: return ZSTD_hash5Ptr(p, hBits);
- case 6: return ZSTD_hash6Ptr(p, hBits);
- case 7: return ZSTD_hash7Ptr(p, hBits);
- case 8: return ZSTD_hash8Ptr(p, hBits);
- }
-}
-
-/*-*************************************
-* Fast Scan
-***************************************/
-static void ZSTD_fillHashTable(ZSTD_CCtx *zc, const void *end, const U32 mls)
-{
- U32 *const hashTable = zc->hashTable;
- U32 const hBits = zc->params.cParams.hashLog;
- const BYTE *const base = zc->base;
- const BYTE *ip = base + zc->nextToUpdate;
- const BYTE *const iend = ((const BYTE *)end) - HASH_READ_SIZE;
- const size_t fastHashFillStep = 3;
-
- while (ip <= iend) {
- hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base);
- ip += fastHashFillStep;
- }
-}
-
-FORCE_INLINE
-void ZSTD_compressBlock_fast_generic(ZSTD_CCtx *cctx, const void *src, size_t srcSize, const U32 mls)
-{
- U32 *const hashTable = cctx->hashTable;
- U32 const hBits = cctx->params.cParams.hashLog;
- seqStore_t *seqStorePtr = &(cctx->seqStore);
- const BYTE *const base = cctx->base;
- const BYTE *const istart = (const BYTE *)src;
- const BYTE *ip = istart;
- const BYTE *anchor = istart;
- const U32 lowestIndex = cctx->dictLimit;
- const BYTE *const lowest = base + lowestIndex;
- const BYTE *const iend = istart + srcSize;
- const BYTE *const ilimit = iend - HASH_READ_SIZE;
- U32 offset_1 = cctx->rep[0], offset_2 = cctx->rep[1];
- U32 offsetSaved = 0;
-
- /* init */
- ip += (ip == lowest);
- {
- U32 const maxRep = (U32)(ip - lowest);
- if (offset_2 > maxRep)
- offsetSaved = offset_2, offset_2 = 0;
- if (offset_1 > maxRep)
- offsetSaved = offset_1, offset_1 = 0;
- }
-
- /* Main Search Loop */
- while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
- size_t mLength;
- size_t const h = ZSTD_hashPtr(ip, hBits, mls);
- U32 const curr = (U32)(ip - base);
- U32 const matchIndex = hashTable[h];
- const BYTE *match = base + matchIndex;
- hashTable[h] = curr; /* update hash table */
-
- if ((offset_1 > 0) & (ZSTD_read32(ip + 1 - offset_1) == ZSTD_read32(ip + 1))) {
- mLength = ZSTD_count(ip + 1 + 4, ip + 1 + 4 - offset_1, iend) + 4;
- ip++;
- ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
- } else {
- U32 offset;
- if ((matchIndex <= lowestIndex) || (ZSTD_read32(match) != ZSTD_read32(ip))) {
- ip += ((ip - anchor) >> g_searchStrength) + 1;
- continue;
- }
- mLength = ZSTD_count(ip + 4, match + 4, iend) + 4;
- offset = (U32)(ip - match);
- while (((ip > anchor) & (match > lowest)) && (ip[-1] == match[-1])) {
- ip--;
- match--;
- mLength++;
- } /* catch up */
- offset_2 = offset_1;
- offset_1 = offset;
-
- ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
- }
-
- /* match found */
- ip += mLength;
- anchor = ip;
-
- if (ip <= ilimit) {
- /* Fill Table */
- hashTable[ZSTD_hashPtr(base + curr + 2, hBits, mls)] = curr + 2; /* here because curr+2 could be > iend-8 */
- hashTable[ZSTD_hashPtr(ip - 2, hBits, mls)] = (U32)(ip - 2 - base);
- /* check immediate repcode */
- while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) {
- /* store sequence */
- size_t const rLength = ZSTD_count(ip + 4, ip + 4 - offset_2, iend) + 4;
- {
- U32 const tmpOff = offset_2;
- offset_2 = offset_1;
- offset_1 = tmpOff;
- } /* swap offset_2 <=> offset_1 */
- hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base);
- ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength - MINMATCH);
- ip += rLength;
- anchor = ip;
- continue; /* faster when present ... (?) */
- }
- }
- }
-
- /* save reps for next block */
- cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
- cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;
-
- /* Last Literals */
- {
- size_t const lastLLSize = iend - anchor;
- memcpy(seqStorePtr->lit, anchor, lastLLSize);
- seqStorePtr->lit += lastLLSize;
- }
-}
-
-static void ZSTD_compressBlock_fast(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
-{
- const U32 mls = ctx->params.cParams.searchLength;
- switch (mls) {
- default: /* includes case 3 */
- case 4: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 4); return;
- case 5: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 5); return;
- case 6: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 6); return;
- case 7: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 7); return;
- }
-}
-
-static void ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 mls)
-{
- U32 *hashTable = ctx->hashTable;
- const U32 hBits = ctx->params.cParams.hashLog;
- seqStore_t *seqStorePtr = &(ctx->seqStore);
- const BYTE *const base = ctx->base;
- const BYTE *const dictBase = ctx->dictBase;
- const BYTE *const istart = (const BYTE *)src;
- const BYTE *ip = istart;
- const BYTE *anchor = istart;
- const U32 lowestIndex = ctx->lowLimit;
- const BYTE *const dictStart = dictBase + lowestIndex;
- const U32 dictLimit = ctx->dictLimit;
- const BYTE *const lowPrefixPtr = base + dictLimit;
- const BYTE *const dictEnd = dictBase + dictLimit;
- const BYTE *const iend = istart + srcSize;
- const BYTE *const ilimit = iend - 8;
- U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1];
-
- /* Search Loop */
- while (ip < ilimit) { /* < instead of <=, because (ip+1) */
- const size_t h = ZSTD_hashPtr(ip, hBits, mls);
- const U32 matchIndex = hashTable[h];
- const BYTE *matchBase = matchIndex < dictLimit ? dictBase : base;
- const BYTE *match = matchBase + matchIndex;
- const U32 curr = (U32)(ip - base);
- const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */
- const BYTE *repBase = repIndex < dictLimit ? dictBase : base;
- const BYTE *repMatch = repBase + repIndex;
- size_t mLength;
- hashTable[h] = curr; /* update hash table */
-
- if ((((U32)((dictLimit - 1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex)) &&
- (ZSTD_read32(repMatch) == ZSTD_read32(ip + 1))) {
- const BYTE *repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
- mLength = ZSTD_count_2segments(ip + 1 + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repMatchEnd, lowPrefixPtr) + EQUAL_READ32;
- ip++;
- ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
- } else {
- if ((matchIndex < lowestIndex) || (ZSTD_read32(match) != ZSTD_read32(ip))) {
- ip += ((ip - anchor) >> g_searchStrength) + 1;
- continue;
- }
- {
- const BYTE *matchEnd = matchIndex < dictLimit ? dictEnd : iend;
- const BYTE *lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
- U32 offset;
- mLength = ZSTD_count_2segments(ip + EQUAL_READ32, match + EQUAL_READ32, iend, matchEnd, lowPrefixPtr) + EQUAL_READ32;
- while (((ip > anchor) & (match > lowMatchPtr)) && (ip[-1] == match[-1])) {
- ip--;
- match--;
- mLength++;
- } /* catch up */
- offset = curr - matchIndex;
- offset_2 = offset_1;
- offset_1 = offset;
- ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
- }
- }
-
- /* found a match : store it */
- ip += mLength;
- anchor = ip;
-
- if (ip <= ilimit) {
- /* Fill Table */
- hashTable[ZSTD_hashPtr(base + curr + 2, hBits, mls)] = curr + 2;
- hashTable[ZSTD_hashPtr(ip - 2, hBits, mls)] = (U32)(ip - 2 - base);
- /* check immediate repcode */
- while (ip <= ilimit) {
- U32 const curr2 = (U32)(ip - base);
- U32 const repIndex2 = curr2 - offset_2;
- const BYTE *repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
- if ((((U32)((dictLimit - 1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */
- && (ZSTD_read32(repMatch2) == ZSTD_read32(ip))) {
- const BYTE *const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
- size_t repLength2 =
- ZSTD_count_2segments(ip + EQUAL_READ32, repMatch2 + EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32;
- U32 tmpOffset = offset_2;
- offset_2 = offset_1;
- offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
- ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2 - MINMATCH);
- hashTable[ZSTD_hashPtr(ip, hBits, mls)] = curr2;
- ip += repLength2;
- anchor = ip;
- continue;
- }
- break;
- }
- }
- }
-
- /* save reps for next block */
- ctx->repToConfirm[0] = offset_1;
- ctx->repToConfirm[1] = offset_2;
-
- /* Last Literals */
- {
- size_t const lastLLSize = iend - anchor;
- memcpy(seqStorePtr->lit, anchor, lastLLSize);
- seqStorePtr->lit += lastLLSize;
- }
-}
-
-static void ZSTD_compressBlock_fast_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
-{
- U32 const mls = ctx->params.cParams.searchLength;
- switch (mls) {
- default: /* includes case 3 */
- case 4: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 4); return;
- case 5: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 5); return;
- case 6: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 6); return;
- case 7: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 7); return;
- }
-}
-
-/*-*************************************
-* Double Fast
-***************************************/
-static void ZSTD_fillDoubleHashTable(ZSTD_CCtx *cctx, const void *end, const U32 mls)
-{
- U32 *const hashLarge = cctx->hashTable;
- U32 const hBitsL = cctx->params.cParams.hashLog;
- U32 *const hashSmall = cctx->chainTable;
- U32 const hBitsS = cctx->params.cParams.chainLog;
- const BYTE *const base = cctx->base;
- const BYTE *ip = base + cctx->nextToUpdate;
- const BYTE *const iend = ((const BYTE *)end) - HASH_READ_SIZE;
- const size_t fastHashFillStep = 3;
-
- while (ip <= iend) {
- hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip - base);
- hashLarge[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip - base);
- ip += fastHashFillStep;
- }
-}
-
-FORCE_INLINE
-void ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx *cctx, const void *src, size_t srcSize, const U32 mls)
-{
- U32 *const hashLong = cctx->hashTable;
- const U32 hBitsL = cctx->params.cParams.hashLog;
- U32 *const hashSmall = cctx->chainTable;
- const U32 hBitsS = cctx->params.cParams.chainLog;
- seqStore_t *seqStorePtr = &(cctx->seqStore);
- const BYTE *const base = cctx->base;
- const BYTE *const istart = (const BYTE *)src;
- const BYTE *ip = istart;
- const BYTE *anchor = istart;
- const U32 lowestIndex = cctx->dictLimit;
- const BYTE *const lowest = base + lowestIndex;
- const BYTE *const iend = istart + srcSize;
- const BYTE *const ilimit = iend - HASH_READ_SIZE;
- U32 offset_1 = cctx->rep[0], offset_2 = cctx->rep[1];
- U32 offsetSaved = 0;
-
- /* init */
- ip += (ip == lowest);
- {
- U32 const maxRep = (U32)(ip - lowest);
- if (offset_2 > maxRep)
- offsetSaved = offset_2, offset_2 = 0;
- if (offset_1 > maxRep)
- offsetSaved = offset_1, offset_1 = 0;
- }
-
- /* Main Search Loop */
- while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
- size_t mLength;
- size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
- size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
- U32 const curr = (U32)(ip - base);
- U32 const matchIndexL = hashLong[h2];
- U32 const matchIndexS = hashSmall[h];
- const BYTE *matchLong = base + matchIndexL;
- const BYTE *match = base + matchIndexS;
- hashLong[h2] = hashSmall[h] = curr; /* update hash tables */
-
- if ((offset_1 > 0) & (ZSTD_read32(ip + 1 - offset_1) == ZSTD_read32(ip + 1))) { /* note : by construction, offset_1 <= curr */
- mLength = ZSTD_count(ip + 1 + 4, ip + 1 + 4 - offset_1, iend) + 4;
- ip++;
- ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
- } else {
- U32 offset;
- if ((matchIndexL > lowestIndex) && (ZSTD_read64(matchLong) == ZSTD_read64(ip))) {
- mLength = ZSTD_count(ip + 8, matchLong + 8, iend) + 8;
- offset = (U32)(ip - matchLong);
- while (((ip > anchor) & (matchLong > lowest)) && (ip[-1] == matchLong[-1])) {
- ip--;
- matchLong--;
- mLength++;
- } /* catch up */
- } else if ((matchIndexS > lowestIndex) && (ZSTD_read32(match) == ZSTD_read32(ip))) {
- size_t const h3 = ZSTD_hashPtr(ip + 1, hBitsL, 8);
- U32 const matchIndex3 = hashLong[h3];
- const BYTE *match3 = base + matchIndex3;
- hashLong[h3] = curr + 1;
- if ((matchIndex3 > lowestIndex) && (ZSTD_read64(match3) == ZSTD_read64(ip + 1))) {
- mLength = ZSTD_count(ip + 9, match3 + 8, iend) + 8;
- ip++;
- offset = (U32)(ip - match3);
- while (((ip > anchor) & (match3 > lowest)) && (ip[-1] == match3[-1])) {
- ip--;
- match3--;
- mLength++;
- } /* catch up */
- } else {
- mLength = ZSTD_count(ip + 4, match + 4, iend) + 4;
- offset = (U32)(ip - match);
- while (((ip > anchor) & (match > lowest)) && (ip[-1] == match[-1])) {
- ip--;
- match--;
- mLength++;
- } /* catch up */
- }
- } else {
- ip += ((ip - anchor) >> g_searchStrength) + 1;
- continue;
- }
-
- offset_2 = offset_1;
- offset_1 = offset;
-
- ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
- }
-
- /* match found */
- ip += mLength;
- anchor = ip;
-
- if (ip <= ilimit) {
- /* Fill Table */
- hashLong[ZSTD_hashPtr(base + curr + 2, hBitsL, 8)] = hashSmall[ZSTD_hashPtr(base + curr + 2, hBitsS, mls)] =
- curr + 2; /* here because curr+2 could be > iend-8 */
- hashLong[ZSTD_hashPtr(ip - 2, hBitsL, 8)] = hashSmall[ZSTD_hashPtr(ip - 2, hBitsS, mls)] = (U32)(ip - 2 - base);
-
- /* check immediate repcode */
- while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) {
- /* store sequence */
- size_t const rLength = ZSTD_count(ip + 4, ip + 4 - offset_2, iend) + 4;
- {
- U32 const tmpOff = offset_2;
- offset_2 = offset_1;
- offset_1 = tmpOff;
- } /* swap offset_2 <=> offset_1 */
- hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip - base);
- hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip - base);
- ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength - MINMATCH);
- ip += rLength;
- anchor = ip;
- continue; /* faster when present ... (?) */
- }
- }
- }
-
- /* save reps for next block */
- cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
- cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;
-
- /* Last Literals */
- {
- size_t const lastLLSize = iend - anchor;
- memcpy(seqStorePtr->lit, anchor, lastLLSize);
- seqStorePtr->lit += lastLLSize;
- }
-}
-
-static void ZSTD_compressBlock_doubleFast(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
-{
- const U32 mls = ctx->params.cParams.searchLength;
- switch (mls) {
- default: /* includes case 3 */
- case 4: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 4); return;
- case 5: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 5); return;
- case 6: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 6); return;
- case 7: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 7); return;
- }
-}
-
-static void ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 mls)
-{
- U32 *const hashLong = ctx->hashTable;
- U32 const hBitsL = ctx->params.cParams.hashLog;
- U32 *const hashSmall = ctx->chainTable;
- U32 const hBitsS = ctx->params.cParams.chainLog;
- seqStore_t *seqStorePtr = &(ctx->seqStore);
- const BYTE *const base = ctx->base;
- const BYTE *const dictBase = ctx->dictBase;
- const BYTE *const istart = (const BYTE *)src;
- const BYTE *ip = istart;
- const BYTE *anchor = istart;
- const U32 lowestIndex = ctx->lowLimit;
- const BYTE *const dictStart = dictBase + lowestIndex;
- const U32 dictLimit = ctx->dictLimit;
- const BYTE *const lowPrefixPtr = base + dictLimit;
- const BYTE *const dictEnd = dictBase + dictLimit;
- const BYTE *const iend = istart + srcSize;
- const BYTE *const ilimit = iend - 8;
- U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1];
-
- /* Search Loop */
- while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
- const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
- const U32 matchIndex = hashSmall[hSmall];
- const BYTE *matchBase = matchIndex < dictLimit ? dictBase : base;
- const BYTE *match = matchBase + matchIndex;
-
- const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
- const U32 matchLongIndex = hashLong[hLong];
- const BYTE *matchLongBase = matchLongIndex < dictLimit ? dictBase : base;
- const BYTE *matchLong = matchLongBase + matchLongIndex;
-
- const U32 curr = (U32)(ip - base);
- const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */
- const BYTE *repBase = repIndex < dictLimit ? dictBase : base;
- const BYTE *repMatch = repBase + repIndex;
- size_t mLength;
- hashSmall[hSmall] = hashLong[hLong] = curr; /* update hash table */
-
- if ((((U32)((dictLimit - 1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex)) &&
- (ZSTD_read32(repMatch) == ZSTD_read32(ip + 1))) {
- const BYTE *repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
- mLength = ZSTD_count_2segments(ip + 1 + 4, repMatch + 4, iend, repMatchEnd, lowPrefixPtr) + 4;
- ip++;
- ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
- } else {
- if ((matchLongIndex > lowestIndex) && (ZSTD_read64(matchLong) == ZSTD_read64(ip))) {
- const BYTE *matchEnd = matchLongIndex < dictLimit ? dictEnd : iend;
- const BYTE *lowMatchPtr = matchLongIndex < dictLimit ? dictStart : lowPrefixPtr;
- U32 offset;
- mLength = ZSTD_count_2segments(ip + 8, matchLong + 8, iend, matchEnd, lowPrefixPtr) + 8;
- offset = curr - matchLongIndex;
- while (((ip > anchor) & (matchLong > lowMatchPtr)) && (ip[-1] == matchLong[-1])) {
- ip--;
- matchLong--;
- mLength++;
- } /* catch up */
- offset_2 = offset_1;
- offset_1 = offset;
- ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
-
- } else if ((matchIndex > lowestIndex) && (ZSTD_read32(match) == ZSTD_read32(ip))) {
- size_t const h3 = ZSTD_hashPtr(ip + 1, hBitsL, 8);
- U32 const matchIndex3 = hashLong[h3];
- const BYTE *const match3Base = matchIndex3 < dictLimit ? dictBase : base;
- const BYTE *match3 = match3Base + matchIndex3;
- U32 offset;
- hashLong[h3] = curr + 1;
- if ((matchIndex3 > lowestIndex) && (ZSTD_read64(match3) == ZSTD_read64(ip + 1))) {
- const BYTE *matchEnd = matchIndex3 < dictLimit ? dictEnd : iend;
- const BYTE *lowMatchPtr = matchIndex3 < dictLimit ? dictStart : lowPrefixPtr;
- mLength = ZSTD_count_2segments(ip + 9, match3 + 8, iend, matchEnd, lowPrefixPtr) + 8;
- ip++;
- offset = curr + 1 - matchIndex3;
- while (((ip > anchor) & (match3 > lowMatchPtr)) && (ip[-1] == match3[-1])) {
- ip--;
- match3--;
- mLength++;
- } /* catch up */
- } else {
- const BYTE *matchEnd = matchIndex < dictLimit ? dictEnd : iend;
- const BYTE *lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
- mLength = ZSTD_count_2segments(ip + 4, match + 4, iend, matchEnd, lowPrefixPtr) + 4;
- offset = curr - matchIndex;
- while (((ip > anchor) & (match > lowMatchPtr)) && (ip[-1] == match[-1])) {
- ip--;
- match--;
- mLength++;
- } /* catch up */
- }
- offset_2 = offset_1;
- offset_1 = offset;
- ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
-
- } else {
- ip += ((ip - anchor) >> g_searchStrength) + 1;
- continue;
- }
- }
-
- /* found a match : store it */
- ip += mLength;
- anchor = ip;
-
- if (ip <= ilimit) {
- /* Fill Table */
- hashSmall[ZSTD_hashPtr(base + curr + 2, hBitsS, mls)] = curr + 2;
- hashLong[ZSTD_hashPtr(base + curr + 2, hBitsL, 8)] = curr + 2;
- hashSmall[ZSTD_hashPtr(ip - 2, hBitsS, mls)] = (U32)(ip - 2 - base);
- hashLong[ZSTD_hashPtr(ip - 2, hBitsL, 8)] = (U32)(ip - 2 - base);
- /* check immediate repcode */
- while (ip <= ilimit) {
- U32 const curr2 = (U32)(ip - base);
- U32 const repIndex2 = curr2 - offset_2;
- const BYTE *repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
- if ((((U32)((dictLimit - 1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */
- && (ZSTD_read32(repMatch2) == ZSTD_read32(ip))) {
- const BYTE *const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
- size_t const repLength2 =
- ZSTD_count_2segments(ip + EQUAL_READ32, repMatch2 + EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32;
- U32 tmpOffset = offset_2;
- offset_2 = offset_1;
- offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
- ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2 - MINMATCH);
- hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = curr2;
- hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = curr2;
- ip += repLength2;
- anchor = ip;
- continue;
- }
- break;
- }
- }
- }
-
- /* save reps for next block */
- ctx->repToConfirm[0] = offset_1;
- ctx->repToConfirm[1] = offset_2;
-
- /* Last Literals */
- {
- size_t const lastLLSize = iend - anchor;
- memcpy(seqStorePtr->lit, anchor, lastLLSize);
- seqStorePtr->lit += lastLLSize;
- }
-}
-
-static void ZSTD_compressBlock_doubleFast_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
-{
- U32 const mls = ctx->params.cParams.searchLength;
- switch (mls) {
- default: /* includes case 3 */
- case 4: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 4); return;
- case 5: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 5); return;
- case 6: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 6); return;
- case 7: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 7); return;
- }
-}
-
-/*-*************************************
-* Binary Tree search
-***************************************/
-/** ZSTD_insertBt1() : add one or multiple positions to tree.
- * ip : assumed <= iend-8.
-* @return : nb of positions added */
-static U32 ZSTD_insertBt1(ZSTD_CCtx *zc, const BYTE *const ip, const U32 mls, const BYTE *const iend, U32 nbCompares, U32 extDict)
-{
- U32 *const hashTable = zc->hashTable;
- U32 const hashLog = zc->params.cParams.hashLog;
- size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
- U32 *const bt = zc->chainTable;
- U32 const btLog = zc->params.cParams.chainLog - 1;
- U32 const btMask = (1 << btLog) - 1;
- U32 matchIndex = hashTable[h];
- size_t commonLengthSmaller = 0, commonLengthLarger = 0;
- const BYTE *const base = zc->base;
- const BYTE *const dictBase = zc->dictBase;
- const U32 dictLimit = zc->dictLimit;
- const BYTE *const dictEnd = dictBase + dictLimit;
- const BYTE *const prefixStart = base + dictLimit;
- const BYTE *match;
- const U32 curr = (U32)(ip - base);
- const U32 btLow = btMask >= curr ? 0 : curr - btMask;
- U32 *smallerPtr = bt + 2 * (curr & btMask);
- U32 *largerPtr = smallerPtr + 1;
- U32 dummy32; /* to be nullified at the end */
- U32 const windowLow = zc->lowLimit;
- U32 matchEndIdx = curr + 8;
- size_t bestLength = 8;
-
- hashTable[h] = curr; /* Update Hash Table */
-
- while (nbCompares-- && (matchIndex > windowLow)) {
- U32 *const nextPtr = bt + 2 * (matchIndex & btMask);
- size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
-
- if ((!extDict) || (matchIndex + matchLength >= dictLimit)) {
- match = base + matchIndex;
- if (match[matchLength] == ip[matchLength])
- matchLength += ZSTD_count(ip + matchLength + 1, match + matchLength + 1, iend) + 1;
- } else {
- match = dictBase + matchIndex;
- matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iend, dictEnd, prefixStart);
- if (matchIndex + matchLength >= dictLimit)
- match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
- }
-
- if (matchLength > bestLength) {
- bestLength = matchLength;
- if (matchLength > matchEndIdx - matchIndex)
- matchEndIdx = matchIndex + (U32)matchLength;
- }
-
- if (ip + matchLength == iend) /* equal : no way to know if inf or sup */
- break; /* drop, to guarantee consistency; misses a bit of compression, but other solutions can corrupt the tree */
-
- if (match[matchLength] < ip[matchLength]) { /* necessarily within correct buffer */
- /* match is smaller than curr */
- *smallerPtr = matchIndex; /* update smaller idx */
- commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
- if (matchIndex <= btLow) {
- smallerPtr = &dummy32;
- break;
- } /* beyond tree size, stop the search */
- smallerPtr = nextPtr + 1; /* new "smaller" => larger of match */
- matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to curr) */
- } else {
- /* match is larger than curr */
- *largerPtr = matchIndex;
- commonLengthLarger = matchLength;
- if (matchIndex <= btLow) {
- largerPtr = &dummy32;
- break;
- } /* beyond tree size, stop the search */
- largerPtr = nextPtr;
- matchIndex = nextPtr[0];
- }
- }
-
- *smallerPtr = *largerPtr = 0;
- if (bestLength > 384)
- return MIN(192, (U32)(bestLength - 384)); /* speed optimization */
- if (matchEndIdx > curr + 8)
- return matchEndIdx - curr - 8;
- return 1;
-}
-
-static size_t ZSTD_insertBtAndFindBestMatch(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, size_t *offsetPtr, U32 nbCompares, const U32 mls,
- U32 extDict)
-{
- U32 *const hashTable = zc->hashTable;
- U32 const hashLog = zc->params.cParams.hashLog;
- size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
- U32 *const bt = zc->chainTable;
- U32 const btLog = zc->params.cParams.chainLog - 1;
- U32 const btMask = (1 << btLog) - 1;
- U32 matchIndex = hashTable[h];
- size_t commonLengthSmaller = 0, commonLengthLarger = 0;
- const BYTE *const base = zc->base;
- const BYTE *const dictBase = zc->dictBase;
- const U32 dictLimit = zc->dictLimit;
- const BYTE *const dictEnd = dictBase + dictLimit;
- const BYTE *const prefixStart = base + dictLimit;
- const U32 curr = (U32)(ip - base);
- const U32 btLow = btMask >= curr ? 0 : curr - btMask;
- const U32 windowLow = zc->lowLimit;
- U32 *smallerPtr = bt + 2 * (curr & btMask);
- U32 *largerPtr = bt + 2 * (curr & btMask) + 1;
- U32 matchEndIdx = curr + 8;
- U32 dummy32; /* to be nullified at the end */
- size_t bestLength = 0;
-
- hashTable[h] = curr; /* Update Hash Table */
-
- while (nbCompares-- && (matchIndex > windowLow)) {
- U32 *const nextPtr = bt + 2 * (matchIndex & btMask);
- size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
- const BYTE *match;
-
- if ((!extDict) || (matchIndex + matchLength >= dictLimit)) {
- match = base + matchIndex;
- if (match[matchLength] == ip[matchLength])
- matchLength += ZSTD_count(ip + matchLength + 1, match + matchLength + 1, iend) + 1;
- } else {
- match = dictBase + matchIndex;
- matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iend, dictEnd, prefixStart);
- if (matchIndex + matchLength >= dictLimit)
- match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
- }
-
- if (matchLength > bestLength) {
- if (matchLength > matchEndIdx - matchIndex)
- matchEndIdx = matchIndex + (U32)matchLength;
- if ((4 * (int)(matchLength - bestLength)) > (int)(ZSTD_highbit32(curr - matchIndex + 1) - ZSTD_highbit32((U32)offsetPtr[0] + 1)))
- bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex;
- if (ip + matchLength == iend) /* equal : no way to know if inf or sup */
- break; /* drop, to guarantee consistency (misses a little bit of compression) */
- }
-
- if (match[matchLength] < ip[matchLength]) {
- /* match is smaller than curr */
- *smallerPtr = matchIndex; /* update smaller idx */
- commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
- if (matchIndex <= btLow) {
- smallerPtr = &dummy32;
- break;
- } /* beyond tree size, stop the search */
- smallerPtr = nextPtr + 1; /* new "smaller" => larger of match */
- matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to curr) */
- } else {
- /* match is larger than curr */
- *largerPtr = matchIndex;
- commonLengthLarger = matchLength;
- if (matchIndex <= btLow) {
- largerPtr = &dummy32;
- break;
- } /* beyond tree size, stop the search */
- largerPtr = nextPtr;
- matchIndex = nextPtr[0];
- }
- }
-
- *smallerPtr = *largerPtr = 0;
-
- zc->nextToUpdate = (matchEndIdx > curr + 8) ? matchEndIdx - 8 : curr + 1;
- return bestLength;
-}
-
-static void ZSTD_updateTree(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, const U32 nbCompares, const U32 mls)
-{
- const BYTE *const base = zc->base;
- const U32 target = (U32)(ip - base);
- U32 idx = zc->nextToUpdate;
-
- while (idx < target)
- idx += ZSTD_insertBt1(zc, base + idx, mls, iend, nbCompares, 0);
-}
-
-/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
-static size_t ZSTD_BtFindBestMatch(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 mls)
-{
- if (ip < zc->base + zc->nextToUpdate)
- return 0; /* skipped area */
- ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls);
- return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 0);
-}
-
-static size_t ZSTD_BtFindBestMatch_selectMLS(ZSTD_CCtx *zc, /* Index table will be updated */
- const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 matchLengthSearch)
-{
- switch (matchLengthSearch) {
- default: /* includes case 3 */
- case 4: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
- case 5: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
- case 7:
- case 6: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
- }
-}
-
-static void ZSTD_updateTree_extDict(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, const U32 nbCompares, const U32 mls)
-{
- const BYTE *const base = zc->base;
- const U32 target = (U32)(ip - base);
- U32 idx = zc->nextToUpdate;
-
- while (idx < target)
- idx += ZSTD_insertBt1(zc, base + idx, mls, iend, nbCompares, 1);
-}
-
-/** Tree updater, providing best match */
-static size_t ZSTD_BtFindBestMatch_extDict(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
- const U32 mls)
-{
- if (ip < zc->base + zc->nextToUpdate)
- return 0; /* skipped area */
- ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls);
- return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 1);
-}
-
-static size_t ZSTD_BtFindBestMatch_selectMLS_extDict(ZSTD_CCtx *zc, /* Index table will be updated */
- const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
- const U32 matchLengthSearch)
-{
- switch (matchLengthSearch) {
- default: /* includes case 3 */
- case 4: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
- case 5: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
- case 7:
- case 6: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
- }
-}
-
-/* *********************************
-* Hash Chain
-***********************************/
-#define NEXT_IN_CHAIN(d, mask) chainTable[(d)&mask]
-
-/* Update chains up to ip (excluded)
- Assumption : always within prefix (i.e. not within extDict) */
-FORCE_INLINE
-U32 ZSTD_insertAndFindFirstIndex(ZSTD_CCtx *zc, const BYTE *ip, U32 mls)
-{
- U32 *const hashTable = zc->hashTable;
- const U32 hashLog = zc->params.cParams.hashLog;
- U32 *const chainTable = zc->chainTable;
- const U32 chainMask = (1 << zc->params.cParams.chainLog) - 1;
- const BYTE *const base = zc->base;
- const U32 target = (U32)(ip - base);
- U32 idx = zc->nextToUpdate;
-
- while (idx < target) { /* catch up */
- size_t const h = ZSTD_hashPtr(base + idx, hashLog, mls);
- NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
- hashTable[h] = idx;
- idx++;
- }
-
- zc->nextToUpdate = target;
- return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
-}
-
-/* inlining is important to hardwire a hot branch (template emulation) */
-FORCE_INLINE
-size_t ZSTD_HcFindBestMatch_generic(ZSTD_CCtx *zc, /* Index table will be updated */
- const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 mls,
- const U32 extDict)
-{
- U32 *const chainTable = zc->chainTable;
- const U32 chainSize = (1 << zc->params.cParams.chainLog);
- const U32 chainMask = chainSize - 1;
- const BYTE *const base = zc->base;
- const BYTE *const dictBase = zc->dictBase;
- const U32 dictLimit = zc->dictLimit;
- const BYTE *const prefixStart = base + dictLimit;
- const BYTE *const dictEnd = dictBase + dictLimit;
- const U32 lowLimit = zc->lowLimit;
- const U32 curr = (U32)(ip - base);
- const U32 minChain = curr > chainSize ? curr - chainSize : 0;
- int nbAttempts = maxNbAttempts;
- size_t ml = EQUAL_READ32 - 1;
-
- /* HC4 match finder */
- U32 matchIndex = ZSTD_insertAndFindFirstIndex(zc, ip, mls);
-
- for (; (matchIndex > lowLimit) & (nbAttempts > 0); nbAttempts--) {
- const BYTE *match;
- size_t currMl = 0;
- if ((!extDict) || matchIndex >= dictLimit) {
- match = base + matchIndex;
- if (match[ml] == ip[ml]) /* potentially better */
- currMl = ZSTD_count(ip, match, iLimit);
- } else {
- match = dictBase + matchIndex;
- if (ZSTD_read32(match) == ZSTD_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */
- currMl = ZSTD_count_2segments(ip + EQUAL_READ32, match + EQUAL_READ32, iLimit, dictEnd, prefixStart) + EQUAL_READ32;
- }
-
- /* save best solution */
- if (currMl > ml) {
- ml = currMl;
- *offsetPtr = curr - matchIndex + ZSTD_REP_MOVE;
- if (ip + currMl == iLimit)
- break; /* best possible, and avoid read overflow */
- }
-
- if (matchIndex <= minChain)
- break;
- matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
- }
-
- return ml;
-}
-
-FORCE_INLINE size_t ZSTD_HcFindBestMatch_selectMLS(ZSTD_CCtx *zc, const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
- const U32 matchLengthSearch)
-{
- switch (matchLengthSearch) {
- default: /* includes case 3 */
- case 4: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 0);
- case 5: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 0);
- case 7:
- case 6: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 0);
- }
-}
-
-FORCE_INLINE size_t ZSTD_HcFindBestMatch_extDict_selectMLS(ZSTD_CCtx *zc, const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
- const U32 matchLengthSearch)
-{
- switch (matchLengthSearch) {
- default: /* includes case 3 */
- case 4: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 1);
- case 5: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 1);
- case 7:
- case 6: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 1);
- }
-}
-
-/* *******************************
-* Common parser - lazy strategy
-*********************************/
-FORCE_INLINE
-void ZSTD_compressBlock_lazy_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 searchMethod, const U32 depth)
-{
- seqStore_t *seqStorePtr = &(ctx->seqStore);
- const BYTE *const istart = (const BYTE *)src;
- const BYTE *ip = istart;
- const BYTE *anchor = istart;
- const BYTE *const iend = istart + srcSize;
- const BYTE *const ilimit = iend - 8;
- const BYTE *const base = ctx->base + ctx->dictLimit;
-
- U32 const maxSearches = 1 << ctx->params.cParams.searchLog;
- U32 const mls = ctx->params.cParams.searchLength;
-
- typedef size_t (*searchMax_f)(ZSTD_CCtx * zc, const BYTE *ip, const BYTE *iLimit, size_t *offsetPtr, U32 maxNbAttempts, U32 matchLengthSearch);
- searchMax_f const searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS;
- U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1], savedOffset = 0;
-
- /* init */
- ip += (ip == base);
- ctx->nextToUpdate3 = ctx->nextToUpdate;
- {
- U32 const maxRep = (U32)(ip - base);
- if (offset_2 > maxRep)
- savedOffset = offset_2, offset_2 = 0;
- if (offset_1 > maxRep)
- savedOffset = offset_1, offset_1 = 0;
- }
-
- /* Match Loop */
- while (ip < ilimit) {
- size_t matchLength = 0;
- size_t offset = 0;
- const BYTE *start = ip + 1;
-
- /* check repCode */
- if ((offset_1 > 0) & (ZSTD_read32(ip + 1) == ZSTD_read32(ip + 1 - offset_1))) {
- /* repcode : we take it */
- matchLength = ZSTD_count(ip + 1 + EQUAL_READ32, ip + 1 + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32;
- if (depth == 0)
- goto _storeSequence;
- }
-
- /* first search (depth 0) */
- {
- size_t offsetFound = 99999999;
- size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
- if (ml2 > matchLength)
- matchLength = ml2, start = ip, offset = offsetFound;
- }
-
- if (matchLength < EQUAL_READ32) {
- ip += ((ip - anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */
- continue;
- }
-
- /* let's try to find a better solution */
- if (depth >= 1)
- while (ip < ilimit) {
- ip++;
- if ((offset) && ((offset_1 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_1)))) {
- size_t const mlRep = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32;
- int const gain2 = (int)(mlRep * 3);
- int const gain1 = (int)(matchLength * 3 - ZSTD_highbit32((U32)offset + 1) + 1);
- if ((mlRep >= EQUAL_READ32) && (gain2 > gain1))
- matchLength = mlRep, offset = 0, start = ip;
- }
- {
- size_t offset2 = 99999999;
- size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
- int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */
- int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 4);
- if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
- matchLength = ml2, offset = offset2, start = ip;
- continue; /* search a better one */
- }
- }
-
- /* let's find an even better one */
- if ((depth == 2) && (ip < ilimit)) {
- ip++;
- if ((offset) && ((offset_1 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_1)))) {
- size_t const ml2 = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32;
- int const gain2 = (int)(ml2 * 4);
- int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 1);
- if ((ml2 >= EQUAL_READ32) && (gain2 > gain1))
- matchLength = ml2, offset = 0, start = ip;
- }
- {
- size_t offset2 = 99999999;
- size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
- int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */
- int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 7);
- if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
- matchLength = ml2, offset = offset2, start = ip;
- continue;
- }
- }
- }
- break; /* nothing found : store previous solution */
- }
-
- /* NOTE:
- * start[-offset+ZSTD_REP_MOVE-1] is undefined behavior:
- * (-offset+ZSTD_REP_MOVE-1) is unsigned, and adding it to start
- * overflows the pointer.
- */
- /* catch up */
- if (offset) {
- while ((start > anchor) && (start > base + offset - ZSTD_REP_MOVE) &&
- (start[-1] == (start-offset+ZSTD_REP_MOVE)[-1])) /* only search for offset within prefix */
- {
- start--;
- matchLength++;
- }
- offset_2 = offset_1;
- offset_1 = (U32)(offset - ZSTD_REP_MOVE);
- }
-
- /* store sequence */
-_storeSequence:
- {
- size_t const litLength = start - anchor;
- ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength - MINMATCH);
- anchor = ip = start + matchLength;
- }
-
- /* check immediate repcode */
- while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) {
- /* store sequence */
- matchLength = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_2, iend) + EQUAL_READ32;
- offset = offset_2;
- offset_2 = offset_1;
- offset_1 = (U32)offset; /* swap repcodes */
- ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength - MINMATCH);
- ip += matchLength;
- anchor = ip;
- continue; /* faster when present ... (?) */
- }
- }
-
- /* Save reps for next block */
- ctx->repToConfirm[0] = offset_1 ? offset_1 : savedOffset;
- ctx->repToConfirm[1] = offset_2 ? offset_2 : savedOffset;
-
- /* Last Literals */
- {
- size_t const lastLLSize = iend - anchor;
- memcpy(seqStorePtr->lit, anchor, lastLLSize);
- seqStorePtr->lit += lastLLSize;
- }
-}
-
-static void ZSTD_compressBlock_btlazy2(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 1, 2); }
-
-static void ZSTD_compressBlock_lazy2(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 2); }
-
-static void ZSTD_compressBlock_lazy(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 1); }
-
-static void ZSTD_compressBlock_greedy(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 0); }
-
-FORCE_INLINE
-void ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 searchMethod, const U32 depth)
-{
- seqStore_t *seqStorePtr = &(ctx->seqStore);
- const BYTE *const istart = (const BYTE *)src;
- const BYTE *ip = istart;
- const BYTE *anchor = istart;
- const BYTE *const iend = istart + srcSize;
- const BYTE *const ilimit = iend - 8;
- const BYTE *const base = ctx->base;
- const U32 dictLimit = ctx->dictLimit;
- const U32 lowestIndex = ctx->lowLimit;
- const BYTE *const prefixStart = base + dictLimit;
- const BYTE *const dictBase = ctx->dictBase;
- const BYTE *const dictEnd = dictBase + dictLimit;
- const BYTE *const dictStart = dictBase + ctx->lowLimit;
-
- const U32 maxSearches = 1 << ctx->params.cParams.searchLog;
- const U32 mls = ctx->params.cParams.searchLength;
-
- typedef size_t (*searchMax_f)(ZSTD_CCtx * zc, const BYTE *ip, const BYTE *iLimit, size_t *offsetPtr, U32 maxNbAttempts, U32 matchLengthSearch);
- searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS_extDict : ZSTD_HcFindBestMatch_extDict_selectMLS;
-
- U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1];
-
- /* init */
- ctx->nextToUpdate3 = ctx->nextToUpdate;
- ip += (ip == prefixStart);
-
- /* Match Loop */
- while (ip < ilimit) {
- size_t matchLength = 0;
- size_t offset = 0;
- const BYTE *start = ip + 1;
- U32 curr = (U32)(ip - base);
-
- /* check repCode */
- {
- const U32 repIndex = (U32)(curr + 1 - offset_1);
- const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
- const BYTE *const repMatch = repBase + repIndex;
- if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
- if (ZSTD_read32(ip + 1) == ZSTD_read32(repMatch)) {
- /* repcode detected, we should take it */
- const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
- matchLength =
- ZSTD_count_2segments(ip + 1 + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32;
- if (depth == 0)
- goto _storeSequence;
- }
- }
-
- /* first search (depth 0) */
- {
- size_t offsetFound = 99999999;
- size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
- if (ml2 > matchLength)
- matchLength = ml2, start = ip, offset = offsetFound;
- }
-
- if (matchLength < EQUAL_READ32) {
- ip += ((ip - anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */
- continue;
- }
-
- /* let's try to find a better solution */
- if (depth >= 1)
- while (ip < ilimit) {
- ip++;
- curr++;
- /* check repCode */
- if (offset) {
- const U32 repIndex = (U32)(curr - offset_1);
- const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
- const BYTE *const repMatch = repBase + repIndex;
- if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
- if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) {
- /* repcode detected */
- const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
- size_t const repLength =
- ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) +
- EQUAL_READ32;
- int const gain2 = (int)(repLength * 3);
- int const gain1 = (int)(matchLength * 3 - ZSTD_highbit32((U32)offset + 1) + 1);
- if ((repLength >= EQUAL_READ32) && (gain2 > gain1))
- matchLength = repLength, offset = 0, start = ip;
- }
- }
-
- /* search match, depth 1 */
- {
- size_t offset2 = 99999999;
- size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
- int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */
- int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 4);
- if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
- matchLength = ml2, offset = offset2, start = ip;
- continue; /* search a better one */
- }
- }
-
- /* let's find an even better one */
- if ((depth == 2) && (ip < ilimit)) {
- ip++;
- curr++;
- /* check repCode */
- if (offset) {
- const U32 repIndex = (U32)(curr - offset_1);
- const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
- const BYTE *const repMatch = repBase + repIndex;
- if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
- if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) {
- /* repcode detected */
- const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
- size_t repLength = ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend,
- repEnd, prefixStart) +
- EQUAL_READ32;
- int gain2 = (int)(repLength * 4);
- int gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 1);
- if ((repLength >= EQUAL_READ32) && (gain2 > gain1))
- matchLength = repLength, offset = 0, start = ip;
- }
- }
-
- /* search match, depth 2 */
- {
- size_t offset2 = 99999999;
- size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
- int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */
- int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 7);
- if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
- matchLength = ml2, offset = offset2, start = ip;
- continue;
- }
- }
- }
- break; /* nothing found : store previous solution */
- }
-
- /* catch up */
- if (offset) {
- U32 const matchIndex = (U32)((start - base) - (offset - ZSTD_REP_MOVE));
- const BYTE *match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
- const BYTE *const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
- while ((start > anchor) && (match > mStart) && (start[-1] == match[-1])) {
- start--;
- match--;
- matchLength++;
- } /* catch up */
- offset_2 = offset_1;
- offset_1 = (U32)(offset - ZSTD_REP_MOVE);
- }
-
- /* store sequence */
- _storeSequence : {
- size_t const litLength = start - anchor;
- ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength - MINMATCH);
- anchor = ip = start + matchLength;
- }
-
- /* check immediate repcode */
- while (ip <= ilimit) {
- const U32 repIndex = (U32)((ip - base) - offset_2);
- const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
- const BYTE *const repMatch = repBase + repIndex;
- if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
- if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) {
- /* repcode detected, we should take it */
- const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
- matchLength =
- ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32;
- offset = offset_2;
- offset_2 = offset_1;
- offset_1 = (U32)offset; /* swap offset history */
- ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength - MINMATCH);
- ip += matchLength;
- anchor = ip;
- continue; /* faster when present ... (?) */
- }
- break;
- }
- }
-
- /* Save reps for next block */
- ctx->repToConfirm[0] = offset_1;
- ctx->repToConfirm[1] = offset_2;
-
- /* Last Literals */
- {
- size_t const lastLLSize = iend - anchor;
- memcpy(seqStorePtr->lit, anchor, lastLLSize);
- seqStorePtr->lit += lastLLSize;
- }
-}
-
-void ZSTD_compressBlock_greedy_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 0); }
-
-static void ZSTD_compressBlock_lazy_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
-{
- ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 1);
-}
-
-static void ZSTD_compressBlock_lazy2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
-{
- ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 2);
-}
-
-static void ZSTD_compressBlock_btlazy2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
-{
- ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 1, 2);
-}
-
-/* The optimal parser */
-#include "zstd_opt.h"
-
-static void ZSTD_compressBlock_btopt(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
-{
-#ifdef ZSTD_OPT_H_91842398743
- ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 0);
-#else
- (void)ctx;
- (void)src;
- (void)srcSize;
- return;
-#endif
-}
-
-static void ZSTD_compressBlock_btopt2(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
-{
-#ifdef ZSTD_OPT_H_91842398743
- ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 1);
-#else
- (void)ctx;
- (void)src;
- (void)srcSize;
- return;
-#endif
-}
-
-static void ZSTD_compressBlock_btopt_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
-{
-#ifdef ZSTD_OPT_H_91842398743
- ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 0);
-#else
- (void)ctx;
- (void)src;
- (void)srcSize;
- return;
-#endif
-}
-
-static void ZSTD_compressBlock_btopt2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
-{
-#ifdef ZSTD_OPT_H_91842398743
- ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 1);
-#else
- (void)ctx;
- (void)src;
- (void)srcSize;
- return;
-#endif
-}
-
-typedef void (*ZSTD_blockCompressor)(ZSTD_CCtx *ctx, const void *src, size_t srcSize);
-
-static ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict)
-{
- static const ZSTD_blockCompressor blockCompressor[2][8] = {
- {ZSTD_compressBlock_fast, ZSTD_compressBlock_doubleFast, ZSTD_compressBlock_greedy, ZSTD_compressBlock_lazy, ZSTD_compressBlock_lazy2,
- ZSTD_compressBlock_btlazy2, ZSTD_compressBlock_btopt, ZSTD_compressBlock_btopt2},
- {ZSTD_compressBlock_fast_extDict, ZSTD_compressBlock_doubleFast_extDict, ZSTD_compressBlock_greedy_extDict, ZSTD_compressBlock_lazy_extDict,
- ZSTD_compressBlock_lazy2_extDict, ZSTD_compressBlock_btlazy2_extDict, ZSTD_compressBlock_btopt_extDict, ZSTD_compressBlock_btopt2_extDict}};
-
- return blockCompressor[extDict][(U32)strat];
-}
-
-static size_t ZSTD_compressBlock_internal(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
-{
- ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->params.cParams.strategy, zc->lowLimit < zc->dictLimit);
- const BYTE *const base = zc->base;
- const BYTE *const istart = (const BYTE *)src;
- const U32 curr = (U32)(istart - base);
- if (srcSize < MIN_CBLOCK_SIZE + ZSTD_blockHeaderSize + 1)
- return 0; /* don't even attempt compression below a certain srcSize */
- ZSTD_resetSeqStore(&(zc->seqStore));
- if (curr > zc->nextToUpdate + 384)
- zc->nextToUpdate = curr - MIN(192, (U32)(curr - zc->nextToUpdate - 384)); /* update tree not updated after finding very long rep matches */
- blockCompressor(zc, src, srcSize);
- return ZSTD_compressSequences(zc, dst, dstCapacity, srcSize);
-}
-
-/*! ZSTD_compress_generic() :
-* Compress a chunk of data into one or multiple blocks.
-* All blocks will be terminated, all input will be consumed.
-* Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
- * Frame is assumed to be already started (header already produced)
-* @return : compressed size, or an error code
-*/
-static size_t ZSTD_compress_generic(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, U32 lastFrameChunk)
-{
- size_t blockSize = cctx->blockSize;
- size_t remaining = srcSize;
- const BYTE *ip = (const BYTE *)src;
- BYTE *const ostart = (BYTE *)dst;
- BYTE *op = ostart;
- U32 const maxDist = 1 << cctx->params.cParams.windowLog;
-
- if (cctx->params.fParams.checksumFlag && srcSize)
- xxh64_update(&cctx->xxhState, src, srcSize);
-
- while (remaining) {
- U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
- size_t cSize;
-
- if (dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE)
- return ERROR(dstSize_tooSmall); /* not enough space to store compressed block */
- if (remaining < blockSize)
- blockSize = remaining;
-
- /* preemptive overflow correction */
- if (cctx->lowLimit > (3U << 29)) {
- U32 const cycleMask = (1 << ZSTD_cycleLog(cctx->params.cParams.hashLog, cctx->params.cParams.strategy)) - 1;
- U32 const curr = (U32)(ip - cctx->base);
- U32 const newCurr = (curr & cycleMask) + (1 << cctx->params.cParams.windowLog);
- U32 const correction = curr - newCurr;
- ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_64 <= 30);
- ZSTD_reduceIndex(cctx, correction);
- cctx->base += correction;
- cctx->dictBase += correction;
- cctx->lowLimit -= correction;
- cctx->dictLimit -= correction;
- if (cctx->nextToUpdate < correction)
- cctx->nextToUpdate = 0;
- else
- cctx->nextToUpdate -= correction;
- }
-
- if ((U32)(ip + blockSize - cctx->base) > cctx->loadedDictEnd + maxDist) {
- /* enforce maxDist */
- U32 const newLowLimit = (U32)(ip + blockSize - cctx->base) - maxDist;
- if (cctx->lowLimit < newLowLimit)
- cctx->lowLimit = newLowLimit;
- if (cctx->dictLimit < cctx->lowLimit)
- cctx->dictLimit = cctx->lowLimit;
- }
-
- cSize = ZSTD_compressBlock_internal(cctx, op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize, ip, blockSize);
- if (ZSTD_isError(cSize))
- return cSize;
-
- if (cSize == 0) { /* block is not compressible */
- U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw) << 1) + (U32)(blockSize << 3);
- if (blockSize + ZSTD_blockHeaderSize > dstCapacity)
- return ERROR(dstSize_tooSmall);
- ZSTD_writeLE32(op, cBlockHeader24); /* no problem: the 4th byte will be overwritten by the block content */
- memcpy(op + ZSTD_blockHeaderSize, ip, blockSize);
- cSize = ZSTD_blockHeaderSize + blockSize;
- } else {
- U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed) << 1) + (U32)(cSize << 3);
- ZSTD_writeLE24(op, cBlockHeader24);
- cSize += ZSTD_blockHeaderSize;
- }
-
- remaining -= blockSize;
- dstCapacity -= cSize;
- ip += blockSize;
- op += cSize;
- }
-
- if (lastFrameChunk && (op > ostart))
- cctx->stage = ZSTDcs_ending;
- return op - ostart;
-}
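The 3-byte block header built in the loop above packs the last-block flag into bit 0, the block type into bits 1-2, and the block size into bits 3-23. A minimal standalone sketch of that packing (a hypothetical helper, not part of this file, assuming the frame-format convention that a raw block type is 0 and a compressed block type is 2):

#include <stdint.h>

/* Illustration only: same bit layout as cBlockHeader24 above. */
static uint32_t pack_block_header(uint32_t last_block, uint32_t block_type, uint32_t size)
{
	/* bit 0: lastBlock, bits 1-2: block type, bits 3-23: size */
	return last_block + (block_type << 1) + (size << 3);
}

/* Worked example: a final compressed block of 200 bytes gives
 * 1 + (2 << 1) + (200 << 3) = 1605 = 0x000645,
 * stored little-endian as the three bytes 45 06 00.
 */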
-
-static size_t ZSTD_writeFrameHeader(void *dst, size_t dstCapacity, ZSTD_parameters params, U64 pledgedSrcSize, U32 dictID)
-{
- BYTE *const op = (BYTE *)dst;
- U32 const dictIDSizeCode = (dictID > 0) + (dictID >= 256) + (dictID >= 65536); /* 0-3 */
- U32 const checksumFlag = params.fParams.checksumFlag > 0;
- U32 const windowSize = 1U << params.cParams.windowLog;
- U32 const singleSegment = params.fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
- BYTE const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
- U32 const fcsCode =
- params.fParams.contentSizeFlag ? (pledgedSrcSize >= 256) + (pledgedSrcSize >= 65536 + 256) + (pledgedSrcSize >= 0xFFFFFFFFU) : 0; /* 0-3 */
- BYTE const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag << 2) + (singleSegment << 5) + (fcsCode << 6));
- size_t pos;
-
- if (dstCapacity < ZSTD_frameHeaderSize_max)
- return ERROR(dstSize_tooSmall);
-
- ZSTD_writeLE32(dst, ZSTD_MAGICNUMBER);
- op[4] = frameHeaderDescriptionByte;
- pos = 5;
- if (!singleSegment)
- op[pos++] = windowLogByte;
- switch (dictIDSizeCode) {
- default: /* impossible */
- case 0: break;
- case 1:
- op[pos] = (BYTE)(dictID);
- pos++;
- break;
- case 2:
- ZSTD_writeLE16(op + pos, (U16)dictID);
- pos += 2;
- break;
- case 3:
- ZSTD_writeLE32(op + pos, dictID);
- pos += 4;
- break;
- }
- switch (fcsCode) {
- default: /* impossible */
- case 0:
- if (singleSegment)
- op[pos++] = (BYTE)(pledgedSrcSize);
- break;
- case 1:
- ZSTD_writeLE16(op + pos, (U16)(pledgedSrcSize - 256));
- pos += 2;
- break;
- case 2:
- ZSTD_writeLE32(op + pos, (U32)(pledgedSrcSize));
- pos += 4;
- break;
- case 3:
- ZSTD_writeLE64(op + pos, (U64)(pledgedSrcSize));
- pos += 8;
- break;
- }
- return pos;
-}
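To make the descriptor-byte packing above concrete: bits 0-1 carry dictIDSizeCode, bit 2 the checksum flag, bit 5 the single-segment flag, and bits 6-7 the frame-content-size code. A small hypothetical sketch, not part of this file, reproducing the same layout:

#include <stdint.h>

/* Illustration only: mirrors the descriptor byte written at op[4] above. */
static uint8_t pack_frame_descriptor(uint8_t dict_id_size_code, uint8_t checksum_flag,
				     uint8_t single_segment, uint8_t fcs_code)
{
	return (uint8_t)(dict_id_size_code + (checksum_flag << 2) + (single_segment << 5) + (fcs_code << 6));
}

/* Worked example: no dictID, checksum on, not single-segment, 4-byte content
 * size field (fcsCode = 2): 0 + (1 << 2) + (0 << 5) + (2 << 6) = 132 = 0x84.
 */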
-
-static size_t ZSTD_compressContinue_internal(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, U32 frame, U32 lastFrameChunk)
-{
- const BYTE *const ip = (const BYTE *)src;
- size_t fhSize = 0;
-
- if (cctx->stage == ZSTDcs_created)
- return ERROR(stage_wrong); /* missing init (ZSTD_compressBegin) */
-
- if (frame && (cctx->stage == ZSTDcs_init)) {
- fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->params, cctx->frameContentSize, cctx->dictID);
- if (ZSTD_isError(fhSize))
- return fhSize;
- dstCapacity -= fhSize;
- dst = (char *)dst + fhSize;
- cctx->stage = ZSTDcs_ongoing;
- }
-
- /* Check if blocks follow each other */
- if (src != cctx->nextSrc) {
- /* not contiguous */
- ptrdiff_t const delta = cctx->nextSrc - ip;
- cctx->lowLimit = cctx->dictLimit;
- cctx->dictLimit = (U32)(cctx->nextSrc - cctx->base);
- cctx->dictBase = cctx->base;
- cctx->base -= delta;
- cctx->nextToUpdate = cctx->dictLimit;
- if (cctx->dictLimit - cctx->lowLimit < HASH_READ_SIZE)
- cctx->lowLimit = cctx->dictLimit; /* too small extDict */
- }
-
- /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */
- if ((ip + srcSize > cctx->dictBase + cctx->lowLimit) & (ip < cctx->dictBase + cctx->dictLimit)) {
- ptrdiff_t const highInputIdx = (ip + srcSize) - cctx->dictBase;
- U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)cctx->dictLimit) ? cctx->dictLimit : (U32)highInputIdx;
- cctx->lowLimit = lowLimitMax;
- }
-
- cctx->nextSrc = ip + srcSize;
-
- if (srcSize) {
- size_t const cSize = frame ? ZSTD_compress_generic(cctx, dst, dstCapacity, src, srcSize, lastFrameChunk)
- : ZSTD_compressBlock_internal(cctx, dst, dstCapacity, src, srcSize);
- if (ZSTD_isError(cSize))
- return cSize;
- return cSize + fhSize;
- } else
- return fhSize;
-}
-
-size_t ZSTD_compressContinue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
-{
- return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 0);
-}
-
-size_t ZSTD_getBlockSizeMax(ZSTD_CCtx *cctx) { return MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, 1 << cctx->params.cParams.windowLog); }
-
-size_t ZSTD_compressBlock(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
-{
- size_t const blockSizeMax = ZSTD_getBlockSizeMax(cctx);
- if (srcSize > blockSizeMax)
- return ERROR(srcSize_wrong);
- return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0, 0);
-}
-
-/*! ZSTD_loadDictionaryContent() :
- * @return : 0, or an error code
- */
-static size_t ZSTD_loadDictionaryContent(ZSTD_CCtx *zc, const void *src, size_t srcSize)
-{
- const BYTE *const ip = (const BYTE *)src;
- const BYTE *const iend = ip + srcSize;
-
- /* input becomes the current prefix */
- zc->lowLimit = zc->dictLimit;
- zc->dictLimit = (U32)(zc->nextSrc - zc->base);
- zc->dictBase = zc->base;
- zc->base += ip - zc->nextSrc;
- zc->nextToUpdate = zc->dictLimit;
- zc->loadedDictEnd = zc->forceWindow ? 0 : (U32)(iend - zc->base);
-
- zc->nextSrc = iend;
- if (srcSize <= HASH_READ_SIZE)
- return 0;
-
- switch (zc->params.cParams.strategy) {
- case ZSTD_fast: ZSTD_fillHashTable(zc, iend, zc->params.cParams.searchLength); break;
-
- case ZSTD_dfast: ZSTD_fillDoubleHashTable(zc, iend, zc->params.cParams.searchLength); break;
-
- case ZSTD_greedy:
- case ZSTD_lazy:
- case ZSTD_lazy2:
- if (srcSize >= HASH_READ_SIZE)
- ZSTD_insertAndFindFirstIndex(zc, iend - HASH_READ_SIZE, zc->params.cParams.searchLength);
- break;
-
- case ZSTD_btlazy2:
- case ZSTD_btopt:
- case ZSTD_btopt2:
- if (srcSize >= HASH_READ_SIZE)
- ZSTD_updateTree(zc, iend - HASH_READ_SIZE, iend, 1 << zc->params.cParams.searchLog, zc->params.cParams.searchLength);
- break;
-
- default:
- return ERROR(GENERIC); /* strategy doesn't exist; impossible */
- }
-
- zc->nextToUpdate = (U32)(iend - zc->base);
- return 0;
-}
-
-/* Dictionaries that assign zero probability to symbols that show up cause problems
- during FSE encoding. Refuse dictionaries that assign zero probability to symbols
- that we may encounter during compression.
- NOTE: This behavior is not standard and could be improved in the future. */
-static size_t ZSTD_checkDictNCount(short *normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue)
-{
- U32 s;
- if (dictMaxSymbolValue < maxSymbolValue)
- return ERROR(dictionary_corrupted);
- for (s = 0; s <= maxSymbolValue; ++s) {
- if (normalizedCounter[s] == 0)
- return ERROR(dictionary_corrupted);
- }
- return 0;
-}
-
-/* Dictionary format :
- * See :
- * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
- */
-/*! ZSTD_loadZstdDictionary() :
- * @return : 0, or an error code
- * assumptions : magic number assumed already checked
- * dictSize assumed > 8
- */
-static size_t ZSTD_loadZstdDictionary(ZSTD_CCtx *cctx, const void *dict, size_t dictSize)
-{
- const BYTE *dictPtr = (const BYTE *)dict;
- const BYTE *const dictEnd = dictPtr + dictSize;
- short offcodeNCount[MaxOff + 1];
- unsigned offcodeMaxValue = MaxOff;
-
- dictPtr += 4; /* skip magic number */
- cctx->dictID = cctx->params.fParams.noDictIDFlag ? 0 : ZSTD_readLE32(dictPtr);
- dictPtr += 4;
-
- {
- size_t const hufHeaderSize = HUF_readCTable_wksp(cctx->hufTable, 255, dictPtr, dictEnd - dictPtr, cctx->tmpCounters, sizeof(cctx->tmpCounters));
- if (HUF_isError(hufHeaderSize))
- return ERROR(dictionary_corrupted);
- dictPtr += hufHeaderSize;
- }
-
- {
- unsigned offcodeLog;
- size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd - dictPtr);
- if (FSE_isError(offcodeHeaderSize))
- return ERROR(dictionary_corrupted);
- if (offcodeLog > OffFSELog)
- return ERROR(dictionary_corrupted);
- /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
- CHECK_E(FSE_buildCTable_wksp(cctx->offcodeCTable, offcodeNCount, offcodeMaxValue, offcodeLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)),
- dictionary_corrupted);
- dictPtr += offcodeHeaderSize;
- }
-
- {
- short matchlengthNCount[MaxML + 1];
- unsigned matchlengthMaxValue = MaxML, matchlengthLog;
- size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd - dictPtr);
- if (FSE_isError(matchlengthHeaderSize))
- return ERROR(dictionary_corrupted);
- if (matchlengthLog > MLFSELog)
- return ERROR(dictionary_corrupted);
- /* Every match length code must have non-zero probability */
- CHECK_F(ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
- CHECK_E(
- FSE_buildCTable_wksp(cctx->matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)),
- dictionary_corrupted);
- dictPtr += matchlengthHeaderSize;
- }
-
- {
- short litlengthNCount[MaxLL + 1];
- unsigned litlengthMaxValue = MaxLL, litlengthLog;
- size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd - dictPtr);
- if (FSE_isError(litlengthHeaderSize))
- return ERROR(dictionary_corrupted);
- if (litlengthLog > LLFSELog)
- return ERROR(dictionary_corrupted);
- /* Every literal length code must have non-zero probability */
- CHECK_F(ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
- CHECK_E(FSE_buildCTable_wksp(cctx->litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)),
- dictionary_corrupted);
- dictPtr += litlengthHeaderSize;
- }
-
- if (dictPtr + 12 > dictEnd)
- return ERROR(dictionary_corrupted);
- cctx->rep[0] = ZSTD_readLE32(dictPtr + 0);
- cctx->rep[1] = ZSTD_readLE32(dictPtr + 4);
- cctx->rep[2] = ZSTD_readLE32(dictPtr + 8);
- dictPtr += 12;
-
- {
- size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
- U32 offcodeMax = MaxOff;
- if (dictContentSize <= ((U32)-1) - 128 KB) {
- U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */
- offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */
- }
- /* All offset values <= dictContentSize + 128 KB must be representable */
- CHECK_F(ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)));
- /* All repCodes must be <= dictContentSize and != 0 */
- {
- U32 u;
- for (u = 0; u < 3; u++) {
- if (cctx->rep[u] == 0)
- return ERROR(dictionary_corrupted);
- if (cctx->rep[u] > dictContentSize)
- return ERROR(dictionary_corrupted);
- }
- }
-
- cctx->flagStaticTables = 1;
- cctx->flagStaticHufTable = HUF_repeat_valid;
- return ZSTD_loadDictionaryContent(cctx, dictPtr, dictContentSize);
- }
-}
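For orientation, the layout that ZSTD_loadZstdDictionary() walks above is, roughly (entropy-table sizes are variable; this is a reading aid, not a normative description; see the format document linked above):

  bytes 0-3   : magic number (already checked by the caller)
  bytes 4-7   : dictID
  then        : Huffman literal table, then the offset, match-length and
                literal-length FSE tables, parsed in that order
  then        : three 4-byte repeat offsets (rep[0..2])
  remainder   : dictionary content, loaded as the initial prefix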
-
-/** ZSTD_compress_insertDictionary() :
-* @return : 0, or an error code */
-static size_t ZSTD_compress_insertDictionary(ZSTD_CCtx *cctx, const void *dict, size_t dictSize)
-{
- if ((dict == NULL) || (dictSize <= 8))
- return 0;
-
- /* dict as pure content */
- if ((ZSTD_readLE32(dict) != ZSTD_DICT_MAGIC) || (cctx->forceRawDict))
- return ZSTD_loadDictionaryContent(cctx, dict, dictSize);
-
- /* dict as zstd dictionary */
- return ZSTD_loadZstdDictionary(cctx, dict, dictSize);
-}
-
-/*! ZSTD_compressBegin_internal() :
-* @return : 0, or an error code */
-static size_t ZSTD_compressBegin_internal(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, ZSTD_parameters params, U64 pledgedSrcSize)
-{
- ZSTD_compResetPolicy_e const crp = dictSize ? ZSTDcrp_fullReset : ZSTDcrp_continue;
- CHECK_F(ZSTD_resetCCtx_advanced(cctx, params, pledgedSrcSize, crp));
- return ZSTD_compress_insertDictionary(cctx, dict, dictSize);
-}
-
-/*! ZSTD_compressBegin_advanced() :
-* @return : 0, or an error code */
-size_t ZSTD_compressBegin_advanced(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize)
-{
- /* compression parameters verification and optimization */
- CHECK_F(ZSTD_checkCParams(params.cParams));
- return ZSTD_compressBegin_internal(cctx, dict, dictSize, params, pledgedSrcSize);
-}
-
-size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, int compressionLevel)
-{
- ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, dictSize);
- return ZSTD_compressBegin_internal(cctx, dict, dictSize, params, 0);
-}
-
-size_t ZSTD_compressBegin(ZSTD_CCtx *cctx, int compressionLevel) { return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel); }
-
-/*! ZSTD_writeEpilogue() :
-* Ends a frame.
-* @return : nb of bytes written into dst (or an error code) */
-static size_t ZSTD_writeEpilogue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity)
-{
- BYTE *const ostart = (BYTE *)dst;
- BYTE *op = ostart;
- size_t fhSize = 0;
-
- if (cctx->stage == ZSTDcs_created)
- return ERROR(stage_wrong); /* init missing */
-
- /* special case : empty frame */
- if (cctx->stage == ZSTDcs_init) {
- fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->params, 0, 0);
- if (ZSTD_isError(fhSize))
- return fhSize;
- dstCapacity -= fhSize;
- op += fhSize;
- cctx->stage = ZSTDcs_ongoing;
- }
-
- if (cctx->stage != ZSTDcs_ending) {
- /* write one last empty block, make it the "last" block */
- U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw) << 1) + 0;
- if (dstCapacity < 4)
- return ERROR(dstSize_tooSmall);
- ZSTD_writeLE32(op, cBlockHeader24);
- op += ZSTD_blockHeaderSize;
- dstCapacity -= ZSTD_blockHeaderSize;
- }
-
- if (cctx->params.fParams.checksumFlag) {
- U32 const checksum = (U32)xxh64_digest(&cctx->xxhState);
- if (dstCapacity < 4)
- return ERROR(dstSize_tooSmall);
- ZSTD_writeLE32(op, checksum);
- op += 4;
- }
-
- cctx->stage = ZSTDcs_created; /* return to "created but no init" status */
- return op - ostart;
-}
-
-size_t ZSTD_compressEnd(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
-{
- size_t endResult;
- size_t const cSize = ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 1);
- if (ZSTD_isError(cSize))
- return cSize;
- endResult = ZSTD_writeEpilogue(cctx, (char *)dst + cSize, dstCapacity - cSize);
- if (ZSTD_isError(endResult))
- return endResult;
- return cSize + endResult;
-}
-
-static size_t ZSTD_compress_internal(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize,
- ZSTD_parameters params)
-{
- CHECK_F(ZSTD_compressBegin_internal(cctx, dict, dictSize, params, srcSize));
- return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
-}
-
-size_t ZSTD_compress_usingDict(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize,
- ZSTD_parameters params)
-{
- return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, dict, dictSize, params);
-}
-
-size_t ZSTD_compressCCtx(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, ZSTD_parameters params)
-{
- return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, NULL, 0, params);
-}
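The one-shot helpers above (ZSTD_compress_usingDict() and ZSTD_compressCCtx()) are thin wrappers around ZSTD_compress_internal(), which runs compressBegin followed by compressEnd on a caller-supplied context. The following is a minimal, hypothetical sketch of how a kernel caller could drive this one-shot path; the ZSTD_initCCtx() signature, the kvmalloc() buffer handling and the error mapping are assumptions for illustration only, not part of this patch.

/* Hypothetical one-shot compression sketch (illustration only, not part of
 * this patch). Assumes the exported symbols below are declared in a kernel
 * zstd header and that ZSTD_initCCtx(workspace, workspaceSize) returns a
 * ZSTD_CCtx * placed inside the workspace (assumed signature). */
static int example_compress_once(const void *src, size_t srcSize,
				 void *dst, size_t dstCapacity)
{
	ZSTD_parameters const params = ZSTD_getParams(3, srcSize, 0);
	size_t const wkspSize = ZSTD_CCtxWorkspaceBound(params.cParams);
	void *wksp = kvmalloc(wkspSize, GFP_KERNEL);
	ZSTD_CCtx *cctx;
	size_t cSize;

	if (!wksp)
		return -ENOMEM;
	cctx = ZSTD_initCCtx(wksp, wkspSize);	/* assumed signature */
	if (!cctx) {
		kvfree(wksp);
		return -EINVAL;
	}
	cSize = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, params);
	kvfree(wksp);
	return ZSTD_isError(cSize) ? -EIO : (int)cSize;
}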
-
-/* ===== Dictionary API ===== */
-
-struct ZSTD_CDict_s {
- void *dictBuffer;
- const void *dictContent;
- size_t dictContentSize;
- ZSTD_CCtx *refContext;
-}; /* typedef'd to ZSTD_CDict within "zstd.h" */
-
-size_t ZSTD_CDictWorkspaceBound(ZSTD_compressionParameters cParams) { return ZSTD_CCtxWorkspaceBound(cParams) + ZSTD_ALIGN(sizeof(ZSTD_CDict)); }
-
-static ZSTD_CDict *ZSTD_createCDict_advanced(const void *dictBuffer, size_t dictSize, unsigned byReference, ZSTD_parameters params, ZSTD_customMem customMem)
-{
- if (!customMem.customAlloc || !customMem.customFree)
- return NULL;
-
- {
- ZSTD_CDict *const cdict = (ZSTD_CDict *)ZSTD_malloc(sizeof(ZSTD_CDict), customMem);
- ZSTD_CCtx *const cctx = ZSTD_createCCtx_advanced(customMem);
-
- if (!cdict || !cctx) {
- ZSTD_free(cdict, customMem);
- ZSTD_freeCCtx(cctx);
- return NULL;
- }
-
- if ((byReference) || (!dictBuffer) || (!dictSize)) {
- cdict->dictBuffer = NULL;
- cdict->dictContent = dictBuffer;
- } else {
- void *const internalBuffer = ZSTD_malloc(dictSize, customMem);
- if (!internalBuffer) {
- ZSTD_free(cctx, customMem);
- ZSTD_free(cdict, customMem);
- return NULL;
- }
- memcpy(internalBuffer, dictBuffer, dictSize);
- cdict->dictBuffer = internalBuffer;
- cdict->dictContent = internalBuffer;
- }
-
- {
- size_t const errorCode = ZSTD_compressBegin_advanced(cctx, cdict->dictContent, dictSize, params, 0);
- if (ZSTD_isError(errorCode)) {
- ZSTD_free(cdict->dictBuffer, customMem);
- ZSTD_free(cdict, customMem);
- ZSTD_freeCCtx(cctx);
- return NULL;
- }
- }
-
- cdict->refContext = cctx;
- cdict->dictContentSize = dictSize;
- return cdict;
- }
-}
-
-ZSTD_CDict *ZSTD_initCDict(const void *dict, size_t dictSize, ZSTD_parameters params, void *workspace, size_t workspaceSize)
-{
- ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
- return ZSTD_createCDict_advanced(dict, dictSize, 1, params, stackMem);
-}
-
-size_t ZSTD_freeCDict(ZSTD_CDict *cdict)
-{
- if (cdict == NULL)
- return 0; /* support free on NULL */
- {
- ZSTD_customMem const cMem = cdict->refContext->customMem;
- ZSTD_freeCCtx(cdict->refContext);
- ZSTD_free(cdict->dictBuffer, cMem);
- ZSTD_free(cdict, cMem);
- return 0;
- }
-}
-
-static ZSTD_parameters ZSTD_getParamsFromCDict(const ZSTD_CDict *cdict) { return ZSTD_getParamsFromCCtx(cdict->refContext); }
-
-size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx *cctx, const ZSTD_CDict *cdict, unsigned long long pledgedSrcSize)
-{
- if (cdict->dictContentSize)
- CHECK_F(ZSTD_copyCCtx(cctx, cdict->refContext, pledgedSrcSize))
- else {
- ZSTD_parameters params = cdict->refContext->params;
- params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
- CHECK_F(ZSTD_compressBegin_advanced(cctx, NULL, 0, params, pledgedSrcSize));
- }
- return 0;
-}
-
-/*! ZSTD_compress_usingCDict() :
-* Compression using a digested Dictionary.
-* Faster startup than ZSTD_compress_usingDict(), recommended when the same dictionary is used multiple times.
-* Note that compression level is decided during dictionary creation */
-size_t ZSTD_compress_usingCDict(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const ZSTD_CDict *cdict)
-{
- CHECK_F(ZSTD_compressBegin_usingCDict(cctx, cdict, srcSize));
-
- if (cdict->refContext->params.fParams.contentSizeFlag == 1) {
- cctx->params.fParams.contentSizeFlag = 1;
- cctx->frameContentSize = srcSize;
- } else {
- cctx->params.fParams.contentSizeFlag = 0;
- }
-
- return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
-}
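Re-digesting a dictionary on every ZSTD_compress_usingDict() call is wasteful when the dictionary is stable; the CDict path above lets the caller pay that cost once. Below is a hedged fragment showing the intended call pattern, using only entry points visible in this file (ZSTD_CDictWorkspaceBound(), ZSTD_initCDict(), ZSTD_compress_usingCDict()); the allocator, the surrounding declarations (cctx, params, dictBuf/dictLen, src/dst buffers) and the output consumer are assumptions for illustration.

/* Illustrative CDict reuse (not part of this patch). Assumes cctx, params,
 * dictBuf/dictLen and the src/dst buffers are set up as in the sketch above.
 * ZSTD_initCDict() builds everything inside the caller-provided workspace, so
 * freeing that workspace is enough to release the CDict. */
size_t const cdictWkspSize = ZSTD_CDictWorkspaceBound(params.cParams);
void *cdictWksp = kvmalloc(cdictWkspSize, GFP_KERNEL);
ZSTD_CDict *cdict = cdictWksp ?
	ZSTD_initCDict(dictBuf, dictLen, params, cdictWksp, cdictWkspSize) : NULL;

if (cdict) {
	/* Compression parameters are frozen at CDict creation time. */
	size_t const cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity,
						      src, srcSize, cdict);
	if (!ZSTD_isError(cSize))
		consume_output(dst, cSize);	/* hypothetical consumer */
}
kvfree(cdictWksp);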
-
-/* ******************************************************************
-* Streaming
-********************************************************************/
-
-typedef enum { zcss_init, zcss_load, zcss_flush, zcss_final } ZSTD_cStreamStage;
-
-struct ZSTD_CStream_s {
- ZSTD_CCtx *cctx;
- ZSTD_CDict *cdictLocal;
- const ZSTD_CDict *cdict;
- char *inBuff;
- size_t inBuffSize;
- size_t inToCompress;
- size_t inBuffPos;
- size_t inBuffTarget;
- size_t blockSize;
- char *outBuff;
- size_t outBuffSize;
- size_t outBuffContentSize;
- size_t outBuffFlushedSize;
- ZSTD_cStreamStage stage;
- U32 checksum;
- U32 frameEnded;
- U64 pledgedSrcSize;
- U64 inputProcessed;
- ZSTD_parameters params;
- ZSTD_customMem customMem;
-}; /* typedef'd to ZSTD_CStream within "zstd.h" */
-
-size_t ZSTD_CStreamWorkspaceBound(ZSTD_compressionParameters cParams)
-{
- size_t const inBuffSize = (size_t)1 << cParams.windowLog;
- size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, inBuffSize);
- size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1;
-
- return ZSTD_CCtxWorkspaceBound(cParams) + ZSTD_ALIGN(sizeof(ZSTD_CStream)) + ZSTD_ALIGN(inBuffSize) + ZSTD_ALIGN(outBuffSize);
-}
-
-ZSTD_CStream *ZSTD_createCStream_advanced(ZSTD_customMem customMem)
-{
- ZSTD_CStream *zcs;
-
- if (!customMem.customAlloc || !customMem.customFree)
- return NULL;
-
- zcs = (ZSTD_CStream *)ZSTD_malloc(sizeof(ZSTD_CStream), customMem);
- if (zcs == NULL)
- return NULL;
- memset(zcs, 0, sizeof(ZSTD_CStream));
- memcpy(&zcs->customMem, &customMem, sizeof(ZSTD_customMem));
- zcs->cctx = ZSTD_createCCtx_advanced(customMem);
- if (zcs->cctx == NULL) {
- ZSTD_freeCStream(zcs);
- return NULL;
- }
- return zcs;
-}
-
-size_t ZSTD_freeCStream(ZSTD_CStream *zcs)
-{
- if (zcs == NULL)
- return 0; /* support free on NULL */
- {
- ZSTD_customMem const cMem = zcs->customMem;
- ZSTD_freeCCtx(zcs->cctx);
- zcs->cctx = NULL;
- ZSTD_freeCDict(zcs->cdictLocal);
- zcs->cdictLocal = NULL;
- ZSTD_free(zcs->inBuff, cMem);
- zcs->inBuff = NULL;
- ZSTD_free(zcs->outBuff, cMem);
- zcs->outBuff = NULL;
- ZSTD_free(zcs, cMem);
- return 0;
- }
-}
-
-/*====== Initialization ======*/
-
-size_t ZSTD_CStreamInSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX; }
-size_t ZSTD_CStreamOutSize(void) { return ZSTD_compressBound(ZSTD_BLOCKSIZE_ABSOLUTEMAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */; }
-
-static size_t ZSTD_resetCStream_internal(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize)
-{
- if (zcs->inBuffSize == 0)
-	return ERROR(stage_wrong); /* zcs has not been initialized at least once => can't reset */
-
- if (zcs->cdict)
- CHECK_F(ZSTD_compressBegin_usingCDict(zcs->cctx, zcs->cdict, pledgedSrcSize))
- else
- CHECK_F(ZSTD_compressBegin_advanced(zcs->cctx, NULL, 0, zcs->params, pledgedSrcSize));
-
- zcs->inToCompress = 0;
- zcs->inBuffPos = 0;
- zcs->inBuffTarget = zcs->blockSize;
- zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
- zcs->stage = zcss_load;
- zcs->frameEnded = 0;
- zcs->pledgedSrcSize = pledgedSrcSize;
- zcs->inputProcessed = 0;
- return 0; /* ready to go */
-}
-
-size_t ZSTD_resetCStream(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize)
-{
-
- zcs->params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
-
- return ZSTD_resetCStream_internal(zcs, pledgedSrcSize);
-}
-
-static size_t ZSTD_initCStream_advanced(ZSTD_CStream *zcs, const void *dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize)
-{
- /* allocate buffers */
- {
- size_t const neededInBuffSize = (size_t)1 << params.cParams.windowLog;
- if (zcs->inBuffSize < neededInBuffSize) {
- zcs->inBuffSize = neededInBuffSize;
- ZSTD_free(zcs->inBuff, zcs->customMem);
- zcs->inBuff = (char *)ZSTD_malloc(neededInBuffSize, zcs->customMem);
- if (zcs->inBuff == NULL)
- return ERROR(memory_allocation);
- }
- zcs->blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, neededInBuffSize);
- }
- if (zcs->outBuffSize < ZSTD_compressBound(zcs->blockSize) + 1) {
- zcs->outBuffSize = ZSTD_compressBound(zcs->blockSize) + 1;
- ZSTD_free(zcs->outBuff, zcs->customMem);
- zcs->outBuff = (char *)ZSTD_malloc(zcs->outBuffSize, zcs->customMem);
- if (zcs->outBuff == NULL)
- return ERROR(memory_allocation);
- }
-
- if (dict && dictSize >= 8) {
- ZSTD_freeCDict(zcs->cdictLocal);
- zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, 0, params, zcs->customMem);
- if (zcs->cdictLocal == NULL)
- return ERROR(memory_allocation);
- zcs->cdict = zcs->cdictLocal;
- } else
- zcs->cdict = NULL;
-
- zcs->checksum = params.fParams.checksumFlag > 0;
- zcs->params = params;
-
- return ZSTD_resetCStream_internal(zcs, pledgedSrcSize);
-}
-
-ZSTD_CStream *ZSTD_initCStream(ZSTD_parameters params, unsigned long long pledgedSrcSize, void *workspace, size_t workspaceSize)
-{
- ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
- ZSTD_CStream *const zcs = ZSTD_createCStream_advanced(stackMem);
- if (zcs) {
- size_t const code = ZSTD_initCStream_advanced(zcs, NULL, 0, params, pledgedSrcSize);
- if (ZSTD_isError(code)) {
- return NULL;
- }
- }
- return zcs;
-}
-
-ZSTD_CStream *ZSTD_initCStream_usingCDict(const ZSTD_CDict *cdict, unsigned long long pledgedSrcSize, void *workspace, size_t workspaceSize)
-{
- ZSTD_parameters const params = ZSTD_getParamsFromCDict(cdict);
- ZSTD_CStream *const zcs = ZSTD_initCStream(params, pledgedSrcSize, workspace, workspaceSize);
- if (zcs) {
- zcs->cdict = cdict;
- if (ZSTD_isError(ZSTD_resetCStream_internal(zcs, pledgedSrcSize))) {
- return NULL;
- }
- }
- return zcs;
-}
-
-/*====== Compression ======*/
-
-typedef enum { zsf_gather, zsf_flush, zsf_end } ZSTD_flush_e;
-
-ZSTD_STATIC size_t ZSTD_limitCopy(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
-{
- size_t const length = MIN(dstCapacity, srcSize);
- memcpy(dst, src, length);
- return length;
-}
-
-static size_t ZSTD_compressStream_generic(ZSTD_CStream *zcs, void *dst, size_t *dstCapacityPtr, const void *src, size_t *srcSizePtr, ZSTD_flush_e const flush)
-{
- U32 someMoreWork = 1;
- const char *const istart = (const char *)src;
- const char *const iend = istart + *srcSizePtr;
- const char *ip = istart;
- char *const ostart = (char *)dst;
- char *const oend = ostart + *dstCapacityPtr;
- char *op = ostart;
-
- while (someMoreWork) {
- switch (zcs->stage) {
- case zcss_init:
- return ERROR(init_missing); /* call ZBUFF_compressInit() first ! */
-
- case zcss_load:
- /* complete inBuffer */
- {
- size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
- size_t const loaded = ZSTD_limitCopy(zcs->inBuff + zcs->inBuffPos, toLoad, ip, iend - ip);
- zcs->inBuffPos += loaded;
- ip += loaded;
- if ((zcs->inBuffPos == zcs->inToCompress) || (!flush && (toLoad != loaded))) {
- someMoreWork = 0;
- break; /* not enough input to get a full block : stop there, wait for more */
- }
- }
- /* compress curr block (note : this stage cannot be stopped in the middle) */
- {
- void *cDst;
- size_t cSize;
- size_t const iSize = zcs->inBuffPos - zcs->inToCompress;
- size_t oSize = oend - op;
- if (oSize >= ZSTD_compressBound(iSize))
- cDst = op; /* compress directly into output buffer (avoid flush stage) */
- else
- cDst = zcs->outBuff, oSize = zcs->outBuffSize;
- cSize = (flush == zsf_end) ? ZSTD_compressEnd(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize)
- : ZSTD_compressContinue(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize);
- if (ZSTD_isError(cSize))
- return cSize;
- if (flush == zsf_end)
- zcs->frameEnded = 1;
- /* prepare next block */
- zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
- if (zcs->inBuffTarget > zcs->inBuffSize)
- zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize; /* note : inBuffSize >= blockSize */
- zcs->inToCompress = zcs->inBuffPos;
- if (cDst == op) {
- op += cSize;
- break;
- } /* no need to flush */
- zcs->outBuffContentSize = cSize;
- zcs->outBuffFlushedSize = 0;
- zcs->stage = zcss_flush; /* pass-through to flush stage */
- }
- fallthrough;
-
- case zcss_flush: {
- size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
- size_t const flushed = ZSTD_limitCopy(op, oend - op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
- op += flushed;
- zcs->outBuffFlushedSize += flushed;
- if (toFlush != flushed) {
- someMoreWork = 0;
- break;
- } /* dst too small to store flushed data : stop there */
- zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
- zcs->stage = zcss_load;
- break;
- }
-
- case zcss_final:
- someMoreWork = 0; /* do nothing */
- break;
-
- default:
- return ERROR(GENERIC); /* impossible */
- }
- }
-
- *srcSizePtr = ip - istart;
- *dstCapacityPtr = op - ostart;
- zcs->inputProcessed += *srcSizePtr;
- if (zcs->frameEnded)
- return 0;
- {
- size_t hintInSize = zcs->inBuffTarget - zcs->inBuffPos;
- if (hintInSize == 0)
- hintInSize = zcs->blockSize;
- return hintInSize;
- }
-}
-
-size_t ZSTD_compressStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output, ZSTD_inBuffer *input)
-{
- size_t sizeRead = input->size - input->pos;
- size_t sizeWritten = output->size - output->pos;
- size_t const result =
- ZSTD_compressStream_generic(zcs, (char *)(output->dst) + output->pos, &sizeWritten, (const char *)(input->src) + input->pos, &sizeRead, zsf_gather);
- input->pos += sizeRead;
- output->pos += sizeWritten;
- return result;
-}
-
-/*====== Finalize ======*/
-
-/*! ZSTD_flushStream() :
-* @return : amount of data remaining to flush */
-size_t ZSTD_flushStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output)
-{
- size_t srcSize = 0;
- size_t sizeWritten = output->size - output->pos;
- size_t const result = ZSTD_compressStream_generic(zcs, (char *)(output->dst) + output->pos, &sizeWritten, &srcSize,
- &srcSize, /* use a valid src address instead of NULL */
- zsf_flush);
- output->pos += sizeWritten;
- if (ZSTD_isError(result))
- return result;
- return zcs->outBuffContentSize - zcs->outBuffFlushedSize; /* remaining to flush */
-}
-
-size_t ZSTD_endStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output)
-{
- BYTE *const ostart = (BYTE *)(output->dst) + output->pos;
- BYTE *const oend = (BYTE *)(output->dst) + output->size;
- BYTE *op = ostart;
-
- if ((zcs->pledgedSrcSize) && (zcs->inputProcessed != zcs->pledgedSrcSize))
- return ERROR(srcSize_wrong); /* pledgedSrcSize not respected */
-
- if (zcs->stage != zcss_final) {
- /* flush whatever remains */
- size_t srcSize = 0;
- size_t sizeWritten = output->size - output->pos;
- size_t const notEnded =
- ZSTD_compressStream_generic(zcs, ostart, &sizeWritten, &srcSize, &srcSize, zsf_end); /* use a valid src address instead of NULL */
- size_t const remainingToFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
- op += sizeWritten;
- if (remainingToFlush) {
- output->pos += sizeWritten;
- return remainingToFlush + ZSTD_BLOCKHEADERSIZE /* final empty block */ + (zcs->checksum * 4);
- }
- /* create epilogue */
- zcs->stage = zcss_final;
- zcs->outBuffContentSize = !notEnded ? 0 : ZSTD_compressEnd(zcs->cctx, zcs->outBuff, zcs->outBuffSize, NULL,
- 0); /* write epilogue, including final empty block, into outBuff */
- }
-
- /* flush epilogue */
- {
- size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
- size_t const flushed = ZSTD_limitCopy(op, oend - op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
- op += flushed;
- zcs->outBuffFlushedSize += flushed;
- output->pos += op - ostart;
- if (toFlush == flushed)
- zcs->stage = zcss_init; /* end reached */
- return toFlush - flushed;
- }
-}
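The streaming API above is driven by repeatedly filling a ZSTD_inBuffer and draining a ZSTD_outBuffer: call ZSTD_compressStream() while input remains, then ZSTD_endStream() until it reports 0 bytes left to flush. A hedged sketch of that loop follows (illustration only); it assumes the ZSTD_CStream was created with ZSTD_initCStream() over a workspace of at least ZSTD_CStreamWorkspaceBound(params.cParams) bytes, and it simply fails if dst fills up instead of draining it incrementally.

/* Illustrative streaming loop (not part of this patch). */
static int example_stream_compress(ZSTD_CStream *zcs,
				   const void *src, size_t srcSize,
				   void *dst, size_t dstCapacity,
				   size_t *produced)
{
	ZSTD_inBuffer in = { .src = src, .size = srcSize, .pos = 0 };
	ZSTD_outBuffer out = { .dst = dst, .size = dstCapacity, .pos = 0 };

	while (in.pos < in.size) {
		size_t const ret = ZSTD_compressStream(zcs, &out, &in);

		if (ZSTD_isError(ret))
			return -EIO;
		if (out.pos == out.size)
			return -ENOSPC;	/* dst too small for this sketch */
	}
	for (;;) {
		size_t const remaining = ZSTD_endStream(zcs, &out);

		if (ZSTD_isError(remaining))
			return -EIO;
		if (remaining == 0)
			break;		/* frame fully written */
		if (out.pos == out.size)
			return -ENOSPC;
	}
	*produced = out.pos;
	return 0;
}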
-
-/*-===== Pre-defined compression levels =====-*/
-
-#define ZSTD_DEFAULT_CLEVEL 1
-#define ZSTD_MAX_CLEVEL 22
-int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
-
-static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL + 1] = {
- {
- /* "default" */
- /* W, C, H, S, L, TL, strat */
- {18, 12, 12, 1, 7, 16, ZSTD_fast}, /* level 0 - never used */
- {19, 13, 14, 1, 7, 16, ZSTD_fast}, /* level 1 */
- {19, 15, 16, 1, 6, 16, ZSTD_fast}, /* level 2 */
- {20, 16, 17, 1, 5, 16, ZSTD_dfast}, /* level 3.*/
- {20, 18, 18, 1, 5, 16, ZSTD_dfast}, /* level 4.*/
- {20, 15, 18, 3, 5, 16, ZSTD_greedy}, /* level 5 */
- {21, 16, 19, 2, 5, 16, ZSTD_lazy}, /* level 6 */
- {21, 17, 20, 3, 5, 16, ZSTD_lazy}, /* level 7 */
- {21, 18, 20, 3, 5, 16, ZSTD_lazy2}, /* level 8 */
- {21, 20, 20, 3, 5, 16, ZSTD_lazy2}, /* level 9 */
- {21, 19, 21, 4, 5, 16, ZSTD_lazy2}, /* level 10 */
- {22, 20, 22, 4, 5, 16, ZSTD_lazy2}, /* level 11 */
- {22, 20, 22, 5, 5, 16, ZSTD_lazy2}, /* level 12 */
- {22, 21, 22, 5, 5, 16, ZSTD_lazy2}, /* level 13 */
- {22, 21, 22, 6, 5, 16, ZSTD_lazy2}, /* level 14 */
- {22, 21, 21, 5, 5, 16, ZSTD_btlazy2}, /* level 15 */
- {23, 22, 22, 5, 5, 16, ZSTD_btlazy2}, /* level 16 */
- {23, 21, 22, 4, 5, 24, ZSTD_btopt}, /* level 17 */
- {23, 23, 22, 6, 5, 32, ZSTD_btopt}, /* level 18 */
- {23, 23, 22, 6, 3, 48, ZSTD_btopt}, /* level 19 */
- {25, 25, 23, 7, 3, 64, ZSTD_btopt2}, /* level 20 */
- {26, 26, 23, 7, 3, 256, ZSTD_btopt2}, /* level 21 */
- {27, 27, 25, 9, 3, 512, ZSTD_btopt2}, /* level 22 */
- },
- {
- /* for srcSize <= 256 KB */
- /* W, C, H, S, L, T, strat */
- {0, 0, 0, 0, 0, 0, ZSTD_fast}, /* level 0 - not used */
- {18, 13, 14, 1, 6, 8, ZSTD_fast}, /* level 1 */
- {18, 14, 13, 1, 5, 8, ZSTD_dfast}, /* level 2 */
- {18, 16, 15, 1, 5, 8, ZSTD_dfast}, /* level 3 */
- {18, 15, 17, 1, 5, 8, ZSTD_greedy}, /* level 4.*/
- {18, 16, 17, 4, 5, 8, ZSTD_greedy}, /* level 5.*/
- {18, 16, 17, 3, 5, 8, ZSTD_lazy}, /* level 6.*/
- {18, 17, 17, 4, 4, 8, ZSTD_lazy}, /* level 7 */
- {18, 17, 17, 4, 4, 8, ZSTD_lazy2}, /* level 8 */
- {18, 17, 17, 5, 4, 8, ZSTD_lazy2}, /* level 9 */
- {18, 17, 17, 6, 4, 8, ZSTD_lazy2}, /* level 10 */
- {18, 18, 17, 6, 4, 8, ZSTD_lazy2}, /* level 11.*/
- {18, 18, 17, 7, 4, 8, ZSTD_lazy2}, /* level 12.*/
- {18, 19, 17, 6, 4, 8, ZSTD_btlazy2}, /* level 13 */
- {18, 18, 18, 4, 4, 16, ZSTD_btopt}, /* level 14.*/
- {18, 18, 18, 4, 3, 16, ZSTD_btopt}, /* level 15.*/
- {18, 19, 18, 6, 3, 32, ZSTD_btopt}, /* level 16.*/
- {18, 19, 18, 8, 3, 64, ZSTD_btopt}, /* level 17.*/
- {18, 19, 18, 9, 3, 128, ZSTD_btopt}, /* level 18.*/
- {18, 19, 18, 10, 3, 256, ZSTD_btopt}, /* level 19.*/
- {18, 19, 18, 11, 3, 512, ZSTD_btopt2}, /* level 20.*/
- {18, 19, 18, 12, 3, 512, ZSTD_btopt2}, /* level 21.*/
- {18, 19, 18, 13, 3, 512, ZSTD_btopt2}, /* level 22.*/
- },
- {
- /* for srcSize <= 128 KB */
- /* W, C, H, S, L, T, strat */
- {17, 12, 12, 1, 7, 8, ZSTD_fast}, /* level 0 - not used */
- {17, 12, 13, 1, 6, 8, ZSTD_fast}, /* level 1 */
- {17, 13, 16, 1, 5, 8, ZSTD_fast}, /* level 2 */
- {17, 16, 16, 2, 5, 8, ZSTD_dfast}, /* level 3 */
- {17, 13, 15, 3, 4, 8, ZSTD_greedy}, /* level 4 */
- {17, 15, 17, 4, 4, 8, ZSTD_greedy}, /* level 5 */
- {17, 16, 17, 3, 4, 8, ZSTD_lazy}, /* level 6 */
- {17, 15, 17, 4, 4, 8, ZSTD_lazy2}, /* level 7 */
- {17, 17, 17, 4, 4, 8, ZSTD_lazy2}, /* level 8 */
- {17, 17, 17, 5, 4, 8, ZSTD_lazy2}, /* level 9 */
- {17, 17, 17, 6, 4, 8, ZSTD_lazy2}, /* level 10 */
- {17, 17, 17, 7, 4, 8, ZSTD_lazy2}, /* level 11 */
- {17, 17, 17, 8, 4, 8, ZSTD_lazy2}, /* level 12 */
- {17, 18, 17, 6, 4, 8, ZSTD_btlazy2}, /* level 13.*/
- {17, 17, 17, 7, 3, 8, ZSTD_btopt}, /* level 14.*/
- {17, 17, 17, 7, 3, 16, ZSTD_btopt}, /* level 15.*/
- {17, 18, 17, 7, 3, 32, ZSTD_btopt}, /* level 16.*/
- {17, 18, 17, 7, 3, 64, ZSTD_btopt}, /* level 17.*/
- {17, 18, 17, 7, 3, 256, ZSTD_btopt}, /* level 18.*/
- {17, 18, 17, 8, 3, 256, ZSTD_btopt}, /* level 19.*/
- {17, 18, 17, 9, 3, 256, ZSTD_btopt2}, /* level 20.*/
- {17, 18, 17, 10, 3, 256, ZSTD_btopt2}, /* level 21.*/
- {17, 18, 17, 11, 3, 512, ZSTD_btopt2}, /* level 22.*/
- },
- {
- /* for srcSize <= 16 KB */
- /* W, C, H, S, L, T, strat */
- {14, 12, 12, 1, 7, 6, ZSTD_fast}, /* level 0 - not used */
- {14, 14, 14, 1, 6, 6, ZSTD_fast}, /* level 1 */
- {14, 14, 14, 1, 4, 6, ZSTD_fast}, /* level 2 */
- {14, 14, 14, 1, 4, 6, ZSTD_dfast}, /* level 3.*/
- {14, 14, 14, 4, 4, 6, ZSTD_greedy}, /* level 4.*/
- {14, 14, 14, 3, 4, 6, ZSTD_lazy}, /* level 5.*/
- {14, 14, 14, 4, 4, 6, ZSTD_lazy2}, /* level 6 */
- {14, 14, 14, 5, 4, 6, ZSTD_lazy2}, /* level 7 */
- {14, 14, 14, 6, 4, 6, ZSTD_lazy2}, /* level 8.*/
- {14, 15, 14, 6, 4, 6, ZSTD_btlazy2}, /* level 9.*/
- {14, 15, 14, 3, 3, 6, ZSTD_btopt}, /* level 10.*/
- {14, 15, 14, 6, 3, 8, ZSTD_btopt}, /* level 11.*/
- {14, 15, 14, 6, 3, 16, ZSTD_btopt}, /* level 12.*/
- {14, 15, 14, 6, 3, 24, ZSTD_btopt}, /* level 13.*/
- {14, 15, 15, 6, 3, 48, ZSTD_btopt}, /* level 14.*/
- {14, 15, 15, 6, 3, 64, ZSTD_btopt}, /* level 15.*/
- {14, 15, 15, 6, 3, 96, ZSTD_btopt}, /* level 16.*/
- {14, 15, 15, 6, 3, 128, ZSTD_btopt}, /* level 17.*/
- {14, 15, 15, 6, 3, 256, ZSTD_btopt}, /* level 18.*/
- {14, 15, 15, 7, 3, 256, ZSTD_btopt}, /* level 19.*/
- {14, 15, 15, 8, 3, 256, ZSTD_btopt2}, /* level 20.*/
- {14, 15, 15, 9, 3, 256, ZSTD_btopt2}, /* level 21.*/
- {14, 15, 15, 10, 3, 256, ZSTD_btopt2}, /* level 22.*/
- },
-};
-
-/*! ZSTD_getCParams() :
-* @return ZSTD_compressionParameters structure for a selected compression level, `srcSize` and `dictSize`.
-* Size values are optional, provide 0 if not known or unused */
-ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSize, size_t dictSize)
-{
- ZSTD_compressionParameters cp;
- size_t const addedSize = srcSize ? 0 : 500;
- U64 const rSize = srcSize + dictSize ? srcSize + dictSize + addedSize : (U64)-1;
- U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB); /* intentional underflow for srcSizeHint == 0 */
- if (compressionLevel <= 0)
- compressionLevel = ZSTD_DEFAULT_CLEVEL; /* 0 == default; no negative compressionLevel yet */
- if (compressionLevel > ZSTD_MAX_CLEVEL)
- compressionLevel = ZSTD_MAX_CLEVEL;
- cp = ZSTD_defaultCParameters[tableID][compressionLevel];
- if (ZSTD_32bits()) { /* auto-correction, for 32-bits mode */
- if (cp.windowLog > ZSTD_WINDOWLOG_MAX)
- cp.windowLog = ZSTD_WINDOWLOG_MAX;
- if (cp.chainLog > ZSTD_CHAINLOG_MAX)
- cp.chainLog = ZSTD_CHAINLOG_MAX;
- if (cp.hashLog > ZSTD_HASHLOG_MAX)
- cp.hashLog = ZSTD_HASHLOG_MAX;
- }
- cp = ZSTD_adjustCParams(cp, srcSize, dictSize);
- return cp;
-}
-
-/*! ZSTD_getParams() :
-* same as ZSTD_getCParams(), but @return a `ZSTD_parameters` object (instead of `ZSTD_compressionParameters`).
-* All fields of `ZSTD_frameParameters` are set to default (0) */
-ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSize, size_t dictSize)
-{
- ZSTD_parameters params;
- ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, srcSize, dictSize);
- memset(&params, 0, sizeof(params));
- params.cParams = cParams;
- return params;
-}
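ZSTD_getCParams() buckets the hinted source size (plus dictionary size) into one of the four rows above: larger than 256 KB ("default"), <= 256 KB, <= 128 KB, or <= 16 KB; a size hint of 0 deliberately lands in the "default" row via the (U64)-1 sentinel. A small hypothetical illustration:

/* Illustrative parameter selection (not part of this patch). */
ZSTD_parameters const unknown = ZSTD_getParams(3, 0, 0);    /* no size hint -> "default" row  */
ZSTD_parameters const tiny    = ZSTD_getParams(3, 4096, 0); /* 4 KB hint    -> "<= 16 KB" row */
/* tiny.cParams.windowLog will not exceed unknown.cParams.windowLog, because the
 * small-size row starts smaller and ZSTD_adjustCParams() additionally shrinks
 * the window to fit the hinted source size. */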
-
-EXPORT_SYMBOL(ZSTD_maxCLevel);
-EXPORT_SYMBOL(ZSTD_compressBound);
-
-EXPORT_SYMBOL(ZSTD_CCtxWorkspaceBound);
-EXPORT_SYMBOL(ZSTD_initCCtx);
-EXPORT_SYMBOL(ZSTD_compressCCtx);
-EXPORT_SYMBOL(ZSTD_compress_usingDict);
-
-EXPORT_SYMBOL(ZSTD_CDictWorkspaceBound);
-EXPORT_SYMBOL(ZSTD_initCDict);
-EXPORT_SYMBOL(ZSTD_compress_usingCDict);
-
-EXPORT_SYMBOL(ZSTD_CStreamWorkspaceBound);
-EXPORT_SYMBOL(ZSTD_initCStream);
-EXPORT_SYMBOL(ZSTD_initCStream_usingCDict);
-EXPORT_SYMBOL(ZSTD_resetCStream);
-EXPORT_SYMBOL(ZSTD_compressStream);
-EXPORT_SYMBOL(ZSTD_flushStream);
-EXPORT_SYMBOL(ZSTD_endStream);
-EXPORT_SYMBOL(ZSTD_CStreamInSize);
-EXPORT_SYMBOL(ZSTD_CStreamOutSize);
-
-EXPORT_SYMBOL(ZSTD_getCParams);
-EXPORT_SYMBOL(ZSTD_getParams);
-EXPORT_SYMBOL(ZSTD_checkCParams);
-EXPORT_SYMBOL(ZSTD_adjustCParams);
-
-EXPORT_SYMBOL(ZSTD_compressBegin);
-EXPORT_SYMBOL(ZSTD_compressBegin_usingDict);
-EXPORT_SYMBOL(ZSTD_compressBegin_advanced);
-EXPORT_SYMBOL(ZSTD_copyCCtx);
-EXPORT_SYMBOL(ZSTD_compressBegin_usingCDict);
-EXPORT_SYMBOL(ZSTD_compressContinue);
-EXPORT_SYMBOL(ZSTD_compressEnd);
-
-EXPORT_SYMBOL(ZSTD_getBlockSizeMax);
-EXPORT_SYMBOL(ZSTD_compressBlock);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("Zstd Compressor");
diff --git a/lib/zstd/compress/fse_compress.c b/lib/zstd/compress/fse_compress.c
new file mode 100644
index 00000000000000..436985b620e51a
--- /dev/null
+++ b/lib/zstd/compress/fse_compress.c
@@ -0,0 +1,625 @@
+/* ******************************************************************
+ * FSE : Finite State Entropy encoder
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ *
+ * You can contact the author at :
+ * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ * - Public forum : https://groups.google.com/forum/#!forum/lz4c
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */
+
+/* **************************************************************
+* Includes
+****************************************************************/
+#include "../common/compiler.h"
+#include "../common/mem.h" /* U32, U16, etc. */
+#include "../common/debug.h" /* assert, DEBUGLOG */
+#include "hist.h" /* HIST_count_wksp */
+#include "../common/bitstream.h"
+#define FSE_STATIC_LINKING_ONLY
+#include "../common/fse.h"
+#include "../common/error_private.h"
+#define ZSTD_DEPS_NEED_MALLOC
+#define ZSTD_DEPS_NEED_MATH64
+#include "../common/zstd_deps.h" /* ZSTD_malloc, ZSTD_free, ZSTD_memcpy, ZSTD_memset */
+
+
+/* **************************************************************
+* Error Management
+****************************************************************/
+#define FSE_isError ERR_isError
+
+
+/* **************************************************************
+* Templates
+****************************************************************/
+/*
+ designed to be included
+ for type-specific functions (template emulation in C)
+ Objective is to write these functions only once, for improved maintenance
+*/
+
+/* safety checks */
+#ifndef FSE_FUNCTION_EXTENSION
+# error "FSE_FUNCTION_EXTENSION must be defined"
+#endif
+#ifndef FSE_FUNCTION_TYPE
+# error "FSE_FUNCTION_TYPE must be defined"
+#endif
+
+/* Function names */
+#define FSE_CAT(X,Y) X##Y
+#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
+#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
+
+
+/* Function templates */
+
+/* FSE_buildCTable_wksp() :
+ * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
+ * wkspSize should be sized to handle worst case situation, which is `1<<max_tableLog * sizeof(FSE_FUNCTION_TYPE)`
+ * workSpace must also be properly aligned with FSE_FUNCTION_TYPE requirements
+ */
+size_t FSE_buildCTable_wksp(FSE_CTable* ct,
+ const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
+ void* workSpace, size_t wkspSize)
+{
+ U32 const tableSize = 1 << tableLog;
+ U32 const tableMask = tableSize - 1;
+ void* const ptr = ct;
+ U16* const tableU16 = ( (U16*) ptr) + 2;
+ void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableLog ? tableSize>>1 : 1) ;
+ FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
+ U32 const step = FSE_TABLESTEP(tableSize);
+
+ U32* cumul = (U32*)workSpace;
+ FSE_FUNCTION_TYPE* tableSymbol = (FSE_FUNCTION_TYPE*)(cumul + (maxSymbolValue + 2));
+
+ U32 highThreshold = tableSize-1;
+
+ if ((size_t)workSpace & 3) return ERROR(GENERIC); /* Must be 4 byte aligned */
+ if (FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) > wkspSize) return ERROR(tableLog_tooLarge);
+ /* CTable header */
+ tableU16[-2] = (U16) tableLog;
+ tableU16[-1] = (U16) maxSymbolValue;
+ assert(tableLog < 16); /* required for threshold strategy to work */
+
+ /* For explanations on how to distribute symbol values over the table :
+ * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */
+
+ #ifdef __clang_analyzer__
+ ZSTD_memset(tableSymbol, 0, sizeof(*tableSymbol) * tableSize); /* useless initialization, just to keep scan-build happy */
+ #endif
+
+ /* symbol start positions */
+ { U32 u;
+ cumul[0] = 0;
+ for (u=1; u <= maxSymbolValue+1; u++) {
+ if (normalizedCounter[u-1]==-1) { /* Low proba symbol */
+ cumul[u] = cumul[u-1] + 1;
+ tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u-1);
+ } else {
+ cumul[u] = cumul[u-1] + normalizedCounter[u-1];
+ } }
+ cumul[maxSymbolValue+1] = tableSize+1;
+ }
+
+ /* Spread symbols */
+ { U32 position = 0;
+ U32 symbol;
+ for (symbol=0; symbol<=maxSymbolValue; symbol++) {
+ int nbOccurrences;
+ int const freq = normalizedCounter[symbol];
+ for (nbOccurrences=0; nbOccurrences<freq; nbOccurrences++) {
+ tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
+ position = (position + step) & tableMask;
+ while (position > highThreshold)
+ position = (position + step) & tableMask; /* Low proba area */
+ } }
+
+ assert(position==0); /* Must have initialized all positions */
+ }
+
+ /* Build table */
+ { U32 u; for (u=0; u<tableSize; u++) {
+ FSE_FUNCTION_TYPE s = tableSymbol[u]; /* note : static analyzer may not understand tableSymbol is properly initialized */
+ tableU16[cumul[s]++] = (U16) (tableSize+u); /* TableU16 : sorted by symbol order; gives next state value */
+ } }
+
+ /* Build Symbol Transformation Table */
+ { unsigned total = 0;
+ unsigned s;
+ for (s=0; s<=maxSymbolValue; s++) {
+ switch (normalizedCounter[s])
+ {
+ case 0:
+ /* filling nonetheless, for compatibility with FSE_getMaxNbBits() */
+ symbolTT[s].deltaNbBits = ((tableLog+1) << 16) - (1<<tableLog);
+ break;
+
+ case -1:
+ case 1:
+ symbolTT[s].deltaNbBits = (tableLog << 16) - (1<<tableLog);
+ symbolTT[s].deltaFindState = total - 1;
+ total ++;
+ break;
+ default :
+ {
+ U32 const maxBitsOut = tableLog - BIT_highbit32 (normalizedCounter[s]-1);
+ U32 const minStatePlus = normalizedCounter[s] << maxBitsOut;
+ symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;
+ symbolTT[s].deltaFindState = total - normalizedCounter[s];
+ total += normalizedCounter[s];
+ } } } }
+
+#if 0 /* debug : symbol costs */
+ DEBUGLOG(5, "\n --- table statistics : ");
+ { U32 symbol;
+ for (symbol=0; symbol<=maxSymbolValue; symbol++) {
+ DEBUGLOG(5, "%3u: w=%3i, maxBits=%u, fracBits=%.2f",
+ symbol, normalizedCounter[symbol],
+ FSE_getMaxNbBits(symbolTT, symbol),
+ (double)FSE_bitCost(symbolTT, tableLog, symbol, 8) / 256);
+ }
+ }
+#endif
+
+ return 0;
+}
+
+
+
+
+#ifndef FSE_COMMONDEFS_ONLY
+
+
+/*-**************************************************************
+* FSE NCount encoding
+****************************************************************/
+size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
+{
+ size_t const maxHeaderSize = (((maxSymbolValue+1) * tableLog) >> 3) + 3;
+ return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND; /* maxSymbolValue==0 ? use default */
+}
+
+static size_t
+FSE_writeNCount_generic (void* header, size_t headerBufferSize,
+ const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
+ unsigned writeIsSafe)
+{
+ BYTE* const ostart = (BYTE*) header;
+ BYTE* out = ostart;
+ BYTE* const oend = ostart + headerBufferSize;
+ int nbBits;
+ const int tableSize = 1 << tableLog;
+ int remaining;
+ int threshold;
+ U32 bitStream = 0;
+ int bitCount = 0;
+ unsigned symbol = 0;
+ unsigned const alphabetSize = maxSymbolValue + 1;
+ int previousIs0 = 0;
+
+ /* Table Size */
+ bitStream += (tableLog-FSE_MIN_TABLELOG) << bitCount;
+ bitCount += 4;
+
+ /* Init */
+ remaining = tableSize+1; /* +1 for extra accuracy */
+ threshold = tableSize;
+ nbBits = tableLog+1;
+
+ while ((symbol < alphabetSize) && (remaining>1)) { /* stops at 1 */
+ if (previousIs0) {
+ unsigned start = symbol;
+ while ((symbol < alphabetSize) && !normalizedCounter[symbol]) symbol++;
+ if (symbol == alphabetSize) break; /* incorrect distribution */
+ while (symbol >= start+24) {
+ start+=24;
+ bitStream += 0xFFFFU << bitCount;
+ if ((!writeIsSafe) && (out > oend-2))
+ return ERROR(dstSize_tooSmall); /* Buffer overflow */
+ out[0] = (BYTE) bitStream;
+ out[1] = (BYTE)(bitStream>>8);
+ out+=2;
+ bitStream>>=16;
+ }
+ while (symbol >= start+3) {
+ start+=3;
+ bitStream += 3 << bitCount;
+ bitCount += 2;
+ }
+ bitStream += (symbol-start) << bitCount;
+ bitCount += 2;
+ if (bitCount>16) {
+ if ((!writeIsSafe) && (out > oend - 2))
+ return ERROR(dstSize_tooSmall); /* Buffer overflow */
+ out[0] = (BYTE)bitStream;
+ out[1] = (BYTE)(bitStream>>8);
+ out += 2;
+ bitStream >>= 16;
+ bitCount -= 16;
+ } }
+ { int count = normalizedCounter[symbol++];
+ int const max = (2*threshold-1) - remaining;
+ remaining -= count < 0 ? -count : count;
+ count++; /* +1 for extra accuracy */
+ if (count>=threshold)
+ count += max; /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */
+ bitStream += count << bitCount;
+ bitCount += nbBits;
+ bitCount -= (count<max);
+ previousIs0 = (count==1);
+ if (remaining<1) return ERROR(GENERIC);
+ while (remaining<threshold) { nbBits--; threshold>>=1; }
+ }
+ if (bitCount>16) {
+ if ((!writeIsSafe) && (out > oend - 2))
+ return ERROR(dstSize_tooSmall); /* Buffer overflow */
+ out[0] = (BYTE)bitStream;
+ out[1] = (BYTE)(bitStream>>8);
+ out += 2;
+ bitStream >>= 16;
+ bitCount -= 16;
+ } }
+
+ if (remaining != 1)
+ return ERROR(GENERIC); /* incorrect normalized distribution */
+ assert(symbol <= alphabetSize);
+
+ /* flush remaining bitStream */
+ if ((!writeIsSafe) && (out > oend - 2))
+ return ERROR(dstSize_tooSmall); /* Buffer overflow */
+ out[0] = (BYTE)bitStream;
+ out[1] = (BYTE)(bitStream>>8);
+ out+= (bitCount+7) /8;
+
+ return (out-ostart);
+}
+
+
+size_t FSE_writeNCount (void* buffer, size_t bufferSize,
+ const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
+{
+ if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported */
+ if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC); /* Unsupported */
+
+ if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog))
+ return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0);
+
+ return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1 /* write in buffer is safe */);
+}
+
+
+/*-**************************************************************
+* FSE Compression Code
+****************************************************************/
+
+FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog)
+{
+ size_t size;
+ if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
+ size = FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32);
+ return (FSE_CTable*)ZSTD_malloc(size);
+}
+
+void FSE_freeCTable (FSE_CTable* ct) { ZSTD_free(ct); }
+
+/* provides the minimum logSize to safely represent a distribution */
+static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
+{
+ U32 minBitsSrc = BIT_highbit32((U32)(srcSize)) + 1;
+ U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2;
+ U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
+ assert(srcSize > 1); /* Not supported, RLE should be used instead */
+ return minBits;
+}
+
+unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus)
+{
+ U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus;
+ U32 tableLog = maxTableLog;
+ U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue);
+ assert(srcSize > 1); /* Not supported, RLE should be used instead */
+ if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
+ if (maxBitsSrc < tableLog) tableLog = maxBitsSrc; /* Accuracy can be reduced */
+ if (minBits > tableLog) tableLog = minBits; /* Need a minimum to safely represent all symbol values */
+ if (tableLog < FSE_MIN_TABLELOG) tableLog = FSE_MIN_TABLELOG;
+ if (tableLog > FSE_MAX_TABLELOG) tableLog = FSE_MAX_TABLELOG;
+ return tableLog;
+}
+
+unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
+{
+ return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2);
+}
+
+/* Secondary normalization method.
+ To be used when primary method fails. */
+
+static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, size_t total, U32 maxSymbolValue, short lowProbCount)
+{
+ short const NOT_YET_ASSIGNED = -2;
+ U32 s;
+ U32 distributed = 0;
+ U32 ToDistribute;
+
+ /* Init */
+ U32 const lowThreshold = (U32)(total >> tableLog);
+ U32 lowOne = (U32)((total * 3) >> (tableLog + 1));
+
+ for (s=0; s<=maxSymbolValue; s++) {
+ if (count[s] == 0) {
+ norm[s]=0;
+ continue;
+ }
+ if (count[s] <= lowThreshold) {
+ norm[s] = lowProbCount;
+ distributed++;
+ total -= count[s];
+ continue;
+ }
+ if (count[s] <= lowOne) {
+ norm[s] = 1;
+ distributed++;
+ total -= count[s];
+ continue;
+ }
+
+ norm[s]=NOT_YET_ASSIGNED;
+ }
+ ToDistribute = (1 << tableLog) - distributed;
+
+ if (ToDistribute == 0)
+ return 0;
+
+ if ((total / ToDistribute) > lowOne) {
+ /* risk of rounding to zero */
+ lowOne = (U32)((total * 3) / (ToDistribute * 2));
+ for (s=0; s<=maxSymbolValue; s++) {
+ if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) {
+ norm[s] = 1;
+ distributed++;
+ total -= count[s];
+ continue;
+ } }
+ ToDistribute = (1 << tableLog) - distributed;
+ }
+
+ if (distributed == maxSymbolValue+1) {
+ /* all values are pretty poor;
+ probably incompressible data (should have already been detected);
+ find max, then give all remaining points to max */
+ U32 maxV = 0, maxC = 0;
+ for (s=0; s<=maxSymbolValue; s++)
+ if (count[s] > maxC) { maxV=s; maxC=count[s]; }
+ norm[maxV] += (short)ToDistribute;
+ return 0;
+ }
+
+ if (total == 0) {
+ /* all of the symbols were low enough for the lowOne or lowThreshold */
+ for (s=0; ToDistribute > 0; s = (s+1)%(maxSymbolValue+1))
+ if (norm[s] > 0) { ToDistribute--; norm[s]++; }
+ return 0;
+ }
+
+ { U64 const vStepLog = 62 - tableLog;
+ U64 const mid = (1ULL << (vStepLog-1)) - 1;
+ U64 const rStep = ZSTD_div64((((U64)1<<vStepLog) * ToDistribute) + mid, (U32)total); /* scale on remaining */
+ U64 tmpTotal = mid;
+ for (s=0; s<=maxSymbolValue; s++) {
+ if (norm[s]==NOT_YET_ASSIGNED) {
+ U64 const end = tmpTotal + (count[s] * rStep);
+ U32 const sStart = (U32)(tmpTotal >> vStepLog);
+ U32 const sEnd = (U32)(end >> vStepLog);
+ U32 const weight = sEnd - sStart;
+ if (weight < 1)
+ return ERROR(GENERIC);
+ norm[s] = (short)weight;
+ tmpTotal = end;
+ } } }
+
+ return 0;
+}
+
+size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
+ const unsigned* count, size_t total,
+ unsigned maxSymbolValue, unsigned useLowProbCount)
+{
+ /* Sanity checks */
+ if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
+ if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC); /* Unsupported size */
+ if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported size */
+ if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC); /* Too small tableLog, compression potentially impossible */
+
+ { static U32 const rtbTable[] = { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 };
+ short const lowProbCount = useLowProbCount ? -1 : 1;
+ U64 const scale = 62 - tableLog;
+ U64 const step = ZSTD_div64((U64)1<<62, (U32)total); /* <== here, one division ! */
+ U64 const vStep = 1ULL<<(scale-20);
+ int stillToDistribute = 1<<tableLog;
+ unsigned s;
+ unsigned largest=0;
+ short largestP=0;
+ U32 lowThreshold = (U32)(total >> tableLog);
+
+ for (s=0; s<=maxSymbolValue; s++) {
+ if (count[s] == total) return 0; /* rle special case */
+ if (count[s] == 0) { normalizedCounter[s]=0; continue; }
+ if (count[s] <= lowThreshold) {
+ normalizedCounter[s] = lowProbCount;
+ stillToDistribute--;
+ } else {
+ short proba = (short)((count[s]*step) >> scale);
+ if (proba<8) {
+ U64 restToBeat = vStep * rtbTable[proba];
+ proba += (count[s]*step) - ((U64)proba<<scale) > restToBeat;
+ }
+ if (proba > largestP) { largestP=proba; largest=s; }
+ normalizedCounter[s] = proba;
+ stillToDistribute -= proba;
+ } }
+ if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) {
+ /* corner case, need another normalization method */
+ size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue, lowProbCount);
+ if (FSE_isError(errorCode)) return errorCode;
+ }
+ else normalizedCounter[largest] += (short)stillToDistribute;
+ }
+
+#if 0
+ { /* Print Table (debug) */
+ U32 s;
+ U32 nTotal = 0;
+ for (s=0; s<=maxSymbolValue; s++)
+ RAWLOG(2, "%3i: %4i \n", s, normalizedCounter[s]);
+ for (s=0; s<=maxSymbolValue; s++)
+ nTotal += abs(normalizedCounter[s]);
+ if (nTotal != (1U<<tableLog))
+ RAWLOG(2, "Warning !!! Total == %u != %u !!!", nTotal, 1U<<tableLog);
+ getchar();
+ }
+#endif
+
+ return tableLog;
+}
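FSE_normalizeCount() turns raw symbol frequencies into a distribution that sums to exactly 1 << tableLog, which is what FSE_buildCTable_wksp() and FSE_compress_usingCTable() consume. The usual pipeline is histogram -> normalize -> build table -> encode, exactly as HUF_compressWeights() does later in this patch for Huffman weight streams. A hedged sketch of that pipeline follows (illustration only; the static scratch sizing and the src/dst variables are assumptions, and a real encoder would also emit the normalized table with FSE_writeNCount() ahead of the payload).

/* Illustrative FSE pipeline (not part of this patch). Assumes src/srcSize to
 * encode and dst/dstCapacity for output; scratch areas are static (so the
 * sketch stays off the kernel stack, at the cost of reentrancy) and sized for
 * maxSymbolValue = 255 using the macros referenced in this file. */
static size_t example_fse_compress(void *dst, size_t dstCapacity,
				   const void *src, size_t srcSize)
{
	static unsigned count[256];
	static short norm[256];
	static FSE_CTable ct[FSE_CTABLE_SIZE_U32(FSE_MAX_TABLELOG, 255)];
	static U32 scratch[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(255, FSE_MAX_TABLELOG)];
	unsigned maxSymbolValue = 255;
	unsigned tableLog;
	size_t err;

	HIST_count_simple(count, &maxSymbolValue, src, srcSize);    /* 1. histogram */
	tableLog = FSE_optimalTableLog(0, srcSize, maxSymbolValue); /* 2. accuracy  */
	err = FSE_normalizeCount(norm, tableLog, count, srcSize,
				 maxSymbolValue, /* useLowProbCount */ 0);
	if (FSE_isError(err) || err == 0)  /* 0 means RLE input; not handled here */
		return err;
	err = FSE_buildCTable_wksp(ct, norm, maxSymbolValue, tableLog,
				   scratch, sizeof(scratch));    /* 3. build table */
	if (FSE_isError(err))
		return err;
	return FSE_compress_usingCTable(dst, dstCapacity, src, srcSize, ct); /* 4. encode */
}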
+
+
+/* fake FSE_CTable, for raw (uncompressed) input */
+size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits)
+{
+ const unsigned tableSize = 1 << nbBits;
+ const unsigned tableMask = tableSize - 1;
+ const unsigned maxSymbolValue = tableMask;
+ void* const ptr = ct;
+ U16* const tableU16 = ( (U16*) ptr) + 2;
+ void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableSize>>1); /* assumption : tableLog >= 1 */
+ FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
+ unsigned s;
+
+ /* Sanity checks */
+ if (nbBits < 1) return ERROR(GENERIC); /* min size */
+
+ /* header */
+ tableU16[-2] = (U16) nbBits;
+ tableU16[-1] = (U16) maxSymbolValue;
+
+ /* Build table */
+ for (s=0; s<tableSize; s++)
+ tableU16[s] = (U16)(tableSize + s);
+
+ /* Build Symbol Transformation Table */
+ { const U32 deltaNbBits = (nbBits << 16) - (1 << nbBits);
+ for (s=0; s<=maxSymbolValue; s++) {
+ symbolTT[s].deltaNbBits = deltaNbBits;
+ symbolTT[s].deltaFindState = s-1;
+ } }
+
+ return 0;
+}
+
+/* fake FSE_CTable, for rle input (always same symbol) */
+size_t FSE_buildCTable_rle (FSE_CTable* ct, BYTE symbolValue)
+{
+ void* ptr = ct;
+ U16* tableU16 = ( (U16*) ptr) + 2;
+ void* FSCTptr = (U32*)ptr + 2;
+ FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*) FSCTptr;
+
+ /* header */
+ tableU16[-2] = (U16) 0;
+ tableU16[-1] = (U16) symbolValue;
+
+ /* Build table */
+ tableU16[0] = 0;
+ tableU16[1] = 0; /* just in case */
+
+ /* Build Symbol Transformation Table */
+ symbolTT[symbolValue].deltaNbBits = 0;
+ symbolTT[symbolValue].deltaFindState = 0;
+
+ return 0;
+}
+
+
+static size_t FSE_compress_usingCTable_generic (void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const FSE_CTable* ct, const unsigned fast)
+{
+ const BYTE* const istart = (const BYTE*) src;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* ip=iend;
+
+ BIT_CStream_t bitC;
+ FSE_CState_t CState1, CState2;
+
+ /* init */
+ if (srcSize <= 2) return 0;
+ { size_t const initError = BIT_initCStream(&bitC, dst, dstSize);
+ if (FSE_isError(initError)) return 0; /* not enough space available to write a bitstream */ }
+
+#define FSE_FLUSHBITS(s) (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s))
+
+ if (srcSize & 1) {
+ FSE_initCState2(&CState1, ct, *--ip);
+ FSE_initCState2(&CState2, ct, *--ip);
+ FSE_encodeSymbol(&bitC, &CState1, *--ip);
+ FSE_FLUSHBITS(&bitC);
+ } else {
+ FSE_initCState2(&CState2, ct, *--ip);
+ FSE_initCState2(&CState1, ct, *--ip);
+ }
+
+ /* join to mod 4 */
+ srcSize -= 2;
+ if ((sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) && (srcSize & 2)) { /* test bit 2 */
+ FSE_encodeSymbol(&bitC, &CState2, *--ip);
+ FSE_encodeSymbol(&bitC, &CState1, *--ip);
+ FSE_FLUSHBITS(&bitC);
+ }
+
+ /* 2 or 4 encoding per loop */
+ while ( ip>istart ) {
+
+ FSE_encodeSymbol(&bitC, &CState2, *--ip);
+
+ if (sizeof(bitC.bitContainer)*8 < FSE_MAX_TABLELOG*2+7 ) /* this test must be static */
+ FSE_FLUSHBITS(&bitC);
+
+ FSE_encodeSymbol(&bitC, &CState1, *--ip);
+
+ if (sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) { /* this test must be static */
+ FSE_encodeSymbol(&bitC, &CState2, *--ip);
+ FSE_encodeSymbol(&bitC, &CState1, *--ip);
+ }
+
+ FSE_FLUSHBITS(&bitC);
+ }
+
+ FSE_flushCState(&bitC, &CState2);
+ FSE_flushCState(&bitC, &CState1);
+ return BIT_closeCStream(&bitC);
+}
+
+size_t FSE_compress_usingCTable (void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const FSE_CTable* ct)
+{
+ unsigned const fast = (dstSize >= FSE_BLOCKBOUND(srcSize));
+
+ if (fast)
+ return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1);
+ else
+ return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0);
+}
+
+
+size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); }
+
+
+#endif /* FSE_COMMONDEFS_ONLY */
diff --git a/lib/zstd/compress/hist.c b/lib/zstd/compress/hist.c
new file mode 100644
index 00000000000000..3ddc6dfb689482
--- /dev/null
+++ b/lib/zstd/compress/hist.c
@@ -0,0 +1,165 @@
+/* ******************************************************************
+ * hist : Histogram functions
+ * part of Finite State Entropy project
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ *
+ * You can contact the author at :
+ * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ * - Public forum : https://groups.google.com/forum/#!forum/lz4c
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */
+
+/* --- dependencies --- */
+#include "../common/mem.h" /* U32, BYTE, etc. */
+#include "../common/debug.h" /* assert, DEBUGLOG */
+#include "../common/error_private.h" /* ERROR */
+#include "hist.h"
+
+
+/* --- Error management --- */
+unsigned HIST_isError(size_t code) { return ERR_isError(code); }
+
+/*-**************************************************************
+ * Histogram functions
+ ****************************************************************/
+unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* src, size_t srcSize)
+{
+ const BYTE* ip = (const BYTE*)src;
+ const BYTE* const end = ip + srcSize;
+ unsigned maxSymbolValue = *maxSymbolValuePtr;
+ unsigned largestCount=0;
+
+ ZSTD_memset(count, 0, (maxSymbolValue+1) * sizeof(*count));
+ if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; }
+
+ while (ip<end) {
+ assert(*ip <= maxSymbolValue);
+ count[*ip++]++;
+ }
+
+ while (!count[maxSymbolValue]) maxSymbolValue--;
+ *maxSymbolValuePtr = maxSymbolValue;
+
+ { U32 s;
+ for (s=0; s<=maxSymbolValue; s++)
+ if (count[s] > largestCount) largestCount = count[s];
+ }
+
+ return largestCount;
+}
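HIST_count_simple() fills count[0..*maxSymbolValuePtr], shrinks *maxSymbolValuePtr to the largest byte value actually present, and returns the highest single-symbol frequency. Callers typically use that return value to short-circuit: a return equal to srcSize means the input is a single repeated byte (RLE), and a return of 1 means every byte occurs at most once (not compressible), which is how HUF_compressWeights() later in this patch uses it. A small hedged example (src/srcSize are assumed to exist):

/* Illustrative histogram use (not part of this patch). */
unsigned count[256];
unsigned maxSymbolValue = 255;	/* in: table capacity - 1; out: largest value seen */
unsigned const maxCount = HIST_count_simple(count, &maxSymbolValue, src, srcSize);

if (maxCount == srcSize) {
	/* only one distinct byte value in src: emit it as RLE instead */
} else if (maxCount <= 1) {
	/* every byte occurs at most once: treat src as not compressible */
}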
+
+typedef enum { trustInput, checkMaxSymbolValue } HIST_checkInput_e;
+
+/* HIST_count_parallel_wksp() :
+ * store histogram into 4 intermediate tables, recombined at the end.
+ * this design makes better use of OoO cpus,
+ * and is noticeably faster when some values are heavily repeated.
+ * But it needs some additional workspace for intermediate tables.
+ * `workSpace` must be a U32 table of size >= HIST_WKSP_SIZE_U32.
+ * @return : largest histogram frequency,
+ * or an error code (notably when histogram's alphabet is larger than *maxSymbolValuePtr) */
+static size_t HIST_count_parallel_wksp(
+ unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* source, size_t sourceSize,
+ HIST_checkInput_e check,
+ U32* const workSpace)
+{
+ const BYTE* ip = (const BYTE*)source;
+ const BYTE* const iend = ip+sourceSize;
+ size_t const countSize = (*maxSymbolValuePtr + 1) * sizeof(*count);
+ unsigned max=0;
+ U32* const Counting1 = workSpace;
+ U32* const Counting2 = Counting1 + 256;
+ U32* const Counting3 = Counting2 + 256;
+ U32* const Counting4 = Counting3 + 256;
+
+ /* safety checks */
+ assert(*maxSymbolValuePtr <= 255);
+ if (!sourceSize) {
+ ZSTD_memset(count, 0, countSize);
+ *maxSymbolValuePtr = 0;
+ return 0;
+ }
+ ZSTD_memset(workSpace, 0, 4*256*sizeof(unsigned));
+
+ /* by stripes of 16 bytes */
+ { U32 cached = MEM_read32(ip); ip += 4;
+ while (ip < iend-15) {
+ U32 c = cached; cached = MEM_read32(ip); ip += 4;
+ Counting1[(BYTE) c ]++;
+ Counting2[(BYTE)(c>>8) ]++;
+ Counting3[(BYTE)(c>>16)]++;
+ Counting4[ c>>24 ]++;
+ c = cached; cached = MEM_read32(ip); ip += 4;
+ Counting1[(BYTE) c ]++;
+ Counting2[(BYTE)(c>>8) ]++;
+ Counting3[(BYTE)(c>>16)]++;
+ Counting4[ c>>24 ]++;
+ c = cached; cached = MEM_read32(ip); ip += 4;
+ Counting1[(BYTE) c ]++;
+ Counting2[(BYTE)(c>>8) ]++;
+ Counting3[(BYTE)(c>>16)]++;
+ Counting4[ c>>24 ]++;
+ c = cached; cached = MEM_read32(ip); ip += 4;
+ Counting1[(BYTE) c ]++;
+ Counting2[(BYTE)(c>>8) ]++;
+ Counting3[(BYTE)(c>>16)]++;
+ Counting4[ c>>24 ]++;
+ }
+ ip-=4;
+ }
+
+ /* finish last symbols */
+ while (ip<iend) Counting1[*ip++]++;
+
+ { U32 s;
+ for (s=0; s<256; s++) {
+ Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
+ if (Counting1[s] > max) max = Counting1[s];
+ } }
+
+ { unsigned maxSymbolValue = 255;
+ while (!Counting1[maxSymbolValue]) maxSymbolValue--;
+ if (check && maxSymbolValue > *maxSymbolValuePtr) return ERROR(maxSymbolValue_tooSmall);
+ *maxSymbolValuePtr = maxSymbolValue;
+ ZSTD_memmove(count, Counting1, countSize); /* in case count & Counting1 are overlapping */
+ }
+ return (size_t)max;
+}
+
+/* HIST_countFast_wksp() :
+ * Same as HIST_countFast(), but using an externally provided scratch buffer.
+ * `workSpace` is a writable buffer which must be 4-bytes aligned,
+ * `workSpaceSize` must be >= HIST_WKSP_SIZE
+ */
+size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* source, size_t sourceSize,
+ void* workSpace, size_t workSpaceSize)
+{
+ if (sourceSize < 1500) /* heuristic threshold */
+ return HIST_count_simple(count, maxSymbolValuePtr, source, sourceSize);
+ if ((size_t)workSpace & 3) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */
+ if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
+ return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, trustInput, (U32*)workSpace);
+}
+
+/* HIST_count_wksp() :
+ * Same as HIST_count(), but using an externally provided scratch buffer.
+ * `workSpace` size must be table of >= HIST_WKSP_SIZE_U32 unsigned */
+size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* source, size_t sourceSize,
+ void* workSpace, size_t workSpaceSize)
+{
+ if ((size_t)workSpace & 3) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */
+ if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
+ if (*maxSymbolValuePtr < 255)
+ return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, checkMaxSymbolValue, (U32*)workSpace);
+ *maxSymbolValuePtr = 255;
+ return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace, workSpaceSize);
+}
+
diff --git a/lib/zstd/compress/hist.h b/lib/zstd/compress/hist.h
new file mode 100644
index 00000000000000..fc1830abc9c63a
--- /dev/null
+++ b/lib/zstd/compress/hist.h
@@ -0,0 +1,75 @@
+/* ******************************************************************
+ * hist : Histogram functions
+ * part of Finite State Entropy project
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ *
+ * You can contact the author at :
+ * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ * - Public forum : https://groups.google.com/forum/#!forum/lz4c
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */
+
+/* --- dependencies --- */
+#include "../common/zstd_deps.h" /* size_t */
+
+
+/* --- simple histogram functions --- */
+
+/*! HIST_count():
+ * Provides the precise count of each byte within a table 'count'.
+ * 'count' is a table of unsigned int, of minimum size (*maxSymbolValuePtr+1).
+ * Updates *maxSymbolValuePtr with actual largest symbol value detected.
+ * @return : count of the most frequent symbol (which isn't identified).
+ * or an error code, which can be tested using HIST_isError().
+ * note : if return == srcSize, there is only one symbol.
+ */
+size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* src, size_t srcSize);
+
+unsigned HIST_isError(size_t code); /*< tells if a return value is an error code */
+
+
+/* --- advanced histogram functions --- */
+
+#define HIST_WKSP_SIZE_U32 1024
+#define HIST_WKSP_SIZE (HIST_WKSP_SIZE_U32 * sizeof(unsigned))
+/* HIST_count_wksp() :
+ * Same as HIST_count(), but using an externally provided scratch buffer.
+ * Benefit is this function will use very little stack space.
+ * `workSpace` is a writable buffer which must be 4-bytes aligned,
+ * `workSpaceSize` must be >= HIST_WKSP_SIZE
+ */
+size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* src, size_t srcSize,
+ void* workSpace, size_t workSpaceSize);
+
+/* HIST_countFast() :
+ * same as HIST_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr.
+ * This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr`
+ */
+size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* src, size_t srcSize);
+
+/* HIST_countFast_wksp() :
+ * Same as HIST_countFast(), but using an externally provided scratch buffer.
+ * `workSpace` is a writable buffer which must be 4-bytes aligned,
+ * `workSpaceSize` must be >= HIST_WKSP_SIZE
+ */
+size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* src, size_t srcSize,
+ void* workSpace, size_t workSpaceSize);
+
+/*! HIST_count_simple() :
+ * Same as HIST_countFast(), this function is unsafe,
+ * and will segfault if any value within `src` is `> *maxSymbolValuePtr`.
+ * It is also a bit slower for large inputs.
+ * However, it does not need any additional memory (not even on stack).
+ * @return : count of the most frequent symbol.
+ * Note this function doesn't produce any error (i.e. it must succeed).
+ */
+unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* src, size_t srcSize);
diff --git a/lib/zstd/compress/huf_compress.c b/lib/zstd/compress/huf_compress.c
new file mode 100644
index 00000000000000..f76a526bfa54b5
--- /dev/null
+++ b/lib/zstd/compress/huf_compress.c
@@ -0,0 +1,905 @@
+/* ******************************************************************
+ * Huffman encoder, part of New Generation Entropy library
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ *
+ * You can contact the author at :
+ * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ * - Public forum : https://groups.google.com/forum/#!forum/lz4c
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */
+
+/* **************************************************************
+* Compiler specifics
+****************************************************************/
+
+
+/* **************************************************************
+* Includes
+****************************************************************/
+#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memset */
+#include "../common/compiler.h"
+#include "../common/bitstream.h"
+#include "hist.h"
+#define FSE_STATIC_LINKING_ONLY /* FSE_optimalTableLog_internal */
+#include "../common/fse.h" /* header compression */
+#define HUF_STATIC_LINKING_ONLY
+#include "../common/huf.h"
+#include "../common/error_private.h"
+
+
+/* **************************************************************
+* Error Management
+****************************************************************/
+#define HUF_isError ERR_isError
+#define HUF_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */
+
+
+/* **************************************************************
+* Utils
+****************************************************************/
+unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
+{
+ return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
+}
+
+
+/* *******************************************************
+* HUF : Huffman block compression
+*********************************************************/
+/* HUF_compressWeights() :
+ * Same as FSE_compress(), but dedicated to huff0's weights compression.
+ * The use case needs much less stack memory.
+ * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX.
+ */
+#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6
+
+typedef struct {
+ FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)];
+ U32 scratchBuffer[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(HUF_TABLELOG_MAX, MAX_FSE_TABLELOG_FOR_HUFF_HEADER)];
+ unsigned count[HUF_TABLELOG_MAX+1];
+ S16 norm[HUF_TABLELOG_MAX+1];
+} HUF_CompressWeightsWksp;
+
+static size_t HUF_compressWeights(void* dst, size_t dstSize, const void* weightTable, size_t wtSize, void* workspace, size_t workspaceSize)
+{
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* op = ostart;
+ BYTE* const oend = ostart + dstSize;
+
+ unsigned maxSymbolValue = HUF_TABLELOG_MAX;
+ U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;
+ HUF_CompressWeightsWksp* wksp = (HUF_CompressWeightsWksp*)workspace;
+
+ if (workspaceSize < sizeof(HUF_CompressWeightsWksp)) return ERROR(GENERIC);
+
+ /* init conditions */
+ if (wtSize <= 1) return 0; /* Not compressible */
+
+ /* Scan input and build symbol stats */
+ { unsigned const maxCount = HIST_count_simple(wksp->count, &maxSymbolValue, weightTable, wtSize); /* never fails */
+ if (maxCount == wtSize) return 1; /* only a single symbol in src : rle */
+ if (maxCount == 1) return 0; /* each symbol present maximum once => not compressible */
+ }
+
+ tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue);
+ CHECK_F( FSE_normalizeCount(wksp->norm, tableLog, wksp->count, wtSize, maxSymbolValue, /* useLowProbCount */ 0) );
+
+ /* Write table description header */
+ { CHECK_V_F(hSize, FSE_writeNCount(op, (size_t)(oend-op), wksp->norm, maxSymbolValue, tableLog) );
+ op += hSize;
+ }
+
+ /* Compress */
+ CHECK_F( FSE_buildCTable_wksp(wksp->CTable, wksp->norm, maxSymbolValue, tableLog, wksp->scratchBuffer, sizeof(wksp->scratchBuffer)) );
+ { CHECK_V_F(cSize, FSE_compress_usingCTable(op, (size_t)(oend - op), weightTable, wtSize, wksp->CTable) );
+ if (cSize == 0) return 0; /* not enough space for compressed data */
+ op += cSize;
+ }
+
+ return (size_t)(op-ostart);
+}
+
+
+typedef struct {
+ HUF_CompressWeightsWksp wksp;
+ BYTE bitsToWeight[HUF_TABLELOG_MAX + 1]; /* precomputed conversion table */
+ BYTE huffWeight[HUF_SYMBOLVALUE_MAX];
+} HUF_WriteCTableWksp;
+
+size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize,
+ const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog,
+ void* workspace, size_t workspaceSize)
+{
+ BYTE* op = (BYTE*)dst;
+ U32 n;
+ HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)workspace;
+
+ /* check conditions */
+ if (workspaceSize < sizeof(HUF_WriteCTableWksp)) return ERROR(GENERIC);
+ if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
+
+ /* convert to weight */
+ wksp->bitsToWeight[0] = 0;
+ for (n=1; n<huffLog+1; n++)
+ wksp->bitsToWeight[n] = (BYTE)(huffLog + 1 - n);
+ for (n=0; n<maxSymbolValue; n++)
+ wksp->huffWeight[n] = wksp->bitsToWeight[CTable[n].nbBits];
+
+ /* attempt weights compression by FSE */
+ { CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, wksp->huffWeight, maxSymbolValue, &wksp->wksp, sizeof(wksp->wksp)) );
+ if ((hSize>1) & (hSize < maxSymbolValue/2)) { /* FSE compressed */
+ op[0] = (BYTE)hSize;
+ return hSize+1;
+ } }
+
+ /* write raw values as 4-bits (max : 15) */
+ if (maxSymbolValue > (256-128)) return ERROR(GENERIC); /* should not happen : likely means source cannot be compressed */
+ if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall); /* not enough space within dst buffer */
+ op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1));
+ wksp->huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause msan issue in final combination */
+ for (n=0; n<maxSymbolValue; n+=2)
+ op[(n/2)+1] = (BYTE)((wksp->huffWeight[n] << 4) + wksp->huffWeight[n+1]);
+ return ((maxSymbolValue+1)/2) + 1;
+}
+
+/*! HUF_writeCTable() :
+ `CTable` : Huffman tree to save, using huf representation.
+ @return : size of saved CTable */
+size_t HUF_writeCTable (void* dst, size_t maxDstSize,
+ const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog)
+{
+ HUF_WriteCTableWksp wksp;
+ return HUF_writeCTable_wksp(dst, maxDstSize, CTable, maxSymbolValue, huffLog, &wksp, sizeof(wksp));
+}
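+
+/* Illustrative round-trip sketch (not part of the upstream sources) :
+ * a table saved with HUF_writeCTable() can be reloaded with HUF_readCTable(),
+ * assuming `CTable`, `maxSymbolValue` and `huffLog` come from a prior
+ * HUF_buildCTable*() call :
+ *
+ *   BYTE hdr[HUF_SYMBOLVALUE_MAX];
+ *   size_t const hSize = HUF_writeCTable(hdr, sizeof(hdr), CTable, maxSymbolValue, huffLog);
+ *   if (!HUF_isError(hSize)) {
+ *       HUF_CElt reloaded[HUF_SYMBOLVALUE_MAX + 1];
+ *       unsigned maxSV = HUF_SYMBOLVALUE_MAX;
+ *       unsigned hasZeroWeights;
+ *       size_t const rSize = HUF_readCTable(reloaded, &maxSV, hdr, hSize, &hasZeroWeights);
+ *       // on success, rSize == hSize and reloaded[] mirrors the saved table
+ *   }
+ */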
+
+
+size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* hasZeroWeights)
+{
+ BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1]; /* init not required, even though some static analyzer may complain */
+ U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; /* large enough for values from 0 to 16 */
+ U32 tableLog = 0;
+ U32 nbSymbols = 0;
+
+ /* get symbol weights */
+ CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize));
+ *hasZeroWeights = (rankVal[0] > 0);
+
+ /* check result */
+ if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
+ if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall);
+
+ /* Prepare base value per rank */
+ { U32 n, nextRankStart = 0;
+ for (n=1; n<=tableLog; n++) {
+ U32 curr = nextRankStart;
+ nextRankStart += (rankVal[n] << (n-1));
+ rankVal[n] = curr;
+ } }
+
+ /* fill nbBits */
+ { U32 n; for (n=0; n<nbSymbols; n++) {
+ const U32 w = huffWeight[n];
+ CTable[n].nbBits = (BYTE)(tableLog + 1 - w) & -(w != 0);
+ } }
+
+ /* fill val */
+ { U16 nbPerRank[HUF_TABLELOG_MAX+2] = {0}; /* support w=0=>n=tableLog+1 */
+ U16 valPerRank[HUF_TABLELOG_MAX+2] = {0};
+ { U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[CTable[n].nbBits]++; }
+ /* determine starting value per rank */
+ valPerRank[tableLog+1] = 0; /* for w==0 */
+ { U16 min = 0;
+ U32 n; for (n=tableLog; n>0; n--) { /* start at n=tablelog <-> w=1 */
+ valPerRank[n] = min; /* get starting value within each rank */
+ min += nbPerRank[n];
+ min >>= 1;
+ } }
+ /* assign value within rank, symbol order */
+ { U32 n; for (n=0; n<nbSymbols; n++) CTable[n].val = valPerRank[CTable[n].nbBits]++; }
+ }
+
+ *maxSymbolValuePtr = nbSymbols - 1;
+ return readSize;
+}
+
+U32 HUF_getNbBits(const void* symbolTable, U32 symbolValue)
+{
+ const HUF_CElt* table = (const HUF_CElt*)symbolTable;
+ assert(symbolValue <= HUF_SYMBOLVALUE_MAX);
+ return table[symbolValue].nbBits;
+}
+
+
+typedef struct nodeElt_s {
+ U32 count;
+ U16 parent;
+ BYTE byte;
+ BYTE nbBits;
+} nodeElt;
+
+/*
+ * HUF_setMaxHeight():
+ * Enforces maxNbBits on the Huffman tree described in huffNode.
+ *
+ * It sets all nodes with nbBits > maxNbBits to be maxNbBits. Then it adjusts
+ * the tree so that it is a valid canonical Huffman tree.
+ *
+ * @pre The sum of the ranks of each symbol == 2^largestBits,
+ * where largestBits == huffNode[lastNonNull].nbBits.
+ * @post The sum of the ranks of each symbol == 2^largestBits,
+ * where largestBits is the return value <= maxNbBits.
+ *
+ * @param huffNode The Huffman tree modified in place to enforce maxNbBits.
+ * @param lastNonNull The symbol with the lowest count in the Huffman tree.
+ * @param maxNbBits The maximum allowed number of bits, which the Huffman tree
+ * may not respect. After this function the Huffman tree will
+ * respect maxNbBits.
+ * @return The maximum number of bits of the Huffman tree after adjustment,
+ * necessarily no more than maxNbBits.
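+ *
+ * Rough numeric illustration (added, not from the original sources) : with
+ * largestBits == 13 and maxNbBits == 11, baseCost == 1 << 2 == 4; a symbol
+ * previously coded on 13 bits contributes 4 - 1 == 3 to totalCost, one coded
+ * on 12 bits contributes 4 - 2 == 2. After renormalization (totalCost >>= 2),
+ * the remaining debt is repaid by adding one bit to symbols that currently
+ * use fewer than maxNbBits bits.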
+ */
+static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
+{
+ const U32 largestBits = huffNode[lastNonNull].nbBits;
+ /* early exit : no elt > maxNbBits, so the tree is already valid. */
+ if (largestBits <= maxNbBits) return largestBits;
+
+ /* there are several too large elements (at least >= 2) */
+ { int totalCost = 0;
+ const U32 baseCost = 1 << (largestBits - maxNbBits);
+ int n = (int)lastNonNull;
+
+ /* Adjust any ranks > maxNbBits to maxNbBits.
+ * Compute totalCost, which is how far over 2^largestBits the sum of the
+ * ranks is after adjusting the offending ranks.
+ */
+ while (huffNode[n].nbBits > maxNbBits) {
+ totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits));
+ huffNode[n].nbBits = (BYTE)maxNbBits;
+ n--;
+ }
+ /* n stops at huffNode[n].nbBits <= maxNbBits */
+ assert(huffNode[n].nbBits <= maxNbBits);
+ /* n ends at the index of the smallest symbol using < maxNbBits */
+ while (huffNode[n].nbBits == maxNbBits) --n;
+
+ /* renorm totalCost from 2^largestBits to 2^maxNbBits
+ * note : totalCost is necessarily a multiple of baseCost */
+ assert((totalCost & (baseCost - 1)) == 0);
+ totalCost >>= (largestBits - maxNbBits);
+ assert(totalCost > 0);
+
+ /* repay normalized cost */
+ { U32 const noSymbol = 0xF0F0F0F0;
+ U32 rankLast[HUF_TABLELOG_MAX+2];
+
+ /* Get pos of last (smallest = lowest cum. count) symbol per rank */
+ ZSTD_memset(rankLast, 0xF0, sizeof(rankLast));
+ { U32 currentNbBits = maxNbBits;
+ int pos;
+ for (pos=n ; pos >= 0; pos--) {
+ if (huffNode[pos].nbBits >= currentNbBits) continue;
+ currentNbBits = huffNode[pos].nbBits; /* < maxNbBits */
+ rankLast[maxNbBits-currentNbBits] = (U32)pos;
+ } }
+
+ while (totalCost > 0) {
+ /* Try to reduce the next power of 2 above totalCost because we
+ * gain back half the rank.
+ */
+ U32 nBitsToDecrease = BIT_highbit32((U32)totalCost) + 1;
+ for ( ; nBitsToDecrease > 1; nBitsToDecrease--) {
+ U32 const highPos = rankLast[nBitsToDecrease];
+ U32 const lowPos = rankLast[nBitsToDecrease-1];
+ if (highPos == noSymbol) continue;
+ /* Decrease highPos if there are no lowPos symbols, or if it is
+ * not cheaper to remove two lowPos symbols than one highPos symbol.
+ */
+ if (lowPos == noSymbol) break;
+ { U32 const highTotal = huffNode[highPos].count;
+ U32 const lowTotal = 2 * huffNode[lowPos].count;
+ if (highTotal <= lowTotal) break;
+ } }
+ /* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) */
+ assert(rankLast[nBitsToDecrease] != noSymbol || nBitsToDecrease == 1);
+ /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */
+ while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol))
+ nBitsToDecrease++;
+ assert(rankLast[nBitsToDecrease] != noSymbol);
+ /* Increase the number of bits to gain back half the rank cost. */
+ totalCost -= 1 << (nBitsToDecrease-1);
+ huffNode[rankLast[nBitsToDecrease]].nbBits++;
+
+ /* Fix up the new rank.
+ * If the new rank was empty, this symbol is now its smallest.
+ * Otherwise, this symbol will be the largest in the new rank so no adjustment.
+ */
+ if (rankLast[nBitsToDecrease-1] == noSymbol)
+ rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease];
+ /* Fix up the old rank.
+ * If the symbol was at position 0, meaning it was the highest weight symbol in the tree,
+ * it must be the only symbol in its rank, so the old rank now has no symbols.
+ * Otherwise, since the Huffman nodes are sorted by count, the previous position is now
+ * the smallest node in the rank. If the previous position belongs to a different rank,
+ * then the rank is now empty.
+ */
+ if (rankLast[nBitsToDecrease] == 0) /* special case, reached largest symbol */
+ rankLast[nBitsToDecrease] = noSymbol;
+ else {
+ rankLast[nBitsToDecrease]--;
+ if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease)
+ rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */
+ }
+ } /* while (totalCost > 0) */
+
+ /* If we've removed too much weight, then we have to add it back.
+ * To avoid overshooting again, we only adjust the smallest rank.
+ * We take the largest nodes from the lowest rank 0 and move them
+ * to rank 1. There's guaranteed to be enough rank 0 symbols because
+ * TODO.
+ */
+ while (totalCost < 0) { /* Sometimes, cost correction overshoot */
+ /* special case : no rank 1 symbol (using maxNbBits-1);
+ * let's create one from largest rank 0 (using maxNbBits).
+ */
+ if (rankLast[1] == noSymbol) {
+ while (huffNode[n].nbBits == maxNbBits) n--;
+ huffNode[n+1].nbBits--;
+ assert(n >= 0);
+ rankLast[1] = (U32)(n+1);
+ totalCost++;
+ continue;
+ }
+ huffNode[ rankLast[1] + 1 ].nbBits--;
+ rankLast[1]++;
+ totalCost ++;
+ }
+ } /* repay normalized cost */
+ } /* there are several too large elements (at least >= 2) */
+
+ return maxNbBits;
+}
+
+typedef struct {
+ U32 base;
+ U32 curr;
+} rankPos;
+
+typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32];
+
+#define RANK_POSITION_TABLE_SIZE 32
+
+typedef struct {
+ huffNodeTable huffNodeTbl;
+ rankPos rankPosition[RANK_POSITION_TABLE_SIZE];
+} HUF_buildCTable_wksp_tables;
+
+/*
+ * HUF_sort():
+ * Sorts the symbols [0, maxSymbolValue] by count[symbol] in decreasing order.
+ *
+ * @param[out] huffNode Sorted symbols by decreasing count. Only members `.count` and `.byte` are filled.
+ * Must have (maxSymbolValue + 1) entries.
+ * @param[in] count Histogram of the symbols.
+ * @param[in] maxSymbolValue Maximum symbol value.
+ * @param rankPosition This is a scratch workspace. Must have RANK_POSITION_TABLE_SIZE entries.
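+ *
+ * Illustrative note (added, not from the original sources) : a symbol with
+ * count 6 has lowerRank == BIT_highbit32(6+1) == 2, so its bucket starts after
+ * all symbols whose count+1 reaches 8 or more; the insertion step in the final
+ * loop then keeps counts in decreasing order inside each bucket.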
+ */
+static void HUF_sort(nodeElt* huffNode, const unsigned* count, U32 maxSymbolValue, rankPos* rankPosition)
+{
+ int n;
+ int const maxSymbolValue1 = (int)maxSymbolValue + 1;
+
+ /* Compute base and set curr to base.
+ * For symbol s let lowerRank = BIT_highbit32(count[n]+1) and rank = lowerRank + 1.
+ * Then 2^lowerRank <= count[n]+1 < 2^rank.
+ * We attribute each symbol to lowerRank's base value, because we want to know where
+ * each rank begins in the output, so for rank R we want to count ranks R+1 and above.
+ */
+ ZSTD_memset(rankPosition, 0, sizeof(*rankPosition) * RANK_POSITION_TABLE_SIZE);
+ for (n = 0; n < maxSymbolValue1; ++n) {
+ U32 lowerRank = BIT_highbit32(count[n] + 1);
+ rankPosition[lowerRank].base++;
+ }
+ assert(rankPosition[RANK_POSITION_TABLE_SIZE - 1].base == 0);
+ for (n = RANK_POSITION_TABLE_SIZE - 1; n > 0; --n) {
+ rankPosition[n-1].base += rankPosition[n].base;
+ rankPosition[n-1].curr = rankPosition[n-1].base;
+ }
+ /* Sort */
+ for (n = 0; n < maxSymbolValue1; ++n) {
+ U32 const c = count[n];
+ U32 const r = BIT_highbit32(c+1) + 1;
+ U32 pos = rankPosition[r].curr++;
+ /* Insert into the correct position in the rank.
+ * We have at most 256 symbols, so this insertion should be fine.
+ */
+ while ((pos > rankPosition[r].base) && (c > huffNode[pos-1].count)) {
+ huffNode[pos] = huffNode[pos-1];
+ pos--;
+ }
+ huffNode[pos].count = c;
+ huffNode[pos].byte = (BYTE)n;
+ }
+}
+
+
+/* HUF_buildCTable_wksp() :
+ * Same as HUF_buildCTable(), but using externally allocated scratch buffer.
+ * `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as sizeof(HUF_buildCTable_wksp_tables).
+ */
+#define STARTNODE (HUF_SYMBOLVALUE_MAX+1)
+
+/* HUF_buildTree():
+ * Takes the huffNode array sorted by HUF_sort() and builds an unlimited-depth Huffman tree.
+ *
+ * @param huffNode The array sorted by HUF_sort(). Builds the Huffman tree in this array.
+ * @param maxSymbolValue The maximum symbol value.
+ * @return The smallest node in the Huffman tree (by count).
+ */
+static int HUF_buildTree(nodeElt* huffNode, U32 maxSymbolValue)
+{
+ nodeElt* const huffNode0 = huffNode - 1;
+ int nonNullRank;
+ int lowS, lowN;
+ int nodeNb = STARTNODE;
+ int n, nodeRoot;
+ /* init for parents */
+ nonNullRank = (int)maxSymbolValue;
+ while(huffNode[nonNullRank].count == 0) nonNullRank--;
+ lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb;
+ huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count;
+ huffNode[lowS].parent = huffNode[lowS-1].parent = (U16)nodeNb;
+ nodeNb++; lowS-=2;
+ for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30);
+ huffNode0[0].count = (U32)(1U<<31); /* fake entry, strong barrier */
+
+ /* create parents */
+ while (nodeNb <= nodeRoot) {
+ int const n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
+ int const n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
+ huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count;
+ huffNode[n1].parent = huffNode[n2].parent = (U16)nodeNb;
+ nodeNb++;
+ }
+
+ /* distribute weights (unlimited tree height) */
+ huffNode[nodeRoot].nbBits = 0;
+ for (n=nodeRoot-1; n>=STARTNODE; n--)
+ huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
+ for (n=0; n<=nonNullRank; n++)
+ huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
+
+ return nonNullRank;
+}
+
+/*
+ * HUF_buildCTableFromTree():
+ * Build the CTable given the Huffman tree in huffNode.
+ *
+ * @param[out] CTable The output Huffman CTable.
+ * @param huffNode The Huffman tree.
+ * @param nonNullRank The last and smallest node in the Huffman tree.
+ * @param maxSymbolValue The maximum symbol value.
+ * @param maxNbBits The exact maximum number of bits used in the Huffman tree.
+ */
+static void HUF_buildCTableFromTree(HUF_CElt* CTable, nodeElt const* huffNode, int nonNullRank, U32 maxSymbolValue, U32 maxNbBits)
+{
+ /* fill result into ctable (val, nbBits) */
+ int n;
+ U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0};
+ U16 valPerRank[HUF_TABLELOG_MAX+1] = {0};
+ int const alphabetSize = (int)(maxSymbolValue + 1);
+ for (n=0; n<=nonNullRank; n++)
+ nbPerRank[huffNode[n].nbBits]++;
+ /* determine starting value per rank */
+ { U16 min = 0;
+ for (n=(int)maxNbBits; n>0; n--) {
+ valPerRank[n] = min; /* get starting value within each rank */
+ min += nbPerRank[n];
+ min >>= 1;
+ } }
+ for (n=0; n<alphabetSize; n++)
+ CTable[huffNode[n].byte].nbBits = huffNode[n].nbBits; /* push nbBits per symbol, symbol order */
+ for (n=0; n<alphabetSize; n++)
+ CTable[n].val = valPerRank[CTable[n].nbBits]++; /* assign value within rank, symbol order */
+}
+
+size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
+{
+ HUF_buildCTable_wksp_tables* const wksp_tables = (HUF_buildCTable_wksp_tables*)workSpace;
+ nodeElt* const huffNode0 = wksp_tables->huffNodeTbl;
+ nodeElt* const huffNode = huffNode0+1;
+ int nonNullRank;
+
+ /* safety checks */
+ if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */
+ if (wkspSize < sizeof(HUF_buildCTable_wksp_tables))
+ return ERROR(workSpace_tooSmall);
+ if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT;
+ if (maxSymbolValue > HUF_SYMBOLVALUE_MAX)
+ return ERROR(maxSymbolValue_tooLarge);
+ ZSTD_memset(huffNode0, 0, sizeof(huffNodeTable));
+
+ /* sort, decreasing order */
+ HUF_sort(huffNode, count, maxSymbolValue, wksp_tables->rankPosition);
+
+ /* build tree */
+ nonNullRank = HUF_buildTree(huffNode, maxSymbolValue);
+
+ /* enforce maxTableLog */
+ maxNbBits = HUF_setMaxHeight(huffNode, (U32)nonNullRank, maxNbBits);
+ if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */
+
+ HUF_buildCTableFromTree(tree, huffNode, nonNullRank, maxSymbolValue, maxNbBits);
+
+ return maxNbBits;
+}
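+
+/* Usage sketch (illustrative only, not part of the upstream sources), assuming
+ * `count` holds a histogram over symbols 0..maxSymbolValue and that
+ * `workSpace`/`wkspSize` designate a 4-byte aligned scratch buffer at least
+ * sizeof(HUF_buildCTable_wksp_tables) bytes large :
+ *
+ *   HUF_CElt ct[HUF_SYMBOLVALUE_MAX + 1];
+ *   size_t const maxBits = HUF_buildCTable_wksp(ct, count, maxSymbolValue,
+ *                                               HUF_TABLELOG_DEFAULT, workSpace, wkspSize);
+ *   if (HUF_isError(maxBits)) { ... }   // e.g. workSpace_tooSmall
+ *   // on success, maxBits is the tree depth actually used, <= HUF_TABLELOG_DEFAULT
+ */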
+
+size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue)
+{
+ size_t nbBits = 0;
+ int s;
+ for (s = 0; s <= (int)maxSymbolValue; ++s) {
+ nbBits += CTable[s].nbBits * count[s];
+ }
+ return nbBits >> 3;
+}
+
+int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) {
+ int bad = 0;
+ int s;
+ for (s = 0; s <= (int)maxSymbolValue; ++s) {
+ bad |= (count[s] != 0) & (CTable[s].nbBits == 0);
+ }
+ return !bad;
+}
+
+size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }
+
+FORCE_INLINE_TEMPLATE void
+HUF_encodeSymbol(BIT_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable)
+{
+ BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits);
+}
+
+#define HUF_FLUSHBITS(s) BIT_flushBits(s)
+
+#define HUF_FLUSHBITS_1(stream) \
+ if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*2+7) HUF_FLUSHBITS(stream)
+
+#define HUF_FLUSHBITS_2(stream) \
+ if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*4+7) HUF_FLUSHBITS(stream)
+
+FORCE_INLINE_TEMPLATE size_t
+HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const HUF_CElt* CTable)
+{
+ const BYTE* ip = (const BYTE*) src;
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = ostart + dstSize;
+ BYTE* op = ostart;
+ size_t n;
+ BIT_CStream_t bitC;
+
+ /* init */
+ if (dstSize < 8) return 0; /* not enough space to compress */
+ { size_t const initErr = BIT_initCStream(&bitC, op, (size_t)(oend-op));
+ if (HUF_isError(initErr)) return 0; }
+
+ n = srcSize & ~3; /* join to mod 4 */
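+ /* Note (descriptive comment added for clarity) : symbols are encoded from the
+  * last one (ip[srcSize-1]) down to the first, because the matching decoder
+  * reads the bitstream in reverse; the srcSize % 4 tail symbols are handled by
+  * the switch below, then the main loop emits 4 symbols per iteration. */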
+ switch (srcSize & 3)
+ {
+ case 3:
+ HUF_encodeSymbol(&bitC, ip[n+ 2], CTable);
+ HUF_FLUSHBITS_2(&bitC);
+ ZSTD_FALLTHROUGH;
+ case 2:
+ HUF_encodeSymbol(&bitC, ip[n+ 1], CTable);
+ HUF_FLUSHBITS_1(&bitC);
+ ZSTD_FALLTHROUGH;
+ case 1:
+ HUF_encodeSymbol(&bitC, ip[n+ 0], CTable);
+ HUF_FLUSHBITS(&bitC);
+ ZSTD_FALLTHROUGH;
+ case 0: ZSTD_FALLTHROUGH;
+ default: break;
+ }
+
+ for (; n>0; n-=4) { /* note : n&3==0 at this stage */
+ HUF_encodeSymbol(&bitC, ip[n- 1], CTable);
+ HUF_FLUSHBITS_1(&bitC);
+ HUF_encodeSymbol(&bitC, ip[n- 2], CTable);
+ HUF_FLUSHBITS_2(&bitC);
+ HUF_encodeSymbol(&bitC, ip[n- 3], CTable);
+ HUF_FLUSHBITS_1(&bitC);
+ HUF_encodeSymbol(&bitC, ip[n- 4], CTable);
+ HUF_FLUSHBITS(&bitC);
+ }
+
+ return BIT_closeCStream(&bitC);
+}
+
+#if DYNAMIC_BMI2
+
+static TARGET_ATTRIBUTE("bmi2") size_t
+HUF_compress1X_usingCTable_internal_bmi2(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const HUF_CElt* CTable)
+{
+ return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
+}
+
+static size_t
+HUF_compress1X_usingCTable_internal_default(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const HUF_CElt* CTable)
+{
+ return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
+}
+
+static size_t
+HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const HUF_CElt* CTable, const int bmi2)
+{
+ if (bmi2) {
+ return HUF_compress1X_usingCTable_internal_bmi2(dst, dstSize, src, srcSize, CTable);
+ }
+ return HUF_compress1X_usingCTable_internal_default(dst, dstSize, src, srcSize, CTable);
+}
+
+#else
+
+static size_t
+HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const HUF_CElt* CTable, const int bmi2)
+{
+ (void)bmi2;
+ return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
+}
+
+#endif
+
+size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
+{
+ return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
+}
+
+
+static size_t
+HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const HUF_CElt* CTable, int bmi2)
+{
+ size_t const segmentSize = (srcSize+3)/4; /* first 3 segments */
+ const BYTE* ip = (const BYTE*) src;
+ const BYTE* const iend = ip + srcSize;
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+ BYTE* op = ostart;
+
+ if (dstSize < 6 + 1 + 1 + 1 + 8) return 0; /* minimum space to compress successfully */
+ if (srcSize < 12) return 0; /* no saving possible : too small input */
+ op += 6; /* jumpTable */
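+
+ /* Layout note (descriptive comment added for clarity) : the 6-byte jump table
+  * filled below holds the compressed sizes of the first three segments as
+  * little-endian 16-bit values (the MEM_writeLE16() calls); the fourth
+  * segment's size is implicit, as it runs to the end of the block. */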
+
+ assert(op <= oend);
+ { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
+ if (cSize==0) return 0;
+ assert(cSize <= 65535);
+ MEM_writeLE16(ostart, (U16)cSize);
+ op += cSize;
+ }
+
+ ip += segmentSize;
+ assert(op <= oend);
+ { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
+ if (cSize==0) return 0;
+ assert(cSize <= 65535);
+ MEM_writeLE16(ostart+2, (U16)cSize);
+ op += cSize;
+ }
+
+ ip += segmentSize;
+ assert(op <= oend);
+ { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
+ if (cSize==0) return 0;
+ assert(cSize <= 65535);
+ MEM_writeLE16(ostart+4, (U16)cSize);
+ op += cSize;
+ }
+
+ ip += segmentSize;
+ assert(op <= oend);
+ assert(ip <= iend);
+ { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, bmi2) );
+ if (cSize==0) return 0;
+ op += cSize;
+ }
+
+ return (size_t)(op-ostart);
+}
+
+size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
+{
+ return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
+}
+
+typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e;
+
+static size_t HUF_compressCTable_internal(
+ BYTE* const ostart, BYTE* op, BYTE* const oend,
+ const void* src, size_t srcSize,
+ HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int bmi2)
+{
+ size_t const cSize = (nbStreams==HUF_singleStream) ?
+ HUF_compress1X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2) :
+ HUF_compress4X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2);
+ if (HUF_isError(cSize)) { return cSize; }
+ if (cSize==0) { return 0; } /* uncompressible */
+ op += cSize;
+ /* check compressibility */
+ assert(op >= ostart);
+ if ((size_t)(op-ostart) >= srcSize-1) { return 0; }
+ return (size_t)(op-ostart);
+}
+
+typedef struct {
+ unsigned count[HUF_SYMBOLVALUE_MAX + 1];
+ HUF_CElt CTable[HUF_SYMBOLVALUE_MAX + 1];
+ union {
+ HUF_buildCTable_wksp_tables buildCTable_wksp;
+ HUF_WriteCTableWksp writeCTable_wksp;
+ } wksps;
+} HUF_compress_tables_t;
+
+/* HUF_compress_internal() :
+ * `workSpace_align4` must be aligned on 4-bytes boundaries,
+ * and occupies the same space as a table of HUF_WORKSPACE_SIZE_U32 unsigned */
+static size_t
+HUF_compress_internal (void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned huffLog,
+ HUF_nbStreams_e nbStreams,
+ void* workSpace_align4, size_t wkspSize,
+ HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat,
+ const int bmi2)
+{
+ HUF_compress_tables_t* const table = (HUF_compress_tables_t*)workSpace_align4;
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = ostart + dstSize;
+ BYTE* op = ostart;
+
+ HUF_STATIC_ASSERT(sizeof(*table) <= HUF_WORKSPACE_SIZE);
+ assert(((size_t)workSpace_align4 & 3) == 0); /* must be aligned on 4-bytes boundaries */
+
+ /* checks & inits */
+ if (wkspSize < HUF_WORKSPACE_SIZE) return ERROR(workSpace_tooSmall);
+ if (!srcSize) return 0; /* Uncompressed */
+ if (!dstSize) return 0; /* cannot fit anything within dst budget */
+ if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */
+ if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
+ if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
+ if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX;
+ if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT;
+
+ /* Heuristic : If old table is valid, use it for small inputs */
+ if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {
+ return HUF_compressCTable_internal(ostart, op, oend,
+ src, srcSize,
+ nbStreams, oldHufTable, bmi2);
+ }
+
+ /* Scan input and build symbol stats */
+ { CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, workSpace_align4, wkspSize) );
+ if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */
+ if (largest <= (srcSize >> 7)+4) return 0; /* heuristic : probably not compressible enough */
+ }
+
+ /* Check validity of previous table */
+ if ( repeat
+ && *repeat == HUF_repeat_check
+ && !HUF_validateCTable(oldHufTable, table->count, maxSymbolValue)) {
+ *repeat = HUF_repeat_none;
+ }
+ /* Heuristic : use existing table for small inputs */
+ if (preferRepeat && repeat && *repeat != HUF_repeat_none) {
+ return HUF_compressCTable_internal(ostart, op, oend,
+ src, srcSize,
+ nbStreams, oldHufTable, bmi2);
+ }
+
+ /* Build Huffman Tree */
+ huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
+ { size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count,
+ maxSymbolValue, huffLog,
+ &table->wksps.buildCTable_wksp, sizeof(table->wksps.buildCTable_wksp));
+ CHECK_F(maxBits);
+ huffLog = (U32)maxBits;
+ /* Zero unused symbols in CTable, so we can check it for validity */
+ ZSTD_memset(table->CTable + (maxSymbolValue + 1), 0,
+ sizeof(table->CTable) - ((maxSymbolValue + 1) * sizeof(HUF_CElt)));
+ }
+
+ /* Write table description header */
+ { CHECK_V_F(hSize, HUF_writeCTable_wksp(op, dstSize, table->CTable, maxSymbolValue, huffLog,
+ &table->wksps.writeCTable_wksp, sizeof(table->wksps.writeCTable_wksp)) );
+ /* Check if using previous huffman table is beneficial */
+ if (repeat && *repeat != HUF_repeat_none) {
+ size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, table->count, maxSymbolValue);
+ size_t const newSize = HUF_estimateCompressedSize(table->CTable, table->count, maxSymbolValue);
+ if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
+ return HUF_compressCTable_internal(ostart, op, oend,
+ src, srcSize,
+ nbStreams, oldHufTable, bmi2);
+ } }
+
+ /* Use the new huffman table */
+ if (hSize + 12ul >= srcSize) { return 0; }
+ op += hSize;
+ if (repeat) { *repeat = HUF_repeat_none; }
+ if (oldHufTable)
+ ZSTD_memcpy(oldHufTable, table->CTable, sizeof(table->CTable)); /* Save new table */
+ }
+ return HUF_compressCTable_internal(ostart, op, oend,
+ src, srcSize,
+ nbStreams, table->CTable, bmi2);
+}
+
+
+size_t HUF_compress1X_wksp (void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned huffLog,
+ void* workSpace, size_t wkspSize)
+{
+ return HUF_compress_internal(dst, dstSize, src, srcSize,
+ maxSymbolValue, huffLog, HUF_singleStream,
+ workSpace, wkspSize,
+ NULL, NULL, 0, 0 /*bmi2*/);
+}
+
+size_t HUF_compress1X_repeat (void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned huffLog,
+ void* workSpace, size_t wkspSize,
+ HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2)
+{
+ return HUF_compress_internal(dst, dstSize, src, srcSize,
+ maxSymbolValue, huffLog, HUF_singleStream,
+ workSpace, wkspSize, hufTable,
+ repeat, preferRepeat, bmi2);
+}
+
+/* HUF_compress4X_wksp():
+ * compress input using 4 streams.
+ * provide workspace to generate compression tables */
+size_t HUF_compress4X_wksp (void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned huffLog,
+ void* workSpace, size_t wkspSize)
+{
+ return HUF_compress_internal(dst, dstSize, src, srcSize,
+ maxSymbolValue, huffLog, HUF_fourStreams,
+ workSpace, wkspSize,
+ NULL, NULL, 0, 0 /*bmi2*/);
+}
+
+/* HUF_compress4X_repeat():
+ * compress input using 4 streams.
+ * re-use an existing huffman compression table */
+size_t HUF_compress4X_repeat (void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned huffLog,
+ void* workSpace, size_t wkspSize,
+ HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2)
+{
+ return HUF_compress_internal(dst, dstSize, src, srcSize,
+ maxSymbolValue, huffLog, HUF_fourStreams,
+ workSpace, wkspSize,
+ hufTable, repeat, preferRepeat, bmi2);
+}
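+
+/* Usage sketch (illustrative only, not part of the upstream sources) :
+ * one-shot Huffman compression of a block of bytes with the 4-stream variant
+ * and a caller-provided scratch buffer :
+ *
+ *   U32 wksp[HUF_WORKSPACE_SIZE_U32];   // 4-byte aligned scratch space
+ *   size_t const cSize = HUF_compress4X_wksp(dst, dstCapacity, src, srcSize,
+ *                                            HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT,
+ *                                            wksp, sizeof(wksp));
+ *   if (HUF_isError(cSize)) { ... }     // e.g. srcSize above the block size limit
+ *   else if (cSize == 0)    { ... }     // judged not compressible : store raw
+ *   else                    { ... }     // cSize compressed bytes written into dst
+ */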
+
diff --git a/lib/zstd/compress/zstd_compress.c b/lib/zstd/compress/zstd_compress.c
new file mode 100644
index 00000000000000..a4e916008b3a71
--- /dev/null
+++ b/lib/zstd/compress/zstd_compress.c
@@ -0,0 +1,5109 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/*-*************************************
+* Dependencies
+***************************************/
+#include "../common/zstd_deps.h" /* INT_MAX, ZSTD_memset, ZSTD_memcpy */
+#include "../common/cpu.h"
+#include "../common/mem.h"
+#include "hist.h" /* HIST_countFast_wksp */
+#define FSE_STATIC_LINKING_ONLY /* FSE_encodeSymbol */
+#include "../common/fse.h"
+#define HUF_STATIC_LINKING_ONLY
+#include "../common/huf.h"
+#include "zstd_compress_internal.h"
+#include "zstd_compress_sequences.h"
+#include "zstd_compress_literals.h"
+#include "zstd_fast.h"
+#include "zstd_double_fast.h"
+#include "zstd_lazy.h"
+#include "zstd_opt.h"
+#include "zstd_ldm.h"
+#include "zstd_compress_superblock.h"
+
+/* ***************************************************************
+* Tuning parameters
+*****************************************************************/
+/*!
+ * COMPRESS_HEAPMODE :
+ * Select how the default compression function ZSTD_compress() allocates its context,
+ * on stack (0, default), or into heap (1).
+ * Note that functions with explicit context such as ZSTD_compressCCtx() are unaffected.
+ */
+
+
+/*-*************************************
+* Helper functions
+***************************************/
+/* ZSTD_compressBound()
+ * Note that the result from this function is only compatible with the "normal"
+ * full-block strategy.
+ * When there are many small blocks due to frequent flushes in streaming mode,
+ * the overhead of headers can make the compressed data larger than the
+ * return value of ZSTD_compressBound().
+ */
+size_t ZSTD_compressBound(size_t srcSize) {
+ return ZSTD_COMPRESSBOUND(srcSize);
+}
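+
+/* e.g. (illustrative) : for a 1 MiB input, ZSTD_COMPRESSBOUND() evaluates to
+ * srcSize + (srcSize >> 8), i.e. 1 MiB + 4 KiB; inputs smaller than 128 KiB
+ * receive a small additional margin. */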
+
+
+/*-*************************************
+* Context memory management
+***************************************/
+struct ZSTD_CDict_s {
+ const void* dictContent;
+ size_t dictContentSize;
+ ZSTD_dictContentType_e dictContentType; /* The dictContentType the CDict was created with */
+ U32* entropyWorkspace; /* entropy workspace of HUF_WORKSPACE_SIZE bytes */
+ ZSTD_cwksp workspace;
+ ZSTD_matchState_t matchState;
+ ZSTD_compressedBlockState_t cBlockState;
+ ZSTD_customMem customMem;
+ U32 dictID;
+ int compressionLevel; /* 0 indicates that advanced API was used to select CDict params */
+}; /* typedef'd to ZSTD_CDict within "zstd.h" */
+
+ZSTD_CCtx* ZSTD_createCCtx(void)
+{
+ return ZSTD_createCCtx_advanced(ZSTD_defaultCMem);
+}
+
+static void ZSTD_initCCtx(ZSTD_CCtx* cctx, ZSTD_customMem memManager)
+{
+ assert(cctx != NULL);
+ ZSTD_memset(cctx, 0, sizeof(*cctx));
+ cctx->customMem = memManager;
+ cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
+ { size_t const err = ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters);
+ assert(!ZSTD_isError(err));
+ (void)err;
+ }
+}
+
+ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
+{
+ ZSTD_STATIC_ASSERT(zcss_init==0);
+ ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1));
+ if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
+ { ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_customMalloc(sizeof(ZSTD_CCtx), customMem);
+ if (!cctx) return NULL;
+ ZSTD_initCCtx(cctx, customMem);
+ return cctx;
+ }
+}
+
+ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize)
+{
+ ZSTD_cwksp ws;
+ ZSTD_CCtx* cctx;
+ if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL; /* minimum size */
+ if ((size_t)workspace & 7) return NULL; /* must be 8-aligned */
+ ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc);
+
+ cctx = (ZSTD_CCtx*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CCtx));
+ if (cctx == NULL) return NULL;
+
+ ZSTD_memset(cctx, 0, sizeof(ZSTD_CCtx));
+ ZSTD_cwksp_move(&cctx->workspace, &ws);
+ cctx->staticSize = workspaceSize;
+
+ /* statically sized space. entropyWorkspace never moves (but prev/next block swap places) */
+ if (!ZSTD_cwksp_check_available(&cctx->workspace, ENTROPY_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL;
+ cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
+ cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
+ cctx->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cctx->workspace, ENTROPY_WORKSPACE_SIZE);
+ cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
+ return cctx;
+}
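+
+/* Usage sketch (illustrative only, not part of the upstream sources) :
+ * placing a compression context inside a caller-owned buffer, with no dynamic
+ * allocation afterwards. The buffer must be 8-byte aligned and is assumed
+ * large enough for the targeted parameters :
+ *
+ *   ZSTD_CCtx* const cctx = ZSTD_initStaticCCtx(buffer, bufferSize);
+ *   if (cctx == NULL) { ... }   // buffer too small or misaligned
+ *   // the context lives inside `buffer`; ZSTD_freeCCtx() refuses static contexts
+ */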
+
+/*
+ * Clears and frees all of the dictionaries in the CCtx.
+ */
+static void ZSTD_clearAllDicts(ZSTD_CCtx* cctx)
+{
+ ZSTD_customFree(cctx->localDict.dictBuffer, cctx->customMem);
+ ZSTD_freeCDict(cctx->localDict.cdict);
+ ZSTD_memset(&cctx->localDict, 0, sizeof(cctx->localDict));
+ ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));
+ cctx->cdict = NULL;
+}
+
+static size_t ZSTD_sizeof_localDict(ZSTD_localDict dict)
+{
+ size_t const bufferSize = dict.dictBuffer != NULL ? dict.dictSize : 0;
+ size_t const cdictSize = ZSTD_sizeof_CDict(dict.cdict);
+ return bufferSize + cdictSize;
+}
+
+static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx)
+{
+ assert(cctx != NULL);
+ assert(cctx->staticSize == 0);
+ ZSTD_clearAllDicts(cctx);
+ ZSTD_cwksp_free(&cctx->workspace, cctx->customMem);
+}
+
+size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
+{
+ if (cctx==NULL) return 0; /* support free on NULL */
+ RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
+ "not compatible with static CCtx");
+ {
+ int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx);
+ ZSTD_freeCCtxContent(cctx);
+ if (!cctxInWorkspace) {
+ ZSTD_customFree(cctx, cctx->customMem);
+ }
+ }
+ return 0;
+}
+
+
+static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx)
+{
+ (void)cctx;
+ return 0;
+}
+
+
+size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx)
+{
+ if (cctx==NULL) return 0; /* support sizeof on NULL */
+ /* cctx may be in the workspace */
+ return (cctx->workspace.workspace == cctx ? 0 : sizeof(*cctx))
+ + ZSTD_cwksp_sizeof(&cctx->workspace)
+ + ZSTD_sizeof_localDict(cctx->localDict)
+ + ZSTD_sizeof_mtctx(cctx);
+}
+
+size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)
+{
+ return ZSTD_sizeof_CCtx(zcs); /* same object */
+}
+
+/* private API call, for dictBuilder only */
+const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }
+
+/* Returns 1 if compression parameters are such that we should
+ * enable long distance matching (wlog >= 27, strategy >= btopt).
+ * Returns 0 otherwise.
+ */
+static U32 ZSTD_CParams_shouldEnableLdm(const ZSTD_compressionParameters* const cParams) {
+ return cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 27;
+}
+
+static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
+ ZSTD_compressionParameters cParams)
+{
+ ZSTD_CCtx_params cctxParams;
+ /* should not matter, as all cParams are presumed properly defined */
+ ZSTD_CCtxParams_init(&cctxParams, ZSTD_CLEVEL_DEFAULT);
+ cctxParams.cParams = cParams;
+
+ if (ZSTD_CParams_shouldEnableLdm(&cParams)) {
+ DEBUGLOG(4, "ZSTD_makeCCtxParamsFromCParams(): Including LDM into cctx params");
+ cctxParams.ldmParams.enableLdm = 1;
+ /* LDM is enabled by default for optimal parser and window size >= 128MB */
+ ZSTD_ldm_adjustParameters(&cctxParams.ldmParams, &cParams);
+ assert(cctxParams.ldmParams.hashLog >= cctxParams.ldmParams.bucketSizeLog);
+ assert(cctxParams.ldmParams.hashRateLog < 32);
+ }
+
+ assert(!ZSTD_checkCParams(cParams));
+ return cctxParams;
+}
+
+static ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced(
+ ZSTD_customMem customMem)
+{
+ ZSTD_CCtx_params* params;
+ if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
+ params = (ZSTD_CCtx_params*)ZSTD_customCalloc(
+ sizeof(ZSTD_CCtx_params), customMem);
+ if (!params) { return NULL; }
+ ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
+ params->customMem = customMem;
+ return params;
+}
+
+ZSTD_CCtx_params* ZSTD_createCCtxParams(void)
+{
+ return ZSTD_createCCtxParams_advanced(ZSTD_defaultCMem);
+}
+
+size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params)
+{
+ if (params == NULL) { return 0; }
+ ZSTD_customFree(params, params->customMem);
+ return 0;
+}
+
+size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params)
+{
+ return ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
+}
+
+size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) {
+ RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!");
+ ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
+ cctxParams->compressionLevel = compressionLevel;
+ cctxParams->fParams.contentSizeFlag = 1;
+ return 0;
+}
+
+#define ZSTD_NO_CLEVEL 0
+
+/*
+ * Initializes the cctxParams from params and compressionLevel.
+ * @param compressionLevel If params are derived from a compression level then that compression level, otherwise ZSTD_NO_CLEVEL.
+ */
+static void ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams, ZSTD_parameters const* params, int compressionLevel)
+{
+ assert(!ZSTD_checkCParams(params->cParams));
+ ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
+ cctxParams->cParams = params->cParams;
+ cctxParams->fParams = params->fParams;
+ /* Should not matter, as all cParams are presumed properly defined.
+ * But, set it for tracing anyway.
+ */
+ cctxParams->compressionLevel = compressionLevel;
+}
+
+size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
+{
+ RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!");
+ FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , "");
+ ZSTD_CCtxParams_init_internal(cctxParams, &params, ZSTD_NO_CLEVEL);
+ return 0;
+}
+
+/*
+ * Sets cctxParams' cParams and fParams from params, but otherwise leaves them alone.
+ * @param params Validated zstd parameters.
+ */
+static void ZSTD_CCtxParams_setZstdParams(
+ ZSTD_CCtx_params* cctxParams, const ZSTD_parameters* params)
+{
+ assert(!ZSTD_checkCParams(params->cParams));
+ cctxParams->cParams = params->cParams;
+ cctxParams->fParams = params->fParams;
+ /* Should not matter, as all cParams are presumed properly defined.
+ * But, set it for tracing anyway.
+ */
+ cctxParams->compressionLevel = ZSTD_NO_CLEVEL;
+}
+
+ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
+{
+ ZSTD_bounds bounds = { 0, 0, 0 };
+
+ switch(param)
+ {
+ case ZSTD_c_compressionLevel:
+ bounds.lowerBound = ZSTD_minCLevel();
+ bounds.upperBound = ZSTD_maxCLevel();
+ return bounds;
+
+ case ZSTD_c_windowLog:
+ bounds.lowerBound = ZSTD_WINDOWLOG_MIN;
+ bounds.upperBound = ZSTD_WINDOWLOG_MAX;
+ return bounds;
+
+ case ZSTD_c_hashLog:
+ bounds.lowerBound = ZSTD_HASHLOG_MIN;
+ bounds.upperBound = ZSTD_HASHLOG_MAX;
+ return bounds;
+
+ case ZSTD_c_chainLog:
+ bounds.lowerBound = ZSTD_CHAINLOG_MIN;
+ bounds.upperBound = ZSTD_CHAINLOG_MAX;
+ return bounds;
+
+ case ZSTD_c_searchLog:
+ bounds.lowerBound = ZSTD_SEARCHLOG_MIN;
+ bounds.upperBound = ZSTD_SEARCHLOG_MAX;
+ return bounds;
+
+ case ZSTD_c_minMatch:
+ bounds.lowerBound = ZSTD_MINMATCH_MIN;
+ bounds.upperBound = ZSTD_MINMATCH_MAX;
+ return bounds;
+
+ case ZSTD_c_targetLength:
+ bounds.lowerBound = ZSTD_TARGETLENGTH_MIN;
+ bounds.upperBound = ZSTD_TARGETLENGTH_MAX;
+ return bounds;
+
+ case ZSTD_c_strategy:
+ bounds.lowerBound = ZSTD_STRATEGY_MIN;
+ bounds.upperBound = ZSTD_STRATEGY_MAX;
+ return bounds;
+
+ case ZSTD_c_contentSizeFlag:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_checksumFlag:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_dictIDFlag:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_nbWorkers:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 0;
+ return bounds;
+
+ case ZSTD_c_jobSize:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 0;
+ return bounds;
+
+ case ZSTD_c_overlapLog:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 0;
+ return bounds;
+
+ case ZSTD_c_enableDedicatedDictSearch:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_enableLongDistanceMatching:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_ldmHashLog:
+ bounds.lowerBound = ZSTD_LDM_HASHLOG_MIN;
+ bounds.upperBound = ZSTD_LDM_HASHLOG_MAX;
+ return bounds;
+
+ case ZSTD_c_ldmMinMatch:
+ bounds.lowerBound = ZSTD_LDM_MINMATCH_MIN;
+ bounds.upperBound = ZSTD_LDM_MINMATCH_MAX;
+ return bounds;
+
+ case ZSTD_c_ldmBucketSizeLog:
+ bounds.lowerBound = ZSTD_LDM_BUCKETSIZELOG_MIN;
+ bounds.upperBound = ZSTD_LDM_BUCKETSIZELOG_MAX;
+ return bounds;
+
+ case ZSTD_c_ldmHashRateLog:
+ bounds.lowerBound = ZSTD_LDM_HASHRATELOG_MIN;
+ bounds.upperBound = ZSTD_LDM_HASHRATELOG_MAX;
+ return bounds;
+
+ /* experimental parameters */
+ case ZSTD_c_rsyncable:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_forceMaxWindow :
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_format:
+ ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless);
+ bounds.lowerBound = ZSTD_f_zstd1;
+ bounds.upperBound = ZSTD_f_zstd1_magicless; /* note : how to ensure at compile time that this is the highest value enum ? */
+ return bounds;
+
+ case ZSTD_c_forceAttachDict:
+ ZSTD_STATIC_ASSERT(ZSTD_dictDefaultAttach < ZSTD_dictForceLoad);
+ bounds.lowerBound = ZSTD_dictDefaultAttach;
+ bounds.upperBound = ZSTD_dictForceLoad; /* note : how to ensure at compile time that this is the highest value enum ? */
+ return bounds;
+
+ case ZSTD_c_literalCompressionMode:
+ ZSTD_STATIC_ASSERT(ZSTD_lcm_auto < ZSTD_lcm_huffman && ZSTD_lcm_huffman < ZSTD_lcm_uncompressed);
+ bounds.lowerBound = ZSTD_lcm_auto;
+ bounds.upperBound = ZSTD_lcm_uncompressed;
+ return bounds;
+
+ case ZSTD_c_targetCBlockSize:
+ bounds.lowerBound = ZSTD_TARGETCBLOCKSIZE_MIN;
+ bounds.upperBound = ZSTD_TARGETCBLOCKSIZE_MAX;
+ return bounds;
+
+ case ZSTD_c_srcSizeHint:
+ bounds.lowerBound = ZSTD_SRCSIZEHINT_MIN;
+ bounds.upperBound = ZSTD_SRCSIZEHINT_MAX;
+ return bounds;
+
+ case ZSTD_c_stableInBuffer:
+ case ZSTD_c_stableOutBuffer:
+ bounds.lowerBound = (int)ZSTD_bm_buffered;
+ bounds.upperBound = (int)ZSTD_bm_stable;
+ return bounds;
+
+ case ZSTD_c_blockDelimiters:
+ bounds.lowerBound = (int)ZSTD_sf_noBlockDelimiters;
+ bounds.upperBound = (int)ZSTD_sf_explicitBlockDelimiters;
+ return bounds;
+
+ case ZSTD_c_validateSequences:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ default:
+ bounds.error = ERROR(parameter_unsupported);
+ return bounds;
+ }
+}
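+
+/* Usage sketch (illustrative only, not part of the upstream sources) :
+ * querying the valid range of a parameter before setting it, mirroring what
+ * the static helper ZSTD_cParam_clampBounds() below does internally; `cctx`
+ * and `requestedLevel` are assumed to be provided by the caller :
+ *
+ *   ZSTD_bounds const b = ZSTD_cParam_getBounds(ZSTD_c_compressionLevel);
+ *   if (!ZSTD_isError(b.error)) {
+ *       int level = requestedLevel;
+ *       if (level < b.lowerBound) level = b.lowerBound;
+ *       if (level > b.upperBound) level = b.upperBound;
+ *       ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, level);
+ *   }
+ */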
+
+/* ZSTD_cParam_clampBounds:
+ * Clamps the value into the bounded range.
+ */
+static size_t ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value)
+{
+ ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
+ if (ZSTD_isError(bounds.error)) return bounds.error;
+ if (*value < bounds.lowerBound) *value = bounds.lowerBound;
+ if (*value > bounds.upperBound) *value = bounds.upperBound;
+ return 0;
+}
+
+#define BOUNDCHECK(cParam, val) { \
+ RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val), \
+ parameter_outOfBound, "Param out of bounds"); \
+}
+
+
+static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
+{
+ switch(param)
+ {
+ case ZSTD_c_compressionLevel:
+ case ZSTD_c_hashLog:
+ case ZSTD_c_chainLog:
+ case ZSTD_c_searchLog:
+ case ZSTD_c_minMatch:
+ case ZSTD_c_targetLength:
+ case ZSTD_c_strategy:
+ return 1;
+
+ case ZSTD_c_format:
+ case ZSTD_c_windowLog:
+ case ZSTD_c_contentSizeFlag:
+ case ZSTD_c_checksumFlag:
+ case ZSTD_c_dictIDFlag:
+ case ZSTD_c_forceMaxWindow :
+ case ZSTD_c_nbWorkers:
+ case ZSTD_c_jobSize:
+ case ZSTD_c_overlapLog:
+ case ZSTD_c_rsyncable:
+ case ZSTD_c_enableDedicatedDictSearch:
+ case ZSTD_c_enableLongDistanceMatching:
+ case ZSTD_c_ldmHashLog:
+ case ZSTD_c_ldmMinMatch:
+ case ZSTD_c_ldmBucketSizeLog:
+ case ZSTD_c_ldmHashRateLog:
+ case ZSTD_c_forceAttachDict:
+ case ZSTD_c_literalCompressionMode:
+ case ZSTD_c_targetCBlockSize:
+ case ZSTD_c_srcSizeHint:
+ case ZSTD_c_stableInBuffer:
+ case ZSTD_c_stableOutBuffer:
+ case ZSTD_c_blockDelimiters:
+ case ZSTD_c_validateSequences:
+ default:
+ return 0;
+ }
+}
+
+size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
+{
+ DEBUGLOG(4, "ZSTD_CCtx_setParameter (%i, %i)", (int)param, value);
+ if (cctx->streamStage != zcss_init) {
+ if (ZSTD_isUpdateAuthorized(param)) {
+ cctx->cParamsChanged = 1;
+ } else {
+ RETURN_ERROR(stage_wrong, "can only set params in ctx init stage");
+ } }
+
+ switch(param)
+ {
+ case ZSTD_c_nbWorkers:
+ RETURN_ERROR_IF((value!=0) && cctx->staticSize, parameter_unsupported,
+ "MT not compatible with static alloc");
+ break;
+
+ case ZSTD_c_compressionLevel:
+ case ZSTD_c_windowLog:
+ case ZSTD_c_hashLog:
+ case ZSTD_c_chainLog:
+ case ZSTD_c_searchLog:
+ case ZSTD_c_minMatch:
+ case ZSTD_c_targetLength:
+ case ZSTD_c_strategy:
+ case ZSTD_c_ldmHashRateLog:
+ case ZSTD_c_format:
+ case ZSTD_c_contentSizeFlag:
+ case ZSTD_c_checksumFlag:
+ case ZSTD_c_dictIDFlag:
+ case ZSTD_c_forceMaxWindow:
+ case ZSTD_c_forceAttachDict:
+ case ZSTD_c_literalCompressionMode:
+ case ZSTD_c_jobSize:
+ case ZSTD_c_overlapLog:
+ case ZSTD_c_rsyncable:
+ case ZSTD_c_enableDedicatedDictSearch:
+ case ZSTD_c_enableLongDistanceMatching:
+ case ZSTD_c_ldmHashLog:
+ case ZSTD_c_ldmMinMatch:
+ case ZSTD_c_ldmBucketSizeLog:
+ case ZSTD_c_targetCBlockSize:
+ case ZSTD_c_srcSizeHint:
+ case ZSTD_c_stableInBuffer:
+ case ZSTD_c_stableOutBuffer:
+ case ZSTD_c_blockDelimiters:
+ case ZSTD_c_validateSequences:
+ break;
+
+ default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
+ }
+ return ZSTD_CCtxParams_setParameter(&cctx->requestedParams, param, value);
+}
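+
+/* Usage sketch (illustrative only, not part of the upstream sources) :
+ * typical advanced-API setup before compressing, assuming `cctx` was obtained
+ * from ZSTD_createCCtx(); each call returns a size_t to be checked with
+ * ZSTD_isError() :
+ *
+ *   ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 3);
+ *   ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
+ *   ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, 20);
+ */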
+
+size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
+ ZSTD_cParameter param, int value)
+{
+ DEBUGLOG(4, "ZSTD_CCtxParams_setParameter (%i, %i)", (int)param, value);
+ switch(param)
+ {
+ case ZSTD_c_format :
+ BOUNDCHECK(ZSTD_c_format, value);
+ CCtxParams->format = (ZSTD_format_e)value;
+ return (size_t)CCtxParams->format;
+
+ case ZSTD_c_compressionLevel : {
+ FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), "");
+ if (value == 0)
+ CCtxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT; /* 0 == default */
+ else
+ CCtxParams->compressionLevel = value;
+ if (CCtxParams->compressionLevel >= 0) return (size_t)CCtxParams->compressionLevel;
+ return 0; /* return type (size_t) cannot represent negative values */
+ }
+
+ case ZSTD_c_windowLog :
+ if (value!=0) /* 0 => use default */
+ BOUNDCHECK(ZSTD_c_windowLog, value);
+ CCtxParams->cParams.windowLog = (U32)value;
+ return CCtxParams->cParams.windowLog;
+
+ case ZSTD_c_hashLog :
+ if (value!=0) /* 0 => use default */
+ BOUNDCHECK(ZSTD_c_hashLog, value);
+ CCtxParams->cParams.hashLog = (U32)value;
+ return CCtxParams->cParams.hashLog;
+
+ case ZSTD_c_chainLog :
+ if (value!=0) /* 0 => use default */
+ BOUNDCHECK(ZSTD_c_chainLog, value);
+ CCtxParams->cParams.chainLog = (U32)value;
+ return CCtxParams->cParams.chainLog;
+
+ case ZSTD_c_searchLog :
+ if (value!=0) /* 0 => use default */
+ BOUNDCHECK(ZSTD_c_searchLog, value);
+ CCtxParams->cParams.searchLog = (U32)value;
+ return (size_t)value;
+
+ case ZSTD_c_minMatch :
+ if (value!=0) /* 0 => use default */
+ BOUNDCHECK(ZSTD_c_minMatch, value);
+ CCtxParams->cParams.minMatch = value;
+ return CCtxParams->cParams.minMatch;
+
+ case ZSTD_c_targetLength :
+ BOUNDCHECK(ZSTD_c_targetLength, value);
+ CCtxParams->cParams.targetLength = value;
+ return CCtxParams->cParams.targetLength;
+
+ case ZSTD_c_strategy :
+ if (value!=0) /* 0 => use default */
+ BOUNDCHECK(ZSTD_c_strategy, value);
+ CCtxParams->cParams.strategy = (ZSTD_strategy)value;
+ return (size_t)CCtxParams->cParams.strategy;
+
+ case ZSTD_c_contentSizeFlag :
+ /* Content size written in frame header _when known_ (default:1) */
+ DEBUGLOG(4, "set content size flag = %u", (value!=0));
+ CCtxParams->fParams.contentSizeFlag = value != 0;
+ return CCtxParams->fParams.contentSizeFlag;
+
+ case ZSTD_c_checksumFlag :
+ /* A 32-bits content checksum will be calculated and written at end of frame (default:0) */
+ CCtxParams->fParams.checksumFlag = value != 0;
+ return CCtxParams->fParams.checksumFlag;
+
+ case ZSTD_c_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */
+ DEBUGLOG(4, "set dictIDFlag = %u", (value!=0));
+ CCtxParams->fParams.noDictIDFlag = !value;
+ return !CCtxParams->fParams.noDictIDFlag;
+
+ case ZSTD_c_forceMaxWindow :
+ CCtxParams->forceWindow = (value != 0);
+ return CCtxParams->forceWindow;
+
+ case ZSTD_c_forceAttachDict : {
+ const ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value;
+ BOUNDCHECK(ZSTD_c_forceAttachDict, pref);
+ CCtxParams->attachDictPref = pref;
+ return CCtxParams->attachDictPref;
+ }
+
+ case ZSTD_c_literalCompressionMode : {
+ const ZSTD_literalCompressionMode_e lcm = (ZSTD_literalCompressionMode_e)value;
+ BOUNDCHECK(ZSTD_c_literalCompressionMode, lcm);
+ CCtxParams->literalCompressionMode = lcm;
+ return CCtxParams->literalCompressionMode;
+ }
+
+ case ZSTD_c_nbWorkers :
+ RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
+ return 0;
+
+ case ZSTD_c_jobSize :
+ RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
+ return 0;
+
+ case ZSTD_c_overlapLog :
+ RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
+ return 0;
+
+ case ZSTD_c_rsyncable :
+ RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
+ return 0;
+
+ case ZSTD_c_enableDedicatedDictSearch :
+ CCtxParams->enableDedicatedDictSearch = (value!=0);
+ return CCtxParams->enableDedicatedDictSearch;
+
+ case ZSTD_c_enableLongDistanceMatching :
+ CCtxParams->ldmParams.enableLdm = (value!=0);
+ return CCtxParams->ldmParams.enableLdm;
+
+ case ZSTD_c_ldmHashLog :
+ if (value!=0) /* 0 ==> auto */
+ BOUNDCHECK(ZSTD_c_ldmHashLog, value);
+ CCtxParams->ldmParams.hashLog = value;
+ return CCtxParams->ldmParams.hashLog;
+
+ case ZSTD_c_ldmMinMatch :
+ if (value!=0) /* 0 ==> default */
+ BOUNDCHECK(ZSTD_c_ldmMinMatch, value);
+ CCtxParams->ldmParams.minMatchLength = value;
+ return CCtxParams->ldmParams.minMatchLength;
+
+ case ZSTD_c_ldmBucketSizeLog :
+ if (value!=0) /* 0 ==> default */
+ BOUNDCHECK(ZSTD_c_ldmBucketSizeLog, value);
+ CCtxParams->ldmParams.bucketSizeLog = value;
+ return CCtxParams->ldmParams.bucketSizeLog;
+
+ case ZSTD_c_ldmHashRateLog :
+ if (value!=0) /* 0 ==> default */
+ BOUNDCHECK(ZSTD_c_ldmHashRateLog, value);
+ CCtxParams->ldmParams.hashRateLog = value;
+ return CCtxParams->ldmParams.hashRateLog;
+
+ case ZSTD_c_targetCBlockSize :
+ if (value!=0) /* 0 ==> default */
+ BOUNDCHECK(ZSTD_c_targetCBlockSize, value);
+ CCtxParams->targetCBlockSize = value;
+ return CCtxParams->targetCBlockSize;
+
+ case ZSTD_c_srcSizeHint :
+ if (value!=0) /* 0 ==> default */
+ BOUNDCHECK(ZSTD_c_srcSizeHint, value);
+ CCtxParams->srcSizeHint = value;
+ return CCtxParams->srcSizeHint;
+
+ case ZSTD_c_stableInBuffer:
+ BOUNDCHECK(ZSTD_c_stableInBuffer, value);
+ CCtxParams->inBufferMode = (ZSTD_bufferMode_e)value;
+ return CCtxParams->inBufferMode;
+
+ case ZSTD_c_stableOutBuffer:
+ BOUNDCHECK(ZSTD_c_stableOutBuffer, value);
+ CCtxParams->outBufferMode = (ZSTD_bufferMode_e)value;
+ return CCtxParams->outBufferMode;
+
+ case ZSTD_c_blockDelimiters:
+ BOUNDCHECK(ZSTD_c_blockDelimiters, value);
+ CCtxParams->blockDelimiters = (ZSTD_sequenceFormat_e)value;
+ return CCtxParams->blockDelimiters;
+
+ case ZSTD_c_validateSequences:
+ BOUNDCHECK(ZSTD_c_validateSequences, value);
+ CCtxParams->validateSequences = value;
+ return CCtxParams->validateSequences;
+
+ default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
+ }
+}
+
+size_t ZSTD_CCtx_getParameter(ZSTD_CCtx const* cctx, ZSTD_cParameter param, int* value)
+{
+ return ZSTD_CCtxParams_getParameter(&cctx->requestedParams, param, value);
+}
+
+size_t ZSTD_CCtxParams_getParameter(
+ ZSTD_CCtx_params const* CCtxParams, ZSTD_cParameter param, int* value)
+{
+ switch(param)
+ {
+ case ZSTD_c_format :
+ *value = CCtxParams->format;
+ break;
+ case ZSTD_c_compressionLevel :
+ *value = CCtxParams->compressionLevel;
+ break;
+ case ZSTD_c_windowLog :
+ *value = (int)CCtxParams->cParams.windowLog;
+ break;
+ case ZSTD_c_hashLog :
+ *value = (int)CCtxParams->cParams.hashLog;
+ break;
+ case ZSTD_c_chainLog :
+ *value = (int)CCtxParams->cParams.chainLog;
+ break;
+ case ZSTD_c_searchLog :
+ *value = CCtxParams->cParams.searchLog;
+ break;
+ case ZSTD_c_minMatch :
+ *value = CCtxParams->cParams.minMatch;
+ break;
+ case ZSTD_c_targetLength :
+ *value = CCtxParams->cParams.targetLength;
+ break;
+ case ZSTD_c_strategy :
+ *value = (unsigned)CCtxParams->cParams.strategy;
+ break;
+ case ZSTD_c_contentSizeFlag :
+ *value = CCtxParams->fParams.contentSizeFlag;
+ break;
+ case ZSTD_c_checksumFlag :
+ *value = CCtxParams->fParams.checksumFlag;
+ break;
+ case ZSTD_c_dictIDFlag :
+ *value = !CCtxParams->fParams.noDictIDFlag;
+ break;
+ case ZSTD_c_forceMaxWindow :
+ *value = CCtxParams->forceWindow;
+ break;
+ case ZSTD_c_forceAttachDict :
+ *value = CCtxParams->attachDictPref;
+ break;
+ case ZSTD_c_literalCompressionMode :
+ *value = CCtxParams->literalCompressionMode;
+ break;
+ case ZSTD_c_nbWorkers :
+ assert(CCtxParams->nbWorkers == 0);
+ *value = CCtxParams->nbWorkers;
+ break;
+ case ZSTD_c_jobSize :
+ RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
+ case ZSTD_c_overlapLog :
+ RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
+ case ZSTD_c_rsyncable :
+ RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
+ case ZSTD_c_enableDedicatedDictSearch :
+ *value = CCtxParams->enableDedicatedDictSearch;
+ break;
+ case ZSTD_c_enableLongDistanceMatching :
+ *value = CCtxParams->ldmParams.enableLdm;
+ break;
+ case ZSTD_c_ldmHashLog :
+ *value = CCtxParams->ldmParams.hashLog;
+ break;
+ case ZSTD_c_ldmMinMatch :
+ *value = CCtxParams->ldmParams.minMatchLength;
+ break;
+ case ZSTD_c_ldmBucketSizeLog :
+ *value = CCtxParams->ldmParams.bucketSizeLog;
+ break;
+ case ZSTD_c_ldmHashRateLog :
+ *value = CCtxParams->ldmParams.hashRateLog;
+ break;
+ case ZSTD_c_targetCBlockSize :
+ *value = (int)CCtxParams->targetCBlockSize;
+ break;
+ case ZSTD_c_srcSizeHint :
+ *value = (int)CCtxParams->srcSizeHint;
+ break;
+ case ZSTD_c_stableInBuffer :
+ *value = (int)CCtxParams->inBufferMode;
+ break;
+ case ZSTD_c_stableOutBuffer :
+ *value = (int)CCtxParams->outBufferMode;
+ break;
+ case ZSTD_c_blockDelimiters :
+ *value = (int)CCtxParams->blockDelimiters;
+ break;
+ case ZSTD_c_validateSequences :
+ *value = (int)CCtxParams->validateSequences;
+ break;
+ default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
+ }
+ return 0;
+}
+
+/* ZSTD_CCtx_setParametersUsingCCtxParams() :
+ * just applies `params` into `cctx`
+ * no action is performed, parameters are merely stored.
+ * If ZSTDMT is enabled, parameters are pushed to cctx->mtctx.
+ * This is possible even if a compression is ongoing.
+ * In which case, new parameters will be applied on the fly, starting with next compression job.
+ */
+size_t ZSTD_CCtx_setParametersUsingCCtxParams(
+ ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params)
+{
+ DEBUGLOG(4, "ZSTD_CCtx_setParametersUsingCCtxParams");
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
+ "The context is in the wrong stage!");
+ RETURN_ERROR_IF(cctx->cdict, stage_wrong,
+ "Can't override parameters with cdict attached (some must "
+ "be inherited from the cdict).");
+
+ cctx->requestedParams = *params;
+ return 0;
+}
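+/* Illustrative sketch (not part of this file's logic) : build a parameter set
+ * once, then apply it to a fresh context using the advanced API names defined
+ * in this file. Error handling is abbreviated.
+ *
+ *     ZSTD_CCtx_params* const cctxParams = ZSTD_createCCtxParams();
+ *     ZSTD_CCtx* const cctx = ZSTD_createCCtx();
+ *     ZSTD_CCtxParams_init(cctxParams, 3);                       (compression level 3)
+ *     ZSTD_CCtxParams_setParameter(cctxParams, ZSTD_c_checksumFlag, 1);
+ *     { size_t const err = ZSTD_CCtx_setParametersUsingCCtxParams(cctx, cctxParams);
+ *       if (ZSTD_isError(err)) return err; }
+ *
+ * The call must happen while the context is still in the init stage and
+ * before any dictionary is attached, as enforced above.
+ */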
+
+ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize)
+{
+ DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize);
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
+ "Can't set pledgedSrcSize when not in init stage.");
+ cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
+ return 0;
+}
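+/* Illustrative sketch : announce the exact source size before a streaming
+ * compression so the frame header can record it. The src/dst buffers are
+ * hypothetical; the streaming names are the upstream API as imported here.
+ *
+ *     ZSTD_CCtx_setPledgedSrcSize(cctx, srcSize);
+ *     {   ZSTD_inBuffer  input  = { src, srcSize, 0 };
+ *         ZSTD_outBuffer output = { dst, dstCapacity, 0 };
+ *         size_t const remaining = ZSTD_compressStream2(cctx, &output, &input, ZSTD_e_end);
+ *         if (ZSTD_isError(remaining)) return remaining;
+ *     }
+ *
+ * Feeding more or fewer than srcSize bytes across the frame is an error,
+ * since the pledge is checked against the data actually consumed.
+ */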
+
+static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(
+ int const compressionLevel,
+ size_t const dictSize);
+static int ZSTD_dedicatedDictSearch_isSupported(
+ const ZSTD_compressionParameters* cParams);
+static void ZSTD_dedicatedDictSearch_revertCParams(
+ ZSTD_compressionParameters* cParams);
+
+/*
+ * Initializes the local dict using the requested parameters.
+ * NOTE: This does not use the pledged src size, because it may be used for more
+ * than one compression.
+ */
+static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx)
+{
+ ZSTD_localDict* const dl = &cctx->localDict;
+ if (dl->dict == NULL) {
+ /* No local dictionary. */
+ assert(dl->dictBuffer == NULL);
+ assert(dl->cdict == NULL);
+ assert(dl->dictSize == 0);
+ return 0;
+ }
+ if (dl->cdict != NULL) {
+ assert(cctx->cdict == dl->cdict);
+ /* Local dictionary already initialized. */
+ return 0;
+ }
+ assert(dl->dictSize > 0);
+ assert(cctx->cdict == NULL);
+ assert(cctx->prefixDict.dict == NULL);
+
+ dl->cdict = ZSTD_createCDict_advanced2(
+ dl->dict,
+ dl->dictSize,
+ ZSTD_dlm_byRef,
+ dl->dictContentType,
+ &cctx->requestedParams,
+ cctx->customMem);
+ RETURN_ERROR_IF(!dl->cdict, memory_allocation, "ZSTD_createCDict_advanced2 failed");
+ cctx->cdict = dl->cdict;
+ return 0;
+}
+
+size_t ZSTD_CCtx_loadDictionary_advanced(
+ ZSTD_CCtx* cctx, const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
+{
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
+ "Can't load a dictionary when ctx is not in init stage.");
+ DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize);
+ ZSTD_clearAllDicts(cctx); /* in case one already exists */
+ if (dict == NULL || dictSize == 0) /* no dictionary mode */
+ return 0;
+ if (dictLoadMethod == ZSTD_dlm_byRef) {
+ cctx->localDict.dict = dict;
+ } else {
+ void* dictBuffer;
+ RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
+ "no malloc for static CCtx");
+ dictBuffer = ZSTD_customMalloc(dictSize, cctx->customMem);
+ RETURN_ERROR_IF(!dictBuffer, memory_allocation, "NULL pointer!");
+ ZSTD_memcpy(dictBuffer, dict, dictSize);
+ cctx->localDict.dictBuffer = dictBuffer;
+ cctx->localDict.dict = dictBuffer;
+ }
+ cctx->localDict.dictSize = dictSize;
+ cctx->localDict.dictContentType = dictContentType;
+ return 0;
+}
+
+ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(
+ ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
+{
+ return ZSTD_CCtx_loadDictionary_advanced(
+ cctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
+}
+
+ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
+{
+ return ZSTD_CCtx_loadDictionary_advanced(
+ cctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
+}
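+/* Illustrative sketch of the two loading modes (dictBuf/dictLen are
+ * hypothetical). With byCopy the context keeps its own copy, so the caller may
+ * free dictBuf immediately; with byReference the caller must keep dictBuf
+ * alive and unmodified for the lifetime of the context.
+ *
+ *     ZSTD_CCtx_loadDictionary(cctx, dictBuf, dictLen);              (copies)
+ *     ZSTD_CCtx_loadDictionary_byReference(cctx, dictBuf, dictLen);  (references)
+ *
+ * Either call must be made while the context is in the init stage, and the
+ * dictionary then applies to every following frame until it is replaced or
+ * the parameters are reset.
+ */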
+
+
+size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
+{
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
+ "Can't ref a dict when ctx not in init stage.");
+ /* Free the existing local cdict (if any) to save memory. */
+ ZSTD_clearAllDicts(cctx);
+ cctx->cdict = cdict;
+ return 0;
+}
+
+size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool)
+{
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
+ "Can't ref a pool when ctx not in init stage.");
+ cctx->pool = pool;
+ return 0;
+}
+
+size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize)
+{
+ return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dct_rawContent);
+}
+
+size_t ZSTD_CCtx_refPrefix_advanced(
+ ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
+{
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
+ "Can't ref a prefix when ctx not in init stage.");
+ ZSTD_clearAllDicts(cctx);
+ if (prefix != NULL && prefixSize > 0) {
+ cctx->prefixDict.dict = prefix;
+ cctx->prefixDict.dictSize = prefixSize;
+ cctx->prefixDict.dictContentType = dictContentType;
+ }
+ return 0;
+}
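+/* Illustrative sketch : a prefix is a lighter, single-use alternative to a
+ * dictionary. It is referenced, never copied, and is forgotten once the next
+ * frame completes, so it must be re-referenced before each frame that needs
+ * it (prevBlock/prevBlockSize are hypothetical).
+ *
+ *     ZSTD_CCtx_refPrefix(cctx, prevBlock, prevBlockSize);
+ *     ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
+ *       ... the next frame starts without a prefix unless refPrefix is called again
+ */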
+
+/*! ZSTD_CCtx_reset() :
+ * Also dumps dictionary */
+size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset)
+{
+ if ( (reset == ZSTD_reset_session_only)
+ || (reset == ZSTD_reset_session_and_parameters) ) {
+ cctx->streamStage = zcss_init;
+ cctx->pledgedSrcSizePlusOne = 0;
+ }
+ if ( (reset == ZSTD_reset_parameters)
+ || (reset == ZSTD_reset_session_and_parameters) ) {
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
+ "Can't reset parameters only when not in init stage.");
+ ZSTD_clearAllDicts(cctx);
+ return ZSTD_CCtxParams_reset(&cctx->requestedParams);
+ }
+ return 0;
+}
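+/* Illustrative sketch of the three reset directives, as handled above :
+ *
+ *     ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
+ *         abandons the current frame, keeps parameters and dictionary
+ *     ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters);
+ *         only legal between frames; drops dictionaries, restores defaults
+ *     ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters);
+ *         both of the above
+ */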
+
+
+/* ZSTD_checkCParams() :
+ check that CParam values remain within the authorized range.
+ @return : 0, or an error code if one value is beyond authorized range */
+size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
+{
+ BOUNDCHECK(ZSTD_c_windowLog, (int)cParams.windowLog);
+ BOUNDCHECK(ZSTD_c_chainLog, (int)cParams.chainLog);
+ BOUNDCHECK(ZSTD_c_hashLog, (int)cParams.hashLog);
+ BOUNDCHECK(ZSTD_c_searchLog, (int)cParams.searchLog);
+ BOUNDCHECK(ZSTD_c_minMatch, (int)cParams.minMatch);
+ BOUNDCHECK(ZSTD_c_targetLength,(int)cParams.targetLength);
+ BOUNDCHECK(ZSTD_c_strategy, cParams.strategy);
+ return 0;
+}
+
+/* ZSTD_clampCParams() :
+ * clamp CParam values into their valid range.
+ * @return : valid CParams */
+static ZSTD_compressionParameters
+ZSTD_clampCParams(ZSTD_compressionParameters cParams)
+{
+# define CLAMP_TYPE(cParam, val, type) { \
+ ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam); \
+ if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound; \
+ else if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \
+ }
+# define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, unsigned)
+ CLAMP(ZSTD_c_windowLog, cParams.windowLog);
+ CLAMP(ZSTD_c_chainLog, cParams.chainLog);
+ CLAMP(ZSTD_c_hashLog, cParams.hashLog);
+ CLAMP(ZSTD_c_searchLog, cParams.searchLog);
+ CLAMP(ZSTD_c_minMatch, cParams.minMatch);
+ CLAMP(ZSTD_c_targetLength,cParams.targetLength);
+ CLAMP_TYPE(ZSTD_c_strategy,cParams.strategy, ZSTD_strategy);
+ return cParams;
+}
+
+/* ZSTD_cycleLog() :
+ * condition for correct operation : hashLog > 1 */
+U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
+{
+ U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
+ return hashLog - btScale;
+}
+
+/* ZSTD_dictAndWindowLog() :
+ * Returns an adjusted window log that is large enough to fit the source and the dictionary.
+ * The zstd format says that the entire dictionary is valid if one byte of the dictionary
+ * is within the window. So the hashLog and chainLog should be large enough to reference both
+ * the dictionary and the window. So we must use this adjusted dictAndWindowLog when downsizing
+ * the hashLog and chainLog.
+ * NOTE: srcSize must not be ZSTD_CONTENTSIZE_UNKNOWN.
+ */
+static U32 ZSTD_dictAndWindowLog(U32 windowLog, U64 srcSize, U64 dictSize)
+{
+ const U64 maxWindowSize = 1ULL << ZSTD_WINDOWLOG_MAX;
+ /* No dictionary ==> No change */
+ if (dictSize == 0) {
+ return windowLog;
+ }
+ assert(windowLog <= ZSTD_WINDOWLOG_MAX);
+ assert(srcSize != ZSTD_CONTENTSIZE_UNKNOWN); /* Handled in ZSTD_adjustCParams_internal() */
+ {
+ U64 const windowSize = 1ULL << windowLog;
+ U64 const dictAndWindowSize = dictSize + windowSize;
+ /* If the window size is already large enough to fit both the source and the dictionary
+ * then just use the window size. Otherwise adjust so that it fits the dictionary and
+ * the window.
+ */
+ if (windowSize >= dictSize + srcSize) {
+ return windowLog; /* Window size large enough already */
+ } else if (dictAndWindowSize >= maxWindowSize) {
+ return ZSTD_WINDOWLOG_MAX; /* Larger than max window log */
+ } else {
+ return ZSTD_highbit32((U32)dictAndWindowSize - 1) + 1;
+ }
+ }
+}
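+/* Worked example for ZSTD_dictAndWindowLog(), numbers only, for intuition :
+ * windowLog = 20 (1 MiB window), dictSize = 512 KiB, srcSize = 1.5 MiB.
+ * windowSize (1 MiB) < dictSize + srcSize (2 MiB), and
+ * dictAndWindowSize = 1.5 MiB < maxWindowSize, so the function returns
+ * ZSTD_highbit32(1.5 MiB - 1) + 1 = 21 : the window is widened just enough
+ * for the tables to reference both the dictionary and the window.
+ */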
+
+/* ZSTD_adjustCParams_internal() :
+ * optimize `cPar` for a specified input (`srcSize` and `dictSize`).
+ * mostly downsize to reduce memory consumption and initialization latency.
+ * `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known.
+ * `mode` is the mode for parameter adjustment. See docs for `ZSTD_cParamMode_e`.
+ * note : `srcSize==0` means 0!
+ * condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()). */
+static ZSTD_compressionParameters
+ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
+ unsigned long long srcSize,
+ size_t dictSize,
+ ZSTD_cParamMode_e mode)
+{
+ const U64 minSrcSize = 513; /* (1<<9) + 1 */
+ const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
+ assert(ZSTD_checkCParams(cPar)==0);
+
+ switch (mode) {
+ case ZSTD_cpm_unknown:
+ case ZSTD_cpm_noAttachDict:
+ /* If we don't know the source size, don't make any
+ * assumptions about it. We will already have selected
+ * smaller parameters if a dictionary is in use.
+ */
+ break;
+ case ZSTD_cpm_createCDict:
+ /* Assume a small source size when creating a dictionary
+ * with an unknown source size.
+ */
+ if (dictSize && srcSize == ZSTD_CONTENTSIZE_UNKNOWN)
+ srcSize = minSrcSize;
+ break;
+ case ZSTD_cpm_attachDict:
+ /* Dictionary has its own dedicated parameters which have
+ * already been selected. We are selecting parameters
+ * for only the source.
+ */
+ dictSize = 0;
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ /* resize windowLog if input is small enough, to use less memory */
+ if ( (srcSize < maxWindowResize)
+ && (dictSize < maxWindowResize) ) {
+ U32 const tSize = (U32)(srcSize + dictSize);
+ static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN;
+ U32 const srcLog = (tSize < hashSizeMin) ? ZSTD_HASHLOG_MIN :
+ ZSTD_highbit32(tSize-1) + 1;
+ if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;
+ }
+ if (srcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
+ U32 const dictAndWindowLog = ZSTD_dictAndWindowLog(cPar.windowLog, (U64)srcSize, (U64)dictSize);
+ U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
+ if (cPar.hashLog > dictAndWindowLog+1) cPar.hashLog = dictAndWindowLog+1;
+ if (cycleLog > dictAndWindowLog)
+ cPar.chainLog -= (cycleLog - dictAndWindowLog);
+ }
+
+ if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
+ cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* minimum wlog required for valid frame header */
+
+ return cPar;
+}
+
+ZSTD_compressionParameters
+ZSTD_adjustCParams(ZSTD_compressionParameters cPar,
+ unsigned long long srcSize,
+ size_t dictSize)
+{
+ cPar = ZSTD_clampCParams(cPar); /* resulting cPar is necessarily valid (all parameters within range) */
+ if (srcSize == 0) srcSize = ZSTD_CONTENTSIZE_UNKNOWN;
+ return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize, ZSTD_cpm_unknown);
+}
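+/* Illustrative sketch : shrink default parameters for a small, known input.
+ *
+ *     ZSTD_compressionParameters cp = ZSTD_getCParams(19, 0, 0);
+ *     cp = ZSTD_adjustCParams(cp, 4096, 0);
+ *
+ * With a 4 KiB source and no dictionary, the logic above caps windowLog at
+ * ZSTD_highbit32(4096 - 1) + 1 = 12, and hashLog/chainLog shrink accordingly,
+ * which reduces workspace size and initialization latency.
+ */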
+
+static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
+static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
+
+static void ZSTD_overrideCParams(
+ ZSTD_compressionParameters* cParams,
+ const ZSTD_compressionParameters* overrides)
+{
+ if (overrides->windowLog) cParams->windowLog = overrides->windowLog;
+ if (overrides->hashLog) cParams->hashLog = overrides->hashLog;
+ if (overrides->chainLog) cParams->chainLog = overrides->chainLog;
+ if (overrides->searchLog) cParams->searchLog = overrides->searchLog;
+ if (overrides->minMatch) cParams->minMatch = overrides->minMatch;
+ if (overrides->targetLength) cParams->targetLength = overrides->targetLength;
+ if (overrides->strategy) cParams->strategy = overrides->strategy;
+}
+
+ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
+ const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
+{
+ ZSTD_compressionParameters cParams;
+ if (srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN && CCtxParams->srcSizeHint > 0) {
+ srcSizeHint = CCtxParams->srcSizeHint;
+ }
+ cParams = ZSTD_getCParams_internal(CCtxParams->compressionLevel, srcSizeHint, dictSize, mode);
+ if (CCtxParams->ldmParams.enableLdm) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;
+ ZSTD_overrideCParams(&cParams, &CCtxParams->cParams);
+ assert(!ZSTD_checkCParams(cParams));
+ /* srcSizeHint == 0 means 0 */
+ return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize, mode);
+}
+
+static size_t
+ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
+ const U32 forCCtx)
+{
+ size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
+ size_t const hSize = ((size_t)1) << cParams->hashLog;
+ U32 const hashLog3 = (forCCtx && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
+ size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
+ /* We don't use ZSTD_cwksp_alloc_size() here because the tables aren't
+ * surrounded by redzones in ASAN. */
+ size_t const tableSpace = chainSize * sizeof(U32)
+ + hSize * sizeof(U32)
+ + h3Size * sizeof(U32);
+ size_t const optPotentialSpace =
+ ZSTD_cwksp_alloc_size((MaxML+1) * sizeof(U32))
+ + ZSTD_cwksp_alloc_size((MaxLL+1) * sizeof(U32))
+ + ZSTD_cwksp_alloc_size((MaxOff+1) * sizeof(U32))
+ + ZSTD_cwksp_alloc_size((1<<Litbits) * sizeof(U32))
+ + ZSTD_cwksp_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t))
+ + ZSTD_cwksp_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
+ size_t const optSpace = (forCCtx && (cParams->strategy >= ZSTD_btopt))
+ ? optPotentialSpace
+ : 0;
+ DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u",
+ (U32)chainSize, (U32)hSize, (U32)h3Size);
+ return tableSpace + optSpace;
+}
+
+static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
+ const ZSTD_compressionParameters* cParams,
+ const ldmParams_t* ldmParams,
+ const int isStatic,
+ const size_t buffInSize,
+ const size_t buffOutSize,
+ const U64 pledgedSrcSize)
+{
+ size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << cParams->windowLog), pledgedSrcSize));
+ size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
+ U32 const divider = (cParams->minMatch==3) ? 3 : 4;
+ size_t const maxNbSeq = blockSize / divider;
+ size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize)
+ + ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(seqDef))
+ + 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE));
+ size_t const entropySpace = ZSTD_cwksp_alloc_size(ENTROPY_WORKSPACE_SIZE);
+ size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t));
+ size_t const matchStateSize = ZSTD_sizeof_matchState(cParams, /* forCCtx */ 1);
+
+ size_t const ldmSpace = ZSTD_ldm_getTableSize(*ldmParams);
+ size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(*ldmParams, blockSize);
+ size_t const ldmSeqSpace = ldmParams->enableLdm ?
+ ZSTD_cwksp_alloc_size(maxNbLdmSeq * sizeof(rawSeq)) : 0;
+
+
+ size_t const bufferSpace = ZSTD_cwksp_alloc_size(buffInSize)
+ + ZSTD_cwksp_alloc_size(buffOutSize);
+
+ size_t const cctxSpace = isStatic ? ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx)) : 0;
+
+ size_t const neededSpace =
+ cctxSpace +
+ entropySpace +
+ blockStateSpace +
+ ldmSpace +
+ ldmSeqSpace +
+ matchStateSize +
+ tokenSpace +
+ bufferSpace;
+
+ DEBUGLOG(5, "estimate workspace : %u", (U32)neededSpace);
+ return neededSpace;
+}
+
+size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
+{
+ ZSTD_compressionParameters const cParams =
+ ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
+
+ RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
+ /* estimateCCtxSize is for one-shot compression. So no buffers should
+ * be needed. However, we still allocate two 0-sized buffers, which can
+ * take space under ASAN. */
+ return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
+ &cParams, &params->ldmParams, 1, 0, 0, ZSTD_CONTENTSIZE_UNKNOWN);
+}
+
+size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams)
+{
+ ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
+ return ZSTD_estimateCCtxSize_usingCCtxParams(&params);
+}
+
+static size_t ZSTD_estimateCCtxSize_internal(int compressionLevel)
+{
+ int tier = 0;
+ size_t largestSize = 0;
+ static const unsigned long long srcSizeTiers[4] = {16 KB, 128 KB, 256 KB, ZSTD_CONTENTSIZE_UNKNOWN};
+ for (; tier < 4; ++tier) {
+ /* Choose the set of cParams for a given level across all srcSizes that give the largest cctxSize */
+ ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeTiers[tier], 0, ZSTD_cpm_noAttachDict);
+ largestSize = MAX(ZSTD_estimateCCtxSize_usingCParams(cParams), largestSize);
+ }
+ return largestSize;
+}
+
+size_t ZSTD_estimateCCtxSize(int compressionLevel)
+{
+ int level;
+ size_t memBudget = 0;
+ for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
+ /* Ensure monotonically increasing memory usage as compression level increases */
+ size_t const newMB = ZSTD_estimateCCtxSize_internal(level);
+ if (newMB > memBudget) memBudget = newMB;
+ }
+ return memBudget;
+}
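+/* Illustrative sketch : size a static (no-malloc) compression context from
+ * the estimate above. ZSTD_initStaticCCtx is the upstream advanced API; the
+ * allocator call is a stand-in for whatever the caller uses.
+ *
+ *     size_t const need = ZSTD_estimateCCtxSize(3);
+ *     void* const wksp = allocate(need);            (hypothetical allocator)
+ *     ZSTD_CCtx* const cctx = ZSTD_initStaticCCtx(wksp, need);
+ *     if (cctx == NULL) return failure;   (workspace too small or misaligned)
+ *
+ * The estimate covers one-shot compression at that level for any source size;
+ * streaming contexts should use ZSTD_estimateCStreamSize() instead.
+ */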
+
+size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
+{
+ RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
+ { ZSTD_compressionParameters const cParams =
+ ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
+ size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
+ size_t const inBuffSize = (params->inBufferMode == ZSTD_bm_buffered)
+ ? ((size_t)1 << cParams.windowLog) + blockSize
+ : 0;
+ size_t const outBuffSize = (params->outBufferMode == ZSTD_bm_buffered)
+ ? ZSTD_compressBound(blockSize) + 1
+ : 0;
+
+ return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
+ &cParams, &params->ldmParams, 1, inBuffSize, outBuffSize,
+ ZSTD_CONTENTSIZE_UNKNOWN);
+ }
+}
+
+size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams)
+{
+ ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
+ return ZSTD_estimateCStreamSize_usingCCtxParams(&params);
+}
+
+static size_t ZSTD_estimateCStreamSize_internal(int compressionLevel)
+{
+ ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
+ return ZSTD_estimateCStreamSize_usingCParams(cParams);
+}
+
+size_t ZSTD_estimateCStreamSize(int compressionLevel)
+{
+ int level;
+ size_t memBudget = 0;
+ for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
+ size_t const newMB = ZSTD_estimateCStreamSize_internal(level);
+ if (newMB > memBudget) memBudget = newMB;
+ }
+ return memBudget;
+}
+
+/* ZSTD_getFrameProgression():
+ * tells how much data has been consumed (input) and produced (output) for current frame.
+ * able to count progression inside worker threads (non-blocking mode).
+ */
+ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx)
+{
+ { ZSTD_frameProgression fp;
+ size_t const buffered = (cctx->inBuff == NULL) ? 0 :
+ cctx->inBuffPos - cctx->inToCompress;
+ if (buffered) assert(cctx->inBuffPos >= cctx->inToCompress);
+ assert(buffered <= ZSTD_BLOCKSIZE_MAX);
+ fp.ingested = cctx->consumedSrcSize + buffered;
+ fp.consumed = cctx->consumedSrcSize;
+ fp.produced = cctx->producedCSize;
+ fp.flushed = cctx->producedCSize; /* simplified; some data might still be left within streaming output buffer */
+ fp.currentJobID = 0;
+ fp.nbActiveWorkers = 0;
+ return fp;
+} }
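+/* Illustrative sketch : poll progression during a long streaming compression.
+ * In this single-threaded build, consumed trails ingested by at most one
+ * buffered block, and flushed always equals produced.
+ *
+ *     ZSTD_frameProgression const fp = ZSTD_getFrameProgression(cctx);
+ *     pr_debug("ingested=%llu consumed=%llu produced=%llu\n",
+ *              fp.ingested, fp.consumed, fp.produced);
+ */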
+
+/*! ZSTD_toFlushNow()
+ * Only useful for multithreading scenarios currently (nbWorkers >= 1).
+ */
+size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx)
+{
+ (void)cctx;
+ return 0; /* over-simplification; could also check if context is currently running in streaming mode, and in which case, report how many bytes are left to be flushed within output buffer */
+}
+
+static void ZSTD_assertEqualCParams(ZSTD_compressionParameters cParams1,
+ ZSTD_compressionParameters cParams2)
+{
+ (void)cParams1;
+ (void)cParams2;
+ assert(cParams1.windowLog == cParams2.windowLog);
+ assert(cParams1.chainLog == cParams2.chainLog);
+ assert(cParams1.hashLog == cParams2.hashLog);
+ assert(cParams1.searchLog == cParams2.searchLog);
+ assert(cParams1.minMatch == cParams2.minMatch);
+ assert(cParams1.targetLength == cParams2.targetLength);
+ assert(cParams1.strategy == cParams2.strategy);
+}
+
+void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs)
+{
+ int i;
+ for (i = 0; i < ZSTD_REP_NUM; ++i)
+ bs->rep[i] = repStartValue[i];
+ bs->entropy.huf.repeatMode = HUF_repeat_none;
+ bs->entropy.fse.offcode_repeatMode = FSE_repeat_none;
+ bs->entropy.fse.matchlength_repeatMode = FSE_repeat_none;
+ bs->entropy.fse.litlength_repeatMode = FSE_repeat_none;
+}
+
+/*! ZSTD_invalidateMatchState()
+ * Invalidate all the matches in the match finder tables.
+ * Requires nextSrc and base to be set (can be NULL).
+ */
+static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms)
+{
+ ZSTD_window_clear(&ms->window);
+
+ ms->nextToUpdate = ms->window.dictLimit;
+ ms->loadedDictEnd = 0;
+ ms->opt.litLengthSum = 0; /* force reset of btopt stats */
+ ms->dictMatchState = NULL;
+}
+
+/*
+ * Controls, for this matchState reset, whether the tables need to be cleared /
+ * prepared for the coming compression (ZSTDcrp_makeClean), or whether the
+ * tables can be left unclean (ZSTDcrp_leaveDirty), because we know that a
+ * subsequent operation will overwrite the table space anyways (e.g., copying
+ * the matchState contents in from a CDict).
+ */
+typedef enum {
+ ZSTDcrp_makeClean,
+ ZSTDcrp_leaveDirty
+} ZSTD_compResetPolicy_e;
+
+/*
+ * Controls, for this matchState reset, whether indexing can continue where it
+ * left off (ZSTDirp_continue), or whether it needs to be restarted from zero
+ * (ZSTDirp_reset).
+ */
+typedef enum {
+ ZSTDirp_continue,
+ ZSTDirp_reset
+} ZSTD_indexResetPolicy_e;
+
+typedef enum {
+ ZSTD_resetTarget_CDict,
+ ZSTD_resetTarget_CCtx
+} ZSTD_resetTarget_e;
+
+static size_t
+ZSTD_reset_matchState(ZSTD_matchState_t* ms,
+ ZSTD_cwksp* ws,
+ const ZSTD_compressionParameters* cParams,
+ const ZSTD_compResetPolicy_e crp,
+ const ZSTD_indexResetPolicy_e forceResetIndex,
+ const ZSTD_resetTarget_e forWho)
+{
+ size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
+ size_t const hSize = ((size_t)1) << cParams->hashLog;
+ U32 const hashLog3 = ((forWho == ZSTD_resetTarget_CCtx) && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
+ size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
+
+ DEBUGLOG(4, "reset indices : %u", forceResetIndex == ZSTDirp_reset);
+ if (forceResetIndex == ZSTDirp_reset) {
+ ZSTD_window_init(&ms->window);
+ ZSTD_cwksp_mark_tables_dirty(ws);
+ }
+
+ ms->hashLog3 = hashLog3;
+
+ ZSTD_invalidateMatchState(ms);
+
+ assert(!ZSTD_cwksp_reserve_failed(ws)); /* check that allocation hasn't already failed */
+
+ ZSTD_cwksp_clear_tables(ws);
+
+ DEBUGLOG(5, "reserving table space");
+ /* table Space */
+ ms->hashTable = (U32*)ZSTD_cwksp_reserve_table(ws, hSize * sizeof(U32));
+ ms->chainTable = (U32*)ZSTD_cwksp_reserve_table(ws, chainSize * sizeof(U32));
+ ms->hashTable3 = (U32*)ZSTD_cwksp_reserve_table(ws, h3Size * sizeof(U32));
+ RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
+ "failed a workspace allocation in ZSTD_reset_matchState");
+
+ DEBUGLOG(4, "reset table : %u", crp!=ZSTDcrp_leaveDirty);
+ if (crp!=ZSTDcrp_leaveDirty) {
+ /* reset tables only */
+ ZSTD_cwksp_clean_tables(ws);
+ }
+
+ /* opt parser space */
+ if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) {
+ DEBUGLOG(4, "reserving optimal parser space");
+ ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (1<<Litbits) * sizeof(unsigned));
+ ms->opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxLL+1) * sizeof(unsigned));
+ ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxML+1) * sizeof(unsigned));
+ ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxOff+1) * sizeof(unsigned));
+ ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t));
+ ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
+ }
+
+ ms->cParams = *cParams;
+
+ RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
+ "failed a workspace allocation in ZSTD_reset_matchState");
+
+ return 0;
+}
+
+/* ZSTD_indexTooCloseToMax() :
+ * minor optimization : prefer memset() rather than reduceIndex()
+ * which is measurably slow in some circumstances (reported for Visual Studio).
+ * Works when re-using a context for a lot of smallish inputs :
+ * if all inputs are smaller than ZSTD_INDEXOVERFLOW_MARGIN,
+ * memset() will be triggered before reduceIndex().
+ */
+#define ZSTD_INDEXOVERFLOW_MARGIN (16 MB)
+static int ZSTD_indexTooCloseToMax(ZSTD_window_t w)
+{
+ return (size_t)(w.nextSrc - w.base) > (ZSTD_CURRENT_MAX - ZSTD_INDEXOVERFLOW_MARGIN);
+}
+
+/*! ZSTD_resetCCtx_internal() :
+ note : `params` are assumed fully validated at this stage */
+static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
+ ZSTD_CCtx_params params,
+ U64 const pledgedSrcSize,
+ ZSTD_compResetPolicy_e const crp,
+ ZSTD_buffered_policy_e const zbuff)
+{
+ ZSTD_cwksp* const ws = &zc->workspace;
+ DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u",
+ (U32)pledgedSrcSize, params.cParams.windowLog);
+ assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
+
+ zc->isFirstBlock = 1;
+
+ if (params.ldmParams.enableLdm) {
+ /* Adjust long distance matching parameters */
+ ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
+ assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
+ assert(params.ldmParams.hashRateLog < 32);
+ }
+
+ { size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize));
+ size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
+ U32 const divider = (params.cParams.minMatch==3) ? 3 : 4;
+ size_t const maxNbSeq = blockSize / divider;
+ size_t const buffOutSize = (zbuff == ZSTDb_buffered && params.outBufferMode == ZSTD_bm_buffered)
+ ? ZSTD_compressBound(blockSize) + 1
+ : 0;
+ size_t const buffInSize = (zbuff == ZSTDb_buffered && params.inBufferMode == ZSTD_bm_buffered)
+ ? windowSize + blockSize
+ : 0;
+ size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params.ldmParams, blockSize);
+
+ int const indexTooClose = ZSTD_indexTooCloseToMax(zc->blockState.matchState.window);
+ ZSTD_indexResetPolicy_e needsIndexReset =
+ (!indexTooClose && zc->initialized) ? ZSTDirp_continue : ZSTDirp_reset;
+
+ size_t const neededSpace =
+ ZSTD_estimateCCtxSize_usingCCtxParams_internal(
+ &params.cParams, &params.ldmParams, zc->staticSize != 0,
+ buffInSize, buffOutSize, pledgedSrcSize);
+ FORWARD_IF_ERROR(neededSpace, "cctx size estimate failed!");
+
+ if (!zc->staticSize) ZSTD_cwksp_bump_oversized_duration(ws, 0);
+
+ /* Check if workspace is large enough, alloc a new one if needed */
+ {
+ int const workspaceTooSmall = ZSTD_cwksp_sizeof(ws) < neededSpace;
+ int const workspaceWasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace);
+
+ DEBUGLOG(4, "Need %zu B workspace", neededSpace);
+ DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize);
+
+ if (workspaceTooSmall || workspaceWasteful) {
+ DEBUGLOG(4, "Resize workspaceSize from %zuKB to %zuKB",
+ ZSTD_cwksp_sizeof(ws) >> 10,
+ neededSpace >> 10);
+
+ RETURN_ERROR_IF(zc->staticSize, memory_allocation, "static cctx : no resize");
+
+ needsIndexReset = ZSTDirp_reset;
+
+ ZSTD_cwksp_free(ws, zc->customMem);
+ FORWARD_IF_ERROR(ZSTD_cwksp_create(ws, neededSpace, zc->customMem), "");
+
+ DEBUGLOG(5, "reserving object space");
+ /* Statically sized space.
+ * entropyWorkspace never moves,
+ * though prev/next block swap places */
+ assert(ZSTD_cwksp_check_available(ws, 2 * sizeof(ZSTD_compressedBlockState_t)));
+ zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
+ RETURN_ERROR_IF(zc->blockState.prevCBlock == NULL, memory_allocation, "couldn't allocate prevCBlock");
+ zc->blockState.nextCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
+ RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate nextCBlock");
+ zc->entropyWorkspace = (U32*) ZSTD_cwksp_reserve_object(ws, ENTROPY_WORKSPACE_SIZE);
+ RETURN_ERROR_IF(zc->entropyWorkspace == NULL, memory_allocation, "couldn't allocate entropyWorkspace");
+ } }
+
+ ZSTD_cwksp_clear(ws);
+
+ /* init params */
+ zc->appliedParams = params;
+ zc->blockState.matchState.cParams = params.cParams;
+ zc->pledgedSrcSizePlusOne = pledgedSrcSize+1;
+ zc->consumedSrcSize = 0;
+ zc->producedCSize = 0;
+ if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
+ zc->appliedParams.fParams.contentSizeFlag = 0;
+ DEBUGLOG(4, "pledged content size : %u ; flag : %u",
+ (unsigned)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag);
+ zc->blockSize = blockSize;
+
+ xxh64_reset(&zc->xxhState, 0);
+ zc->stage = ZSTDcs_init;
+ zc->dictID = 0;
+ zc->dictContentSize = 0;
+
+ ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock);
+
+ /* ZSTD_wildcopy() is used to copy into the literals buffer,
+ * so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes.
+ */
+ zc->seqStore.litStart = ZSTD_cwksp_reserve_buffer(ws, blockSize + WILDCOPY_OVERLENGTH);
+ zc->seqStore.maxNbLit = blockSize;
+
+ /* buffers */
+ zc->bufferedPolicy = zbuff;
+ zc->inBuffSize = buffInSize;
+ zc->inBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffInSize);
+ zc->outBuffSize = buffOutSize;
+ zc->outBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffOutSize);
+
+ /* ldm bucketOffsets table */
+ if (params.ldmParams.enableLdm) {
+ /* TODO: avoid memset? */
+ size_t const numBuckets =
+ ((size_t)1) << (params.ldmParams.hashLog -
+ params.ldmParams.bucketSizeLog);
+ zc->ldmState.bucketOffsets = ZSTD_cwksp_reserve_buffer(ws, numBuckets);
+ ZSTD_memset(zc->ldmState.bucketOffsets, 0, numBuckets);
+ }
+
+ /* sequences storage */
+ ZSTD_referenceExternalSequences(zc, NULL, 0);
+ zc->seqStore.maxNbSeq = maxNbSeq;
+ zc->seqStore.llCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
+ zc->seqStore.mlCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
+ zc->seqStore.ofCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
+ zc->seqStore.sequencesStart = (seqDef*)ZSTD_cwksp_reserve_aligned(ws, maxNbSeq * sizeof(seqDef));
+
+ FORWARD_IF_ERROR(ZSTD_reset_matchState(
+ &zc->blockState.matchState,
+ ws,
+ &params.cParams,
+ crp,
+ needsIndexReset,
+ ZSTD_resetTarget_CCtx), "");
+
+ /* ldm hash table */
+ if (params.ldmParams.enableLdm) {
+ /* TODO: avoid memset? */
+ size_t const ldmHSize = ((size_t)1) << params.ldmParams.hashLog;
+ zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned(ws, ldmHSize * sizeof(ldmEntry_t));
+ ZSTD_memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t));
+ zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned(ws, maxNbLdmSeq * sizeof(rawSeq));
+ zc->maxNbLdmSequences = maxNbLdmSeq;
+
+ ZSTD_window_init(&zc->ldmState.window);
+ ZSTD_window_clear(&zc->ldmState.window);
+ zc->ldmState.loadedDictEnd = 0;
+ }
+
+ /* Due to alignment, when reusing a workspace, we can actually consume
+ * up to 3 extra bytes for alignment. See the comments in zstd_cwksp.h
+ */
+ assert(ZSTD_cwksp_used(ws) >= neededSpace &&
+ ZSTD_cwksp_used(ws) <= neededSpace + 3);
+
+ DEBUGLOG(3, "wksp: finished allocating, %zd bytes remain available", ZSTD_cwksp_available_space(ws));
+ zc->initialized = 1;
+
+ return 0;
+ }
+}
+
+/* ZSTD_invalidateRepCodes() :
+ * ensures next compression will not use repcodes from previous block.
+ * Note : only works with regular variant;
+ * do not use with extDict variant ! */
+void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) {
+ int i;
+ for (i=0; i<ZSTD_REP_NUM; i++) cctx->blockState.prevCBlock->rep[i] = 0;
+ assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window));
+}
+
+/* These are the approximate sizes for each strategy past which copying the
+ * dictionary tables into the working context is faster than using them
+ * in-place.
+ */
+static const size_t attachDictSizeCutoffs[ZSTD_STRATEGY_MAX+1] = {
+ 8 KB, /* unused */
+ 8 KB, /* ZSTD_fast */
+ 16 KB, /* ZSTD_dfast */
+ 32 KB, /* ZSTD_greedy */
+ 32 KB, /* ZSTD_lazy */
+ 32 KB, /* ZSTD_lazy2 */
+ 32 KB, /* ZSTD_btlazy2 */
+ 32 KB, /* ZSTD_btopt */
+ 8 KB, /* ZSTD_btultra */
+ 8 KB /* ZSTD_btultra2 */
+};
+
+static int ZSTD_shouldAttachDict(const ZSTD_CDict* cdict,
+ const ZSTD_CCtx_params* params,
+ U64 pledgedSrcSize)
+{
+ size_t cutoff = attachDictSizeCutoffs[cdict->matchState.cParams.strategy];
+ int const dedicatedDictSearch = cdict->matchState.dedicatedDictSearch;
+ return dedicatedDictSearch
+ || ( ( pledgedSrcSize <= cutoff
+ || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
+ || params->attachDictPref == ZSTD_dictForceAttach )
+ && params->attachDictPref != ZSTD_dictForceCopy
+ && !params->forceWindow ); /* dictMatchState isn't correctly
+ * handled in _enforceMaxDist */
+}
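+/* Worked example for the attach-vs-copy decision above : with a cdict built
+ * using the ZSTD_lazy strategy (cutoff 32 KB), a 16 KB pledged source (or an
+ * unknown size) attaches the dictionary in place, while a 1 MB pledged source
+ * copies the tables into the working context, unless the caller forces one
+ * behaviour through ZSTD_c_forceAttachDict.
+ */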
+
+static size_t
+ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx,
+ const ZSTD_CDict* cdict,
+ ZSTD_CCtx_params params,
+ U64 pledgedSrcSize,
+ ZSTD_buffered_policy_e zbuff)
+{
+ {
+ ZSTD_compressionParameters adjusted_cdict_cParams = cdict->matchState.cParams;
+ unsigned const windowLog = params.cParams.windowLog;
+ assert(windowLog != 0);
+ /* Resize working context table params for input only, since the dict
+ * has its own tables. */
+ /* pledgedSrcSize == 0 means 0! */
+
+ if (cdict->matchState.dedicatedDictSearch) {
+ ZSTD_dedicatedDictSearch_revertCParams(&adjusted_cdict_cParams);
+ }
+
+ params.cParams = ZSTD_adjustCParams_internal(adjusted_cdict_cParams, pledgedSrcSize,
+ cdict->dictContentSize, ZSTD_cpm_attachDict);
+ params.cParams.windowLog = windowLog;
+ FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
+ ZSTDcrp_makeClean, zbuff), "");
+ assert(cctx->appliedParams.cParams.strategy == adjusted_cdict_cParams.strategy);
+ }
+
+ { const U32 cdictEnd = (U32)( cdict->matchState.window.nextSrc
+ - cdict->matchState.window.base);
+ const U32 cdictLen = cdictEnd - cdict->matchState.window.dictLimit;
+ if (cdictLen == 0) {
+ /* don't even attach dictionaries with no contents */
+ DEBUGLOG(4, "skipping attaching empty dictionary");
+ } else {
+ DEBUGLOG(4, "attaching dictionary into context");
+ cctx->blockState.matchState.dictMatchState = &cdict->matchState;
+
+ /* prep working match state so dict matches never have negative indices
+ * when they are translated to the working context's index space. */
+ if (cctx->blockState.matchState.window.dictLimit < cdictEnd) {
+ cctx->blockState.matchState.window.nextSrc =
+ cctx->blockState.matchState.window.base + cdictEnd;
+ ZSTD_window_clear(&cctx->blockState.matchState.window);
+ }
+ /* loadedDictEnd is expressed within the referential of the active context */
+ cctx->blockState.matchState.loadedDictEnd = cctx->blockState.matchState.window.dictLimit;
+ } }
+
+ cctx->dictID = cdict->dictID;
+ cctx->dictContentSize = cdict->dictContentSize;
+
+ /* copy block state */
+ ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));
+
+ return 0;
+}
+
+static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
+ const ZSTD_CDict* cdict,
+ ZSTD_CCtx_params params,
+ U64 pledgedSrcSize,
+ ZSTD_buffered_policy_e zbuff)
+{
+ const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams;
+
+ assert(!cdict->matchState.dedicatedDictSearch);
+
+ DEBUGLOG(4, "copying dictionary into context");
+
+ { unsigned const windowLog = params.cParams.windowLog;
+ assert(windowLog != 0);
+ /* Copy only compression parameters related to tables. */
+ params.cParams = *cdict_cParams;
+ params.cParams.windowLog = windowLog;
+ FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
+ ZSTDcrp_leaveDirty, zbuff), "");
+ assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
+ assert(cctx->appliedParams.cParams.hashLog == cdict_cParams->hashLog);
+ assert(cctx->appliedParams.cParams.chainLog == cdict_cParams->chainLog);
+ }
+
+ ZSTD_cwksp_mark_tables_dirty(&cctx->workspace);
+
+ /* copy tables */
+ { size_t const chainSize = (cdict_cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cdict_cParams->chainLog);
+ size_t const hSize = (size_t)1 << cdict_cParams->hashLog;
+
+ ZSTD_memcpy(cctx->blockState.matchState.hashTable,
+ cdict->matchState.hashTable,
+ hSize * sizeof(U32));
+ ZSTD_memcpy(cctx->blockState.matchState.chainTable,
+ cdict->matchState.chainTable,
+ chainSize * sizeof(U32));
+ }
+
+ /* Zero the hashTable3, since the cdict never fills it */
+ { int const h3log = cctx->blockState.matchState.hashLog3;
+ size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
+ assert(cdict->matchState.hashLog3 == 0);
+ ZSTD_memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32));
+ }
+
+ ZSTD_cwksp_mark_tables_clean(&cctx->workspace);
+
+ /* copy dictionary offsets */
+ { ZSTD_matchState_t const* srcMatchState = &cdict->matchState;
+ ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState;
+ dstMatchState->window = srcMatchState->window;
+ dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
+ dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
+ }
+
+ cctx->dictID = cdict->dictID;
+ cctx->dictContentSize = cdict->dictContentSize;
+
+ /* copy block state */
+ ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));
+
+ return 0;
+}
+
+/* We have a choice between copying the dictionary context into the working
+ * context, or referencing the dictionary context from the working context
+ * in-place. We decide here which strategy to use. */
+static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,
+ const ZSTD_CDict* cdict,
+ const ZSTD_CCtx_params* params,
+ U64 pledgedSrcSize,
+ ZSTD_buffered_policy_e zbuff)
+{
+
+ DEBUGLOG(4, "ZSTD_resetCCtx_usingCDict (pledgedSrcSize=%u)",
+ (unsigned)pledgedSrcSize);
+
+ if (ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) {
+ return ZSTD_resetCCtx_byAttachingCDict(
+ cctx, cdict, *params, pledgedSrcSize, zbuff);
+ } else {
+ return ZSTD_resetCCtx_byCopyingCDict(
+ cctx, cdict, *params, pledgedSrcSize, zbuff);
+ }
+}
+
+/*! ZSTD_copyCCtx_internal() :
+ * Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
+ * Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
+ * The "context", in this case, refers to the hash and chain tables,
+ * entropy tables, and dictionary references.
+ * `windowLog` value is enforced if != 0, otherwise value is copied from srcCCtx.
+ * @return : 0, or an error code */
+static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
+ const ZSTD_CCtx* srcCCtx,
+ ZSTD_frameParameters fParams,
+ U64 pledgedSrcSize,
+ ZSTD_buffered_policy_e zbuff)
+{
+ DEBUGLOG(5, "ZSTD_copyCCtx_internal");
+ RETURN_ERROR_IF(srcCCtx->stage!=ZSTDcs_init, stage_wrong,
+ "Can't copy a ctx that's not in init stage.");
+
+ ZSTD_memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
+ { ZSTD_CCtx_params params = dstCCtx->requestedParams;
+ /* Copy only compression parameters related to tables. */
+ params.cParams = srcCCtx->appliedParams.cParams;
+ params.fParams = fParams;
+ ZSTD_resetCCtx_internal(dstCCtx, params, pledgedSrcSize,
+ ZSTDcrp_leaveDirty, zbuff);
+ assert(dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog);
+ assert(dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy);
+ assert(dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog);
+ assert(dstCCtx->appliedParams.cParams.chainLog == srcCCtx->appliedParams.cParams.chainLog);
+ assert(dstCCtx->blockState.matchState.hashLog3 == srcCCtx->blockState.matchState.hashLog3);
+ }
+
+ ZSTD_cwksp_mark_tables_dirty(&dstCCtx->workspace);
+
+ /* copy tables */
+ { size_t const chainSize = (srcCCtx->appliedParams.cParams.strategy == ZSTD_fast) ? 0 : ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog);
+ size_t const hSize = (size_t)1 << srcCCtx->appliedParams.cParams.hashLog;
+ int const h3log = srcCCtx->blockState.matchState.hashLog3;
+ size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
+
+ ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable,
+ srcCCtx->blockState.matchState.hashTable,
+ hSize * sizeof(U32));
+ ZSTD_memcpy(dstCCtx->blockState.matchState.chainTable,
+ srcCCtx->blockState.matchState.chainTable,
+ chainSize * sizeof(U32));
+ ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable3,
+ srcCCtx->blockState.matchState.hashTable3,
+ h3Size * sizeof(U32));
+ }
+
+ ZSTD_cwksp_mark_tables_clean(&dstCCtx->workspace);
+
+ /* copy dictionary offsets */
+ {
+ const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState;
+ ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState;
+ dstMatchState->window = srcMatchState->window;
+ dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
+ dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
+ }
+ dstCCtx->dictID = srcCCtx->dictID;
+ dstCCtx->dictContentSize = srcCCtx->dictContentSize;
+
+ /* copy block state */
+ ZSTD_memcpy(dstCCtx->blockState.prevCBlock, srcCCtx->blockState.prevCBlock, sizeof(*srcCCtx->blockState.prevCBlock));
+
+ return 0;
+}
+
+/*! ZSTD_copyCCtx() :
+ * Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
+ * Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
+ * pledgedSrcSize==0 means "unknown".
+ * @return : 0, or an error code */
+size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize)
+{
+ ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
+ ZSTD_buffered_policy_e const zbuff = srcCCtx->bufferedPolicy;
+ ZSTD_STATIC_ASSERT((U32)ZSTDb_buffered==1);
+ if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
+ fParams.contentSizeFlag = (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN);
+
+ return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx,
+ fParams, pledgedSrcSize,
+ zbuff);
+}
+
+
+#define ZSTD_ROWSIZE 16
+/*! ZSTD_reduceTable() :
+ * reduce table indexes by `reducerValue`, or squash to zero.
+ * PreserveMark preserves "unsorted mark" for btlazy2 strategy.
+ * It must be set to a clear 0/1 value, to remove branch during inlining.
+ * Presume table size is a multiple of ZSTD_ROWSIZE
+ * to help auto-vectorization */
+FORCE_INLINE_TEMPLATE void
+ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerValue, int const preserveMark)
+{
+ int const nbRows = (int)size / ZSTD_ROWSIZE;
+ int cellNb = 0;
+ int rowNb;
+ assert((size & (ZSTD_ROWSIZE-1)) == 0); /* multiple of ZSTD_ROWSIZE */
+ assert(size < (1U<<31)); /* can be cast to int */
+
+
+ for (rowNb=0 ; rowNb < nbRows ; rowNb++) {
+ int column;
+ for (column=0; column<ZSTD_ROWSIZE; column++) {
+ if (preserveMark) {
+ U32 const adder = (table[cellNb] == ZSTD_DUBT_UNSORTED_MARK) ? reducerValue : 0;
+ table[cellNb] += adder;
+ }
+ if (table[cellNb] < reducerValue) table[cellNb] = 0;
+ else table[cellNb] -= reducerValue;
+ cellNb++;
+ } }
+}
+
+static void ZSTD_reduceTable(U32* const table, U32 const size, U32 const reducerValue)
+{
+ ZSTD_reduceTable_internal(table, size, reducerValue, 0);
+}
+
+static void ZSTD_reduceTable_btlazy2(U32* const table, U32 const size, U32 const reducerValue)
+{
+ ZSTD_reduceTable_internal(table, size, reducerValue, 1);
+}
+
+/*! ZSTD_reduceIndex() :
+ * rescale all indexes to avoid future overflow (indexes are U32) */
+static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const U32 reducerValue)
+{
+ { U32 const hSize = (U32)1 << params->cParams.hashLog;
+ ZSTD_reduceTable(ms->hashTable, hSize, reducerValue);
+ }
+
+ if (params->cParams.strategy != ZSTD_fast) {
+ U32 const chainSize = (U32)1 << params->cParams.chainLog;
+ if (params->cParams.strategy == ZSTD_btlazy2)
+ ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue);
+ else
+ ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue);
+ }
+
+ if (ms->hashLog3) {
+ U32 const h3Size = (U32)1 << ms->hashLog3;
+ ZSTD_reduceTable(ms->hashTable3, h3Size, reducerValue);
+ }
+}
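+/* Worked example for ZSTD_reduceIndex() : with reducerValue = 1000, a table
+ * entry holding index 1500 becomes 500, and an entry holding 600 (older than
+ * the reducer) is squashed to 0. For btlazy2, entries equal to
+ * ZSTD_DUBT_UNSORTED_MARK are first bumped by reducerValue, so the mark
+ * survives the subtraction unchanged.
+ */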
+
+
+/*-*******************************************************
+* Block entropic compression
+*********************************************************/
+
+/* See doc/zstd_compression_format.md for detailed format description */
+
+void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
+{
+ const seqDef* const sequences = seqStorePtr->sequencesStart;
+ BYTE* const llCodeTable = seqStorePtr->llCode;
+ BYTE* const ofCodeTable = seqStorePtr->ofCode;
+ BYTE* const mlCodeTable = seqStorePtr->mlCode;
+ U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+ U32 u;
+ assert(nbSeq <= seqStorePtr->maxNbSeq);
+ for (u=0; u<nbSeq; u++) {
+ U32 const llv = sequences[u].litLength;
+ U32 const mlv = sequences[u].matchLength;
+ llCodeTable[u] = (BYTE)ZSTD_LLcode(llv);
+ ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset);
+ mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv);
+ }
+ if (seqStorePtr->longLengthID==1)
+ llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
+ if (seqStorePtr->longLengthID==2)
+ mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
+}
+
+/* ZSTD_useTargetCBlockSize():
+ * Returns whether the target compressed block size param is being used.
+ * If used, compression will make a best effort to produce compressed blocks of around targetCBlockSize.
+ * Returns 1 if used, 0 otherwise. */
+static int ZSTD_useTargetCBlockSize(const ZSTD_CCtx_params* cctxParams)
+{
+ DEBUGLOG(5, "ZSTD_useTargetCBlockSize (targetCBlockSize=%zu)", cctxParams->targetCBlockSize);
+ return (cctxParams->targetCBlockSize != 0);
+}
+
+/* ZSTD_entropyCompressSequences_internal():
+ * actually compresses both literals and sequences */
+MEM_STATIC size_t
+ZSTD_entropyCompressSequences_internal(seqStore_t* seqStorePtr,
+ const ZSTD_entropyCTables_t* prevEntropy,
+ ZSTD_entropyCTables_t* nextEntropy,
+ const ZSTD_CCtx_params* cctxParams,
+ void* dst, size_t dstCapacity,
+ void* entropyWorkspace, size_t entropyWkspSize,
+ const int bmi2)
+{
+ const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
+ ZSTD_strategy const strategy = cctxParams->cParams.strategy;
+ unsigned* count = (unsigned*)entropyWorkspace;
+ FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable;
+ FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable;
+ FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable;
+ U32 LLtype, Offtype, MLtype; /* compressed, raw or rle */
+ const seqDef* const sequences = seqStorePtr->sequencesStart;
+ const BYTE* const ofCodeTable = seqStorePtr->ofCode;
+ const BYTE* const llCodeTable = seqStorePtr->llCode;
+ const BYTE* const mlCodeTable = seqStorePtr->mlCode;
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = ostart + dstCapacity;
+ BYTE* op = ostart;
+ size_t const nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+ BYTE* seqHead;
+ BYTE* lastNCount = NULL;
+
+ entropyWorkspace = count + (MaxSeq + 1);
+ entropyWkspSize -= (MaxSeq + 1) * sizeof(*count);
+
+ DEBUGLOG(4, "ZSTD_entropyCompressSequences_internal (nbSeq=%zu)", nbSeq);
+ ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
+ assert(entropyWkspSize >= HUF_WORKSPACE_SIZE);
+
+ /* Compress literals */
+ { const BYTE* const literals = seqStorePtr->litStart;
+ size_t const litSize = (size_t)(seqStorePtr->lit - literals);
+ size_t const cSize = ZSTD_compressLiterals(
+ &prevEntropy->huf, &nextEntropy->huf,
+ cctxParams->cParams.strategy,
+ ZSTD_disableLiteralsCompression(cctxParams),
+ op, dstCapacity,
+ literals, litSize,
+ entropyWorkspace, entropyWkspSize,
+ bmi2);
+ FORWARD_IF_ERROR(cSize, "ZSTD_compressLiterals failed");
+ assert(cSize <= dstCapacity);
+ op += cSize;
+ }
+
+ /* Sequences Header */
+ RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
+ dstSize_tooSmall, "Can't fit seq hdr in output buf!");
+ if (nbSeq < 128) {
+ *op++ = (BYTE)nbSeq;
+ } else if (nbSeq < LONGNBSEQ) {
+ op[0] = (BYTE)((nbSeq>>8) + 0x80);
+ op[1] = (BYTE)nbSeq;
+ op+=2;
+ } else {
+ op[0]=0xFF;
+ MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ));
+ op+=3;
+ }
+ assert(op <= oend);
+ if (nbSeq==0) {
+ /* Copy the old tables over as if we repeated them */
+ ZSTD_memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse));
+ return (size_t)(op - ostart);
+ }
+
+ /* seqHead : flags for FSE encoding type */
+ seqHead = op++;
+ assert(op <= oend);
+
+ /* convert length/distances into codes */
+ ZSTD_seqToCodes(seqStorePtr);
+ /* build CTable for Literal Lengths */
+ { unsigned max = MaxLL;
+ size_t const mostFrequent = HIST_countFast_wksp(count, &max, llCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
+ DEBUGLOG(5, "Building LL table");
+ nextEntropy->fse.litlength_repeatMode = prevEntropy->fse.litlength_repeatMode;
+ LLtype = ZSTD_selectEncodingType(&nextEntropy->fse.litlength_repeatMode,
+ count, max, mostFrequent, nbSeq,
+ LLFSELog, prevEntropy->fse.litlengthCTable,
+ LL_defaultNorm, LL_defaultNormLog,
+ ZSTD_defaultAllowed, strategy);
+ assert(set_basic < set_compressed && set_rle < set_compressed);
+ assert(!(LLtype < set_compressed && nextEntropy->fse.litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
+ { size_t const countSize = ZSTD_buildCTable(
+ op, (size_t)(oend - op),
+ CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
+ count, max, llCodeTable, nbSeq,
+ LL_defaultNorm, LL_defaultNormLog, MaxLL,
+ prevEntropy->fse.litlengthCTable,
+ sizeof(prevEntropy->fse.litlengthCTable),
+ entropyWorkspace, entropyWkspSize);
+ FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for LitLens failed");
+ if (LLtype == set_compressed)
+ lastNCount = op;
+ op += countSize;
+ assert(op <= oend);
+ } }
+ /* build CTable for Offsets */
+ { unsigned max = MaxOff;
+ size_t const mostFrequent = HIST_countFast_wksp(
+ count, &max, ofCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
+ /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
+ ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
+ DEBUGLOG(5, "Building OF table");
+ nextEntropy->fse.offcode_repeatMode = prevEntropy->fse.offcode_repeatMode;
+ Offtype = ZSTD_selectEncodingType(&nextEntropy->fse.offcode_repeatMode,
+ count, max, mostFrequent, nbSeq,
+ OffFSELog, prevEntropy->fse.offcodeCTable,
+ OF_defaultNorm, OF_defaultNormLog,
+ defaultPolicy, strategy);
+ assert(!(Offtype < set_compressed && nextEntropy->fse.offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
+ { size_t const countSize = ZSTD_buildCTable(
+ op, (size_t)(oend - op),
+ CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
+ count, max, ofCodeTable, nbSeq,
+ OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
+ prevEntropy->fse.offcodeCTable,
+ sizeof(prevEntropy->fse.offcodeCTable),
+ entropyWorkspace, entropyWkspSize);
+ FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for Offsets failed");
+ if (Offtype == set_compressed)
+ lastNCount = op;
+ op += countSize;
+ assert(op <= oend);
+ } }
+ /* build CTable for MatchLengths */
+ { unsigned max = MaxML;
+ size_t const mostFrequent = HIST_countFast_wksp(
+ count, &max, mlCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
+ DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
+ nextEntropy->fse.matchlength_repeatMode = prevEntropy->fse.matchlength_repeatMode;
+ MLtype = ZSTD_selectEncodingType(&nextEntropy->fse.matchlength_repeatMode,
+ count, max, mostFrequent, nbSeq,
+ MLFSELog, prevEntropy->fse.matchlengthCTable,
+ ML_defaultNorm, ML_defaultNormLog,
+ ZSTD_defaultAllowed, strategy);
+ assert(!(MLtype < set_compressed && nextEntropy->fse.matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
+ { size_t const countSize = ZSTD_buildCTable(
+ op, (size_t)(oend - op),
+ CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
+ count, max, mlCodeTable, nbSeq,
+ ML_defaultNorm, ML_defaultNormLog, MaxML,
+ prevEntropy->fse.matchlengthCTable,
+ sizeof(prevEntropy->fse.matchlengthCTable),
+ entropyWorkspace, entropyWkspSize);
+ FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for MatchLengths failed");
+ if (MLtype == set_compressed)
+ lastNCount = op;
+ op += countSize;
+ assert(op <= oend);
+ } }
+
+ *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
+
+ { size_t const bitstreamSize = ZSTD_encodeSequences(
+ op, (size_t)(oend - op),
+ CTable_MatchLength, mlCodeTable,
+ CTable_OffsetBits, ofCodeTable,
+ CTable_LitLength, llCodeTable,
+ sequences, nbSeq,
+ longOffsets, bmi2);
+ FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed");
+ op += bitstreamSize;
+ assert(op <= oend);
+ /* zstd versions <= 1.3.4 mistakenly report corruption when
+ * FSE_readNCount() receives a buffer < 4 bytes.
+ * Fixed by https://github.com/facebook/zstd/pull/1146.
+ * This can happen when the last set_compressed table present is 2
+ * bytes and the bitstream is only one byte.
+ * In this exceedingly rare case, we will simply emit an uncompressed
+ * block, since it isn't worth optimizing.
+ */
+ if (lastNCount && (op - lastNCount) < 4) {
+ /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
+ assert(op - lastNCount == 3);
+ DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
+ "emitting an uncompressed block.");
+ return 0;
+ }
+ }
+
+ DEBUGLOG(5, "compressed block size : %u", (unsigned)(op - ostart));
+ return (size_t)(op - ostart);
+}
+
+MEM_STATIC size_t
+ZSTD_entropyCompressSequences(seqStore_t* seqStorePtr,
+ const ZSTD_entropyCTables_t* prevEntropy,
+ ZSTD_entropyCTables_t* nextEntropy,
+ const ZSTD_CCtx_params* cctxParams,
+ void* dst, size_t dstCapacity,
+ size_t srcSize,
+ void* entropyWorkspace, size_t entropyWkspSize,
+ int bmi2)
+{
+ size_t const cSize = ZSTD_entropyCompressSequences_internal(
+ seqStorePtr, prevEntropy, nextEntropy, cctxParams,
+ dst, dstCapacity,
+ entropyWorkspace, entropyWkspSize, bmi2);
+ if (cSize == 0) return 0;
+ /* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block.
+ * If entropy coding reported a lack of space, the block cannot be compressible,
+ * so fall back to a raw uncompressed block.
+ */
+ if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity))
+ return 0; /* block not compressed */
+ FORWARD_IF_ERROR(cSize, "ZSTD_entropyCompressSequences_internal failed");
+
+ /* Check compressibility */
+ { size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy);
+ if (cSize >= maxCSize) return 0; /* block not compressed */
+ }
+ DEBUGLOG(4, "ZSTD_entropyCompressSequences() cSize: %zu\n", cSize);
+ return cSize;
+}
+
+/* ZSTD_selectBlockCompressor() :
+ * Not static, but internal use only (used by long distance matcher)
+ * assumption : strat is a valid strategy */
+ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode)
+{
+ static const ZSTD_blockCompressor blockCompressor[4][ZSTD_STRATEGY_MAX+1] = {
+ { ZSTD_compressBlock_fast /* default for 0 */,
+ ZSTD_compressBlock_fast,
+ ZSTD_compressBlock_doubleFast,
+ ZSTD_compressBlock_greedy,
+ ZSTD_compressBlock_lazy,
+ ZSTD_compressBlock_lazy2,
+ ZSTD_compressBlock_btlazy2,
+ ZSTD_compressBlock_btopt,
+ ZSTD_compressBlock_btultra,
+ ZSTD_compressBlock_btultra2 },
+ { ZSTD_compressBlock_fast_extDict /* default for 0 */,
+ ZSTD_compressBlock_fast_extDict,
+ ZSTD_compressBlock_doubleFast_extDict,
+ ZSTD_compressBlock_greedy_extDict,
+ ZSTD_compressBlock_lazy_extDict,
+ ZSTD_compressBlock_lazy2_extDict,
+ ZSTD_compressBlock_btlazy2_extDict,
+ ZSTD_compressBlock_btopt_extDict,
+ ZSTD_compressBlock_btultra_extDict,
+ ZSTD_compressBlock_btultra_extDict },
+ { ZSTD_compressBlock_fast_dictMatchState /* default for 0 */,
+ ZSTD_compressBlock_fast_dictMatchState,
+ ZSTD_compressBlock_doubleFast_dictMatchState,
+ ZSTD_compressBlock_greedy_dictMatchState,
+ ZSTD_compressBlock_lazy_dictMatchState,
+ ZSTD_compressBlock_lazy2_dictMatchState,
+ ZSTD_compressBlock_btlazy2_dictMatchState,
+ ZSTD_compressBlock_btopt_dictMatchState,
+ ZSTD_compressBlock_btultra_dictMatchState,
+ ZSTD_compressBlock_btultra_dictMatchState },
+ { NULL /* default for 0 */,
+ NULL,
+ NULL,
+ ZSTD_compressBlock_greedy_dedicatedDictSearch,
+ ZSTD_compressBlock_lazy_dedicatedDictSearch,
+ ZSTD_compressBlock_lazy2_dedicatedDictSearch,
+ NULL,
+ NULL,
+ NULL,
+ NULL }
+ };
+ ZSTD_blockCompressor selectedCompressor;
+ ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1);
+
+ assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
+ selectedCompressor = blockCompressor[(int)dictMode][(int)strat];
+ assert(selectedCompressor != NULL);
+ return selectedCompressor;
+}
+
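+/* ZSTD_storeLastLiterals() :
+ * Appends the trailing literals, which follow the last sequence of the block,
+ * into the literal buffer of the sequence store. */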
+static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr,
+ const BYTE* anchor, size_t lastLLSize)
+{
+ ZSTD_memcpy(seqStorePtr->lit, anchor, lastLLSize);
+ seqStorePtr->lit += lastLLSize;
+}
+
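+/* ZSTD_resetSeqStore() :
+ * Rewinds the sequence store to an empty state, so that the next block
+ * starts collecting literals and sequences from scratch. */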
+void ZSTD_resetSeqStore(seqStore_t* ssPtr)
+{
+ ssPtr->lit = ssPtr->litStart;
+ ssPtr->sequences = ssPtr->sequencesStart;
+ ssPtr->longLengthID = 0;
+}
+
+typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_buildSeqStore_e;
+
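+/* ZSTD_buildSeqStore() :
+ * Runs the selected block compressor (or consumes externally provided /
+ * long-distance-matcher sequences) over `src`, filling zc->seqStore with the
+ * resulting sequences and literals.
+ * @return : ZSTDbss_compress, ZSTDbss_noCompress for blocks too small to be
+ * worth compressing, or an error code. */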
+static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
+{
+ ZSTD_matchState_t* const ms = &zc->blockState.matchState;
+ DEBUGLOG(5, "ZSTD_buildSeqStore (srcSize=%zu)", srcSize);
+ assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
+ /* Assert that we have correctly flushed the ctx params into the ms's copy */
+ ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams);
+ if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
+ if (zc->appliedParams.cParams.strategy >= ZSTD_btopt) {
+ ZSTD_ldm_skipRawSeqStoreBytes(&zc->externSeqStore, srcSize);
+ } else {
+ ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.minMatch);
+ }
+ return ZSTDbss_noCompress; /* don't even attempt compression below a certain srcSize */
+ }
+ ZSTD_resetSeqStore(&(zc->seqStore));
+ /* required for optimal parser to read stats from dictionary */
+ ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy;
+ /* tell the optimal parser how we expect to compress literals */
+ ms->opt.literalCompressionMode = zc->appliedParams.literalCompressionMode;
+ /* a gap between an attached dict and the current window is not safe,
+ * they must remain adjacent,
+ * and when that stops being the case, the dict must be unset */
+ assert(ms->dictMatchState == NULL || ms->loadedDictEnd == ms->window.dictLimit);
+
+ /* limited update after a very long match */
+ { const BYTE* const base = ms->window.base;
+ const BYTE* const istart = (const BYTE*)src;
+ const U32 curr = (U32)(istart-base);
+ if (sizeof(ptrdiff_t)==8) assert(istart - base < (ptrdiff_t)(U32)(-1)); /* ensure no overflow */
+ if (curr > ms->nextToUpdate + 384)
+ ms->nextToUpdate = curr - MIN(192, (U32)(curr - ms->nextToUpdate - 384));
+ }
+
+ /* select and store sequences */
+ { ZSTD_dictMode_e const dictMode = ZSTD_matchState_dictMode(ms);
+ size_t lastLLSize;
+ { int i;
+ for (i = 0; i < ZSTD_REP_NUM; ++i)
+ zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i];
+ }
+ if (zc->externSeqStore.pos < zc->externSeqStore.size) {
+ assert(!zc->appliedParams.ldmParams.enableLdm);
+ /* Updates ldmSeqStore.pos */
+ lastLLSize =
+ ZSTD_ldm_blockCompress(&zc->externSeqStore,
+ ms, &zc->seqStore,
+ zc->blockState.nextCBlock->rep,
+ src, srcSize);
+ assert(zc->externSeqStore.pos <= zc->externSeqStore.size);
+ } else if (zc->appliedParams.ldmParams.enableLdm) {
+ rawSeqStore_t ldmSeqStore = kNullRawSeqStore;
+
+ ldmSeqStore.seq = zc->ldmSequences;
+ ldmSeqStore.capacity = zc->maxNbLdmSequences;
+ /* Updates ldmSeqStore.size */
+ FORWARD_IF_ERROR(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore,
+ &zc->appliedParams.ldmParams,
+ src, srcSize), "");
+ /* Updates ldmSeqStore.pos */
+ lastLLSize =
+ ZSTD_ldm_blockCompress(&ldmSeqStore,
+ ms, &zc->seqStore,
+ zc->blockState.nextCBlock->rep,
+ src, srcSize);
+ assert(ldmSeqStore.pos == ldmSeqStore.size);
+ } else { /* not long range mode */
+ ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, dictMode);
+ ms->ldmSeqStore = NULL;
+ lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);
+ }
+ { const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize;
+ ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize);
+ } }
+ return ZSTDbss_compress;
+}
+
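+/* ZSTD_copyBlockSequences() :
+ * Copies the sequences of the current block from the internal seqStore into
+ * the user-facing ZSTD_Sequence array held by the seqCollector, translating
+ * repcodes into raw offsets, and appends a final (of: 0, ml: 0) entry that
+ * carries the last literals and marks the block boundary. */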
+static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
+{
+ const seqStore_t* seqStore = ZSTD_getSeqStore(zc);
+ const seqDef* seqStoreSeqs = seqStore->sequencesStart;
+ size_t seqStoreSeqSize = seqStore->sequences - seqStoreSeqs;
+ size_t seqStoreLiteralsSize = (size_t)(seqStore->lit - seqStore->litStart);
+ size_t literalsRead = 0;
+ size_t lastLLSize;
+
+ ZSTD_Sequence* outSeqs = &zc->seqCollector.seqStart[zc->seqCollector.seqIndex];
+ size_t i;
+ repcodes_t updatedRepcodes;
+
+ assert(zc->seqCollector.seqIndex + 1 < zc->seqCollector.maxSequences);
+ /* Ensure we have enough space for last literals "sequence" */
+ assert(zc->seqCollector.maxSequences >= seqStoreSeqSize + 1);
+ ZSTD_memcpy(updatedRepcodes.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
+ for (i = 0; i < seqStoreSeqSize; ++i) {
+ U32 rawOffset = seqStoreSeqs[i].offset - ZSTD_REP_NUM;
+ outSeqs[i].litLength = seqStoreSeqs[i].litLength;
+ outSeqs[i].matchLength = seqStoreSeqs[i].matchLength + MINMATCH;
+ outSeqs[i].rep = 0;
+
+ if (i == seqStore->longLengthPos) {
+ if (seqStore->longLengthID == 1) {
+ outSeqs[i].litLength += 0x10000;
+ } else if (seqStore->longLengthID == 2) {
+ outSeqs[i].matchLength += 0x10000;
+ }
+ }
+
+ if (seqStoreSeqs[i].offset <= ZSTD_REP_NUM) {
+ /* Derive the correct offset corresponding to a repcode */
+ outSeqs[i].rep = seqStoreSeqs[i].offset;
+ if (outSeqs[i].litLength != 0) {
+ rawOffset = updatedRepcodes.rep[outSeqs[i].rep - 1];
+ } else {
+ if (outSeqs[i].rep == 3) {
+ rawOffset = updatedRepcodes.rep[0] - 1;
+ } else {
+ rawOffset = updatedRepcodes.rep[outSeqs[i].rep];
+ }
+ }
+ }
+ outSeqs[i].offset = rawOffset;
+ /* seqStoreSeqs[i].offset == offCode+1, and ZSTD_updateRep() expects offCode
+ so we provide seqStoreSeqs[i].offset - 1 */
+ updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep,
+ seqStoreSeqs[i].offset - 1,
+ seqStoreSeqs[i].litLength == 0);
+ literalsRead += outSeqs[i].litLength;
+ }
+ /* Insert last literals (if any exist) in the block as a sequence with ml == off == 0.
+ * If there are no last literals, then we'll emit (of: 0, ml: 0, ll: 0), which is a marker
+ * for the block boundary, according to the API.
+ */
+ assert(seqStoreLiteralsSize >= literalsRead);
+ lastLLSize = seqStoreLiteralsSize - literalsRead;
+ outSeqs[i].litLength = (U32)lastLLSize;
+ outSeqs[i].matchLength = outSeqs[i].offset = outSeqs[i].rep = 0;
+ seqStoreSeqSize++;
+ zc->seqCollector.seqIndex += seqStoreSeqSize;
+}
+
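+/* ZSTD_generateSequences() :
+ * Runs a full compression pass in sequence-collection mode : the sequences
+ * produced for each block are copied into `outSeqs` instead of being
+ * entropy-coded. The frame output itself goes to a temporary buffer which is
+ * freed before returning.
+ * @return : number of sequences collected. */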
+size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
+ size_t outSeqsSize, const void* src, size_t srcSize)
+{
+ const size_t dstCapacity = ZSTD_compressBound(srcSize);
+ void* dst = ZSTD_customMalloc(dstCapacity, ZSTD_defaultCMem);
+ SeqCollector seqCollector;
+
+ RETURN_ERROR_IF(dst == NULL, memory_allocation, "NULL pointer!");
+
+ seqCollector.collectSequences = 1;
+ seqCollector.seqStart = outSeqs;
+ seqCollector.seqIndex = 0;
+ seqCollector.maxSequences = outSeqsSize;
+ zc->seqCollector = seqCollector;
+
+ { size_t const ret = ZSTD_compress2(zc, dst, dstCapacity, src, srcSize);
+ ZSTD_customFree(dst, ZSTD_defaultCMem);
+ /* propagate a compression error instead of returning a partial sequence count */
+ FORWARD_IF_ERROR(ret, "ZSTD_compress2 failed");
+ }
+ return zc->seqCollector.seqIndex;
+}
+
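+/* ZSTD_mergeBlockDelimiters() :
+ * Removes the (offset: 0, matchLength: 0) block-delimiter entries produced by
+ * ZSTD_generateSequences(), folding each delimiter's litLength into the
+ * following sequence.
+ * @return : number of sequences remaining after the merge. */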
+size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize) {
+ size_t in = 0;
+ size_t out = 0;
+ for (; in < seqsSize; ++in) {
+ if (sequences[in].offset == 0 && sequences[in].matchLength == 0) {
+ if (in != seqsSize - 1) {
+ sequences[in+1].litLength += sequences[in].litLength;
+ }
+ } else {
+ sequences[out] = sequences[in];
+ ++out;
+ }
+ }
+ return out;
+}
+
+/* Unrolled loop that reads four size_t words of input at a time. Returns 1 if the input is RLE, 0 if not. */
+static int ZSTD_isRLE(const BYTE* src, size_t length) {
+ const BYTE* ip = src;
+ const BYTE value = ip[0];
+ const size_t valueST = (size_t)((U64)value * 0x0101010101010101ULL);
+ const size_t unrollSize = sizeof(size_t) * 4;
+ const size_t unrollMask = unrollSize - 1;
+ const size_t prefixLength = length & unrollMask;
+ size_t i;
+ size_t u;
+ if (length == 1) return 1;
+ /* Check if prefix is RLE first before using unrolled loop */
+ if (prefixLength && ZSTD_count(ip+1, ip, ip+prefixLength) != prefixLength-1) {
+ return 0;
+ }
+ for (i = prefixLength; i != length; i += unrollSize) {
+ for (u = 0; u < unrollSize; u += sizeof(size_t)) {
+ if (MEM_readST(ip + i + u) != valueST) {
+ return 0;
+ }
+ }
+ }
+ return 1;
+}
+
+/* Returns true if the given block may be RLE.
+ * This is just a heuristic based on the compressibility.
+ * It may return both false positives and false negatives.
+ */
+static int ZSTD_maybeRLE(seqStore_t const* seqStore)
+{
+ size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart);
+ size_t const nbLits = (size_t)(seqStore->lit - seqStore->litStart);
+
+ return nbSeqs < 4 && nbLits < 10;
+}
+
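+/* Swap the prev/next block states, so that the repcodes and entropy tables
+ * built for the current block become the reference for the next block. */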
+static void ZSTD_confirmRepcodesAndEntropyTables(ZSTD_CCtx* zc)
+{
+ ZSTD_compressedBlockState_t* const tmp = zc->blockState.prevCBlock;
+ zc->blockState.prevCBlock = zc->blockState.nextCBlock;
+ zc->blockState.nextCBlock = tmp;
+}
+
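+/* ZSTD_compressBlock_internal() :
+ * Builds the sequence store for one block, then entropy-compresses it.
+ * @return : size of the block contents written into `dst` (1 for an RLE
+ * block), 0 if the block is not compressible, or an error code.
+ * The block header itself is written by the caller. */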
+static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize, U32 frame)
+{
+ /* This is the upper bound for the length of an rle block.
+ * This isn't the actual upper bound. Finding the real threshold
+ * needs further investigation.
+ */
+ const U32 rleMaxLength = 25;
+ size_t cSize;
+ const BYTE* ip = (const BYTE*)src;
+ BYTE* op = (BYTE*)dst;
+ DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
+ (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit,
+ (unsigned)zc->blockState.matchState.nextToUpdate);
+
+ { const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
+ FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
+ if (bss == ZSTDbss_noCompress) { cSize = 0; goto out; }
+ }
+
+ if (zc->seqCollector.collectSequences) {
+ ZSTD_copyBlockSequences(zc);
+ ZSTD_confirmRepcodesAndEntropyTables(zc);
+ return 0;
+ }
+
+ /* encode sequences and literals */
+ cSize = ZSTD_entropyCompressSequences(&zc->seqStore,
+ &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
+ &zc->appliedParams,
+ dst, dstCapacity,
+ srcSize,
+ zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
+ zc->bmi2);
+
+ if (zc->seqCollector.collectSequences) {
+ ZSTD_copyBlockSequences(zc);
+ return 0;
+ }
+
+
+ if (frame &&
+ /* We don't want to emit our first block as an RLE block even if it qualifies,
+ * because doing so will cause the decoder (cli only) to throw a
+ * "should consume all input" error. This is only an issue for zstd <= v1.4.3.
+ */
+ !zc->isFirstBlock &&
+ cSize < rleMaxLength &&
+ ZSTD_isRLE(ip, srcSize))
+ {
+ cSize = 1;
+ op[0] = ip[0];
+ }
+
+out:
+ if (!ZSTD_isError(cSize) && cSize > 1) {
+ ZSTD_confirmRepcodesAndEntropyTables(zc);
+ }
+ /* We check that dictionaries have offset codes available for the first
+ * block. After the first block, the offcode table might not have large
+ * enough codes to represent the offsets in the data.
+ */
+ if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
+ zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
+
+ return cSize;
+}
+
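+/* ZSTD_compressBlock_targetCBlockSize_body() :
+ * Tries an RLE block first, then ZSTD_compressSuperBlock() (which splits the
+ * block into sub-blocks sized for the target compressed block size), and
+ * falls back to an uncompressed block if neither gives an acceptable result. */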
+static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const size_t bss, U32 lastBlock)
+{
+ DEBUGLOG(6, "Attempting ZSTD_compressSuperBlock()");
+ if (bss == ZSTDbss_compress) {
+ if (/* We don't want to emit our first block as an RLE block even if it qualifies,
+ * because doing so will cause the decoder (cli only) to throw a
+ * "should consume all input" error. This is only an issue for zstd <= v1.4.3.
+ */
+ !zc->isFirstBlock &&
+ ZSTD_maybeRLE(&zc->seqStore) &&
+ ZSTD_isRLE((BYTE const*)src, srcSize))
+ {
+ return ZSTD_rleCompressBlock(dst, dstCapacity, *(BYTE const*)src, srcSize, lastBlock);
+ }
+ /* Attempt superblock compression.
+ *
+ * Note that compressed size of ZSTD_compressSuperBlock() is not bound by the
+ * standard ZSTD_compressBound(). This is a problem, because even if we have
+ * space now, taking an extra byte now could cause us to run out of space later
+ * and violate ZSTD_compressBound().
+ *
+ * Define blockBound(blockSize) = blockSize + ZSTD_blockHeaderSize.
+ *
+ * In order to respect ZSTD_compressBound() we must attempt to emit a raw
+ * uncompressed block in these cases:
+ * * cSize == 0: Return code for an uncompressed block.
+ * * cSize == dstSize_tooSmall: We may have expanded beyond blockBound(srcSize).
+ * ZSTD_noCompressBlock() will return dstSize_tooSmall if we are really out of
+ * output space.
+ * * cSize >= blockBound(srcSize): We have expanded the block too much so
+ * emit an uncompressed block.
+ */
+ {
+ size_t const cSize = ZSTD_compressSuperBlock(zc, dst, dstCapacity, src, srcSize, lastBlock);
+ if (cSize != ERROR(dstSize_tooSmall)) {
+ size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy);
+ FORWARD_IF_ERROR(cSize, "ZSTD_compressSuperBlock failed");
+ if (cSize != 0 && cSize < maxCSize + ZSTD_blockHeaderSize) {
+ ZSTD_confirmRepcodesAndEntropyTables(zc);
+ return cSize;
+ }
+ }
+ }
+ }
+
+ DEBUGLOG(6, "Resorting to ZSTD_noCompressBlock()");
+ /* Superblock compression failed; attempt to emit a single uncompressed (raw) block.
+ * The decoder will be able to stream this block since it is uncompressed.
+ */
+ return ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock);
+}
+
+static size_t ZSTD_compressBlock_targetCBlockSize(ZSTD_CCtx* zc,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ U32 lastBlock)
+{
+ size_t cSize = 0;
+ const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
+ DEBUGLOG(5, "ZSTD_compressBlock_targetCBlockSize (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u, srcSize=%zu)",
+ (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit, (unsigned)zc->blockState.matchState.nextToUpdate, srcSize);
+ FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
+
+ cSize = ZSTD_compressBlock_targetCBlockSize_body(zc, dst, dstCapacity, src, srcSize, bss, lastBlock);
+ FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize_body failed");
+
+ if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
+ zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
+
+ return cSize;
+}
+
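+/* ZSTD_overflowCorrectIfNeeded() :
+ * When the window indexes come close to overflowing, rescales them, reduces
+ * the match-finder tables by the same correction, and invalidates any
+ * attached dictionary. */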
+static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms,
+ ZSTD_cwksp* ws,
+ ZSTD_CCtx_params const* params,
+ void const* ip,
+ void const* iend)
+{
+ if (ZSTD_window_needOverflowCorrection(ms->window, iend)) {
+ U32 const maxDist = (U32)1 << params->cParams.windowLog;
+ U32 const cycleLog = ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy);
+ U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip);
+ ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
+ ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
+ ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
+ ZSTD_cwksp_mark_tables_dirty(ws);
+ ZSTD_reduceIndex(ms, params, correction);
+ ZSTD_cwksp_mark_tables_clean(ws);
+ if (ms->nextToUpdate < correction) ms->nextToUpdate = 0;
+ else ms->nextToUpdate -= correction;
+ /* invalidate dictionaries on overflow correction */
+ ms->loadedDictEnd = 0;
+ ms->dictMatchState = NULL;
+ }
+}
+
+/*! ZSTD_compress_frameChunk() :
+* Compress a chunk of data into one or multiple blocks.
+* All blocks will be terminated, all input will be consumed.
+* Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
+* Frame is supposed already started (header already produced)
+* @return : compressed size, or an error code
+*/
+static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ U32 lastFrameChunk)
+{
+ size_t blockSize = cctx->blockSize;
+ size_t remaining = srcSize;
+ const BYTE* ip = (const BYTE*)src;
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* op = ostart;
+ U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;
+
+ assert(cctx->appliedParams.cParams.windowLog <= ZSTD_WINDOWLOG_MAX);
+
+ DEBUGLOG(4, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize);
+ if (cctx->appliedParams.fParams.checksumFlag && srcSize)
+ xxh64_update(&cctx->xxhState, src, srcSize);
+
+ while (remaining) {
+ ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
+ U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
+
+ RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE,
+ dstSize_tooSmall,
+ "not enough space to store compressed block");
+ if (remaining < blockSize) blockSize = remaining;
+
+ ZSTD_overflowCorrectIfNeeded(
+ ms, &cctx->workspace, &cctx->appliedParams, ip, ip + blockSize);
+ ZSTD_checkDictValidity(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);
+
+ /* Ensure hash/chain table insertion resumes no sooner than lowlimit */
+ if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit;
+
+ { size_t cSize;
+ if (ZSTD_useTargetCBlockSize(&cctx->appliedParams)) {
+ cSize = ZSTD_compressBlock_targetCBlockSize(cctx, op, dstCapacity, ip, blockSize, lastBlock);
+ FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize failed");
+ assert(cSize > 0);
+ assert(cSize <= blockSize + ZSTD_blockHeaderSize);
+ } else {
+ cSize = ZSTD_compressBlock_internal(cctx,
+ op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize,
+ ip, blockSize, 1 /* frame */);
+ FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_internal failed");
+
+ if (cSize == 0) { /* block is not compressible */
+ cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
+ FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
+ } else {
+ U32 const cBlockHeader = cSize == 1 ?
+ lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) :
+ lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
+ MEM_writeLE24(op, cBlockHeader);
+ cSize += ZSTD_blockHeaderSize;
+ }
+ }
+
+
+ ip += blockSize;
+ assert(remaining >= blockSize);
+ remaining -= blockSize;
+ op += cSize;
+ assert(dstCapacity >= cSize);
+ dstCapacity -= cSize;
+ cctx->isFirstBlock = 0;
+ DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u",
+ (unsigned)cSize);
+ } }
+
+ if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending;
+ return (size_t)(op-ostart);
+}
+
+
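+/* ZSTD_writeFrameHeader() :
+ * Writes the frame header into `dst` : magic number (zstd1 format only),
+ * frame header descriptor, optional window descriptor, dictID and frame
+ * content size fields.
+ * @return : number of bytes written, or an error code if `dstCapacity` is
+ * too small. */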
+static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
+ const ZSTD_CCtx_params* params, U64 pledgedSrcSize, U32 dictID)
+{ BYTE* const op = (BYTE*)dst;
+ U32 const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536); /* 0-3 */
+ U32 const dictIDSizeCode = params->fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength; /* 0-3 */
+ U32 const checksumFlag = params->fParams.checksumFlag>0;
+ U32 const windowSize = (U32)1 << params->cParams.windowLog;
+ U32 const singleSegment = params->fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
+ BYTE const windowLogByte = (BYTE)((params->cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
+ U32 const fcsCode = params->fParams.contentSizeFlag ?
+ (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0; /* 0-3 */
+ BYTE const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
+ size_t pos=0;
+
+ assert(!(params->fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
+ RETURN_ERROR_IF(dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX, dstSize_tooSmall,
+ "dst buf is too small to fit worst-case frame header size.");
+ DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
+ !params->fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode);
+ if (params->format == ZSTD_f_zstd1) {
+ MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
+ pos = 4;
+ }
+ op[pos++] = frameHeaderDescriptionByte;
+ if (!singleSegment) op[pos++] = windowLogByte;
+ switch(dictIDSizeCode)
+ {
+ default:
+ assert(0); /* impossible */
+ ZSTD_FALLTHROUGH;
+ case 0 : break;
+ case 1 : op[pos] = (BYTE)(dictID); pos++; break;
+ case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break;
+ case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break;
+ }
+ switch(fcsCode)
+ {
+ default:
+ assert(0); /* impossible */
+ ZSTD_FALLTHROUGH;
+ case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break;
+ case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break;
+ case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break;
+ case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break;
+ }
+ return pos;
+}
+
+/* ZSTD_writeSkippableFrame() :
+ * Writes out a skippable frame with the specified magic number variant (16 are supported),
+ * from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15, and the desired source data.
+ *
+ * Returns the total number of bytes written, or a ZSTD error code.
+ */
+size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize, unsigned magicVariant) {
+ BYTE* op = (BYTE*)dst;
+ RETURN_ERROR_IF(dstCapacity < srcSize + ZSTD_SKIPPABLEHEADERSIZE /* Skippable frame overhead */,
+ dstSize_tooSmall, "Not enough room for skippable frame");
+ RETURN_ERROR_IF(srcSize > (unsigned)0xFFFFFFFF, srcSize_wrong, "Src size too large for skippable frame");
+ RETURN_ERROR_IF(magicVariant > 15, parameter_outOfBound, "Skippable frame magic number variant not supported");
+
+ MEM_writeLE32(op, (U32)(ZSTD_MAGIC_SKIPPABLE_START + magicVariant));
+ MEM_writeLE32(op+4, (U32)srcSize);
+ ZSTD_memcpy(op+8, src, srcSize);
+ return srcSize + ZSTD_SKIPPABLEHEADERSIZE;
+}
+
+/* ZSTD_writeLastEmptyBlock() :
+ * output an empty Block with end-of-frame mark to complete a frame
+ * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
+ * or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
+ */
+size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
+{
+ RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall,
+ "dst buf is too small to write frame trailer empty block.");
+ { U32 const cBlockHeader24 = 1 /*lastBlock*/ + (((U32)bt_raw)<<1); /* 0 size */
+ MEM_writeLE24(dst, cBlockHeader24);
+ return ZSTD_blockHeaderSize;
+ }
+}
+
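+/* ZSTD_referenceExternalSequences() :
+ * Attaches a caller-provided array of raw sequences to the cctx; following
+ * blocks consume these sequences instead of running the match finder.
+ * Only valid right after init, and incompatible with long-distance matching. */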
+size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
+{
+ RETURN_ERROR_IF(cctx->stage != ZSTDcs_init, stage_wrong,
+ "wrong cctx stage");
+ RETURN_ERROR_IF(cctx->appliedParams.ldmParams.enableLdm,
+ parameter_unsupported,
+ "incompatible with ldm");
+ cctx->externSeqStore.seq = seq;
+ cctx->externSeqStore.size = nbSeq;
+ cctx->externSeqStore.capacity = nbSeq;
+ cctx->externSeqStore.pos = 0;
+ cctx->externSeqStore.posInSequence = 0;
+ return 0;
+}
+
+
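+/* ZSTD_compressContinue_internal() :
+ * Writes the frame header on the first call (frame mode), updates the
+ * compression window(s), then compresses `src` as one or several blocks.
+ * @return : number of bytes written into `dst`, or an error code. */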
+static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ U32 frame, U32 lastFrameChunk)
+{
+ ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
+ size_t fhSize = 0;
+
+ DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
+ cctx->stage, (unsigned)srcSize);
+ RETURN_ERROR_IF(cctx->stage==ZSTDcs_created, stage_wrong,
+ "missing init (ZSTD_compressBegin)");
+
+ if (frame && (cctx->stage==ZSTDcs_init)) {
+ fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams,
+ cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
+ FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
+ assert(fhSize <= dstCapacity);
+ dstCapacity -= fhSize;
+ dst = (char*)dst + fhSize;
+ cctx->stage = ZSTDcs_ongoing;
+ }
+
+ if (!srcSize) return fhSize; /* do not generate an empty block if no input */
+
+ if (!ZSTD_window_update(&ms->window, src, srcSize)) {
+ ms->nextToUpdate = ms->window.dictLimit;
+ }
+ if (cctx->appliedParams.ldmParams.enableLdm) {
+ ZSTD_window_update(&cctx->ldmState.window, src, srcSize);
+ }
+
+ if (!frame) {
+ /* overflow check and correction for block mode */
+ ZSTD_overflowCorrectIfNeeded(
+ ms, &cctx->workspace, &cctx->appliedParams,
+ src, (BYTE const*)src + srcSize);
+ }
+
+ DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSize);
+ { size_t const cSize = frame ?
+ ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
+ ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize, 0 /* frame */);
+ FORWARD_IF_ERROR(cSize, "%s", frame ? "ZSTD_compress_frameChunk failed" : "ZSTD_compressBlock_internal failed");
+ cctx->consumedSrcSize += srcSize;
+ cctx->producedCSize += (cSize + fhSize);
+ assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
+ if (cctx->pledgedSrcSizePlusOne != 0) { /* control src size */
+ ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
+ RETURN_ERROR_IF(
+ cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne,
+ srcSize_wrong,
+ "error : pledgedSrcSize = %u, while realSrcSize >= %u",
+ (unsigned)cctx->pledgedSrcSizePlusOne-1,
+ (unsigned)cctx->consumedSrcSize);
+ }
+ return cSize + fhSize;
+ }
+}
+
+size_t ZSTD_compressContinue (ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (unsigned)srcSize);
+ return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */);
+}
+
+
+size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx)
+{
+ ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams;
+ assert(!ZSTD_checkCParams(cParams));
+ return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog);
+}
+
+size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ DEBUGLOG(5, "ZSTD_compressBlock: srcSize = %u", (unsigned)srcSize);
+ { size_t const blockSizeMax = ZSTD_getBlockSize(cctx);
+ RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong, "input is larger than a block"); }
+
+ return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);
+}
+
+/*! ZSTD_loadDictionaryContent() :
+ * @return : 0, or an error code
+ */
+static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
+ ldmState_t* ls,
+ ZSTD_cwksp* ws,
+ ZSTD_CCtx_params const* params,
+ const void* src, size_t srcSize,
+ ZSTD_dictTableLoadMethod_e dtlm)
+{
+ const BYTE* ip = (const BYTE*) src;
+ const BYTE* const iend = ip + srcSize;
+
+ ZSTD_window_update(&ms->window, src, srcSize);
+ ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base);
+
+ if (params->ldmParams.enableLdm && ls != NULL) {
+ ZSTD_window_update(&ls->window, src, srcSize);
+ ls->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ls->window.base);
+ }
+
+ /* Assert that the ms params match the params we're being given */
+ ZSTD_assertEqualCParams(params->cParams, ms->cParams);
+
+ if (srcSize <= HASH_READ_SIZE) return 0;
+
+ while (iend - ip > HASH_READ_SIZE) {
+ size_t const remaining = (size_t)(iend - ip);
+ size_t const chunk = MIN(remaining, ZSTD_CHUNKSIZE_MAX);
+ const BYTE* const ichunk = ip + chunk;
+
+ ZSTD_overflowCorrectIfNeeded(ms, ws, params, ip, ichunk);
+
+ if (params->ldmParams.enableLdm && ls != NULL)
+ ZSTD_ldm_fillHashTable(ls, (const BYTE*)src, (const BYTE*)src + srcSize, &params->ldmParams);
+
+ switch(params->cParams.strategy)
+ {
+ case ZSTD_fast:
+ ZSTD_fillHashTable(ms, ichunk, dtlm);
+ break;
+ case ZSTD_dfast:
+ ZSTD_fillDoubleHashTable(ms, ichunk, dtlm);
+ break;
+
+ case ZSTD_greedy:
+ case ZSTD_lazy:
+ case ZSTD_lazy2:
+ if (chunk >= HASH_READ_SIZE && ms->dedicatedDictSearch) {
+ assert(chunk == remaining); /* must load everything in one go */
+ ZSTD_dedicatedDictSearch_lazy_loadDictionary(ms, ichunk-HASH_READ_SIZE);
+ } else if (chunk >= HASH_READ_SIZE) {
+ ZSTD_insertAndFindFirstIndex(ms, ichunk-HASH_READ_SIZE);
+ }
+ break;
+
+ case ZSTD_btlazy2: /* we want the dictionary table fully sorted */
+ case ZSTD_btopt:
+ case ZSTD_btultra:
+ case ZSTD_btultra2:
+ if (chunk >= HASH_READ_SIZE)
+ ZSTD_updateTree(ms, ichunk-HASH_READ_SIZE, ichunk);
+ break;
+
+ default:
+ assert(0); /* not possible : not a valid strategy id */
+ }
+
+ ip = ichunk;
+ }
+
+ ms->nextToUpdate = (U32)(iend - ms->window.base);
+ return 0;
+}
+
+
+/* Dictionaries that assign zero probability to symbols that actually show up
+ * cause problems when FSE encoding. Mark dictionaries with zero-probability
+ * symbols as FSE_repeat_check; only dictionaries with 100% valid symbols can
+ * be assumed valid.
+ */
+static FSE_repeat ZSTD_dictNCountRepeat(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue)
+{
+ U32 s;
+ if (dictMaxSymbolValue < maxSymbolValue) {
+ return FSE_repeat_check;
+ }
+ for (s = 0; s <= maxSymbolValue; ++s) {
+ if (normalizedCounter[s] == 0) {
+ return FSE_repeat_check;
+ }
+ }
+ return FSE_repeat_valid;
+}
+
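+/* ZSTD_loadCEntropy() :
+ * Reads the entropy section of a zstd dictionary : Huffman literals table,
+ * offset / match-length / literal-length FSE tables, and the 3 starting
+ * repcodes, storing them into `bs`.
+ * @return : number of bytes consumed from the start of `dict`, or an error code. */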
+size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
+ const void* const dict, size_t dictSize)
+{
+ short offcodeNCount[MaxOff+1];
+ unsigned offcodeMaxValue = MaxOff;
+ const BYTE* dictPtr = (const BYTE*)dict; /* skip magic num and dict ID */
+ const BYTE* const dictEnd = dictPtr + dictSize;
+ dictPtr += 8;
+ bs->entropy.huf.repeatMode = HUF_repeat_check;
+
+ { unsigned maxSymbolValue = 255;
+ unsigned hasZeroWeights = 1;
+ size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr,
+ dictEnd-dictPtr, &hasZeroWeights);
+
+ /* We only set the loaded table as valid if it contains all non-zero
+ * weights. Otherwise, we set it to check */
+ if (!hasZeroWeights)
+ bs->entropy.huf.repeatMode = HUF_repeat_valid;
+
+ RETURN_ERROR_IF(HUF_isError(hufHeaderSize), dictionary_corrupted, "");
+ RETURN_ERROR_IF(maxSymbolValue < 255, dictionary_corrupted, "");
+ dictPtr += hufHeaderSize;
+ }
+
+ { unsigned offcodeLog;
+ size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
+ RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, "");
+ RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, "");
+ /* fill all offset symbols to avoid garbage at end of table */
+ RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
+ bs->entropy.fse.offcodeCTable,
+ offcodeNCount, MaxOff, offcodeLog,
+ workspace, HUF_WORKSPACE_SIZE)),
+ dictionary_corrupted, "");
+ /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
+ dictPtr += offcodeHeaderSize;
+ }
+
+ { short matchlengthNCount[MaxML+1];
+ unsigned matchlengthMaxValue = MaxML, matchlengthLog;
+ size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
+ RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, "");
+ RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, "");
+ RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
+ bs->entropy.fse.matchlengthCTable,
+ matchlengthNCount, matchlengthMaxValue, matchlengthLog,
+ workspace, HUF_WORKSPACE_SIZE)),
+ dictionary_corrupted, "");
+ bs->entropy.fse.matchlength_repeatMode = ZSTD_dictNCountRepeat(matchlengthNCount, matchlengthMaxValue, MaxML);
+ dictPtr += matchlengthHeaderSize;
+ }
+
+ { short litlengthNCount[MaxLL+1];
+ unsigned litlengthMaxValue = MaxLL, litlengthLog;
+ size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
+ RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, "");
+ RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, "");
+ RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
+ bs->entropy.fse.litlengthCTable,
+ litlengthNCount, litlengthMaxValue, litlengthLog,
+ workspace, HUF_WORKSPACE_SIZE)),
+ dictionary_corrupted, "");
+ bs->entropy.fse.litlength_repeatMode = ZSTD_dictNCountRepeat(litlengthNCount, litlengthMaxValue, MaxLL);
+ dictPtr += litlengthHeaderSize;
+ }
+
+ RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted, "");
+ bs->rep[0] = MEM_readLE32(dictPtr+0);
+ bs->rep[1] = MEM_readLE32(dictPtr+4);
+ bs->rep[2] = MEM_readLE32(dictPtr+8);
+ dictPtr += 12;
+
+ { size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
+ U32 offcodeMax = MaxOff;
+ if (dictContentSize <= ((U32)-1) - 128 KB) {
+ U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */
+ offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */
+ }
+ /* All offset values <= dictContentSize + 128 KB must be representable for a valid table */
+ bs->entropy.fse.offcode_repeatMode = ZSTD_dictNCountRepeat(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff));
+
+ /* All repCodes must be <= dictContentSize and != 0 */
+ { U32 u;
+ for (u=0; u<3; u++) {
+ RETURN_ERROR_IF(bs->rep[u] == 0, dictionary_corrupted, "");
+ RETURN_ERROR_IF(bs->rep[u] > dictContentSize, dictionary_corrupted, "");
+ } } }
+
+ return dictPtr - (const BYTE*)dict;
+}
+
+/* Dictionary format :
+ * See :
+ * https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#dictionary-format
+ */
+/*! ZSTD_loadZstdDictionary() :
+ * @return : dictID, or an error code
+ * assumptions : magic number supposed already checked
+ * dictSize supposed >= 8
+ */
+static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
+ ZSTD_matchState_t* ms,
+ ZSTD_cwksp* ws,
+ ZSTD_CCtx_params const* params,
+ const void* dict, size_t dictSize,
+ ZSTD_dictTableLoadMethod_e dtlm,
+ void* workspace)
+{
+ const BYTE* dictPtr = (const BYTE*)dict;
+ const BYTE* const dictEnd = dictPtr + dictSize;
+ size_t dictID;
+ size_t eSize;
+
+ ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
+ assert(dictSize >= 8);
+ assert(MEM_readLE32(dictPtr) == ZSTD_MAGIC_DICTIONARY);
+
+ dictID = params->fParams.noDictIDFlag ? 0 : MEM_readLE32(dictPtr + 4 /* skip magic number */ );
+ eSize = ZSTD_loadCEntropy(bs, workspace, dict, dictSize);
+ FORWARD_IF_ERROR(eSize, "ZSTD_loadCEntropy failed");
+ dictPtr += eSize;
+
+ {
+ size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
+ FORWARD_IF_ERROR(ZSTD_loadDictionaryContent(
+ ms, NULL, ws, params, dictPtr, dictContentSize, dtlm), "");
+ }
+ return dictID;
+}
+
+/* ZSTD_compress_insertDictionary() :
+* @return : dictID, or an error code */
+static size_t
+ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
+ ZSTD_matchState_t* ms,
+ ldmState_t* ls,
+ ZSTD_cwksp* ws,
+ const ZSTD_CCtx_params* params,
+ const void* dict, size_t dictSize,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_dictTableLoadMethod_e dtlm,
+ void* workspace)
+{
+ DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize);
+ if ((dict==NULL) || (dictSize<8)) {
+ RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
+ return 0;
+ }
+
+ ZSTD_reset_compressedBlockState(bs);
+
+ /* dict restricted modes */
+ if (dictContentType == ZSTD_dct_rawContent)
+ return ZSTD_loadDictionaryContent(ms, ls, ws, params, dict, dictSize, dtlm);
+
+ if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) {
+ if (dictContentType == ZSTD_dct_auto) {
+ DEBUGLOG(4, "raw content dictionary detected");
+ return ZSTD_loadDictionaryContent(
+ ms, ls, ws, params, dict, dictSize, dtlm);
+ }
+ RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
+ assert(0); /* impossible */
+ }
+
+ /* dict as full zstd dictionary */
+ return ZSTD_loadZstdDictionary(
+ bs, ms, ws, params, dict, dictSize, dtlm, workspace);
+}
+
+#define ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF (128 KB)
+#define ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER (6ULL)
+
+/*! ZSTD_compressBegin_internal() :
+ * @return : 0, or an error code */
+static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
+ const void* dict, size_t dictSize,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_dictTableLoadMethod_e dtlm,
+ const ZSTD_CDict* cdict,
+ const ZSTD_CCtx_params* params, U64 pledgedSrcSize,
+ ZSTD_buffered_policy_e zbuff)
+{
+ DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params->cParams.windowLog);
+ /* params are supposed to be fully validated at this point */
+ assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
+ assert(!((dict) && (cdict))); /* either dict or cdict, not both */
+ if ( (cdict)
+ && (cdict->dictContentSize > 0)
+ && ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
+ || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
+ || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
+ || cdict->compressionLevel == 0)
+ && (params->attachDictPref != ZSTD_dictForceLoad) ) {
+ return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff);
+ }
+
+ FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, *params, pledgedSrcSize,
+ ZSTDcrp_makeClean, zbuff) , "");
+ { size_t const dictID = cdict ?
+ ZSTD_compress_insertDictionary(
+ cctx->blockState.prevCBlock, &cctx->blockState.matchState,
+ &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, cdict->dictContent,
+ cdict->dictContentSize, cdict->dictContentType, dtlm,
+ cctx->entropyWorkspace)
+ : ZSTD_compress_insertDictionary(
+ cctx->blockState.prevCBlock, &cctx->blockState.matchState,
+ &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, dict, dictSize,
+ dictContentType, dtlm, cctx->entropyWorkspace);
+ FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
+ assert(dictID <= UINT_MAX);
+ cctx->dictID = (U32)dictID;
+ cctx->dictContentSize = cdict ? cdict->dictContentSize : dictSize;
+ }
+ return 0;
+}
+
+size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
+ const void* dict, size_t dictSize,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_dictTableLoadMethod_e dtlm,
+ const ZSTD_CDict* cdict,
+ const ZSTD_CCtx_params* params,
+ unsigned long long pledgedSrcSize)
+{
+ DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params->cParams.windowLog);
+ /* compression parameters verification and optimization */
+ FORWARD_IF_ERROR( ZSTD_checkCParams(params->cParams) , "");
+ return ZSTD_compressBegin_internal(cctx,
+ dict, dictSize, dictContentType, dtlm,
+ cdict,
+ params, pledgedSrcSize,
+ ZSTDb_not_buffered);
+}
+
+/*! ZSTD_compressBegin_advanced() :
+* @return : 0, or an error code */
+size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
+ const void* dict, size_t dictSize,
+ ZSTD_parameters params, unsigned long long pledgedSrcSize)
+{
+ ZSTD_CCtx_params cctxParams;
+ ZSTD_CCtxParams_init_internal(&cctxParams, &params, ZSTD_NO_CLEVEL);
+ return ZSTD_compressBegin_advanced_internal(cctx,
+ dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast,
+ NULL /*cdict*/,
+ &cctxParams, pledgedSrcSize);
+}
+
+size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
+{
+ ZSTD_CCtx_params cctxParams;
+ {
+ ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_noAttachDict);
+ ZSTD_CCtxParams_init_internal(&cctxParams, &params, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel);
+ }
+ DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize);
+ return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
+ &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
+}
+
+size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel)
+{
+ return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel);
+}
+
+
+/*! ZSTD_writeEpilogue() :
+* Ends a frame.
+* @return : nb of bytes written into dst (or an error code) */
+static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
+{
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* op = ostart;
+ size_t fhSize = 0;
+
+ DEBUGLOG(4, "ZSTD_writeEpilogue");
+ RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing");
+
+ /* special case : empty frame */
+ if (cctx->stage == ZSTDcs_init) {
+ fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0);
+ FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
+ dstCapacity -= fhSize;
+ op += fhSize;
+ cctx->stage = ZSTDcs_ongoing;
+ }
+
+ if (cctx->stage != ZSTDcs_ending) {
+ /* write one last empty block, make it the "last" block */
+ U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
+ RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for epilogue");
+ MEM_writeLE32(op, cBlockHeader24);
+ op += ZSTD_blockHeaderSize;
+ dstCapacity -= ZSTD_blockHeaderSize;
+ }
+
+ if (cctx->appliedParams.fParams.checksumFlag) {
+ U32 const checksum = (U32) xxh64_digest(&cctx->xxhState);
+ RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum");
+ DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", (unsigned)checksum);
+ MEM_writeLE32(op, checksum);
+ op += 4;
+ }
+
+ cctx->stage = ZSTDcs_created; /* return to "created but no init" status */
+ return op-ostart;
+}
+
+void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize)
+{
+ (void)cctx;
+ (void)extraCSize;
+}
+
+size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ size_t endResult;
+ size_t const cSize = ZSTD_compressContinue_internal(cctx,
+ dst, dstCapacity, src, srcSize,
+ 1 /* frame mode */, 1 /* last chunk */);
+ FORWARD_IF_ERROR(cSize, "ZSTD_compressContinue_internal failed");
+ endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
+ FORWARD_IF_ERROR(endResult, "ZSTD_writeEpilogue failed");
+ assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
+ if (cctx->pledgedSrcSizePlusOne != 0) { /* control src size */
+ ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
+ DEBUGLOG(4, "end of frame : controlling src size");
+ RETURN_ERROR_IF(
+ cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1,
+ srcSize_wrong,
+ "error : pledgedSrcSize = %u, while realSrcSize = %u",
+ (unsigned)cctx->pledgedSrcSizePlusOne-1,
+ (unsigned)cctx->consumedSrcSize);
+ }
+ ZSTD_CCtx_trace(cctx, endResult);
+ return cSize + endResult;
+}
+
+size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize,
+ ZSTD_parameters params)
+{
+ ZSTD_CCtx_params cctxParams;
+ DEBUGLOG(4, "ZSTD_compress_advanced");
+ FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams), "");
+ ZSTD_CCtxParams_init_internal(&cctxParams, &params, ZSTD_NO_CLEVEL);
+ return ZSTD_compress_advanced_internal(cctx,
+ dst, dstCapacity,
+ src, srcSize,
+ dict, dictSize,
+ &cctxParams);
+}
+
+/* Internal */
+size_t ZSTD_compress_advanced_internal(
+ ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize,
+ const ZSTD_CCtx_params* params)
+{
+ DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (unsigned)srcSize);
+ FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
+ dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
+ params, srcSize, ZSTDb_not_buffered) , "");
+ return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
+}
+
+size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict, size_t dictSize,
+ int compressionLevel)
+{
+ ZSTD_CCtx_params cctxParams;
+ {
+ ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, srcSize, dict ? dictSize : 0, ZSTD_cpm_noAttachDict);
+ assert(params.fParams.contentSizeFlag == 1);
+ ZSTD_CCtxParams_init_internal(&cctxParams, &params, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT: compressionLevel);
+ }
+ DEBUGLOG(4, "ZSTD_compress_usingDict (srcSize=%u)", (unsigned)srcSize);
+ return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, &cctxParams);
+}
+
+size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ int compressionLevel)
+{
+ DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (unsigned)srcSize);
+ assert(cctx != NULL);
+ return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel);
+}
+
+size_t ZSTD_compress(void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ int compressionLevel)
+{
+ size_t result;
+ ZSTD_CCtx* cctx = ZSTD_createCCtx();
+ RETURN_ERROR_IF(!cctx, memory_allocation, "ZSTD_createCCtx failed");
+ result = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, compressionLevel);
+ ZSTD_freeCCtx(cctx);
+ return result;
+}
+
+
+/* ===== Dictionary API ===== */
+
+/*! ZSTD_estimateCDictSize_advanced() :
+ * Estimate the amount of memory needed to create a dictionary with the following arguments */
+size_t ZSTD_estimateCDictSize_advanced(
+ size_t dictSize, ZSTD_compressionParameters cParams,
+ ZSTD_dictLoadMethod_e dictLoadMethod)
+{
+ DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (unsigned)sizeof(ZSTD_CDict));
+ return ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
+ + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
+ + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0)
+ + (dictLoadMethod == ZSTD_dlm_byRef ? 0
+ : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void *))));
+}
+
+size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel)
+{
+ ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
+ return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy);
+}
+
+size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
+{
+ if (cdict==NULL) return 0; /* support sizeof on NULL */
+ DEBUGLOG(5, "sizeof(*cdict) : %u", (unsigned)sizeof(*cdict));
+ /* cdict may be in the workspace */
+ return (cdict->workspace.workspace == cdict ? 0 : sizeof(*cdict))
+ + ZSTD_cwksp_sizeof(&cdict->workspace);
+}
+
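+/* ZSTD_initCDict_internal() :
+ * Fills an already-allocated ZSTD_CDict : copies (or references) the
+ * dictionary content, resets the match state, then loads the dictionary
+ * into the cdict's block state and match-finder tables. */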
+static size_t ZSTD_initCDict_internal(
+ ZSTD_CDict* cdict,
+ const void* dictBuffer, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_CCtx_params params)
+{
+ DEBUGLOG(3, "ZSTD_initCDict_internal (dictContentType:%u)", (unsigned)dictContentType);
+ assert(!ZSTD_checkCParams(params.cParams));
+ cdict->matchState.cParams = params.cParams;
+ cdict->matchState.dedicatedDictSearch = params.enableDedicatedDictSearch;
+ if (cdict->matchState.dedicatedDictSearch && dictSize > ZSTD_CHUNKSIZE_MAX) {
+ cdict->matchState.dedicatedDictSearch = 0;
+ }
+ if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {
+ cdict->dictContent = dictBuffer;
+ } else {
+ void *internalBuffer = ZSTD_cwksp_reserve_object(&cdict->workspace, ZSTD_cwksp_align(dictSize, sizeof(void*)));
+ RETURN_ERROR_IF(!internalBuffer, memory_allocation, "NULL pointer!");
+ cdict->dictContent = internalBuffer;
+ ZSTD_memcpy(internalBuffer, dictBuffer, dictSize);
+ }
+ cdict->dictContentSize = dictSize;
+ cdict->dictContentType = dictContentType;
+
+ cdict->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cdict->workspace, HUF_WORKSPACE_SIZE);
+
+
+ /* Reset the state to no dictionary */
+ ZSTD_reset_compressedBlockState(&cdict->cBlockState);
+ FORWARD_IF_ERROR(ZSTD_reset_matchState(
+ &cdict->matchState,
+ &cdict->workspace,
+ &params.cParams,
+ ZSTDcrp_makeClean,
+ ZSTDirp_reset,
+ ZSTD_resetTarget_CDict), "");
+ /* (Maybe) load the dictionary
+ * Skips loading the dictionary if it is < 8 bytes.
+ */
+ { params.compressionLevel = ZSTD_CLEVEL_DEFAULT;
+ params.fParams.contentSizeFlag = 1;
+ { size_t const dictID = ZSTD_compress_insertDictionary(
+ &cdict->cBlockState, &cdict->matchState, NULL, &cdict->workspace,
+ &params, cdict->dictContent, cdict->dictContentSize,
+ dictContentType, ZSTD_dtlm_full, cdict->entropyWorkspace);
+ FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
+ assert(dictID <= (size_t)(U32)-1);
+ cdict->dictID = (U32)dictID;
+ }
+ }
+
+ return 0;
+}
+
+static ZSTD_CDict* ZSTD_createCDict_advanced_internal(size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_compressionParameters cParams, ZSTD_customMem customMem)
+{
+ if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
+
+ { size_t const workspaceSize =
+ ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) +
+ ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) +
+ ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0) +
+ (dictLoadMethod == ZSTD_dlm_byRef ? 0
+ : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))));
+ void* const workspace = ZSTD_customMalloc(workspaceSize, customMem);
+ ZSTD_cwksp ws;
+ ZSTD_CDict* cdict;
+
+ if (!workspace) {
+ ZSTD_customFree(workspace, customMem);
+ return NULL;
+ }
+
+ ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_dynamic_alloc);
+
+ cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
+ assert(cdict != NULL);
+ ZSTD_cwksp_move(&cdict->workspace, &ws);
+ cdict->customMem = customMem;
+ cdict->compressionLevel = ZSTD_NO_CLEVEL; /* signals advanced API usage */
+
+ return cdict;
+ }
+}
+
+ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_compressionParameters cParams,
+ ZSTD_customMem customMem)
+{
+ ZSTD_CCtx_params cctxParams;
+ ZSTD_memset(&cctxParams, 0, sizeof(cctxParams));
+ ZSTD_CCtxParams_init(&cctxParams, 0);
+ cctxParams.cParams = cParams;
+ cctxParams.customMem = customMem;
+ return ZSTD_createCDict_advanced2(
+ dictBuffer, dictSize,
+ dictLoadMethod, dictContentType,
+ &cctxParams, customMem);
+}
+
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced2(
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ const ZSTD_CCtx_params* originalCctxParams,
+ ZSTD_customMem customMem)
+{
+ ZSTD_CCtx_params cctxParams = *originalCctxParams;
+ ZSTD_compressionParameters cParams;
+ ZSTD_CDict* cdict;
+
+ DEBUGLOG(3, "ZSTD_createCDict_advanced2, mode %u", (unsigned)dictContentType);
+ if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
+
+ if (cctxParams.enableDedicatedDictSearch) {
+ cParams = ZSTD_dedicatedDictSearch_getCParams(
+ cctxParams.compressionLevel, dictSize);
+ ZSTD_overrideCParams(&cParams, &cctxParams.cParams);
+ } else {
+ cParams = ZSTD_getCParamsFromCCtxParams(
+ &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
+ }
+
+ if (!ZSTD_dedicatedDictSearch_isSupported(&cParams)) {
+ /* Fall back to non-DDSS params */
+ cctxParams.enableDedicatedDictSearch = 0;
+ cParams = ZSTD_getCParamsFromCCtxParams(
+ &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
+ }
+
+ cctxParams.cParams = cParams;
+
+ cdict = ZSTD_createCDict_advanced_internal(dictSize,
+ dictLoadMethod, cctxParams.cParams,
+ customMem);
+
+ if (!cdict /* allocation failure */ || ZSTD_isError( ZSTD_initCDict_internal(cdict,
+ dict, dictSize,
+ dictLoadMethod, dictContentType,
+ cctxParams) )) {
+ ZSTD_freeCDict(cdict);
+ return NULL;
+ }
+
+ return cdict;
+}
+
+ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel)
+{
+ ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
+ ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize,
+ ZSTD_dlm_byCopy, ZSTD_dct_auto,
+ cParams, ZSTD_defaultCMem);
+ if (cdict)
+ cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
+ return cdict;
+}
+
+ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel)
+{
+ ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
+ ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize,
+ ZSTD_dlm_byRef, ZSTD_dct_auto,
+ cParams, ZSTD_defaultCMem);
+ if (cdict)
+ cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
+ return cdict;
+}
+
+size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
+{
+ if (cdict==NULL) return 0; /* support free on NULL */
+ { ZSTD_customMem const cMem = cdict->customMem;
+ int cdictInWorkspace = ZSTD_cwksp_owns_buffer(&cdict->workspace, cdict);
+ ZSTD_cwksp_free(&cdict->workspace, cMem);
+ if (!cdictInWorkspace) {
+ ZSTD_customFree(cdict, cMem);
+ }
+ return 0;
+ }
+}
+
+/*! ZSTD_initStaticCDict() :
+ * Generate a digested dictionary in provided memory area.
+ * workspace: The memory area to emplace the dictionary into.
+ * Provided pointer must be 8-bytes aligned.
+ * It must outlive dictionary usage.
+ * workspaceSize: Use ZSTD_estimateCDictSize()
+ * to determine how large workspace must be.
+ * cParams : use ZSTD_getCParams() to transform a compression level
+ * into its relevant cParams.
+ * @return : pointer to ZSTD_CDict*, or NULL if error (size too small)
+ * Note : there is no corresponding "free" function.
+ * Since workspace was allocated externally, it must be freed externally.
+ */
+const ZSTD_CDict* ZSTD_initStaticCDict(
+ void* workspace, size_t workspaceSize,
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_compressionParameters cParams)
+{
+ size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0);
+ size_t const neededSize = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
+ + (dictLoadMethod == ZSTD_dlm_byRef ? 0
+ : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))))
+ + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
+ + matchStateSize;
+ ZSTD_CDict* cdict;
+ ZSTD_CCtx_params params;
+
+ if ((size_t)workspace & 7) return NULL; /* 8-aligned */
+
+ {
+ ZSTD_cwksp ws;
+ ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc);
+ cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
+ if (cdict == NULL) return NULL;
+ ZSTD_cwksp_move(&cdict->workspace, &ws);
+ }
+
+ DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u",
+ (unsigned)workspaceSize, (unsigned)neededSize, (unsigned)(workspaceSize < neededSize));
+ if (workspaceSize < neededSize) return NULL;
+
+ ZSTD_CCtxParams_init(&params, 0);
+ params.cParams = cParams;
+
+ if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
+ dict, dictSize,
+ dictLoadMethod, dictContentType,
+ params) ))
+ return NULL;
+
+ return cdict;
+}
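+
+/* Illustrative sketch (not part of the zstd sources) : one plausible way a caller
+ * with a preallocated, 8-byte-aligned buffer could digest a dictionary in place,
+ * following the contract documented above. The helper name and the `wksp` buffer
+ * are hypothetical; sizing the buffer via ZSTD_estimateCDictSize() is assumed to
+ * be available, as referenced in the comment above.
+ */
+#if 0 /* example only, kept out of the build */
+static const ZSTD_CDict* example_makeStaticCDict(void* wksp, size_t wkspSize,
+                                                 const void* dict, size_t dictSize,
+                                                 int level)
+{
+    /* derive cParams for the chosen level and dictionary size */
+    ZSTD_compressionParameters const cParams =
+        ZSTD_getCParams(level, ZSTD_CONTENTSIZE_UNKNOWN, dictSize);
+    /* reference the dictionary to avoid a copy; it must outlive the CDict */
+    return ZSTD_initStaticCDict(wksp, wkspSize,
+                                dict, dictSize,
+                                ZSTD_dlm_byRef, ZSTD_dct_auto,
+                                cParams);
+}
+#endif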
+
+ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict)
+{
+ assert(cdict != NULL);
+ return cdict->matchState.cParams;
+}
+
+/*! ZSTD_getDictID_fromCDict() :
+ * Provides the dictID of the dictionary loaded into `cdict`.
+ * If @return == 0, the dictionary is not conformant to the Zstandard specification, or is empty.
+ * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
+unsigned ZSTD_getDictID_fromCDict(const ZSTD_CDict* cdict)
+{
+ if (cdict==NULL) return 0;
+ return cdict->dictID;
+}
+
+
+/* ZSTD_compressBegin_usingCDict_advanced() :
+ * cdict must be != NULL */
+size_t ZSTD_compressBegin_usingCDict_advanced(
+ ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
+ ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
+{
+ ZSTD_CCtx_params cctxParams;
+ DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_advanced");
+ RETURN_ERROR_IF(cdict==NULL, dictionary_wrong, "NULL pointer!");
+ /* Initialize the cctxParams from the cdict */
+ {
+ ZSTD_parameters params;
+ params.fParams = fParams;
+ params.cParams = ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
+ || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
+ || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
+ || cdict->compressionLevel == 0 ) ?
+ ZSTD_getCParamsFromCDict(cdict)
+ : ZSTD_getCParams(cdict->compressionLevel,
+ pledgedSrcSize,
+ cdict->dictContentSize);
+ ZSTD_CCtxParams_init_internal(&cctxParams, &params, cdict->compressionLevel);
+ }
+ /* Increase window log to fit the entire dictionary and source if the
+ * source size is known. Limit the increase to 19, which is the
+ * window log for compression level 1 with the largest source size.
+ */
+ if (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
+ U32 const limitedSrcSize = (U32)MIN(pledgedSrcSize, 1U << 19);
+ U32 const limitedSrcLog = limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1;
+ cctxParams.cParams.windowLog = MAX(cctxParams.cParams.windowLog, limitedSrcLog);
+ }
+ return ZSTD_compressBegin_internal(cctx,
+ NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast,
+ cdict,
+ &cctxParams, pledgedSrcSize,
+ ZSTDb_not_buffered);
+}
+
+/* ZSTD_compressBegin_usingCDict() :
+ * convenience wrapper around ZSTD_compressBegin_usingCDict_advanced(),
+ * using default frame parameters and pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN */
+size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
+{
+ ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
+ DEBUGLOG(4, "ZSTD_compressBegin_usingCDict : dictIDFlag == %u", !fParams.noDictIDFlag);
+ return ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);
+}
+
+size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
+{
+ FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize), ""); /* will check if cdict != NULL */
+ return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
+}
+
+/*! ZSTD_compress_usingCDict() :
+ * Compression using a digested Dictionary.
+ * Faster startup than ZSTD_compress_usingDict(), recommended when the same dictionary is used multiple times.
+ * Note that compression parameters are decided at CDict creation time,
+ * while frame parameters are hardcoded */
+size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_CDict* cdict)
+{
+ ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
+ return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
+}
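+
+/* Illustrative sketch (not part of the zstd sources) : the reuse pattern the
+ * functions above are designed for - digest the dictionary once, then compress
+ * many buffers against it. All names are hypothetical, and heap creation assumes
+ * the environment provides a working allocator (otherwise the static variants
+ * above apply).
+ */
+#if 0 /* example only, kept out of the build */
+static size_t example_compressWithCDict(const void* dict, size_t dictSize,
+                                        void* dst, size_t dstCapacity,
+                                        const void* src, size_t srcSize,
+                                        int level)
+{
+    size_t cSize;
+    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
+    ZSTD_CDict* const cdict = ZSTD_createCDict(dict, dictSize, level);
+    if (cctx == NULL || cdict == NULL) {
+        ZSTD_freeCDict(cdict);   /* both free functions accept NULL */
+        ZSTD_freeCCtx(cctx);
+        return ERROR(memory_allocation);
+    }
+    /* the same cdict can be reused across many compressions (and many cctx) */
+    cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity, src, srcSize, cdict);
+    ZSTD_freeCDict(cdict);
+    ZSTD_freeCCtx(cctx);
+    return cSize;   /* compressed size, or a ZSTD error code */
+}
+#endif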
+
+
+
+/* ******************************************************************
+* Streaming
+********************************************************************/
+
+ZSTD_CStream* ZSTD_createCStream(void)
+{
+ DEBUGLOG(3, "ZSTD_createCStream");
+ return ZSTD_createCStream_advanced(ZSTD_defaultCMem);
+}
+
+ZSTD_CStream* ZSTD_initStaticCStream(void *workspace, size_t workspaceSize)
+{
+ return ZSTD_initStaticCCtx(workspace, workspaceSize);
+}
+
+ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem)
+{ /* CStream and CCtx are now the same object */
+ return ZSTD_createCCtx_advanced(customMem);
+}
+
+size_t ZSTD_freeCStream(ZSTD_CStream* zcs)
+{
+ return ZSTD_freeCCtx(zcs); /* same object */
+}
+
+
+
+/*====== Initialization ======*/
+
+size_t ZSTD_CStreamInSize(void) { return ZSTD_BLOCKSIZE_MAX; }
+
+size_t ZSTD_CStreamOutSize(void)
+{
+ return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bit hash */ ;
+}
+
+static ZSTD_cParamMode_e ZSTD_getCParamMode(ZSTD_CDict const* cdict, ZSTD_CCtx_params const* params, U64 pledgedSrcSize)
+{
+ if (cdict != NULL && ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize))
+ return ZSTD_cpm_attachDict;
+ else
+ return ZSTD_cpm_noAttachDict;
+}
+
+/* ZSTD_resetCStream():
+ * pledgedSrcSize == 0 means "unknown" */
+size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pss)
+{
+ /* temporary : 0 interpreted as "unknown" during transition period.
+ * Users wishing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
+ * 0 will be interpreted as "empty" in the future.
+ */
+ U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
+ DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (unsigned)pledgedSrcSize);
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
+ FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
+ return 0;
+}
+
+/*! ZSTD_initCStream_internal() :
+ * Note : for lib/compress only. Used by zstdmt_compress.c.
+ * Assumption 1 : params are valid
+ * Assumption 2 : either dict, or cdict, is defined, not both */
+size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
+ const void* dict, size_t dictSize, const ZSTD_CDict* cdict,
+ const ZSTD_CCtx_params* params,
+ unsigned long long pledgedSrcSize)
+{
+ DEBUGLOG(4, "ZSTD_initCStream_internal");
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
+ FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
+ assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
+ zcs->requestedParams = *params;
+ assert(!((dict) && (cdict))); /* either dict or cdict, not both */
+ if (dict) {
+ FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
+ } else {
+ /* Dictionary is cleared if !cdict */
+ FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
+ }
+ return 0;
+}
+
+/* ZSTD_initCStream_usingCDict_advanced() :
+ * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */
+size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
+ const ZSTD_CDict* cdict,
+ ZSTD_frameParameters fParams,
+ unsigned long long pledgedSrcSize)
+{
+ DEBUGLOG(4, "ZSTD_initCStream_usingCDict_advanced");
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
+ FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
+ zcs->requestedParams.fParams = fParams;
+ FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
+ return 0;
+}
+
+/* note : cdict must outlive compression session */
+size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict)
+{
+ DEBUGLOG(4, "ZSTD_initCStream_usingCDict");
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
+ FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
+ return 0;
+}
+
+
+/* ZSTD_initCStream_advanced() :
+ * pledgedSrcSize must be exact.
+ * If srcSize is not known at init time, use the value ZSTD_CONTENTSIZE_UNKNOWN.
+ * dict is loaded with default parameters ZSTD_dct_auto and ZSTD_dlm_byCopy. */
+size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
+ const void* dict, size_t dictSize,
+ ZSTD_parameters params, unsigned long long pss)
+{
+ /* for compatibility with older programs relying on this behavior.
+ * Users should now specify ZSTD_CONTENTSIZE_UNKNOWN.
+ * This line will be removed in the future.
+ */
+ U64 const pledgedSrcSize = (pss==0 && params.fParams.contentSizeFlag==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
+ DEBUGLOG(4, "ZSTD_initCStream_advanced");
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
+ FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
+ FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , "");
+ ZSTD_CCtxParams_setZstdParams(&zcs->requestedParams, &params);
+ FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
+ return 0;
+}
+
+size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel)
+{
+ DEBUGLOG(4, "ZSTD_initCStream_usingDict");
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
+ FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
+ FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
+ return 0;
+}
+
+size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss)
+{
+ /* temporary : 0 interpreted as "unknown" during transition period.
+ * Users wishing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
+ * 0 will be interpreted as "empty" in the future.
+ */
+ U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
+ DEBUGLOG(4, "ZSTD_initCStream_srcSize");
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
+ FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , "");
+ FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
+ FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
+ return 0;
+}
+
+size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
+{
+ DEBUGLOG(4, "ZSTD_initCStream");
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
+ FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , "");
+ FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
+ return 0;
+}
+
+/*====== Compression ======*/
+
+static size_t ZSTD_nextInputSizeHint(const ZSTD_CCtx* cctx)
+{
+ size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos;
+ if (hintInSize==0) hintInSize = cctx->blockSize;
+ return hintInSize;
+}
+
+/* ZSTD_compressStream_generic():
+ * internal function for all *compressStream*() variants
+ * (declared static here : the multi-threaded caller zstdmt_compress.c is not part of this port)
+ * @return : hint size for next input */
+static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
+ ZSTD_outBuffer* output,
+ ZSTD_inBuffer* input,
+ ZSTD_EndDirective const flushMode)
+{
+ const char* const istart = (const char*)input->src;
+ const char* const iend = input->size != 0 ? istart + input->size : istart;
+ const char* ip = input->pos != 0 ? istart + input->pos : istart;
+ char* const ostart = (char*)output->dst;
+ char* const oend = output->size != 0 ? ostart + output->size : ostart;
+ char* op = output->pos != 0 ? ostart + output->pos : ostart;
+ U32 someMoreWork = 1;
+
+ /* check expectations */
+ DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (unsigned)flushMode);
+ if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) {
+ assert(zcs->inBuff != NULL);
+ assert(zcs->inBuffSize > 0);
+ }
+ if (zcs->appliedParams.outBufferMode == ZSTD_bm_buffered) {
+ assert(zcs->outBuff != NULL);
+ assert(zcs->outBuffSize > 0);
+ }
+ assert(output->pos <= output->size);
+ assert(input->pos <= input->size);
+ assert((U32)flushMode <= (U32)ZSTD_e_end);
+
+ while (someMoreWork) {
+ switch(zcs->streamStage)
+ {
+ case zcss_init:
+ RETURN_ERROR(init_missing, "call ZSTD_initCStream() first!");
+
+ case zcss_load:
+ if ( (flushMode == ZSTD_e_end)
+ && ( (size_t)(oend-op) >= ZSTD_compressBound(iend-ip) /* Enough output space */
+ || zcs->appliedParams.outBufferMode == ZSTD_bm_stable) /* OR we are allowed to return dstSizeTooSmall */
+ && (zcs->inBuffPos == 0) ) {
+ /* shortcut to compression pass directly into output buffer */
+ size_t const cSize = ZSTD_compressEnd(zcs,
+ op, oend-op, ip, iend-ip);
+ DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize);
+ FORWARD_IF_ERROR(cSize, "ZSTD_compressEnd failed");
+ ip = iend;
+ op += cSize;
+ zcs->frameEnded = 1;
+ ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ someMoreWork = 0; break;
+ }
+ /* complete loading into inBuffer in buffered mode */
+ if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) {
+ size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
+ size_t const loaded = ZSTD_limitCopy(
+ zcs->inBuff + zcs->inBuffPos, toLoad,
+ ip, iend-ip);
+ zcs->inBuffPos += loaded;
+ if (loaded != 0)
+ ip += loaded;
+ if ( (flushMode == ZSTD_e_continue)
+ && (zcs->inBuffPos < zcs->inBuffTarget) ) {
+ /* not enough input to fill full block : stop here */
+ someMoreWork = 0; break;
+ }
+ if ( (flushMode == ZSTD_e_flush)
+ && (zcs->inBuffPos == zcs->inToCompress) ) {
+ /* empty */
+ someMoreWork = 0; break;
+ }
+ }
+ /* compress current block (note : this stage cannot be stopped in the middle) */
+ DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode);
+ { int const inputBuffered = (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered);
+ void* cDst;
+ size_t cSize;
+ size_t oSize = oend-op;
+ size_t const iSize = inputBuffered
+ ? zcs->inBuffPos - zcs->inToCompress
+ : MIN((size_t)(iend - ip), zcs->blockSize);
+ if (oSize >= ZSTD_compressBound(iSize) || zcs->appliedParams.outBufferMode == ZSTD_bm_stable)
+ cDst = op; /* compress into output buffer, to skip flush stage */
+ else
+ cDst = zcs->outBuff, oSize = zcs->outBuffSize;
+ if (inputBuffered) {
+ unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend);
+ cSize = lastBlock ?
+ ZSTD_compressEnd(zcs, cDst, oSize,
+ zcs->inBuff + zcs->inToCompress, iSize) :
+ ZSTD_compressContinue(zcs, cDst, oSize,
+ zcs->inBuff + zcs->inToCompress, iSize);
+ FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed");
+ zcs->frameEnded = lastBlock;
+ /* prepare next block */
+ zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
+ if (zcs->inBuffTarget > zcs->inBuffSize)
+ zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;
+ DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u",
+ (unsigned)zcs->inBuffTarget, (unsigned)zcs->inBuffSize);
+ if (!lastBlock)
+ assert(zcs->inBuffTarget <= zcs->inBuffSize);
+ zcs->inToCompress = zcs->inBuffPos;
+ } else {
+ unsigned const lastBlock = (ip + iSize == iend);
+ assert(flushMode == ZSTD_e_end /* Already validated */);
+ cSize = lastBlock ?
+ ZSTD_compressEnd(zcs, cDst, oSize, ip, iSize) :
+ ZSTD_compressContinue(zcs, cDst, oSize, ip, iSize);
+ /* Consume the input prior to error checking to mirror buffered mode. */
+ if (iSize > 0)
+ ip += iSize;
+ FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed");
+ zcs->frameEnded = lastBlock;
+ if (lastBlock)
+ assert(ip == iend);
+ }
+ if (cDst == op) { /* no need to flush */
+ op += cSize;
+ if (zcs->frameEnded) {
+ DEBUGLOG(5, "Frame completed directly in outBuffer");
+ someMoreWork = 0;
+ ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ }
+ break;
+ }
+ zcs->outBuffContentSize = cSize;
+ zcs->outBuffFlushedSize = 0;
+ zcs->streamStage = zcss_flush; /* pass-through to flush stage */
+ }
+ ZSTD_FALLTHROUGH;
+ case zcss_flush:
+ DEBUGLOG(5, "flush stage");
+ assert(zcs->appliedParams.outBufferMode == ZSTD_bm_buffered);
+ { size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
+ size_t const flushed = ZSTD_limitCopy(op, (size_t)(oend-op),
+ zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
+ DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u",
+ (unsigned)toFlush, (unsigned)(oend-op), (unsigned)flushed);
+ if (flushed)
+ op += flushed;
+ zcs->outBuffFlushedSize += flushed;
+ if (toFlush!=flushed) {
+ /* flush not fully completed, presumably because dst is too small */
+ assert(op==oend);
+ someMoreWork = 0;
+ break;
+ }
+ zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
+ if (zcs->frameEnded) {
+ DEBUGLOG(5, "Frame completed on flush");
+ someMoreWork = 0;
+ ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ break;
+ }
+ zcs->streamStage = zcss_load;
+ break;
+ }
+
+ default: /* impossible */
+ assert(0);
+ }
+ }
+
+ input->pos = ip - istart;
+ output->pos = op - ostart;
+ if (zcs->frameEnded) return 0;
+ return ZSTD_nextInputSizeHint(zcs);
+}
+
+static size_t ZSTD_nextInputSizeHint_MTorST(const ZSTD_CCtx* cctx)
+{
+ return ZSTD_nextInputSizeHint(cctx);
+}
+
+size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
+{
+ FORWARD_IF_ERROR( ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue) , "");
+ return ZSTD_nextInputSizeHint_MTorST(zcs);
+}
+
+/* After a compression call set the expected input/output buffer.
+ * This is validated at the start of the next compression call.
+ */
+static void ZSTD_setBufferExpectations(ZSTD_CCtx* cctx, ZSTD_outBuffer const* output, ZSTD_inBuffer const* input)
+{
+ if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
+ cctx->expectedInBuffer = *input;
+ }
+ if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) {
+ cctx->expectedOutBufferSize = output->size - output->pos;
+ }
+}
+
+/* Validate that the input/output buffers match the expectations set by
+ * ZSTD_setBufferExpectations.
+ */
+static size_t ZSTD_checkBufferStability(ZSTD_CCtx const* cctx,
+ ZSTD_outBuffer const* output,
+ ZSTD_inBuffer const* input,
+ ZSTD_EndDirective endOp)
+{
+ if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
+ ZSTD_inBuffer const expect = cctx->expectedInBuffer;
+ if (expect.src != input->src || expect.pos != input->pos || expect.size != input->size)
+ RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer enabled but input differs!");
+ if (endOp != ZSTD_e_end)
+ RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer can only be used with ZSTD_e_end!");
+ }
+ if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) {
+ size_t const outBufferSize = output->size - output->pos;
+ if (cctx->expectedOutBufferSize != outBufferSize)
+ RETURN_ERROR(dstBuffer_wrong, "ZSTD_c_stableOutBuffer enabled but output size differs!");
+ }
+ return 0;
+}
+
+static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx,
+ ZSTD_EndDirective endOp,
+ size_t inSize) {
+ ZSTD_CCtx_params params = cctx->requestedParams;
+ ZSTD_prefixDict const prefixDict = cctx->prefixDict;
+ FORWARD_IF_ERROR( ZSTD_initLocalDict(cctx) , ""); /* Init the local dict if present. */
+ ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict)); /* single usage */
+ assert(prefixDict.dict==NULL || cctx->cdict==NULL); /* only one can be set */
+ if (cctx->cdict)
+ params.compressionLevel = cctx->cdict->compressionLevel; /* let cdict take priority in terms of compression level */
+ DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage");
+ if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = inSize + 1; /* auto-fix pledgedSrcSize */
+ {
+ size_t const dictSize = prefixDict.dict
+ ? prefixDict.dictSize
+ : (cctx->cdict ? cctx->cdict->dictContentSize : 0);
+ ZSTD_cParamMode_e const mode = ZSTD_getCParamMode(cctx->cdict, &params, cctx->pledgedSrcSizePlusOne - 1);
+ params.cParams = ZSTD_getCParamsFromCCtxParams(
+ &params, cctx->pledgedSrcSizePlusOne-1,
+ dictSize, mode);
+ }
+
+ if (ZSTD_CParams_shouldEnableLdm(&params.cParams)) {
+ /* Enable LDM by default for optimal parser and window size >= 128MB */
+ DEBUGLOG(4, "LDM enabled by default (window size >= 128MB, strategy >= btopt)");
+ params.ldmParams.enableLdm = 1;
+ }
+
+ { U64 const pledgedSrcSize = cctx->pledgedSrcSizePlusOne - 1;
+ assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
+ FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
+ prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType, ZSTD_dtlm_fast,
+ cctx->cdict,
+ &params, pledgedSrcSize,
+ ZSTDb_buffered) , "");
+ assert(cctx->appliedParams.nbWorkers == 0);
+ cctx->inToCompress = 0;
+ cctx->inBuffPos = 0;
+ if (cctx->appliedParams.inBufferMode == ZSTD_bm_buffered) {
+ /* for small input: avoid an automatic flush on reaching the end of a block, since
+ * it would require adding a 3-byte null block to end the frame
+ */
+ cctx->inBuffTarget = cctx->blockSize + (cctx->blockSize == pledgedSrcSize);
+ } else {
+ cctx->inBuffTarget = 0;
+ }
+ cctx->outBuffContentSize = cctx->outBuffFlushedSize = 0;
+ cctx->streamStage = zcss_load;
+ cctx->frameEnded = 0;
+ }
+ return 0;
+}
+
+size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
+ ZSTD_outBuffer* output,
+ ZSTD_inBuffer* input,
+ ZSTD_EndDirective endOp)
+{
+ DEBUGLOG(5, "ZSTD_compressStream2, endOp=%u ", (unsigned)endOp);
+ /* check conditions */
+ RETURN_ERROR_IF(output->pos > output->size, dstSize_tooSmall, "invalid output buffer");
+ RETURN_ERROR_IF(input->pos > input->size, srcSize_wrong, "invalid input buffer");
+ RETURN_ERROR_IF((U32)endOp > (U32)ZSTD_e_end, parameter_outOfBound, "invalid endDirective");
+ assert(cctx != NULL);
+
+ /* transparent initialization stage */
+ if (cctx->streamStage == zcss_init) {
+ FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, endOp, input->size), "CompressStream2 initialization failed");
+ ZSTD_setBufferExpectations(cctx, output, input); /* Set initial buffer expectations now that we've initialized */
+ }
+ /* end of transparent initialization stage */
+
+ FORWARD_IF_ERROR(ZSTD_checkBufferStability(cctx, output, input, endOp), "invalid buffers");
+ /* compression stage */
+ FORWARD_IF_ERROR( ZSTD_compressStream_generic(cctx, output, input, endOp) , "");
+ DEBUGLOG(5, "completed ZSTD_compressStream2");
+ ZSTD_setBufferExpectations(cctx, output, input);
+ return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */
+}
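+
+/* Illustrative sketch (not part of the zstd sources) : the canonical driver loop
+ * for ZSTD_compressStream2(). `read_chunk` and `write_chunk` stand in for
+ * whatever I/O the caller uses and are hypothetical; buffer sizes follow the
+ * ZSTD_CStreamInSize()/ZSTD_CStreamOutSize() recommendations above.
+ */
+#if 0 /* example only, kept out of the build */
+static int example_streamCompress(ZSTD_CCtx* cctx,
+                                  void* inBuf,  size_t inBufSize,   /* >= ZSTD_CStreamInSize()  */
+                                  void* outBuf, size_t outBufSize)  /* >= ZSTD_CStreamOutSize() */
+{
+    int lastChunk = 0;
+    while (!lastChunk) {
+        size_t const readSize = read_chunk(inBuf, inBufSize);   /* hypothetical I/O helper */
+        ZSTD_EndDirective const mode = (readSize < inBufSize) ? ZSTD_e_end : ZSTD_e_continue;
+        ZSTD_inBuffer input = { inBuf, readSize, 0 };
+        int finished = 0;
+        lastChunk = (mode == ZSTD_e_end);
+        do {
+            ZSTD_outBuffer output = { outBuf, outBufSize, 0 };
+            size_t const remaining = ZSTD_compressStream2(cctx, &output, &input, mode);
+            if (ZSTD_isError(remaining)) return -1;
+            write_chunk(outBuf, output.pos);                     /* hypothetical I/O helper */
+            /* with ZSTD_e_end the frame is done once the return value hits 0;
+             * with ZSTD_e_continue it is enough to have consumed all the input */
+            finished = lastChunk ? (remaining == 0) : (input.pos == input.size);
+        } while (!finished);
+    }
+    return 0;
+}
+#endif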
+
+size_t ZSTD_compressStream2_simpleArgs (
+ ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity, size_t* dstPos,
+ const void* src, size_t srcSize, size_t* srcPos,
+ ZSTD_EndDirective endOp)
+{
+ ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
+ ZSTD_inBuffer input = { src, srcSize, *srcPos };
+ /* ZSTD_compressStream2() will check validity of dstPos and srcPos */
+ size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp);
+ *dstPos = output.pos;
+ *srcPos = input.pos;
+ return cErr;
+}
+
+size_t ZSTD_compress2(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ ZSTD_bufferMode_e const originalInBufferMode = cctx->requestedParams.inBufferMode;
+ ZSTD_bufferMode_e const originalOutBufferMode = cctx->requestedParams.outBufferMode;
+ DEBUGLOG(4, "ZSTD_compress2 (srcSize=%u)", (unsigned)srcSize);
+ ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
+ /* Enable stable input/output buffers. */
+ cctx->requestedParams.inBufferMode = ZSTD_bm_stable;
+ cctx->requestedParams.outBufferMode = ZSTD_bm_stable;
+ { size_t oPos = 0;
+ size_t iPos = 0;
+ size_t const result = ZSTD_compressStream2_simpleArgs(cctx,
+ dst, dstCapacity, &oPos,
+ src, srcSize, &iPos,
+ ZSTD_e_end);
+ /* Reset to the original values. */
+ cctx->requestedParams.inBufferMode = originalInBufferMode;
+ cctx->requestedParams.outBufferMode = originalOutBufferMode;
+ FORWARD_IF_ERROR(result, "ZSTD_compressStream2_simpleArgs failed");
+ if (result != 0) { /* compression not completed, due to lack of output space */
+ assert(oPos == dstCapacity);
+ RETURN_ERROR(dstSize_tooSmall, "");
+ }
+ assert(iPos == srcSize); /* all input is expected consumed */
+ return oPos;
+ }
+}
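+
+/* Illustrative sketch (not part of the zstd sources) : one-shot use of the
+ * advanced API - set sticky parameters on the context, then let ZSTD_compress2()
+ * emit a whole frame in a single call. The parameter values are arbitrary.
+ */
+#if 0 /* example only, kept out of the build */
+static size_t example_compress2(ZSTD_CCtx* cctx,
+                                void* dst, size_t dstCapacity,
+                                const void* src, size_t srcSize)
+{
+    /* sticky parameters persist across frames until the cctx parameters are reset */
+    FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 3), "");
+    FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1), "");
+    /* returns the compressed size, or an error code if dst is too small */
+    return ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
+}
+#endif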
+
+typedef struct {
+ U32 idx; /* Index in array of ZSTD_Sequence */
+ U32 posInSequence; /* Position within sequence at idx */
+ size_t posInSrc; /* Number of bytes given by sequences provided so far */
+} ZSTD_sequencePosition;
+
+/* Returns a ZSTD error code if sequence is not valid */
+static size_t ZSTD_validateSequence(U32 offCode, U32 matchLength,
+ size_t posInSrc, U32 windowLog, size_t dictSize, U32 minMatch) {
+ size_t offsetBound;
+ U32 windowSize = 1 << windowLog;
+ /* posInSrc represents the amount of data the decoder would decode up to this point.
+ * As long as the amount of data decoded is less than or equal to window size, offsets may be
+ * larger than the total length of output decoded in order to reference the dict, even larger than
+ * window size. After output surpasses windowSize, we're limited to windowSize offsets again.
+ */
+ offsetBound = posInSrc > windowSize ? (size_t)windowSize : posInSrc + (size_t)dictSize;
+ RETURN_ERROR_IF(offCode > offsetBound + ZSTD_REP_MOVE, corruption_detected, "Offset too large!");
+ RETURN_ERROR_IF(matchLength < minMatch, corruption_detected, "Matchlength too small");
+ return 0;
+}
+
+/* Returns an offset code, given a sequence's raw offset, the ongoing repcode array, and whether litLength == 0 */
+static U32 ZSTD_finalizeOffCode(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 ll0) {
+ U32 offCode = rawOffset + ZSTD_REP_MOVE;
+ U32 repCode = 0;
+
+ if (!ll0 && rawOffset == rep[0]) {
+ repCode = 1;
+ } else if (rawOffset == rep[1]) {
+ repCode = 2 - ll0;
+ } else if (rawOffset == rep[2]) {
+ repCode = 3 - ll0;
+ } else if (ll0 && rawOffset == rep[0] - 1) {
+ repCode = 3;
+ }
+ if (repCode) {
+ /* ZSTD_storeSeq expects a number in the range [0, 2] to represent a repcode */
+ offCode = repCode - 1;
+ }
+ return offCode;
+}
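+
+/* Worked example for ZSTD_finalizeOffCode() above (illustrative, values arbitrary) :
+ * with rep = {8, 4, 12} and a sequence whose rawOffset == 4 and litLength != 0
+ * (so ll0 == 0), rawOffset matches rep[1], hence repCode = 2 - 0 = 2 and the
+ * stored offCode is repCode - 1 = 1. With the same rep array but rawOffset == 100,
+ * no repcode matches, so the stored offCode is simply 100 + ZSTD_REP_MOVE.
+ */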
+
+/* Returns 0 on success, and a ZSTD_error otherwise. This function scans through an array of
+ * ZSTD_Sequence, storing the sequences it finds, until it reaches a block delimiter.
+ */
+static size_t ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
+ const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
+ const void* src, size_t blockSize) {
+ U32 idx = seqPos->idx;
+ BYTE const* ip = (BYTE const*)(src);
+ const BYTE* const iend = ip + blockSize;
+ repcodes_t updatedRepcodes;
+ U32 dictSize;
+ U32 litLength;
+ U32 matchLength;
+ U32 ll0;
+ U32 offCode;
+
+ if (cctx->cdict) {
+ dictSize = (U32)cctx->cdict->dictContentSize;
+ } else if (cctx->prefixDict.dict) {
+ dictSize = (U32)cctx->prefixDict.dictSize;
+ } else {
+ dictSize = 0;
+ }
+ ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
+ for (; (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0) && idx < inSeqsSize; ++idx) {
+ litLength = inSeqs[idx].litLength;
+ matchLength = inSeqs[idx].matchLength;
+ ll0 = litLength == 0;
+ offCode = ZSTD_finalizeOffCode(inSeqs[idx].offset, updatedRepcodes.rep, ll0);
+ updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
+
+ DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
+ if (cctx->appliedParams.validateSequences) {
+ seqPos->posInSrc += litLength + matchLength;
+ FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
+ cctx->appliedParams.cParams.windowLog, dictSize,
+ cctx->appliedParams.cParams.minMatch),
+ "Sequence validation failed");
+ }
+ RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation,
+ "Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
+ ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength - MINMATCH);
+ ip += matchLength + litLength;
+ }
+ ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t));
+
+ if (inSeqs[idx].litLength) {
+ DEBUGLOG(6, "Storing last literals of size: %u", inSeqs[idx].litLength);
+ ZSTD_storeLastLiterals(&cctx->seqStore, ip, inSeqs[idx].litLength);
+ ip += inSeqs[idx].litLength;
+ seqPos->posInSrc += inSeqs[idx].litLength;
+ }
+ RETURN_ERROR_IF(ip != iend, corruption_detected, "Blocksize doesn't agree with block delimiter!");
+ seqPos->idx = idx+1;
+ return 0;
+}
+
+/* Returns the number of bytes to move the current read position back by. This is only
+ * non-zero if we ended up splitting a sequence. It may instead return a ZSTD error if
+ * something went wrong.
+ *
+ * This function will attempt to scan through blockSize bytes represented by the sequences
+ * in inSeqs, storing any (partial) sequences.
+ *
+ * Occasionally, we may want to change the actual number of bytes we consumed from inSeqs to
+ * avoid splitting a match, or to avoid splitting a match such that it would produce a match
+ * smaller than MINMATCH. In this case, we return the number of bytes that we didn't read from this block.
+ */
+static size_t ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
+ const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
+ const void* src, size_t blockSize) {
+ U32 idx = seqPos->idx;
+ U32 startPosInSequence = seqPos->posInSequence;
+ U32 endPosInSequence = seqPos->posInSequence + (U32)blockSize;
+ size_t dictSize;
+ BYTE const* ip = (BYTE const*)(src);
+ BYTE const* iend = ip + blockSize; /* May be adjusted if we decide to process fewer than blockSize bytes */
+ repcodes_t updatedRepcodes;
+ U32 bytesAdjustment = 0;
+ U32 finalMatchSplit = 0;
+ U32 litLength;
+ U32 matchLength;
+ U32 rawOffset;
+ U32 offCode;
+
+ if (cctx->cdict) {
+ dictSize = cctx->cdict->dictContentSize;
+ } else if (cctx->prefixDict.dict) {
+ dictSize = cctx->prefixDict.dictSize;
+ } else {
+ dictSize = 0;
+ }
+ DEBUGLOG(5, "ZSTD_copySequencesToSeqStore: idx: %u PIS: %u blockSize: %zu", idx, startPosInSequence, blockSize);
+ DEBUGLOG(5, "Start seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength);
+ ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
+ while (endPosInSequence && idx < inSeqsSize && !finalMatchSplit) {
+ const ZSTD_Sequence currSeq = inSeqs[idx];
+ litLength = currSeq.litLength;
+ matchLength = currSeq.matchLength;
+ rawOffset = currSeq.offset;
+
+ /* Modify the sequence depending on where endPosInSequence lies */
+ if (endPosInSequence >= currSeq.litLength + currSeq.matchLength) {
+ if (startPosInSequence >= litLength) {
+ startPosInSequence -= litLength;
+ litLength = 0;
+ matchLength -= startPosInSequence;
+ } else {
+ litLength -= startPosInSequence;
+ }
+ /* Move to the next sequence */
+ endPosInSequence -= currSeq.litLength + currSeq.matchLength;
+ startPosInSequence = 0;
+ idx++;
+ } else {
+ /* This is the final (partial) sequence we're adding from inSeqs, and endPosInSequence
+ does not reach the end of the match. So, we have to split the sequence */
+ DEBUGLOG(6, "Require a split: diff: %u, idx: %u PIS: %u",
+ currSeq.litLength + currSeq.matchLength - endPosInSequence, idx, endPosInSequence);
+ if (endPosInSequence > litLength) {
+ U32 firstHalfMatchLength;
+ litLength = startPosInSequence >= litLength ? 0 : litLength - startPosInSequence;
+ firstHalfMatchLength = endPosInSequence - startPosInSequence - litLength;
+ if (matchLength > blockSize && firstHalfMatchLength >= cctx->appliedParams.cParams.minMatch) {
+ /* Only ever split the match if it is larger than the block size */
+ U32 secondHalfMatchLength = currSeq.matchLength + currSeq.litLength - endPosInSequence;
+ if (secondHalfMatchLength < cctx->appliedParams.cParams.minMatch) {
+ /* Move the endPosInSequence backward so that it creates a match of minMatch length */
+ endPosInSequence -= cctx->appliedParams.cParams.minMatch - secondHalfMatchLength;
+ bytesAdjustment = cctx->appliedParams.cParams.minMatch - secondHalfMatchLength;
+ firstHalfMatchLength -= bytesAdjustment;
+ }
+ matchLength = firstHalfMatchLength;
+ /* Flag that we split the last match - after storing the sequence, exit the loop,
+ but keep the value of endPosInSequence */
+ finalMatchSplit = 1;
+ } else {
+ /* Move the position in the sequence backwards so that we don't split the match, and break to store
+ * the last literals. We use the original currSeq.litLength as a marker for where endPosInSequence
+ * should go. We prefer to do this whenever it is not necessary to split the match, or if doing so
+ * would cause the first half of the match to be too small.
+ */
+ bytesAdjustment = endPosInSequence - currSeq.litLength;
+ endPosInSequence = currSeq.litLength;
+ break;
+ }
+ } else {
+ /* This sequence ends inside the literals, break to store the last literals */
+ break;
+ }
+ }
+ /* Check if this offset can be represented with a repcode */
+ { U32 ll0 = (litLength == 0);
+ offCode = ZSTD_finalizeOffCode(rawOffset, updatedRepcodes.rep, ll0);
+ updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
+ }
+
+ if (cctx->appliedParams.validateSequences) {
+ seqPos->posInSrc += litLength + matchLength;
+ FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
+ cctx->appliedParams.cParams.windowLog, dictSize,
+ cctx->appliedParams.cParams.minMatch),
+ "Sequence validation failed");
+ }
+ DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
+ RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation,
+ "Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
+ ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength - MINMATCH);
+ ip += matchLength + litLength;
+ }
+ DEBUGLOG(5, "Ending seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength);
+ assert(idx == inSeqsSize || endPosInSequence <= inSeqs[idx].litLength + inSeqs[idx].matchLength);
+ seqPos->idx = idx;
+ seqPos->posInSequence = endPosInSequence;
+ ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t));
+
+ iend -= bytesAdjustment;
+ if (ip != iend) {
+ /* Store any last literals */
+ U32 lastLLSize = (U32)(iend - ip);
+ assert(ip <= iend);
+ DEBUGLOG(6, "Storing last literals of size: %u", lastLLSize);
+ ZSTD_storeLastLiterals(&cctx->seqStore, ip, lastLLSize);
+ seqPos->posInSrc += lastLLSize;
+ }
+
+ return bytesAdjustment;
+}
+
+typedef size_t (*ZSTD_sequenceCopier) (ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
+ const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
+ const void* src, size_t blockSize);
+static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode) {
+ ZSTD_sequenceCopier sequenceCopier = NULL;
+ assert(ZSTD_cParam_withinBounds(ZSTD_c_blockDelimiters, mode));
+ if (mode == ZSTD_sf_explicitBlockDelimiters) {
+ return ZSTD_copySequencesToSeqStoreExplicitBlockDelim;
+ } else if (mode == ZSTD_sf_noBlockDelimiters) {
+ return ZSTD_copySequencesToSeqStoreNoBlockDelim;
+ }
+ assert(sequenceCopier != NULL);
+ return sequenceCopier;
+}
+
+/* Compress, block-by-block, all of the sequences given.
+ *
+ * Returns the cumulative size of all compressed blocks (including their headers), otherwise a ZSTD error.
+ */
+static size_t ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
+ const void* src, size_t srcSize) {
+ size_t cSize = 0;
+ U32 lastBlock;
+ size_t blockSize;
+ size_t compressedSeqsSize;
+ size_t remaining = srcSize;
+ ZSTD_sequencePosition seqPos = {0, 0, 0};
+
+ BYTE const* ip = (BYTE const*)src;
+ BYTE* op = (BYTE*)dst;
+ ZSTD_sequenceCopier sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters);
+
+ DEBUGLOG(4, "ZSTD_compressSequences_internal srcSize: %zu, inSeqsSize: %zu", srcSize, inSeqsSize);
+ /* Special case: empty frame */
+ if (remaining == 0) {
+ U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1);
+ RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "No room for empty frame block header");
+ MEM_writeLE32(op, cBlockHeader24);
+ op += ZSTD_blockHeaderSize;
+ dstCapacity -= ZSTD_blockHeaderSize;
+ cSize += ZSTD_blockHeaderSize;
+ }
+
+ while (remaining) {
+ size_t cBlockSize;
+ size_t additionalByteAdjustment;
+ lastBlock = remaining <= cctx->blockSize;
+ blockSize = lastBlock ? (U32)remaining : (U32)cctx->blockSize;
+ ZSTD_resetSeqStore(&cctx->seqStore);
+ DEBUGLOG(4, "Working on new block. Blocksize: %zu", blockSize);
+
+ additionalByteAdjustment = sequenceCopier(cctx, &seqPos, inSeqs, inSeqsSize, ip, blockSize);
+ FORWARD_IF_ERROR(additionalByteAdjustment, "Bad sequence copy");
+ blockSize -= additionalByteAdjustment;
+
+ /* If blocks are too small, emit as a nocompress block */
+ if (blockSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
+ cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
+ FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed");
+ DEBUGLOG(4, "Block too small, writing out nocompress block: cSize: %zu", cBlockSize);
+ cSize += cBlockSize;
+ ip += blockSize;
+ op += cBlockSize;
+ remaining -= blockSize;
+ dstCapacity -= cBlockSize;
+ continue;
+ }
+
+ compressedSeqsSize = ZSTD_entropyCompressSequences(&cctx->seqStore,
+ &cctx->blockState.prevCBlock->entropy, &cctx->blockState.nextCBlock->entropy,
+ &cctx->appliedParams,
+ op + ZSTD_blockHeaderSize /* Leave space for block header */, dstCapacity - ZSTD_blockHeaderSize,
+ blockSize,
+ cctx->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
+ cctx->bmi2);
+ FORWARD_IF_ERROR(compressedSeqsSize, "Compressing sequences of block failed");
+ DEBUGLOG(4, "Compressed sequences size: %zu", compressedSeqsSize);
+
+ if (!cctx->isFirstBlock &&
+ ZSTD_maybeRLE(&cctx->seqStore) &&
+ ZSTD_isRLE((BYTE const*)src, srcSize)) {
+ /* We don't want to emit our first block as an RLE block even if it qualifies, because
+ * doing so will cause the decoder (cli only) to throw a "should consume all input" error.
+ * This is only an issue for zstd <= v1.4.3.
+ */
+ compressedSeqsSize = 1;
+ }
+
+ if (compressedSeqsSize == 0) {
+ /* ZSTD_noCompressBlock writes the block header as well */
+ cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
+ FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed");
+ DEBUGLOG(4, "Writing out nocompress block, size: %zu", cBlockSize);
+ } else if (compressedSeqsSize == 1) {
+ cBlockSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, blockSize, lastBlock);
+ FORWARD_IF_ERROR(cBlockSize, "RLE compress block failed");
+ DEBUGLOG(4, "Writing out RLE block, size: %zu", cBlockSize);
+ } else {
+ U32 cBlockHeader;
+ /* Error checking and repcodes update */
+ ZSTD_confirmRepcodesAndEntropyTables(cctx);
+ if (cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
+ cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
+
+ /* Write block header into beginning of block*/
+ cBlockHeader = lastBlock + (((U32)bt_compressed)<<1) + (U32)(compressedSeqsSize << 3);
+ MEM_writeLE24(op, cBlockHeader);
+ cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize;
+ DEBUGLOG(4, "Writing out compressed block, size: %zu", cBlockSize);
+ }
+
+ cSize += cBlockSize;
+ DEBUGLOG(4, "cSize running total: %zu", cSize);
+
+ if (lastBlock) {
+ break;
+ } else {
+ ip += blockSize;
+ op += cBlockSize;
+ remaining -= blockSize;
+ dstCapacity -= cBlockSize;
+ cctx->isFirstBlock = 0;
+ }
+ }
+
+ return cSize;
+}
+
+size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstCapacity,
+ const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
+ const void* src, size_t srcSize) {
+ BYTE* op = (BYTE*)dst;
+ size_t cSize = 0;
+ size_t compressedBlocksSize = 0;
+ size_t frameHeaderSize = 0;
+
+ /* Transparent initialization stage, same as compressStream2() */
+ DEBUGLOG(3, "ZSTD_compressSequences()");
+ assert(cctx != NULL);
+ FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, ZSTD_e_end, srcSize), "CCtx initialization failed");
+ /* Begin writing output, starting with frame header */
+ frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity, &cctx->appliedParams, srcSize, cctx->dictID);
+ op += frameHeaderSize;
+ dstCapacity -= frameHeaderSize;
+ cSize += frameHeaderSize;
+ if (cctx->appliedParams.fParams.checksumFlag && srcSize) {
+ xxh64_update(&cctx->xxhState, src, srcSize);
+ }
+ /* cSize includes block header size and compressed sequences size */
+ compressedBlocksSize = ZSTD_compressSequences_internal(cctx,
+ op, dstCapacity,
+ inSeqs, inSeqsSize,
+ src, srcSize);
+ FORWARD_IF_ERROR(compressedBlocksSize, "Compressing blocks failed!");
+ cSize += compressedBlocksSize;
+ dstCapacity -= compressedBlocksSize;
+
+ if (cctx->appliedParams.fParams.checksumFlag) {
+ U32 const checksum = (U32) xxh64_digest(&cctx->xxhState);
+ RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum");
+ DEBUGLOG(4, "Write checksum : %08X", (unsigned)checksum);
+ MEM_writeLE32((char*)dst + cSize, checksum);
+ cSize += 4;
+ }
+
+ DEBUGLOG(3, "Final compressed size: %zu", cSize);
+ return cSize;
+}
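+
+/* Illustrative sketch (not part of the zstd sources) : minimal shape of a
+ * ZSTD_compressSequences() call with explicit block delimiters. The sequence
+ * array is assumed to be produced by the caller and, per the copier above, each
+ * block must end with a delimiter entry whose matchLength and offset are 0.
+ * The helper name is hypothetical, and ZSTD_c_validateSequences is assumed to be
+ * exposed by the public header.
+ */
+#if 0 /* example only, kept out of the build */
+static size_t example_compressSequences(ZSTD_CCtx* cctx,
+                                        void* dst, size_t dstCapacity,
+                                        const ZSTD_Sequence* seqs, size_t nbSeqs,
+                                        const void* src, size_t srcSize)
+{
+    FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters,
+                                            ZSTD_sf_explicitBlockDelimiters), "");
+    /* optional : have the library re-check offsets and match lengths */
+    FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_validateSequences, 1), "");
+    return ZSTD_compressSequences(cctx, dst, dstCapacity, seqs, nbSeqs, src, srcSize);
+}
+#endif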
+
+/*====== Finalize ======*/
+
+/*! ZSTD_flushStream() :
+ * @return : amount of data remaining to flush */
+size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
+{
+ ZSTD_inBuffer input = { NULL, 0, 0 };
+ return ZSTD_compressStream2(zcs, output, &input, ZSTD_e_flush);
+}
+
+
+size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
+{
+ ZSTD_inBuffer input = { NULL, 0, 0 };
+ size_t const remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_e_end);
+ FORWARD_IF_ERROR( remainingToFlush , "ZSTD_compressStream2 failed");
+ if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush; /* minimal estimation */
+ /* single thread mode : attempt to calculate remaining to flush more precisely */
+ { size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE;
+ size_t const checksumSize = (size_t)(zcs->frameEnded ? 0 : zcs->appliedParams.fParams.checksumFlag * 4);
+ size_t const toFlush = remainingToFlush + lastBlockSize + checksumSize;
+ DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (unsigned)toFlush);
+ return toFlush;
+ }
+}
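+
+/* Illustrative sketch (not part of the zstd sources) : draining the epilogue with
+ * ZSTD_endStream() when the output buffer may be smaller than what remains to be
+ * flushed. `write_chunk` is a hypothetical I/O helper.
+ */
+#if 0 /* example only, kept out of the build */
+static int example_finishFrame(ZSTD_CStream* zcs, void* outBuf, size_t outBufSize)
+{
+    size_t remaining;
+    do {
+        ZSTD_outBuffer output = { outBuf, outBufSize, 0 };
+        remaining = ZSTD_endStream(zcs, &output);   /* 0 once the frame is complete */
+        if (ZSTD_isError(remaining)) return -1;
+        write_chunk(outBuf, output.pos);            /* hypothetical I/O helper */
+    } while (remaining != 0);
+    return 0;
+}
+#endif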
+
+
+/*-===== Pre-defined compression levels =====-*/
+
+#define ZSTD_MAX_CLEVEL 22
+int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
+int ZSTD_minCLevel(void) { return (int)-ZSTD_TARGETLENGTH_MAX; }
+
+static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = {
+{ /* "default" - for any srcSize > 256 KB */
+ /* W, C, H, S, L, TL, strat */
+ { 19, 12, 13, 1, 6, 1, ZSTD_fast }, /* base for negative levels */
+ { 19, 13, 14, 1, 7, 0, ZSTD_fast }, /* level 1 */
+ { 20, 15, 16, 1, 6, 0, ZSTD_fast }, /* level 2 */
+ { 21, 16, 17, 1, 5, 0, ZSTD_dfast }, /* level 3 */
+ { 21, 18, 18, 1, 5, 0, ZSTD_dfast }, /* level 4 */
+ { 21, 18, 19, 2, 5, 2, ZSTD_greedy }, /* level 5 */
+ { 21, 19, 19, 3, 5, 4, ZSTD_greedy }, /* level 6 */
+ { 21, 19, 19, 3, 5, 8, ZSTD_lazy }, /* level 7 */
+ { 21, 19, 19, 3, 5, 16, ZSTD_lazy2 }, /* level 8 */
+ { 21, 19, 20, 4, 5, 16, ZSTD_lazy2 }, /* level 9 */
+ { 22, 20, 21, 4, 5, 16, ZSTD_lazy2 }, /* level 10 */
+ { 22, 21, 22, 4, 5, 16, ZSTD_lazy2 }, /* level 11 */
+ { 22, 21, 22, 5, 5, 16, ZSTD_lazy2 }, /* level 12 */
+ { 22, 21, 22, 5, 5, 32, ZSTD_btlazy2 }, /* level 13 */
+ { 22, 22, 23, 5, 5, 32, ZSTD_btlazy2 }, /* level 14 */
+ { 22, 23, 23, 6, 5, 32, ZSTD_btlazy2 }, /* level 15 */
+ { 22, 22, 22, 5, 5, 48, ZSTD_btopt }, /* level 16 */
+ { 23, 23, 22, 5, 4, 64, ZSTD_btopt }, /* level 17 */
+ { 23, 23, 22, 6, 3, 64, ZSTD_btultra }, /* level 18 */
+ { 23, 24, 22, 7, 3,256, ZSTD_btultra2}, /* level 19 */
+ { 25, 25, 23, 7, 3,256, ZSTD_btultra2}, /* level 20 */
+ { 26, 26, 24, 7, 3,512, ZSTD_btultra2}, /* level 21 */
+ { 27, 27, 25, 9, 3,999, ZSTD_btultra2}, /* level 22 */
+},
+{ /* for srcSize <= 256 KB */
+ /* W, C, H, S, L, T, strat */
+ { 18, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */
+ { 18, 13, 14, 1, 6, 0, ZSTD_fast }, /* level 1 */
+ { 18, 14, 14, 1, 5, 0, ZSTD_dfast }, /* level 2 */
+ { 18, 16, 16, 1, 4, 0, ZSTD_dfast }, /* level 3 */
+ { 18, 16, 17, 2, 5, 2, ZSTD_greedy }, /* level 4.*/
+ { 18, 18, 18, 3, 5, 2, ZSTD_greedy }, /* level 5.*/
+ { 18, 18, 19, 3, 5, 4, ZSTD_lazy }, /* level 6.*/
+ { 18, 18, 19, 4, 4, 4, ZSTD_lazy }, /* level 7 */
+ { 18, 18, 19, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */
+ { 18, 18, 19, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */
+ { 18, 18, 19, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */
+ { 18, 18, 19, 5, 4, 12, ZSTD_btlazy2 }, /* level 11.*/
+ { 18, 19, 19, 7, 4, 12, ZSTD_btlazy2 }, /* level 12.*/
+ { 18, 18, 19, 4, 4, 16, ZSTD_btopt }, /* level 13 */
+ { 18, 18, 19, 4, 3, 32, ZSTD_btopt }, /* level 14.*/
+ { 18, 18, 19, 6, 3,128, ZSTD_btopt }, /* level 15.*/
+ { 18, 19, 19, 6, 3,128, ZSTD_btultra }, /* level 16.*/
+ { 18, 19, 19, 8, 3,256, ZSTD_btultra }, /* level 17.*/
+ { 18, 19, 19, 6, 3,128, ZSTD_btultra2}, /* level 18.*/
+ { 18, 19, 19, 8, 3,256, ZSTD_btultra2}, /* level 19.*/
+ { 18, 19, 19, 10, 3,512, ZSTD_btultra2}, /* level 20.*/
+ { 18, 19, 19, 12, 3,512, ZSTD_btultra2}, /* level 21.*/
+ { 18, 19, 19, 13, 3,999, ZSTD_btultra2}, /* level 22.*/
+},
+{ /* for srcSize <= 128 KB */
+ /* W, C, H, S, L, T, strat */
+ { 17, 12, 12, 1, 5, 1, ZSTD_fast }, /* base for negative levels */
+ { 17, 12, 13, 1, 6, 0, ZSTD_fast }, /* level 1 */
+ { 17, 13, 15, 1, 5, 0, ZSTD_fast }, /* level 2 */
+ { 17, 15, 16, 2, 5, 0, ZSTD_dfast }, /* level 3 */
+ { 17, 17, 17, 2, 4, 0, ZSTD_dfast }, /* level 4 */
+ { 17, 16, 17, 3, 4, 2, ZSTD_greedy }, /* level 5 */
+ { 17, 17, 17, 3, 4, 4, ZSTD_lazy }, /* level 6 */
+ { 17, 17, 17, 3, 4, 8, ZSTD_lazy2 }, /* level 7 */
+ { 17, 17, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */
+ { 17, 17, 17, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */
+ { 17, 17, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */
+ { 17, 17, 17, 5, 4, 8, ZSTD_btlazy2 }, /* level 11 */
+ { 17, 18, 17, 7, 4, 12, ZSTD_btlazy2 }, /* level 12 */
+ { 17, 18, 17, 3, 4, 12, ZSTD_btopt }, /* level 13.*/
+ { 17, 18, 17, 4, 3, 32, ZSTD_btopt }, /* level 14.*/
+ { 17, 18, 17, 6, 3,256, ZSTD_btopt }, /* level 15.*/
+ { 17, 18, 17, 6, 3,128, ZSTD_btultra }, /* level 16.*/
+ { 17, 18, 17, 8, 3,256, ZSTD_btultra }, /* level 17.*/
+ { 17, 18, 17, 10, 3,512, ZSTD_btultra }, /* level 18.*/
+ { 17, 18, 17, 5, 3,256, ZSTD_btultra2}, /* level 19.*/
+ { 17, 18, 17, 7, 3,512, ZSTD_btultra2}, /* level 20.*/
+ { 17, 18, 17, 9, 3,512, ZSTD_btultra2}, /* level 21.*/
+ { 17, 18, 17, 11, 3,999, ZSTD_btultra2}, /* level 22.*/
+},
+{ /* for srcSize <= 16 KB */
+ /* W, C, H, S, L, T, strat */
+ { 14, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */
+ { 14, 14, 15, 1, 5, 0, ZSTD_fast }, /* level 1 */
+ { 14, 14, 15, 1, 4, 0, ZSTD_fast }, /* level 2 */
+ { 14, 14, 15, 2, 4, 0, ZSTD_dfast }, /* level 3 */
+ { 14, 14, 14, 4, 4, 2, ZSTD_greedy }, /* level 4 */
+ { 14, 14, 14, 3, 4, 4, ZSTD_lazy }, /* level 5.*/
+ { 14, 14, 14, 4, 4, 8, ZSTD_lazy2 }, /* level 6 */
+ { 14, 14, 14, 6, 4, 8, ZSTD_lazy2 }, /* level 7 */
+ { 14, 14, 14, 8, 4, 8, ZSTD_lazy2 }, /* level 8.*/
+ { 14, 15, 14, 5, 4, 8, ZSTD_btlazy2 }, /* level 9.*/
+ { 14, 15, 14, 9, 4, 8, ZSTD_btlazy2 }, /* level 10.*/
+ { 14, 15, 14, 3, 4, 12, ZSTD_btopt }, /* level 11.*/
+ { 14, 15, 14, 4, 3, 24, ZSTD_btopt }, /* level 12.*/
+ { 14, 15, 14, 5, 3, 32, ZSTD_btultra }, /* level 13.*/
+ { 14, 15, 15, 6, 3, 64, ZSTD_btultra }, /* level 14.*/
+ { 14, 15, 15, 7, 3,256, ZSTD_btultra }, /* level 15.*/
+ { 14, 15, 15, 5, 3, 48, ZSTD_btultra2}, /* level 16.*/
+ { 14, 15, 15, 6, 3,128, ZSTD_btultra2}, /* level 17.*/
+ { 14, 15, 15, 7, 3,256, ZSTD_btultra2}, /* level 18.*/
+ { 14, 15, 15, 8, 3,256, ZSTD_btultra2}, /* level 19.*/
+ { 14, 15, 15, 8, 3,512, ZSTD_btultra2}, /* level 20.*/
+ { 14, 15, 15, 9, 3,512, ZSTD_btultra2}, /* level 21.*/
+ { 14, 15, 15, 10, 3,999, ZSTD_btultra2}, /* level 22.*/
+},
+};
+
+static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(int const compressionLevel, size_t const dictSize)
+{
+ ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, 0, dictSize, ZSTD_cpm_createCDict);
+ switch (cParams.strategy) {
+ case ZSTD_fast:
+ case ZSTD_dfast:
+ break;
+ case ZSTD_greedy:
+ case ZSTD_lazy:
+ case ZSTD_lazy2:
+ cParams.hashLog += ZSTD_LAZY_DDSS_BUCKET_LOG;
+ break;
+ case ZSTD_btlazy2:
+ case ZSTD_btopt:
+ case ZSTD_btultra:
+ case ZSTD_btultra2:
+ break;
+ }
+ return cParams;
+}
+
+static int ZSTD_dedicatedDictSearch_isSupported(
+ ZSTD_compressionParameters const* cParams)
+{
+ return (cParams->strategy >= ZSTD_greedy)
+ && (cParams->strategy <= ZSTD_lazy2)
+ && (cParams->hashLog >= cParams->chainLog)
+ && (cParams->chainLog <= 24);
+}
+
+/*
+ * Reverses the adjustment applied to cparams when enabling dedicated dict
+ * search. This is used to recover the params set to be used in the working
+ * context. (Otherwise, those tables would also grow.)
+ */
+static void ZSTD_dedicatedDictSearch_revertCParams(
+ ZSTD_compressionParameters* cParams) {
+ switch (cParams->strategy) {
+ case ZSTD_fast:
+ case ZSTD_dfast:
+ break;
+ case ZSTD_greedy:
+ case ZSTD_lazy:
+ case ZSTD_lazy2:
+ cParams->hashLog -= ZSTD_LAZY_DDSS_BUCKET_LOG;
+ break;
+ case ZSTD_btlazy2:
+ case ZSTD_btopt:
+ case ZSTD_btultra:
+ case ZSTD_btultra2:
+ break;
+ }
+}
+
+static U64 ZSTD_getCParamRowSize(U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
+{
+ switch (mode) {
+ case ZSTD_cpm_unknown:
+ case ZSTD_cpm_noAttachDict:
+ case ZSTD_cpm_createCDict:
+ break;
+ case ZSTD_cpm_attachDict:
+ dictSize = 0;
+ break;
+ default:
+ assert(0);
+ break;
+ }
+ { int const unknown = srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN;
+ size_t const addedSize = unknown && dictSize > 0 ? 500 : 0;
+ return unknown && dictSize == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : srcSizeHint+dictSize+addedSize;
+ }
+}
+
+/*! ZSTD_getCParams_internal() :
+ * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
+ * Note: srcSizeHint 0 means 0, use ZSTD_CONTENTSIZE_UNKNOWN for unknown.
+ * Use dictSize == 0 for unknown or unused.
+ * Note: `mode` controls how we treat the `dictSize`. See docs for `ZSTD_cParamMode_e`. */
+static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
+{
+ U64 const rSize = ZSTD_getCParamRowSize(srcSizeHint, dictSize, mode);
+ U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);
+ int row;
+ DEBUGLOG(5, "ZSTD_getCParams_internal (cLevel=%i)", compressionLevel);
+
+ /* row */
+ if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT; /* 0 == default */
+ else if (compressionLevel < 0) row = 0; /* entry 0 is baseline for fast mode */
+ else if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL;
+ else row = compressionLevel;
+
+ { ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row];
+ /* acceleration factor */
+ if (compressionLevel < 0) {
+ int const clampedCompressionLevel = MAX(ZSTD_minCLevel(), compressionLevel);
+ cp.targetLength = (unsigned)(-clampedCompressionLevel);
+ }
+ /* refine parameters based on srcSize & dictSize */
+ return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize, mode);
+ }
+}
+
+/*! ZSTD_getCParams() :
+ * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
+ * Size values are optional, provide 0 if not known or unused */
+ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)
+{
+ if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;
+ return ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown);
+}
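+
+/* Illustrative sketch (not part of the zstd sources) : ZSTD_getCParams() can be
+ * used to inspect or pre-size resources for a given level/srcSize/dictSize
+ * combination, e.g. to bound the window before creating a CDict. The helper name
+ * is hypothetical and the values are arbitrary.
+ */
+#if 0 /* example only, kept out of the build */
+static unsigned example_windowLogForLevel(int level, unsigned long long srcSize, size_t dictSize)
+{
+    ZSTD_compressionParameters const cParams = ZSTD_getCParams(level, srcSize, dictSize);
+    return cParams.windowLog;   /* can feed a memory-budget check before allocation */
+}
+#endif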
+
+/*! ZSTD_getParams_internal() :
+ * same idea as ZSTD_getCParams()
+ * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
+ * Fields of `ZSTD_frameParameters` are set to default values */
+static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) {
+ ZSTD_parameters params;
+ ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, mode);
+ DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel);
+ ZSTD_memset(&params, 0, sizeof(params));
+ params.cParams = cParams;
+ params.fParams.contentSizeFlag = 1;
+ return params;
+}
+
+/*! ZSTD_getParams() :
+ * same idea as ZSTD_getCParams()
+ * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
+ * Fields of `ZSTD_frameParameters` are set to default values */
+ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) {
+ if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;
+ return ZSTD_getParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown);
+}
diff --git a/lib/zstd/compress/zstd_compress_internal.h b/lib/zstd/compress/zstd_compress_internal.h
new file mode 100644
index 00000000000000..685d2f996cc2a6
--- /dev/null
+++ b/lib/zstd/compress/zstd_compress_internal.h
@@ -0,0 +1,1188 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* This header contains definitions
+ * that shall **only** be used by modules within lib/compress.
+ */
+
+#ifndef ZSTD_COMPRESS_H
+#define ZSTD_COMPRESS_H
+
+/*-*************************************
+* Dependencies
+***************************************/
+#include "../common/zstd_internal.h"
+#include "zstd_cwksp.h"
+
+
+/*-*************************************
+* Constants
+***************************************/
+#define kSearchStrength 8
+#define HASH_READ_SIZE 8
+#define ZSTD_DUBT_UNSORTED_MARK 1 /* For btlazy2 strategy, index ZSTD_DUBT_UNSORTED_MARK==1 means "unsorted".
+ It could be confused for a real successor at index "1", if sorted as larger than its predecessor.
+ It's not a big deal though : candidate will just be sorted again.
+ Additionally, candidate position 1 will be lost.
+ But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss.
+ The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table re-use with a different strategy.
+ This constant is required by ZSTD_compressBlock_btlazy2() and ZSTD_reduceTable_internal() */
+
+
+/*-*************************************
+* Context memory management
+***************************************/
+typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;
+typedef enum { zcss_init=0, zcss_load, zcss_flush } ZSTD_cStreamStage;
+
+typedef struct ZSTD_prefixDict_s {
+ const void* dict;
+ size_t dictSize;
+ ZSTD_dictContentType_e dictContentType;
+} ZSTD_prefixDict;
+
+typedef struct {
+ void* dictBuffer;
+ void const* dict;
+ size_t dictSize;
+ ZSTD_dictContentType_e dictContentType;
+ ZSTD_CDict* cdict;
+} ZSTD_localDict;
+
+typedef struct {
+ HUF_CElt CTable[HUF_CTABLE_SIZE_U32(255)];
+ HUF_repeat repeatMode;
+} ZSTD_hufCTables_t;
+
+typedef struct {
+ FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
+ FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
+ FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
+ FSE_repeat offcode_repeatMode;
+ FSE_repeat matchlength_repeatMode;
+ FSE_repeat litlength_repeatMode;
+} ZSTD_fseCTables_t;
+
+typedef struct {
+ ZSTD_hufCTables_t huf;
+ ZSTD_fseCTables_t fse;
+} ZSTD_entropyCTables_t;
+
+typedef struct {
+ U32 off; /* Offset code (offset + ZSTD_REP_MOVE) for the match */
+ U32 len; /* Raw length of match */
+} ZSTD_match_t;
+
+typedef struct {
+ U32 offset; /* Offset of sequence */
+ U32 litLength; /* Length of literals prior to match */
+ U32 matchLength; /* Raw length of match */
+} rawSeq;
+
+typedef struct {
+ rawSeq* seq; /* The start of the sequences */
+ size_t pos; /* The index in seq where reading stopped. pos <= size. */
+ size_t posInSequence; /* The position within the sequence at seq[pos] where reading
+ stopped. posInSequence <= seq[pos].litLength + seq[pos].matchLength */
+ size_t size; /* The number of sequences. <= capacity. */
+ size_t capacity; /* The capacity starting from `seq` pointer */
+} rawSeqStore_t;
+
+UNUSED_ATTR static const rawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0, 0};
+
+typedef struct {
+ int price;
+ U32 off;
+ U32 mlen;
+ U32 litlen;
+ U32 rep[ZSTD_REP_NUM];
+} ZSTD_optimal_t;
+
+typedef enum { zop_dynamic=0, zop_predef } ZSTD_OptPrice_e;
+
+typedef struct {
+ /* All tables are allocated inside cctx->workspace by ZSTD_resetCCtx_internal() */
+ unsigned* litFreq; /* table of literals statistics, of size 256 */
+ unsigned* litLengthFreq; /* table of litLength statistics, of size (MaxLL+1) */
+ unsigned* matchLengthFreq; /* table of matchLength statistics, of size (MaxML+1) */
+ unsigned* offCodeFreq; /* table of offCode statistics, of size (MaxOff+1) */
+ ZSTD_match_t* matchTable; /* list of found matches, of size ZSTD_OPT_NUM+1 */
+ ZSTD_optimal_t* priceTable; /* All positions tracked by optimal parser, of size ZSTD_OPT_NUM+1 */
+
+ U32 litSum; /* nb of literals */
+ U32 litLengthSum; /* nb of litLength codes */
+ U32 matchLengthSum; /* nb of matchLength codes */
+ U32 offCodeSum; /* nb of offset codes */
+ U32 litSumBasePrice; /* to compare to log2(litfreq) */
+ U32 litLengthSumBasePrice; /* to compare to log2(llfreq) */
+ U32 matchLengthSumBasePrice;/* to compare to log2(mlfreq) */
+ U32 offCodeSumBasePrice; /* to compare to log2(offreq) */
+ ZSTD_OptPrice_e priceType; /* prices can be determined dynamically, or follow a pre-defined cost structure */
+ const ZSTD_entropyCTables_t* symbolCosts; /* pre-calculated dictionary statistics */
+ ZSTD_literalCompressionMode_e literalCompressionMode;
+} optState_t;
+
+typedef struct {
+ ZSTD_entropyCTables_t entropy;
+ U32 rep[ZSTD_REP_NUM];
+} ZSTD_compressedBlockState_t;
+
+typedef struct {
+ BYTE const* nextSrc; /* next block here to continue on current prefix */
+ BYTE const* base; /* All regular indexes relative to this position */
+ BYTE const* dictBase; /* extDict indexes relative to this position */
+ U32 dictLimit; /* below that point, need extDict */
+ U32 lowLimit; /* below that point, no more valid data */
+} ZSTD_window_t;
+
+typedef struct ZSTD_matchState_t ZSTD_matchState_t;
+struct ZSTD_matchState_t {
+ ZSTD_window_t window; /* State for window round buffer management */
+ U32 loadedDictEnd; /* index of end of dictionary, within context's referential.
+ * When loadedDictEnd != 0, a dictionary is in use, and still valid.
+ * This relies on a mechanism to set loadedDictEnd=0 when dictionary is no longer within distance.
+ * Such mechanism is provided within ZSTD_window_enforceMaxDist() and ZSTD_checkDictValidity().
+ * When dict referential is copied into active context (i.e. not attached),
+ * loadedDictEnd == dictSize, since referential starts from zero.
+ */
+ U32 nextToUpdate; /* index from which to continue table update */
+ U32 hashLog3; /* dispatch table for matches of len==3 : larger == faster, more memory */
+ U32* hashTable;
+ U32* hashTable3;
+ U32* chainTable;
+ int dedicatedDictSearch; /* Indicates whether this matchState is using the
+ * dedicated dictionary search structure.
+ */
+ optState_t opt; /* optimal parser state */
+ const ZSTD_matchState_t* dictMatchState;
+ ZSTD_compressionParameters cParams;
+ const rawSeqStore_t* ldmSeqStore;
+};
+
+typedef struct {
+ ZSTD_compressedBlockState_t* prevCBlock;
+ ZSTD_compressedBlockState_t* nextCBlock;
+ ZSTD_matchState_t matchState;
+} ZSTD_blockState_t;
+
+typedef struct {
+ U32 offset;
+ U32 checksum;
+} ldmEntry_t;
+
+typedef struct {
+ BYTE const* split;
+ U32 hash;
+ U32 checksum;
+ ldmEntry_t* bucket;
+} ldmMatchCandidate_t;
+
+#define LDM_BATCH_SIZE 64
+
+typedef struct {
+ ZSTD_window_t window; /* State for the window round buffer management */
+ ldmEntry_t* hashTable;
+ U32 loadedDictEnd;
+ BYTE* bucketOffsets; /* Next position in bucket to insert entry */
+ size_t splitIndices[LDM_BATCH_SIZE];
+ ldmMatchCandidate_t matchCandidates[LDM_BATCH_SIZE];
+} ldmState_t;
+
+typedef struct {
+ U32 enableLdm; /* 1 if long distance matching is enabled */
+ U32 hashLog; /* Log size of hashTable */
+ U32 bucketSizeLog; /* Log bucket size for collision resolution, at most 8 */
+ U32 minMatchLength; /* Minimum match length */
+ U32 hashRateLog; /* Log number of entries to skip */
+ U32 windowLog; /* Window log for the LDM */
+} ldmParams_t;
+
+typedef struct {
+ int collectSequences;
+ ZSTD_Sequence* seqStart;
+ size_t seqIndex;
+ size_t maxSequences;
+} SeqCollector;
+
+struct ZSTD_CCtx_params_s {
+ ZSTD_format_e format;
+ ZSTD_compressionParameters cParams;
+ ZSTD_frameParameters fParams;
+
+ int compressionLevel;
+ int forceWindow; /* force back-references to respect limit of
+ * 1<<wLog, even for dictionary */
+ size_t targetCBlockSize; /* Tries to fit the compressed block size around targetCBlockSize.
+ * No target when targetCBlockSize == 0.
+ * There is no guarantee on compressed block size */
+ int srcSizeHint; /* User's best guess of source size.
+ * Hint is not valid when srcSizeHint == 0.
+ * There is no guarantee that hint is close to actual source size */
+
+ ZSTD_dictAttachPref_e attachDictPref;
+ ZSTD_literalCompressionMode_e literalCompressionMode;
+
+ /* Multithreading: used to pass parameters to mtctx */
+ int nbWorkers;
+ size_t jobSize;
+ int overlapLog;
+ int rsyncable;
+
+ /* Long distance matching parameters */
+ ldmParams_t ldmParams;
+
+ /* Dedicated dict search algorithm trigger */
+ int enableDedicatedDictSearch;
+
+ /* Input/output buffer modes */
+ ZSTD_bufferMode_e inBufferMode;
+ ZSTD_bufferMode_e outBufferMode;
+
+ /* Sequence compression API */
+ ZSTD_sequenceFormat_e blockDelimiters;
+ int validateSequences;
+
+ /* Internal use, for createCCtxParams() and freeCCtxParams() only */
+ ZSTD_customMem customMem;
+}; /* typedef'd to ZSTD_CCtx_params within "zstd.h" */
+
+#define COMPRESS_SEQUENCES_WORKSPACE_SIZE (sizeof(unsigned) * (MaxSeq + 2))
+#define ENTROPY_WORKSPACE_SIZE (HUF_WORKSPACE_SIZE + COMPRESS_SEQUENCES_WORKSPACE_SIZE)
+
+/*
+ * Indicates whether this compression proceeds directly from user-provided
+ * source buffer to user-provided destination buffer (ZSTDb_not_buffered), or
+ * whether the context needs to buffer the input/output (ZSTDb_buffered).
+ */
+typedef enum {
+ ZSTDb_not_buffered,
+ ZSTDb_buffered
+} ZSTD_buffered_policy_e;
+
+struct ZSTD_CCtx_s {
+ ZSTD_compressionStage_e stage;
+ int cParamsChanged; /* == 1 if cParams(except wlog) or compression level are changed in requestedParams. Triggers transmission of new params to ZSTDMT (if available) then reset to 0. */
+ int bmi2; /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
+ ZSTD_CCtx_params requestedParams;
+ ZSTD_CCtx_params appliedParams;
+ U32 dictID;
+ size_t dictContentSize;
+
+ ZSTD_cwksp workspace; /* manages buffer for dynamic allocations */
+ size_t blockSize;
+ unsigned long long pledgedSrcSizePlusOne; /* this way, 0 (default) == unknown */
+ unsigned long long consumedSrcSize;
+ unsigned long long producedCSize;
+ struct xxh64_state xxhState;
+ ZSTD_customMem customMem;
+ ZSTD_threadPool* pool;
+ size_t staticSize;
+ SeqCollector seqCollector;
+ int isFirstBlock;
+ int initialized;
+
+ seqStore_t seqStore; /* sequences storage ptrs */
+ ldmState_t ldmState; /* long distance matching state */
+ rawSeq* ldmSequences; /* Storage for the ldm output sequences */
+ size_t maxNbLdmSequences;
+ rawSeqStore_t externSeqStore; /* Mutable reference to external sequences */
+ ZSTD_blockState_t blockState;
+ U32* entropyWorkspace; /* entropy workspace of ENTROPY_WORKSPACE_SIZE bytes */
+
+ /* Whether we are streaming or not */
+ ZSTD_buffered_policy_e bufferedPolicy;
+
+ /* streaming */
+ char* inBuff;
+ size_t inBuffSize;
+ size_t inToCompress;
+ size_t inBuffPos;
+ size_t inBuffTarget;
+ char* outBuff;
+ size_t outBuffSize;
+ size_t outBuffContentSize;
+ size_t outBuffFlushedSize;
+ ZSTD_cStreamStage streamStage;
+ U32 frameEnded;
+
+ /* Stable in/out buffer verification */
+ ZSTD_inBuffer expectedInBuffer;
+ size_t expectedOutBufferSize;
+
+ /* Dictionary */
+ ZSTD_localDict localDict;
+ const ZSTD_CDict* cdict;
+ ZSTD_prefixDict prefixDict; /* single-usage dictionary */
+
+ /* Multi-threading */
+
+ /* Tracing */
+};
+
+typedef enum { ZSTD_dtlm_fast, ZSTD_dtlm_full } ZSTD_dictTableLoadMethod_e;
+
+typedef enum {
+ ZSTD_noDict = 0,
+ ZSTD_extDict = 1,
+ ZSTD_dictMatchState = 2,
+ ZSTD_dedicatedDictSearch = 3
+} ZSTD_dictMode_e;
+
+typedef enum {
+ ZSTD_cpm_noAttachDict = 0, /* Compression with ZSTD_noDict or ZSTD_extDict.
+ * In this mode we use both the srcSize and the dictSize
+ * when selecting and adjusting parameters.
+ */
+ ZSTD_cpm_attachDict = 1, /* Compression with ZSTD_dictMatchState or ZSTD_dedicatedDictSearch.
+ * In this mode we only take the srcSize into account when selecting
+ * and adjusting parameters.
+ */
+ ZSTD_cpm_createCDict = 2, /* Creating a CDict.
+ * In this mode we take both the source size and the dictionary size
+ * into account when selecting and adjusting the parameters.
+ */
+ ZSTD_cpm_unknown = 3, /* ZSTD_getCParams, ZSTD_getParams, ZSTD_adjustParams.
+ * We don't know what these parameters are for. We default to the legacy
+ * behavior of taking both the source size and the dict size into account
+ * when selecting and adjusting parameters.
+ */
+} ZSTD_cParamMode_e;
+
+typedef size_t (*ZSTD_blockCompressor) (
+ ZSTD_matchState_t* bs, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode);
+
+
+MEM_STATIC U32 ZSTD_LLcode(U32 litLength)
+{
+ static const BYTE LL_Code[64] = { 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 16, 17, 17, 18, 18, 19, 19,
+ 20, 20, 20, 20, 21, 21, 21, 21,
+ 22, 22, 22, 22, 22, 22, 22, 22,
+ 23, 23, 23, 23, 23, 23, 23, 23,
+ 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24 };
+ static const U32 LL_deltaCode = 19;
+ return (litLength > 63) ? ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
+}
+
+/* ZSTD_MLcode() :
+ * note : mlBase = matchLength - MINMATCH;
+ * because that's the format in which it is stored in seqStore->sequences */
+MEM_STATIC U32 ZSTD_MLcode(U32 mlBase)
+{
+ static const BYTE ML_Code[128] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37,
+ 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39,
+ 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
+ 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
+ 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
+ 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 };
+ static const U32 ML_deltaCode = 36;
+ return (mlBase > 127) ? ZSTD_highbit32(mlBase) + ML_deltaCode : ML_Code[mlBase];
+}
+
+typedef struct repcodes_s {
+ U32 rep[3];
+} repcodes_t;
+
+MEM_STATIC repcodes_t ZSTD_updateRep(U32 const rep[3], U32 const offset, U32 const ll0)
+{
+ repcodes_t newReps;
+ if (offset >= ZSTD_REP_NUM) { /* full offset */
+ newReps.rep[2] = rep[1];
+ newReps.rep[1] = rep[0];
+ newReps.rep[0] = offset - ZSTD_REP_MOVE;
+ } else { /* repcode */
+ U32 const repCode = offset + ll0;
+ if (repCode > 0) { /* note : if repCode==0, no change */
+ U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
+ newReps.rep[2] = (repCode >= 2) ? rep[1] : rep[2];
+ newReps.rep[1] = rep[0];
+ newReps.rep[0] = currentOffset;
+ } else { /* repCode == 0 */
+ ZSTD_memcpy(&newReps, rep, sizeof(newReps));
+ }
+ }
+ return newReps;
+}
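+
+/* A minimal illustration of the update rule above (values are hypothetical;
+ * `offset` is the stored offCode, i.e. distance + ZSTD_REP_MOVE for real offsets).
+ * Starting from rep = {4, 8, 16} :
+ * - a real match at distance 100 (offCode 100 + ZSTD_REP_MOVE) gives {100, 4, 8}
+ * - offCode 1 with ll0 == 0 selects rep[1] and gives {8, 4, 16}
+ * - offCode 0 with ll0 == 0 re-uses rep[0], leaving {4, 8, 16} unchanged
+ */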
+
+/* ZSTD_cParam_withinBounds:
+ * @return 1 if value is within cParam bounds,
+ * 0 otherwise */
+MEM_STATIC int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value)
+{
+ ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
+ if (ZSTD_isError(bounds.error)) return 0;
+ if (value < bounds.lowerBound) return 0;
+ if (value > bounds.upperBound) return 0;
+ return 1;
+}
+
+/* ZSTD_noCompressBlock() :
+ * Writes uncompressed block to dst buffer from given src.
+ * Returns the size of the block */
+MEM_STATIC size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock)
+{
+ U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(srcSize << 3);
+ RETURN_ERROR_IF(srcSize + ZSTD_blockHeaderSize > dstCapacity,
+ dstSize_tooSmall, "dst buf too small for uncompressed block");
+ MEM_writeLE24(dst, cBlockHeader24);
+ ZSTD_memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize);
+ return ZSTD_blockHeaderSize + srcSize;
+}
+
+MEM_STATIC size_t ZSTD_rleCompressBlock (void* dst, size_t dstCapacity, BYTE src, size_t srcSize, U32 lastBlock)
+{
+ BYTE* const op = (BYTE*)dst;
+ U32 const cBlockHeader = lastBlock + (((U32)bt_rle)<<1) + (U32)(srcSize << 3);
+ RETURN_ERROR_IF(dstCapacity < 4, dstSize_tooSmall, "");
+ MEM_writeLE24(op, cBlockHeader);
+ op[3] = src;
+ return 4;
+}
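+
+/* Both helpers above emit the standard 3-byte zstd block header, little-endian :
+ * bit 0 : lastBlock flag, bits 1-2 : block type (bt_raw==0, bt_rle==1), bits 3-23 : srcSize.
+ * For example (hypothetical values), a non-last raw block of 100 bytes would use
+ * header value 100<<3 == 0x320. */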
+
+
+/* ZSTD_minGain() :
+ * minimum compression required
+ * to generate a compressed block or a compressed literals section.
+ * note : use same formula for both situations */
+MEM_STATIC size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat)
+{
+ U32 const minlog = (strat>=ZSTD_btultra) ? (U32)(strat) - 1 : 6;
+ ZSTD_STATIC_ASSERT(ZSTD_btultra == 8);
+ assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
+ return (srcSize >> minlog) + 2;
+}
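+
+/* Worked example for the formula above (sizes are hypothetical) : for a 1 KB input,
+ * strategies below ZSTD_btultra use minlog = 6, requiring (1024 >> 6) + 2 == 18 saved bytes,
+ * while ZSTD_btultra (strat == 8) uses minlog = 7, requiring (1024 >> 7) + 2 == 10 saved bytes;
+ * callers typically fall back to an uncompressed block or literals section when the gain is smaller. */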
+
+MEM_STATIC int ZSTD_disableLiteralsCompression(const ZSTD_CCtx_params* cctxParams)
+{
+ switch (cctxParams->literalCompressionMode) {
+ case ZSTD_lcm_huffman:
+ return 0;
+ case ZSTD_lcm_uncompressed:
+ return 1;
+ default:
+ assert(0 /* impossible: pre-validated */);
+ ZSTD_FALLTHROUGH;
+ case ZSTD_lcm_auto:
+ return (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0);
+ }
+}
+
+/*! ZSTD_safecopyLiterals() :
+ * memcpy() function that won't read more than WILDCOPY_OVERLENGTH bytes past ilimit_w.
+ * Only called when the sequence ends past ilimit_w, so it only needs to be optimized for single
+ * large copies.
+ */
+static void ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE const* ilimit_w) {
+ assert(iend > ilimit_w);
+ if (ip <= ilimit_w) {
+ ZSTD_wildcopy(op, ip, ilimit_w - ip, ZSTD_no_overlap);
+ op += ilimit_w - ip;
+ ip = ilimit_w;
+ }
+ while (ip < iend) *op++ = *ip++;
+}
+
+/*! ZSTD_storeSeq() :
+ * Store a sequence (litlen, litPtr, offCode and mlBase) into seqStore_t.
+ * `offCode` : distance to match + ZSTD_REP_MOVE (values <= ZSTD_REP_MOVE are repCodes).
+ * `mlBase` : matchLength - MINMATCH
+ * Allowed to overread literals up to litLimit.
+*/
+HINT_INLINE UNUSED_ATTR
+void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const BYTE* literals, const BYTE* litLimit, U32 offCode, size_t mlBase)
+{
+ BYTE const* const litLimit_w = litLimit - WILDCOPY_OVERLENGTH;
+ BYTE const* const litEnd = literals + litLength;
+#if defined(DEBUGLEVEL) && (DEBUGLEVEL >= 6)
+ static const BYTE* g_start = NULL;
+ if (g_start==NULL) g_start = (const BYTE*)literals; /* note : index only works for compression within a single segment */
+ { U32 const pos = (U32)((const BYTE*)literals - g_start);
+ DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offCode%7u",
+ pos, (U32)litLength, (U32)mlBase+MINMATCH, (U32)offCode);
+ }
+#endif
+ assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq);
+ /* copy Literals */
+ assert(seqStorePtr->maxNbLit <= 128 KB);
+ assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + seqStorePtr->maxNbLit);
+ assert(literals + litLength <= litLimit);
+ if (litEnd <= litLimit_w) {
+ /* Common case we can use wildcopy.
+ * First copy 16 bytes, because literals are likely short.
+ */
+ assert(WILDCOPY_OVERLENGTH >= 16);
+ ZSTD_copy16(seqStorePtr->lit, literals);
+ if (litLength > 16) {
+ ZSTD_wildcopy(seqStorePtr->lit+16, literals+16, (ptrdiff_t)litLength-16, ZSTD_no_overlap);
+ }
+ } else {
+ ZSTD_safecopyLiterals(seqStorePtr->lit, literals, litEnd, litLimit_w);
+ }
+ seqStorePtr->lit += litLength;
+
+ /* literal Length */
+ if (litLength>0xFFFF) {
+ assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */
+ seqStorePtr->longLengthID = 1;
+ seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+ }
+ seqStorePtr->sequences[0].litLength = (U16)litLength;
+
+ /* match offset */
+ seqStorePtr->sequences[0].offset = offCode + 1;
+
+ /* match Length */
+ if (mlBase>0xFFFF) {
+ assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */
+ seqStorePtr->longLengthID = 2;
+ seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+ }
+ seqStorePtr->sequences[0].matchLength = (U16)mlBase;
+
+ seqStorePtr->sequences++;
+}
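+
+/* Usage sketch (names are illustrative) : a block compressor that matched
+ * `matchLength` bytes at distance `dist`, preceded by `litLen` literals starting
+ * at `anchor`, records it as :
+ *   ZSTD_storeSeq(seqStore, litLen, anchor, iend, dist + ZSTD_REP_MOVE, matchLength - MINMATCH);
+ * A repeated offset is recorded by passing an offCode <= ZSTD_REP_MOVE instead. */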
+
+
+/*-*************************************
+* Match length counter
+***************************************/
+static unsigned ZSTD_NbCommonBytes (size_t val)
+{
+ if (MEM_isLittleEndian()) {
+ if (MEM_64bits()) {
+# if (__GNUC__ >= 4)
+ return (__builtin_ctzll((U64)val) >> 3);
+# else
+ static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2,
+ 0, 3, 1, 3, 1, 4, 2, 7,
+ 0, 2, 3, 6, 1, 5, 3, 5,
+ 1, 3, 4, 4, 2, 5, 6, 7,
+ 7, 0, 1, 2, 3, 3, 4, 6,
+ 2, 6, 5, 5, 3, 4, 5, 6,
+ 7, 1, 2, 4, 6, 4, 4, 5,
+ 7, 2, 6, 5, 7, 6, 7, 7 };
+ return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
+# endif
+ } else { /* 32 bits */
+# if (__GNUC__ >= 3)
+ return (__builtin_ctz((U32)val) >> 3);
+# else
+ static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0,
+ 3, 2, 2, 1, 3, 2, 0, 1,
+ 3, 3, 1, 2, 2, 2, 2, 0,
+ 3, 1, 2, 0, 1, 0, 1, 1 };
+ return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
+# endif
+ }
+ } else { /* Big Endian CPU */
+ if (MEM_64bits()) {
+# if (__GNUC__ >= 4)
+ return (__builtin_clzll(val) >> 3);
+# else
+ unsigned r;
+ const unsigned n32 = sizeof(size_t)*4; /* calculate this way due to compiler complaining in 32-bit mode */
+ if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
+ if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
+ r += (!val);
+ return r;
+# endif
+ } else { /* 32 bits */
+# if (__GNUC__ >= 3)
+ return (__builtin_clz((U32)val) >> 3);
+# else
+ unsigned r;
+ if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
+ r += (!val);
+ return r;
+# endif
+ } }
+}
+
+
+MEM_STATIC size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit)
+{
+ const BYTE* const pStart = pIn;
+ const BYTE* const pInLoopLimit = pInLimit - (sizeof(size_t)-1);
+
+ if (pIn < pInLoopLimit) {
+ { size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
+ if (diff) return ZSTD_NbCommonBytes(diff); }
+ pIn+=sizeof(size_t); pMatch+=sizeof(size_t);
+ while (pIn < pInLoopLimit) {
+ size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
+ if (!diff) { pIn+=sizeof(size_t); pMatch+=sizeof(size_t); continue; }
+ pIn += ZSTD_NbCommonBytes(diff);
+ return (size_t)(pIn - pStart);
+ } }
+ if (MEM_64bits() && (pIn<(pInLimit-3)) && (MEM_read32(pMatch) == MEM_read32(pIn))) { pIn+=4; pMatch+=4; }
+ if ((pIn<(pInLimit-1)) && (MEM_read16(pMatch) == MEM_read16(pIn))) { pIn+=2; pMatch+=2; }
+ if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
+ return (size_t)(pIn - pStart);
+}
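+
+/* Example (assuming a little-endian 64-bit target with enough input left) :
+ * comparing "abcdefgh" against "abcdXYZW", the two 8-byte loads XOR to a value
+ * whose lowest non-zero byte is byte 4, so ZSTD_NbCommonBytes() returns 4 and
+ * ZSTD_count() reports a 4-byte match without any per-byte loop. */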
+
+/* ZSTD_count_2segments() :
+ * can count match length with `ip` & `match` in 2 different segments.
+ * convention : on reaching mEnd, match count continues starting from iStart
+ */
+MEM_STATIC size_t
+ZSTD_count_2segments(const BYTE* ip, const BYTE* match,
+ const BYTE* iEnd, const BYTE* mEnd, const BYTE* iStart)
+{
+ const BYTE* const vEnd = MIN( ip + (mEnd - match), iEnd);
+ size_t const matchLength = ZSTD_count(ip, match, vEnd);
+ if (match + matchLength != mEnd) return matchLength;
+ DEBUGLOG(7, "ZSTD_count_2segments: found a 2-parts match (current length==%zu)", matchLength);
+ DEBUGLOG(7, "distance from match beginning to end dictionary = %zi", mEnd - match);
+ DEBUGLOG(7, "distance from current pos to end buffer = %zi", iEnd - ip);
+ DEBUGLOG(7, "next byte : ip==%02X, istart==%02X", ip[matchLength], *iStart);
+ DEBUGLOG(7, "final match length = %zu", matchLength + ZSTD_count(ip+matchLength, iStart, iEnd));
+ return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd);
+}
+
+
+/*-*************************************
+ * Hashes
+ ***************************************/
+static const U32 prime3bytes = 506832829U;
+static U32 ZSTD_hash3(U32 u, U32 h) { return ((u << (32-24)) * prime3bytes) >> (32-h) ; }
+MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */
+
+static const U32 prime4bytes = 2654435761U;
+static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; }
+static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); }
+
+static const U64 prime5bytes = 889523592379ULL;
+static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64-40)) * prime5bytes) >> (64-h)) ; }
+static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); }
+
+static const U64 prime6bytes = 227718039650203ULL;
+static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64-48)) * prime6bytes) >> (64-h)) ; }
+static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); }
+
+static const U64 prime7bytes = 58295818150454627ULL;
+static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64-56)) * prime7bytes) >> (64-h)) ; }
+static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); }
+
+static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
+static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; }
+static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); }
+
+MEM_STATIC FORCE_INLINE_ATTR
+size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
+{
+ switch(mls)
+ {
+ default:
+ case 4: return ZSTD_hash4Ptr(p, hBits);
+ case 5: return ZSTD_hash5Ptr(p, hBits);
+ case 6: return ZSTD_hash6Ptr(p, hBits);
+ case 7: return ZSTD_hash7Ptr(p, hBits);
+ case 8: return ZSTD_hash8Ptr(p, hBits);
+ }
+}
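+
+/* These hashes index the match-finder tables : a block compressor typically
+ * records the current position as
+ *   hashTable[ZSTD_hashPtr(ip, hashLog, mls)] = (U32)(ip - base);
+ * where `mls` selects how many leading bytes of `ip` (4 to 8) feed the hash. */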
+
+/* ZSTD_ipow() :
+ * Return base^exponent.
+ */
+static U64 ZSTD_ipow(U64 base, U64 exponent)
+{
+ U64 power = 1;
+ while (exponent) {
+ if (exponent & 1) power *= base;
+ exponent >>= 1;
+ base *= base;
+ }
+ return power;
+}
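+
+/* ZSTD_ipow() is exponentiation by squaring (modulo 2^64). For instance,
+ * ZSTD_ipow(3, 5) walks exponent 0b101 : power = 3 (bit 0), base becomes 9 then 81,
+ * then power *= 81 (bit 2), returning 243. */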
+
+#define ZSTD_ROLL_HASH_CHAR_OFFSET 10
+
+/* ZSTD_rollingHash_append() :
+ * Add the buffer to the hash value.
+ */
+static U64 ZSTD_rollingHash_append(U64 hash, void const* buf, size_t size)
+{
+ BYTE const* istart = (BYTE const*)buf;
+ size_t pos;
+ for (pos = 0; pos < size; ++pos) {
+ hash *= prime8bytes;
+ hash += istart[pos] + ZSTD_ROLL_HASH_CHAR_OFFSET;
+ }
+ return hash;
+}
+
+/* ZSTD_rollingHash_compute() :
+ * Compute the rolling hash value of the buffer.
+ */
+MEM_STATIC U64 ZSTD_rollingHash_compute(void const* buf, size_t size)
+{
+ return ZSTD_rollingHash_append(0, buf, size);
+}
+
+/* ZSTD_rollingHash_primePower() :
+ * Compute the primePower to be passed to ZSTD_rollingHash_rotate() for a hash
+ * over a window of length bytes.
+ */
+MEM_STATIC U64 ZSTD_rollingHash_primePower(U32 length)
+{
+ return ZSTD_ipow(prime8bytes, length - 1);
+}
+
+/* ZSTD_rollingHash_rotate() :
+ * Rotate the rolling hash by one byte.
+ */
+MEM_STATIC U64 ZSTD_rollingHash_rotate(U64 hash, BYTE toRemove, BYTE toAdd, U64 primePower)
+{
+ hash -= (toRemove + ZSTD_ROLL_HASH_CHAR_OFFSET) * primePower;
+ hash *= prime8bytes;
+ hash += toAdd + ZSTD_ROLL_HASH_CHAR_OFFSET;
+ return hash;
+}
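+
+/* Sliding-window invariant provided by the helpers above (illustrative sketch;
+ * `buf`, `bufSize` and `length` are hypothetical) :
+ *
+ *   U64 const pp = ZSTD_rollingHash_primePower(length);
+ *   U64 hash = ZSTD_rollingHash_compute(buf, length);
+ *   size_t n;
+ *   for (n = 0; n + length < bufSize; n++) {
+ *       hash = ZSTD_rollingHash_rotate(hash, buf[n], buf[n + length], pp);
+ *       assert(hash == ZSTD_rollingHash_compute(buf + n + 1, length));
+ *   }
+ *
+ * i.e. the hash of the next window is derived in O(1) instead of O(length). */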
+
+/*-*************************************
+* Round buffer management
+***************************************/
+#if (ZSTD_WINDOWLOG_MAX_64 > 31)
+# error "ZSTD_WINDOWLOG_MAX is too large : would overflow ZSTD_CURRENT_MAX"
+#endif
+/* Max current allowed */
+#define ZSTD_CURRENT_MAX ((3U << 29) + (1U << ZSTD_WINDOWLOG_MAX))
+/* Maximum chunk size before overflow correction needs to be called again */
+#define ZSTD_CHUNKSIZE_MAX \
+ ( ((U32)-1) /* Maximum ending current index */ \
+ - ZSTD_CURRENT_MAX) /* Maximum beginning lowLimit */
+
+/*
+ * ZSTD_window_clear():
+ * Clears the window containing the history by simply setting it to empty.
+ */
+MEM_STATIC void ZSTD_window_clear(ZSTD_window_t* window)
+{
+ size_t const endT = (size_t)(window->nextSrc - window->base);
+ U32 const end = (U32)endT;
+
+ window->lowLimit = end;
+ window->dictLimit = end;
+}
+
+/*
+ * ZSTD_window_hasExtDict():
+ * Returns non-zero if the window has a non-empty extDict.
+ */
+MEM_STATIC U32 ZSTD_window_hasExtDict(ZSTD_window_t const window)
+{
+ return window.lowLimit < window.dictLimit;
+}
+
+/*
+ * ZSTD_matchState_dictMode():
+ * Inspects the provided matchState and figures out what dictMode should be
+ * passed to the compressor.
+ */
+MEM_STATIC ZSTD_dictMode_e ZSTD_matchState_dictMode(const ZSTD_matchState_t *ms)
+{
+ return ZSTD_window_hasExtDict(ms->window) ?
+ ZSTD_extDict :
+ ms->dictMatchState != NULL ?
+ (ms->dictMatchState->dedicatedDictSearch ? ZSTD_dedicatedDictSearch : ZSTD_dictMatchState) :
+ ZSTD_noDict;
+}
+
+/*
+ * ZSTD_window_needOverflowCorrection():
+ * Returns non-zero if the indices are getting too large and need overflow
+ * protection.
+ */
+MEM_STATIC U32 ZSTD_window_needOverflowCorrection(ZSTD_window_t const window,
+ void const* srcEnd)
+{
+ U32 const curr = (U32)((BYTE const*)srcEnd - window.base);
+ return curr > ZSTD_CURRENT_MAX;
+}
+
+/*
+ * ZSTD_window_correctOverflow():
+ * Reduces the indices to protect from index overflow.
+ * Returns the correction made to the indices, which must be applied to every
+ * stored index.
+ *
+ * The least significant cycleLog bits of the indices must remain the same,
+ * which may be 0. Every index up to maxDist in the past must be valid.
+ * NOTE: (maxDist & cycleMask) must be zero.
+ */
+MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
+ U32 maxDist, void const* src)
+{
+ /* preemptive overflow correction:
+ * 1. correction is large enough:
+ * lowLimit > (3<<29) ==> current > 3<<29 + 1<<windowLog
+ * 1<<windowLog <= newCurrent < 1<<chainLog + 1<<windowLog
+ *
+ * current - newCurrent
+ * > (3<<29 + 1<<windowLog) - (1<<windowLog + 1<<chainLog)
+ * > (3<<29) - (1<<chainLog)
+ * > (3<<29) - (1<<30) (NOTE: chainLog <= 30)
+ * > 1<<29
+ *
+ * 2. (ip+ZSTD_CHUNKSIZE_MAX - cctx->base) doesn't overflow:
+ * After correction, current is less than (1<<chainLog + 1<<windowLog).
+ * In 64-bit mode we are safe, because we have 64-bit ptrdiff_t.
+ * In 32-bit mode we are safe, because (chainLog <= 29), so
+ * ip+ZSTD_CHUNKSIZE_MAX - cctx->base < 1<<32.
+ * 3. (cctx->lowLimit + 1<<windowLog) < 1<<32:
+ * windowLog <= 31 ==> 3<<29 + 1<<windowLog < 7<<29 < 1<<32.
+ */
+ U32 const cycleMask = (1U << cycleLog) - 1;
+ U32 const curr = (U32)((BYTE const*)src - window->base);
+ U32 const currentCycle0 = curr & cycleMask;
+ /* Exclude zero so that newCurrent - maxDist >= 1. */
+ U32 const currentCycle1 = currentCycle0 == 0 ? (1U << cycleLog) : currentCycle0;
+ U32 const newCurrent = currentCycle1 + maxDist;
+ U32 const correction = curr - newCurrent;
+ assert((maxDist & cycleMask) == 0);
+ assert(curr > newCurrent);
+ /* Loose bound, should be around 1<<29 (see above) */
+ assert(correction > 1<<28);
+
+ window->base += correction;
+ window->dictBase += correction;
+ if (window->lowLimit <= correction) window->lowLimit = 1;
+ else window->lowLimit -= correction;
+ if (window->dictLimit <= correction) window->dictLimit = 1;
+ else window->dictLimit -= correction;
+
+ /* Ensure we can still reference the full window. */
+ assert(newCurrent >= maxDist);
+ assert(newCurrent - maxDist >= 1);
+ /* Ensure that lowLimit and dictLimit didn't underflow. */
+ assert(window->lowLimit <= newCurrent);
+ assert(window->dictLimit <= newCurrent);
+
+ DEBUGLOG(4, "Correction of 0x%x bytes to lowLimit=0x%x", correction,
+ window->lowLimit);
+ return correction;
+}
+
+/*
+ * ZSTD_window_enforceMaxDist():
+ * Updates lowLimit so that:
+ * (srcEnd - base) - lowLimit == maxDist + loadedDictEnd
+ *
+ * It ensures index is valid as long as index >= lowLimit.
+ * This must be called before a block compression call.
+ *
+ * loadedDictEnd is only defined if a dictionary is in use for current compression.
+ * As the name implies, loadedDictEnd represents the index at end of dictionary.
+ * The value lies within the context's referential; it can be directly compared to blockEndIdx.
+ *
+ * If loadedDictEndPtr is NULL, no dictionary is in use, and we use loadedDictEnd == 0.
+ * If loadedDictEndPtr is not NULL, we set it to zero after updating lowLimit.
+ * This is because dictionaries are allowed to be referenced fully
+ * as long as the last byte of the dictionary is in the window.
+ * Once input has progressed beyond window size, dictionary cannot be referenced anymore.
+ *
+ * In normal dict mode, the dictionary lies between lowLimit and dictLimit.
+ * In dictMatchState mode, lowLimit and dictLimit are the same,
+ * and the dictionary is below them.
+ * forceWindow and dictMatchState are therefore incompatible.
+ */
+MEM_STATIC void
+ZSTD_window_enforceMaxDist(ZSTD_window_t* window,
+ const void* blockEnd,
+ U32 maxDist,
+ U32* loadedDictEndPtr,
+ const ZSTD_matchState_t** dictMatchStatePtr)
+{
+ U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
+ U32 const loadedDictEnd = (loadedDictEndPtr != NULL) ? *loadedDictEndPtr : 0;
+ DEBUGLOG(5, "ZSTD_window_enforceMaxDist: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u",
+ (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
+
+ /* - When there is no dictionary : loadedDictEnd == 0.
+ In which case, the test (blockEndIdx > maxDist) is merely to avoid
+ overflowing next operation `newLowLimit = blockEndIdx - maxDist`.
+ - When there is a standard dictionary :
+ Index referential is copied from the dictionary,
+ which means it starts from 0.
+ In which case, loadedDictEnd == dictSize,
+ and it makes sense to compare `blockEndIdx > maxDist + dictSize`
+ since `blockEndIdx` also starts from zero.
+ - When there is an attached dictionary :
+ loadedDictEnd is expressed within the referential of the context,
+ so it can be directly compared against blockEndIdx.
+ */
+ if (blockEndIdx > maxDist + loadedDictEnd) {
+ U32 const newLowLimit = blockEndIdx - maxDist;
+ if (window->lowLimit < newLowLimit) window->lowLimit = newLowLimit;
+ if (window->dictLimit < window->lowLimit) {
+ DEBUGLOG(5, "Update dictLimit to match lowLimit, from %u to %u",
+ (unsigned)window->dictLimit, (unsigned)window->lowLimit);
+ window->dictLimit = window->lowLimit;
+ }
+ /* On reaching window size, dictionaries are invalidated */
+ if (loadedDictEndPtr) *loadedDictEndPtr = 0;
+ if (dictMatchStatePtr) *dictMatchStatePtr = NULL;
+ }
+}
+
+/* Similar to ZSTD_window_enforceMaxDist(),
+ * but only invalidates dictionary
+ * when input progresses beyond window size.
+ * assumption : loadedDictEndPtr and dictMatchStatePtr are valid (non NULL)
+ * loadedDictEnd uses same referential as window->base
+ * maxDist is the window size */
+MEM_STATIC void
+ZSTD_checkDictValidity(const ZSTD_window_t* window,
+ const void* blockEnd,
+ U32 maxDist,
+ U32* loadedDictEndPtr,
+ const ZSTD_matchState_t** dictMatchStatePtr)
+{
+ assert(loadedDictEndPtr != NULL);
+ assert(dictMatchStatePtr != NULL);
+ { U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
+ U32 const loadedDictEnd = *loadedDictEndPtr;
+ DEBUGLOG(5, "ZSTD_checkDictValidity: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u",
+ (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
+ assert(blockEndIdx >= loadedDictEnd);
+
+ if (blockEndIdx > loadedDictEnd + maxDist) {
+ /* On reaching window size, dictionaries are invalidated.
+ * For simplification, if window size is reached anywhere within next block,
+ * the dictionary is invalidated for the full block.
+ */
+ DEBUGLOG(6, "invalidating dictionary for current block (distance > windowSize)");
+ *loadedDictEndPtr = 0;
+ *dictMatchStatePtr = NULL;
+ } else {
+ if (*loadedDictEndPtr != 0) {
+ DEBUGLOG(6, "dictionary considered valid for current block");
+ } } }
+}
+
+MEM_STATIC void ZSTD_window_init(ZSTD_window_t* window) {
+ ZSTD_memset(window, 0, sizeof(*window));
+ window->base = (BYTE const*)"";
+ window->dictBase = (BYTE const*)"";
+ window->dictLimit = 1; /* start from 1, so that 1st position is valid */
+ window->lowLimit = 1; /* it ensures first and later CCtx usages compress the same */
+ window->nextSrc = window->base + 1; /* see issue #1241 */
+}
+
+/*
+ * ZSTD_window_update():
+ * Updates the window by appending [src, src + srcSize) to the window.
+ * If it is not contiguous, the current prefix becomes the extDict, and we
+ * forget about the extDict. Handles overlap of the prefix and extDict.
+ * Returns non-zero if the segment is contiguous.
+ */
+MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,
+ void const* src, size_t srcSize)
+{
+ BYTE const* const ip = (BYTE const*)src;
+ U32 contiguous = 1;
+ DEBUGLOG(5, "ZSTD_window_update");
+ if (srcSize == 0)
+ return contiguous;
+ assert(window->base != NULL);
+ assert(window->dictBase != NULL);
+ /* Check if blocks follow each other */
+ if (src != window->nextSrc) {
+ /* not contiguous */
+ size_t const distanceFromBase = (size_t)(window->nextSrc - window->base);
+ DEBUGLOG(5, "Non contiguous blocks, new segment starts at %u", window->dictLimit);
+ window->lowLimit = window->dictLimit;
+ assert(distanceFromBase == (size_t)(U32)distanceFromBase); /* should never overflow */
+ window->dictLimit = (U32)distanceFromBase;
+ window->dictBase = window->base;
+ window->base = ip - distanceFromBase;
+ /* ms->nextToUpdate = window->dictLimit; */
+ if (window->dictLimit - window->lowLimit < HASH_READ_SIZE) window->lowLimit = window->dictLimit; /* too small extDict */
+ contiguous = 0;
+ }
+ window->nextSrc = ip + srcSize;
+ /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */
+ if ( (ip+srcSize > window->dictBase + window->lowLimit)
+ & (ip < window->dictBase + window->dictLimit)) {
+ ptrdiff_t const highInputIdx = (ip + srcSize) - window->dictBase;
+ U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)window->dictLimit) ? window->dictLimit : (U32)highInputIdx;
+ window->lowLimit = lowLimitMax;
+ DEBUGLOG(5, "Overlapping extDict and input : new lowLimit = %u", window->lowLimit);
+ }
+ return contiguous;
+}
+
+/*
+ * Returns the lowest allowed match index. It may either be in the ext-dict or the prefix.
+ */
+MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog)
+{
+ U32 const maxDistance = 1U << windowLog;
+ U32 const lowestValid = ms->window.lowLimit;
+ U32 const withinWindow = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
+ U32 const isDictionary = (ms->loadedDictEnd != 0);
+ /* When using a dictionary the entire dictionary is valid if a single byte of the dictionary
+ * is within the window. We invalidate the dictionary (and set loadedDictEnd to 0) when it isn't
+ * valid for the entire block. So this check is sufficient to find the lowest valid match index.
+ */
+ U32 const matchLowest = isDictionary ? lowestValid : withinWindow;
+ return matchLowest;
+}
+
+/*
+ * Returns the lowest allowed match index in the prefix.
+ */
+MEM_STATIC U32 ZSTD_getLowestPrefixIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog)
+{
+ U32 const maxDistance = 1U << windowLog;
+ U32 const lowestValid = ms->window.dictLimit;
+ U32 const withinWindow = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
+ U32 const isDictionary = (ms->loadedDictEnd != 0);
+ /* When computing the lowest prefix index we need to take the dictionary into account to handle
+ * the edge case where the dictionary and the source are contiguous in memory.
+ */
+ U32 const matchLowest = isDictionary ? lowestValid : withinWindow;
+ return matchLowest;
+}
+
+
+
+/* debug functions */
+#if (DEBUGLEVEL>=2)
+
+MEM_STATIC double ZSTD_fWeight(U32 rawStat)
+{
+ U32 const fp_accuracy = 8;
+ U32 const fp_multiplier = (1 << fp_accuracy);
+ U32 const newStat = rawStat + 1;
+ U32 const hb = ZSTD_highbit32(newStat);
+ U32 const BWeight = hb * fp_multiplier;
+ U32 const FWeight = (newStat << fp_accuracy) >> hb;
+ U32 const weight = BWeight + FWeight;
+ assert(hb + fp_accuracy < 31);
+ return (double)weight / fp_multiplier;
+}
+
+/* display a table content,
+ * listing each element, its frequency, and its predicted bit cost */
+MEM_STATIC void ZSTD_debugTable(const U32* table, U32 max)
+{
+ unsigned u, sum;
+ for (u=0, sum=0; u<=max; u++) sum += table[u];
+ DEBUGLOG(2, "total nb elts: %u", sum);
+ for (u=0; u<=max; u++) {
+ DEBUGLOG(2, "%2u: %5u (%.2f)",
+ u, table[u], ZSTD_fWeight(sum) - ZSTD_fWeight(table[u]) );
+ }
+}
+
+#endif
+
+
+
+/* ===============================================================
+ * Shared internal declarations
+ * These prototypes may be called from sources not in lib/compress
+ * =============================================================== */
+
+/* ZSTD_loadCEntropy() :
+ * dict : must point at beginning of a valid zstd dictionary.
+ * return : size of dictionary header (size of magic number + dict ID + entropy tables)
+ * assumptions : magic number assumed to be already checked
+ * and dictSize >= 8 */
+size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
+ const void* const dict, size_t dictSize);
+
+void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs);
+
+/* ==============================================================
+ * Private declarations
+ * These prototypes shall only be called from within lib/compress
+ * ============================================================== */
+
+/* ZSTD_getCParamsFromCCtxParams() :
+ * cParams are built depending on compressionLevel, src size hints,
+ * LDM and manually set compression parameters.
+ * Note: srcSizeHint == 0 means 0!
+ */
+ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
+ const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
+
+/*! ZSTD_initCStream_internal() :
+ * Private use only. Init streaming operation.
+ * expects params to be valid.
+ * must receive dict, or cdict, or none, but not both.
+ * @return : 0, or an error code */
+size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
+ const void* dict, size_t dictSize,
+ const ZSTD_CDict* cdict,
+ const ZSTD_CCtx_params* params, unsigned long long pledgedSrcSize);
+
+void ZSTD_resetSeqStore(seqStore_t* ssPtr);
+
+/*! ZSTD_getCParamsFromCDict() :
+ * as the name implies */
+ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict);
+
+/* ZSTD_compressBegin_advanced_internal() :
+ * Private use only. To be called from zstdmt_compress.c. */
+size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
+ const void* dict, size_t dictSize,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_dictTableLoadMethod_e dtlm,
+ const ZSTD_CDict* cdict,
+ const ZSTD_CCtx_params* params,
+ unsigned long long pledgedSrcSize);
+
+/* ZSTD_compress_advanced_internal() :
+ * Private use only. To be called from zstdmt_compress.c. */
+size_t ZSTD_compress_advanced_internal(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize,
+ const ZSTD_CCtx_params* params);
+
+
+/* ZSTD_writeLastEmptyBlock() :
+ * output an empty Block with end-of-frame mark to complete a frame
+ * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
+ * or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
+ */
+size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity);
+
+
+/* ZSTD_referenceExternalSequences() :
+ * Must be called before starting a compression operation.
+ * seqs must parse a prefix of the source.
+ * This cannot be used when long range matching is enabled.
+ * Zstd will use these sequences, and pass the literals to a secondary block
+ * compressor.
+ * @return : An error code on failure.
+ * NOTE: seqs are not verified! Invalid sequences can cause out-of-bounds memory
+ * access and data corruption.
+ */
+size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq);
+
+/* ZSTD_cycleLog() :
+ * condition for correct operation : hashLog > 1 */
+U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat);
+
+/* ZSTD_CCtx_trace() :
+ * Trace the end of a compression call.
+ */
+void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize);
+
+#endif /* ZSTD_COMPRESS_H */
diff --git a/lib/zstd/compress/zstd_compress_literals.c b/lib/zstd/compress/zstd_compress_literals.c
new file mode 100644
index 00000000000000..655bcda4d1f1c5
--- /dev/null
+++ b/lib/zstd/compress/zstd_compress_literals.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+ /*-*************************************
+ * Dependencies
+ ***************************************/
+#include "zstd_compress_literals.h"
+
+size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ BYTE* const ostart = (BYTE*)dst;
+ U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);
+
+ RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall, "");
+
+ switch(flSize)
+ {
+ case 1: /* 2 - 1 - 5 */
+ ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3));
+ break;
+ case 2: /* 2 - 2 - 12 */
+ MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4)));
+ break;
+ case 3: /* 2 - 2 - 20 */
+ MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4)));
+ break;
+ default: /* not necessary : flSize is {1,2,3} */
+ assert(0);
+ }
+
+ ZSTD_memcpy(ostart + flSize, src, srcSize);
+ DEBUGLOG(5, "Raw literals: %u -> %u", (U32)srcSize, (U32)(srcSize + flSize));
+ return srcSize + flSize;
+}
+
+size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ BYTE* const ostart = (BYTE*)dst;
+ U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);
+
+ (void)dstCapacity; /* dstCapacity already guaranteed to be >=4, hence large enough */
+
+ switch(flSize)
+ {
+ case 1: /* 2 - 1 - 5 */
+ ostart[0] = (BYTE)((U32)set_rle + (srcSize<<3));
+ break;
+ case 2: /* 2 - 2 - 12 */
+ MEM_writeLE16(ostart, (U16)((U32)set_rle + (1<<2) + (srcSize<<4)));
+ break;
+ case 3: /* 2 - 2 - 20 */
+ MEM_writeLE32(ostart, (U32)((U32)set_rle + (3<<2) + (srcSize<<4)));
+ break;
+ default: /* not necessary : flSize is {1,2,3} */
+ assert(0);
+ }
+
+ ostart[flSize] = *(const BYTE*)src;
+ DEBUGLOG(5, "RLE literals: %u -> %u", (U32)srcSize, (U32)flSize + 1);
+ return flSize+1;
+}
+
+size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
+ ZSTD_hufCTables_t* nextHuf,
+ ZSTD_strategy strategy, int disableLiteralCompression,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ void* entropyWorkspace, size_t entropyWorkspaceSize,
+ const int bmi2)
+{
+ size_t const minGain = ZSTD_minGain(srcSize, strategy);
+ size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
+ BYTE* const ostart = (BYTE*)dst;
+ U32 singleStream = srcSize < 256;
+ symbolEncodingType_e hType = set_compressed;
+ size_t cLitSize;
+
+ DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i srcSize=%u)",
+ disableLiteralCompression, (U32)srcSize);
+
+ /* Prepare nextEntropy assuming reuse of the existing table */
+ ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+
+ if (disableLiteralCompression)
+ return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
+
+ /* small ? don't even attempt compression (speed opt) */
+# define COMPRESS_LITERALS_SIZE_MIN 63
+ { size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
+ if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
+ }
+
+ RETURN_ERROR_IF(dstCapacity < lhSize+1, dstSize_tooSmall, "not enough space for compression");
+ { HUF_repeat repeat = prevHuf->repeatMode;
+ int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
+ if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
+ cLitSize = singleStream ?
+ HUF_compress1X_repeat(
+ ostart+lhSize, dstCapacity-lhSize, src, srcSize,
+ HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize,
+ (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2) :
+ HUF_compress4X_repeat(
+ ostart+lhSize, dstCapacity-lhSize, src, srcSize,
+ HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize,
+ (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2);
+ if (repeat != HUF_repeat_none) {
+ /* reused the existing table */
+ DEBUGLOG(5, "Reusing previous huffman table");
+ hType = set_repeat;
+ }
+ }
+
+ if ((cLitSize==0) | (cLitSize >= srcSize - minGain) | ERR_isError(cLitSize)) {
+ ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+ return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
+ }
+ if (cLitSize==1) {
+ ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+ return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
+ }
+
+ if (hType == set_compressed) {
+ /* using a newly constructed table */
+ nextHuf->repeatMode = HUF_repeat_check;
+ }
+
+ /* Build header */
+ switch(lhSize)
+ {
+ case 3: /* 2 - 2 - 10 - 10 */
+ { U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
+ MEM_writeLE24(ostart, lhc);
+ break;
+ }
+ case 4: /* 2 - 2 - 14 - 14 */
+ { U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18);
+ MEM_writeLE32(ostart, lhc);
+ break;
+ }
+ case 5: /* 2 - 2 - 18 - 18 */
+ { U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22);
+ MEM_writeLE32(ostart, lhc);
+ ostart[4] = (BYTE)(cLitSize >> 10);
+ break;
+ }
+ default: /* not possible : lhSize is {3,4,5} */
+ assert(0);
+ }
+ DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)srcSize, (U32)(lhSize+cLitSize));
+ return lhSize+cLitSize;
+}
diff --git a/lib/zstd/compress/zstd_compress_literals.h b/lib/zstd/compress/zstd_compress_literals.h
new file mode 100644
index 00000000000000..9904c0cd30a0a1
--- /dev/null
+++ b/lib/zstd/compress/zstd_compress_literals.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_COMPRESS_LITERALS_H
+#define ZSTD_COMPRESS_LITERALS_H
+
+#include "zstd_compress_internal.h" /* ZSTD_hufCTables_t, ZSTD_minGain() */
+
+
+size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+
+size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+
+size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
+ ZSTD_hufCTables_t* nextHuf,
+ ZSTD_strategy strategy, int disableLiteralCompression,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ void* entropyWorkspace, size_t entropyWorkspaceSize,
+ const int bmi2);
+
+#endif /* ZSTD_COMPRESS_LITERALS_H */
diff --git a/lib/zstd/compress/zstd_compress_sequences.c b/lib/zstd/compress/zstd_compress_sequences.c
new file mode 100644
index 00000000000000..dcfcdc9cc5e8da
--- /dev/null
+++ b/lib/zstd/compress/zstd_compress_sequences.c
@@ -0,0 +1,439 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+ /*-*************************************
+ * Dependencies
+ ***************************************/
+#include "zstd_compress_sequences.h"
+
+/*
+ * -log2(x / 256) lookup table for x in [0, 256).
+ * If x == 0: Return 0
+ * Else: Return floor(-log2(x / 256) * 256)
+ */
+static unsigned const kInverseProbabilityLog256[256] = {
+ 0, 2048, 1792, 1642, 1536, 1453, 1386, 1329, 1280, 1236, 1197, 1162,
+ 1130, 1100, 1073, 1047, 1024, 1001, 980, 960, 941, 923, 906, 889,
+ 874, 859, 844, 830, 817, 804, 791, 779, 768, 756, 745, 734,
+ 724, 714, 704, 694, 685, 676, 667, 658, 650, 642, 633, 626,
+ 618, 610, 603, 595, 588, 581, 574, 567, 561, 554, 548, 542,
+ 535, 529, 523, 517, 512, 506, 500, 495, 489, 484, 478, 473,
+ 468, 463, 458, 453, 448, 443, 438, 434, 429, 424, 420, 415,
+ 411, 407, 402, 398, 394, 390, 386, 382, 377, 373, 370, 366,
+ 362, 358, 354, 350, 347, 343, 339, 336, 332, 329, 325, 322,
+ 318, 315, 311, 308, 305, 302, 298, 295, 292, 289, 286, 282,
+ 279, 276, 273, 270, 267, 264, 261, 258, 256, 253, 250, 247,
+ 244, 241, 239, 236, 233, 230, 228, 225, 222, 220, 217, 215,
+ 212, 209, 207, 204, 202, 199, 197, 194, 192, 190, 187, 185,
+ 182, 180, 178, 175, 173, 171, 168, 166, 164, 162, 159, 157,
+ 155, 153, 151, 149, 146, 144, 142, 140, 138, 136, 134, 132,
+ 130, 128, 126, 123, 121, 119, 117, 115, 114, 112, 110, 108,
+ 106, 104, 102, 100, 98, 96, 94, 93, 91, 89, 87, 85,
+ 83, 82, 80, 78, 76, 74, 73, 71, 69, 67, 66, 64,
+ 62, 61, 59, 57, 55, 54, 52, 50, 49, 47, 46, 44,
+ 42, 41, 39, 37, 36, 34, 33, 31, 30, 28, 26, 25,
+ 23, 22, 20, 19, 17, 16, 14, 13, 11, 10, 8, 7,
+ 5, 4, 2, 1,
+};
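+
+/* The table above follows from its definition and could be regenerated
+ * (outside the kernel, with <math.h>) by :
+ *   table[0] = 0;
+ *   for (x = 1; x < 256; x++)
+ *       table[x] = (unsigned)(-log2(x / 256.0) * 256);  (the cast truncates, i.e. floors)
+ * e.g. x == 128 gives -log2(0.5) * 256 == 256, matching the entry above. */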
+
+static unsigned ZSTD_getFSEMaxSymbolValue(FSE_CTable const* ctable) {
+ void const* ptr = ctable;
+ U16 const* u16ptr = (U16 const*)ptr;
+ U32 const maxSymbolValue = MEM_read16(u16ptr + 1);
+ return maxSymbolValue;
+}
+
+/*
+ * Returns true if we should use ncount=-1 for low-probability symbols,
+ * otherwise ncount=1 should be used instead.
+ */
+static unsigned ZSTD_useLowProbCount(size_t const nbSeq)
+{
+ /* Heuristic: This should cover most blocks <= 16K and
+ * start to fade out after 16K to about 32K depending on
+ * compressibility.
+ */
+ return nbSeq >= 2048;
+}
+
+/*
+ * Returns the cost in bytes of encoding the normalized count header.
+ * Returns an error if any of the helper functions return an error.
+ */
+static size_t ZSTD_NCountCost(unsigned const* count, unsigned const max,
+ size_t const nbSeq, unsigned const FSELog)
+{
+ BYTE wksp[FSE_NCOUNTBOUND];
+ S16 norm[MaxSeq + 1];
+ const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
+ FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq, max, ZSTD_useLowProbCount(nbSeq)), "");
+ return FSE_writeNCount(wksp, sizeof(wksp), norm, max, tableLog);
+}
+
+/*
+ * Returns the cost in bits of encoding the distribution described by count
+ * using the entropy bound.
+ */
+static size_t ZSTD_entropyCost(unsigned const* count, unsigned const max, size_t const total)
+{
+ unsigned cost = 0;
+ unsigned s;
+ for (s = 0; s <= max; ++s) {
+ unsigned norm = (unsigned)((256 * count[s]) / total);
+ if (count[s] != 0 && norm == 0)
+ norm = 1;
+ assert(count[s] < total);
+ cost += count[s] * kInverseProbabilityLog256[norm];
+ }
+ return cost >> 8;
+}
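+
+/* Since kInverseProbabilityLog256[norm] ~= -log2(norm/256) * 256, the loop above
+ * accumulates roughly 256 * sum_s count[s] * -log2(count[s]/total), i.e. the entropy
+ * bound expressed in 1/256th-of-a-bit units; the final ">> 8" converts it to bits. */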
+
+/*
+ * Returns the cost in bits of encoding the distribution in count using ctable.
+ * Returns an error if ctable cannot represent all the symbols in count.
+ */
+size_t ZSTD_fseBitCost(
+ FSE_CTable const* ctable,
+ unsigned const* count,
+ unsigned const max)
+{
+ unsigned const kAccuracyLog = 8;
+ size_t cost = 0;
+ unsigned s;
+ FSE_CState_t cstate;
+ FSE_initCState(&cstate, ctable);
+ if (ZSTD_getFSEMaxSymbolValue(ctable) < max) {
+ DEBUGLOG(5, "Repeat FSE_CTable has maxSymbolValue %u < %u",
+ ZSTD_getFSEMaxSymbolValue(ctable), max);
+ return ERROR(GENERIC);
+ }
+ for (s = 0; s <= max; ++s) {
+ unsigned const tableLog = cstate.stateLog;
+ unsigned const badCost = (tableLog + 1) << kAccuracyLog;
+ unsigned const bitCost = FSE_bitCost(cstate.symbolTT, tableLog, s, kAccuracyLog);
+ if (count[s] == 0)
+ continue;
+ if (bitCost >= badCost) {
+ DEBUGLOG(5, "Repeat FSE_CTable has Prob[%u] == 0", s);
+ return ERROR(GENERIC);
+ }
+ cost += (size_t)count[s] * bitCost;
+ }
+ return cost >> kAccuracyLog;
+}
+
+/*
+ * Returns the cost in bits of encoding the distribution in count using the
+ * table described by norm. The max symbol supported by norm is assumed >= max.
+ * norm must be valid for every symbol with non-zero probability in count.
+ */
+size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
+ unsigned const* count, unsigned const max)
+{
+ unsigned const shift = 8 - accuracyLog;
+ size_t cost = 0;
+ unsigned s;
+ assert(accuracyLog <= 8);
+ for (s = 0; s <= max; ++s) {
+ unsigned const normAcc = (norm[s] != -1) ? (unsigned)norm[s] : 1;
+ unsigned const norm256 = normAcc << shift;
+ assert(norm256 > 0);
+ assert(norm256 < 256);
+ cost += count[s] * kInverseProbabilityLog256[norm256];
+ }
+ return cost >> 8;
+}
+
+symbolEncodingType_e
+ZSTD_selectEncodingType(
+ FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
+ size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
+ FSE_CTable const* prevCTable,
+ short const* defaultNorm, U32 defaultNormLog,
+ ZSTD_defaultPolicy_e const isDefaultAllowed,
+ ZSTD_strategy const strategy)
+{
+ ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0);
+ if (mostFrequent == nbSeq) {
+ *repeatMode = FSE_repeat_none;
+ if (isDefaultAllowed && nbSeq <= 2) {
+ /* Prefer set_basic over set_rle when there are 2 or fewer symbols,
+ * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol.
+ * If basic encoding isn't possible, always choose RLE.
+ */
+ DEBUGLOG(5, "Selected set_basic");
+ return set_basic;
+ }
+ DEBUGLOG(5, "Selected set_rle");
+ return set_rle;
+ }
+ if (strategy < ZSTD_lazy) {
+ if (isDefaultAllowed) {
+ size_t const staticFse_nbSeq_max = 1000;
+ size_t const mult = 10 - strategy;
+ size_t const baseLog = 3;
+ size_t const dynamicFse_nbSeq_min = (((size_t)1 << defaultNormLog) * mult) >> baseLog; /* 28-36 for offset, 56-72 for lengths */
+ assert(defaultNormLog >= 5 && defaultNormLog <= 6); /* xx_DEFAULTNORMLOG */
+ assert(mult <= 9 && mult >= 7);
+ if ( (*repeatMode == FSE_repeat_valid)
+ && (nbSeq < staticFse_nbSeq_max) ) {
+ DEBUGLOG(5, "Selected set_repeat");
+ return set_repeat;
+ }
+ if ( (nbSeq < dynamicFse_nbSeq_min)
+ || (mostFrequent < (nbSeq >> (defaultNormLog-1))) ) {
+ DEBUGLOG(5, "Selected set_basic");
+ /* The format allows default tables to be repeated, but it isn't useful.
+ * When using simple heuristics to select encoding type, we don't want
+ * to confuse these tables with dictionaries. When running more careful
+ * analysis, we don't need to waste time checking both repeating tables
+ * and default tables.
+ */
+ *repeatMode = FSE_repeat_none;
+ return set_basic;
+ }
+ }
+ } else {
+ size_t const basicCost = isDefaultAllowed ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, count, max) : ERROR(GENERIC);
+ size_t const repeatCost = *repeatMode != FSE_repeat_none ? ZSTD_fseBitCost(prevCTable, count, max) : ERROR(GENERIC);
+ size_t const NCountCost = ZSTD_NCountCost(count, max, nbSeq, FSELog);
+ size_t const compressedCost = (NCountCost << 3) + ZSTD_entropyCost(count, max, nbSeq);
+
+ if (isDefaultAllowed) {
+ assert(!ZSTD_isError(basicCost));
+ assert(!(*repeatMode == FSE_repeat_valid && ZSTD_isError(repeatCost)));
+ }
+ assert(!ZSTD_isError(NCountCost));
+ assert(compressedCost < ERROR(maxCode));
+ DEBUGLOG(5, "Estimated bit costs: basic=%u\trepeat=%u\tcompressed=%u",
+ (unsigned)basicCost, (unsigned)repeatCost, (unsigned)compressedCost);
+ if (basicCost <= repeatCost && basicCost <= compressedCost) {
+ DEBUGLOG(5, "Selected set_basic");
+ assert(isDefaultAllowed);
+ *repeatMode = FSE_repeat_none;
+ return set_basic;
+ }
+ if (repeatCost <= compressedCost) {
+ DEBUGLOG(5, "Selected set_repeat");
+ assert(!ZSTD_isError(repeatCost));
+ return set_repeat;
+ }
+ assert(compressedCost < basicCost && compressedCost < repeatCost);
+ }
+ DEBUGLOG(5, "Selected set_compressed");
+ *repeatMode = FSE_repeat_check;
+ return set_compressed;
+}
+
+typedef struct {
+ S16 norm[MaxSeq + 1];
+ U32 wksp[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(MaxSeq, MaxFSELog)];
+} ZSTD_BuildCTableWksp;
+
+size_t
+ZSTD_buildCTable(void* dst, size_t dstCapacity,
+ FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
+ unsigned* count, U32 max,
+ const BYTE* codeTable, size_t nbSeq,
+ const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
+ const FSE_CTable* prevCTable, size_t prevCTableSize,
+ void* entropyWorkspace, size_t entropyWorkspaceSize)
+{
+ BYTE* op = (BYTE*)dst;
+ const BYTE* const oend = op + dstCapacity;
+ DEBUGLOG(6, "ZSTD_buildCTable (dstCapacity=%u)", (unsigned)dstCapacity);
+
+ switch (type) {
+ case set_rle:
+ FORWARD_IF_ERROR(FSE_buildCTable_rle(nextCTable, (BYTE)max), "");
+ RETURN_ERROR_IF(dstCapacity==0, dstSize_tooSmall, "not enough space");
+ *op = codeTable[0];
+ return 1;
+ case set_repeat:
+ ZSTD_memcpy(nextCTable, prevCTable, prevCTableSize);
+ return 0;
+ case set_basic:
+ FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, entropyWorkspace, entropyWorkspaceSize), ""); /* note : could be pre-calculated */
+ return 0;
+ case set_compressed: {
+ ZSTD_BuildCTableWksp* wksp = (ZSTD_BuildCTableWksp*)entropyWorkspace;
+ size_t nbSeq_1 = nbSeq;
+ const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
+ if (count[codeTable[nbSeq-1]] > 1) {
+ count[codeTable[nbSeq-1]]--;
+ nbSeq_1--;
+ }
+ assert(nbSeq_1 > 1);
+ assert(entropyWorkspaceSize >= sizeof(ZSTD_BuildCTableWksp));
+ (void)entropyWorkspaceSize;
+ FORWARD_IF_ERROR(FSE_normalizeCount(wksp->norm, tableLog, count, nbSeq_1, max, ZSTD_useLowProbCount(nbSeq_1)), "");
+ { size_t const NCountSize = FSE_writeNCount(op, oend - op, wksp->norm, max, tableLog); /* overflow protected */
+ FORWARD_IF_ERROR(NCountSize, "FSE_writeNCount failed");
+ FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, wksp->norm, max, tableLog, wksp->wksp, sizeof(wksp->wksp)), "");
+ return NCountSize;
+ }
+ }
+ default: assert(0); RETURN_ERROR(GENERIC, "impossible to reach");
+ }
+}
+
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_encodeSequences_body(
+ void* dst, size_t dstCapacity,
+ FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
+ FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
+ FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
+ seqDef const* sequences, size_t nbSeq, int longOffsets)
+{
+ BIT_CStream_t blockStream;
+ FSE_CState_t stateMatchLength;
+ FSE_CState_t stateOffsetBits;
+ FSE_CState_t stateLitLength;
+
+ RETURN_ERROR_IF(
+ ERR_isError(BIT_initCStream(&blockStream, dst, dstCapacity)),
+ dstSize_tooSmall, "not enough space remaining");
+ DEBUGLOG(6, "available space for bitstream : %i (dstCapacity=%u)",
+ (int)(blockStream.endPtr - blockStream.startPtr),
+ (unsigned)dstCapacity);
+
+ /* first symbols */
+ FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]);
+ FSE_initCState2(&stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq-1]);
+ FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq-1]);
+ BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]);
+ if (MEM_32bits()) BIT_flushBits(&blockStream);
+ BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]);
+ if (MEM_32bits()) BIT_flushBits(&blockStream);
+ if (longOffsets) {
+ U32 const ofBits = ofCodeTable[nbSeq-1];
+ unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
+ if (extraBits) {
+ BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits);
+ BIT_flushBits(&blockStream);
+ }
+ BIT_addBits(&blockStream, sequences[nbSeq-1].offset >> extraBits,
+ ofBits - extraBits);
+ } else {
+ BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]);
+ }
+ BIT_flushBits(&blockStream);
+
+ { size_t n;
+ for (n=nbSeq-2 ; n<nbSeq ; n--) { /* intentional underflow */
+ BYTE const llCode = llCodeTable[n];
+ BYTE const ofCode = ofCodeTable[n];
+ BYTE const mlCode = mlCodeTable[n];
+ U32 const llBits = LL_bits[llCode];
+ U32 const ofBits = ofCode;
+ U32 const mlBits = ML_bits[mlCode];
+ DEBUGLOG(6, "encoding: litlen:%2u - matchlen:%2u - offCode:%7u",
+ (unsigned)sequences[n].litLength,
+ (unsigned)sequences[n].matchLength + MINMATCH,
+ (unsigned)sequences[n].offset);
+ /* 32b*/ /* 64b*/
+ /* (7)*/ /* (7)*/
+ FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode); /* 15 */ /* 15 */
+ FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode); /* 24 */ /* 24 */
+ if (MEM_32bits()) BIT_flushBits(&blockStream); /* (7)*/
+ FSE_encodeSymbol(&blockStream, &stateLitLength, llCode); /* 16 */ /* 33 */
+ if (MEM_32bits() || (ofBits+mlBits+llBits >= 64-7-(LLFSELog+MLFSELog+OffFSELog)))
+ BIT_flushBits(&blockStream); /* (7)*/
+ BIT_addBits(&blockStream, sequences[n].litLength, llBits);
+ if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream);
+ BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
+ if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream);
+ if (longOffsets) {
+ unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
+ if (extraBits) {
+ BIT_addBits(&blockStream, sequences[n].offset, extraBits);
+ BIT_flushBits(&blockStream); /* (7)*/
+ }
+ BIT_addBits(&blockStream, sequences[n].offset >> extraBits,
+ ofBits - extraBits); /* 31 */
+ } else {
+ BIT_addBits(&blockStream, sequences[n].offset, ofBits); /* 31 */
+ }
+ BIT_flushBits(&blockStream); /* (7)*/
+ DEBUGLOG(7, "remaining space : %i", (int)(blockStream.endPtr - blockStream.ptr));
+ } }
+
+ DEBUGLOG(6, "ZSTD_encodeSequences: flushing ML state with %u bits", stateMatchLength.stateLog);
+ FSE_flushCState(&blockStream, &stateMatchLength);
+ DEBUGLOG(6, "ZSTD_encodeSequences: flushing Off state with %u bits", stateOffsetBits.stateLog);
+ FSE_flushCState(&blockStream, &stateOffsetBits);
+ DEBUGLOG(6, "ZSTD_encodeSequences: flushing LL state with %u bits", stateLitLength.stateLog);
+ FSE_flushCState(&blockStream, &stateLitLength);
+
+ { size_t const streamSize = BIT_closeCStream(&blockStream);
+ RETURN_ERROR_IF(streamSize==0, dstSize_tooSmall, "not enough space");
+ return streamSize;
+ }
+}
+
+static size_t
+ZSTD_encodeSequences_default(
+ void* dst, size_t dstCapacity,
+ FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
+ FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
+ FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
+ seqDef const* sequences, size_t nbSeq, int longOffsets)
+{
+ return ZSTD_encodeSequences_body(dst, dstCapacity,
+ CTable_MatchLength, mlCodeTable,
+ CTable_OffsetBits, ofCodeTable,
+ CTable_LitLength, llCodeTable,
+ sequences, nbSeq, longOffsets);
+}
+
+
+#if DYNAMIC_BMI2
+
+static TARGET_ATTRIBUTE("bmi2") size_t
+ZSTD_encodeSequences_bmi2(
+ void* dst, size_t dstCapacity,
+ FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
+ FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
+ FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
+ seqDef const* sequences, size_t nbSeq, int longOffsets)
+{
+ return ZSTD_encodeSequences_body(dst, dstCapacity,
+ CTable_MatchLength, mlCodeTable,
+ CTable_OffsetBits, ofCodeTable,
+ CTable_LitLength, llCodeTable,
+ sequences, nbSeq, longOffsets);
+}
+
+#endif
+
+size_t ZSTD_encodeSequences(
+ void* dst, size_t dstCapacity,
+ FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
+ FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
+ FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
+ seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2)
+{
+ DEBUGLOG(5, "ZSTD_encodeSequences: dstCapacity = %u", (unsigned)dstCapacity);
+#if DYNAMIC_BMI2
+ if (bmi2) {
+ return ZSTD_encodeSequences_bmi2(dst, dstCapacity,
+ CTable_MatchLength, mlCodeTable,
+ CTable_OffsetBits, ofCodeTable,
+ CTable_LitLength, llCodeTable,
+ sequences, nbSeq, longOffsets);
+ }
+#endif
+ (void)bmi2;
+ return ZSTD_encodeSequences_default(dst, dstCapacity,
+ CTable_MatchLength, mlCodeTable,
+ CTable_OffsetBits, ofCodeTable,
+ CTable_LitLength, llCodeTable,
+ sequences, nbSeq, longOffsets);
+}
diff --git a/lib/zstd/compress/zstd_compress_sequences.h b/lib/zstd/compress/zstd_compress_sequences.h
new file mode 100644
index 00000000000000..7991364c2f71ff
--- /dev/null
+++ b/lib/zstd/compress/zstd_compress_sequences.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_COMPRESS_SEQUENCES_H
+#define ZSTD_COMPRESS_SEQUENCES_H
+
+#include "../common/fse.h" /* FSE_repeat, FSE_CTable */
+#include "../common/zstd_internal.h" /* symbolEncodingType_e, ZSTD_strategy */
+
+typedef enum {
+ ZSTD_defaultDisallowed = 0,
+ ZSTD_defaultAllowed = 1
+} ZSTD_defaultPolicy_e;
+
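+/* ZSTD_selectEncodingType() :
+ * Selects between set_basic, set_rle, set_repeat and set_compressed for one
+ * symbol type, based on the symbol histogram in `count` and simple bit-cost
+ * heuristics; may update *repeatMode as a side effect. */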
+symbolEncodingType_e
+ZSTD_selectEncodingType(
+ FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
+ size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
+ FSE_CTable const* prevCTable,
+ short const* defaultNorm, U32 defaultNormLog,
+ ZSTD_defaultPolicy_e const isDefaultAllowed,
+ ZSTD_strategy const strategy);
+
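+/* ZSTD_buildCTable() :
+ * Builds the FSE CTable for one symbol type according to `type`.
+ * For set_rle the single repeated symbol is written to `dst`; for
+ * set_compressed the FSE table description (NCount) is written to `dst`.
+ * @return : number of bytes written into `dst`, or an error code */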
+size_t
+ZSTD_buildCTable(void* dst, size_t dstCapacity,
+ FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
+ unsigned* count, U32 max,
+ const BYTE* codeTable, size_t nbSeq,
+ const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
+ const FSE_CTable* prevCTable, size_t prevCTableSize,
+ void* entropyWorkspace, size_t entropyWorkspaceSize);
+
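+/* ZSTD_encodeSequences() :
+ * Encodes the sequences bitstream (literal lengths, offsets, match lengths)
+ * into `dst` using the provided CTables, dispatching to a BMI2-targeted
+ * variant when requested and available.
+ * @return : size of the bitstream in bytes, or an error code */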
+size_t ZSTD_encodeSequences(
+ void* dst, size_t dstCapacity,
+ FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
+ FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
+ FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
+ seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2);
+
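+/* Bit-cost estimators used by ZSTD_selectEncodingType() above :
+ * ZSTD_fseBitCost() estimates, in bits, the cost of encoding the histogram in
+ * `count` with an existing CTable; ZSTD_crossEntropyCost() does the same for a
+ * normalized distribution `norm` with `accuracyLog` bits of precision.
+ * Both may return an error code if the distribution cannot represent the
+ * observed symbols. */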
+size_t ZSTD_fseBitCost(
+ FSE_CTable const* ctable,
+ unsigned const* count,
+ unsigned const max);
+
+size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
+ unsigned const* count, unsigned const max);
+#endif /* ZSTD_COMPRESS_SEQUENCES_H */
diff --git a/lib/zstd/compress/zstd_compress_superblock.c b/lib/zstd/compress/zstd_compress_superblock.c
new file mode 100644
index 00000000000000..ee03e0aedb0304
--- /dev/null
+++ b/lib/zstd/compress/zstd_compress_superblock.c
@@ -0,0 +1,850 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+ /*-*************************************
+ * Dependencies
+ ***************************************/
+#include "zstd_compress_superblock.h"
+
+#include "../common/zstd_internal.h" /* ZSTD_getSequenceLength */
+#include "hist.h" /* HIST_countFast_wksp */
+#include "zstd_compress_internal.h"
+#include "zstd_compress_sequences.h"
+#include "zstd_compress_literals.h"
+
+/*-*************************************
+* Superblock entropy buffer structs
+***************************************/
+/* ZSTD_hufCTablesMetadata_t :
+ * Stores Literals Block Type for a super-block in hType, and
+ * huffman tree description in hufDesBuffer.
+ * hufDesSize refers to the size of huffman tree description in bytes.
+ * This metadata is populated in ZSTD_buildSuperBlockEntropy_literal() */
+typedef struct {
+ symbolEncodingType_e hType;
+ BYTE hufDesBuffer[ZSTD_MAX_HUF_HEADER_SIZE];
+ size_t hufDesSize;
+} ZSTD_hufCTablesMetadata_t;
+
+/* ZSTD_fseCTablesMetadata_t :
+ * Stores symbol compression modes for a super-block in {ll, ol, ml}Type, and
+ * fse tables in fseTablesBuffer.
+ * fseTablesSize refers to the size of fse tables in bytes.
+ * This metadata is populated in ZSTD_buildSuperBlockEntropy_sequences() */
+typedef struct {
+ symbolEncodingType_e llType;
+ symbolEncodingType_e ofType;
+ symbolEncodingType_e mlType;
+ BYTE fseTablesBuffer[ZSTD_MAX_FSE_HEADERS_SIZE];
+ size_t fseTablesSize;
+ size_t lastCountSize; /* This is to account for bug in 1.3.4. More detail in ZSTD_compressSubBlock_sequences() */
+} ZSTD_fseCTablesMetadata_t;
+
+typedef struct {
+ ZSTD_hufCTablesMetadata_t hufMetadata;
+ ZSTD_fseCTablesMetadata_t fseMetadata;
+} ZSTD_entropyCTablesMetadata_t;
+
+
+/* ZSTD_buildSuperBlockEntropy_literal() :
+ * Builds entropy for the super-block literals.
+ * Stores literals block type (raw, rle, compressed, repeat) and
+ * huffman description table to hufMetadata.
+ * @return : size of huffman description table or error code */
+static size_t ZSTD_buildSuperBlockEntropy_literal(void* const src, size_t srcSize,
+ const ZSTD_hufCTables_t* prevHuf,
+ ZSTD_hufCTables_t* nextHuf,
+ ZSTD_hufCTablesMetadata_t* hufMetadata,
+ const int disableLiteralsCompression,
+ void* workspace, size_t wkspSize)
+{
+ BYTE* const wkspStart = (BYTE*)workspace;
+ BYTE* const wkspEnd = wkspStart + wkspSize;
+ BYTE* const countWkspStart = wkspStart;
+ unsigned* const countWksp = (unsigned*)workspace;
+ const size_t countWkspSize = (HUF_SYMBOLVALUE_MAX + 1) * sizeof(unsigned);
+ BYTE* const nodeWksp = countWkspStart + countWkspSize;
+ const size_t nodeWkspSize = wkspEnd-nodeWksp;
+ unsigned maxSymbolValue = 255;
+ unsigned huffLog = HUF_TABLELOG_DEFAULT;
+ HUF_repeat repeat = prevHuf->repeatMode;
+
+ DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy_literal (srcSize=%zu)", srcSize);
+
+ /* Prepare nextEntropy assuming reusing the existing table */
+ ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+
+ if (disableLiteralsCompression) {
+ DEBUGLOG(5, "set_basic - disabled");
+ hufMetadata->hType = set_basic;
+ return 0;
+ }
+
+ /* small ? don't even attempt compression (speed opt) */
+# define COMPRESS_LITERALS_SIZE_MIN 63
+ { size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
+ if (srcSize <= minLitSize) {
+ DEBUGLOG(5, "set_basic - too small");
+ hufMetadata->hType = set_basic;
+ return 0;
+ }
+ }
+
+ /* Scan input and build symbol stats */
+ { size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)src, srcSize, workspace, wkspSize);
+ FORWARD_IF_ERROR(largest, "HIST_count_wksp failed");
+ if (largest == srcSize) {
+ DEBUGLOG(5, "set_rle");
+ hufMetadata->hType = set_rle;
+ return 0;
+ }
+ if (largest <= (srcSize >> 7)+4) {
+ DEBUGLOG(5, "set_basic - no gain");
+ hufMetadata->hType = set_basic;
+ return 0;
+ }
+ }
+
+ /* Validate the previous Huffman table */
+ if (repeat == HUF_repeat_check && !HUF_validateCTable((HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue)) {
+ repeat = HUF_repeat_none;
+ }
+
+ /* Build Huffman Tree */
+ ZSTD_memset(nextHuf->CTable, 0, sizeof(nextHuf->CTable));
+ huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
+ { size_t const maxBits = HUF_buildCTable_wksp((HUF_CElt*)nextHuf->CTable, countWksp,
+ maxSymbolValue, huffLog,
+ nodeWksp, nodeWkspSize);
+ FORWARD_IF_ERROR(maxBits, "HUF_buildCTable_wksp");
+ huffLog = (U32)maxBits;
+ { /* Build and write the CTable */
+ size_t const newCSize = HUF_estimateCompressedSize(
+ (HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue);
+ size_t const hSize = HUF_writeCTable_wksp(
+ hufMetadata->hufDesBuffer, sizeof(hufMetadata->hufDesBuffer),
+ (HUF_CElt*)nextHuf->CTable, maxSymbolValue, huffLog,
+ nodeWksp, nodeWkspSize);
+ /* Check against repeating the previous CTable */
+ if (repeat != HUF_repeat_none) {
+ size_t const oldCSize = HUF_estimateCompressedSize(
+ (HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue);
+ if (oldCSize < srcSize && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize)) {
+ DEBUGLOG(5, "set_repeat - smaller");
+ ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+ hufMetadata->hType = set_repeat;
+ return 0;
+ }
+ }
+ if (newCSize + hSize >= srcSize) {
+ DEBUGLOG(5, "set_basic - no gains");
+ ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+ hufMetadata->hType = set_basic;
+ return 0;
+ }
+ DEBUGLOG(5, "set_compressed (hSize=%u)", (U32)hSize);
+ hufMetadata->hType = set_compressed;
+ nextHuf->repeatMode = HUF_repeat_check;
+ return hSize;
+ }
+ }
+}
+
+/* ZSTD_buildSuperBlockEntropy_sequences() :
+ * Builds entropy for the super-block sequences.
+ * Stores symbol compression modes and fse table to fseMetadata.
+ * @return : size of fse tables or error code */
+static size_t ZSTD_buildSuperBlockEntropy_sequences(seqStore_t* seqStorePtr,
+ const ZSTD_fseCTables_t* prevEntropy,
+ ZSTD_fseCTables_t* nextEntropy,
+ const ZSTD_CCtx_params* cctxParams,
+ ZSTD_fseCTablesMetadata_t* fseMetadata,
+ void* workspace, size_t wkspSize)
+{
+ BYTE* const wkspStart = (BYTE*)workspace;
+ BYTE* const wkspEnd = wkspStart + wkspSize;
+ BYTE* const countWkspStart = wkspStart;
+ unsigned* const countWksp = (unsigned*)workspace;
+ const size_t countWkspSize = (MaxSeq + 1) * sizeof(unsigned);
+ BYTE* const cTableWksp = countWkspStart + countWkspSize;
+ const size_t cTableWkspSize = wkspEnd-cTableWksp;
+ ZSTD_strategy const strategy = cctxParams->cParams.strategy;
+ FSE_CTable* CTable_LitLength = nextEntropy->litlengthCTable;
+ FSE_CTable* CTable_OffsetBits = nextEntropy->offcodeCTable;
+ FSE_CTable* CTable_MatchLength = nextEntropy->matchlengthCTable;
+ const BYTE* const ofCodeTable = seqStorePtr->ofCode;
+ const BYTE* const llCodeTable = seqStorePtr->llCode;
+ const BYTE* const mlCodeTable = seqStorePtr->mlCode;
+ size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
+ BYTE* const ostart = fseMetadata->fseTablesBuffer;
+ BYTE* const oend = ostart + sizeof(fseMetadata->fseTablesBuffer);
+ BYTE* op = ostart;
+
+ assert(cTableWkspSize >= (1 << MaxFSELog) * sizeof(FSE_FUNCTION_TYPE));
+ DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy_sequences (nbSeq=%zu)", nbSeq);
+ ZSTD_memset(workspace, 0, wkspSize);
+
+ fseMetadata->lastCountSize = 0;
+ /* convert length/distances into codes */
+ ZSTD_seqToCodes(seqStorePtr);
+ /* build CTable for Literal Lengths */
+ { U32 LLtype;
+ unsigned max = MaxLL;
+ size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, llCodeTable, nbSeq, workspace, wkspSize); /* can't fail */
+ DEBUGLOG(5, "Building LL table");
+ nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode;
+ LLtype = ZSTD_selectEncodingType(&nextEntropy->litlength_repeatMode,
+ countWksp, max, mostFrequent, nbSeq,
+ LLFSELog, prevEntropy->litlengthCTable,
+ LL_defaultNorm, LL_defaultNormLog,
+ ZSTD_defaultAllowed, strategy);
+ assert(set_basic < set_compressed && set_rle < set_compressed);
+ assert(!(LLtype < set_compressed && nextEntropy->litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
+ { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
+ countWksp, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL,
+ prevEntropy->litlengthCTable, sizeof(prevEntropy->litlengthCTable),
+ cTableWksp, cTableWkspSize);
+ FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for LitLens failed");
+ if (LLtype == set_compressed)
+ fseMetadata->lastCountSize = countSize;
+ op += countSize;
+ fseMetadata->llType = (symbolEncodingType_e) LLtype;
+ } }
+ /* build CTable for Offsets */
+ { U32 Offtype;
+ unsigned max = MaxOff;
+ size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, ofCodeTable, nbSeq, workspace, wkspSize); /* can't fail */
+ /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
+ ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
+ DEBUGLOG(5, "Building OF table");
+ nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode;
+ Offtype = ZSTD_selectEncodingType(&nextEntropy->offcode_repeatMode,
+ countWksp, max, mostFrequent, nbSeq,
+ OffFSELog, prevEntropy->offcodeCTable,
+ OF_defaultNorm, OF_defaultNormLog,
+ defaultPolicy, strategy);
+ assert(!(Offtype < set_compressed && nextEntropy->offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
+ { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
+ countWksp, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
+ prevEntropy->offcodeCTable, sizeof(prevEntropy->offcodeCTable),
+ cTableWksp, cTableWkspSize);
+ FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for Offsets failed");
+ if (Offtype == set_compressed)
+ fseMetadata->lastCountSize = countSize;
+ op += countSize;
+ fseMetadata->ofType = (symbolEncodingType_e) Offtype;
+ } }
+ /* build CTable for MatchLengths */
+ { U32 MLtype;
+ unsigned max = MaxML;
+ size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, mlCodeTable, nbSeq, workspace, wkspSize); /* can't fail */
+ DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
+ nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode;
+ MLtype = ZSTD_selectEncodingType(&nextEntropy->matchlength_repeatMode,
+ countWksp, max, mostFrequent, nbSeq,
+ MLFSELog, prevEntropy->matchlengthCTable,
+ ML_defaultNorm, ML_defaultNormLog,
+ ZSTD_defaultAllowed, strategy);
+ assert(!(MLtype < set_compressed && nextEntropy->matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
+ { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
+ countWksp, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML,
+ prevEntropy->matchlengthCTable, sizeof(prevEntropy->matchlengthCTable),
+ cTableWksp, cTableWkspSize);
+ FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for MatchLengths failed");
+ if (MLtype == set_compressed)
+ fseMetadata->lastCountSize = countSize;
+ op += countSize;
+ fseMetadata->mlType = (symbolEncodingType_e) MLtype;
+ } }
+ assert((size_t) (op-ostart) <= sizeof(fseMetadata->fseTablesBuffer));
+ return op-ostart;
+}
+
+
+/* ZSTD_buildSuperBlockEntropy() :
+ * Builds entropy for the super-block.
+ * @return : 0 on success or error code */
+static size_t
+ZSTD_buildSuperBlockEntropy(seqStore_t* seqStorePtr,
+ const ZSTD_entropyCTables_t* prevEntropy,
+ ZSTD_entropyCTables_t* nextEntropy,
+ const ZSTD_CCtx_params* cctxParams,
+ ZSTD_entropyCTablesMetadata_t* entropyMetadata,
+ void* workspace, size_t wkspSize)
+{
+ size_t const litSize = seqStorePtr->lit - seqStorePtr->litStart;
+ DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy");
+ entropyMetadata->hufMetadata.hufDesSize =
+ ZSTD_buildSuperBlockEntropy_literal(seqStorePtr->litStart, litSize,
+ &prevEntropy->huf, &nextEntropy->huf,
+ &entropyMetadata->hufMetadata,
+ ZSTD_disableLiteralsCompression(cctxParams),
+ workspace, wkspSize);
+ FORWARD_IF_ERROR(entropyMetadata->hufMetadata.hufDesSize, "ZSTD_buildSuperBlockEntropy_literal failed");
+ entropyMetadata->fseMetadata.fseTablesSize =
+ ZSTD_buildSuperBlockEntropy_sequences(seqStorePtr,
+ &prevEntropy->fse, &nextEntropy->fse,
+ cctxParams,
+ &entropyMetadata->fseMetadata,
+ workspace, wkspSize);
+ FORWARD_IF_ERROR(entropyMetadata->fseMetadata.fseTablesSize, "ZSTD_buildSuperBlockEntropy_sequences failed");
+ return 0;
+}
+
+/* ZSTD_compressSubBlock_literal() :
+ * Compresses literals section for a sub-block.
+ * When we have to write the Huffman table we will sometimes choose a header
+ * size larger than necessary. This is because we have to pick the header size
+ * before we know the table size + compressed size, so we have a bound on the
+ * table size. If we guessed incorrectly, we fall back to uncompressed literals.
+ *
+ * We write the header when writeEntropy=1, and set entropyWritten=1 when we
+ * succeed in writing the header; otherwise it is set to 0.
+ *
+ * hufMetadata->hType has literals block type info.
+ * If it is set_basic, all sub-blocks literals section will be Raw_Literals_Block.
+ * If it is set_rle, all sub-blocks literals section will be RLE_Literals_Block.
+ * If it is set_compressed, first sub-block's literals section will be Compressed_Literals_Block
+ * and the following sub-blocks' literals sections will be Treeless_Literals_Block.
+ * If it is set_repeat, all sub-blocks' literals sections will be Treeless_Literals_Block.
+ * @return : compressed size of literals section of a sub-block
+ * Or 0 if it is unable to compress.
+ * Or error code */
+static size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
+ const ZSTD_hufCTablesMetadata_t* hufMetadata,
+ const BYTE* literals, size_t litSize,
+ void* dst, size_t dstSize,
+ const int bmi2, int writeEntropy, int* entropyWritten)
+{
+ size_t const header = writeEntropy ? 200 : 0;
+ size_t const lhSize = 3 + (litSize >= (1 KB - header)) + (litSize >= (16 KB - header));
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = ostart + dstSize;
+ BYTE* op = ostart + lhSize;
+ U32 const singleStream = lhSize == 3;
+ symbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat;
+ size_t cLitSize = 0;
+
+ (void)bmi2; /* TODO bmi2... */
+
+ DEBUGLOG(5, "ZSTD_compressSubBlock_literal (litSize=%zu, lhSize=%zu, writeEntropy=%d)", litSize, lhSize, writeEntropy);
+
+ *entropyWritten = 0;
+ if (litSize == 0 || hufMetadata->hType == set_basic) {
+ DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal");
+ return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
+ } else if (hufMetadata->hType == set_rle) {
+ DEBUGLOG(5, "ZSTD_compressSubBlock_literal using rle literal");
+ return ZSTD_compressRleLiteralsBlock(dst, dstSize, literals, litSize);
+ }
+
+ assert(litSize > 0);
+ assert(hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat);
+
+ if (writeEntropy && hufMetadata->hType == set_compressed) {
+ ZSTD_memcpy(op, hufMetadata->hufDesBuffer, hufMetadata->hufDesSize);
+ op += hufMetadata->hufDesSize;
+ cLitSize += hufMetadata->hufDesSize;
+ DEBUGLOG(5, "ZSTD_compressSubBlock_literal (hSize=%zu)", hufMetadata->hufDesSize);
+ }
+
+ /* TODO bmi2 */
+ { const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, oend-op, literals, litSize, hufTable)
+ : HUF_compress4X_usingCTable(op, oend-op, literals, litSize, hufTable);
+ op += cSize;
+ cLitSize += cSize;
+ if (cSize == 0 || ERR_isError(cSize)) {
+ DEBUGLOG(5, "Failed to write entropy tables %s", ZSTD_getErrorName(cSize));
+ return 0;
+ }
+ /* If we expand and we aren't writing a header then emit uncompressed */
+ if (!writeEntropy && cLitSize >= litSize) {
+ DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal because uncompressible");
+ return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
+ }
+ /* If we are writing headers then allow expansion that doesn't change our header size. */
+ if (lhSize < (size_t)(3 + (cLitSize >= 1 KB) + (cLitSize >= 16 KB))) {
+ assert(cLitSize > litSize);
+ DEBUGLOG(5, "Literals expanded beyond allowed header size");
+ return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
+ }
+ DEBUGLOG(5, "ZSTD_compressSubBlock_literal (cSize=%zu)", cSize);
+ }
+
+ /* Build header */
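+    /* Literals_Section_Header : 2 bits for the literals block type, 2 bits for
+     * the size format, then Regenerated_Size and Compressed_Size packed
+     * little-endian; the per-case comments below give the exact bit splits. */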
+ switch(lhSize)
+ {
+ case 3: /* 2 - 2 - 10 - 10 */
+ { U32 const lhc = hType + ((!singleStream) << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<14);
+ MEM_writeLE24(ostart, lhc);
+ break;
+ }
+ case 4: /* 2 - 2 - 14 - 14 */
+ { U32 const lhc = hType + (2 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<18);
+ MEM_writeLE32(ostart, lhc);
+ break;
+ }
+ case 5: /* 2 - 2 - 18 - 18 */
+ { U32 const lhc = hType + (3 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<22);
+ MEM_writeLE32(ostart, lhc);
+ ostart[4] = (BYTE)(cLitSize >> 10);
+ break;
+ }
+ default: /* not possible : lhSize is {3,4,5} */
+ assert(0);
+ }
+ *entropyWritten = 1;
+ DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)litSize, (U32)(op-ostart));
+ return op-ostart;
+}
+
+static size_t ZSTD_seqDecompressedSize(seqStore_t const* seqStore, const seqDef* sequences, size_t nbSeq, size_t litSize, int lastSequence) {
+ const seqDef* const sstart = sequences;
+ const seqDef* const send = sequences + nbSeq;
+ const seqDef* sp = sstart;
+ size_t matchLengthSum = 0;
+ size_t litLengthSum = 0;
+ while (send-sp > 0) {
+ ZSTD_sequenceLength const seqLen = ZSTD_getSequenceLength(seqStore, sp);
+ litLengthSum += seqLen.litLength;
+ matchLengthSum += seqLen.matchLength;
+ sp++;
+ }
+ assert(litLengthSum <= litSize);
+ if (!lastSequence) {
+ assert(litLengthSum == litSize);
+ }
+ return matchLengthSum + litSize;
+}
+
+/* ZSTD_compressSubBlock_sequences() :
+ * Compresses sequences section for a sub-block.
+ * fseMetadata->llType, fseMetadata->ofType, and fseMetadata->mlType have
+ * symbol compression modes for the super-block.
+ * The first successfully compressed block will have these in its header.
+ * We set entropyWritten=1 when we succeed in compressing the sequences.
+ * The following sub-blocks will always have repeat mode.
+ * @return : compressed size of sequences section of a sub-block
+ * Or 0 if it is unable to compress
+ * Or error code. */
+static size_t ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables,
+ const ZSTD_fseCTablesMetadata_t* fseMetadata,
+ const seqDef* sequences, size_t nbSeq,
+ const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
+ const ZSTD_CCtx_params* cctxParams,
+ void* dst, size_t dstCapacity,
+ const int bmi2, int writeEntropy, int* entropyWritten)
+{
+ const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = ostart + dstCapacity;
+ BYTE* op = ostart;
+ BYTE* seqHead;
+
+ DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (nbSeq=%zu, writeEntropy=%d, longOffsets=%d)", nbSeq, writeEntropy, longOffsets);
+
+ *entropyWritten = 0;
+ /* Sequences Header */
+ RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
+ dstSize_tooSmall, "");
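+    /* Number_of_Sequences field : one byte when small, two bytes (high bit of
+     * the first byte set) when below LONGNBSEQ, otherwise a 0xFF marker
+     * followed by a little-endian 16-bit value of nbSeq - LONGNBSEQ. */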
+ if (nbSeq < 0x7F)
+ *op++ = (BYTE)nbSeq;
+ else if (nbSeq < LONGNBSEQ)
+ op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;
+ else
+ op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
+ if (nbSeq==0) {
+ return op - ostart;
+ }
+
+ /* seqHead : flags for FSE encoding type */
+ seqHead = op++;
+
+ DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (seqHeadSize=%u)", (unsigned)(op-ostart));
+
+ if (writeEntropy) {
+ const U32 LLtype = fseMetadata->llType;
+ const U32 Offtype = fseMetadata->ofType;
+ const U32 MLtype = fseMetadata->mlType;
+ DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (fseTablesSize=%zu)", fseMetadata->fseTablesSize);
+ *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
+ ZSTD_memcpy(op, fseMetadata->fseTablesBuffer, fseMetadata->fseTablesSize);
+ op += fseMetadata->fseTablesSize;
+ } else {
+ const U32 repeat = set_repeat;
+ *seqHead = (BYTE)((repeat<<6) + (repeat<<4) + (repeat<<2));
+ }
+
+ { size_t const bitstreamSize = ZSTD_encodeSequences(
+ op, oend - op,
+ fseTables->matchlengthCTable, mlCode,
+ fseTables->offcodeCTable, ofCode,
+ fseTables->litlengthCTable, llCode,
+ sequences, nbSeq,
+ longOffsets, bmi2);
+ FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed");
+ op += bitstreamSize;
+ /* zstd versions <= 1.3.4 mistakenly report corruption when
+ * FSE_readNCount() receives a buffer < 4 bytes.
+ * Fixed by https://github.com/facebook/zstd/pull/1146.
+ * This can happen when the last set_compressed table present is 2
+ * bytes and the bitstream is only one byte.
+ * In this exceedingly rare case, we will simply emit an uncompressed
+ * block, since it isn't worth optimizing.
+ */
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ if (writeEntropy && fseMetadata->lastCountSize && fseMetadata->lastCountSize + bitstreamSize < 4) {
+ /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
+ assert(fseMetadata->lastCountSize + bitstreamSize == 3);
+ DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
+ "emitting an uncompressed block.");
+ return 0;
+ }
+#endif
+ DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (bitstreamSize=%zu)", bitstreamSize);
+ }
+
+ /* zstd versions <= 1.4.0 mistakenly report error when
+ * sequences section body size is less than 3 bytes.
+ * Fixed by https://github.com/facebook/zstd/pull/1664.
+ * This can happen when the previous sequences section block is compressed
+ * with rle mode and the current block's sequences section is compressed
+ * with repeat mode where sequences section body size can be 1 byte.
+ */
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ if (op-seqHead < 4) {
+ DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.4.0 by emitting "
+ "an uncompressed block when sequences are < 4 bytes");
+ return 0;
+ }
+#endif
+
+ *entropyWritten = 1;
+ return op - ostart;
+}
+
+/* ZSTD_compressSubBlock() :
+ * Compresses a single sub-block.
+ * @return : compressed size of the sub-block
+ * Or 0 if it failed to compress. */
+static size_t ZSTD_compressSubBlock(const ZSTD_entropyCTables_t* entropy,
+ const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
+ const seqDef* sequences, size_t nbSeq,
+ const BYTE* literals, size_t litSize,
+ const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
+ const ZSTD_CCtx_params* cctxParams,
+ void* dst, size_t dstCapacity,
+ const int bmi2,
+ int writeLitEntropy, int writeSeqEntropy,
+ int* litEntropyWritten, int* seqEntropyWritten,
+ U32 lastBlock)
+{
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = ostart + dstCapacity;
+ BYTE* op = ostart + ZSTD_blockHeaderSize;
+ DEBUGLOG(5, "ZSTD_compressSubBlock (litSize=%zu, nbSeq=%zu, writeLitEntropy=%d, writeSeqEntropy=%d, lastBlock=%d)",
+ litSize, nbSeq, writeLitEntropy, writeSeqEntropy, lastBlock);
+ { size_t cLitSize = ZSTD_compressSubBlock_literal((const HUF_CElt*)entropy->huf.CTable,
+ &entropyMetadata->hufMetadata, literals, litSize,
+ op, oend-op, bmi2, writeLitEntropy, litEntropyWritten);
+ FORWARD_IF_ERROR(cLitSize, "ZSTD_compressSubBlock_literal failed");
+ if (cLitSize == 0) return 0;
+ op += cLitSize;
+ }
+ { size_t cSeqSize = ZSTD_compressSubBlock_sequences(&entropy->fse,
+ &entropyMetadata->fseMetadata,
+ sequences, nbSeq,
+ llCode, mlCode, ofCode,
+ cctxParams,
+ op, oend-op,
+ bmi2, writeSeqEntropy, seqEntropyWritten);
+ FORWARD_IF_ERROR(cSeqSize, "ZSTD_compressSubBlock_sequences failed");
+ if (cSeqSize == 0) return 0;
+ op += cSeqSize;
+ }
+ /* Write block header */
+ { size_t cSize = (op-ostart)-ZSTD_blockHeaderSize;
+ U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
+ MEM_writeLE24(ostart, cBlockHeader24);
+ }
+ return op-ostart;
+}
+
+static size_t ZSTD_estimateSubBlockSize_literal(const BYTE* literals, size_t litSize,
+ const ZSTD_hufCTables_t* huf,
+ const ZSTD_hufCTablesMetadata_t* hufMetadata,
+ void* workspace, size_t wkspSize,
+ int writeEntropy)
+{
+ unsigned* const countWksp = (unsigned*)workspace;
+ unsigned maxSymbolValue = 255;
+ size_t literalSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */
+
+ if (hufMetadata->hType == set_basic) return litSize;
+ else if (hufMetadata->hType == set_rle) return 1;
+ else if (hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat) {
+ size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)literals, litSize, workspace, wkspSize);
+ if (ZSTD_isError(largest)) return litSize;
+ { size_t cLitSizeEstimate = HUF_estimateCompressedSize((const HUF_CElt*)huf->CTable, countWksp, maxSymbolValue);
+ if (writeEntropy) cLitSizeEstimate += hufMetadata->hufDesSize;
+ return cLitSizeEstimate + literalSectionHeaderSize;
+ } }
+ assert(0); /* impossible */
+ return 0;
+}
+
+static size_t ZSTD_estimateSubBlockSize_symbolType(symbolEncodingType_e type,
+ const BYTE* codeTable, unsigned maxCode,
+ size_t nbSeq, const FSE_CTable* fseCTable,
+ const U32* additionalBits,
+ short const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
+ void* workspace, size_t wkspSize)
+{
+ unsigned* const countWksp = (unsigned*)workspace;
+ const BYTE* ctp = codeTable;
+ const BYTE* const ctStart = ctp;
+ const BYTE* const ctEnd = ctStart + nbSeq;
+ size_t cSymbolTypeSizeEstimateInBits = 0;
+ unsigned max = maxCode;
+
+ HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize); /* can't fail */
+ if (type == set_basic) {
+ /* We selected this encoding type, so it must be valid. */
+ assert(max <= defaultMax);
+ cSymbolTypeSizeEstimateInBits = max <= defaultMax
+ ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max)
+ : ERROR(GENERIC);
+ } else if (type == set_rle) {
+ cSymbolTypeSizeEstimateInBits = 0;
+ } else if (type == set_compressed || type == set_repeat) {
+ cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max);
+ }
+ if (ZSTD_isError(cSymbolTypeSizeEstimateInBits)) return nbSeq * 10;
+ while (ctp < ctEnd) {
+ if (additionalBits) cSymbolTypeSizeEstimateInBits += additionalBits[*ctp];
+ else cSymbolTypeSizeEstimateInBits += *ctp; /* for offset, offset code is also the number of additional bits */
+ ctp++;
+ }
+ return cSymbolTypeSizeEstimateInBits / 8;
+}
+
+static size_t ZSTD_estimateSubBlockSize_sequences(const BYTE* ofCodeTable,
+ const BYTE* llCodeTable,
+ const BYTE* mlCodeTable,
+ size_t nbSeq,
+ const ZSTD_fseCTables_t* fseTables,
+ const ZSTD_fseCTablesMetadata_t* fseMetadata,
+ void* workspace, size_t wkspSize,
+ int writeEntropy)
+{
+ size_t sequencesSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */
+ size_t cSeqSizeEstimate = 0;
+ cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, MaxOff,
+ nbSeq, fseTables->offcodeCTable, NULL,
+ OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
+ workspace, wkspSize);
+ cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->llType, llCodeTable, MaxLL,
+ nbSeq, fseTables->litlengthCTable, LL_bits,
+ LL_defaultNorm, LL_defaultNormLog, MaxLL,
+ workspace, wkspSize);
+ cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, MaxML,
+ nbSeq, fseTables->matchlengthCTable, ML_bits,
+ ML_defaultNorm, ML_defaultNormLog, MaxML,
+ workspace, wkspSize);
+ if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize;
+ return cSeqSizeEstimate + sequencesSectionHeaderSize;
+}
+
+static size_t ZSTD_estimateSubBlockSize(const BYTE* literals, size_t litSize,
+ const BYTE* ofCodeTable,
+ const BYTE* llCodeTable,
+ const BYTE* mlCodeTable,
+ size_t nbSeq,
+ const ZSTD_entropyCTables_t* entropy,
+ const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
+ void* workspace, size_t wkspSize,
+ int writeLitEntropy, int writeSeqEntropy) {
+ size_t cSizeEstimate = 0;
+ cSizeEstimate += ZSTD_estimateSubBlockSize_literal(literals, litSize,
+ &entropy->huf, &entropyMetadata->hufMetadata,
+ workspace, wkspSize, writeLitEntropy);
+ cSizeEstimate += ZSTD_estimateSubBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable,
+ nbSeq, &entropy->fse, &entropyMetadata->fseMetadata,
+ workspace, wkspSize, writeSeqEntropy);
+ return cSizeEstimate + ZSTD_blockHeaderSize;
+}
+
+static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t const* fseMetadata)
+{
+ if (fseMetadata->llType == set_compressed || fseMetadata->llType == set_rle)
+ return 1;
+ if (fseMetadata->mlType == set_compressed || fseMetadata->mlType == set_rle)
+ return 1;
+ if (fseMetadata->ofType == set_compressed || fseMetadata->ofType == set_rle)
+ return 1;
+ return 0;
+}
+
+/* ZSTD_compressSubBlock_multi() :
+ * Breaks super-block into multiple sub-blocks and compresses them.
+ * Entropy will be written to the first block.
+ * The following blocks will use repeat mode to compress.
+ * All sub-blocks are compressed blocks (no raw or rle blocks).
+ * @return : compressed size of the super block (which is multiple ZSTD blocks)
+ * Or 0 if it failed to compress. */
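+/* Informal sketch of the splitting loop below : sequences are accumulated one
+ * at a time, and once the ZSTD_estimateSubBlockSize() estimate of the pending
+ * run exceeds targetCBlockSize (or the last sequence is reached), the run is
+ * emitted with ZSTD_compressSubBlock(); entropy descriptions are only written
+ * into the first sub-block that commits them successfully. */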
+static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr,
+ const ZSTD_compressedBlockState_t* prevCBlock,
+ ZSTD_compressedBlockState_t* nextCBlock,
+ const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
+ const ZSTD_CCtx_params* cctxParams,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const int bmi2, U32 lastBlock,
+ void* workspace, size_t wkspSize)
+{
+ const seqDef* const sstart = seqStorePtr->sequencesStart;
+ const seqDef* const send = seqStorePtr->sequences;
+ const seqDef* sp = sstart;
+ const BYTE* const lstart = seqStorePtr->litStart;
+ const BYTE* const lend = seqStorePtr->lit;
+ const BYTE* lp = lstart;
+ BYTE const* ip = (BYTE const*)src;
+ BYTE const* const iend = ip + srcSize;
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = ostart + dstCapacity;
+ BYTE* op = ostart;
+ const BYTE* llCodePtr = seqStorePtr->llCode;
+ const BYTE* mlCodePtr = seqStorePtr->mlCode;
+ const BYTE* ofCodePtr = seqStorePtr->ofCode;
+ size_t targetCBlockSize = cctxParams->targetCBlockSize;
+ size_t litSize, seqCount;
+ int writeLitEntropy = entropyMetadata->hufMetadata.hType == set_compressed;
+ int writeSeqEntropy = 1;
+ int lastSequence = 0;
+
+ DEBUGLOG(5, "ZSTD_compressSubBlock_multi (litSize=%u, nbSeq=%u)",
+ (unsigned)(lend-lp), (unsigned)(send-sstart));
+
+ litSize = 0;
+ seqCount = 0;
+ do {
+ size_t cBlockSizeEstimate = 0;
+ if (sstart == send) {
+ lastSequence = 1;
+ } else {
+ const seqDef* const sequence = sp + seqCount;
+ lastSequence = sequence == send - 1;
+ litSize += ZSTD_getSequenceLength(seqStorePtr, sequence).litLength;
+ seqCount++;
+ }
+ if (lastSequence) {
+ assert(lp <= lend);
+ assert(litSize <= (size_t)(lend - lp));
+ litSize = (size_t)(lend - lp);
+ }
+ /* I think there is an optimization opportunity here.
+ * Calling ZSTD_estimateSubBlockSize for every sequence can be wasteful
+         * since it recalculates the estimate from scratch.
+         * For example, it would recount the literal distribution and symbol codes every time.
+ */
+ cBlockSizeEstimate = ZSTD_estimateSubBlockSize(lp, litSize, ofCodePtr, llCodePtr, mlCodePtr, seqCount,
+ &nextCBlock->entropy, entropyMetadata,
+ workspace, wkspSize, writeLitEntropy, writeSeqEntropy);
+ if (cBlockSizeEstimate > targetCBlockSize || lastSequence) {
+ int litEntropyWritten = 0;
+ int seqEntropyWritten = 0;
+ const size_t decompressedSize = ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, lastSequence);
+ const size_t cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata,
+ sp, seqCount,
+ lp, litSize,
+ llCodePtr, mlCodePtr, ofCodePtr,
+ cctxParams,
+ op, oend-op,
+ bmi2, writeLitEntropy, writeSeqEntropy,
+ &litEntropyWritten, &seqEntropyWritten,
+ lastBlock && lastSequence);
+ FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed");
+ if (cSize > 0 && cSize < decompressedSize) {
+ DEBUGLOG(5, "Committed the sub-block");
+ assert(ip + decompressedSize <= iend);
+ ip += decompressedSize;
+ sp += seqCount;
+ lp += litSize;
+ op += cSize;
+ llCodePtr += seqCount;
+ mlCodePtr += seqCount;
+ ofCodePtr += seqCount;
+ litSize = 0;
+ seqCount = 0;
+ /* Entropy only needs to be written once */
+ if (litEntropyWritten) {
+ writeLitEntropy = 0;
+ }
+ if (seqEntropyWritten) {
+ writeSeqEntropy = 0;
+ }
+ }
+ }
+ } while (!lastSequence);
+ if (writeLitEntropy) {
+ DEBUGLOG(5, "ZSTD_compressSubBlock_multi has literal entropy tables unwritten");
+ ZSTD_memcpy(&nextCBlock->entropy.huf, &prevCBlock->entropy.huf, sizeof(prevCBlock->entropy.huf));
+ }
+ if (writeSeqEntropy && ZSTD_needSequenceEntropyTables(&entropyMetadata->fseMetadata)) {
+ /* If we haven't written our entropy tables, then we've violated our contract and
+ * must emit an uncompressed block.
+ */
+ DEBUGLOG(5, "ZSTD_compressSubBlock_multi has sequence entropy tables unwritten");
+ return 0;
+ }
+ if (ip < iend) {
+ size_t const cSize = ZSTD_noCompressBlock(op, oend - op, ip, iend - ip, lastBlock);
+ DEBUGLOG(5, "ZSTD_compressSubBlock_multi last sub-block uncompressed, %zu bytes", (size_t)(iend - ip));
+ FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
+ assert(cSize != 0);
+ op += cSize;
+ /* We have to regenerate the repcodes because we've skipped some sequences */
+ if (sp < send) {
+ seqDef const* seq;
+ repcodes_t rep;
+ ZSTD_memcpy(&rep, prevCBlock->rep, sizeof(rep));
+ for (seq = sstart; seq < sp; ++seq) {
+ rep = ZSTD_updateRep(rep.rep, seq->offset - 1, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0);
+ }
+ ZSTD_memcpy(nextCBlock->rep, &rep, sizeof(rep));
+ }
+ }
+ DEBUGLOG(5, "ZSTD_compressSubBlock_multi compressed");
+ return op-ostart;
+}
+
+size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
+ void* dst, size_t dstCapacity,
+ void const* src, size_t srcSize,
+ unsigned lastBlock) {
+ ZSTD_entropyCTablesMetadata_t entropyMetadata;
+
+ FORWARD_IF_ERROR(ZSTD_buildSuperBlockEntropy(&zc->seqStore,
+ &zc->blockState.prevCBlock->entropy,
+ &zc->blockState.nextCBlock->entropy,
+ &zc->appliedParams,
+ &entropyMetadata,
+ zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), "");
+
+ return ZSTD_compressSubBlock_multi(&zc->seqStore,
+ zc->blockState.prevCBlock,
+ zc->blockState.nextCBlock,
+ &entropyMetadata,
+ &zc->appliedParams,
+ dst, dstCapacity,
+ src, srcSize,
+ zc->bmi2, lastBlock,
+ zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */);
+}
diff --git a/lib/zstd/compress/zstd_compress_superblock.h b/lib/zstd/compress/zstd_compress_superblock.h
new file mode 100644
index 00000000000000..224ece79546ebb
--- /dev/null
+++ b/lib/zstd/compress/zstd_compress_superblock.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_COMPRESS_ADVANCED_H
+#define ZSTD_COMPRESS_ADVANCED_H
+
+/*-*************************************
+* Dependencies
+***************************************/
+
+#include <linux/zstd.h> /* ZSTD_CCtx */
+
+/*-*************************************
+* Target Compressed Block Size
+***************************************/
+
+/* ZSTD_compressSuperBlock() :
+ * Used to compress a super block when targetCBlockSize is being used.
+ * The given block will be compressed into multiple sub blocks that are around targetCBlockSize. */
+size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
+ void* dst, size_t dstCapacity,
+ void const* src, size_t srcSize,
+ unsigned lastBlock);
+
+#endif /* ZSTD_COMPRESS_ADVANCED_H */
diff --git a/lib/zstd/compress/zstd_cwksp.h b/lib/zstd/compress/zstd_cwksp.h
new file mode 100644
index 00000000000000..98e359adf5d449
--- /dev/null
+++ b/lib/zstd/compress/zstd_cwksp.h
@@ -0,0 +1,482 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_CWKSP_H
+#define ZSTD_CWKSP_H
+
+/*-*************************************
+* Dependencies
+***************************************/
+#include "../common/zstd_internal.h"
+
+
+/*-*************************************
+* Constants
+***************************************/
+
+/* Since the workspace is effectively its own little malloc implementation /
+ * arena, when we run under ASAN, we should similarly insert redzones between
+ * each internal element of the workspace, so ASAN will catch overruns that
+ * reach outside an object but that stay inside the workspace.
+ *
+ * This defines the size of that redzone.
+ */
+#ifndef ZSTD_CWKSP_ASAN_REDZONE_SIZE
+#define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
+#endif
+
+/*-*************************************
+* Structures
+***************************************/
+typedef enum {
+ ZSTD_cwksp_alloc_objects,
+ ZSTD_cwksp_alloc_buffers,
+ ZSTD_cwksp_alloc_aligned
+} ZSTD_cwksp_alloc_phase_e;
+
+/*
+ * Used to describe whether the workspace is statically allocated (and will not
+ * necessarily ever be freed), or if it's dynamically allocated and we can
+ * expect a well-formed caller to free this.
+ */
+typedef enum {
+ ZSTD_cwksp_dynamic_alloc,
+ ZSTD_cwksp_static_alloc
+} ZSTD_cwksp_static_alloc_e;
+
+/*
+ * Zstd fits all its internal datastructures into a single contiguous buffer,
+ * so that it only needs to perform a single OS allocation (or so that a buffer
+ * can be provided to it and it can perform no allocations at all). This buffer
+ * is called the workspace.
+ *
+ * Several optimizations complicate that process of allocating memory ranges
+ * from this workspace for each internal datastructure:
+ *
+ * - These different internal datastructures have different setup requirements:
+ *
+ * - The static objects need to be cleared once and can then be trivially
+ * reused for each compression.
+ *
+ * - Various buffers don't need to be initialized at all--they are always
+ * written into before they're read.
+ *
+ * - The matchstate tables have a unique requirement that they don't need
+ * their memory to be totally cleared, but they do need the memory to have
+ * some bound, i.e., a guarantee that all values in the memory they've been
+ *     allocated are less than some maximum value (which is the starting value
+ * for the indices that they will then use for compression). When this
+ * guarantee is provided to them, they can use the memory without any setup
+ * work. When it can't, they have to clear the area.
+ *
+ * - These buffers also have different alignment requirements.
+ *
+ * - We would like to reuse the objects in the workspace for multiple
+ * compressions without having to perform any expensive reallocation or
+ * reinitialization work.
+ *
+ * - We would like to be able to efficiently reuse the workspace across
+ * multiple compressions **even when the compression parameters change** and
+ * we need to resize some of the objects (where possible).
+ *
+ * To attempt to manage this buffer, given these constraints, the ZSTD_cwksp
+ * abstraction was created. It works as follows:
+ *
+ * Workspace Layout:
+ *
+ * [ ... workspace ... ]
+ * [objects][tables ... ->] free space [<- ... aligned][<- ... buffers]
+ *
+ * The various objects that live in the workspace are divided into the
+ * following categories, and are allocated separately:
+ *
+ * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
+ * so that literally everything fits in a single buffer. Note: if present,
+ * this must be the first object in the workspace, since ZSTD_customFree{CCtx,
+ * CDict}() rely on a pointer comparison to see whether one or two frees are
+ * required.
+ *
+ * - Fixed size objects: these are fixed-size, fixed-count objects that are
+ * nonetheless "dynamically" allocated in the workspace so that we can
+ * control how they're initialized separately from the broader ZSTD_CCtx.
+ * Examples:
+ * - Entropy Workspace
+ * - 2 x ZSTD_compressedBlockState_t
+ * - CDict dictionary contents
+ *
+ * - Tables: these are any of several different datastructures (hash tables,
+ * chain tables, binary trees) that all respect a common format: they are
+ * uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
+ * Their sizes depend on the cparams.
+ *
+ * - Aligned: these buffers are used for various purposes that require 4 byte
+ * alignment, but don't require any initialization before they're used.
+ *
+ * - Buffers: these buffers are used for various purposes that don't require
+ * any alignment or initialization before they're used. This means they can
+ * be moved around at no cost for a new compression.
+ *
+ * Allocating Memory:
+ *
+ * The various types of objects must be allocated in order, so they can be
+ * correctly packed into the workspace buffer. That order is:
+ *
+ * 1. Objects
+ * 2. Buffers
+ * 3. Aligned
+ * 4. Tables
+ *
+ * Attempts to reserve objects of different types out of order will fail.
+ */
+typedef struct {
+ void* workspace;
+ void* workspaceEnd;
+
+ void* objectEnd;
+ void* tableEnd;
+ void* tableValidEnd;
+ void* allocStart;
+
+ BYTE allocFailed;
+ int workspaceOversizedDuration;
+ ZSTD_cwksp_alloc_phase_e phase;
+ ZSTD_cwksp_static_alloc_e isStatic;
+} ZSTD_cwksp;
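+
+/* Illustrative usage sketch (informal, not part of the API contract; the sizes
+ * and local names below are hypothetical, and each size must satisfy the
+ * alignment the corresponding reserve function asserts on) :
+ *     ZSTD_cwksp ws;
+ *     ZSTD_cwksp_init(&ws, mem, memSize, ZSTD_cwksp_static_alloc);
+ *     obj = ZSTD_cwksp_reserve_object(&ws, objSize);    (phase: objects)
+ *     buf = ZSTD_cwksp_reserve_buffer(&ws, bufSize);    (phase: buffers)
+ *     aln = ZSTD_cwksp_reserve_aligned(&ws, alnSize);   (phase: aligned)
+ *     tbl = ZSTD_cwksp_reserve_table(&ws, tblSize);     (phase: aligned, tables)
+ * Reserving a buffer after a table, for instance, would violate the phase
+ * ordering enforced by ZSTD_cwksp_internal_advance_phase(). */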
+
+/*-*************************************
+* Functions
+***************************************/
+
+MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);
+
+MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
+ (void)ws;
+ assert(ws->workspace <= ws->objectEnd);
+ assert(ws->objectEnd <= ws->tableEnd);
+ assert(ws->objectEnd <= ws->tableValidEnd);
+ assert(ws->tableEnd <= ws->allocStart);
+ assert(ws->tableValidEnd <= ws->allocStart);
+ assert(ws->allocStart <= ws->workspaceEnd);
+}
+
+/*
+ * Align must be a power of 2.
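+ * For example, ZSTD_cwksp_align(10, 8) == 16.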
+ */
+MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
+ size_t const mask = align - 1;
+ assert((align & mask) == 0);
+ return (size + mask) & ~mask;
+}
+
+/*
+ * Use this to determine how much space in the workspace we will consume to
+ * allocate this object. (Normally it should be exactly the size of the object,
+ * but under special conditions, like ASAN, where we pad each object, it might
+ * be larger.)
+ *
+ * Since tables aren't currently redzoned, you don't need to call through this
+ * to figure out how much space you need for the matchState tables. Everything
+ * else is though.
+ */
+MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
+ if (size == 0)
+ return 0;
+ return size;
+}
+
+MEM_STATIC void ZSTD_cwksp_internal_advance_phase(
+ ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) {
+ assert(phase >= ws->phase);
+ if (phase > ws->phase) {
+ if (ws->phase < ZSTD_cwksp_alloc_buffers &&
+ phase >= ZSTD_cwksp_alloc_buffers) {
+ ws->tableValidEnd = ws->objectEnd;
+ }
+ if (ws->phase < ZSTD_cwksp_alloc_aligned &&
+ phase >= ZSTD_cwksp_alloc_aligned) {
+ /* If unaligned allocations down from a too-large top have left us
+ * unaligned, we need to realign our alloc ptr. Technically, this
+ * can consume space that is unaccounted for in the neededSpace
+ * calculation. However, I believe this can only happen when the
+ * workspace is too large, and specifically when it is too large
+ * by a larger margin than the space that will be consumed. */
+ /* TODO: cleaner, compiler warning friendly way to do this??? */
+ ws->allocStart = (BYTE*)ws->allocStart - ((size_t)ws->allocStart & (sizeof(U32)-1));
+ if (ws->allocStart < ws->tableValidEnd) {
+ ws->tableValidEnd = ws->allocStart;
+ }
+ }
+ ws->phase = phase;
+ }
+}
+
+/*
+ * Returns whether this object/buffer/etc was allocated in this workspace.
+ */
+MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) {
+ return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd);
+}
+
+/*
+ * Internal function. Do not use directly.
+ */
+MEM_STATIC void* ZSTD_cwksp_reserve_internal(
+ ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) {
+ void* alloc;
+ void* bottom = ws->tableEnd;
+ ZSTD_cwksp_internal_advance_phase(ws, phase);
+ alloc = (BYTE *)ws->allocStart - bytes;
+
+ if (bytes == 0)
+ return NULL;
+
+
+ DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
+ alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
+ ZSTD_cwksp_assert_internal_consistency(ws);
+ assert(alloc >= bottom);
+ if (alloc < bottom) {
+ DEBUGLOG(4, "cwksp: alloc failed!");
+ ws->allocFailed = 1;
+ return NULL;
+ }
+ if (alloc < ws->tableValidEnd) {
+ ws->tableValidEnd = alloc;
+ }
+ ws->allocStart = alloc;
+
+
+ return alloc;
+}
+
+/*
+ * Reserves and returns unaligned memory.
+ */
+MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) {
+ return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
+}
+
+/*
+ * Reserves and returns memory sized and aligned on sizeof(unsigned).
+ */
+MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) {
+ assert((bytes & (sizeof(U32)-1)) == 0);
+ return ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, sizeof(U32)), ZSTD_cwksp_alloc_aligned);
+}
+
+/*
+ * Aligned on sizeof(unsigned). These buffers have the special property that
+ * their values remain constrained, allowing us to re-use them without
+ * memset()-ing them.
+ */
+MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
+ const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
+ void* alloc = ws->tableEnd;
+ void* end = (BYTE *)alloc + bytes;
+ void* top = ws->allocStart;
+
+ DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
+ alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
+ assert((bytes & (sizeof(U32)-1)) == 0);
+ ZSTD_cwksp_internal_advance_phase(ws, phase);
+ ZSTD_cwksp_assert_internal_consistency(ws);
+ assert(end <= top);
+ if (end > top) {
+ DEBUGLOG(4, "cwksp: table alloc failed!");
+ ws->allocFailed = 1;
+ return NULL;
+ }
+ ws->tableEnd = end;
+
+
+ return alloc;
+}
+
+/*
+ * Aligned on sizeof(void*).
+ */
+MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
+ size_t roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
+ void* alloc = ws->objectEnd;
+ void* end = (BYTE*)alloc + roundedBytes;
+
+
+ DEBUGLOG(5,
+ "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
+ alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
+ assert(((size_t)alloc & (sizeof(void*)-1)) == 0);
+ assert((bytes & (sizeof(void*)-1)) == 0);
+ ZSTD_cwksp_assert_internal_consistency(ws);
+ /* we must be in the first phase, no advance is possible */
+ if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
+ DEBUGLOG(4, "cwksp: object alloc failed!");
+ ws->allocFailed = 1;
+ return NULL;
+ }
+ ws->objectEnd = end;
+ ws->tableEnd = end;
+ ws->tableValidEnd = end;
+
+
+ return alloc;
+}
+
+MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) {
+ DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");
+
+
+ assert(ws->tableValidEnd >= ws->objectEnd);
+ assert(ws->tableValidEnd <= ws->allocStart);
+ ws->tableValidEnd = ws->objectEnd;
+ ZSTD_cwksp_assert_internal_consistency(ws);
+}
+
+MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) {
+ DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_clean");
+ assert(ws->tableValidEnd >= ws->objectEnd);
+ assert(ws->tableValidEnd <= ws->allocStart);
+ if (ws->tableValidEnd < ws->tableEnd) {
+ ws->tableValidEnd = ws->tableEnd;
+ }
+ ZSTD_cwksp_assert_internal_consistency(ws);
+}
+
+/*
+ * Zero the part of the allocated tables not already marked clean.
+ */
+MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
+ DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables");
+ assert(ws->tableValidEnd >= ws->objectEnd);
+ assert(ws->tableValidEnd <= ws->allocStart);
+ if (ws->tableValidEnd < ws->tableEnd) {
+ ZSTD_memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
+ }
+ ZSTD_cwksp_mark_tables_clean(ws);
+}
+
+/*
+ * Invalidates table allocations.
+ * All other allocations remain valid.
+ */
+MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
+ DEBUGLOG(4, "cwksp: clearing tables!");
+
+
+ ws->tableEnd = ws->objectEnd;
+ ZSTD_cwksp_assert_internal_consistency(ws);
+}
+
+/*
+ * Invalidates all buffer, aligned, and table allocations.
+ * Object allocations remain valid.
+ */
+MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
+ DEBUGLOG(4, "cwksp: clearing!");
+
+
+
+ ws->tableEnd = ws->objectEnd;
+ ws->allocStart = ws->workspaceEnd;
+ ws->allocFailed = 0;
+ if (ws->phase > ZSTD_cwksp_alloc_buffers) {
+ ws->phase = ZSTD_cwksp_alloc_buffers;
+ }
+ ZSTD_cwksp_assert_internal_consistency(ws);
+}
+
+/*
+ * The provided workspace takes ownership of the buffer [start, start+size).
+ * Any existing values in the workspace are ignored (the previously managed
+ * buffer, if present, must be separately freed).
+ */
+MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) {
+ DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
+ assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
+ ws->workspace = start;
+ ws->workspaceEnd = (BYTE*)start + size;
+ ws->objectEnd = ws->workspace;
+ ws->tableValidEnd = ws->objectEnd;
+ ws->phase = ZSTD_cwksp_alloc_objects;
+ ws->isStatic = isStatic;
+ ZSTD_cwksp_clear(ws);
+ ws->workspaceOversizedDuration = 0;
+ ZSTD_cwksp_assert_internal_consistency(ws);
+}
+
+MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
+ void* workspace = ZSTD_customMalloc(size, customMem);
+ DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
+ RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
+ ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc);
+ return 0;
+}
+
+MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
+ void *ptr = ws->workspace;
+ DEBUGLOG(4, "cwksp: freeing workspace");
+ ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp));
+ ZSTD_customFree(ptr, customMem);
+}
+
+/*
+ * Moves the management of a workspace from one cwksp to another. The src cwksp
+ * is left in an invalid state (src must be re-init()'ed before it's used again).
+ */
+MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
+ *dst = *src;
+ ZSTD_memset(src, 0, sizeof(ZSTD_cwksp));
+}
+
+MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
+ return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
+}
+
+MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
+ return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
+ + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
+}
+
+MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
+ return ws->allocFailed;
+}
+
+/*-*************************************
+* Functions Checking Free Space
+***************************************/
+
+MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
+ return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
+}
+
+MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
+ return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace;
+}
+
+MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
+ return ZSTD_cwksp_check_available(
+ ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR);
+}
+
+MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
+ return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)
+ && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION;
+}
+
+MEM_STATIC void ZSTD_cwksp_bump_oversized_duration(
+ ZSTD_cwksp* ws, size_t additionalNeededSpace) {
+ if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) {
+ ws->workspaceOversizedDuration++;
+ } else {
+ ws->workspaceOversizedDuration = 0;
+ }
+}
+
+
+#endif /* ZSTD_CWKSP_H */
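
The workspace above is carved from both ends of one buffer: objects and tables grow up from the bottom, buffers and aligned blocks grow down from the top, and the phase enum only moves forward, so reservations must arrive as objects, then buffers, then tables/aligned. The sketch below shows that intended call order. It is illustrative only: it assumes compilation inside lib/zstd/compress/ so the header and its internal types are visible, assumes a zeroed ZSTD_customMem selects the default allocator as in upstream zstd, and uses arbitrary sizes.

/* Minimal usage sketch of the cwksp API (illustrative, not part of this patch). */
#include "zstd_cwksp.h"

static int cwksp_demo(void)
{
    ZSTD_cwksp ws;
    ZSTD_customMem const cMem = { NULL, NULL, NULL };  /* default allocator */

    if (ZSTD_cwksp_create(&ws, 64 * 1024, cMem) != 0)
        return -1;                          /* allocation failed */

    {   /* phase 1: fixed objects, carved from the bottom */
        void* const obj = ZSTD_cwksp_reserve_object(&ws, 128);
        /* phase 2: plain buffers, carved down from the top */
        BYTE* const buf = ZSTD_cwksp_reserve_buffer(&ws, 1000);
        /* phase 3: tables (bottom) and aligned blocks (top) */
        void* const table = ZSTD_cwksp_reserve_table(&ws, 4096);
        void* const aligned = ZSTD_cwksp_reserve_aligned(&ws, 256);
        (void)obj; (void)buf; (void)table; (void)aligned;
    }

    if (ZSTD_cwksp_reserve_failed(&ws)) {
        /* the workspace was too small for the requested layout */
    }

    ZSTD_cwksp_clear(&ws);                  /* drop buffers/tables, keep objects */
    ZSTD_cwksp_free(&ws, cMem);             /* releases the backing buffer */
    return 0;
}

The compression context elsewhere in this patch carves out its match-state tables and working buffers through these same entry points, which is what the phase assertions above protect.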
diff --git a/lib/zstd/compress/zstd_double_fast.c b/lib/zstd/compress/zstd_double_fast.c
new file mode 100644
index 00000000000000..b0424d23ac57f0
--- /dev/null
+++ b/lib/zstd/compress/zstd_double_fast.c
@@ -0,0 +1,519 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include "zstd_compress_internal.h"
+#include "zstd_double_fast.h"
+
+
+void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
+ void const* end, ZSTD_dictTableLoadMethod_e dtlm)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashLarge = ms->hashTable;
+ U32 const hBitsL = cParams->hashLog;
+ U32 const mls = cParams->minMatch;
+ U32* const hashSmall = ms->chainTable;
+ U32 const hBitsS = cParams->chainLog;
+ const BYTE* const base = ms->window.base;
+ const BYTE* ip = base + ms->nextToUpdate;
+ const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
+ const U32 fastHashFillStep = 3;
+
+ /* Always insert every fastHashFillStep position into the hash tables.
+ * Insert the other positions into the large hash table if their entry
+ * is empty.
+ */
+ for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {
+ U32 const curr = (U32)(ip - base);
+ U32 i;
+ for (i = 0; i < fastHashFillStep; ++i) {
+ size_t const smHash = ZSTD_hashPtr(ip + i, hBitsS, mls);
+ size_t const lgHash = ZSTD_hashPtr(ip + i, hBitsL, 8);
+ if (i == 0)
+ hashSmall[smHash] = curr + i;
+ if (i == 0 || hashLarge[lgHash] == 0)
+ hashLarge[lgHash] = curr + i;
+ /* Only load extra positions for ZSTD_dtlm_full */
+ if (dtlm == ZSTD_dtlm_fast)
+ break;
+ } }
+}
+
+
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_compressBlock_doubleFast_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize,
+ U32 const mls /* template */, ZSTD_dictMode_e const dictMode)
+{
+ ZSTD_compressionParameters const* cParams = &ms->cParams;
+ U32* const hashLong = ms->hashTable;
+ const U32 hBitsL = cParams->hashLog;
+ U32* const hashSmall = ms->chainTable;
+ const U32 hBitsS = cParams->chainLog;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
+ /* presumes that, if there is a dictionary, it must be using Attach mode */
+ const U32 prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
+ const BYTE* const prefixLowest = base + prefixLowestIndex;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - HASH_READ_SIZE;
+ U32 offset_1=rep[0], offset_2=rep[1];
+ U32 offsetSaved = 0;
+
+ const ZSTD_matchState_t* const dms = ms->dictMatchState;
+ const ZSTD_compressionParameters* const dictCParams =
+ dictMode == ZSTD_dictMatchState ?
+ &dms->cParams : NULL;
+ const U32* const dictHashLong = dictMode == ZSTD_dictMatchState ?
+ dms->hashTable : NULL;
+ const U32* const dictHashSmall = dictMode == ZSTD_dictMatchState ?
+ dms->chainTable : NULL;
+ const U32 dictStartIndex = dictMode == ZSTD_dictMatchState ?
+ dms->window.dictLimit : 0;
+ const BYTE* const dictBase = dictMode == ZSTD_dictMatchState ?
+ dms->window.base : NULL;
+ const BYTE* const dictStart = dictMode == ZSTD_dictMatchState ?
+ dictBase + dictStartIndex : NULL;
+ const BYTE* const dictEnd = dictMode == ZSTD_dictMatchState ?
+ dms->window.nextSrc : NULL;
+ const U32 dictIndexDelta = dictMode == ZSTD_dictMatchState ?
+ prefixLowestIndex - (U32)(dictEnd - dictBase) :
+ 0;
+ const U32 dictHBitsL = dictMode == ZSTD_dictMatchState ?
+ dictCParams->hashLog : hBitsL;
+ const U32 dictHBitsS = dictMode == ZSTD_dictMatchState ?
+ dictCParams->chainLog : hBitsS;
+ const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictStart));
+
+ DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_generic");
+
+ assert(dictMode == ZSTD_noDict || dictMode == ZSTD_dictMatchState);
+
+ /* if a dictionary is attached, it must be within window range */
+ if (dictMode == ZSTD_dictMatchState) {
+ assert(ms->window.dictLimit + (1U << cParams->windowLog) >= endIndex);
+ }
+
+ /* init */
+ ip += (dictAndPrefixLength == 0);
+ if (dictMode == ZSTD_noDict) {
+ U32 const curr = (U32)(ip - base);
+ U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog);
+ U32 const maxRep = curr - windowLow;
+ if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
+ if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
+ }
+ if (dictMode == ZSTD_dictMatchState) {
+ /* dictMatchState repCode checks don't currently handle repCode == 0
+ * disabling. */
+ assert(offset_1 <= dictAndPrefixLength);
+ assert(offset_2 <= dictAndPrefixLength);
+ }
+
+ /* Main Search Loop */
+ while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
+ size_t mLength;
+ U32 offset;
+ size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
+ size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
+ size_t const dictHL = ZSTD_hashPtr(ip, dictHBitsL, 8);
+ size_t const dictHS = ZSTD_hashPtr(ip, dictHBitsS, mls);
+ U32 const curr = (U32)(ip-base);
+ U32 const matchIndexL = hashLong[h2];
+ U32 matchIndexS = hashSmall[h];
+ const BYTE* matchLong = base + matchIndexL;
+ const BYTE* match = base + matchIndexS;
+ const U32 repIndex = curr + 1 - offset_1;
+ const BYTE* repMatch = (dictMode == ZSTD_dictMatchState
+ && repIndex < prefixLowestIndex) ?
+ dictBase + (repIndex - dictIndexDelta) :
+ base + repIndex;
+ hashLong[h2] = hashSmall[h] = curr; /* update hash tables */
+
+ /* check dictMatchState repcode */
+ if (dictMode == ZSTD_dictMatchState
+ && ((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+ const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
+ mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
+ ip++;
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
+ goto _match_stored;
+ }
+
+ /* check noDict repcode */
+ if ( dictMode == ZSTD_noDict
+ && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
+ mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
+ ip++;
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
+ goto _match_stored;
+ }
+
+ if (matchIndexL > prefixLowestIndex) {
+ /* check prefix long match */
+ if (MEM_read64(matchLong) == MEM_read64(ip)) {
+ mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8;
+ offset = (U32)(ip-matchLong);
+ while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
+ goto _match_found;
+ }
+ } else if (dictMode == ZSTD_dictMatchState) {
+ /* check dictMatchState long match */
+ U32 const dictMatchIndexL = dictHashLong[dictHL];
+ const BYTE* dictMatchL = dictBase + dictMatchIndexL;
+ assert(dictMatchL < dictEnd);
+
+ if (dictMatchL > dictStart && MEM_read64(dictMatchL) == MEM_read64(ip)) {
+ mLength = ZSTD_count_2segments(ip+8, dictMatchL+8, iend, dictEnd, prefixLowest) + 8;
+ offset = (U32)(curr - dictMatchIndexL - dictIndexDelta);
+ while (((ip>anchor) & (dictMatchL>dictStart)) && (ip[-1] == dictMatchL[-1])) { ip--; dictMatchL--; mLength++; } /* catch up */
+ goto _match_found;
+ } }
+
+ if (matchIndexS > prefixLowestIndex) {
+ /* check prefix short match */
+ if (MEM_read32(match) == MEM_read32(ip)) {
+ goto _search_next_long;
+ }
+ } else if (dictMode == ZSTD_dictMatchState) {
+ /* check dictMatchState short match */
+ U32 const dictMatchIndexS = dictHashSmall[dictHS];
+ match = dictBase + dictMatchIndexS;
+ matchIndexS = dictMatchIndexS + dictIndexDelta;
+
+ if (match > dictStart && MEM_read32(match) == MEM_read32(ip)) {
+ goto _search_next_long;
+ } }
+
+ ip += ((ip-anchor) >> kSearchStrength) + 1;
+#if defined(__aarch64__)
+ PREFETCH_L1(ip+256);
+#endif
+ continue;
+
+_search_next_long:
+
+ { size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
+ size_t const dictHLNext = ZSTD_hashPtr(ip+1, dictHBitsL, 8);
+ U32 const matchIndexL3 = hashLong[hl3];
+ const BYTE* matchL3 = base + matchIndexL3;
+ hashLong[hl3] = curr + 1;
+
+ /* check prefix long +1 match */
+ if (matchIndexL3 > prefixLowestIndex) {
+ if (MEM_read64(matchL3) == MEM_read64(ip+1)) {
+ mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8;
+ ip++;
+ offset = (U32)(ip-matchL3);
+ while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */
+ goto _match_found;
+ }
+ } else if (dictMode == ZSTD_dictMatchState) {
+ /* check dict long +1 match */
+ U32 const dictMatchIndexL3 = dictHashLong[dictHLNext];
+ const BYTE* dictMatchL3 = dictBase + dictMatchIndexL3;
+ assert(dictMatchL3 < dictEnd);
+ if (dictMatchL3 > dictStart && MEM_read64(dictMatchL3) == MEM_read64(ip+1)) {
+ mLength = ZSTD_count_2segments(ip+1+8, dictMatchL3+8, iend, dictEnd, prefixLowest) + 8;
+ ip++;
+ offset = (U32)(curr + 1 - dictMatchIndexL3 - dictIndexDelta);
+ while (((ip>anchor) & (dictMatchL3>dictStart)) && (ip[-1] == dictMatchL3[-1])) { ip--; dictMatchL3--; mLength++; } /* catch up */
+ goto _match_found;
+ } } }
+
+ /* if no long +1 match, explore the short match we found */
+ if (dictMode == ZSTD_dictMatchState && matchIndexS < prefixLowestIndex) {
+ mLength = ZSTD_count_2segments(ip+4, match+4, iend, dictEnd, prefixLowest) + 4;
+ offset = (U32)(curr - matchIndexS);
+ while (((ip>anchor) & (match>dictStart)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+ } else {
+ mLength = ZSTD_count(ip+4, match+4, iend) + 4;
+ offset = (U32)(ip - match);
+ while (((ip>anchor) & (match>prefixLowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+ }
+
+_match_found:
+ offset_2 = offset_1;
+ offset_1 = offset;
+
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+
+_match_stored:
+ /* match found */
+ ip += mLength;
+ anchor = ip;
+
+ if (ip <= ilimit) {
+ /* Complementary insertion */
+ /* done after iLimit test, as candidates could be > iend-8 */
+ { U32 const indexToInsert = curr+2;
+ hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
+ hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
+ hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
+ hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
+ }
+
+ /* check immediate repcode */
+ if (dictMode == ZSTD_dictMatchState) {
+ while (ip <= ilimit) {
+ U32 const current2 = (U32)(ip-base);
+ U32 const repIndex2 = current2 - offset_2;
+ const BYTE* repMatch2 = dictMode == ZSTD_dictMatchState
+ && repIndex2 < prefixLowestIndex ?
+ dictBase + repIndex2 - dictIndexDelta :
+ base + repIndex2;
+ if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
+ && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+ const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend;
+ size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4;
+ U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
+ hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
+ hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
+ ip += repLength2;
+ anchor = ip;
+ continue;
+ }
+ break;
+ } }
+
+ if (dictMode == ZSTD_noDict) {
+ while ( (ip <= ilimit)
+ && ( (offset_2>0)
+ & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
+ /* store sequence */
+ size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
+ U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */
+ hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
+ hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, rLength-MINMATCH);
+ ip += rLength;
+ anchor = ip;
+ continue; /* faster when present ... (?) */
+ } } }
+ } /* while (ip < ilimit) */
+
+ /* save reps for next block */
+ rep[0] = offset_1 ? offset_1 : offsetSaved;
+ rep[1] = offset_2 ? offset_2 : offsetSaved;
+
+ /* Return the last literals size */
+ return (size_t)(iend - anchor);
+}
+
+
+size_t ZSTD_compressBlock_doubleFast(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ const U32 mls = ms->cParams.minMatch;
+ switch(mls)
+ {
+ default: /* includes case 3 */
+ case 4 :
+ return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_noDict);
+ case 5 :
+ return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_noDict);
+ case 6 :
+ return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_noDict);
+ case 7 :
+ return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_noDict);
+ }
+}
+
+
+size_t ZSTD_compressBlock_doubleFast_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ const U32 mls = ms->cParams.minMatch;
+ switch(mls)
+ {
+ default: /* includes case 3 */
+ case 4 :
+ return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_dictMatchState);
+ case 5 :
+ return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_dictMatchState);
+ case 6 :
+ return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_dictMatchState);
+ case 7 :
+ return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_dictMatchState);
+ }
+}
+
+
+static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize,
+ U32 const mls /* template */)
+{
+ ZSTD_compressionParameters const* cParams = &ms->cParams;
+ U32* const hashLong = ms->hashTable;
+ U32 const hBitsL = cParams->hashLog;
+ U32* const hashSmall = ms->chainTable;
+ U32 const hBitsS = cParams->chainLog;
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - 8;
+ const BYTE* const base = ms->window.base;
+ const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
+ const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
+ const U32 dictStartIndex = lowLimit;
+ const U32 dictLimit = ms->window.dictLimit;
+ const U32 prefixStartIndex = (dictLimit > lowLimit) ? dictLimit : lowLimit;
+ const BYTE* const prefixStart = base + prefixStartIndex;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const BYTE* const dictStart = dictBase + dictStartIndex;
+ const BYTE* const dictEnd = dictBase + prefixStartIndex;
+ U32 offset_1=rep[0], offset_2=rep[1];
+
+ DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_extDict_generic (srcSize=%zu)", srcSize);
+
+ /* if extDict is invalidated due to maxDistance, switch to "regular" variant */
+ if (prefixStartIndex == dictStartIndex)
+ return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, mls, ZSTD_noDict);
+
+ /* Search Loop */
+ while (ip < ilimit) { /* < instead of <=, because (ip+1) */
+ const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
+ const U32 matchIndex = hashSmall[hSmall];
+ const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
+ const BYTE* match = matchBase + matchIndex;
+
+ const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
+ const U32 matchLongIndex = hashLong[hLong];
+ const BYTE* const matchLongBase = matchLongIndex < prefixStartIndex ? dictBase : base;
+ const BYTE* matchLong = matchLongBase + matchLongIndex;
+
+ const U32 curr = (U32)(ip-base);
+ const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */
+ const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
+ size_t mLength;
+ hashSmall[hSmall] = hashLong[hLong] = curr; /* update hash table */
+
+ if ((((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex doesn't overlap dict + prefix */
+ & (repIndex > dictStartIndex))
+ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+ const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
+ mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
+ ip++;
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
+ } else {
+ if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
+ const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend;
+ const BYTE* const lowMatchPtr = matchLongIndex < prefixStartIndex ? dictStart : prefixStart;
+ U32 offset;
+ mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, prefixStart) + 8;
+ offset = curr - matchLongIndex;
+ while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
+ offset_2 = offset_1;
+ offset_1 = offset;
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+
+ } else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) {
+ size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
+ U32 const matchIndex3 = hashLong[h3];
+ const BYTE* const match3Base = matchIndex3 < prefixStartIndex ? dictBase : base;
+ const BYTE* match3 = match3Base + matchIndex3;
+ U32 offset;
+ hashLong[h3] = curr + 1;
+ if ( (matchIndex3 > dictStartIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
+ const BYTE* const matchEnd = matchIndex3 < prefixStartIndex ? dictEnd : iend;
+ const BYTE* const lowMatchPtr = matchIndex3 < prefixStartIndex ? dictStart : prefixStart;
+ mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, prefixStart) + 8;
+ ip++;
+ offset = curr+1 - matchIndex3;
+ while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */
+ } else {
+ const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
+ const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
+ mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
+ offset = curr - matchIndex;
+ while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+ }
+ offset_2 = offset_1;
+ offset_1 = offset;
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+
+ } else {
+ ip += ((ip-anchor) >> kSearchStrength) + 1;
+ continue;
+ } }
+
+ /* move to next sequence start */
+ ip += mLength;
+ anchor = ip;
+
+ if (ip <= ilimit) {
+ /* Complementary insertion */
+ /* done after iLimit test, as candidates could be > iend-8 */
+ { U32 const indexToInsert = curr+2;
+ hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
+ hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
+ hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
+ hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
+ }
+
+ /* check immediate repcode */
+ while (ip <= ilimit) {
+ U32 const current2 = (U32)(ip-base);
+ U32 const repIndex2 = current2 - offset_2;
+ const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
+ if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) /* intentional overflow : ensure repIndex2 doesn't overlap dict + prefix */
+ & (repIndex2 > dictStartIndex))
+ && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+ const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
+ size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
+ U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
+ hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
+ hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
+ ip += repLength2;
+ anchor = ip;
+ continue;
+ }
+ break;
+ } } }
+
+ /* save reps for next block */
+ rep[0] = offset_1;
+ rep[1] = offset_2;
+
+ /* Return the last literals size */
+ return (size_t)(iend - anchor);
+}
+
+
+size_t ZSTD_compressBlock_doubleFast_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ U32 const mls = ms->cParams.minMatch;
+ switch(mls)
+ {
+ default: /* includes case 3 */
+ case 4 :
+ return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 4);
+ case 5 :
+ return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 5);
+ case 6 :
+ return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 6);
+ case 7 :
+ return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 7);
+ }
+}
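
Every match found above is extended in two directions: forward with ZSTD_count()/ZSTD_count_2segments(), and backward with the repeated "catch up" loops of the form while (... ip[-1] == match[-1]) { ip--; match--; mLength++; }. The toy program below isolates just that backward step; it is not zstd code, uses && where the code above deliberately uses the branch-light &, and its buffer contents and positions are arbitrary.

/* Toy illustration of the backward "catch up" step (not zstd code). */
#include <stddef.h>
#include <stdio.h>

static size_t extend_backwards(const unsigned char* anchor,   /* end of last emitted sequence */
                               const unsigned char* lowest,   /* lowest valid match position  */
                               const unsigned char** ipPtr,
                               const unsigned char** matchPtr,
                               size_t mLength)                 /* forward length already counted */
{
    const unsigned char* ip = *ipPtr;
    const unsigned char* match = *matchPtr;
    while ((ip > anchor) && (match > lowest) && (ip[-1] == match[-1])) {
        ip--; match--; mLength++;          /* grow the match, shrink the literals */
    }
    *ipPtr = ip;
    *matchPtr = match;
    return mLength;
}

int main(void)
{
    const unsigned char buf[] = "abcdefgh....abcdefgh";
    const unsigned char* ip = buf + 16;    /* "efgh" inside the second copy */
    const unsigned char* match = buf + 4;  /* "efgh" inside the first copy  */
    size_t len = 4;                        /* forward match already verified */

    len = extend_backwards(buf /* anchor */, buf /* lowest */, &ip, &match, len);
    printf("match length after catch-up: %zu\n", len);   /* prints 8 */
    return 0;
}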
diff --git a/lib/zstd/compress/zstd_double_fast.h b/lib/zstd/compress/zstd_double_fast.h
new file mode 100644
index 00000000000000..6822bde65a1d8d
--- /dev/null
+++ b/lib/zstd/compress/zstd_double_fast.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_DOUBLE_FAST_H
+#define ZSTD_DOUBLE_FAST_H
+
+
+#include "../common/mem.h" /* U32 */
+#include "zstd_compress_internal.h" /* ZSTD_CCtx, size_t */
+
+void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
+ void const* end, ZSTD_dictTableLoadMethod_e dtlm);
+size_t ZSTD_compressBlock_doubleFast(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_doubleFast_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_doubleFast_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+
+
+#endif /* ZSTD_DOUBLE_FAST_H */
diff --git a/lib/zstd/compress/zstd_fast.c b/lib/zstd/compress/zstd_fast.c
new file mode 100644
index 00000000000000..96b7d48e2868ef
--- /dev/null
+++ b/lib/zstd/compress/zstd_fast.c
@@ -0,0 +1,496 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include "zstd_compress_internal.h" /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */
+#include "zstd_fast.h"
+
+
+void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
+ const void* const end,
+ ZSTD_dictTableLoadMethod_e dtlm)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashTable = ms->hashTable;
+ U32 const hBits = cParams->hashLog;
+ U32 const mls = cParams->minMatch;
+ const BYTE* const base = ms->window.base;
+ const BYTE* ip = base + ms->nextToUpdate;
+ const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
+ const U32 fastHashFillStep = 3;
+
+ /* Always insert every fastHashFillStep position into the hash table.
+ * Insert the other positions if their hash entry is empty.
+ */
+ for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
+ U32 const curr = (U32)(ip - base);
+ size_t const hash0 = ZSTD_hashPtr(ip, hBits, mls);
+ hashTable[hash0] = curr;
+ if (dtlm == ZSTD_dtlm_fast) continue;
+ /* Only load extra positions for ZSTD_dtlm_full */
+ { U32 p;
+ for (p = 1; p < fastHashFillStep; ++p) {
+ size_t const hash = ZSTD_hashPtr(ip + p, hBits, mls);
+ if (hashTable[hash] == 0) { /* not yet filled */
+ hashTable[hash] = curr + p;
+ } } } }
+}
+
+
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_compressBlock_fast_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize,
+ U32 const mls)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashTable = ms->hashTable;
+ U32 const hlog = cParams->hashLog;
+ /* support stepSize of 0 */
+ size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const istart = (const BYTE*)src;
+ /* We check ip0 (ip + 0) and ip1 (ip + 1) each loop */
+ const BYTE* ip0 = istart;
+ const BYTE* ip1;
+ const BYTE* anchor = istart;
+ const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
+ const U32 prefixStartIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
+ const BYTE* const prefixStart = base + prefixStartIndex;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - HASH_READ_SIZE;
+ U32 offset_1=rep[0], offset_2=rep[1];
+ U32 offsetSaved = 0;
+
+ /* init */
+ DEBUGLOG(5, "ZSTD_compressBlock_fast_generic");
+ ip0 += (ip0 == prefixStart);
+ ip1 = ip0 + 1;
+ { U32 const curr = (U32)(ip0 - base);
+ U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog);
+ U32 const maxRep = curr - windowLow;
+ if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
+ if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
+ }
+
+ /* Main Search Loop */
+#ifdef __INTEL_COMPILER
+ /* From intel 'The vector pragma indicates that the loop should be
+ * vectorized if it is legal to do so'. Can be used together with
+ * #pragma ivdep (but we have opted to exclude that because intel
+ * warns against using it).*/
+ #pragma vector always
+#endif
+ while (ip1 < ilimit) { /* < instead of <=, because check at ip0+2 */
+ size_t mLength;
+ BYTE const* ip2 = ip0 + 2;
+ size_t const h0 = ZSTD_hashPtr(ip0, hlog, mls);
+ U32 const val0 = MEM_read32(ip0);
+ size_t const h1 = ZSTD_hashPtr(ip1, hlog, mls);
+ U32 const val1 = MEM_read32(ip1);
+ U32 const current0 = (U32)(ip0-base);
+ U32 const current1 = (U32)(ip1-base);
+ U32 const matchIndex0 = hashTable[h0];
+ U32 const matchIndex1 = hashTable[h1];
+ BYTE const* repMatch = ip2 - offset_1;
+ const BYTE* match0 = base + matchIndex0;
+ const BYTE* match1 = base + matchIndex1;
+ U32 offcode;
+
+#if defined(__aarch64__)
+ PREFETCH_L1(ip0+256);
+#endif
+
+ hashTable[h0] = current0; /* update hash table */
+ hashTable[h1] = current1; /* update hash table */
+
+ assert(ip0 + 1 == ip1);
+
+ if ((offset_1 > 0) & (MEM_read32(repMatch) == MEM_read32(ip2))) {
+ mLength = (ip2[-1] == repMatch[-1]) ? 1 : 0;
+ ip0 = ip2 - mLength;
+ match0 = repMatch - mLength;
+ mLength += 4;
+ offcode = 0;
+ goto _match;
+ }
+ if ((matchIndex0 > prefixStartIndex) && MEM_read32(match0) == val0) {
+ /* found a regular match */
+ goto _offset;
+ }
+ if ((matchIndex1 > prefixStartIndex) && MEM_read32(match1) == val1) {
+ /* found a regular match after one literal */
+ ip0 = ip1;
+ match0 = match1;
+ goto _offset;
+ }
+ { size_t const step = ((size_t)(ip0-anchor) >> (kSearchStrength - 1)) + stepSize;
+ assert(step >= 2);
+ ip0 += step;
+ ip1 += step;
+ continue;
+ }
+_offset: /* Requires: ip0, match0 */
+ /* Compute the offset code */
+ offset_2 = offset_1;
+ offset_1 = (U32)(ip0-match0);
+ offcode = offset_1 + ZSTD_REP_MOVE;
+ mLength = 4;
+ /* Count the backwards match length */
+ while (((ip0>anchor) & (match0>prefixStart))
+ && (ip0[-1] == match0[-1])) { ip0--; match0--; mLength++; } /* catch up */
+
+_match: /* Requires: ip0, match0, offcode */
+ /* Count the forward length */
+ mLength += ZSTD_count(ip0+mLength, match0+mLength, iend);
+ ZSTD_storeSeq(seqStore, (size_t)(ip0-anchor), anchor, iend, offcode, mLength-MINMATCH);
+ /* match found */
+ ip0 += mLength;
+ anchor = ip0;
+
+ if (ip0 <= ilimit) {
+ /* Fill Table */
+ assert(base+current0+2 > istart); /* check base overflow */
+ hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2; /* here because current+2 could be > iend-8 */
+ hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
+
+ if (offset_2 > 0) { /* offset_2==0 means offset_2 is invalidated */
+ while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) ) {
+ /* store sequence */
+ size_t const rLength = ZSTD_count(ip0+4, ip0+4-offset_2, iend) + 4;
+ { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */
+ hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
+ ip0 += rLength;
+ ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, 0 /*offCode*/, rLength-MINMATCH);
+ anchor = ip0;
+ continue; /* faster when present (confirmed on gcc-8) ... (?) */
+ } } }
+ ip1 = ip0 + 1;
+ }
+
+ /* save reps for next block */
+ rep[0] = offset_1 ? offset_1 : offsetSaved;
+ rep[1] = offset_2 ? offset_2 : offsetSaved;
+
+ /* Return the last literals size */
+ return (size_t)(iend - anchor);
+}
+
+
+size_t ZSTD_compressBlock_fast(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ U32 const mls = ms->cParams.minMatch;
+ assert(ms->dictMatchState == NULL);
+ switch(mls)
+ {
+ default: /* includes case 3 */
+ case 4 :
+ return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4);
+ case 5 :
+ return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5);
+ case 6 :
+ return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6);
+ case 7 :
+ return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7);
+ }
+}
+
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_compressBlock_fast_dictMatchState_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize, U32 const mls)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashTable = ms->hashTable;
+ U32 const hlog = cParams->hashLog;
+ /* support stepSize of 0 */
+ U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
+ const BYTE* const base = ms->window.base;
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const U32 prefixStartIndex = ms->window.dictLimit;
+ const BYTE* const prefixStart = base + prefixStartIndex;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - HASH_READ_SIZE;
+ U32 offset_1=rep[0], offset_2=rep[1];
+ U32 offsetSaved = 0;
+
+ const ZSTD_matchState_t* const dms = ms->dictMatchState;
+ const ZSTD_compressionParameters* const dictCParams = &dms->cParams ;
+ const U32* const dictHashTable = dms->hashTable;
+ const U32 dictStartIndex = dms->window.dictLimit;
+ const BYTE* const dictBase = dms->window.base;
+ const BYTE* const dictStart = dictBase + dictStartIndex;
+ const BYTE* const dictEnd = dms->window.nextSrc;
+ const U32 dictIndexDelta = prefixStartIndex - (U32)(dictEnd - dictBase);
+ const U32 dictAndPrefixLength = (U32)(ip - prefixStart + dictEnd - dictStart);
+ const U32 dictHLog = dictCParams->hashLog;
+
+ /* if a dictionary is still attached, it necessarily means that
+ * it is within window size. So we just check it. */
+ const U32 maxDistance = 1U << cParams->windowLog;
+ const U32 endIndex = (U32)((size_t)(ip - base) + srcSize);
+ assert(endIndex - prefixStartIndex <= maxDistance);
+ (void)maxDistance; (void)endIndex; /* these variables are not used when assert() is disabled */
+
+ /* ensure there will be no underflow
+ * when translating a dict index into a local index */
+ assert(prefixStartIndex >= (U32)(dictEnd - dictBase));
+
+ /* init */
+ DEBUGLOG(5, "ZSTD_compressBlock_fast_dictMatchState_generic");
+ ip += (dictAndPrefixLength == 0);
+ /* dictMatchState repCode checks don't currently handle repCode == 0
+ * disabling. */
+ assert(offset_1 <= dictAndPrefixLength);
+ assert(offset_2 <= dictAndPrefixLength);
+
+ /* Main Search Loop */
+ while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
+ size_t mLength;
+ size_t const h = ZSTD_hashPtr(ip, hlog, mls);
+ U32 const curr = (U32)(ip-base);
+ U32 const matchIndex = hashTable[h];
+ const BYTE* match = base + matchIndex;
+ const U32 repIndex = curr + 1 - offset_1;
+ const BYTE* repMatch = (repIndex < prefixStartIndex) ?
+ dictBase + (repIndex - dictIndexDelta) :
+ base + repIndex;
+ hashTable[h] = curr; /* update hash table */
+
+ if ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
+ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+ const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
+ mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
+ ip++;
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
+ } else if ( (matchIndex <= prefixStartIndex) ) {
+ size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls);
+ U32 const dictMatchIndex = dictHashTable[dictHash];
+ const BYTE* dictMatch = dictBase + dictMatchIndex;
+ if (dictMatchIndex <= dictStartIndex ||
+ MEM_read32(dictMatch) != MEM_read32(ip)) {
+ assert(stepSize >= 1);
+ ip += ((ip-anchor) >> kSearchStrength) + stepSize;
+ continue;
+ } else {
+ /* found a dict match */
+ U32 const offset = (U32)(curr-dictMatchIndex-dictIndexDelta);
+ mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4;
+ while (((ip>anchor) & (dictMatch>dictStart))
+ && (ip[-1] == dictMatch[-1])) {
+ ip--; dictMatch--; mLength++;
+ } /* catch up */
+ offset_2 = offset_1;
+ offset_1 = offset;
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+ }
+ } else if (MEM_read32(match) != MEM_read32(ip)) {
+ /* it's not a match, and we're not going to check the dictionary */
+ assert(stepSize >= 1);
+ ip += ((ip-anchor) >> kSearchStrength) + stepSize;
+ continue;
+ } else {
+ /* found a regular match */
+ U32 const offset = (U32)(ip-match);
+ mLength = ZSTD_count(ip+4, match+4, iend) + 4;
+ while (((ip>anchor) & (match>prefixStart))
+ && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+ offset_2 = offset_1;
+ offset_1 = offset;
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+ }
+
+ /* match found */
+ ip += mLength;
+ anchor = ip;
+
+ if (ip <= ilimit) {
+ /* Fill Table */
+ assert(base+curr+2 > istart); /* check base overflow */
+ hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2; /* here because curr+2 could be > iend-8 */
+ hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
+
+ /* check immediate repcode */
+ while (ip <= ilimit) {
+ U32 const current2 = (U32)(ip-base);
+ U32 const repIndex2 = current2 - offset_2;
+ const BYTE* repMatch2 = repIndex2 < prefixStartIndex ?
+ dictBase - dictIndexDelta + repIndex2 :
+ base + repIndex2;
+ if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
+ && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+ const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
+ size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
+ U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
+ hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
+ ip += repLength2;
+ anchor = ip;
+ continue;
+ }
+ break;
+ }
+ }
+ }
+
+ /* save reps for next block */
+ rep[0] = offset_1 ? offset_1 : offsetSaved;
+ rep[1] = offset_2 ? offset_2 : offsetSaved;
+
+ /* Return the last literals size */
+ return (size_t)(iend - anchor);
+}
+
+size_t ZSTD_compressBlock_fast_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ U32 const mls = ms->cParams.minMatch;
+ assert(ms->dictMatchState != NULL);
+ switch(mls)
+ {
+ default: /* includes case 3 */
+ case 4 :
+ return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 4);
+ case 5 :
+ return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 5);
+ case 6 :
+ return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 6);
+ case 7 :
+ return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 7);
+ }
+}
+
+
+static size_t ZSTD_compressBlock_fast_extDict_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize, U32 const mls)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashTable = ms->hashTable;
+ U32 const hlog = cParams->hashLog;
+ /* support stepSize of 0 */
+ U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
+ const BYTE* const base = ms->window.base;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
+ const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
+ const U32 dictStartIndex = lowLimit;
+ const BYTE* const dictStart = dictBase + dictStartIndex;
+ const U32 dictLimit = ms->window.dictLimit;
+ const U32 prefixStartIndex = dictLimit < lowLimit ? lowLimit : dictLimit;
+ const BYTE* const prefixStart = base + prefixStartIndex;
+ const BYTE* const dictEnd = dictBase + prefixStartIndex;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - 8;
+ U32 offset_1=rep[0], offset_2=rep[1];
+
+ DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic (offset_1=%u)", offset_1);
+
+ /* switch to "regular" variant if extDict is invalidated due to maxDistance */
+ if (prefixStartIndex == dictStartIndex)
+ return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, mls);
+
+ /* Search Loop */
+ while (ip < ilimit) { /* < instead of <=, because (ip+1) */
+ const size_t h = ZSTD_hashPtr(ip, hlog, mls);
+ const U32 matchIndex = hashTable[h];
+ const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
+ const BYTE* match = matchBase + matchIndex;
+ const U32 curr = (U32)(ip-base);
+ const U32 repIndex = curr + 1 - offset_1;
+ const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
+ hashTable[h] = curr; /* update hash table */
+ DEBUGLOG(7, "offset_1 = %u , curr = %u", offset_1, curr);
+ assert(offset_1 <= curr +1); /* check repIndex */
+
+ if ( (((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > dictStartIndex))
+ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+ const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
+ size_t const rLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4;
+ ip++;
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, rLength-MINMATCH);
+ ip += rLength;
+ anchor = ip;
+ } else {
+ if ( (matchIndex < dictStartIndex) ||
+ (MEM_read32(match) != MEM_read32(ip)) ) {
+ assert(stepSize >= 1);
+ ip += ((ip-anchor) >> kSearchStrength) + stepSize;
+ continue;
+ }
+ { const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
+ const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
+ U32 const offset = curr - matchIndex;
+ size_t mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
+ while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+ offset_2 = offset_1; offset_1 = offset; /* update offset history */
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+ ip += mLength;
+ anchor = ip;
+ } }
+
+ if (ip <= ilimit) {
+ /* Fill Table */
+ hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2;
+ hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
+ /* check immediate repcode */
+ while (ip <= ilimit) {
+ U32 const current2 = (U32)(ip-base);
+ U32 const repIndex2 = current2 - offset_2;
+ const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
+ if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (repIndex2 > dictStartIndex)) /* intentional overflow */
+ && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+ const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
+ size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
+ { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; } /* swap offset_2 <=> offset_1 */
+ ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, 0 /*offcode*/, repLength2-MINMATCH);
+ hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
+ ip += repLength2;
+ anchor = ip;
+ continue;
+ }
+ break;
+ } } }
+
+ /* save reps for next block */
+ rep[0] = offset_1;
+ rep[1] = offset_2;
+
+ /* Return the last literals size */
+ return (size_t)(iend - anchor);
+}
+
+
+size_t ZSTD_compressBlock_fast_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ U32 const mls = ms->cParams.minMatch;
+ switch(mls)
+ {
+ default: /* includes case 3 */
+ case 4 :
+ return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 4);
+ case 5 :
+ return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 5);
+ case 6 :
+ return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 6);
+ case 7 :
+ return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 7);
+ }
+}
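
ZSTD_fillHashTable() and the search loops above share one basic pattern: hash the next few bytes, treat the hash-table entry as a candidate position, verify the candidate by re-reading the input, and store the current position so later occurrences can match against it. The toy program below shows only that pattern; it is not the zstd implementation (the real code uses ZSTD_hashPtr(), index validity checks against prefixStartIndex, and repcode shortcuts), and the hash constant, table size, and sample input are arbitrary.

/* Toy single-hash-table match finder (not the zstd implementation). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TOY_HLOG  12
#define TOY_HSIZE (1u << TOY_HLOG)

static uint32_t toy_hash4(const unsigned char* p)
{
    uint32_t v;
    memcpy(&v, p, 4);                      /* unaligned-safe 4-byte read */
    return (v * 2654435761u) >> (32 - TOY_HLOG);   /* arbitrary multiplicative hash */
}

int main(void)
{
    const unsigned char src[] = "hello world, hello world!";
    size_t const len = sizeof(src) - 1;
    uint32_t table[TOY_HSIZE] = { 0 };     /* 0 doubles as "empty" in this toy */
    size_t ip;

    for (ip = 0; ip + 4 <= len; ip++) {
        uint32_t const h = toy_hash4(src + ip);
        uint32_t const candidate = table[h];
        if (candidate != 0 && memcmp(src + candidate, src + ip, 4) == 0)
            printf("pos %zu: 4-byte match back at pos %u\n", ip, (unsigned)candidate);
        table[h] = (uint32_t)ip;           /* remember the most recent position */
    }
    return 0;
}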
diff --git a/lib/zstd/compress/zstd_fast.h b/lib/zstd/compress/zstd_fast.h
new file mode 100644
index 00000000000000..fddc2f532d21d1
--- /dev/null
+++ b/lib/zstd/compress/zstd_fast.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_FAST_H
+#define ZSTD_FAST_H
+
+
+#include "../common/mem.h" /* U32 */
+#include "zstd_compress_internal.h"
+
+void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
+ void const* end, ZSTD_dictTableLoadMethod_e dtlm);
+size_t ZSTD_compressBlock_fast(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_fast_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_fast_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+
+#endif /* ZSTD_FAST_H */
diff --git a/lib/zstd/compress/zstd_lazy.c b/lib/zstd/compress/zstd_lazy.c
new file mode 100644
index 00000000000000..fb54d4e28a2bce
--- /dev/null
+++ b/lib/zstd/compress/zstd_lazy.c
@@ -0,0 +1,1414 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include "zstd_compress_internal.h"
+#include "zstd_lazy.h"
+
+
+/*-*************************************
+* Binary Tree search
+***************************************/
+
+static void
+ZSTD_updateDUBT(ZSTD_matchState_t* ms,
+ const BYTE* ip, const BYTE* iend,
+ U32 mls)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashTable = ms->hashTable;
+ U32 const hashLog = cParams->hashLog;
+
+ U32* const bt = ms->chainTable;
+ U32 const btLog = cParams->chainLog - 1;
+ U32 const btMask = (1 << btLog) - 1;
+
+ const BYTE* const base = ms->window.base;
+ U32 const target = (U32)(ip - base);
+ U32 idx = ms->nextToUpdate;
+
+ if (idx != target)
+ DEBUGLOG(7, "ZSTD_updateDUBT, from %u to %u (dictLimit:%u)",
+ idx, target, ms->window.dictLimit);
+ assert(ip + 8 <= iend); /* condition for ZSTD_hashPtr */
+ (void)iend;
+
+ assert(idx >= ms->window.dictLimit); /* condition for valid base+idx */
+ for ( ; idx < target ; idx++) {
+ size_t const h = ZSTD_hashPtr(base + idx, hashLog, mls); /* assumption : ip + 8 <= iend */
+ U32 const matchIndex = hashTable[h];
+
+ U32* const nextCandidatePtr = bt + 2*(idx&btMask);
+ U32* const sortMarkPtr = nextCandidatePtr + 1;
+
+ DEBUGLOG(8, "ZSTD_updateDUBT: insert %u", idx);
+ hashTable[h] = idx; /* Update Hash Table */
+ *nextCandidatePtr = matchIndex; /* update BT like a chain */
+ *sortMarkPtr = ZSTD_DUBT_UNSORTED_MARK;
+ }
+ ms->nextToUpdate = target;
+}
+
+
+/* ZSTD_insertDUBT1() :
+ * sort one already inserted but unsorted position
+ * assumption : curr >= btlow == (curr - btmask)
+ * doesn't fail */
+static void
+ZSTD_insertDUBT1(ZSTD_matchState_t* ms,
+ U32 curr, const BYTE* inputEnd,
+ U32 nbCompares, U32 btLow,
+ const ZSTD_dictMode_e dictMode)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const bt = ms->chainTable;
+ U32 const btLog = cParams->chainLog - 1;
+ U32 const btMask = (1 << btLog) - 1;
+ size_t commonLengthSmaller=0, commonLengthLarger=0;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const U32 dictLimit = ms->window.dictLimit;
+ const BYTE* const ip = (curr>=dictLimit) ? base + curr : dictBase + curr;
+ const BYTE* const iend = (curr>=dictLimit) ? inputEnd : dictBase + dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ const BYTE* match;
+ U32* smallerPtr = bt + 2*(curr&btMask);
+ U32* largerPtr = smallerPtr + 1;
+ U32 matchIndex = *smallerPtr; /* this candidate is unsorted : next sorted candidate is reached through *smallerPtr, while *largerPtr contains previous unsorted candidate (which is already saved and can be overwritten) */
+ U32 dummy32; /* to be nullified at the end */
+ U32 const windowValid = ms->window.lowLimit;
+ U32 const maxDistance = 1U << cParams->windowLog;
+ U32 const windowLow = (curr - windowValid > maxDistance) ? curr - maxDistance : windowValid;
+
+
+ DEBUGLOG(8, "ZSTD_insertDUBT1(%u) (dictLimit=%u, lowLimit=%u)",
+ curr, dictLimit, windowLow);
+ assert(curr >= btLow);
+ assert(ip < iend); /* condition for ZSTD_count */
+
+ for (; nbCompares && (matchIndex > windowLow); --nbCompares) {
+ U32* const nextPtr = bt + 2*(matchIndex & btMask);
+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+ assert(matchIndex < curr);
+ /* note : all candidates are now supposed to be sorted,
+ * but it's still possible to have nextPtr[1] == ZSTD_DUBT_UNSORTED_MARK
+ * when a real index has the same value as ZSTD_DUBT_UNSORTED_MARK */
+
+ if ( (dictMode != ZSTD_extDict)
+ || (matchIndex+matchLength >= dictLimit) /* both in current segment*/
+ || (curr < dictLimit) /* both in extDict */) {
+ const BYTE* const mBase = ( (dictMode != ZSTD_extDict)
+ || (matchIndex+matchLength >= dictLimit)) ?
+ base : dictBase;
+ assert( (matchIndex+matchLength >= dictLimit) /* might be wrong if extDict is incorrectly set to 0 */
+ || (curr < dictLimit) );
+ match = mBase + matchIndex;
+ matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
+ } else {
+ match = dictBase + matchIndex;
+ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
+ if (matchIndex+matchLength >= dictLimit)
+ match = base + matchIndex; /* preparation for next read of match[matchLength] */
+ }
+
+ DEBUGLOG(8, "ZSTD_insertDUBT1: comparing %u with %u : found %u common bytes ",
+ curr, matchIndex, (U32)matchLength);
+
+ if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */
+ break; /* drop, to guarantee consistency; misses a bit of compression, but other solutions can corrupt the tree */
+ }
+
+ if (match[matchLength] < ip[matchLength]) { /* necessarily within buffer */
+ /* match is smaller than current */
+ *smallerPtr = matchIndex; /* update smaller idx */
+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop searching */
+ DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is smaller : next => %u",
+ matchIndex, btLow, nextPtr[1]);
+ smallerPtr = nextPtr+1; /* new "candidate" => larger than match, which was smaller than target */
+ matchIndex = nextPtr[1]; /* new matchIndex, larger than previous and closer to current */
+ } else {
+ /* match is larger than current */
+ *largerPtr = matchIndex;
+ commonLengthLarger = matchLength;
+ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop searching */
+ DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is larger => %u",
+ matchIndex, btLow, nextPtr[0]);
+ largerPtr = nextPtr;
+ matchIndex = nextPtr[0];
+ } }
+
+ *smallerPtr = *largerPtr = 0;
+}
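
ZSTD_insertDUBT1() above does not perform a classical recursive insert: the newly inserted position becomes the bucket's root, and each existing candidate is hung onto the new node's smaller or larger link while the walk descends into the opposite child slot, finishing by zeroing whichever two slots remain open. The toy below reproduces only that pointer re-threading; it is not zstd code, uses NUL-terminated strcmp() in place of zstd's bounded, length-cached suffix comparison, and uses a single root variable in place of the hash-table bucket.

/* Toy re-threading insert over suffixes of a string (not zstd code). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TOY_N 16

/* bt[2*i] holds i's smaller-suffix child, bt[2*i + 1] its larger-suffix child;
 * index 0 doubles as the "no child" sentinel, so position 0 is never inserted. */
static uint32_t bt[2 * TOY_N];

static void toy_insert(const char* s, uint32_t root, uint32_t curr)
{
    uint32_t* smallerPtr = &bt[2 * curr];
    uint32_t* largerPtr  = &bt[2 * curr + 1];
    uint32_t cand = root;

    while (cand != 0) {
        if (strcmp(s + cand, s + curr) < 0) {
            /* cand sorts below curr: hang it on the smaller side; any further
             * smaller-than-curr node must be in cand's larger subtree */
            *smallerPtr = cand;
            smallerPtr = &bt[2 * cand + 1];
            cand = *smallerPtr;
        } else {
            /* cand sorts above curr: hang it on the larger side and keep
             * walking through cand's smaller subtree */
            *largerPtr = cand;
            largerPtr = &bt[2 * cand];
            cand = *largerPtr;
        }
    }
    *smallerPtr = *largerPtr = 0;          /* close both open chains */
}

int main(void)
{
    const char* s = "banana";
    uint32_t root = 0;
    uint32_t i;

    for (i = 1; s[i] != '\0'; i++) {       /* skip position 0, the sentinel */
        toy_insert(s, root, i);
        root = i;                          /* the newest position becomes the root */
    }
    printf("root=%u smaller=%u larger=%u\n",
           (unsigned)root, (unsigned)bt[2 * root], (unsigned)bt[2 * root + 1]);
    return 0;
}

With "banana" the final root is position 5 ("a"), with position 4 ("na") as its larger child, and so on; as in the real code, the most recently inserted position is the one the (here simulated) root pointer refers to.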
+
+
+static size_t
+ZSTD_DUBT_findBetterDictMatch (
+ ZSTD_matchState_t* ms,
+ const BYTE* const ip, const BYTE* const iend,
+ size_t* offsetPtr,
+ size_t bestLength,
+ U32 nbCompares,
+ U32 const mls,
+ const ZSTD_dictMode_e dictMode)
+{
+ const ZSTD_matchState_t * const dms = ms->dictMatchState;
+ const ZSTD_compressionParameters* const dmsCParams = &dms->cParams;
+ const U32 * const dictHashTable = dms->hashTable;
+ U32 const hashLog = dmsCParams->hashLog;
+ size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
+ U32 dictMatchIndex = dictHashTable[h];
+
+ const BYTE* const base = ms->window.base;
+ const BYTE* const prefixStart = base + ms->window.dictLimit;
+ U32 const curr = (U32)(ip-base);
+ const BYTE* const dictBase = dms->window.base;
+ const BYTE* const dictEnd = dms->window.nextSrc;
+ U32 const dictHighLimit = (U32)(dms->window.nextSrc - dms->window.base);
+ U32 const dictLowLimit = dms->window.lowLimit;
+ U32 const dictIndexDelta = ms->window.lowLimit - dictHighLimit;
+
+ U32* const dictBt = dms->chainTable;
+ U32 const btLog = dmsCParams->chainLog - 1;
+ U32 const btMask = (1 << btLog) - 1;
+ U32 const btLow = (btMask >= dictHighLimit - dictLowLimit) ? dictLowLimit : dictHighLimit - btMask;
+
+ size_t commonLengthSmaller=0, commonLengthLarger=0;
+
+ (void)dictMode;
+ assert(dictMode == ZSTD_dictMatchState);
+
+ for (; nbCompares && (dictMatchIndex > dictLowLimit); --nbCompares) {
+ U32* const nextPtr = dictBt + 2*(dictMatchIndex & btMask);
+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+ const BYTE* match = dictBase + dictMatchIndex;
+ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
+ if (dictMatchIndex+matchLength >= dictHighLimit)
+ match = base + dictMatchIndex + dictIndexDelta; /* to prepare for next usage of match[matchLength] */
+
+ if (matchLength > bestLength) {
+ U32 matchIndex = dictMatchIndex + dictIndexDelta;
+ if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) {
+ DEBUGLOG(9, "ZSTD_DUBT_findBetterDictMatch(%u) : found better match length %u -> %u and offsetCode %u -> %u (dictMatchIndex %u, matchIndex %u)",
+ curr, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, ZSTD_REP_MOVE + curr - matchIndex, dictMatchIndex, matchIndex);
+ bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex;
+ }
+ if (ip+matchLength == iend) { /* reached end of input : ip[matchLength] is not valid, no way to know if it's larger or smaller than match */
+ break; /* drop, to guarantee consistency (miss a little bit of compression) */
+ }
+ }
+
+ if (match[matchLength] < ip[matchLength]) {
+ if (dictMatchIndex <= btLow) { break; } /* beyond tree size, stop the search */
+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+ dictMatchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
+ } else {
+ /* match is larger than current */
+ if (dictMatchIndex <= btLow) { break; } /* beyond tree size, stop the search */
+ commonLengthLarger = matchLength;
+ dictMatchIndex = nextPtr[0];
+ }
+ }
+
+ if (bestLength >= MINMATCH) {
+ U32 const mIndex = curr - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;
+ DEBUGLOG(8, "ZSTD_DUBT_findBetterDictMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
+ curr, (U32)bestLength, (U32)*offsetPtr, mIndex);
+ }
+ return bestLength;
+
+}
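
Editorial note, not part of this patch: the value stored into *offsetPtr above is an offset code rather than a raw distance. Assuming ZSTD_REP_MOVE is defined as ZSTD_REP_NUM - 1 in this version's zstd_compress_internal.h, a real match at distance d is recorded as d + ZSTD_REP_MOVE, keeping the smallest code values free for repeat-offset matches; the lazy parser later recovers d as offset - ZSTD_REP_MOVE before updating offset_1/offset_2. A hedged sketch of the round trip, with stand-in constant names:

    #include <assert.h>

    /* Assumed to mirror zstd_compress_internal.h in this version of the library. */
    #define REP_NUM  3
    #define REP_MOVE (REP_NUM - 1)

    static unsigned offcode_from_distance(unsigned distance) { return distance + REP_MOVE; }
    static unsigned distance_from_offcode(unsigned offCode)  { return offCode - REP_MOVE;  }

    int main(void)
    {
        unsigned const code = offcode_from_distance(4096);
        assert(distance_from_offcode(code) == 4096);   /* round-trips */
        assert(code > REP_MOVE);                       /* never collides with repeat-offset codes */
        return 0;
    }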
+
+
+static size_t
+ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
+ const BYTE* const ip, const BYTE* const iend,
+ size_t* offsetPtr,
+ U32 const mls,
+ const ZSTD_dictMode_e dictMode)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashTable = ms->hashTable;
+ U32 const hashLog = cParams->hashLog;
+ size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
+ U32 matchIndex = hashTable[h];
+
+ const BYTE* const base = ms->window.base;
+ U32 const curr = (U32)(ip-base);
+ U32 const windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog);
+
+ U32* const bt = ms->chainTable;
+ U32 const btLog = cParams->chainLog - 1;
+ U32 const btMask = (1 << btLog) - 1;
+ U32 const btLow = (btMask >= curr) ? 0 : curr - btMask;
+ U32 const unsortLimit = MAX(btLow, windowLow);
+
+ U32* nextCandidate = bt + 2*(matchIndex&btMask);
+ U32* unsortedMark = bt + 2*(matchIndex&btMask) + 1;
+ U32 nbCompares = 1U << cParams->searchLog;
+ U32 nbCandidates = nbCompares;
+ U32 previousCandidate = 0;
+
+ DEBUGLOG(7, "ZSTD_DUBT_findBestMatch (%u) ", curr);
+ assert(ip <= iend-8); /* required for h calculation */
+ assert(dictMode != ZSTD_dedicatedDictSearch);
+
+ /* reach end of unsorted candidates list */
+ while ( (matchIndex > unsortLimit)
+ && (*unsortedMark == ZSTD_DUBT_UNSORTED_MARK)
+ && (nbCandidates > 1) ) {
+ DEBUGLOG(8, "ZSTD_DUBT_findBestMatch: candidate %u is unsorted",
+ matchIndex);
+        *unsortedMark = previousCandidate;  /* the unsortedMark becomes a reversed chain, used to walk back up to the original position */
+ previousCandidate = matchIndex;
+ matchIndex = *nextCandidate;
+ nextCandidate = bt + 2*(matchIndex&btMask);
+ unsortedMark = bt + 2*(matchIndex&btMask) + 1;
+ nbCandidates --;
+ }
+
+ /* nullify last candidate if it's still unsorted
+ * simplification, detrimental to compression ratio, beneficial for speed */
+ if ( (matchIndex > unsortLimit)
+ && (*unsortedMark==ZSTD_DUBT_UNSORTED_MARK) ) {
+ DEBUGLOG(7, "ZSTD_DUBT_findBestMatch: nullify last unsorted candidate %u",
+ matchIndex);
+ *nextCandidate = *unsortedMark = 0;
+ }
+
+ /* batch sort stacked candidates */
+ matchIndex = previousCandidate;
+ while (matchIndex) { /* will end on matchIndex == 0 */
+ U32* const nextCandidateIdxPtr = bt + 2*(matchIndex&btMask) + 1;
+ U32 const nextCandidateIdx = *nextCandidateIdxPtr;
+ ZSTD_insertDUBT1(ms, matchIndex, iend,
+ nbCandidates, unsortLimit, dictMode);
+ matchIndex = nextCandidateIdx;
+ nbCandidates++;
+ }
+
+ /* find longest match */
+ { size_t commonLengthSmaller = 0, commonLengthLarger = 0;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const U32 dictLimit = ms->window.dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ U32* smallerPtr = bt + 2*(curr&btMask);
+ U32* largerPtr = bt + 2*(curr&btMask) + 1;
+ U32 matchEndIdx = curr + 8 + 1;
+ U32 dummy32; /* to be nullified at the end */
+ size_t bestLength = 0;
+
+ matchIndex = hashTable[h];
+ hashTable[h] = curr; /* Update Hash Table */
+
+ for (; nbCompares && (matchIndex > windowLow); --nbCompares) {
+ U32* const nextPtr = bt + 2*(matchIndex & btMask);
+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+ const BYTE* match;
+
+ if ((dictMode != ZSTD_extDict) || (matchIndex+matchLength >= dictLimit)) {
+ match = base + matchIndex;
+ matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
+ } else {
+ match = dictBase + matchIndex;
+ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
+ if (matchIndex+matchLength >= dictLimit)
+ match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
+ }
+
+ if (matchLength > bestLength) {
+ if (matchLength > matchEndIdx - matchIndex)
+ matchEndIdx = matchIndex + (U32)matchLength;
+ if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
+ bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex;
+ if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */
+ if (dictMode == ZSTD_dictMatchState) {
+ nbCompares = 0; /* in addition to avoiding checking any
+ * further in this loop, make sure we
+ * skip checking in the dictionary. */
+ }
+ break; /* drop, to guarantee consistency (miss a little bit of compression) */
+ }
+ }
+
+ if (match[matchLength] < ip[matchLength]) {
+ /* match is smaller than current */
+ *smallerPtr = matchIndex; /* update smaller idx */
+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+                smallerPtr = nextPtr+1;               /* new candidate => larger than match, which was smaller than current */
+ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
+ } else {
+ /* match is larger than current */
+ *largerPtr = matchIndex;
+ commonLengthLarger = matchLength;
+ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ largerPtr = nextPtr;
+ matchIndex = nextPtr[0];
+ } }
+
+ *smallerPtr = *largerPtr = 0;
+
+ assert(nbCompares <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */
+ if (dictMode == ZSTD_dictMatchState && nbCompares) {
+ bestLength = ZSTD_DUBT_findBetterDictMatch(
+ ms, ip, iend,
+ offsetPtr, bestLength, nbCompares,
+ mls, dictMode);
+ }
+
+ assert(matchEndIdx > curr+8); /* ensure nextToUpdate is increased */
+ ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */
+ if (bestLength >= MINMATCH) {
+ U32 const mIndex = curr - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;
+ DEBUGLOG(8, "ZSTD_DUBT_findBestMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
+ curr, (U32)bestLength, (U32)*offsetPtr, mIndex);
+ }
+ return bestLength;
+ }
+}
+
+
+/* ZSTD_BtFindBestMatch() : Tree updater, providing best match */
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms,
+ const BYTE* const ip, const BYTE* const iLimit,
+ size_t* offsetPtr,
+ const U32 mls /* template */,
+ const ZSTD_dictMode_e dictMode)
+{
+ DEBUGLOG(7, "ZSTD_BtFindBestMatch");
+ if (ip < ms->window.base + ms->nextToUpdate) return 0; /* skipped area */
+ ZSTD_updateDUBT(ms, ip, iLimit, mls);
+ return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offsetPtr, mls, dictMode);
+}
+
+
+static size_t
+ZSTD_BtFindBestMatch_selectMLS ( ZSTD_matchState_t* ms,
+ const BYTE* ip, const BYTE* const iLimit,
+ size_t* offsetPtr)
+{
+ switch(ms->cParams.minMatch)
+ {
+ default : /* includes case 3 */
+ case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict);
+ case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict);
+ case 7 :
+ case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict);
+ }
+}
+
+
+static size_t ZSTD_BtFindBestMatch_dictMatchState_selectMLS (
+ ZSTD_matchState_t* ms,
+ const BYTE* ip, const BYTE* const iLimit,
+ size_t* offsetPtr)
+{
+ switch(ms->cParams.minMatch)
+ {
+ default : /* includes case 3 */
+ case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState);
+ case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState);
+ case 7 :
+ case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState);
+ }
+}
+
+
+static size_t ZSTD_BtFindBestMatch_extDict_selectMLS (
+ ZSTD_matchState_t* ms,
+ const BYTE* ip, const BYTE* const iLimit,
+ size_t* offsetPtr)
+{
+ switch(ms->cParams.minMatch)
+ {
+ default : /* includes case 3 */
+ case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict);
+ case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict);
+ case 7 :
+ case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict);
+ }
+}
+
+
+
+/* *********************************
+* Hash Chain
+***********************************/
+#define NEXT_IN_CHAIN(d, mask) chainTable[(d) & (mask)]
+
+/* Update chains up to ip (excluded)
+ Assumption : always within prefix (i.e. not within extDict) */
+FORCE_INLINE_TEMPLATE U32 ZSTD_insertAndFindFirstIndex_internal(
+ ZSTD_matchState_t* ms,
+ const ZSTD_compressionParameters* const cParams,
+ const BYTE* ip, U32 const mls)
+{
+ U32* const hashTable = ms->hashTable;
+ const U32 hashLog = cParams->hashLog;
+ U32* const chainTable = ms->chainTable;
+ const U32 chainMask = (1 << cParams->chainLog) - 1;
+ const BYTE* const base = ms->window.base;
+ const U32 target = (U32)(ip - base);
+ U32 idx = ms->nextToUpdate;
+
+ while(idx < target) { /* catch up */
+ size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls);
+ NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
+ hashTable[h] = idx;
+ idx++;
+ }
+
+ ms->nextToUpdate = target;
+ return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
+}
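
Editorial note, not part of this patch: NEXT_IN_CHAIN is the usual "heads in the hash table, links in a circular array" construction: hashTable[h] holds the newest position for hash h, and chainTable[pos & chainMask] remembers the previous position that hashed to the same bucket. A minimal standalone sketch follows (toy hash, and no verification of the candidate bytes, which the real search does with MEM_read32/ZSTD_count):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define HASH_LOG   12
    #define CHAIN_LOG  12
    #define HASH_SIZE  (1u << HASH_LOG)
    #define CHAIN_MASK ((1u << CHAIN_LOG) - 1)

    static uint32_t head[HASH_SIZE];            /* newest position seen for each hash value */
    static uint32_t chain[1u << CHAIN_LOG];     /* chain[pos & CHAIN_MASK] = previous position, same hash */

    static uint32_t toy_hash(const uint8_t* p)  /* hash of 4 bytes; illustrative, not zstd's */
    {
        uint32_t v;
        memcpy(&v, p, 4);
        return (v * 2654435761u) >> (32 - HASH_LOG);
    }

    /* Insert every position in [1, target) and return the newest candidate for `target`. */
    static uint32_t insert_and_find(const uint8_t* base, uint32_t target)
    {
        uint32_t idx;
        for (idx = 1; idx < target; idx++) {     /* 0 is reserved to mean "end of chain" */
            uint32_t const h = toy_hash(base + idx);
            chain[idx & CHAIN_MASK] = head[h];   /* link back to the previous occurrence */
            head[h] = idx;
        }
        return head[toy_hash(base + target)];
    }

    int main(void)
    {
        const uint8_t buf[] = "abcabcabcabcabc";
        uint32_t candidate = insert_and_find(buf, 9);   /* candidates for the 4 bytes at buf+9 */
        while (candidate) {                             /* walk from newest to oldest */
            printf("candidate at position %u\n", (unsigned)candidate);
            candidate = chain[candidate & CHAIN_MASK];
        }
        return 0;
    }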
+
+U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) {
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch);
+}
+
+void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip)
+{
+ const BYTE* const base = ms->window.base;
+ U32 const target = (U32)(ip - base);
+ U32* const hashTable = ms->hashTable;
+ U32* const chainTable = ms->chainTable;
+ U32 const chainSize = 1 << ms->cParams.chainLog;
+ U32 idx = ms->nextToUpdate;
+ U32 const minChain = chainSize < target ? target - chainSize : idx;
+ U32 const bucketSize = 1 << ZSTD_LAZY_DDSS_BUCKET_LOG;
+ U32 const cacheSize = bucketSize - 1;
+ U32 const chainAttempts = (1 << ms->cParams.searchLog) - cacheSize;
+ U32 const chainLimit = chainAttempts > 255 ? 255 : chainAttempts;
+
+ /* We know the hashtable is oversized by a factor of `bucketSize`.
+ * We are going to temporarily pretend `bucketSize == 1`, keeping only a
+ * single entry. We will use the rest of the space to construct a temporary
+ * chaintable.
+ */
+ U32 const hashLog = ms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG;
+ U32* const tmpHashTable = hashTable;
+ U32* const tmpChainTable = hashTable + ((size_t)1 << hashLog);
+ U32 const tmpChainSize = ((1 << ZSTD_LAZY_DDSS_BUCKET_LOG) - 1) << hashLog;
+ U32 const tmpMinChain = tmpChainSize < target ? target - tmpChainSize : idx;
+
+ U32 hashIdx;
+
+ assert(ms->cParams.chainLog <= 24);
+ assert(ms->cParams.hashLog >= ms->cParams.chainLog);
+ assert(idx != 0);
+ assert(tmpMinChain <= minChain);
+
+ /* fill conventional hash table and conventional chain table */
+ for ( ; idx < target; idx++) {
+ U32 const h = (U32)ZSTD_hashPtr(base + idx, hashLog, ms->cParams.minMatch);
+ if (idx >= tmpMinChain) {
+ tmpChainTable[idx - tmpMinChain] = hashTable[h];
+ }
+ tmpHashTable[h] = idx;
+ }
+
+ /* sort chains into ddss chain table */
+ {
+ U32 chainPos = 0;
+ for (hashIdx = 0; hashIdx < (1U << hashLog); hashIdx++) {
+ U32 count;
+ U32 countBeyondMinChain = 0;
+ U32 i = tmpHashTable[hashIdx];
+ for (count = 0; i >= tmpMinChain && count < cacheSize; count++) {
+ /* skip through the chain to the first position that won't be
+ * in the hash cache bucket */
+ if (i < minChain) {
+ countBeyondMinChain++;
+ }
+ i = tmpChainTable[i - tmpMinChain];
+ }
+ if (count == cacheSize) {
+ for (count = 0; count < chainLimit;) {
+ if (i < minChain) {
+ if (!i || countBeyondMinChain++ > cacheSize) {
+ /* only allow pulling `cacheSize` number of entries
+ * into the cache or chainTable beyond `minChain`,
+ * to replace the entries pulled out of the
+ * chainTable into the cache. This lets us reach
+ * back further without increasing the total number
+ * of entries in the chainTable, guaranteeing the
+ * DDSS chain table will fit into the space
+ * allocated for the regular one. */
+ break;
+ }
+ }
+ chainTable[chainPos++] = i;
+ count++;
+ if (i < tmpMinChain) {
+ break;
+ }
+ i = tmpChainTable[i - tmpMinChain];
+ }
+ } else {
+ count = 0;
+ }
+ if (count) {
+ tmpHashTable[hashIdx] = ((chainPos - count) << 8) + count;
+ } else {
+ tmpHashTable[hashIdx] = 0;
+ }
+ }
+ assert(chainPos <= chainSize); /* I believe this is guaranteed... */
+ }
+
+ /* move chain pointers into the last entry of each hash bucket */
+ for (hashIdx = (1 << hashLog); hashIdx; ) {
+ U32 const bucketIdx = --hashIdx << ZSTD_LAZY_DDSS_BUCKET_LOG;
+ U32 const chainPackedPointer = tmpHashTable[hashIdx];
+ U32 i;
+ for (i = 0; i < cacheSize; i++) {
+ hashTable[bucketIdx + i] = 0;
+ }
+ hashTable[bucketIdx + bucketSize - 1] = chainPackedPointer;
+ }
+
+ /* fill the buckets of the hash table */
+ for (idx = ms->nextToUpdate; idx < target; idx++) {
+ U32 const h = (U32)ZSTD_hashPtr(base + idx, hashLog, ms->cParams.minMatch)
+ << ZSTD_LAZY_DDSS_BUCKET_LOG;
+ U32 i;
+ /* Shift hash cache down 1. */
+ for (i = cacheSize - 1; i; i--)
+ hashTable[h + i] = hashTable[h + i - 1];
+ hashTable[h] = idx;
+ }
+
+ ms->nextToUpdate = target;
+}
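
Editorial note, not part of this patch: for each DDSS hash bucket, the loader above stores in the bucket's last slot both where that bucket's spill-over chain starts in the chain table and how many entries it has, packed as ((chainPos - count) << 8) + count; the search path in ZSTD_HcFindBestMatch_generic() unpacks it as chainIndex = packed >> 8 and chainLength = packed & 0xFF. A tiny sketch of that encoding, with illustrative names:

    #include <stdint.h>
    #include <assert.h>

    /* Pack a chain start index and a chain length (<= 255) into one word, mirroring
     * the ((chainPos - count) << 8) + count convention used by the loader above. */
    static uint32_t dds_pack(uint32_t chainStart, uint32_t chainLength)
    {
        assert(chainLength <= 0xFF);
        return (chainStart << 8) + chainLength;
    }

    static uint32_t dds_chain_start(uint32_t packed)  { return packed >> 8; }
    static uint32_t dds_chain_length(uint32_t packed) { return packed & 0xFF; }

    int main(void)
    {
        uint32_t const packed = dds_pack(1234, 7);
        assert(dds_chain_start(packed) == 1234);
        assert(dds_chain_length(packed) == 7);
        return 0;
    }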
+
+
+/* inlining is important to hardwire a hot branch (template emulation) */
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_HcFindBestMatch_generic (
+ ZSTD_matchState_t* ms,
+ const BYTE* const ip, const BYTE* const iLimit,
+ size_t* offsetPtr,
+ const U32 mls, const ZSTD_dictMode_e dictMode)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const chainTable = ms->chainTable;
+ const U32 chainSize = (1 << cParams->chainLog);
+ const U32 chainMask = chainSize-1;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const U32 dictLimit = ms->window.dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const U32 curr = (U32)(ip-base);
+ const U32 maxDistance = 1U << cParams->windowLog;
+ const U32 lowestValid = ms->window.lowLimit;
+ const U32 withinMaxDistance = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
+ const U32 isDictionary = (ms->loadedDictEnd != 0);
+ const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance;
+ const U32 minChain = curr > chainSize ? curr - chainSize : 0;
+ U32 nbAttempts = 1U << cParams->searchLog;
+ size_t ml=4-1;
+
+ const ZSTD_matchState_t* const dms = ms->dictMatchState;
+ const U32 ddsHashLog = dictMode == ZSTD_dedicatedDictSearch
+ ? dms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG : 0;
+ const size_t ddsIdx = dictMode == ZSTD_dedicatedDictSearch
+ ? ZSTD_hashPtr(ip, ddsHashLog, mls) << ZSTD_LAZY_DDSS_BUCKET_LOG : 0;
+
+ U32 matchIndex;
+
+ if (dictMode == ZSTD_dedicatedDictSearch) {
+ const U32* entry = &dms->hashTable[ddsIdx];
+ PREFETCH_L1(entry);
+ }
+
+ /* HC4 match finder */
+ matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls);
+
+ for ( ; (matchIndex>=lowLimit) & (nbAttempts>0) ; nbAttempts--) {
+ size_t currentMl=0;
+ if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
+ const BYTE* const match = base + matchIndex;
+ assert(matchIndex >= dictLimit); /* ensures this is true if dictMode != ZSTD_extDict */
+ if (match[ml] == ip[ml]) /* potentially better */
+ currentMl = ZSTD_count(ip, match, iLimit);
+ } else {
+ const BYTE* const match = dictBase + matchIndex;
+ assert(match+4 <= dictEnd);
+ if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */
+ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4;
+ }
+
+ /* save best solution */
+ if (currentMl > ml) {
+ ml = currentMl;
+ *offsetPtr = curr - matchIndex + ZSTD_REP_MOVE;
+ if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
+ }
+
+ if (matchIndex <= minChain) break;
+ matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
+ }
+
+ assert(nbAttempts <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */
+ if (dictMode == ZSTD_dedicatedDictSearch) {
+ const U32 ddsLowestIndex = dms->window.dictLimit;
+ const BYTE* const ddsBase = dms->window.base;
+ const BYTE* const ddsEnd = dms->window.nextSrc;
+ const U32 ddsSize = (U32)(ddsEnd - ddsBase);
+ const U32 ddsIndexDelta = dictLimit - ddsSize;
+ const U32 bucketSize = (1 << ZSTD_LAZY_DDSS_BUCKET_LOG);
+ const U32 bucketLimit = nbAttempts < bucketSize - 1 ? nbAttempts : bucketSize - 1;
+ U32 ddsAttempt;
+
+ for (ddsAttempt = 0; ddsAttempt < bucketSize - 1; ddsAttempt++) {
+ PREFETCH_L1(ddsBase + dms->hashTable[ddsIdx + ddsAttempt]);
+ }
+
+ {
+ U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1];
+ U32 const chainIndex = chainPackedPointer >> 8;
+
+ PREFETCH_L1(&dms->chainTable[chainIndex]);
+ }
+
+ for (ddsAttempt = 0; ddsAttempt < bucketLimit; ddsAttempt++) {
+ size_t currentMl=0;
+ const BYTE* match;
+ matchIndex = dms->hashTable[ddsIdx + ddsAttempt];
+ match = ddsBase + matchIndex;
+
+ if (!matchIndex) {
+ return ml;
+ }
+
+ /* guaranteed by table construction */
+ (void)ddsLowestIndex;
+ assert(matchIndex >= ddsLowestIndex);
+ assert(match+4 <= ddsEnd);
+ if (MEM_read32(match) == MEM_read32(ip)) {
+ /* assumption : matchIndex <= dictLimit-4 (by table construction) */
+ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4;
+ }
+
+ /* save best solution */
+ if (currentMl > ml) {
+ ml = currentMl;
+ *offsetPtr = curr - (matchIndex + ddsIndexDelta) + ZSTD_REP_MOVE;
+ if (ip+currentMl == iLimit) {
+ /* best possible, avoids read overflow on next attempt */
+ return ml;
+ }
+ }
+ }
+
+ {
+ U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1];
+ U32 chainIndex = chainPackedPointer >> 8;
+ U32 const chainLength = chainPackedPointer & 0xFF;
+ U32 const chainAttempts = nbAttempts - ddsAttempt;
+ U32 const chainLimit = chainAttempts > chainLength ? chainLength : chainAttempts;
+ U32 chainAttempt;
+
+ for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++) {
+ PREFETCH_L1(ddsBase + dms->chainTable[chainIndex + chainAttempt]);
+ }
+
+ for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++, chainIndex++) {
+ size_t currentMl=0;
+ const BYTE* match;
+ matchIndex = dms->chainTable[chainIndex];
+ match = ddsBase + matchIndex;
+
+ /* guaranteed by table construction */
+ assert(matchIndex >= ddsLowestIndex);
+ assert(match+4 <= ddsEnd);
+ if (MEM_read32(match) == MEM_read32(ip)) {
+ /* assumption : matchIndex <= dictLimit-4 (by table construction) */
+ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4;
+ }
+
+ /* save best solution */
+ if (currentMl > ml) {
+ ml = currentMl;
+ *offsetPtr = curr - (matchIndex + ddsIndexDelta) + ZSTD_REP_MOVE;
+ if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
+ }
+ }
+ }
+ } else if (dictMode == ZSTD_dictMatchState) {
+ const U32* const dmsChainTable = dms->chainTable;
+ const U32 dmsChainSize = (1 << dms->cParams.chainLog);
+ const U32 dmsChainMask = dmsChainSize - 1;
+ const U32 dmsLowestIndex = dms->window.dictLimit;
+ const BYTE* const dmsBase = dms->window.base;
+ const BYTE* const dmsEnd = dms->window.nextSrc;
+ const U32 dmsSize = (U32)(dmsEnd - dmsBase);
+ const U32 dmsIndexDelta = dictLimit - dmsSize;
+ const U32 dmsMinChain = dmsSize > dmsChainSize ? dmsSize - dmsChainSize : 0;
+
+ matchIndex = dms->hashTable[ZSTD_hashPtr(ip, dms->cParams.hashLog, mls)];
+
+ for ( ; (matchIndex>=dmsLowestIndex) & (nbAttempts>0) ; nbAttempts--) {
+ size_t currentMl=0;
+ const BYTE* const match = dmsBase + matchIndex;
+ assert(match+4 <= dmsEnd);
+ if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */
+ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dmsEnd, prefixStart) + 4;
+
+ /* save best solution */
+ if (currentMl > ml) {
+ ml = currentMl;
+ *offsetPtr = curr - (matchIndex + dmsIndexDelta) + ZSTD_REP_MOVE;
+ if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
+ }
+
+ if (matchIndex <= dmsMinChain) break;
+
+ matchIndex = dmsChainTable[matchIndex & dmsChainMask];
+ }
+ }
+
+ return ml;
+}
+
+
+FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_selectMLS (
+ ZSTD_matchState_t* ms,
+ const BYTE* ip, const BYTE* const iLimit,
+ size_t* offsetPtr)
+{
+ switch(ms->cParams.minMatch)
+ {
+ default : /* includes case 3 */
+ case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict);
+ case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict);
+ case 7 :
+ case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict);
+ }
+}
+
+
+static size_t ZSTD_HcFindBestMatch_dictMatchState_selectMLS (
+ ZSTD_matchState_t* ms,
+ const BYTE* ip, const BYTE* const iLimit,
+ size_t* offsetPtr)
+{
+ switch(ms->cParams.minMatch)
+ {
+ default : /* includes case 3 */
+ case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState);
+ case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState);
+ case 7 :
+ case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState);
+ }
+}
+
+
+static size_t ZSTD_HcFindBestMatch_dedicatedDictSearch_selectMLS (
+ ZSTD_matchState_t* ms,
+ const BYTE* ip, const BYTE* const iLimit,
+ size_t* offsetPtr)
+{
+ switch(ms->cParams.minMatch)
+ {
+ default : /* includes case 3 */
+ case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_dedicatedDictSearch);
+ case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_dedicatedDictSearch);
+ case 7 :
+ case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_dedicatedDictSearch);
+ }
+}
+
+
+FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS (
+ ZSTD_matchState_t* ms,
+ const BYTE* ip, const BYTE* const iLimit,
+ size_t* offsetPtr)
+{
+ switch(ms->cParams.minMatch)
+ {
+ default : /* includes case 3 */
+ case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict);
+ case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict);
+ case 7 :
+ case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict);
+ }
+}
+
+
+/* *******************************
+* Common parser - lazy strategy
+*********************************/
+typedef enum { search_hashChain, search_binaryTree } searchMethod_e;
+
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_compressBlock_lazy_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore,
+ U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize,
+ const searchMethod_e searchMethod, const U32 depth,
+ ZSTD_dictMode_e const dictMode)
+{
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - 8;
+ const BYTE* const base = ms->window.base;
+ const U32 prefixLowestIndex = ms->window.dictLimit;
+ const BYTE* const prefixLowest = base + prefixLowestIndex;
+
+ typedef size_t (*searchMax_f)(
+ ZSTD_matchState_t* ms,
+ const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
+
+ /*
+ * This table is indexed first by the four ZSTD_dictMode_e values, and then
+ * by the two searchMethod_e values. NULLs are placed for configurations
+ * that should never occur (extDict modes go to the other implementation
+ * below and there is no DDSS for binary tree search yet).
+ */
+ const searchMax_f searchFuncs[4][2] = {
+ {
+ ZSTD_HcFindBestMatch_selectMLS,
+ ZSTD_BtFindBestMatch_selectMLS
+ },
+ {
+ NULL,
+ NULL
+ },
+ {
+ ZSTD_HcFindBestMatch_dictMatchState_selectMLS,
+ ZSTD_BtFindBestMatch_dictMatchState_selectMLS
+ },
+ {
+ ZSTD_HcFindBestMatch_dedicatedDictSearch_selectMLS,
+ NULL
+ }
+ };
+
+ searchMax_f const searchMax = searchFuncs[dictMode][searchMethod == search_binaryTree];
+ U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0;
+
+ const int isDMS = dictMode == ZSTD_dictMatchState;
+ const int isDDS = dictMode == ZSTD_dedicatedDictSearch;
+ const int isDxS = isDMS || isDDS;
+ const ZSTD_matchState_t* const dms = ms->dictMatchState;
+ const U32 dictLowestIndex = isDxS ? dms->window.dictLimit : 0;
+ const BYTE* const dictBase = isDxS ? dms->window.base : NULL;
+ const BYTE* const dictLowest = isDxS ? dictBase + dictLowestIndex : NULL;
+ const BYTE* const dictEnd = isDxS ? dms->window.nextSrc : NULL;
+ const U32 dictIndexDelta = isDxS ?
+ prefixLowestIndex - (U32)(dictEnd - dictBase) :
+ 0;
+ const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictLowest));
+
+ assert(searchMax != NULL);
+
+ DEBUGLOG(5, "ZSTD_compressBlock_lazy_generic (dictMode=%u)", (U32)dictMode);
+
+ /* init */
+ ip += (dictAndPrefixLength == 0);
+ if (dictMode == ZSTD_noDict) {
+ U32 const curr = (U32)(ip - base);
+ U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, ms->cParams.windowLog);
+ U32 const maxRep = curr - windowLow;
+ if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
+ if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;
+ }
+ if (isDxS) {
+ /* dictMatchState repCode checks don't currently handle repCode == 0
+ * disabling. */
+ assert(offset_1 <= dictAndPrefixLength);
+ assert(offset_2 <= dictAndPrefixLength);
+ }
+
+ /* Match Loop */
+#if defined(__x86_64__)
+    /* I've measured a random 5% speed loss on levels 5 & 6 (greedy) when the
+     * code alignment is perturbed. To fix the instability, align the loop on 32 bytes.
+ */
+ __asm__(".p2align 5");
+#endif
+ while (ip < ilimit) {
+ size_t matchLength=0;
+ size_t offset=0;
+ const BYTE* start=ip+1;
+
+ /* check repCode */
+ if (isDxS) {
+ const U32 repIndex = (U32)(ip - base) + 1 - offset_1;
+ const BYTE* repMatch = ((dictMode == ZSTD_dictMatchState || dictMode == ZSTD_dedicatedDictSearch)
+ && repIndex < prefixLowestIndex) ?
+ dictBase + (repIndex - dictIndexDelta) :
+ base + repIndex;
+ if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+ const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
+ matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
+ if (depth==0) goto _storeSequence;
+ }
+ }
+ if ( dictMode == ZSTD_noDict
+ && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
+ matchLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
+ if (depth==0) goto _storeSequence;
+ }
+
+ /* first search (depth 0) */
+ { size_t offsetFound = 999999999;
+ size_t const ml2 = searchMax(ms, ip, iend, &offsetFound);
+ if (ml2 > matchLength)
+ matchLength = ml2, start = ip, offset=offsetFound;
+ }
+
+ if (matchLength < 4) {
+ ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */
+ continue;
+ }
+
+ /* let's try to find a better solution */
+ if (depth>=1)
+ while (ip<ilimit) {
+ ip ++;
+ if ( (dictMode == ZSTD_noDict)
+ && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
+ size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
+ int const gain2 = (int)(mlRep * 3);
+ int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
+ if ((mlRep >= 4) && (gain2 > gain1))
+ matchLength = mlRep, offset = 0, start = ip;
+ }
+ if (isDxS) {
+ const U32 repIndex = (U32)(ip - base) - offset_1;
+ const BYTE* repMatch = repIndex < prefixLowestIndex ?
+ dictBase + (repIndex - dictIndexDelta) :
+ base + repIndex;
+ if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+ && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
+ const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
+ size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
+ int const gain2 = (int)(mlRep * 3);
+ int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
+ if ((mlRep >= 4) && (gain2 > gain1))
+ matchLength = mlRep, offset = 0, start = ip;
+ }
+ }
+ { size_t offset2=999999999;
+ size_t const ml2 = searchMax(ms, ip, iend, &offset2);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
+ if ((ml2 >= 4) && (gain2 > gain1)) {
+ matchLength = ml2, offset = offset2, start = ip;
+ continue; /* search a better one */
+ } }
+
+ /* let's find an even better one */
+ if ((depth==2) && (ip<ilimit)) {
+ ip ++;
+ if ( (dictMode == ZSTD_noDict)
+ && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
+ size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
+ int const gain2 = (int)(mlRep * 4);
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
+ if ((mlRep >= 4) && (gain2 > gain1))
+ matchLength = mlRep, offset = 0, start = ip;
+ }
+ if (isDxS) {
+ const U32 repIndex = (U32)(ip - base) - offset_1;
+ const BYTE* repMatch = repIndex < prefixLowestIndex ?
+ dictBase + (repIndex - dictIndexDelta) :
+ base + repIndex;
+ if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+ && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
+ const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
+ size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
+ int const gain2 = (int)(mlRep * 4);
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
+ if ((mlRep >= 4) && (gain2 > gain1))
+ matchLength = mlRep, offset = 0, start = ip;
+ }
+ }
+ { size_t offset2=999999999;
+ size_t const ml2 = searchMax(ms, ip, iend, &offset2);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
+ if ((ml2 >= 4) && (gain2 > gain1)) {
+ matchLength = ml2, offset = offset2, start = ip;
+ continue;
+ } } }
+ break; /* nothing found : store previous solution */
+ }
+
+ /* NOTE:
+ * start[-offset+ZSTD_REP_MOVE-1] is undefined behavior.
+ * (-offset+ZSTD_REP_MOVE-1) is unsigned, and is added to start, which
+ * overflows the pointer, which is undefined behavior.
+ */
+ /* catch up */
+ if (offset) {
+ if (dictMode == ZSTD_noDict) {
+ while ( ((start > anchor) & (start - (offset-ZSTD_REP_MOVE) > prefixLowest))
+ && (start[-1] == (start-(offset-ZSTD_REP_MOVE))[-1]) ) /* only search for offset within prefix */
+ { start--; matchLength++; }
+ }
+ if (isDxS) {
+ U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
+ const BYTE* match = (matchIndex < prefixLowestIndex) ? dictBase + matchIndex - dictIndexDelta : base + matchIndex;
+ const BYTE* const mStart = (matchIndex < prefixLowestIndex) ? dictLowest : prefixLowest;
+ while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */
+ }
+ offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
+ }
+ /* store sequence */
+_storeSequence:
+ { size_t const litLength = start - anchor;
+ ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offset, matchLength-MINMATCH);
+ anchor = ip = start + matchLength;
+ }
+
+ /* check immediate repcode */
+ if (isDxS) {
+ while (ip <= ilimit) {
+ U32 const current2 = (U32)(ip-base);
+ U32 const repIndex = current2 - offset_2;
+ const BYTE* repMatch = repIndex < prefixLowestIndex ?
+ dictBase - dictIndexDelta + repIndex :
+ base + repIndex;
+ if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex) >= 3 /* intentional overflow */)
+ && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
+ const BYTE* const repEnd2 = repIndex < prefixLowestIndex ? dictEnd : iend;
+ matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd2, prefixLowest) + 4;
+ offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap offset_2 <=> offset_1 */
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
+ ip += matchLength;
+ anchor = ip;
+ continue;
+ }
+ break;
+ }
+ }
+
+ if (dictMode == ZSTD_noDict) {
+ while ( ((ip <= ilimit) & (offset_2>0))
+ && (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) {
+ /* store sequence */
+ matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
+ offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
+ ip += matchLength;
+ anchor = ip;
+ continue; /* faster when present ... (?) */
+ } } }
+
+ /* Save reps for next block */
+ rep[0] = offset_1 ? offset_1 : savedOffset;
+ rep[1] = offset_2 ? offset_2 : savedOffset;
+
+ /* Return the last literals size */
+ return (size_t)(iend - anchor);
+}
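
Editorial note, not part of this patch: the depth-1 and depth-2 loops above arbitrate between candidates with a cheap heuristic cost: a match is scored as roughly 4 units per byte minus the bit-width of its offset code (ZSTD_highbit32(offset+1)), and a new candidate must beat the incumbent by a fixed margin (4 at depth 1, 7 at depth 2; repeat-offset candidates are weighted 3 or 4 per byte and carry no offset cost). A hedged sketch of the depth-1 comparison, with stand-in helper names:

    #include <stdint.h>
    #include <stdio.h>
    #include <stddef.h>

    /* Position of the highest set bit, i.e. floor(log2(v)); stand-in for ZSTD_highbit32(). */
    static int highbit32(uint32_t v)
    {
        int n = 0;
        while (v >>= 1) n++;
        return n;
    }

    /* Nonzero if a new candidate (ml2 bytes at code offset2) should replace the
     * incumbent (matchLength bytes at code offsetCode) in the depth-1 comparison above. */
    static int prefer_new_candidate(size_t matchLength, size_t offsetCode,
                                    size_t ml2, size_t offset2)
    {
        int const gain2 = (int)(ml2 * 4 - highbit32((uint32_t)offset2 + 1));
        int const gain1 = (int)(matchLength * 4 - highbit32((uint32_t)offsetCode + 1) + 4);
        return (ml2 >= 4) && (gain2 > gain1);
    }

    int main(void)
    {
        /* one extra byte of match does not pay for a much more expensive offset */
        printf("%d\n", prefer_new_candidate(8, 64, 9, 1u << 20));    /* prints 0 */
        /* but five extra bytes do */
        printf("%d\n", prefer_new_candidate(8, 64, 13, 1u << 20));   /* prints 1 */
        return 0;
    }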
+
+
+size_t ZSTD_compressBlock_btlazy2(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_lazy2(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_lazy(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_greedy(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_btlazy2_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_lazy2_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_lazy_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_greedy_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dictMatchState);
+}
+
+
+size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dedicatedDictSearch);
+}
+
+size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dedicatedDictSearch);
+}
+
+size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dedicatedDictSearch);
+}
+
+
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_compressBlock_lazy_extDict_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore,
+ U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize,
+ const searchMethod_e searchMethod, const U32 depth)
+{
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - 8;
+ const BYTE* const base = ms->window.base;
+ const U32 dictLimit = ms->window.dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const dictStart = dictBase + ms->window.lowLimit;
+ const U32 windowLog = ms->cParams.windowLog;
+
+ typedef size_t (*searchMax_f)(
+ ZSTD_matchState_t* ms,
+ const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
+ searchMax_f searchMax = searchMethod==search_binaryTree ? ZSTD_BtFindBestMatch_extDict_selectMLS : ZSTD_HcFindBestMatch_extDict_selectMLS;
+
+ U32 offset_1 = rep[0], offset_2 = rep[1];
+
+ DEBUGLOG(5, "ZSTD_compressBlock_lazy_extDict_generic");
+
+ /* init */
+ ip += (ip == prefixStart);
+
+ /* Match Loop */
+#if defined(__x86_64__)
+    /* I've measured a random 5% speed loss on levels 5 & 6 (greedy) when the
+     * code alignment is perturbed. To fix the instability, align the loop on 32 bytes.
+ */
+ __asm__(".p2align 5");
+#endif
+ while (ip < ilimit) {
+ size_t matchLength=0;
+ size_t offset=0;
+ const BYTE* start=ip+1;
+ U32 curr = (U32)(ip-base);
+
+ /* check repCode */
+ { const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr+1, windowLog);
+ const U32 repIndex = (U32)(curr+1 - offset_1);
+ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
+ if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow)) /* intentional overflow */
+ if (MEM_read32(ip+1) == MEM_read32(repMatch)) {
+                    /* repcode detected, we should take it */
+ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+ matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repEnd, prefixStart) + 4;
+ if (depth==0) goto _storeSequence;
+ } }
+
+ /* first search (depth 0) */
+ { size_t offsetFound = 999999999;
+ size_t const ml2 = searchMax(ms, ip, iend, &offsetFound);
+ if (ml2 > matchLength)
+ matchLength = ml2, start = ip, offset=offsetFound;
+ }
+
+ if (matchLength < 4) {
+ ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */
+ continue;
+ }
+
+ /* let's try to find a better solution */
+ if (depth>=1)
+ while (ip<ilimit) {
+ ip ++;
+ curr++;
+ /* check repCode */
+ if (offset) {
+ const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog);
+ const U32 repIndex = (U32)(curr - offset_1);
+ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
+ if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow)) /* intentional overflow */
+ if (MEM_read32(ip) == MEM_read32(repMatch)) {
+ /* repcode detected */
+ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+ size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
+ int const gain2 = (int)(repLength * 3);
+ int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
+ if ((repLength >= 4) && (gain2 > gain1))
+ matchLength = repLength, offset = 0, start = ip;
+ } }
+
+ /* search match, depth 1 */
+ { size_t offset2=999999999;
+ size_t const ml2 = searchMax(ms, ip, iend, &offset2);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
+ if ((ml2 >= 4) && (gain2 > gain1)) {
+ matchLength = ml2, offset = offset2, start = ip;
+ continue; /* search a better one */
+ } }
+
+ /* let's find an even better one */
+ if ((depth==2) && (ip<ilimit)) {
+ ip ++;
+ curr++;
+ /* check repCode */
+ if (offset) {
+ const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog);
+ const U32 repIndex = (U32)(curr - offset_1);
+ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
+ if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow)) /* intentional overflow */
+ if (MEM_read32(ip) == MEM_read32(repMatch)) {
+ /* repcode detected */
+ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+ size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
+ int const gain2 = (int)(repLength * 4);
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
+ if ((repLength >= 4) && (gain2 > gain1))
+ matchLength = repLength, offset = 0, start = ip;
+ } }
+
+ /* search match, depth 2 */
+ { size_t offset2=999999999;
+ size_t const ml2 = searchMax(ms, ip, iend, &offset2);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
+ if ((ml2 >= 4) && (gain2 > gain1)) {
+ matchLength = ml2, offset = offset2, start = ip;
+ continue;
+ } } }
+ break; /* nothing found : store previous solution */
+ }
+
+ /* catch up */
+ if (offset) {
+ U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
+ const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
+ const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
+ while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */
+ offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
+ }
+
+ /* store sequence */
+_storeSequence:
+ { size_t const litLength = start - anchor;
+ ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offset, matchLength-MINMATCH);
+ anchor = ip = start + matchLength;
+ }
+
+ /* check immediate repcode */
+ while (ip <= ilimit) {
+ const U32 repCurrent = (U32)(ip-base);
+ const U32 windowLow = ZSTD_getLowestMatchIndex(ms, repCurrent, windowLog);
+ const U32 repIndex = repCurrent - offset_2;
+ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
+ if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow)) /* intentional overflow */
+ if (MEM_read32(ip) == MEM_read32(repMatch)) {
+                /* repcode detected, we should take it */
+ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+ matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
+ offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap offset history */
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
+ ip += matchLength;
+ anchor = ip;
+ continue; /* faster when present ... (?) */
+ }
+ break;
+ } }
+
+ /* Save reps for next block */
+ rep[0] = offset_1;
+ rep[1] = offset_2;
+
+ /* Return the last literals size */
+ return (size_t)(iend - anchor);
+}
+
+
+size_t ZSTD_compressBlock_greedy_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0);
+}
+
+size_t ZSTD_compressBlock_lazy_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+
+{
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1);
+}
+
+size_t ZSTD_compressBlock_lazy2_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+
+{
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2);
+}
+
+size_t ZSTD_compressBlock_btlazy2_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+
+{
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2);
+}
diff --git a/lib/zstd/compress/zstd_lazy.h b/lib/zstd/compress/zstd_lazy.h
new file mode 100644
index 00000000000000..2fc5a61821344c
--- /dev/null
+++ b/lib/zstd/compress/zstd_lazy.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_LAZY_H
+#define ZSTD_LAZY_H
+
+
+#include "zstd_compress_internal.h"
+
+/*
+ * Dedicated Dictionary Search Structure bucket log. In the
+ * ZSTD_dedicatedDictSearch mode, the hashTable has
+ * 2 ** ZSTD_LAZY_DDSS_BUCKET_LOG entries in each bucket, rather than just
+ * one.
+ */
+#define ZSTD_LAZY_DDSS_BUCKET_LOG 2
+
+U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip);
+
+void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip);
+
+void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue); /*! used in ZSTD_reduceIndex(). preemptively increase value of ZSTD_DUBT_UNSORTED_MARK */
+
+size_t ZSTD_compressBlock_btlazy2(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_greedy(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+size_t ZSTD_compressBlock_btlazy2_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_greedy_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+size_t ZSTD_compressBlock_greedy_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_btlazy2_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+
+#endif /* ZSTD_LAZY_H */
diff --git a/lib/zstd/compress/zstd_ldm.c b/lib/zstd/compress/zstd_ldm.c
new file mode 100644
index 00000000000000..8ef7e88a5addea
--- /dev/null
+++ b/lib/zstd/compress/zstd_ldm.c
@@ -0,0 +1,686 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include "zstd_ldm.h"
+
+#include "../common/debug.h"
+#include <linux/xxhash.h>
+#include "zstd_fast.h" /* ZSTD_fillHashTable() */
+#include "zstd_double_fast.h" /* ZSTD_fillDoubleHashTable() */
+#include "zstd_ldm_geartab.h"
+
+#define LDM_BUCKET_SIZE_LOG 3
+#define LDM_MIN_MATCH_LENGTH 64
+#define LDM_HASH_RLOG 7
+
+typedef struct {
+ U64 rolling;
+ U64 stopMask;
+} ldmRollingHashState_t;
+
+/* ZSTD_ldm_gear_init():
+ *
+ * Initializes the rolling hash state such that it will honor the
+ * settings in params. */
+static void ZSTD_ldm_gear_init(ldmRollingHashState_t* state, ldmParams_t const* params)
+{
+ unsigned maxBitsInMask = MIN(params->minMatchLength, 64);
+ unsigned hashRateLog = params->hashRateLog;
+
+ state->rolling = ~(U32)0;
+
+ /* The choice of the splitting criterion is subject to two conditions:
+ * 1. it has to trigger on average every 2^(hashRateLog) bytes;
+ * 2. ideally, it has to depend on a window of minMatchLength bytes.
+ *
+ * In the gear hash algorithm, bit n depends on the last n bytes;
+ * so in order to obtain a good quality splitting criterion it is
+ * preferable to use bits with high weight.
+ *
+ * To match condition 1 we use a mask with hashRateLog bits set
+ * and, because of the previous remark, we make sure these bits
+ * have the highest possible weight while still respecting
+ * condition 2.
+ */
+ if (hashRateLog > 0 && hashRateLog <= maxBitsInMask) {
+ state->stopMask = (((U64)1 << hashRateLog) - 1) << (maxBitsInMask - hashRateLog);
+ } else {
+ /* In this degenerate case we simply honor the hash rate. */
+ state->stopMask = ((U64)1 << hashRateLog) - 1;
+ }
+}
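
Editorial note, not part of this patch: as a worked instance of the mask construction above, the defaults minMatchLength = 64 (so maxBitsInMask = 64) and hashRateLog = 7 put the 7 set bits at the very top of the 64-bit rolling hash, so a split fires whenever the top 7 hash bits are all zero, on average once every 2^7 = 128 bytes, while the decision still depends on roughly the last 64 input bytes. A small sketch of the same computation:

    #include <stdint.h>
    #include <stdio.h>

    /* Reproduces the stopMask computation above for the non-degenerate case. */
    static uint64_t gear_stop_mask(unsigned hashRateLog, unsigned maxBitsInMask)
    {
        if (hashRateLog > 0 && hashRateLog <= maxBitsInMask)
            return (((uint64_t)1 << hashRateLog) - 1) << (maxBitsInMask - hashRateLog);
        return ((uint64_t)1 << hashRateLog) - 1;   /* degenerate case: honor the rate only */
    }

    int main(void)
    {
        /* hashRateLog = 7, maxBitsInMask = 64: 7 high bits set -> one split per ~128 bytes */
        printf("stopMask = 0x%016llx\n", (unsigned long long)gear_stop_mask(7, 64));
        return 0;
    }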
+
+/* ZSTD_ldm_gear_feed():
+ *
+ * Registers in the splits array all the split points found in the first
+ * size bytes following the data pointer. This function terminates when
+ * either all the data has been processed or LDM_BATCH_SIZE splits are
+ * present in the splits array.
+ *
+ * Precondition: The splits array must not be full.
+ * Returns: The number of bytes processed. */
+static size_t ZSTD_ldm_gear_feed(ldmRollingHashState_t* state,
+ BYTE const* data, size_t size,
+ size_t* splits, unsigned* numSplits)
+{
+ size_t n;
+ U64 hash, mask;
+
+ hash = state->rolling;
+ mask = state->stopMask;
+ n = 0;
+
+#define GEAR_ITER_ONCE() do { \
+ hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \
+ n += 1; \
+ if (UNLIKELY((hash & mask) == 0)) { \
+ splits[*numSplits] = n; \
+ *numSplits += 1; \
+ if (*numSplits == LDM_BATCH_SIZE) \
+ goto done; \
+ } \
+ } while (0)
+
+ while (n + 3 < size) {
+ GEAR_ITER_ONCE();
+ GEAR_ITER_ONCE();
+ GEAR_ITER_ONCE();
+ GEAR_ITER_ONCE();
+ }
+ while (n < size) {
+ GEAR_ITER_ONCE();
+ }
+
+#undef GEAR_ITER_ONCE
+
+done:
+ state->rolling = hash;
+ return n;
+}
+
+void ZSTD_ldm_adjustParameters(ldmParams_t* params,
+ ZSTD_compressionParameters const* cParams)
+{
+ params->windowLog = cParams->windowLog;
+ ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX);
+ DEBUGLOG(4, "ZSTD_ldm_adjustParameters");
+ if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG;
+ if (!params->minMatchLength) params->minMatchLength = LDM_MIN_MATCH_LENGTH;
+ if (params->hashLog == 0) {
+ params->hashLog = MAX(ZSTD_HASHLOG_MIN, params->windowLog - LDM_HASH_RLOG);
+ assert(params->hashLog <= ZSTD_HASHLOG_MAX);
+ }
+ if (params->hashRateLog == 0) {
+ params->hashRateLog = params->windowLog < params->hashLog
+ ? 0
+ : params->windowLog - params->hashLog;
+ }
+ params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog);
+}
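
Editorial note, not part of this patch: as a worked instance of the defaulting above, cParams with windowLog = 27 and all LDM fields left at 0 end up with bucketSizeLog = 3, minMatchLength = 64, hashLog = MAX(ZSTD_HASHLOG_MIN, 27 - 7) = 20, and hashRateLog = 27 - 20 = 7, i.e. a table of 2^(20-3) = 2^17 buckets of 8 ldmEntry_t entries each (2^20 entries total), into which roughly one in every 2^7 = 128 positions is inserted via the gear-hash split criterion.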
+
+size_t ZSTD_ldm_getTableSize(ldmParams_t params)
+{
+ size_t const ldmHSize = ((size_t)1) << params.hashLog;
+ size_t const ldmBucketSizeLog = MIN(params.bucketSizeLog, params.hashLog);
+ size_t const ldmBucketSize = ((size_t)1) << (params.hashLog - ldmBucketSizeLog);
+ size_t const totalSize = ZSTD_cwksp_alloc_size(ldmBucketSize)
+ + ZSTD_cwksp_alloc_size(ldmHSize * sizeof(ldmEntry_t));
+ return params.enableLdm ? totalSize : 0;
+}
+
+size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize)
+{
+ return params.enableLdm ? (maxChunkSize / params.minMatchLength) : 0;
+}
+
+/* ZSTD_ldm_getBucket() :
+ * Returns a pointer to the start of the bucket associated with hash. */
+static ldmEntry_t* ZSTD_ldm_getBucket(
+ ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams)
+{
+ return ldmState->hashTable + (hash << ldmParams.bucketSizeLog);
+}
+
+/* ZSTD_ldm_insertEntry() :
+ * Insert the entry with corresponding hash into the hash table */
+static void ZSTD_ldm_insertEntry(ldmState_t* ldmState,
+ size_t const hash, const ldmEntry_t entry,
+ ldmParams_t const ldmParams)
+{
+ BYTE* const pOffset = ldmState->bucketOffsets + hash;
+ unsigned const offset = *pOffset;
+
+ *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + offset) = entry;
+ *pOffset = (BYTE)((offset + 1) & ((1u << ldmParams.bucketSizeLog) - 1));
+
+}
+
+/* ZSTD_ldm_countBackwardsMatch() :
+ * Returns the number of bytes that match backwards before pIn and pMatch.
+ *
+ * We count only bytes where pMatch >= pBase and pIn >= pAnchor. */
+static size_t ZSTD_ldm_countBackwardsMatch(
+ const BYTE* pIn, const BYTE* pAnchor,
+ const BYTE* pMatch, const BYTE* pMatchBase)
+{
+ size_t matchLength = 0;
+ while (pIn > pAnchor && pMatch > pMatchBase && pIn[-1] == pMatch[-1]) {
+ pIn--;
+ pMatch--;
+ matchLength++;
+ }
+ return matchLength;
+}
+
+/* ZSTD_ldm_countBackwardsMatch_2segments() :
+ * Returns the number of bytes that match backwards from pMatch,
+ * even with the backwards match spanning 2 different segments.
+ *
+ * On reaching `pMatchBase`, continue counting backwards from `pExtDictEnd` */
+static size_t ZSTD_ldm_countBackwardsMatch_2segments(
+ const BYTE* pIn, const BYTE* pAnchor,
+ const BYTE* pMatch, const BYTE* pMatchBase,
+ const BYTE* pExtDictStart, const BYTE* pExtDictEnd)
+{
+ size_t matchLength = ZSTD_ldm_countBackwardsMatch(pIn, pAnchor, pMatch, pMatchBase);
+ if (pMatch - matchLength != pMatchBase || pMatchBase == pExtDictStart) {
+ /* If backwards match is entirely in the extDict or prefix, immediately return */
+ return matchLength;
+ }
+ DEBUGLOG(7, "ZSTD_ldm_countBackwardsMatch_2segments: found 2-parts backwards match (length in prefix==%zu)", matchLength);
+ matchLength += ZSTD_ldm_countBackwardsMatch(pIn - matchLength, pAnchor, pExtDictEnd, pExtDictStart);
+ DEBUGLOG(7, "final backwards match length = %zu", matchLength);
+ return matchLength;
+}
+
+/* ZSTD_ldm_fillFastTables() :
+ *
+ * Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies.
+ * This is similar to ZSTD_loadDictionaryContent.
+ *
+ * The tables for the other strategies are filled within their
+ * block compressors. */
+static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms,
+ void const* end)
+{
+ const BYTE* const iend = (const BYTE*)end;
+
+ switch(ms->cParams.strategy)
+ {
+ case ZSTD_fast:
+ ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast);
+ break;
+
+ case ZSTD_dfast:
+ ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast);
+ break;
+
+ case ZSTD_greedy:
+ case ZSTD_lazy:
+ case ZSTD_lazy2:
+ case ZSTD_btlazy2:
+ case ZSTD_btopt:
+ case ZSTD_btultra:
+ case ZSTD_btultra2:
+ break;
+ default:
+ assert(0); /* not possible : not a valid strategy id */
+ }
+
+ return 0;
+}
+
+void ZSTD_ldm_fillHashTable(
+ ldmState_t* ldmState, const BYTE* ip,
+ const BYTE* iend, ldmParams_t const* params)
+{
+ U32 const minMatchLength = params->minMatchLength;
+ U32 const hBits = params->hashLog - params->bucketSizeLog;
+ BYTE const* const base = ldmState->window.base;
+ BYTE const* const istart = ip;
+ ldmRollingHashState_t hashState;
+ size_t* const splits = ldmState->splitIndices;
+ unsigned numSplits;
+
+ DEBUGLOG(5, "ZSTD_ldm_fillHashTable");
+
+ ZSTD_ldm_gear_init(&hashState, params);
+ while (ip < iend) {
+ size_t hashed;
+ unsigned n;
+
+ numSplits = 0;
+ hashed = ZSTD_ldm_gear_feed(&hashState, ip, iend - ip, splits, &numSplits);
+
+ for (n = 0; n < numSplits; n++) {
+ if (ip + splits[n] >= istart + minMatchLength) {
+ BYTE const* const split = ip + splits[n] - minMatchLength;
+ U64 const xxhash = xxh64(split, minMatchLength, 0);
+ U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));
+ ldmEntry_t entry;
+
+ entry.offset = (U32)(split - base);
+ entry.checksum = (U32)(xxhash >> 32);
+ ZSTD_ldm_insertEntry(ldmState, hash, entry, *params);
+ }
+ }
+
+ ip += hashed;
+ }
+}
+
+
+/* ZSTD_ldm_limitTableUpdate() :
+ *
+ * Sets ms->nextToUpdate to a position closer to anchor
+ * if it is far away
+ * (after a long match, only update the tables a limited amount). */
+static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor)
+{
+ U32 const curr = (U32)(anchor - ms->window.base);
+ if (curr > ms->nextToUpdate + 1024) {
+ ms->nextToUpdate =
+ curr - MIN(512, curr - ms->nextToUpdate - 1024);
+ }
+}
+
+static size_t ZSTD_ldm_generateSequences_internal(
+ ldmState_t* ldmState, rawSeqStore_t* rawSeqStore,
+ ldmParams_t const* params, void const* src, size_t srcSize)
+{
+ /* LDM parameters */
+ int const extDict = ZSTD_window_hasExtDict(ldmState->window);
+ U32 const minMatchLength = params->minMatchLength;
+ U32 const entsPerBucket = 1U << params->bucketSizeLog;
+ U32 const hBits = params->hashLog - params->bucketSizeLog;
+ /* Prefix and extDict parameters */
+ U32 const dictLimit = ldmState->window.dictLimit;
+ U32 const lowestIndex = extDict ? ldmState->window.lowLimit : dictLimit;
+ BYTE const* const base = ldmState->window.base;
+ BYTE const* const dictBase = extDict ? ldmState->window.dictBase : NULL;
+ BYTE const* const dictStart = extDict ? dictBase + lowestIndex : NULL;
+ BYTE const* const dictEnd = extDict ? dictBase + dictLimit : NULL;
+ BYTE const* const lowPrefixPtr = base + dictLimit;
+ /* Input bounds */
+ BYTE const* const istart = (BYTE const*)src;
+ BYTE const* const iend = istart + srcSize;
+ BYTE const* const ilimit = iend - HASH_READ_SIZE;
+ /* Input positions */
+ BYTE const* anchor = istart;
+ BYTE const* ip = istart;
+ /* Rolling hash state */
+ ldmRollingHashState_t hashState;
+ /* Arrays for staged-processing */
+ size_t* const splits = ldmState->splitIndices;
+ ldmMatchCandidate_t* const candidates = ldmState->matchCandidates;
+ unsigned numSplits;
+
+ if (srcSize < minMatchLength)
+ return iend - anchor;
+
+ /* Initialize the rolling hash state with the first minMatchLength bytes */
+ ZSTD_ldm_gear_init(&hashState, params);
+ {
+ size_t n = 0;
+
+ while (n < minMatchLength) {
+ numSplits = 0;
+ n += ZSTD_ldm_gear_feed(&hashState, ip + n, minMatchLength - n,
+ splits, &numSplits);
+ }
+ ip += minMatchLength;
+ }
+
+ while (ip < ilimit) {
+ size_t hashed;
+ unsigned n;
+
+ numSplits = 0;
+ hashed = ZSTD_ldm_gear_feed(&hashState, ip, ilimit - ip,
+ splits, &numSplits);
+
+ for (n = 0; n < numSplits; n++) {
+ BYTE const* const split = ip + splits[n] - minMatchLength;
+ U64 const xxhash = xxh64(split, minMatchLength, 0);
+ U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));
+
+ candidates[n].split = split;
+ candidates[n].hash = hash;
+ candidates[n].checksum = (U32)(xxhash >> 32);
+ candidates[n].bucket = ZSTD_ldm_getBucket(ldmState, hash, *params);
+ PREFETCH_L1(candidates[n].bucket);
+ }
+
+ for (n = 0; n < numSplits; n++) {
+ size_t forwardMatchLength = 0, backwardMatchLength = 0,
+ bestMatchLength = 0, mLength;
+ BYTE const* const split = candidates[n].split;
+ U32 const checksum = candidates[n].checksum;
+ U32 const hash = candidates[n].hash;
+ ldmEntry_t* const bucket = candidates[n].bucket;
+ ldmEntry_t const* cur;
+ ldmEntry_t const* bestEntry = NULL;
+ ldmEntry_t newEntry;
+
+ newEntry.offset = (U32)(split - base);
+ newEntry.checksum = checksum;
+
+ /* If a split point would generate a sequence overlapping with
+ * the previous one, we merely register it in the hash table and
+ * move on */
+ if (split < anchor) {
+ ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
+ continue;
+ }
+
+ for (cur = bucket; cur < bucket + entsPerBucket; cur++) {
+ size_t curForwardMatchLength, curBackwardMatchLength,
+ curTotalMatchLength;
+ if (cur->checksum != checksum || cur->offset <= lowestIndex) {
+ continue;
+ }
+ if (extDict) {
+ BYTE const* const curMatchBase =
+ cur->offset < dictLimit ? dictBase : base;
+ BYTE const* const pMatch = curMatchBase + cur->offset;
+ BYTE const* const matchEnd =
+ cur->offset < dictLimit ? dictEnd : iend;
+ BYTE const* const lowMatchPtr =
+ cur->offset < dictLimit ? dictStart : lowPrefixPtr;
+ curForwardMatchLength =
+ ZSTD_count_2segments(split, pMatch, iend, matchEnd, lowPrefixPtr);
+ if (curForwardMatchLength < minMatchLength) {
+ continue;
+ }
+ curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch_2segments(
+ split, anchor, pMatch, lowMatchPtr, dictStart, dictEnd);
+ } else { /* !extDict */
+ BYTE const* const pMatch = base + cur->offset;
+ curForwardMatchLength = ZSTD_count(split, pMatch, iend);
+ if (curForwardMatchLength < minMatchLength) {
+ continue;
+ }
+ curBackwardMatchLength =
+ ZSTD_ldm_countBackwardsMatch(split, anchor, pMatch, lowPrefixPtr);
+ }
+ curTotalMatchLength = curForwardMatchLength + curBackwardMatchLength;
+
+ if (curTotalMatchLength > bestMatchLength) {
+ bestMatchLength = curTotalMatchLength;
+ forwardMatchLength = curForwardMatchLength;
+ backwardMatchLength = curBackwardMatchLength;
+ bestEntry = cur;
+ }
+ }
+
+ /* No match found -- insert an entry into the hash table
+ * and process the next candidate match */
+ if (bestEntry == NULL) {
+ ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
+ continue;
+ }
+
+ /* Match found */
+ mLength = forwardMatchLength + backwardMatchLength;
+ {
+ U32 const offset = (U32)(split - base) - bestEntry->offset;
+ rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size;
+
+ /* Out of sequence storage */
+ if (rawSeqStore->size == rawSeqStore->capacity)
+ return ERROR(dstSize_tooSmall);
+ seq->litLength = (U32)(split - backwardMatchLength - anchor);
+ seq->matchLength = (U32)mLength;
+ seq->offset = offset;
+ rawSeqStore->size++;
+ }
+
+ /* Insert the current entry into the hash table --- it must be
+ * done after the previous block to avoid clobbering bestEntry */
+ ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
+
+ anchor = split + forwardMatchLength;
+ }
+
+ ip += hashed;
+ }
+
+ return iend - anchor;
+}
+
+/*! ZSTD_ldm_reduceTable() :
+ * reduce table indexes by `reducerValue` */
+static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size,
+ U32 const reducerValue)
+{
+ U32 u;
+ for (u = 0; u < size; u++) {
+ if (table[u].offset < reducerValue) table[u].offset = 0;
+ else table[u].offset -= reducerValue;
+ }
+}
+
+size_t ZSTD_ldm_generateSequences(
+ ldmState_t* ldmState, rawSeqStore_t* sequences,
+ ldmParams_t const* params, void const* src, size_t srcSize)
+{
+ U32 const maxDist = 1U << params->windowLog;
+ BYTE const* const istart = (BYTE const*)src;
+ BYTE const* const iend = istart + srcSize;
+ size_t const kMaxChunkSize = 1 << 20;
+ size_t const nbChunks = (srcSize / kMaxChunkSize) + ((srcSize % kMaxChunkSize) != 0);
+ size_t chunk;
+ size_t leftoverSize = 0;
+
+ assert(ZSTD_CHUNKSIZE_MAX >= kMaxChunkSize);
+ /* Check that ZSTD_window_update() has been called for this chunk prior
+ * to passing it to this function.
+ */
+ assert(ldmState->window.nextSrc >= (BYTE const*)src + srcSize);
+ /* The input could be very large (in zstdmt), so it must be broken up into
+ * chunks to enforce the maximum distance and handle overflow correction.
+ */
+ assert(sequences->pos <= sequences->size);
+ assert(sequences->size <= sequences->capacity);
+ for (chunk = 0; chunk < nbChunks && sequences->size < sequences->capacity; ++chunk) {
+ BYTE const* const chunkStart = istart + chunk * kMaxChunkSize;
+ size_t const remaining = (size_t)(iend - chunkStart);
+ BYTE const *const chunkEnd =
+ (remaining < kMaxChunkSize) ? iend : chunkStart + kMaxChunkSize;
+ size_t const chunkSize = chunkEnd - chunkStart;
+ size_t newLeftoverSize;
+ size_t const prevSize = sequences->size;
+
+ assert(chunkStart < iend);
+ /* 1. Perform overflow correction if necessary. */
+ if (ZSTD_window_needOverflowCorrection(ldmState->window, chunkEnd)) {
+ U32 const ldmHSize = 1U << params->hashLog;
+ U32 const correction = ZSTD_window_correctOverflow(
+ &ldmState->window, /* cycleLog */ 0, maxDist, chunkStart);
+ ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction);
+ /* invalidate dictionaries on overflow correction */
+ ldmState->loadedDictEnd = 0;
+ }
+ /* 2. We enforce the maximum offset allowed.
+ *
+ * kMaxChunkSize should be small enough that we don't lose too much of
+ * the window through early invalidation.
+ * TODO: * Test the chunk size.
+ * * Try invalidation after the sequence generation and test the
+ * offset against maxDist directly.
+ *
+ * NOTE: Because of dictionaries + sequence splitting we MUST make sure
+ * that any offset used is valid at the END of the sequence, since it may
+ * be split into two sequences. This condition holds when using
+ * ZSTD_window_enforceMaxDist(), but if we move to checking offsets
+ * against maxDist directly, we'll have to carefully handle that case.
+ */
+ ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, &ldmState->loadedDictEnd, NULL);
+ /* 3. Generate the sequences for the chunk, and get newLeftoverSize. */
+ newLeftoverSize = ZSTD_ldm_generateSequences_internal(
+ ldmState, sequences, params, chunkStart, chunkSize);
+ if (ZSTD_isError(newLeftoverSize))
+ return newLeftoverSize;
+ /* 4. We add the leftover literals from previous iterations to the first
+ * newly generated sequence, or add the `newLeftoverSize` if none are
+ * generated.
+ */
+ /* Prepend the leftover literals from the last call */
+ if (prevSize < sequences->size) {
+ sequences->seq[prevSize].litLength += (U32)leftoverSize;
+ leftoverSize = newLeftoverSize;
+ } else {
+ assert(newLeftoverSize == chunkSize);
+ leftoverSize += chunkSize;
+ }
+ }
+ return 0;
+}
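+
+/* Illustrative note (editorial, not part of upstream zstd): with
+ * kMaxChunkSize = 1<<20, a 2.5 MiB input gives
+ * nbChunks = (2621440 / 1048576) + 1 = 3 (two full 1 MiB chunks plus one
+ * 0.5 MiB chunk); each chunk goes through overflow correction, maximum
+ * distance enforcement, and ZSTD_ldm_generateSequences_internal() in turn. */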
+
+void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch) {
+ while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) {
+ rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos;
+ if (srcSize <= seq->litLength) {
+ /* Skip past srcSize literals */
+ seq->litLength -= (U32)srcSize;
+ return;
+ }
+ srcSize -= seq->litLength;
+ seq->litLength = 0;
+ if (srcSize < seq->matchLength) {
+ /* Skip past the first srcSize of the match */
+ seq->matchLength -= (U32)srcSize;
+ if (seq->matchLength < minMatch) {
+ /* The match is too short, omit it */
+ if (rawSeqStore->pos + 1 < rawSeqStore->size) {
+ seq[1].litLength += seq[0].matchLength;
+ }
+ rawSeqStore->pos++;
+ }
+ return;
+ }
+ srcSize -= seq->matchLength;
+ seq->matchLength = 0;
+ rawSeqStore->pos++;
+ }
+}
+
+/*
+ * If the sequence length is longer than remaining then the sequence is split
+ * between this block and the next.
+ *
+ * Returns the current sequence to handle, or if the rest of the block should
+ * be literals, it returns a sequence with offset == 0.
+ */
+static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore,
+ U32 const remaining, U32 const minMatch)
+{
+ rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos];
+ assert(sequence.offset > 0);
+ /* Likely: No partial sequence */
+ if (remaining >= sequence.litLength + sequence.matchLength) {
+ rawSeqStore->pos++;
+ return sequence;
+ }
+ /* Cut the sequence short (offset == 0 ==> rest is literals). */
+ if (remaining <= sequence.litLength) {
+ sequence.offset = 0;
+ } else if (remaining < sequence.litLength + sequence.matchLength) {
+ sequence.matchLength = remaining - sequence.litLength;
+ if (sequence.matchLength < minMatch) {
+ sequence.offset = 0;
+ }
+ }
+ /* Skip past `remaining` bytes for the future sequences. */
+ ZSTD_ldm_skipSequences(rawSeqStore, remaining, minMatch);
+ return sequence;
+}
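+
+/* Illustrative example (editorial, not part of upstream zstd): for a stored
+ * sequence {litLength=10, matchLength=50, offset=O} and remaining=30 bytes
+ * in the block, maybeSplitSequence() returns {litLength=10, matchLength=20,
+ * offset=O} (assuming 20 >= minMatch), and ZSTD_ldm_skipSequences() leaves
+ * {litLength=0, matchLength=30, offset=O} behind for the next block. */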
+
+void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) {
+ U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
+ while (currPos && rawSeqStore->pos < rawSeqStore->size) {
+ rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos];
+ if (currPos >= currSeq.litLength + currSeq.matchLength) {
+ currPos -= currSeq.litLength + currSeq.matchLength;
+ rawSeqStore->pos++;
+ } else {
+ rawSeqStore->posInSequence = currPos;
+ break;
+ }
+ }
+ if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) {
+ rawSeqStore->posInSequence = 0;
+ }
+}
+
+size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ unsigned const minMatch = cParams->minMatch;
+ ZSTD_blockCompressor const blockCompressor =
+ ZSTD_selectBlockCompressor(cParams->strategy, ZSTD_matchState_dictMode(ms));
+ /* Input bounds */
+ BYTE const* const istart = (BYTE const*)src;
+ BYTE const* const iend = istart + srcSize;
+ /* Input positions */
+ BYTE const* ip = istart;
+
+ DEBUGLOG(5, "ZSTD_ldm_blockCompress: srcSize=%zu", srcSize);
+ /* If using opt parser, use LDMs only as candidates rather than always accepting them */
+ if (cParams->strategy >= ZSTD_btopt) {
+ size_t lastLLSize;
+ ms->ldmSeqStore = rawSeqStore;
+ lastLLSize = blockCompressor(ms, seqStore, rep, src, srcSize);
+ ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore, srcSize);
+ return lastLLSize;
+ }
+
+ assert(rawSeqStore->pos <= rawSeqStore->size);
+ assert(rawSeqStore->size <= rawSeqStore->capacity);
+ /* Loop through each sequence and apply the block compressor to the literals */
+ while (rawSeqStore->pos < rawSeqStore->size && ip < iend) {
+ /* maybeSplitSequence updates rawSeqStore->pos */
+ rawSeq const sequence = maybeSplitSequence(rawSeqStore,
+ (U32)(iend - ip), minMatch);
+ int i;
+ /* End signal */
+ if (sequence.offset == 0)
+ break;
+
+ assert(ip + sequence.litLength + sequence.matchLength <= iend);
+
+ /* Fill tables for block compressor */
+ ZSTD_ldm_limitTableUpdate(ms, ip);
+ ZSTD_ldm_fillFastTables(ms, ip);
+ /* Run the block compressor */
+ DEBUGLOG(5, "pos %u : calling block compressor on segment of size %u", (unsigned)(ip-istart), sequence.litLength);
+ {
+ size_t const newLitLength =
+ blockCompressor(ms, seqStore, rep, ip, sequence.litLength);
+ ip += sequence.litLength;
+ /* Update the repcodes */
+ for (i = ZSTD_REP_NUM - 1; i > 0; i--)
+ rep[i] = rep[i-1];
+ rep[0] = sequence.offset;
+ /* Store the sequence */
+ ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend,
+ sequence.offset + ZSTD_REP_MOVE,
+ sequence.matchLength - MINMATCH);
+ ip += sequence.matchLength;
+ }
+ }
+ /* Fill the tables for the block compressor */
+ ZSTD_ldm_limitTableUpdate(ms, ip);
+ ZSTD_ldm_fillFastTables(ms, ip);
+ /* Compress the last literals */
+ return blockCompressor(ms, seqStore, rep, ip, iend - ip);
+}
diff --git a/lib/zstd/compress/zstd_ldm.h b/lib/zstd/compress/zstd_ldm.h
new file mode 100644
index 00000000000000..25b25270b72e22
--- /dev/null
+++ b/lib/zstd/compress/zstd_ldm.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_LDM_H
+#define ZSTD_LDM_H
+
+
+#include "zstd_compress_internal.h" /* ldmParams_t, U32 */
+#include <linux/zstd.h> /* ZSTD_CCtx, size_t */
+
+/*-*************************************
+* Long distance matching
+***************************************/
+
+#define ZSTD_LDM_DEFAULT_WINDOW_LOG ZSTD_WINDOWLOG_LIMIT_DEFAULT
+
+void ZSTD_ldm_fillHashTable(
+ ldmState_t* state, const BYTE* ip,
+ const BYTE* iend, ldmParams_t const* params);
+
+/*
+ * ZSTD_ldm_generateSequences():
+ *
+ * Generates long range matching sequences in `sequences` using the long
+ * distance match finder; the sequences parse a prefix of the source.
+ * `sequences` must be large enough to store every sequence,
+ * which can be checked with `ZSTD_ldm_getMaxNbSeq()`.
+ * @returns 0 or an error code.
+ *
+ * NOTE: The user must have called ZSTD_window_update() for all of the input
+ * they have, even if they pass it to ZSTD_ldm_generateSequences() in chunks.
+ * NOTE: This function returns an error if it runs out of space to store
+ * sequences.
+ */
+size_t ZSTD_ldm_generateSequences(
+ ldmState_t* ldms, rawSeqStore_t* sequences,
+ ldmParams_t const* params, void const* src, size_t srcSize);
+
+/*
+ * ZSTD_ldm_blockCompress():
+ *
+ * Compresses a block using the predefined sequences, along with a secondary
+ * block compressor. The literals section of every sequence is passed to the
+ * secondary block compressor, and those sequences are interspersed with the
+ * predefined sequences.
+ * Updates `rawSeqStore.pos` to indicate how many sequences have been consumed.
+ * `rawSeqStore.seq` may also be updated to split the last sequence between two
+ * blocks.
+ * @return The length of the last literals.
+ *
+ * NOTE: The source must be at most the maximum block size, but the predefined
+ * sequences can be any size, and may be longer than the block. In the case that
+ * they are longer than the block, the last sequences may need to be split into
+ * two. We handle that case correctly, and update `rawSeqStore` appropriately.
+ * NOTE: This function does not return any errors.
+ */
+size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
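+
+/* Illustrative call sequence (editorial sketch with hypothetical caller
+ * variables, not part of upstream zstd): after ZSTD_window_update() has been
+ * called for the whole input (see the NOTE above), the raw sequences are
+ * generated once and then consumed block by block:
+ *
+ *   size_t const err = ZSTD_ldm_generateSequences(&ldmState, &ldmSeqs,
+ *                                                 &ldmParams, src, srcSize);
+ *   if (!ZSTD_isError(err)) {
+ *       size_t const lastLLSize = ZSTD_ldm_blockCompress(&ldmSeqs, ms,
+ *                                    seqStore, rep, blockStart, blockSize);
+ *   }
+ */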
+
+/*
+ * ZSTD_ldm_skipSequences():
+ *
+ * Skip past `srcSize` bytes worth of sequences in `rawSeqStore`.
+ * Avoids emitting matches less than `minMatch` bytes.
+ * Must be called for data that is not passed to ZSTD_ldm_blockCompress().
+ */
+void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize,
+ U32 const minMatch);
+
+/* ZSTD_ldm_skipRawSeqStoreBytes():
+ * Moves forward in rawSeqStore by nbBytes, updating fields 'pos' and 'posInSequence'.
+ * Not to be used in conjunction with ZSTD_ldm_skipSequences().
+ * Must be called for data which is not passed to ZSTD_ldm_blockCompress().
+ */
+void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes);
+
+/* ZSTD_ldm_getTableSize() :
+ * Estimate the space needed for long distance matching tables or 0 if LDM is
+ * disabled.
+ */
+size_t ZSTD_ldm_getTableSize(ldmParams_t params);
+
+/* ZSTD_ldm_getMaxNbSeq() :
+ * Return an upper bound on the number of sequences that can be produced by
+ * the long distance matcher, or 0 if LDM is disabled.
+ */
+size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize);
+
+/* ZSTD_ldm_adjustParameters() :
+ * If the params->hashRateLog is not set, set it to its default value based on
+ * windowLog and params->hashLog.
+ *
+ * Ensures that params->bucketSizeLog is <= params->hashLog (setting it to
+ * params->hashLog if it is not).
+ *
+ * Ensures that the minMatchLength >= targetLength during optimal parsing.
+ */
+void ZSTD_ldm_adjustParameters(ldmParams_t* params,
+ ZSTD_compressionParameters const* cParams);
+
+
+#endif /* ZSTD_LDM_H */
diff --git a/lib/zstd/compress/zstd_ldm_geartab.h b/lib/zstd/compress/zstd_ldm_geartab.h
new file mode 100644
index 00000000000000..e5c24d856b0a83
--- /dev/null
+++ b/lib/zstd/compress/zstd_ldm_geartab.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_LDM_GEARTAB_H
+#define ZSTD_LDM_GEARTAB_H
+
+static U64 ZSTD_ldm_gearTab[256] = {
+ 0xf5b8f72c5f77775c, 0x84935f266b7ac412, 0xb647ada9ca730ccc,
+ 0xb065bb4b114fb1de, 0x34584e7e8c3a9fd0, 0x4e97e17c6ae26b05,
+ 0x3a03d743bc99a604, 0xcecd042422c4044f, 0x76de76c58524259e,
+ 0x9c8528f65badeaca, 0x86563706e2097529, 0x2902475fa375d889,
+ 0xafb32a9739a5ebe6, 0xce2714da3883e639, 0x21eaf821722e69e,
+ 0x37b628620b628, 0x49a8d455d88caf5, 0x8556d711e6958140,
+ 0x4f7ae74fc605c1f, 0x829f0c3468bd3a20, 0x4ffdc885c625179e,
+ 0x8473de048a3daf1b, 0x51008822b05646b2, 0x69d75d12b2d1cc5f,
+ 0x8c9d4a19159154bc, 0xc3cc10f4abbd4003, 0xd06ddc1cecb97391,
+ 0xbe48e6e7ed80302e, 0x3481db31cee03547, 0xacc3f67cdaa1d210,
+ 0x65cb771d8c7f96cc, 0x8eb27177055723dd, 0xc789950d44cd94be,
+ 0x934feadc3700b12b, 0x5e485f11edbdf182, 0x1e2e2a46fd64767a,
+ 0x2969ca71d82efa7c, 0x9d46e9935ebbba2e, 0xe056b67e05e6822b,
+ 0x94d73f55739d03a0, 0xcd7010bdb69b5a03, 0x455ef9fcd79b82f4,
+ 0x869cb54a8749c161, 0x38d1a4fa6185d225, 0xb475166f94bbe9bb,
+ 0xa4143548720959f1, 0x7aed4780ba6b26ba, 0xd0ce264439e02312,
+ 0x84366d746078d508, 0xa8ce973c72ed17be, 0x21c323a29a430b01,
+ 0x9962d617e3af80ee, 0xab0ce91d9c8cf75b, 0x530e8ee6d19a4dbc,
+ 0x2ef68c0cf53f5d72, 0xc03a681640a85506, 0x496e4e9f9c310967,
+ 0x78580472b59b14a0, 0x273824c23b388577, 0x66bf923ad45cb553,
+ 0x47ae1a5a2492ba86, 0x35e304569e229659, 0x4765182a46870b6f,
+ 0x6cbab625e9099412, 0xddac9a2e598522c1, 0x7172086e666624f2,
+ 0xdf5003ca503b7837, 0x88c0c1db78563d09, 0x58d51865acfc289d,
+ 0x177671aec65224f1, 0xfb79d8a241e967d7, 0x2be1e101cad9a49a,
+ 0x6625682f6e29186b, 0x399553457ac06e50, 0x35dffb4c23abb74,
+ 0x429db2591f54aade, 0xc52802a8037d1009, 0x6acb27381f0b25f3,
+ 0xf45e2551ee4f823b, 0x8b0ea2d99580c2f7, 0x3bed519cbcb4e1e1,
+ 0xff452823dbb010a, 0x9d42ed614f3dd267, 0x5b9313c06257c57b,
+ 0xa114b8008b5e1442, 0xc1fe311c11c13d4b, 0x66e8763ea34c5568,
+ 0x8b982af1c262f05d, 0xee8876faaa75fbb7, 0x8a62a4d0d172bb2a,
+ 0xc13d94a3b7449a97, 0x6dbbba9dc15d037c, 0xc786101f1d92e0f1,
+ 0xd78681a907a0b79b, 0xf61aaf2962c9abb9, 0x2cfd16fcd3cb7ad9,
+ 0x868c5b6744624d21, 0x25e650899c74ddd7, 0xba042af4a7c37463,
+ 0x4eb1a539465a3eca, 0xbe09dbf03b05d5ca, 0x774e5a362b5472ba,
+ 0x47a1221229d183cd, 0x504b0ca18ef5a2df, 0xdffbdfbde2456eb9,
+ 0x46cd2b2fbee34634, 0xf2aef8fe819d98c3, 0x357f5276d4599d61,
+ 0x24a5483879c453e3, 0x88026889192b4b9, 0x28da96671782dbec,
+ 0x4ef37c40588e9aaa, 0x8837b90651bc9fb3, 0xc164f741d3f0e5d6,
+ 0xbc135a0a704b70ba, 0x69cd868f7622ada, 0xbc37ba89e0b9c0ab,
+ 0x47c14a01323552f6, 0x4f00794bacee98bb, 0x7107de7d637a69d5,
+ 0x88af793bb6f2255e, 0xf3c6466b8799b598, 0xc288c616aa7f3b59,
+ 0x81ca63cf42fca3fd, 0x88d85ace36a2674b, 0xd056bd3792389e7,
+ 0xe55c396c4e9dd32d, 0xbefb504571e6c0a6, 0x96ab32115e91e8cc,
+ 0xbf8acb18de8f38d1, 0x66dae58801672606, 0x833b6017872317fb,
+ 0xb87c16f2d1c92864, 0xdb766a74e58b669c, 0x89659f85c61417be,
+ 0xc8daad856011ea0c, 0x76a4b565b6fe7eae, 0xa469d085f6237312,
+ 0xaaf0365683a3e96c, 0x4dbb746f8424f7b8, 0x638755af4e4acc1,
+ 0x3d7807f5bde64486, 0x17be6d8f5bbb7639, 0x903f0cd44dc35dc,
+ 0x67b672eafdf1196c, 0xa676ff93ed4c82f1, 0x521d1004c5053d9d,
+ 0x37ba9ad09ccc9202, 0x84e54d297aacfb51, 0xa0b4b776a143445,
+ 0x820d471e20b348e, 0x1874383cb83d46dc, 0x97edeec7a1efe11c,
+ 0xb330e50b1bdc42aa, 0x1dd91955ce70e032, 0xa514cdb88f2939d5,
+ 0x2791233fd90db9d3, 0x7b670a4cc50f7a9b, 0x77c07d2a05c6dfa5,
+ 0xe3778b6646d0a6fa, 0xb39c8eda47b56749, 0x933ed448addbef28,
+ 0xaf846af6ab7d0bf4, 0xe5af208eb666e49, 0x5e6622f73534cd6a,
+ 0x297daeca42ef5b6e, 0x862daef3d35539a6, 0xe68722498f8e1ea9,
+ 0x981c53093dc0d572, 0xfa09b0bfbf86fbf5, 0x30b1e96166219f15,
+ 0x70e7d466bdc4fb83, 0x5a66736e35f2a8e9, 0xcddb59d2b7c1baef,
+ 0xd6c7d247d26d8996, 0xea4e39eac8de1ba3, 0x539c8bb19fa3aff2,
+ 0x9f90e4c5fd508d8, 0xa34e5956fbaf3385, 0x2e2f8e151d3ef375,
+ 0x173691e9b83faec1, 0xb85a8d56bf016379, 0x8382381267408ae3,
+ 0xb90f901bbdc0096d, 0x7c6ad32933bcec65, 0x76bb5e2f2c8ad595,
+ 0x390f851a6cf46d28, 0xc3e6064da1c2da72, 0xc52a0c101cfa5389,
+ 0xd78eaf84a3fbc530, 0x3781b9e2288b997e, 0x73c2f6dea83d05c4,
+ 0x4228e364c5b5ed7, 0x9d7a3edf0da43911, 0x8edcfeda24686756,
+ 0x5e7667a7b7a9b3a1, 0x4c4f389fa143791d, 0xb08bc1023da7cddc,
+ 0x7ab4be3ae529b1cc, 0x754e6132dbe74ff9, 0x71635442a839df45,
+ 0x2f6fb1643fbe52de, 0x961e0a42cf7a8177, 0xf3b45d83d89ef2ea,
+ 0xee3de4cf4a6e3e9b, 0xcd6848542c3295e7, 0xe4cee1664c78662f,
+ 0x9947548b474c68c4, 0x25d73777a5ed8b0b, 0xc915b1d636b7fc,
+ 0x21c2ba75d9b0d2da, 0x5f6b5dcf608a64a1, 0xdcf333255ff9570c,
+ 0x633b922418ced4ee, 0xc136dde0b004b34a, 0x58cc83b05d4b2f5a,
+ 0x5eb424dda28e42d2, 0x62df47369739cd98, 0xb4e0b42485e4ce17,
+ 0x16e1f0c1f9a8d1e7, 0x8ec3916707560ebf, 0x62ba6e2df2cc9db3,
+ 0xcbf9f4ff77d83a16, 0x78d9d7d07d2bbcc4, 0xef554ce1e02c41f4,
+ 0x8d7581127eccf94d, 0xa9b53336cb3c8a05, 0x38c42c0bf45c4f91,
+ 0x640893cdf4488863, 0x80ec34bc575ea568, 0x39f324f5b48eaa40,
+ 0xe9d9ed1f8eff527f, 0x9224fc058cc5a214, 0xbaba00b04cfe7741,
+ 0x309a9f120fcf52af, 0xa558f3ec65626212, 0x424bec8b7adabe2f,
+ 0x41622513a6aea433, 0xb88da2d5324ca798, 0xd287733b245528a4,
+ 0x9a44697e6d68aec3, 0x7b1093be2f49bb28, 0x50bbec632e3d8aad,
+ 0x6cd90723e1ea8283, 0x897b9e7431b02bf3, 0x219efdcb338a7047,
+ 0x3b0311f0a27c0656, 0xdb17bf91c0db96e7, 0x8cd4fd6b4e85a5b2,
+ 0xfab071054ba6409d, 0x40d6fe831fa9dfd9, 0xaf358debad7d791e,
+ 0xeb8d0e25a65e3e58, 0xbbcbd3df14e08580, 0xcf751f27ecdab2b,
+ 0x2b4da14f2613d8f4
+};
+
+#endif /* ZSTD_LDM_GEARTAB_H */
diff --git a/lib/zstd/compress/zstd_opt.c b/lib/zstd/compress/zstd_opt.c
new file mode 100644
index 00000000000000..04337050fe9a05
--- /dev/null
+++ b/lib/zstd/compress/zstd_opt.c
@@ -0,0 +1,1346 @@
+/*
+ * Copyright (c) Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include "zstd_compress_internal.h"
+#include "hist.h"
+#include "zstd_opt.h"
+
+
+#define ZSTD_LITFREQ_ADD 2 /* scaling factor for litFreq, so that frequencies adapt faster to new stats */
+#define ZSTD_FREQ_DIV 4 /* log factor when using previous stats to init next stats */
+#define ZSTD_MAX_PRICE (1<<30)
+
+#define ZSTD_PREDEF_THRESHOLD 1024 /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */
+
+
+/*-*************************************
+* Price functions for optimal parser
+***************************************/
+
+#if 0 /* approximation at bit level */
+# define BITCOST_ACCURACY 0
+# define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
+# define WEIGHT(stat,opt) ((void)opt, ZSTD_bitWeight(stat))
+#elif 0 /* fractional bit accuracy */
+# define BITCOST_ACCURACY 8
+# define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
+# define WEIGHT(stat,opt) ((void)opt, ZSTD_fracWeight(stat))
+#else /* opt==approx, ultra==accurate */
+# define BITCOST_ACCURACY 8
+# define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
+# define WEIGHT(stat,opt) (opt ? ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat))
+#endif
+
+MEM_STATIC U32 ZSTD_bitWeight(U32 stat)
+{
+ return (ZSTD_highbit32(stat+1) * BITCOST_MULTIPLIER);
+}
+
+MEM_STATIC U32 ZSTD_fracWeight(U32 rawStat)
+{
+ U32 const stat = rawStat + 1;
+ U32 const hb = ZSTD_highbit32(stat);
+ U32 const BWeight = hb * BITCOST_MULTIPLIER;
+ U32 const FWeight = (stat << BITCOST_ACCURACY) >> hb;
+ U32 const weight = BWeight + FWeight;
+ assert(hb + BITCOST_ACCURACY < 31);
+ return weight;
+}
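+
+/* Worked example (editorial, not part of upstream zstd): for rawStat = 5,
+ * stat = 6, hb = ZSTD_highbit32(6) = 2, BWeight = 2*256 = 512 and
+ * FWeight = (6<<8)>>2 = 384, giving weight = 896, i.e. 896/256 = 3.5
+ * "fractional bits" -- a linear interpolation between the integer bit
+ * counts 3 and 4 used for stats in the range [4,8). */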
+
+#if (DEBUGLEVEL>=2)
+/* debugging function,
+ * @return price in bytes as fractional value
+ * for debug messages only */
+MEM_STATIC double ZSTD_fCost(U32 price)
+{
+ return (double)price / (BITCOST_MULTIPLIER*8);
+}
+#endif
+
+static int ZSTD_compressedLiterals(optState_t const* const optPtr)
+{
+ return optPtr->literalCompressionMode != ZSTD_lcm_uncompressed;
+}
+
+static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel)
+{
+ if (ZSTD_compressedLiterals(optPtr))
+ optPtr->litSumBasePrice = WEIGHT(optPtr->litSum, optLevel);
+ optPtr->litLengthSumBasePrice = WEIGHT(optPtr->litLengthSum, optLevel);
+ optPtr->matchLengthSumBasePrice = WEIGHT(optPtr->matchLengthSum, optLevel);
+ optPtr->offCodeSumBasePrice = WEIGHT(optPtr->offCodeSum, optLevel);
+}
+
+
+/* ZSTD_downscaleStat() :
+ * reduce all elements in table by a factor 2^(ZSTD_FREQ_DIV+malus)
+ * return the resulting sum of elements */
+static U32 ZSTD_downscaleStat(unsigned* table, U32 lastEltIndex, int malus)
+{
+ U32 s, sum=0;
+ DEBUGLOG(5, "ZSTD_downscaleStat (nbElts=%u)", (unsigned)lastEltIndex+1);
+ assert(ZSTD_FREQ_DIV+malus > 0 && ZSTD_FREQ_DIV+malus < 31);
+ for (s=0; s<lastEltIndex+1; s++) {
+ table[s] = 1 + (table[s] >> (ZSTD_FREQ_DIV+malus));
+ sum += table[s];
+ }
+ return sum;
+}
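+
+/* Worked example (editorial, not part of upstream zstd): with
+ * ZSTD_FREQ_DIV = 4 and malus = 0, a count of 100 becomes 1 + (100 >> 4) = 7
+ * and a count of 0 becomes 1, so every symbol keeps a non-zero seed
+ * frequency for the next block. */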
+
+/* ZSTD_rescaleFreqs() :
+ * if first block (detected by optPtr->litLengthSum == 0) : init statistics
+ * take hints from dictionary if there is one
+ * or init from zero, using src for literals stats, or flat 1 for match symbols
+ * otherwise downscale existing stats, to be used as seed for next block.
+ */
+static void
+ZSTD_rescaleFreqs(optState_t* const optPtr,
+ const BYTE* const src, size_t const srcSize,
+ int const optLevel)
+{
+ int const compressedLiterals = ZSTD_compressedLiterals(optPtr);
+ DEBUGLOG(5, "ZSTD_rescaleFreqs (srcSize=%u)", (unsigned)srcSize);
+ optPtr->priceType = zop_dynamic;
+
+ if (optPtr->litLengthSum == 0) { /* first block : init */
+ if (srcSize <= ZSTD_PREDEF_THRESHOLD) { /* heuristic */
+ DEBUGLOG(5, "(srcSize <= ZSTD_PREDEF_THRESHOLD) => zop_predef");
+ optPtr->priceType = zop_predef;
+ }
+
+ assert(optPtr->symbolCosts != NULL);
+ if (optPtr->symbolCosts->huf.repeatMode == HUF_repeat_valid) {
+ /* huffman table presumed generated by dictionary */
+ optPtr->priceType = zop_dynamic;
+
+ if (compressedLiterals) {
+ unsigned lit;
+ assert(optPtr->litFreq != NULL);
+ optPtr->litSum = 0;
+ for (lit=0; lit<=MaxLit; lit++) {
+ U32 const scaleLog = 11; /* scale to 2K */
+ U32 const bitCost = HUF_getNbBits(optPtr->symbolCosts->huf.CTable, lit);
+ assert(bitCost <= scaleLog);
+ optPtr->litFreq[lit] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
+ optPtr->litSum += optPtr->litFreq[lit];
+ } }
+
+ { unsigned ll;
+ FSE_CState_t llstate;
+ FSE_initCState(&llstate, optPtr->symbolCosts->fse.litlengthCTable);
+ optPtr->litLengthSum = 0;
+ for (ll=0; ll<=MaxLL; ll++) {
+ U32 const scaleLog = 10; /* scale to 1K */
+ U32 const bitCost = FSE_getMaxNbBits(llstate.symbolTT, ll);
+ assert(bitCost < scaleLog);
+ optPtr->litLengthFreq[ll] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
+ optPtr->litLengthSum += optPtr->litLengthFreq[ll];
+ } }
+
+ { unsigned ml;
+ FSE_CState_t mlstate;
+ FSE_initCState(&mlstate, optPtr->symbolCosts->fse.matchlengthCTable);
+ optPtr->matchLengthSum = 0;
+ for (ml=0; ml<=MaxML; ml++) {
+ U32 const scaleLog = 10;
+ U32 const bitCost = FSE_getMaxNbBits(mlstate.symbolTT, ml);
+ assert(bitCost < scaleLog);
+ optPtr->matchLengthFreq[ml] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
+ optPtr->matchLengthSum += optPtr->matchLengthFreq[ml];
+ } }
+
+ { unsigned of;
+ FSE_CState_t ofstate;
+ FSE_initCState(&ofstate, optPtr->symbolCosts->fse.offcodeCTable);
+ optPtr->offCodeSum = 0;
+ for (of=0; of<=MaxOff; of++) {
+ U32 const scaleLog = 10;
+ U32 const bitCost = FSE_getMaxNbBits(ofstate.symbolTT, of);
+ assert(bitCost < scaleLog);
+ optPtr->offCodeFreq[of] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
+ optPtr->offCodeSum += optPtr->offCodeFreq[of];
+ } }
+
+ } else { /* not a dictionary */
+
+ assert(optPtr->litFreq != NULL);
+ if (compressedLiterals) {
+ unsigned lit = MaxLit;
+ HIST_count_simple(optPtr->litFreq, &lit, src, srcSize); /* use raw first block to init statistics */
+ optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
+ }
+
+ { unsigned ll;
+ for (ll=0; ll<=MaxLL; ll++)
+ optPtr->litLengthFreq[ll] = 1;
+ }
+ optPtr->litLengthSum = MaxLL+1;
+
+ { unsigned ml;
+ for (ml=0; ml<=MaxML; ml++)
+ optPtr->matchLengthFreq[ml] = 1;
+ }
+ optPtr->matchLengthSum = MaxML+1;
+
+ { unsigned of;
+ for (of=0; of<=MaxOff; of++)
+ optPtr->offCodeFreq[of] = 1;
+ }
+ optPtr->offCodeSum = MaxOff+1;
+
+ }
+
+ } else { /* new block : re-use previous statistics, scaled down */
+
+ if (compressedLiterals)
+ optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
+ optPtr->litLengthSum = ZSTD_downscaleStat(optPtr->litLengthFreq, MaxLL, 0);
+ optPtr->matchLengthSum = ZSTD_downscaleStat(optPtr->matchLengthFreq, MaxML, 0);
+ optPtr->offCodeSum = ZSTD_downscaleStat(optPtr->offCodeFreq, MaxOff, 0);
+ }
+
+ ZSTD_setBasePrices(optPtr, optLevel);
+}
+
+/* ZSTD_rawLiteralsCost() :
+ * price of literals (only) in the specified segment (whose length can be 0).
+ * does not include price of literalLength symbol */
+static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength,
+ const optState_t* const optPtr,
+ int optLevel)
+{
+ if (litLength == 0) return 0;
+
+ if (!ZSTD_compressedLiterals(optPtr))
+ return (litLength << 3) * BITCOST_MULTIPLIER; /* Uncompressed - 8 bytes per literal. */
+
+ if (optPtr->priceType == zop_predef)
+ return (litLength*6) * BITCOST_MULTIPLIER; /* 6 bit per literal - no statistic used */
+
+ /* dynamic statistics */
+ { U32 price = litLength * optPtr->litSumBasePrice;
+ U32 u;
+ for (u=0; u < litLength; u++) {
+ assert(WEIGHT(optPtr->litFreq[literals[u]], optLevel) <= optPtr->litSumBasePrice); /* literal cost should never be negative */
+ price -= WEIGHT(optPtr->litFreq[literals[u]], optLevel);
+ }
+ return price;
+ }
+}
+
+/* ZSTD_litLengthPrice() :
+ * cost of literalLength symbol */
+static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optPtr, int optLevel)
+{
+ if (optPtr->priceType == zop_predef) return WEIGHT(litLength, optLevel);
+
+ /* dynamic statistics */
+ { U32 const llCode = ZSTD_LLcode(litLength);
+ return (LL_bits[llCode] * BITCOST_MULTIPLIER)
+ + optPtr->litLengthSumBasePrice
+ - WEIGHT(optPtr->litLengthFreq[llCode], optLevel);
+ }
+}
+
+/* ZSTD_getMatchPrice() :
+ * Provides the cost of the match part (offset + matchLength) of a sequence
+ * Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence.
+ * optLevel: when <2, favors small offset for decompression speed (improved cache efficiency) */
+FORCE_INLINE_TEMPLATE U32
+ZSTD_getMatchPrice(U32 const offset,
+ U32 const matchLength,
+ const optState_t* const optPtr,
+ int const optLevel)
+{
+ U32 price;
+ U32 const offCode = ZSTD_highbit32(offset+1);
+ U32 const mlBase = matchLength - MINMATCH;
+ assert(matchLength >= MINMATCH);
+
+ if (optPtr->priceType == zop_predef) /* fixed scheme, do not use statistics */
+ return WEIGHT(mlBase, optLevel) + ((16 + offCode) * BITCOST_MULTIPLIER);
+
+ /* dynamic statistics */
+ price = (offCode * BITCOST_MULTIPLIER) + (optPtr->offCodeSumBasePrice - WEIGHT(optPtr->offCodeFreq[offCode], optLevel));
+ if ((optLevel<2) /*static*/ && offCode >= 20)
+ price += (offCode-19)*2 * BITCOST_MULTIPLIER; /* handicap for long distance offsets, favor decompression speed */
+
+ /* match Length */
+ { U32 const mlCode = ZSTD_MLcode(mlBase);
+ price += (ML_bits[mlCode] * BITCOST_MULTIPLIER) + (optPtr->matchLengthSumBasePrice - WEIGHT(optPtr->matchLengthFreq[mlCode], optLevel));
+ }
+
+ price += BITCOST_MULTIPLIER / 5; /* heuristic : make matches a bit more costly to favor fewer sequences -> faster decompression speed */
+
+ DEBUGLOG(8, "ZSTD_getMatchPrice(ml:%u) = %u", matchLength, price);
+ return price;
+}
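+
+/* Worked example (editorial, not part of upstream zstd): for offset = 1000,
+ * offCode = ZSTD_highbit32(1001) = 9; in zop_predef mode a match of length
+ * 19 (mlBase = 19 - MINMATCH = 16) is therefore priced at
+ * WEIGHT(16, optLevel) + (16 + 9) * BITCOST_MULTIPLIER. */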
+
+/* ZSTD_updateStats() :
+ * assumption : literals + litLength <= iend */
+static void ZSTD_updateStats(optState_t* const optPtr,
+ U32 litLength, const BYTE* literals,
+ U32 offsetCode, U32 matchLength)
+{
+ /* literals */
+ if (ZSTD_compressedLiterals(optPtr)) {
+ U32 u;
+ for (u=0; u < litLength; u++)
+ optPtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
+ optPtr->litSum += litLength*ZSTD_LITFREQ_ADD;
+ }
+
+ /* literal Length */
+ { U32 const llCode = ZSTD_LLcode(litLength);
+ optPtr->litLengthFreq[llCode]++;
+ optPtr->litLengthSum++;
+ }
+
+ /* match offset code (0-2=>repCode; 3+=>offset+2) */
+ { U32 const offCode = ZSTD_highbit32(offsetCode+1);
+ assert(offCode <= MaxOff);
+ optPtr->offCodeFreq[offCode]++;
+ optPtr->offCodeSum++;
+ }
+
+ /* match Length */
+ { U32 const mlBase = matchLength - MINMATCH;
+ U32 const mlCode = ZSTD_MLcode(mlBase);
+ optPtr->matchLengthFreq[mlCode]++;
+ optPtr->matchLengthSum++;
+ }
+}
+
+
+/* ZSTD_readMINMATCH() :
+ * function safe only for comparisons
+ * assumption : memPtr must be at least 4 bytes before end of buffer */
+MEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length)
+{
+ switch (length)
+ {
+ default :
+ case 4 : return MEM_read32(memPtr);
+ case 3 : if (MEM_isLittleEndian())
+ return MEM_read32(memPtr)<<8;
+ else
+ return MEM_read32(memPtr)>>8;
+ }
+}
+
+
+/* Update hashTable3 up to ip (excluded)
+ Assumption : always within prefix (i.e. not within extDict) */
+static U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_matchState_t* ms,
+ U32* nextToUpdate3,
+ const BYTE* const ip)
+{
+ U32* const hashTable3 = ms->hashTable3;
+ U32 const hashLog3 = ms->hashLog3;
+ const BYTE* const base = ms->window.base;
+ U32 idx = *nextToUpdate3;
+ U32 const target = (U32)(ip - base);
+ size_t const hash3 = ZSTD_hash3Ptr(ip, hashLog3);
+ assert(hashLog3 > 0);
+
+ while(idx < target) {
+ hashTable3[ZSTD_hash3Ptr(base+idx, hashLog3)] = idx;
+ idx++;
+ }
+
+ *nextToUpdate3 = target;
+ return hashTable3[hash3];
+}
+
+
+/*-*************************************
+* Binary Tree search
+***************************************/
+/* ZSTD_insertBt1() : add one or multiple positions to tree.
+ * ip : assumed <= iend-8 .
+ * @return : nb of positions added */
+static U32 ZSTD_insertBt1(
+ ZSTD_matchState_t* ms,
+ const BYTE* const ip, const BYTE* const iend,
+ U32 const mls, const int extDict)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashTable = ms->hashTable;
+ U32 const hashLog = cParams->hashLog;
+ size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
+ U32* const bt = ms->chainTable;
+ U32 const btLog = cParams->chainLog - 1;
+ U32 const btMask = (1 << btLog) - 1;
+ U32 matchIndex = hashTable[h];
+ size_t commonLengthSmaller=0, commonLengthLarger=0;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const U32 dictLimit = ms->window.dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ const BYTE* match;
+ const U32 curr = (U32)(ip-base);
+ const U32 btLow = btMask >= curr ? 0 : curr - btMask;
+ U32* smallerPtr = bt + 2*(curr&btMask);
+ U32* largerPtr = smallerPtr + 1;
+ U32 dummy32; /* to be nullified at the end */
+ U32 const windowLow = ms->window.lowLimit;
+ U32 matchEndIdx = curr+8+1;
+ size_t bestLength = 8;
+ U32 nbCompares = 1U << cParams->searchLog;
+#ifdef ZSTD_C_PREDICT
+ U32 predictedSmall = *(bt + 2*((curr-1)&btMask) + 0);
+ U32 predictedLarge = *(bt + 2*((curr-1)&btMask) + 1);
+ predictedSmall += (predictedSmall>0);
+ predictedLarge += (predictedLarge>0);
+#endif /* ZSTD_C_PREDICT */
+
+ DEBUGLOG(8, "ZSTD_insertBt1 (%u)", curr);
+
+ assert(ip <= iend-8); /* required for h calculation */
+ hashTable[h] = curr; /* Update Hash Table */
+
+ assert(windowLow > 0);
+ for (; nbCompares && (matchIndex >= windowLow); --nbCompares) {
+ U32* const nextPtr = bt + 2*(matchIndex & btMask);
+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+ assert(matchIndex < curr);
+
+#ifdef ZSTD_C_PREDICT /* note : can create issues when hlog small <= 11 */
+ const U32* predictPtr = bt + 2*((matchIndex-1) & btMask); /* written this way, as bt is a roll buffer */
+ if (matchIndex == predictedSmall) {
+ /* no need to check length, result known */
+ *smallerPtr = matchIndex;
+ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ smallerPtr = nextPtr+1; /* new "smaller" => larger of match */
+ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
+ predictedSmall = predictPtr[1] + (predictPtr[1]>0);
+ continue;
+ }
+ if (matchIndex == predictedLarge) {
+ *largerPtr = matchIndex;
+ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ largerPtr = nextPtr;
+ matchIndex = nextPtr[0];
+ predictedLarge = predictPtr[0] + (predictPtr[0]>0);
+ continue;
+ }
+#endif
+
+ if (!extDict || (matchIndex+matchLength >= dictLimit)) {
+ assert(matchIndex+matchLength >= dictLimit); /* might be wrong if actually extDict */
+ match = base + matchIndex;
+ matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
+ } else {
+ match = dictBase + matchIndex;
+ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
+ if (matchIndex+matchLength >= dictLimit)
+ match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
+ }
+
+ if (matchLength > bestLength) {
+ bestLength = matchLength;
+ if (matchLength > matchEndIdx - matchIndex)
+ matchEndIdx = matchIndex + (U32)matchLength;
+ }
+
+ if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */
+ break; /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */
+ }
+
+ if (match[matchLength] < ip[matchLength]) { /* necessarily within buffer */
+ /* match is smaller than current */
+ *smallerPtr = matchIndex; /* update smaller idx */
+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop searching */
+ smallerPtr = nextPtr+1; /* new "candidate" => larger than match, which was smaller than target */
+ matchIndex = nextPtr[1]; /* new matchIndex, larger than previous and closer to current */
+ } else {
+ /* match is larger than current */
+ *largerPtr = matchIndex;
+ commonLengthLarger = matchLength;
+ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop searching */
+ largerPtr = nextPtr;
+ matchIndex = nextPtr[0];
+ } }
+
+ *smallerPtr = *largerPtr = 0;
+ { U32 positions = 0;
+ if (bestLength > 384) positions = MIN(192, (U32)(bestLength - 384)); /* speed optimization */
+ assert(matchEndIdx > curr + 8);
+ return MAX(positions, matchEndIdx - (curr + 8));
+ }
+}
+
+FORCE_INLINE_TEMPLATE
+void ZSTD_updateTree_internal(
+ ZSTD_matchState_t* ms,
+ const BYTE* const ip, const BYTE* const iend,
+ const U32 mls, const ZSTD_dictMode_e dictMode)
+{
+ const BYTE* const base = ms->window.base;
+ U32 const target = (U32)(ip - base);
+ U32 idx = ms->nextToUpdate;
+ DEBUGLOG(6, "ZSTD_updateTree_internal, from %u to %u (dictMode:%u)",
+ idx, target, dictMode);
+
+ while(idx < target) {
+ U32 const forward = ZSTD_insertBt1(ms, base+idx, iend, mls, dictMode == ZSTD_extDict);
+ assert(idx < (U32)(idx + forward));
+ idx += forward;
+ }
+ assert((size_t)(ip - base) <= (size_t)(U32)(-1));
+ assert((size_t)(iend - base) <= (size_t)(U32)(-1));
+ ms->nextToUpdate = target;
+}
+
+void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend) {
+ ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_noDict);
+}
+
+FORCE_INLINE_TEMPLATE
+U32 ZSTD_insertBtAndGetAllMatches (
+ ZSTD_match_t* matches, /* store result (found matches) in this table (presumed large enough) */
+ ZSTD_matchState_t* ms,
+ U32* nextToUpdate3,
+ const BYTE* const ip, const BYTE* const iLimit, const ZSTD_dictMode_e dictMode,
+ const U32 rep[ZSTD_REP_NUM],
+ U32 const ll0, /* tells if associated literal length is 0 or not. This value must be 0 or 1 */
+ const U32 lengthToBeat,
+ U32 const mls /* template */)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
+ const BYTE* const base = ms->window.base;
+ U32 const curr = (U32)(ip-base);
+ U32 const hashLog = cParams->hashLog;
+ U32 const minMatch = (mls==3) ? 3 : 4;
+ U32* const hashTable = ms->hashTable;
+ size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
+ U32 matchIndex = hashTable[h];
+ U32* const bt = ms->chainTable;
+ U32 const btLog = cParams->chainLog - 1;
+ U32 const btMask= (1U << btLog) - 1;
+ size_t commonLengthSmaller=0, commonLengthLarger=0;
+ const BYTE* const dictBase = ms->window.dictBase;
+ U32 const dictLimit = ms->window.dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ U32 const btLow = (btMask >= curr) ? 0 : curr - btMask;
+ U32 const windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog);
+ U32 const matchLow = windowLow ? windowLow : 1;
+ U32* smallerPtr = bt + 2*(curr&btMask);
+ U32* largerPtr = bt + 2*(curr&btMask) + 1;
+ U32 matchEndIdx = curr+8+1; /* farthest referenced position of any match => detects repetitive patterns */
+ U32 dummy32; /* to be nullified at the end */
+ U32 mnum = 0;
+ U32 nbCompares = 1U << cParams->searchLog;
+
+ const ZSTD_matchState_t* dms = dictMode == ZSTD_dictMatchState ? ms->dictMatchState : NULL;
+ const ZSTD_compressionParameters* const dmsCParams =
+ dictMode == ZSTD_dictMatchState ? &dms->cParams : NULL;
+ const BYTE* const dmsBase = dictMode == ZSTD_dictMatchState ? dms->window.base : NULL;
+ const BYTE* const dmsEnd = dictMode == ZSTD_dictMatchState ? dms->window.nextSrc : NULL;
+ U32 const dmsHighLimit = dictMode == ZSTD_dictMatchState ? (U32)(dmsEnd - dmsBase) : 0;
+ U32 const dmsLowLimit = dictMode == ZSTD_dictMatchState ? dms->window.lowLimit : 0;
+ U32 const dmsIndexDelta = dictMode == ZSTD_dictMatchState ? windowLow - dmsHighLimit : 0;
+ U32 const dmsHashLog = dictMode == ZSTD_dictMatchState ? dmsCParams->hashLog : hashLog;
+ U32 const dmsBtLog = dictMode == ZSTD_dictMatchState ? dmsCParams->chainLog - 1 : btLog;
+ U32 const dmsBtMask = dictMode == ZSTD_dictMatchState ? (1U << dmsBtLog) - 1 : 0;
+ U32 const dmsBtLow = dictMode == ZSTD_dictMatchState && dmsBtMask < dmsHighLimit - dmsLowLimit ? dmsHighLimit - dmsBtMask : dmsLowLimit;
+
+ size_t bestLength = lengthToBeat-1;
+ DEBUGLOG(8, "ZSTD_insertBtAndGetAllMatches: current=%u", curr);
+
+ /* check repCode */
+ assert(ll0 <= 1); /* necessarily 1 or 0 */
+ { U32 const lastR = ZSTD_REP_NUM + ll0;
+ U32 repCode;
+ for (repCode = ll0; repCode < lastR; repCode++) {
+ U32 const repOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
+ U32 const repIndex = curr - repOffset;
+ U32 repLen = 0;
+ assert(curr >= dictLimit);
+ if (repOffset-1 /* intentional overflow, discards 0 and -1 */ < curr-dictLimit) { /* equivalent to `curr > repIndex >= dictLimit` */
+ /* We must validate the repcode offset because when we're using a dictionary the
+ * valid offset range shrinks when the dictionary goes out of bounds.
+ */
+ if ((repIndex >= windowLow) & (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repOffset, minMatch))) {
+ repLen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repOffset, iLimit) + minMatch;
+ }
+ } else { /* repIndex < dictLimit || repIndex >= curr */
+ const BYTE* const repMatch = dictMode == ZSTD_dictMatchState ?
+ dmsBase + repIndex - dmsIndexDelta :
+ dictBase + repIndex;
+ assert(curr >= windowLow);
+ if ( dictMode == ZSTD_extDict
+ && ( ((repOffset-1) /*intentional overflow*/ < curr - windowLow) /* equivalent to `curr > repIndex >= windowLow` */
+ & (((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */)
+ && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
+ repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dictEnd, prefixStart) + minMatch;
+ }
+ if (dictMode == ZSTD_dictMatchState
+ && ( ((repOffset-1) /*intentional overflow*/ < curr - (dmsLowLimit + dmsIndexDelta)) /* equivalent to `curr > repIndex >= dmsLowLimit` */
+ & ((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */
+ && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
+ repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dmsEnd, prefixStart) + minMatch;
+ } }
+ /* save longer solution */
+ if (repLen > bestLength) {
+ DEBUGLOG(8, "found repCode %u (ll0:%u, offset:%u) of length %u",
+ repCode, ll0, repOffset, repLen);
+ bestLength = repLen;
+ matches[mnum].off = repCode - ll0;
+ matches[mnum].len = (U32)repLen;
+ mnum++;
+ if ( (repLen > sufficient_len)
+ | (ip+repLen == iLimit) ) { /* best possible */
+ return mnum;
+ } } } }
+
+ /* HC3 match finder */
+ if ((mls == 3) /*static*/ && (bestLength < mls)) {
+ U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(ms, nextToUpdate3, ip);
+ if ((matchIndex3 >= matchLow)
+ & (curr - matchIndex3 < (1<<18)) /*heuristic : longer distance likely too expensive*/ ) {
+ size_t mlen;
+ if ((dictMode == ZSTD_noDict) /*static*/ || (dictMode == ZSTD_dictMatchState) /*static*/ || (matchIndex3 >= dictLimit)) {
+ const BYTE* const match = base + matchIndex3;
+ mlen = ZSTD_count(ip, match, iLimit);
+ } else {
+ const BYTE* const match = dictBase + matchIndex3;
+ mlen = ZSTD_count_2segments(ip, match, iLimit, dictEnd, prefixStart);
+ }
+
+ /* save best solution */
+ if (mlen >= mls /* == 3 > bestLength */) {
+ DEBUGLOG(8, "found small match with hlog3, of length %u",
+ (U32)mlen);
+ bestLength = mlen;
+ assert(curr > matchIndex3);
+ assert(mnum==0); /* no prior solution */
+ matches[0].off = (curr - matchIndex3) + ZSTD_REP_MOVE;
+ matches[0].len = (U32)mlen;
+ mnum = 1;
+ if ( (mlen > sufficient_len) |
+ (ip+mlen == iLimit) ) { /* best possible length */
+ ms->nextToUpdate = curr+1; /* skip insertion */
+ return 1;
+ } } }
+ /* no dictMatchState lookup: dicts don't have a populated HC3 table */
+ }
+
+ hashTable[h] = curr; /* Update Hash Table */
+
+ for (; nbCompares && (matchIndex >= matchLow); --nbCompares) {
+ U32* const nextPtr = bt + 2*(matchIndex & btMask);
+ const BYTE* match;
+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+ assert(curr > matchIndex);
+
+ if ((dictMode == ZSTD_noDict) || (dictMode == ZSTD_dictMatchState) || (matchIndex+matchLength >= dictLimit)) {
+ assert(matchIndex+matchLength >= dictLimit); /* ensure the condition is correct when !extDict */
+ match = base + matchIndex;
+ if (matchIndex >= dictLimit) assert(memcmp(match, ip, matchLength) == 0); /* ensure early section of match is equal as expected */
+ matchLength += ZSTD_count(ip+matchLength, match+matchLength, iLimit);
+ } else {
+ match = dictBase + matchIndex;
+ assert(memcmp(match, ip, matchLength) == 0); /* ensure early section of match is equal as expected */
+ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart);
+ if (matchIndex+matchLength >= dictLimit)
+ match = base + matchIndex; /* prepare for match[matchLength] read */
+ }
+
+ if (matchLength > bestLength) {
+ DEBUGLOG(8, "found match of length %u at distance %u (offCode=%u)",
+ (U32)matchLength, curr - matchIndex, curr - matchIndex + ZSTD_REP_MOVE);
+ assert(matchEndIdx > matchIndex);
+ if (matchLength > matchEndIdx - matchIndex)
+ matchEndIdx = matchIndex + (U32)matchLength;
+ bestLength = matchLength;
+ matches[mnum].off = (curr - matchIndex) + ZSTD_REP_MOVE;
+ matches[mnum].len = (U32)matchLength;
+ mnum++;
+ if ( (matchLength > ZSTD_OPT_NUM)
+ | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
+ if (dictMode == ZSTD_dictMatchState) nbCompares = 0; /* break should also skip searching dms */
+ break; /* drop, to preserve bt consistency (miss a little bit of compression) */
+ }
+ }
+
+ if (match[matchLength] < ip[matchLength]) {
+ /* match smaller than current */
+ *smallerPtr = matchIndex; /* update smaller idx */
+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ smallerPtr = nextPtr+1; /* new candidate => larger than match, which was smaller than current */
+ matchIndex = nextPtr[1]; /* new matchIndex, larger than previous, closer to current */
+ } else {
+ *largerPtr = matchIndex;
+ commonLengthLarger = matchLength;
+ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ largerPtr = nextPtr;
+ matchIndex = nextPtr[0];
+ } }
+
+ *smallerPtr = *largerPtr = 0;
+
+ assert(nbCompares <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */
+ if (dictMode == ZSTD_dictMatchState && nbCompares) {
+ size_t const dmsH = ZSTD_hashPtr(ip, dmsHashLog, mls);
+ U32 dictMatchIndex = dms->hashTable[dmsH];
+ const U32* const dmsBt = dms->chainTable;
+ commonLengthSmaller = commonLengthLarger = 0;
+ for (; nbCompares && (dictMatchIndex > dmsLowLimit); --nbCompares) {
+ const U32* const nextPtr = dmsBt + 2*(dictMatchIndex & dmsBtMask);
+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+ const BYTE* match = dmsBase + dictMatchIndex;
+ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dmsEnd, prefixStart);
+ if (dictMatchIndex+matchLength >= dmsHighLimit)
+ match = base + dictMatchIndex + dmsIndexDelta; /* to prepare for next usage of match[matchLength] */
+
+ if (matchLength > bestLength) {
+ matchIndex = dictMatchIndex + dmsIndexDelta;
+ DEBUGLOG(8, "found dms match of length %u at distance %u (offCode=%u)",
+ (U32)matchLength, curr - matchIndex, curr - matchIndex + ZSTD_REP_MOVE);
+ if (matchLength > matchEndIdx - matchIndex)
+ matchEndIdx = matchIndex + (U32)matchLength;
+ bestLength = matchLength;
+ matches[mnum].off = (curr - matchIndex) + ZSTD_REP_MOVE;
+ matches[mnum].len = (U32)matchLength;
+ mnum++;
+ if ( (matchLength > ZSTD_OPT_NUM)
+ | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
+ break; /* drop, to guarantee consistency (miss a little bit of compression) */
+ }
+ }
+
+ if (dictMatchIndex <= dmsBtLow) { break; } /* beyond tree size, stop the search */
+ if (match[matchLength] < ip[matchLength]) {
+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+ dictMatchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
+ } else {
+ /* match is larger than current */
+ commonLengthLarger = matchLength;
+ dictMatchIndex = nextPtr[0];
+ }
+ }
+ }
+
+ assert(matchEndIdx > curr+8);
+ ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */
+ return mnum;
+}
+
+
+FORCE_INLINE_TEMPLATE U32 ZSTD_BtGetAllMatches (
+ ZSTD_match_t* matches, /* store result (match found, increasing size) in this table */
+ ZSTD_matchState_t* ms,
+ U32* nextToUpdate3,
+ const BYTE* ip, const BYTE* const iHighLimit, const ZSTD_dictMode_e dictMode,
+ const U32 rep[ZSTD_REP_NUM],
+ U32 const ll0,
+ U32 const lengthToBeat)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32 const matchLengthSearch = cParams->minMatch;
+ DEBUGLOG(8, "ZSTD_BtGetAllMatches");
+ if (ip < ms->window.base + ms->nextToUpdate) return 0; /* skipped area */
+ ZSTD_updateTree_internal(ms, ip, iHighLimit, matchLengthSearch, dictMode);
+ switch(matchLengthSearch)
+ {
+ case 3 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 3);
+ default :
+ case 4 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 4);
+ case 5 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 5);
+ case 7 :
+ case 6 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 6);
+ }
+}
+
+/* ***********************
+* LDM helper functions *
+*************************/
+
+/* Struct containing info needed to make a decision about ldm inclusion */
+typedef struct {
+ rawSeqStore_t seqStore; /* External match candidates store for this block */
+ U32 startPosInBlock; /* Start position of the current match candidate */
+ U32 endPosInBlock; /* End position of the current match candidate */
+ U32 offset; /* Offset of the match candidate */
+} ZSTD_optLdm_t;
+
+/* ZSTD_optLdm_skipRawSeqStoreBytes():
+ * Moves forward in rawSeqStore by nbBytes, updating the fields 'pos' and 'posInSequence' accordingly.
+ */
+static void ZSTD_optLdm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) {
+ U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
+ while (currPos && rawSeqStore->pos < rawSeqStore->size) {
+ rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos];
+ if (currPos >= currSeq.litLength + currSeq.matchLength) {
+ currPos -= currSeq.litLength + currSeq.matchLength;
+ rawSeqStore->pos++;
+ } else {
+ rawSeqStore->posInSequence = currPos;
+ break;
+ }
+ }
+ if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) {
+ rawSeqStore->posInSequence = 0;
+ }
+}
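+
+/* Worked example for ZSTD_optLdm_skipRawSeqStoreBytes(), assuming a store whose current
+ * sequence has litLength=3 and matchLength=7 (10 bytes total) and posInSequence=0:
+ * skipping 5 bytes keeps 'pos' unchanged and sets posInSequence=5, while skipping 12 bytes
+ * advances 'pos' to the next sequence and carries the remaining 2 bytes into posInSequence. */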
+
+/* ZSTD_opt_getNextMatchAndUpdateSeqStore():
+ * Calculates the beginning and end of the next match in the current block.
+ * Updates 'pos' and 'posInSequence' of the ldmSeqStore.
+ */
+static void ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm, U32 currPosInBlock,
+ U32 blockBytesRemaining) {
+ rawSeq currSeq;
+ U32 currBlockEndPos;
+ U32 literalsBytesRemaining;
+ U32 matchBytesRemaining;
+
+ /* Setting match end position to MAX to ensure we never use an LDM during this block */
+ if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) {
+ optLdm->startPosInBlock = UINT_MAX;
+ optLdm->endPosInBlock = UINT_MAX;
+ return;
+ }
+    /* Calculate the number of bytes remaining in the match and the literals, after adjusting
+        based on ldmSeqStore->posInSequence */
+ currSeq = optLdm->seqStore.seq[optLdm->seqStore.pos];
+ assert(optLdm->seqStore.posInSequence <= currSeq.litLength + currSeq.matchLength);
+ currBlockEndPos = currPosInBlock + blockBytesRemaining;
+ literalsBytesRemaining = (optLdm->seqStore.posInSequence < currSeq.litLength) ?
+ currSeq.litLength - (U32)optLdm->seqStore.posInSequence :
+ 0;
+ matchBytesRemaining = (literalsBytesRemaining == 0) ?
+ currSeq.matchLength - ((U32)optLdm->seqStore.posInSequence - currSeq.litLength) :
+ currSeq.matchLength;
+
+ /* If there are more literal bytes than bytes remaining in block, no ldm is possible */
+ if (literalsBytesRemaining >= blockBytesRemaining) {
+ optLdm->startPosInBlock = UINT_MAX;
+ optLdm->endPosInBlock = UINT_MAX;
+ ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, blockBytesRemaining);
+ return;
+ }
+
+    /* Matches may end up shorter than MINMATCH as a result of this process. In that case, we will reject them
+       when we are deciding whether or not to add the ldm */
+ optLdm->startPosInBlock = currPosInBlock + literalsBytesRemaining;
+ optLdm->endPosInBlock = optLdm->startPosInBlock + matchBytesRemaining;
+ optLdm->offset = currSeq.offset;
+
+ if (optLdm->endPosInBlock > currBlockEndPos) {
+        /* Match ends after the block ends, so we can't use the whole match */
+ optLdm->endPosInBlock = currBlockEndPos;
+ ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, currBlockEndPos - currPosInBlock);
+ } else {
+        /* Consume a number of bytes equal to the size of the remaining sequence */
+ ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, literalsBytesRemaining + matchBytesRemaining);
+ }
+}
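+
+/* Worked example for ZSTD_opt_getNextMatchAndUpdateSeqStore(), assuming currPosInBlock=100,
+ * blockBytesRemaining=50 (block end at 150), and a current sequence with litLength=5,
+ * matchLength=20, posInSequence=0: the candidate spans startPosInBlock=105 to endPosInBlock=125,
+ * and 25 bytes are consumed from the seq store. With matchLength=60 instead, endPosInBlock
+ * would reach 165 and is clamped to 150, and only the 50 in-block bytes are consumed. */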
+
+/* ZSTD_optLdm_maybeAddMatch():
+ * Adds a match if it is long enough, based on its 'startPosInBlock'
+ * and 'endPosInBlock', into 'matches'. Maintains the correct ordering of 'matches'.
+ */
+static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches,
+ ZSTD_optLdm_t* optLdm, U32 currPosInBlock) {
+ U32 posDiff = currPosInBlock - optLdm->startPosInBlock;
+ /* Note: ZSTD_match_t actually contains offCode and matchLength (before subtracting MINMATCH) */
+ U32 candidateMatchLength = optLdm->endPosInBlock - optLdm->startPosInBlock - posDiff;
+ U32 candidateOffCode = optLdm->offset + ZSTD_REP_MOVE;
+
+ /* Ensure that current block position is not outside of the match */
+ if (currPosInBlock < optLdm->startPosInBlock
+ || currPosInBlock >= optLdm->endPosInBlock
+ || candidateMatchLength < MINMATCH) {
+ return;
+ }
+
+ if (*nbMatches == 0 || ((candidateMatchLength > matches[*nbMatches-1].len) && *nbMatches < ZSTD_OPT_NUM)) {
+ DEBUGLOG(6, "ZSTD_optLdm_maybeAddMatch(): Adding ldm candidate match (offCode: %u matchLength %u) at block position=%u",
+ candidateOffCode, candidateMatchLength, currPosInBlock);
+ matches[*nbMatches].len = candidateMatchLength;
+ matches[*nbMatches].off = candidateOffCode;
+ (*nbMatches)++;
+ }
+}
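+
+/* Worked example for ZSTD_optLdm_maybeAddMatch(), assuming startPosInBlock=10, endPosInBlock=30
+ * and currPosInBlock=15: posDiff=5 and candidateMatchLength = 30 - 10 - 5 = 15, which passes the
+ * MINMATCH check and is appended (with offCode = offset + ZSTD_REP_MOVE) as long as it is longer
+ * than the longest match already stored and the 'matches' table is not full. */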
+
+/* ZSTD_optLdm_processMatchCandidate():
+ * Wrapper function to update ldm seq store and call ldm functions as necessary.
+ */
+static void ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm, ZSTD_match_t* matches, U32* nbMatches,
+ U32 currPosInBlock, U32 remainingBytes) {
+ if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) {
+ return;
+ }
+
+ if (currPosInBlock >= optLdm->endPosInBlock) {
+ if (currPosInBlock > optLdm->endPosInBlock) {
+            /* The position at which ZSTD_optLdm_processMatchCandidate() is called is not necessarily
+             * at the end of a match from the ldm seq store, and will often be some bytes
+             * beyond 'endPosInBlock'. As such, we need to correct for these "overshoots"
+ */
+ U32 posOvershoot = currPosInBlock - optLdm->endPosInBlock;
+ ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, posOvershoot);
+ }
+ ZSTD_opt_getNextMatchAndUpdateSeqStore(optLdm, currPosInBlock, remainingBytes);
+ }
+ ZSTD_optLdm_maybeAddMatch(matches, nbMatches, optLdm, currPosInBlock);
+}
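+
+/* Note: the optimal parser below calls ZSTD_optLdm_processMatchCandidate() once per evaluated
+ * position, passing the offset from the block start as currPosInBlock and the bytes left until
+ * iend as remainingBytes. */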
+
+/*-*******************************
+* Optimal parser
+*********************************/
+
+
+static U32 ZSTD_totalLen(ZSTD_optimal_t sol)
+{
+ return sol.litlen + sol.mlen;
+}
+
+#if 0 /* debug */
+
+static void
+listStats(const U32* table, int lastEltID)
+{
+ int const nbElts = lastEltID + 1;
+ int enb;
+ for (enb=0; enb < nbElts; enb++) {
+ (void)table;
+ /* RAWLOG(2, "%3i:%3i, ", enb, table[enb]); */
+ RAWLOG(2, "%4i,", table[enb]);
+ }
+ RAWLOG(2, " \n");
+}
+
+#endif
+
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
+ seqStore_t* seqStore,
+ U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize,
+ const int optLevel,
+ const ZSTD_dictMode_e dictMode)
+{
+ optState_t* const optStatePtr = &ms->opt;
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - 8;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const prefixStart = base + ms->window.dictLimit;
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+
+ U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
+ U32 const minMatch = (cParams->minMatch == 3) ? 3 : 4;
+ U32 nextToUpdate3 = ms->nextToUpdate;
+
+ ZSTD_optimal_t* const opt = optStatePtr->priceTable;
+ ZSTD_match_t* const matches = optStatePtr->matchTable;
+ ZSTD_optimal_t lastSequence;
+ ZSTD_optLdm_t optLdm;
+
+ optLdm.seqStore = ms->ldmSeqStore ? *ms->ldmSeqStore : kNullRawSeqStore;
+ optLdm.endPosInBlock = optLdm.startPosInBlock = optLdm.offset = 0;
+ ZSTD_opt_getNextMatchAndUpdateSeqStore(&optLdm, (U32)(ip-istart), (U32)(iend-ip));
+
+ /* init */
+ DEBUGLOG(5, "ZSTD_compressBlock_opt_generic: current=%u, prefix=%u, nextToUpdate=%u",
+ (U32)(ip - base), ms->window.dictLimit, ms->nextToUpdate);
+ assert(optLevel <= 2);
+ ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize, optLevel);
+ ip += (ip==prefixStart);
+
+ /* Match Loop */
+ while (ip < ilimit) {
+ U32 cur, last_pos = 0;
+
+ /* find first match */
+ { U32 const litlen = (U32)(ip - anchor);
+ U32 const ll0 = !litlen;
+ U32 nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, ip, iend, dictMode, rep, ll0, minMatch);
+ ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
+ (U32)(ip-istart), (U32)(iend - ip));
+ if (!nbMatches) { ip++; continue; }
+
+ /* initialize opt[0] */
+ { U32 i ; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; }
+ opt[0].mlen = 0; /* means is_a_literal */
+ opt[0].litlen = litlen;
+ /* We don't need to include the actual price of the literals because
+ * it is static for the duration of the forward pass, and is included
+ * in every price. We include the literal length to avoid negative
+ * prices when we subtract the previous literal length.
+ */
+ opt[0].price = ZSTD_litLengthPrice(litlen, optStatePtr, optLevel);
+
+ /* large match -> immediate encoding */
+ { U32 const maxML = matches[nbMatches-1].len;
+ U32 const maxOffset = matches[nbMatches-1].off;
+ DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new series",
+ nbMatches, maxML, maxOffset, (U32)(ip-prefixStart));
+
+ if (maxML > sufficient_len) {
+ lastSequence.litlen = litlen;
+ lastSequence.mlen = maxML;
+ lastSequence.off = maxOffset;
+ DEBUGLOG(6, "large match (%u>%u), immediate encoding",
+ maxML, sufficient_len);
+ cur = 0;
+ last_pos = ZSTD_totalLen(lastSequence);
+ goto _shortestPath;
+ } }
+
+ /* set prices for first matches starting position == 0 */
+ { U32 const literalsPrice = opt[0].price + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
+ U32 pos;
+ U32 matchNb;
+ for (pos = 1; pos < minMatch; pos++) {
+ opt[pos].price = ZSTD_MAX_PRICE; /* mlen, litlen and price will be fixed during forward scanning */
+ }
+ for (matchNb = 0; matchNb < nbMatches; matchNb++) {
+ U32 const offset = matches[matchNb].off;
+ U32 const end = matches[matchNb].len;
+ for ( ; pos <= end ; pos++ ) {
+ U32 const matchPrice = ZSTD_getMatchPrice(offset, pos, optStatePtr, optLevel);
+ U32 const sequencePrice = literalsPrice + matchPrice;
+ DEBUGLOG(7, "rPos:%u => set initial price : %.2f",
+ pos, ZSTD_fCost(sequencePrice));
+ opt[pos].mlen = pos;
+ opt[pos].off = offset;
+ opt[pos].litlen = litlen;
+ opt[pos].price = sequencePrice;
+ } }
+ last_pos = pos-1;
+ }
+ }
+
+ /* check further positions */
+ for (cur = 1; cur <= last_pos; cur++) {
+ const BYTE* const inr = ip + cur;
+ assert(cur < ZSTD_OPT_NUM);
+ DEBUGLOG(7, "cPos:%zi==rPos:%u", inr-istart, cur)
+
+ /* Fix current position with one literal if cheaper */
+ { U32 const litlen = (opt[cur-1].mlen == 0) ? opt[cur-1].litlen + 1 : 1;
+ int const price = opt[cur-1].price
+ + ZSTD_rawLiteralsCost(ip+cur-1, 1, optStatePtr, optLevel)
+ + ZSTD_litLengthPrice(litlen, optStatePtr, optLevel)
+ - ZSTD_litLengthPrice(litlen-1, optStatePtr, optLevel);
+ assert(price < 1000000000); /* overflow check */
+ if (price <= opt[cur].price) {
+ DEBUGLOG(7, "cPos:%zi==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)",
+ inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen,
+ opt[cur-1].rep[0], opt[cur-1].rep[1], opt[cur-1].rep[2]);
+ opt[cur].mlen = 0;
+ opt[cur].off = 0;
+ opt[cur].litlen = litlen;
+ opt[cur].price = price;
+ } else {
+ DEBUGLOG(7, "cPos:%zi==rPos:%u : literal would cost more (%.2f>%.2f) (hist:%u,%u,%u)",
+ inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price),
+ opt[cur].rep[0], opt[cur].rep[1], opt[cur].rep[2]);
+ }
+ }
+
+ /* Set the repcodes of the current position. We must do it here
+ * because we rely on the repcodes of the 2nd to last sequence being
+             * correct to set the next chunk's repcodes during the backward
+ * traversal.
+ */
+ ZSTD_STATIC_ASSERT(sizeof(opt[cur].rep) == sizeof(repcodes_t));
+ assert(cur >= opt[cur].mlen);
+ if (opt[cur].mlen != 0) {
+ U32 const prev = cur - opt[cur].mlen;
+ repcodes_t newReps = ZSTD_updateRep(opt[prev].rep, opt[cur].off, opt[cur].litlen==0);
+ ZSTD_memcpy(opt[cur].rep, &newReps, sizeof(repcodes_t));
+ } else {
+ ZSTD_memcpy(opt[cur].rep, opt[cur - 1].rep, sizeof(repcodes_t));
+ }
+
+ /* last match must start at a minimum distance of 8 from oend */
+ if (inr > ilimit) continue;
+
+ if (cur == last_pos) break;
+
+ if ( (optLevel==0) /*static_test*/
+ && (opt[cur+1].price <= opt[cur].price + (BITCOST_MULTIPLIER/2)) ) {
+ DEBUGLOG(7, "move to next rPos:%u : price is <=", cur+1);
+ continue; /* skip unpromising positions; about ~+6% speed, -0.01 ratio */
+ }
+
+ { U32 const ll0 = (opt[cur].mlen != 0);
+ U32 const litlen = (opt[cur].mlen == 0) ? opt[cur].litlen : 0;
+ U32 const previousPrice = opt[cur].price;
+ U32 const basePrice = previousPrice + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
+ U32 nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, inr, iend, dictMode, opt[cur].rep, ll0, minMatch);
+ U32 matchNb;
+
+ ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
+ (U32)(inr-istart), (U32)(iend-inr));
+
+ if (!nbMatches) {
+ DEBUGLOG(7, "rPos:%u : no match found", cur);
+ continue;
+ }
+
+ { U32 const maxML = matches[nbMatches-1].len;
+ DEBUGLOG(7, "cPos:%zi==rPos:%u, found %u matches, of maxLength=%u",
+ inr-istart, cur, nbMatches, maxML);
+
+ if ( (maxML > sufficient_len)
+ || (cur + maxML >= ZSTD_OPT_NUM) ) {
+ lastSequence.mlen = maxML;
+ lastSequence.off = matches[nbMatches-1].off;
+ lastSequence.litlen = litlen;
+                    cur -= (opt[cur].mlen==0) ? opt[cur].litlen : 0; /* last sequence is actually only literals, fix cur to last match - note : may underflow, in which case it's the first sequence, and that's okay */
+ last_pos = cur + ZSTD_totalLen(lastSequence);
+ if (cur > ZSTD_OPT_NUM) cur = 0; /* underflow => first match */
+ goto _shortestPath;
+ } }
+
+ /* set prices using matches found at position == cur */
+ for (matchNb = 0; matchNb < nbMatches; matchNb++) {
+ U32 const offset = matches[matchNb].off;
+ U32 const lastML = matches[matchNb].len;
+ U32 const startML = (matchNb>0) ? matches[matchNb-1].len+1 : minMatch;
+ U32 mlen;
+
+ DEBUGLOG(7, "testing match %u => offCode=%4u, mlen=%2u, llen=%2u",
+ matchNb, matches[matchNb].off, lastML, litlen);
+
+ for (mlen = lastML; mlen >= startML; mlen--) { /* scan downward */
+ U32 const pos = cur + mlen;
+ int const price = basePrice + ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel);
+
+ if ((pos > last_pos) || (price < opt[pos].price)) {
+ DEBUGLOG(7, "rPos:%u (ml=%2u) => new better price (%.2f<%.2f)",
+ pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
+ while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; } /* fill empty positions */
+ opt[pos].mlen = mlen;
+ opt[pos].off = offset;
+ opt[pos].litlen = litlen;
+ opt[pos].price = price;
+ } else {
+ DEBUGLOG(7, "rPos:%u (ml=%2u) => new price is worse (%.2f>=%.2f)",
+ pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
+ if (optLevel==0) break; /* early update abort; gets ~+10% speed for about -0.01 ratio loss */
+ }
+ } } }
+ } /* for (cur = 1; cur <= last_pos; cur++) */
+
+ lastSequence = opt[last_pos];
+ cur = last_pos > ZSTD_totalLen(lastSequence) ? last_pos - ZSTD_totalLen(lastSequence) : 0; /* single sequence, and it starts before `ip` */
+        assert(cur < ZSTD_OPT_NUM);  /* guard against overflow */
+
+_shortestPath: /* cur, last_pos, best_mlen, best_off have to be set */
+ assert(opt[0].mlen == 0);
+
+ /* Set the next chunk's repcodes based on the repcodes of the beginning
+ * of the last match, and the last sequence. This avoids us having to
+ * update them while traversing the sequences.
+ */
+ if (lastSequence.mlen != 0) {
+ repcodes_t reps = ZSTD_updateRep(opt[cur].rep, lastSequence.off, lastSequence.litlen==0);
+ ZSTD_memcpy(rep, &reps, sizeof(reps));
+ } else {
+ ZSTD_memcpy(rep, opt[cur].rep, sizeof(repcodes_t));
+ }
+
+ { U32 const storeEnd = cur + 1;
+ U32 storeStart = storeEnd;
+ U32 seqPos = cur;
+
+ DEBUGLOG(6, "start reverse traversal (last_pos:%u, cur:%u)",
+ last_pos, cur); (void)last_pos;
+ assert(storeEnd < ZSTD_OPT_NUM);
+ DEBUGLOG(6, "last sequence copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
+ storeEnd, lastSequence.litlen, lastSequence.mlen, lastSequence.off);
+ opt[storeEnd] = lastSequence;
+ while (seqPos > 0) {
+ U32 const backDist = ZSTD_totalLen(opt[seqPos]);
+ storeStart--;
+ DEBUGLOG(6, "sequence from rPos=%u copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
+ seqPos, storeStart, opt[seqPos].litlen, opt[seqPos].mlen, opt[seqPos].off);
+ opt[storeStart] = opt[seqPos];
+ seqPos = (seqPos > backDist) ? seqPos - backDist : 0;
+ }
+
+ /* save sequences */
+ DEBUGLOG(6, "sending selected sequences into seqStore")
+ { U32 storePos;
+ for (storePos=storeStart; storePos <= storeEnd; storePos++) {
+ U32 const llen = opt[storePos].litlen;
+ U32 const mlen = opt[storePos].mlen;
+ U32 const offCode = opt[storePos].off;
+ U32 const advance = llen + mlen;
+ DEBUGLOG(6, "considering seq starting at %zi, llen=%u, mlen=%u",
+ anchor - istart, (unsigned)llen, (unsigned)mlen);
+
+ if (mlen==0) { /* only literals => must be last "sequence", actually starting a new stream of sequences */
+ assert(storePos == storeEnd); /* must be last sequence */
+ ip = anchor + llen; /* last "sequence" is a bunch of literals => don't progress anchor */
+ continue; /* will finish */
+ }
+
+ assert(anchor + llen <= iend);
+ ZSTD_updateStats(optStatePtr, llen, anchor, offCode, mlen);
+ ZSTD_storeSeq(seqStore, llen, anchor, iend, offCode, mlen-MINMATCH);
+ anchor += advance;
+ ip = anchor;
+ } }
+ ZSTD_setBasePrices(optStatePtr, optLevel);
+ }
+ } /* while (ip < ilimit) */
+
+ /* Return the last literals size */
+ return (size_t)(iend - anchor);
+}
+
+
+size_t ZSTD_compressBlock_btopt(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+{
+ DEBUGLOG(5, "ZSTD_compressBlock_btopt");
+ return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_noDict);
+}
+
+
+/* used in 2-pass strategy */
+static U32 ZSTD_upscaleStat(unsigned* table, U32 lastEltIndex, int bonus)
+{
+ U32 s, sum=0;
+ assert(ZSTD_FREQ_DIV+bonus >= 0);
+ for (s=0; s<lastEltIndex+1; s++) {
+ table[s] <<= ZSTD_FREQ_DIV+bonus;
+ table[s]--;
+ sum += table[s];
+ }
+ return sum;
+}
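+
+/* For illustration: each count is scaled up to (count << (ZSTD_FREQ_DIV+bonus)) - 1, so with
+ * ZSTD_FREQ_DIV+bonus == 4 a count of 3 becomes 47; the returned value is the sum of the
+ * upscaled counts. */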
+
+/* used in 2-pass strategy */
+MEM_STATIC void ZSTD_upscaleStats(optState_t* optPtr)
+{
+ if (ZSTD_compressedLiterals(optPtr))
+ optPtr->litSum = ZSTD_upscaleStat(optPtr->litFreq, MaxLit, 0);
+ optPtr->litLengthSum = ZSTD_upscaleStat(optPtr->litLengthFreq, MaxLL, 0);
+ optPtr->matchLengthSum = ZSTD_upscaleStat(optPtr->matchLengthFreq, MaxML, 0);
+ optPtr->offCodeSum = ZSTD_upscaleStat(optPtr->offCodeFreq, MaxOff, 0);
+}
+
+/* ZSTD_initStats_ultra():
+ * makes a first compression pass, just to seed stats with more accurate starting values.
+ * only works on the first block, with no dictionary and no ldm.
+ * this function cannot fail, hence its contract must be respected.
+ */
+static void
+ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
+ seqStore_t* seqStore,
+ U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+{
+ U32 tmpRep[ZSTD_REP_NUM]; /* updated rep codes will sink here */
+ ZSTD_memcpy(tmpRep, rep, sizeof(tmpRep));
+
+ DEBUGLOG(4, "ZSTD_initStats_ultra (srcSize=%zu)", srcSize);
+ assert(ms->opt.litLengthSum == 0); /* first block */
+ assert(seqStore->sequences == seqStore->sequencesStart); /* no ldm */
+ assert(ms->window.dictLimit == ms->window.lowLimit); /* no dictionary */
+ assert(ms->window.dictLimit - ms->nextToUpdate <= 1); /* no prefix (note: intentional overflow, defined as 2-complement) */
+
+ ZSTD_compressBlock_opt_generic(ms, seqStore, tmpRep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict); /* generate stats into ms->opt*/
+
+ /* invalidate first scan from history */
+ ZSTD_resetSeqStore(seqStore);
+ ms->window.base -= srcSize;
+ ms->window.dictLimit += (U32)srcSize;
+ ms->window.lowLimit = ms->window.dictLimit;
+ ms->nextToUpdate = ms->window.dictLimit;
+
+    /* reinforce weight of collected statistics */
+ ZSTD_upscaleStats(&ms->opt);
+}
+
+size_t ZSTD_compressBlock_btultra(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+{
+ DEBUGLOG(5, "ZSTD_compressBlock_btultra (srcSize=%zu)", srcSize);
+ return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_btultra2(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+{
+ U32 const curr = (U32)((const BYTE*)src - ms->window.base);
+ DEBUGLOG(5, "ZSTD_compressBlock_btultra2 (srcSize=%zu)", srcSize);
+
+    /* 2-pass strategy:
+     * this strategy makes a first pass over the first block to collect statistics
+     * in order to seed the next round's statistics.
+     * After the 1st pass, the function forgets everything and starts the block anew.
+     * Consequently, this can only work if no data has been previously loaded into the tables,
+     * i.e. no dictionary, no prefix, no ldm preprocessing.
+     * The compression ratio gain is generally small (~0.5% on the first block),
+     * while the cost is 2x cpu time on the first block. */
+ assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
+ if ( (ms->opt.litLengthSum==0) /* first block */
+ && (seqStore->sequences == seqStore->sequencesStart) /* no ldm */
+ && (ms->window.dictLimit == ms->window.lowLimit) /* no dictionary */
+ && (curr == ms->window.dictLimit) /* start of frame, nothing already loaded nor skipped */
+ && (srcSize > ZSTD_PREDEF_THRESHOLD)
+ ) {
+ ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize);
+ }
+
+ return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_btopt_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_btultra_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_btopt_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_extDict);
+}
+
+size_t ZSTD_compressBlock_btultra_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_extDict);
+}
+
+/* note : no btultra2 variant for extDict nor dictMatchState,
+ * because btultra2 is not meant to work with dictionaries
+ * and only applies to the first block (which has no prefix) */
diff --git a/lib/zstd/compress/zstd_opt.h b/lib/zstd/compress/zstd_opt.h
new file mode 100644
index 00000000000000..22b862858ba7a3
--- /dev/null
+++ b/lib/zstd/compress/zstd_opt.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_OPT_H
+#define ZSTD_OPT_H
+
+
+#include "zstd_compress_internal.h"
+
+/* used in ZSTD_loadDictionaryContent() */
+void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend);
+
+size_t ZSTD_compressBlock_btopt(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_btultra(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_btultra2(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+
+size_t ZSTD_compressBlock_btopt_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_btultra_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+size_t ZSTD_compressBlock_btopt_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_btultra_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+ /* note : no btultra2 variant for extDict nor dictMatchState,
+ * because btultra2 is not meant to work with dictionaries
+ * and only applies to the first block (which has no prefix) */
+
+
+#endif /* ZSTD_OPT_H */
diff --git a/lib/zstd/decompress.c b/lib/zstd/decompress.c
deleted file mode 100644
index 66cd487a326a81..00000000000000
--- a/lib/zstd/decompress.c
+++ /dev/null
@@ -1,2531 +0,0 @@
-/**
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of https://github.com/facebook/zstd.
- * An additional grant of patent rights can be found in the PATENTS file in the
- * same directory.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation. This program is dual-licensed; you may select
- * either version 2 of the GNU General Public License ("GPL") or BSD license
- * ("BSD").
- */
-
-/* ***************************************************************
-* Tuning parameters
-*****************************************************************/
-/*!
-* MAXWINDOWSIZE_DEFAULT :
-* maximum window size accepted by DStream, by default.
-* Frames requiring more memory will be rejected.
-*/
-#ifndef ZSTD_MAXWINDOWSIZE_DEFAULT
-#define ZSTD_MAXWINDOWSIZE_DEFAULT ((1 << ZSTD_WINDOWLOG_MAX) + 1) /* defined within zstd.h */
-#endif
-
-/*-*******************************************************
-* Dependencies
-*********************************************************/
-#include "fse.h"
-#include "huf.h"
-#include "mem.h" /* low level memory routines */
-#include "zstd_internal.h"
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/string.h> /* memcpy, memmove, memset */
-
-#define ZSTD_PREFETCH(ptr) __builtin_prefetch(ptr, 0, 0)
-
-/*-*************************************
-* Macros
-***************************************/
-#define ZSTD_isError ERR_isError /* for inlining */
-#define FSE_isError ERR_isError
-#define HUF_isError ERR_isError
-
-/*_*******************************************************
-* Memory operations
-**********************************************************/
-static void ZSTD_copy4(void *dst, const void *src) { memcpy(dst, src, 4); }
-
-/*-*************************************************************
-* Context management
-***************************************************************/
-typedef enum {
- ZSTDds_getFrameHeaderSize,
- ZSTDds_decodeFrameHeader,
- ZSTDds_decodeBlockHeader,
- ZSTDds_decompressBlock,
- ZSTDds_decompressLastBlock,
- ZSTDds_checkChecksum,
- ZSTDds_decodeSkippableHeader,
- ZSTDds_skipFrame
-} ZSTD_dStage;
-
-typedef struct {
- FSE_DTable LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)];
- FSE_DTable OFTable[FSE_DTABLE_SIZE_U32(OffFSELog)];
- FSE_DTable MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)];
- HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)]; /* can accommodate HUF_decompress4X */
- U64 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32 / 2];
- U32 rep[ZSTD_REP_NUM];
-} ZSTD_entropyTables_t;
-
-struct ZSTD_DCtx_s {
- const FSE_DTable *LLTptr;
- const FSE_DTable *MLTptr;
- const FSE_DTable *OFTptr;
- const HUF_DTable *HUFptr;
- ZSTD_entropyTables_t entropy;
- const void *previousDstEnd; /* detect continuity */
- const void *base; /* start of curr segment */
- const void *vBase; /* virtual start of previous segment if it was just before curr one */
- const void *dictEnd; /* end of previous segment */
- size_t expected;
- ZSTD_frameParams fParams;
- blockType_e bType; /* used in ZSTD_decompressContinue(), to transfer blockType between header decoding and block decoding stages */
- ZSTD_dStage stage;
- U32 litEntropy;
- U32 fseEntropy;
- struct xxh64_state xxhState;
- size_t headerSize;
- U32 dictID;
- const BYTE *litPtr;
- ZSTD_customMem customMem;
- size_t litSize;
- size_t rleSize;
- BYTE litBuffer[ZSTD_BLOCKSIZE_ABSOLUTEMAX + WILDCOPY_OVERLENGTH];
- BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX];
-}; /* typedef'd to ZSTD_DCtx within "zstd.h" */
-
-size_t ZSTD_DCtxWorkspaceBound(void) { return ZSTD_ALIGN(sizeof(ZSTD_stack)) + ZSTD_ALIGN(sizeof(ZSTD_DCtx)); }
-
-size_t ZSTD_decompressBegin(ZSTD_DCtx *dctx)
-{
- dctx->expected = ZSTD_frameHeaderSize_prefix;
- dctx->stage = ZSTDds_getFrameHeaderSize;
- dctx->previousDstEnd = NULL;
- dctx->base = NULL;
- dctx->vBase = NULL;
- dctx->dictEnd = NULL;
- dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
- dctx->litEntropy = dctx->fseEntropy = 0;
- dctx->dictID = 0;
- ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue));
- memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */
- dctx->LLTptr = dctx->entropy.LLTable;
- dctx->MLTptr = dctx->entropy.MLTable;
- dctx->OFTptr = dctx->entropy.OFTable;
- dctx->HUFptr = dctx->entropy.hufTable;
- return 0;
-}
-
-ZSTD_DCtx *ZSTD_createDCtx_advanced(ZSTD_customMem customMem)
-{
- ZSTD_DCtx *dctx;
-
- if (!customMem.customAlloc || !customMem.customFree)
- return NULL;
-
- dctx = (ZSTD_DCtx *)ZSTD_malloc(sizeof(ZSTD_DCtx), customMem);
- if (!dctx)
- return NULL;
- memcpy(&dctx->customMem, &customMem, sizeof(customMem));
- ZSTD_decompressBegin(dctx);
- return dctx;
-}
-
-ZSTD_DCtx *ZSTD_initDCtx(void *workspace, size_t workspaceSize)
-{
- ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
- return ZSTD_createDCtx_advanced(stackMem);
-}
-
-size_t ZSTD_freeDCtx(ZSTD_DCtx *dctx)
-{
- if (dctx == NULL)
- return 0; /* support free on NULL */
- ZSTD_free(dctx, dctx->customMem);
- return 0; /* reserved as a potential error code in the future */
-}
-
-void ZSTD_copyDCtx(ZSTD_DCtx *dstDCtx, const ZSTD_DCtx *srcDCtx)
-{
- size_t const workSpaceSize = (ZSTD_BLOCKSIZE_ABSOLUTEMAX + WILDCOPY_OVERLENGTH) + ZSTD_frameHeaderSize_max;
- memcpy(dstDCtx, srcDCtx, sizeof(ZSTD_DCtx) - workSpaceSize); /* no need to copy workspace */
-}
-
-static void ZSTD_refDDict(ZSTD_DCtx *dstDCtx, const ZSTD_DDict *ddict);
-
-/*-*************************************************************
-* Decompression section
-***************************************************************/
-
-/*! ZSTD_isFrame() :
- * Tells if the content of `buffer` starts with a valid Frame Identifier.
- * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
- * Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
- * Note 3 : Skippable Frame Identifiers are considered valid. */
-unsigned ZSTD_isFrame(const void *buffer, size_t size)
-{
- if (size < 4)
- return 0;
- {
- U32 const magic = ZSTD_readLE32(buffer);
- if (magic == ZSTD_MAGICNUMBER)
- return 1;
- if ((magic & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START)
- return 1;
- }
- return 0;
-}
-
-/** ZSTD_frameHeaderSize() :
-* srcSize must be >= ZSTD_frameHeaderSize_prefix.
-* @return : size of the Frame Header */
-static size_t ZSTD_frameHeaderSize(const void *src, size_t srcSize)
-{
- if (srcSize < ZSTD_frameHeaderSize_prefix)
- return ERROR(srcSize_wrong);
- {
- BYTE const fhd = ((const BYTE *)src)[4];
- U32 const dictID = fhd & 3;
- U32 const singleSegment = (fhd >> 5) & 1;
- U32 const fcsId = fhd >> 6;
- return ZSTD_frameHeaderSize_prefix + !singleSegment + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId] + (singleSegment && !fcsId);
- }
-}
-
-/** ZSTD_getFrameParams() :
-* decode Frame Header, or require larger `srcSize`.
-* @return : 0, `fparamsPtr` is correctly filled,
-* >0, `srcSize` is too small, result is expected `srcSize`,
-* or an error code, which can be tested using ZSTD_isError() */
-size_t ZSTD_getFrameParams(ZSTD_frameParams *fparamsPtr, const void *src, size_t srcSize)
-{
- const BYTE *ip = (const BYTE *)src;
-
- if (srcSize < ZSTD_frameHeaderSize_prefix)
- return ZSTD_frameHeaderSize_prefix;
- if (ZSTD_readLE32(src) != ZSTD_MAGICNUMBER) {
- if ((ZSTD_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
- if (srcSize < ZSTD_skippableHeaderSize)
- return ZSTD_skippableHeaderSize; /* magic number + skippable frame length */
- memset(fparamsPtr, 0, sizeof(*fparamsPtr));
- fparamsPtr->frameContentSize = ZSTD_readLE32((const char *)src + 4);
- fparamsPtr->windowSize = 0; /* windowSize==0 means a frame is skippable */
- return 0;
- }
- return ERROR(prefix_unknown);
- }
-
- /* ensure there is enough `srcSize` to fully read/decode frame header */
- {
- size_t const fhsize = ZSTD_frameHeaderSize(src, srcSize);
- if (srcSize < fhsize)
- return fhsize;
- }
-
- {
- BYTE const fhdByte = ip[4];
- size_t pos = 5;
- U32 const dictIDSizeCode = fhdByte & 3;
- U32 const checksumFlag = (fhdByte >> 2) & 1;
- U32 const singleSegment = (fhdByte >> 5) & 1;
- U32 const fcsID = fhdByte >> 6;
- U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX;
- U32 windowSize = 0;
- U32 dictID = 0;
- U64 frameContentSize = 0;
- if ((fhdByte & 0x08) != 0)
- return ERROR(frameParameter_unsupported); /* reserved bits, which must be zero */
- if (!singleSegment) {
- BYTE const wlByte = ip[pos++];
- U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN;
- if (windowLog > ZSTD_WINDOWLOG_MAX)
- return ERROR(frameParameter_windowTooLarge); /* avoids issue with 1 << windowLog */
- windowSize = (1U << windowLog);
- windowSize += (windowSize >> 3) * (wlByte & 7);
- }
-
- switch (dictIDSizeCode) {
- default: /* impossible */
- case 0: break;
- case 1:
- dictID = ip[pos];
- pos++;
- break;
- case 2:
- dictID = ZSTD_readLE16(ip + pos);
- pos += 2;
- break;
- case 3:
- dictID = ZSTD_readLE32(ip + pos);
- pos += 4;
- break;
- }
- switch (fcsID) {
- default: /* impossible */
- case 0:
- if (singleSegment)
- frameContentSize = ip[pos];
- break;
- case 1: frameContentSize = ZSTD_readLE16(ip + pos) + 256; break;
- case 2: frameContentSize = ZSTD_readLE32(ip + pos); break;
- case 3: frameContentSize = ZSTD_readLE64(ip + pos); break;
- }
- if (!windowSize)
- windowSize = (U32)frameContentSize;
- if (windowSize > windowSizeMax)
- return ERROR(frameParameter_windowTooLarge);
- fparamsPtr->frameContentSize = frameContentSize;
- fparamsPtr->windowSize = windowSize;
- fparamsPtr->dictID = dictID;
- fparamsPtr->checksumFlag = checksumFlag;
- }
- return 0;
-}
-
-/** ZSTD_getFrameContentSize() :
-* compatible with legacy mode
-* @return : decompressed size of the single frame pointed to be `src` if known, otherwise
-* - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
-* - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */
-unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize)
-{
- {
- ZSTD_frameParams fParams;
- if (ZSTD_getFrameParams(&fParams, src, srcSize) != 0)
- return ZSTD_CONTENTSIZE_ERROR;
- if (fParams.windowSize == 0) {
- /* Either skippable or empty frame, size == 0 either way */
- return 0;
- } else if (fParams.frameContentSize != 0) {
- return fParams.frameContentSize;
- } else {
- return ZSTD_CONTENTSIZE_UNKNOWN;
- }
- }
-}
-
-/** ZSTD_findDecompressedSize() :
- * compatible with legacy mode
- * `srcSize` must be the exact length of some number of ZSTD compressed and/or
- * skippable frames
- * @return : decompressed size of the frames contained */
-unsigned long long ZSTD_findDecompressedSize(const void *src, size_t srcSize)
-{
- {
- unsigned long long totalDstSize = 0;
- while (srcSize >= ZSTD_frameHeaderSize_prefix) {
- const U32 magicNumber = ZSTD_readLE32(src);
-
- if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
- size_t skippableSize;
- if (srcSize < ZSTD_skippableHeaderSize)
- return ERROR(srcSize_wrong);
- skippableSize = ZSTD_readLE32((const BYTE *)src + 4) + ZSTD_skippableHeaderSize;
- if (srcSize < skippableSize) {
- return ZSTD_CONTENTSIZE_ERROR;
- }
-
- src = (const BYTE *)src + skippableSize;
- srcSize -= skippableSize;
- continue;
- }
-
- {
- unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
- if (ret >= ZSTD_CONTENTSIZE_ERROR)
- return ret;
-
- /* check for overflow */
- if (totalDstSize + ret < totalDstSize)
- return ZSTD_CONTENTSIZE_ERROR;
- totalDstSize += ret;
- }
- {
- size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize);
- if (ZSTD_isError(frameSrcSize)) {
- return ZSTD_CONTENTSIZE_ERROR;
- }
-
- src = (const BYTE *)src + frameSrcSize;
- srcSize -= frameSrcSize;
- }
- }
-
- if (srcSize) {
- return ZSTD_CONTENTSIZE_ERROR;
- }
-
- return totalDstSize;
- }
-}
-
-/** ZSTD_decodeFrameHeader() :
-* `headerSize` must be the size provided by ZSTD_frameHeaderSize().
-* @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */
-static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx *dctx, const void *src, size_t headerSize)
-{
- size_t const result = ZSTD_getFrameParams(&(dctx->fParams), src, headerSize);
- if (ZSTD_isError(result))
- return result; /* invalid header */
- if (result > 0)
- return ERROR(srcSize_wrong); /* headerSize too small */
- if (dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID))
- return ERROR(dictionary_wrong);
- if (dctx->fParams.checksumFlag)
- xxh64_reset(&dctx->xxhState, 0);
- return 0;
-}
-
-typedef struct {
- blockType_e blockType;
- U32 lastBlock;
- U32 origSize;
-} blockProperties_t;
-
-/*! ZSTD_getcBlockSize() :
-* Provides the size of compressed block from block header `src` */
-size_t ZSTD_getcBlockSize(const void *src, size_t srcSize, blockProperties_t *bpPtr)
-{
- if (srcSize < ZSTD_blockHeaderSize)
- return ERROR(srcSize_wrong);
- {
- U32 const cBlockHeader = ZSTD_readLE24(src);
- U32 const cSize = cBlockHeader >> 3;
- bpPtr->lastBlock = cBlockHeader & 1;
- bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3);
- bpPtr->origSize = cSize; /* only useful for RLE */
- if (bpPtr->blockType == bt_rle)
- return 1;
- if (bpPtr->blockType == bt_reserved)
- return ERROR(corruption_detected);
- return cSize;
- }
-}
-
-static size_t ZSTD_copyRawBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
-{
- if (srcSize > dstCapacity)
- return ERROR(dstSize_tooSmall);
- memcpy(dst, src, srcSize);
- return srcSize;
-}
-
-static size_t ZSTD_setRleBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize, size_t regenSize)
-{
- if (srcSize != 1)
- return ERROR(srcSize_wrong);
- if (regenSize > dstCapacity)
- return ERROR(dstSize_tooSmall);
- memset(dst, *(const BYTE *)src, regenSize);
- return regenSize;
-}
-
-/*! ZSTD_decodeLiteralsBlock() :
- @return : nb of bytes read from src (< srcSize ) */
-size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx *dctx, const void *src, size_t srcSize) /* note : srcSize < BLOCKSIZE */
-{
- if (srcSize < MIN_CBLOCK_SIZE)
- return ERROR(corruption_detected);
-
- {
- const BYTE *const istart = (const BYTE *)src;
- symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);
-
- switch (litEncType) {
- case set_repeat:
- if (dctx->litEntropy == 0)
- return ERROR(dictionary_corrupted);
- fallthrough;
- case set_compressed:
- if (srcSize < 5)
- return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */
- {
- size_t lhSize, litSize, litCSize;
- U32 singleStream = 0;
- U32 const lhlCode = (istart[0] >> 2) & 3;
- U32 const lhc = ZSTD_readLE32(istart);
- switch (lhlCode) {
- case 0:
- case 1:
- default: /* note : default is impossible, since lhlCode into [0..3] */
- /* 2 - 2 - 10 - 10 */
- singleStream = !lhlCode;
- lhSize = 3;
- litSize = (lhc >> 4) & 0x3FF;
- litCSize = (lhc >> 14) & 0x3FF;
- break;
- case 2:
- /* 2 - 2 - 14 - 14 */
- lhSize = 4;
- litSize = (lhc >> 4) & 0x3FFF;
- litCSize = lhc >> 18;
- break;
- case 3:
- /* 2 - 2 - 18 - 18 */
- lhSize = 5;
- litSize = (lhc >> 4) & 0x3FFFF;
- litCSize = (lhc >> 22) + (istart[4] << 10);
- break;
- }
- if (litSize > ZSTD_BLOCKSIZE_ABSOLUTEMAX)
- return ERROR(corruption_detected);
- if (litCSize + lhSize > srcSize)
- return ERROR(corruption_detected);
-
- if (HUF_isError(
- (litEncType == set_repeat)
- ? (singleStream ? HUF_decompress1X_usingDTable(dctx->litBuffer, litSize, istart + lhSize, litCSize, dctx->HUFptr)
- : HUF_decompress4X_usingDTable(dctx->litBuffer, litSize, istart + lhSize, litCSize, dctx->HUFptr))
- : (singleStream
- ? HUF_decompress1X2_DCtx_wksp(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart + lhSize, litCSize,
- dctx->entropy.workspace, sizeof(dctx->entropy.workspace))
- : HUF_decompress4X_hufOnly_wksp(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart + lhSize, litCSize,
- dctx->entropy.workspace, sizeof(dctx->entropy.workspace)))))
- return ERROR(corruption_detected);
-
- dctx->litPtr = dctx->litBuffer;
- dctx->litSize = litSize;
- dctx->litEntropy = 1;
- if (litEncType == set_compressed)
- dctx->HUFptr = dctx->entropy.hufTable;
- memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
- return litCSize + lhSize;
- }
-
- case set_basic: {
- size_t litSize, lhSize;
- U32 const lhlCode = ((istart[0]) >> 2) & 3;
- switch (lhlCode) {
- case 0:
- case 2:
- default: /* note : default is impossible, since lhlCode into [0..3] */
- lhSize = 1;
- litSize = istart[0] >> 3;
- break;
- case 1:
- lhSize = 2;
- litSize = ZSTD_readLE16(istart) >> 4;
- break;
- case 3:
- lhSize = 3;
- litSize = ZSTD_readLE24(istart) >> 4;
- break;
- }
-
- if (lhSize + litSize + WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */
- if (litSize + lhSize > srcSize)
- return ERROR(corruption_detected);
- memcpy(dctx->litBuffer, istart + lhSize, litSize);
- dctx->litPtr = dctx->litBuffer;
- dctx->litSize = litSize;
- memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
- return lhSize + litSize;
- }
- /* direct reference into compressed stream */
- dctx->litPtr = istart + lhSize;
- dctx->litSize = litSize;
- return lhSize + litSize;
- }
-
- case set_rle: {
- U32 const lhlCode = ((istart[0]) >> 2) & 3;
- size_t litSize, lhSize;
- switch (lhlCode) {
- case 0:
- case 2:
- default: /* note : default is impossible, since lhlCode into [0..3] */
- lhSize = 1;
- litSize = istart[0] >> 3;
- break;
- case 1:
- lhSize = 2;
- litSize = ZSTD_readLE16(istart) >> 4;
- break;
- case 3:
- lhSize = 3;
- litSize = ZSTD_readLE24(istart) >> 4;
- if (srcSize < 4)
- return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */
- break;
- }
- if (litSize > ZSTD_BLOCKSIZE_ABSOLUTEMAX)
- return ERROR(corruption_detected);
- memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
- dctx->litPtr = dctx->litBuffer;
- dctx->litSize = litSize;
- return lhSize + 1;
- }
- default:
- return ERROR(corruption_detected); /* impossible */
- }
- }
-}
-
-typedef union {
- FSE_decode_t realData;
- U32 alignedBy4;
-} FSE_decode_t4;
-
-static const FSE_decode_t4 LL_defaultDTable[(1 << LL_DEFAULTNORMLOG) + 1] = {
- {{LL_DEFAULTNORMLOG, 1, 1}}, /* header : tableLog, fastMode, fastMode */
- {{0, 0, 4}}, /* 0 : base, symbol, bits */
- {{16, 0, 4}},
- {{32, 1, 5}},
- {{0, 3, 5}},
- {{0, 4, 5}},
- {{0, 6, 5}},
- {{0, 7, 5}},
- {{0, 9, 5}},
- {{0, 10, 5}},
- {{0, 12, 5}},
- {{0, 14, 6}},
- {{0, 16, 5}},
- {{0, 18, 5}},
- {{0, 19, 5}},
- {{0, 21, 5}},
- {{0, 22, 5}},
- {{0, 24, 5}},
- {{32, 25, 5}},
- {{0, 26, 5}},
- {{0, 27, 6}},
- {{0, 29, 6}},
- {{0, 31, 6}},
- {{32, 0, 4}},
- {{0, 1, 4}},
- {{0, 2, 5}},
- {{32, 4, 5}},
- {{0, 5, 5}},
- {{32, 7, 5}},
- {{0, 8, 5}},
- {{32, 10, 5}},
- {{0, 11, 5}},
- {{0, 13, 6}},
- {{32, 16, 5}},
- {{0, 17, 5}},
- {{32, 19, 5}},
- {{0, 20, 5}},
- {{32, 22, 5}},
- {{0, 23, 5}},
- {{0, 25, 4}},
- {{16, 25, 4}},
- {{32, 26, 5}},
- {{0, 28, 6}},
- {{0, 30, 6}},
- {{48, 0, 4}},
- {{16, 1, 4}},
- {{32, 2, 5}},
- {{32, 3, 5}},
- {{32, 5, 5}},
- {{32, 6, 5}},
- {{32, 8, 5}},
- {{32, 9, 5}},
- {{32, 11, 5}},
- {{32, 12, 5}},
- {{0, 15, 6}},
- {{32, 17, 5}},
- {{32, 18, 5}},
- {{32, 20, 5}},
- {{32, 21, 5}},
- {{32, 23, 5}},
- {{32, 24, 5}},
- {{0, 35, 6}},
- {{0, 34, 6}},
- {{0, 33, 6}},
- {{0, 32, 6}},
-}; /* LL_defaultDTable */
-
-static const FSE_decode_t4 ML_defaultDTable[(1 << ML_DEFAULTNORMLOG) + 1] = {
- {{ML_DEFAULTNORMLOG, 1, 1}}, /* header : tableLog, fastMode, fastMode */
- {{0, 0, 6}}, /* 0 : base, symbol, bits */
- {{0, 1, 4}},
- {{32, 2, 5}},
- {{0, 3, 5}},
- {{0, 5, 5}},
- {{0, 6, 5}},
- {{0, 8, 5}},
- {{0, 10, 6}},
- {{0, 13, 6}},
- {{0, 16, 6}},
- {{0, 19, 6}},
- {{0, 22, 6}},
- {{0, 25, 6}},
- {{0, 28, 6}},
- {{0, 31, 6}},
- {{0, 33, 6}},
- {{0, 35, 6}},
- {{0, 37, 6}},
- {{0, 39, 6}},
- {{0, 41, 6}},
- {{0, 43, 6}},
- {{0, 45, 6}},
- {{16, 1, 4}},
- {{0, 2, 4}},
- {{32, 3, 5}},
- {{0, 4, 5}},
- {{32, 6, 5}},
- {{0, 7, 5}},
- {{0, 9, 6}},
- {{0, 12, 6}},
- {{0, 15, 6}},
- {{0, 18, 6}},
- {{0, 21, 6}},
- {{0, 24, 6}},
- {{0, 27, 6}},
- {{0, 30, 6}},
- {{0, 32, 6}},
- {{0, 34, 6}},
- {{0, 36, 6}},
- {{0, 38, 6}},
- {{0, 40, 6}},
- {{0, 42, 6}},
- {{0, 44, 6}},
- {{32, 1, 4}},
- {{48, 1, 4}},
- {{16, 2, 4}},
- {{32, 4, 5}},
- {{32, 5, 5}},
- {{32, 7, 5}},
- {{32, 8, 5}},
- {{0, 11, 6}},
- {{0, 14, 6}},
- {{0, 17, 6}},
- {{0, 20, 6}},
- {{0, 23, 6}},
- {{0, 26, 6}},
- {{0, 29, 6}},
- {{0, 52, 6}},
- {{0, 51, 6}},
- {{0, 50, 6}},
- {{0, 49, 6}},
- {{0, 48, 6}},
- {{0, 47, 6}},
- {{0, 46, 6}},
-}; /* ML_defaultDTable */
-
-static const FSE_decode_t4 OF_defaultDTable[(1 << OF_DEFAULTNORMLOG) + 1] = {
- {{OF_DEFAULTNORMLOG, 1, 1}}, /* header : tableLog, fastMode, fastMode */
- {{0, 0, 5}}, /* 0 : base, symbol, bits */
- {{0, 6, 4}},
- {{0, 9, 5}},
- {{0, 15, 5}},
- {{0, 21, 5}},
- {{0, 3, 5}},
- {{0, 7, 4}},
- {{0, 12, 5}},
- {{0, 18, 5}},
- {{0, 23, 5}},
- {{0, 5, 5}},
- {{0, 8, 4}},
- {{0, 14, 5}},
- {{0, 20, 5}},
- {{0, 2, 5}},
- {{16, 7, 4}},
- {{0, 11, 5}},
- {{0, 17, 5}},
- {{0, 22, 5}},
- {{0, 4, 5}},
- {{16, 8, 4}},
- {{0, 13, 5}},
- {{0, 19, 5}},
- {{0, 1, 5}},
- {{16, 6, 4}},
- {{0, 10, 5}},
- {{0, 16, 5}},
- {{0, 28, 5}},
- {{0, 27, 5}},
- {{0, 26, 5}},
- {{0, 25, 5}},
- {{0, 24, 5}},
-}; /* OF_defaultDTable */
-
-/*! ZSTD_buildSeqTable() :
- @return : nb bytes read from src,
- or an error code if it fails, testable with ZSTD_isError()
-*/
-static size_t ZSTD_buildSeqTable(FSE_DTable *DTableSpace, const FSE_DTable **DTablePtr, symbolEncodingType_e type, U32 max, U32 maxLog, const void *src,
- size_t srcSize, const FSE_decode_t4 *defaultTable, U32 flagRepeatTable, void *workspace, size_t workspaceSize)
-{
- const void *const tmpPtr = defaultTable; /* bypass strict aliasing */
- switch (type) {
- case set_rle:
- if (!srcSize)
- return ERROR(srcSize_wrong);
- if ((*(const BYTE *)src) > max)
- return ERROR(corruption_detected);
- FSE_buildDTable_rle(DTableSpace, *(const BYTE *)src);
- *DTablePtr = DTableSpace;
- return 1;
- case set_basic: *DTablePtr = (const FSE_DTable *)tmpPtr; return 0;
- case set_repeat:
- if (!flagRepeatTable)
- return ERROR(corruption_detected);
- return 0;
- default: /* impossible */
- case set_compressed: {
- U32 tableLog;
- S16 *norm = (S16 *)workspace;
- size_t const spaceUsed32 = ALIGN(sizeof(S16) * (MaxSeq + 1), sizeof(U32)) >> 2;
-
- if ((spaceUsed32 << 2) > workspaceSize)
- return ERROR(GENERIC);
- workspace = (U32 *)workspace + spaceUsed32;
- workspaceSize -= (spaceUsed32 << 2);
- {
- size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
- if (FSE_isError(headerSize))
- return ERROR(corruption_detected);
- if (tableLog > maxLog)
- return ERROR(corruption_detected);
- FSE_buildDTable_wksp(DTableSpace, norm, max, tableLog, workspace, workspaceSize);
- *DTablePtr = DTableSpace;
- return headerSize;
- }
- }
- }
-}
-
-size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx *dctx, int *nbSeqPtr, const void *src, size_t srcSize)
-{
- const BYTE *const istart = (const BYTE *const)src;
- const BYTE *const iend = istart + srcSize;
- const BYTE *ip = istart;
-
- /* check */
- if (srcSize < MIN_SEQUENCES_SIZE)
- return ERROR(srcSize_wrong);
-
- /* SeqHead */
- {
- int nbSeq = *ip++;
- if (!nbSeq) {
- *nbSeqPtr = 0;
- return 1;
- }
- if (nbSeq > 0x7F) {
- if (nbSeq == 0xFF) {
- if (ip + 2 > iend)
- return ERROR(srcSize_wrong);
- nbSeq = ZSTD_readLE16(ip) + LONGNBSEQ, ip += 2;
- } else {
- if (ip >= iend)
- return ERROR(srcSize_wrong);
- nbSeq = ((nbSeq - 0x80) << 8) + *ip++;
- }
- }
- *nbSeqPtr = nbSeq;
- }
-
- /* FSE table descriptors */
- if (ip + 4 > iend)
- return ERROR(srcSize_wrong); /* minimum possible size */
- {
- symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
- symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
- symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
- ip++;
-
- /* Build DTables */
- {
- size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr, LLtype, MaxLL, LLFSELog, ip, iend - ip,
- LL_defaultDTable, dctx->fseEntropy, dctx->entropy.workspace, sizeof(dctx->entropy.workspace));
- if (ZSTD_isError(llhSize))
- return ERROR(corruption_detected);
- ip += llhSize;
- }
- {
- size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr, OFtype, MaxOff, OffFSELog, ip, iend - ip,
- OF_defaultDTable, dctx->fseEntropy, dctx->entropy.workspace, sizeof(dctx->entropy.workspace));
- if (ZSTD_isError(ofhSize))
- return ERROR(corruption_detected);
- ip += ofhSize;
- }
- {
- size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr, MLtype, MaxML, MLFSELog, ip, iend - ip,
- ML_defaultDTable, dctx->fseEntropy, dctx->entropy.workspace, sizeof(dctx->entropy.workspace));
- if (ZSTD_isError(mlhSize))
- return ERROR(corruption_detected);
- ip += mlhSize;
- }
- }
-
- return ip - istart;
-}
-
-typedef struct {
- size_t litLength;
- size_t matchLength;
- size_t offset;
- const BYTE *match;
-} seq_t;
-
-typedef struct {
- BIT_DStream_t DStream;
- FSE_DState_t stateLL;
- FSE_DState_t stateOffb;
- FSE_DState_t stateML;
- size_t prevOffset[ZSTD_REP_NUM];
- const BYTE *base;
- size_t pos;
- uPtrDiff gotoDict;
-} seqState_t;
-
-FORCE_NOINLINE
-size_t ZSTD_execSequenceLast7(BYTE *op, BYTE *const oend, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const base,
- const BYTE *const vBase, const BYTE *const dictEnd)
-{
- BYTE *const oLitEnd = op + sequence.litLength;
- size_t const sequenceLength = sequence.litLength + sequence.matchLength;
- BYTE *const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
- BYTE *const oend_w = oend - WILDCOPY_OVERLENGTH;
- const BYTE *const iLitEnd = *litPtr + sequence.litLength;
- const BYTE *match = oLitEnd - sequence.offset;
-
- /* check */
- if (oMatchEnd > oend)
- return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
- if (iLitEnd > litLimit)
- return ERROR(corruption_detected); /* over-read beyond lit buffer */
- if (oLitEnd <= oend_w)
- return ERROR(GENERIC); /* Precondition */
-
- /* copy literals */
- if (op < oend_w) {
- ZSTD_wildcopy(op, *litPtr, oend_w - op);
- *litPtr += oend_w - op;
- op = oend_w;
- }
- while (op < oLitEnd)
- *op++ = *(*litPtr)++;
-
- /* copy Match */
- if (sequence.offset > (size_t)(oLitEnd - base)) {
- /* offset beyond prefix */
- if (sequence.offset > (size_t)(oLitEnd - vBase))
- return ERROR(corruption_detected);
- match = dictEnd - (base - match);
- if (match + sequence.matchLength <= dictEnd) {
- memmove(oLitEnd, match, sequence.matchLength);
- return sequenceLength;
- }
- /* span extDict & currPrefixSegment */
- {
- size_t const length1 = dictEnd - match;
- memmove(oLitEnd, match, length1);
- op = oLitEnd + length1;
- sequence.matchLength -= length1;
- match = base;
- }
- }
- while (op < oMatchEnd)
- *op++ = *match++;
- return sequenceLength;
-}
-
-static seq_t ZSTD_decodeSequence(seqState_t *seqState)
-{
- seq_t seq;
-
- U32 const llCode = FSE_peekSymbol(&seqState->stateLL);
- U32 const mlCode = FSE_peekSymbol(&seqState->stateML);
- U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb); /* <= maxOff, by table construction */
-
- U32 const llBits = LL_bits[llCode];
- U32 const mlBits = ML_bits[mlCode];
- U32 const ofBits = ofCode;
- U32 const totalBits = llBits + mlBits + ofBits;
-
- static const U32 LL_base[MaxLL + 1] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 18,
- 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000};
-
- static const U32 ML_base[MaxML + 1] = {3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
- 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 39, 41,
- 43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803, 0x1003, 0x2003, 0x4003, 0x8003, 0x10003};
-
- static const U32 OF_base[MaxOff + 1] = {0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D, 0xFD, 0x1FD,
- 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD, 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD,
- 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD, 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD};
-
- /* sequence */
- {
- size_t offset;
- if (!ofCode)
- offset = 0;
- else {
- offset = OF_base[ofCode] + BIT_readBitsFast(&seqState->DStream, ofBits); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */
- if (ZSTD_32bits())
- BIT_reloadDStream(&seqState->DStream);
- }
-
- if (ofCode <= 1) {
- offset += (llCode == 0);
- if (offset) {
- size_t temp = (offset == 3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
- temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */
- if (offset != 1)
- seqState->prevOffset[2] = seqState->prevOffset[1];
- seqState->prevOffset[1] = seqState->prevOffset[0];
- seqState->prevOffset[0] = offset = temp;
- } else {
- offset = seqState->prevOffset[0];
- }
- } else {
- seqState->prevOffset[2] = seqState->prevOffset[1];
- seqState->prevOffset[1] = seqState->prevOffset[0];
- seqState->prevOffset[0] = offset;
- }
- seq.offset = offset;
- }
-
- seq.matchLength = ML_base[mlCode] + ((mlCode > 31) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <= 16 bits */
- if (ZSTD_32bits() && (mlBits + llBits > 24))
- BIT_reloadDStream(&seqState->DStream);
-
- seq.litLength = LL_base[llCode] + ((llCode > 15) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <= 16 bits */
- if (ZSTD_32bits() || (totalBits > 64 - 7 - (LLFSELog + MLFSELog + OffFSELog)))
- BIT_reloadDStream(&seqState->DStream);
-
- /* ANS state update */
- FSE_updateState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */
- FSE_updateState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */
- if (ZSTD_32bits())
- BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */
- FSE_updateState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */
-
- seq.match = NULL;
-
- return seq;
-}
-
-FORCE_INLINE
-size_t ZSTD_execSequence(BYTE *op, BYTE *const oend, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const base,
- const BYTE *const vBase, const BYTE *const dictEnd)
-{
- BYTE *const oLitEnd = op + sequence.litLength;
- size_t const sequenceLength = sequence.litLength + sequence.matchLength;
- BYTE *const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
- BYTE *const oend_w = oend - WILDCOPY_OVERLENGTH;
- const BYTE *const iLitEnd = *litPtr + sequence.litLength;
- const BYTE *match = oLitEnd - sequence.offset;
-
- /* check */
- if (oMatchEnd > oend)
- return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
- if (iLitEnd > litLimit)
- return ERROR(corruption_detected); /* over-read beyond lit buffer */
- if (oLitEnd > oend_w)
- return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd);
-
- /* copy Literals */
- ZSTD_copy8(op, *litPtr);
- if (sequence.litLength > 8)
- ZSTD_wildcopy(op + 8, (*litPtr) + 8,
- sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
- op = oLitEnd;
- *litPtr = iLitEnd; /* update for next sequence */
-
- /* copy Match */
- if (sequence.offset > (size_t)(oLitEnd - base)) {
- /* offset beyond prefix */
- if (sequence.offset > (size_t)(oLitEnd - vBase))
- return ERROR(corruption_detected);
- match = dictEnd + (match - base);
- if (match + sequence.matchLength <= dictEnd) {
- memmove(oLitEnd, match, sequence.matchLength);
- return sequenceLength;
- }
- /* span extDict & currPrefixSegment */
- {
- size_t const length1 = dictEnd - match;
- memmove(oLitEnd, match, length1);
- op = oLitEnd + length1;
- sequence.matchLength -= length1;
- match = base;
- if (op > oend_w || sequence.matchLength < MINMATCH) {
- U32 i;
- for (i = 0; i < sequence.matchLength; ++i)
- op[i] = match[i];
- return sequenceLength;
- }
- }
- }
- /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */
-
- /* match within prefix */
- if (sequence.offset < 8) {
- /* close range match, overlap */
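-		/* offsets < 8 would make the 8-byte copies below overlap their own output,
-		 * so the first 4 bytes are copied one at a time, then dec32table/dec64table
-		 * adjust the match pointer so the following 4- and 8-byte copies keep
-		 * replicating the short pattern correctly. */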
- static const U32 dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4}; /* added */
- static const int dec64table[] = {8, 8, 8, 7, 8, 9, 10, 11}; /* subtracted */
- int const sub2 = dec64table[sequence.offset];
- op[0] = match[0];
- op[1] = match[1];
- op[2] = match[2];
- op[3] = match[3];
- match += dec32table[sequence.offset];
- ZSTD_copy4(op + 4, match);
- match -= sub2;
- } else {
- ZSTD_copy8(op, match);
- }
- op += 8;
- match += 8;
-
- if (oMatchEnd > oend - (16 - MINMATCH)) {
- if (op < oend_w) {
- ZSTD_wildcopy(op, match, oend_w - op);
- match += oend_w - op;
- op = oend_w;
- }
- while (op < oMatchEnd)
- *op++ = *match++;
- } else {
- ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength - 8); /* works even if matchLength < 8 */
- }
- return sequenceLength;
-}
-
-static size_t ZSTD_decompressSequences(ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize)
-{
- const BYTE *ip = (const BYTE *)seqStart;
- const BYTE *const iend = ip + seqSize;
- BYTE *const ostart = (BYTE * const)dst;
- BYTE *const oend = ostart + maxDstSize;
- BYTE *op = ostart;
- const BYTE *litPtr = dctx->litPtr;
- const BYTE *const litEnd = litPtr + dctx->litSize;
- const BYTE *const base = (const BYTE *)(dctx->base);
- const BYTE *const vBase = (const BYTE *)(dctx->vBase);
- const BYTE *const dictEnd = (const BYTE *)(dctx->dictEnd);
- int nbSeq;
-
- /* Build Decoding Tables */
- {
- size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize);
- if (ZSTD_isError(seqHSize))
- return seqHSize;
- ip += seqHSize;
- }
-
- /* Regen sequences */
- if (nbSeq) {
- seqState_t seqState;
- dctx->fseEntropy = 1;
- {
- U32 i;
- for (i = 0; i < ZSTD_REP_NUM; i++)
- seqState.prevOffset[i] = dctx->entropy.rep[i];
- }
- CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend - ip), corruption_detected);
- FSE_initDState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
- FSE_initDState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
- FSE_initDState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
-
- for (; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq;) {
- nbSeq--;
- {
- seq_t const sequence = ZSTD_decodeSequence(&seqState);
- size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
- if (ZSTD_isError(oneSeqSize))
- return oneSeqSize;
- op += oneSeqSize;
- }
- }
-
- /* check if reached exact end */
- if (nbSeq)
- return ERROR(corruption_detected);
- /* save reps for next block */
- {
- U32 i;
- for (i = 0; i < ZSTD_REP_NUM; i++)
- dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]);
- }
- }
-
- /* last literal segment */
- {
- size_t const lastLLSize = litEnd - litPtr;
- if (lastLLSize > (size_t)(oend - op))
- return ERROR(dstSize_tooSmall);
- memcpy(op, litPtr, lastLLSize);
- op += lastLLSize;
- }
-
- return op - ostart;
-}
-
-FORCE_INLINE seq_t ZSTD_decodeSequenceLong_generic(seqState_t *seqState, int const longOffsets)
-{
- seq_t seq;
-
- U32 const llCode = FSE_peekSymbol(&seqState->stateLL);
- U32 const mlCode = FSE_peekSymbol(&seqState->stateML);
- U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb); /* <= maxOff, by table construction */
-
- U32 const llBits = LL_bits[llCode];
- U32 const mlBits = ML_bits[mlCode];
- U32 const ofBits = ofCode;
- U32 const totalBits = llBits + mlBits + ofBits;
-
- static const U32 LL_base[MaxLL + 1] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 18,
- 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000};
-
- static const U32 ML_base[MaxML + 1] = {3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
- 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 39, 41,
- 43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803, 0x1003, 0x2003, 0x4003, 0x8003, 0x10003};
-
- static const U32 OF_base[MaxOff + 1] = {0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D, 0xFD, 0x1FD,
- 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD, 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD,
- 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD, 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD};
-
- /* sequence */
- {
- size_t offset;
- if (!ofCode)
- offset = 0;
- else {
- if (longOffsets) {
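-				/* ofBits can exceed what a single bitstream read may safely return
-				 * (STREAM_ACCUMULATOR_MIN bits), so large offsets are read in two
-				 * parts with a reload of the accumulator in between. */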
- int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN);
- offset = OF_base[ofCode] + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
- if (ZSTD_32bits() || extraBits)
- BIT_reloadDStream(&seqState->DStream);
- if (extraBits)
- offset += BIT_readBitsFast(&seqState->DStream, extraBits);
- } else {
- offset = OF_base[ofCode] + BIT_readBitsFast(&seqState->DStream, ofBits); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */
- if (ZSTD_32bits())
- BIT_reloadDStream(&seqState->DStream);
- }
- }
-
- if (ofCode <= 1) {
- offset += (llCode == 0);
- if (offset) {
- size_t temp = (offset == 3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
- temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */
- if (offset != 1)
- seqState->prevOffset[2] = seqState->prevOffset[1];
- seqState->prevOffset[1] = seqState->prevOffset[0];
- seqState->prevOffset[0] = offset = temp;
- } else {
- offset = seqState->prevOffset[0];
- }
- } else {
- seqState->prevOffset[2] = seqState->prevOffset[1];
- seqState->prevOffset[1] = seqState->prevOffset[0];
- seqState->prevOffset[0] = offset;
- }
- seq.offset = offset;
- }
-
- seq.matchLength = ML_base[mlCode] + ((mlCode > 31) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <= 16 bits */
- if (ZSTD_32bits() && (mlBits + llBits > 24))
- BIT_reloadDStream(&seqState->DStream);
-
- seq.litLength = LL_base[llCode] + ((llCode > 15) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <= 16 bits */
- if (ZSTD_32bits() || (totalBits > 64 - 7 - (LLFSELog + MLFSELog + OffFSELog)))
- BIT_reloadDStream(&seqState->DStream);
-
- {
- size_t const pos = seqState->pos + seq.litLength;
- seq.match = seqState->base + pos - seq.offset; /* single memory segment */
- if (seq.offset > pos)
- seq.match += seqState->gotoDict; /* separate memory segment */
- seqState->pos = pos + seq.matchLength;
- }
-
- /* ANS state update */
- FSE_updateState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */
- FSE_updateState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */
- if (ZSTD_32bits())
- BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */
- FSE_updateState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */
-
- return seq;
-}
-
-static seq_t ZSTD_decodeSequenceLong(seqState_t *seqState, unsigned const windowSize)
-{
- if (ZSTD_highbit32(windowSize) > STREAM_ACCUMULATOR_MIN) {
- return ZSTD_decodeSequenceLong_generic(seqState, 1);
- } else {
- return ZSTD_decodeSequenceLong_generic(seqState, 0);
- }
-}
-
-FORCE_INLINE
-size_t ZSTD_execSequenceLong(BYTE *op, BYTE *const oend, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const base,
- const BYTE *const vBase, const BYTE *const dictEnd)
-{
- BYTE *const oLitEnd = op + sequence.litLength;
- size_t const sequenceLength = sequence.litLength + sequence.matchLength;
- BYTE *const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
- BYTE *const oend_w = oend - WILDCOPY_OVERLENGTH;
- const BYTE *const iLitEnd = *litPtr + sequence.litLength;
- const BYTE *match = sequence.match;
-
- /* check */
- if (oMatchEnd > oend)
- return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
- if (iLitEnd > litLimit)
- return ERROR(corruption_detected); /* over-read beyond lit buffer */
- if (oLitEnd > oend_w)
- return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd);
-
- /* copy Literals */
- ZSTD_copy8(op, *litPtr);
- if (sequence.litLength > 8)
- ZSTD_wildcopy(op + 8, (*litPtr) + 8,
- sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
- op = oLitEnd;
- *litPtr = iLitEnd; /* update for next sequence */
-
- /* copy Match */
- if (sequence.offset > (size_t)(oLitEnd - base)) {
- /* offset beyond prefix */
- if (sequence.offset > (size_t)(oLitEnd - vBase))
- return ERROR(corruption_detected);
- if (match + sequence.matchLength <= dictEnd) {
- memmove(oLitEnd, match, sequence.matchLength);
- return sequenceLength;
- }
- /* span extDict & currPrefixSegment */
- {
- size_t const length1 = dictEnd - match;
- memmove(oLitEnd, match, length1);
- op = oLitEnd + length1;
- sequence.matchLength -= length1;
- match = base;
- if (op > oend_w || sequence.matchLength < MINMATCH) {
- U32 i;
- for (i = 0; i < sequence.matchLength; ++i)
- op[i] = match[i];
- return sequenceLength;
- }
- }
- }
- /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */
-
- /* match within prefix */
- if (sequence.offset < 8) {
- /* close range match, overlap */
- static const U32 dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4}; /* added */
- static const int dec64table[] = {8, 8, 8, 7, 8, 9, 10, 11}; /* subtracted */
- int const sub2 = dec64table[sequence.offset];
- op[0] = match[0];
- op[1] = match[1];
- op[2] = match[2];
- op[3] = match[3];
- match += dec32table[sequence.offset];
- ZSTD_copy4(op + 4, match);
- match -= sub2;
- } else {
- ZSTD_copy8(op, match);
- }
- op += 8;
- match += 8;
-
- if (oMatchEnd > oend - (16 - MINMATCH)) {
- if (op < oend_w) {
- ZSTD_wildcopy(op, match, oend_w - op);
- match += oend_w - op;
- op = oend_w;
- }
- while (op < oMatchEnd)
- *op++ = *match++;
- } else {
- ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength - 8); /* works even if matchLength < 8 */
- }
- return sequenceLength;
-}
-
-static size_t ZSTD_decompressSequencesLong(ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize)
-{
- const BYTE *ip = (const BYTE *)seqStart;
- const BYTE *const iend = ip + seqSize;
- BYTE *const ostart = (BYTE * const)dst;
- BYTE *const oend = ostart + maxDstSize;
- BYTE *op = ostart;
- const BYTE *litPtr = dctx->litPtr;
- const BYTE *const litEnd = litPtr + dctx->litSize;
- const BYTE *const base = (const BYTE *)(dctx->base);
- const BYTE *const vBase = (const BYTE *)(dctx->vBase);
- const BYTE *const dictEnd = (const BYTE *)(dctx->dictEnd);
- unsigned const windowSize = dctx->fParams.windowSize;
- int nbSeq;
-
- /* Build Decoding Tables */
- {
- size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize);
- if (ZSTD_isError(seqHSize))
- return seqHSize;
- ip += seqHSize;
- }
-
- /* Regen sequences */
- if (nbSeq) {
-#define STORED_SEQS 4
-#define STOSEQ_MASK (STORED_SEQS - 1)
-#define ADVANCED_SEQS 4
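-		/* Long-offset mode decodes ADVANCED_SEQS sequences ahead of execution and
-		 * stores them in a small ring buffer, so that each sequence's match address
-		 * can be prefetched (ZSTD_PREFETCH below) before it is actually copied. */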
- seq_t *sequences = (seq_t *)dctx->entropy.workspace;
- int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS);
- seqState_t seqState;
- int seqNb;
- ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.workspace) >= sizeof(seq_t) * STORED_SEQS);
- dctx->fseEntropy = 1;
- {
- U32 i;
- for (i = 0; i < ZSTD_REP_NUM; i++)
- seqState.prevOffset[i] = dctx->entropy.rep[i];
- }
- seqState.base = base;
- seqState.pos = (size_t)(op - base);
- seqState.gotoDict = (uPtrDiff)dictEnd - (uPtrDiff)base; /* cast to avoid undefined behaviour */
- CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend - ip), corruption_detected);
- FSE_initDState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
- FSE_initDState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
- FSE_initDState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
-
- /* prepare in advance */
- for (seqNb = 0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && seqNb < seqAdvance; seqNb++) {
- sequences[seqNb] = ZSTD_decodeSequenceLong(&seqState, windowSize);
- }
- if (seqNb < seqAdvance)
- return ERROR(corruption_detected);
-
- /* decode and decompress */
- for (; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && seqNb < nbSeq; seqNb++) {
- seq_t const sequence = ZSTD_decodeSequenceLong(&seqState, windowSize);
- size_t const oneSeqSize =
- ZSTD_execSequenceLong(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STOSEQ_MASK], &litPtr, litEnd, base, vBase, dictEnd);
- if (ZSTD_isError(oneSeqSize))
- return oneSeqSize;
- ZSTD_PREFETCH(sequence.match);
- sequences[seqNb & STOSEQ_MASK] = sequence;
- op += oneSeqSize;
- }
- if (seqNb < nbSeq)
- return ERROR(corruption_detected);
-
- /* finish queue */
- seqNb -= seqAdvance;
- for (; seqNb < nbSeq; seqNb++) {
- size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[seqNb & STOSEQ_MASK], &litPtr, litEnd, base, vBase, dictEnd);
- if (ZSTD_isError(oneSeqSize))
- return oneSeqSize;
- op += oneSeqSize;
- }
-
- /* save reps for next block */
- {
- U32 i;
- for (i = 0; i < ZSTD_REP_NUM; i++)
- dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]);
- }
- }
-
- /* last literal segment */
- {
- size_t const lastLLSize = litEnd - litPtr;
- if (lastLLSize > (size_t)(oend - op))
- return ERROR(dstSize_tooSmall);
- memcpy(op, litPtr, lastLLSize);
- op += lastLLSize;
- }
-
- return op - ostart;
-}
-
-static size_t ZSTD_decompressBlock_internal(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
-{ /* blockType == blockCompressed */
- const BYTE *ip = (const BYTE *)src;
-
- if (srcSize >= ZSTD_BLOCKSIZE_ABSOLUTEMAX)
- return ERROR(srcSize_wrong);
-
- /* Decode literals section */
- {
- size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
- if (ZSTD_isError(litCSize))
- return litCSize;
- ip += litCSize;
- srcSize -= litCSize;
- }
-	if (sizeof(size_t) > 4) /* do not enable prefetching on 32-bit x86, as it is detrimental to performance */
-					    /* likely because of register pressure */
-					    /* if that is the correct cause, then 32-bit ARM should be affected differently */
-					    /* it would be good to test this on real ARM hardware, to see whether the prefetch version improves speed */
- if (dctx->fParams.windowSize > (1 << 23))
- return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize);
- return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize);
-}
-
-static void ZSTD_checkContinuity(ZSTD_DCtx *dctx, const void *dst)
-{
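-	/* When the new output buffer is not contiguous with the previous one, the
-	 * previously written segment becomes the extDict segment (dictEnd), and
-	 * vBase is shifted so that offsets reaching into that old segment remain
-	 * valid relative to the new base. */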
- if (dst != dctx->previousDstEnd) { /* not contiguous */
- dctx->dictEnd = dctx->previousDstEnd;
- dctx->vBase = (const char *)dst - ((const char *)(dctx->previousDstEnd) - (const char *)(dctx->base));
- dctx->base = dst;
- dctx->previousDstEnd = dst;
- }
-}
-
-size_t ZSTD_decompressBlock(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
-{
- size_t dSize;
- ZSTD_checkContinuity(dctx, dst);
- dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize);
- dctx->previousDstEnd = (char *)dst + dSize;
- return dSize;
-}
-
-/** ZSTD_insertBlock() :
- insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
-size_t ZSTD_insertBlock(ZSTD_DCtx *dctx, const void *blockStart, size_t blockSize)
-{
- ZSTD_checkContinuity(dctx, blockStart);
- dctx->previousDstEnd = (const char *)blockStart + blockSize;
- return blockSize;
-}
-
-size_t ZSTD_generateNxBytes(void *dst, size_t dstCapacity, BYTE byte, size_t length)
-{
- if (length > dstCapacity)
- return ERROR(dstSize_tooSmall);
- memset(dst, byte, length);
- return length;
-}
-
-/** ZSTD_findFrameCompressedSize() :
- * compatible with legacy mode
- * `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame
- * `srcSize` must be at least as large as the frame contained
- * @return : the compressed size of the frame starting at `src` */
-size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
-{
- if (srcSize >= ZSTD_skippableHeaderSize && (ZSTD_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
- return ZSTD_skippableHeaderSize + ZSTD_readLE32((const BYTE *)src + 4);
- } else {
- const BYTE *ip = (const BYTE *)src;
- const BYTE *const ipstart = ip;
- size_t remainingSize = srcSize;
- ZSTD_frameParams fParams;
-
- size_t const headerSize = ZSTD_frameHeaderSize(ip, remainingSize);
- if (ZSTD_isError(headerSize))
- return headerSize;
-
- /* Frame Header */
- {
- size_t const ret = ZSTD_getFrameParams(&fParams, ip, remainingSize);
- if (ZSTD_isError(ret))
- return ret;
- if (ret > 0)
- return ERROR(srcSize_wrong);
- }
-
- ip += headerSize;
- remainingSize -= headerSize;
-
- /* Loop on each block */
- while (1) {
- blockProperties_t blockProperties;
- size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
- if (ZSTD_isError(cBlockSize))
- return cBlockSize;
-
- if (ZSTD_blockHeaderSize + cBlockSize > remainingSize)
- return ERROR(srcSize_wrong);
-
- ip += ZSTD_blockHeaderSize + cBlockSize;
- remainingSize -= ZSTD_blockHeaderSize + cBlockSize;
-
- if (blockProperties.lastBlock)
- break;
- }
-
- if (fParams.checksumFlag) { /* Frame content checksum */
- if (remainingSize < 4)
- return ERROR(srcSize_wrong);
- ip += 4;
- remainingSize -= 4;
- }
-
- return ip - ipstart;
- }
-}
-
-/*! ZSTD_decompressFrame() :
-* @dctx must be properly initialized */
-static size_t ZSTD_decompressFrame(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void **srcPtr, size_t *srcSizePtr)
-{
- const BYTE *ip = (const BYTE *)(*srcPtr);
- BYTE *const ostart = (BYTE * const)dst;
- BYTE *const oend = ostart + dstCapacity;
- BYTE *op = ostart;
- size_t remainingSize = *srcSizePtr;
-
- /* check */
- if (remainingSize < ZSTD_frameHeaderSize_min + ZSTD_blockHeaderSize)
- return ERROR(srcSize_wrong);
-
- /* Frame Header */
- {
- size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_frameHeaderSize_prefix);
- if (ZSTD_isError(frameHeaderSize))
- return frameHeaderSize;
- if (remainingSize < frameHeaderSize + ZSTD_blockHeaderSize)
- return ERROR(srcSize_wrong);
- CHECK_F(ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize));
- ip += frameHeaderSize;
- remainingSize -= frameHeaderSize;
- }
-
- /* Loop on each block */
- while (1) {
- size_t decodedSize;
- blockProperties_t blockProperties;
- size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
- if (ZSTD_isError(cBlockSize))
- return cBlockSize;
-
- ip += ZSTD_blockHeaderSize;
- remainingSize -= ZSTD_blockHeaderSize;
- if (cBlockSize > remainingSize)
- return ERROR(srcSize_wrong);
-
- switch (blockProperties.blockType) {
- case bt_compressed: decodedSize = ZSTD_decompressBlock_internal(dctx, op, oend - op, ip, cBlockSize); break;
- case bt_raw: decodedSize = ZSTD_copyRawBlock(op, oend - op, ip, cBlockSize); break;
- case bt_rle: decodedSize = ZSTD_generateNxBytes(op, oend - op, *ip, blockProperties.origSize); break;
- case bt_reserved:
- default: return ERROR(corruption_detected);
- }
-
- if (ZSTD_isError(decodedSize))
- return decodedSize;
- if (dctx->fParams.checksumFlag)
- xxh64_update(&dctx->xxhState, op, decodedSize);
- op += decodedSize;
- ip += cBlockSize;
- remainingSize -= cBlockSize;
- if (blockProperties.lastBlock)
- break;
- }
-
- if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */
- U32 const checkCalc = (U32)xxh64_digest(&dctx->xxhState);
- U32 checkRead;
- if (remainingSize < 4)
- return ERROR(checksum_wrong);
- checkRead = ZSTD_readLE32(ip);
- if (checkRead != checkCalc)
- return ERROR(checksum_wrong);
- ip += 4;
- remainingSize -= 4;
- }
-
- /* Allow caller to get size read */
- *srcPtr = ip;
- *srcSizePtr = remainingSize;
- return op - ostart;
-}
-
-static const void *ZSTD_DDictDictContent(const ZSTD_DDict *ddict);
-static size_t ZSTD_DDictDictSize(const ZSTD_DDict *ddict);
-
-static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize,
- const ZSTD_DDict *ddict)
-{
- void *const dststart = dst;
-
- if (ddict) {
- if (dict) {
- /* programmer error, these two cases should be mutually exclusive */
- return ERROR(GENERIC);
- }
-
- dict = ZSTD_DDictDictContent(ddict);
- dictSize = ZSTD_DDictDictSize(ddict);
- }
-
- while (srcSize >= ZSTD_frameHeaderSize_prefix) {
- U32 magicNumber;
-
- magicNumber = ZSTD_readLE32(src);
- if (magicNumber != ZSTD_MAGICNUMBER) {
- if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
- size_t skippableSize;
- if (srcSize < ZSTD_skippableHeaderSize)
- return ERROR(srcSize_wrong);
- skippableSize = ZSTD_readLE32((const BYTE *)src + 4) + ZSTD_skippableHeaderSize;
- if (srcSize < skippableSize) {
- return ERROR(srcSize_wrong);
- }
-
- src = (const BYTE *)src + skippableSize;
- srcSize -= skippableSize;
- continue;
- } else {
- return ERROR(prefix_unknown);
- }
- }
-
- if (ddict) {
- /* we were called from ZSTD_decompress_usingDDict */
- ZSTD_refDDict(dctx, ddict);
- } else {
- /* this will initialize correctly with no dict if dict == NULL, so
- * use this in all cases but ddict */
- CHECK_F(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize));
- }
- ZSTD_checkContinuity(dctx, dst);
-
- {
- const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity, &src, &srcSize);
- if (ZSTD_isError(res))
- return res;
- /* don't need to bounds check this, ZSTD_decompressFrame will have
- * already */
- dst = (BYTE *)dst + res;
- dstCapacity -= res;
- }
- }
-
- if (srcSize)
- return ERROR(srcSize_wrong); /* input not entirely consumed */
-
- return (BYTE *)dst - (BYTE *)dststart;
-}
-
-size_t ZSTD_decompress_usingDict(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize)
-{
- return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, NULL);
-}
-
-size_t ZSTD_decompressDCtx(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
-{
- return ZSTD_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, NULL, 0);
-}
-
-/*-**************************************
-* Advanced Streaming Decompression API
-* Bufferless and synchronous
-****************************************/
-size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx *dctx) { return dctx->expected; }
-
-ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx *dctx)
-{
- switch (dctx->stage) {
- default: /* should not happen */
- case ZSTDds_getFrameHeaderSize:
- case ZSTDds_decodeFrameHeader: return ZSTDnit_frameHeader;
- case ZSTDds_decodeBlockHeader: return ZSTDnit_blockHeader;
- case ZSTDds_decompressBlock: return ZSTDnit_block;
- case ZSTDds_decompressLastBlock: return ZSTDnit_lastBlock;
- case ZSTDds_checkChecksum: return ZSTDnit_checksum;
- case ZSTDds_decodeSkippableHeader:
- case ZSTDds_skipFrame: return ZSTDnit_skippableFrame;
- }
-}
-
-int ZSTD_isSkipFrame(ZSTD_DCtx *dctx) { return dctx->stage == ZSTDds_skipFrame; } /* for zbuff */
-
-/** ZSTD_decompressContinue() :
-* @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity`)
-* or an error code, which can be tested using ZSTD_isError() */
-size_t ZSTD_decompressContinue(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
-{
- /* Sanity check */
- if (srcSize != dctx->expected)
- return ERROR(srcSize_wrong);
- if (dstCapacity)
- ZSTD_checkContinuity(dctx, dst);
-
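-	/* The caller must feed exactly dctx->expected bytes per call (checked above);
-	 * each stage below consumes that input and sets dctx->expected / dctx->stage
-	 * to describe the next chunk to supply, as reported by
-	 * ZSTD_nextSrcSizeToDecompress(). */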
- switch (dctx->stage) {
- case ZSTDds_getFrameHeaderSize:
- if (srcSize != ZSTD_frameHeaderSize_prefix)
- return ERROR(srcSize_wrong); /* impossible */
- if ((ZSTD_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
- memcpy(dctx->headerBuffer, src, ZSTD_frameHeaderSize_prefix);
- dctx->expected = ZSTD_skippableHeaderSize - ZSTD_frameHeaderSize_prefix; /* magic number + skippable frame length */
- dctx->stage = ZSTDds_decodeSkippableHeader;
- return 0;
- }
- dctx->headerSize = ZSTD_frameHeaderSize(src, ZSTD_frameHeaderSize_prefix);
- if (ZSTD_isError(dctx->headerSize))
- return dctx->headerSize;
- memcpy(dctx->headerBuffer, src, ZSTD_frameHeaderSize_prefix);
- if (dctx->headerSize > ZSTD_frameHeaderSize_prefix) {
- dctx->expected = dctx->headerSize - ZSTD_frameHeaderSize_prefix;
- dctx->stage = ZSTDds_decodeFrameHeader;
- return 0;
- }
- dctx->expected = 0; /* not necessary to copy more */
- fallthrough;
-
- case ZSTDds_decodeFrameHeader:
- memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected);
- CHECK_F(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize));
- dctx->expected = ZSTD_blockHeaderSize;
- dctx->stage = ZSTDds_decodeBlockHeader;
- return 0;
-
- case ZSTDds_decodeBlockHeader: {
- blockProperties_t bp;
- size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
- if (ZSTD_isError(cBlockSize))
- return cBlockSize;
- dctx->expected = cBlockSize;
- dctx->bType = bp.blockType;
- dctx->rleSize = bp.origSize;
- if (cBlockSize) {
- dctx->stage = bp.lastBlock ? ZSTDds_decompressLastBlock : ZSTDds_decompressBlock;
- return 0;
- }
- /* empty block */
- if (bp.lastBlock) {
- if (dctx->fParams.checksumFlag) {
- dctx->expected = 4;
- dctx->stage = ZSTDds_checkChecksum;
- } else {
- dctx->expected = 0; /* end of frame */
- dctx->stage = ZSTDds_getFrameHeaderSize;
- }
- } else {
- dctx->expected = 3; /* go directly to next header */
- dctx->stage = ZSTDds_decodeBlockHeader;
- }
- return 0;
- }
- case ZSTDds_decompressLastBlock:
- case ZSTDds_decompressBlock: {
- size_t rSize;
- switch (dctx->bType) {
- case bt_compressed: rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize); break;
- case bt_raw: rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize); break;
- case bt_rle: rSize = ZSTD_setRleBlock(dst, dstCapacity, src, srcSize, dctx->rleSize); break;
- case bt_reserved: /* should never happen */
- default: return ERROR(corruption_detected);
- }
- if (ZSTD_isError(rSize))
- return rSize;
- if (dctx->fParams.checksumFlag)
- xxh64_update(&dctx->xxhState, dst, rSize);
-
- if (dctx->stage == ZSTDds_decompressLastBlock) { /* end of frame */
- if (dctx->fParams.checksumFlag) { /* another round for frame checksum */
- dctx->expected = 4;
- dctx->stage = ZSTDds_checkChecksum;
- } else {
- dctx->expected = 0; /* ends here */
- dctx->stage = ZSTDds_getFrameHeaderSize;
- }
- } else {
- dctx->stage = ZSTDds_decodeBlockHeader;
- dctx->expected = ZSTD_blockHeaderSize;
- dctx->previousDstEnd = (char *)dst + rSize;
- }
- return rSize;
- }
- case ZSTDds_checkChecksum: {
- U32 const h32 = (U32)xxh64_digest(&dctx->xxhState);
- U32 const check32 = ZSTD_readLE32(src); /* srcSize == 4, guaranteed by dctx->expected */
- if (check32 != h32)
- return ERROR(checksum_wrong);
- dctx->expected = 0;
- dctx->stage = ZSTDds_getFrameHeaderSize;
- return 0;
- }
- case ZSTDds_decodeSkippableHeader: {
- memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected);
- dctx->expected = ZSTD_readLE32(dctx->headerBuffer + 4);
- dctx->stage = ZSTDds_skipFrame;
- return 0;
- }
- case ZSTDds_skipFrame: {
- dctx->expected = 0;
- dctx->stage = ZSTDds_getFrameHeaderSize;
- return 0;
- }
- default:
- return ERROR(GENERIC); /* impossible */
- }
-}
-
-static size_t ZSTD_refDictContent(ZSTD_DCtx *dctx, const void *dict, size_t dictSize)
-{
- dctx->dictEnd = dctx->previousDstEnd;
- dctx->vBase = (const char *)dict - ((const char *)(dctx->previousDstEnd) - (const char *)(dctx->base));
- dctx->base = dict;
- dctx->previousDstEnd = (const char *)dict + dictSize;
- return 0;
-}
-
-/* ZSTD_loadEntropy() :
- * dict : must point at beginning of a valid zstd dictionary
- * @return : size of entropy tables read */
-static size_t ZSTD_loadEntropy(ZSTD_entropyTables_t *entropy, const void *const dict, size_t const dictSize)
-{
- const BYTE *dictPtr = (const BYTE *)dict;
- const BYTE *const dictEnd = dictPtr + dictSize;
-
- if (dictSize <= 8)
- return ERROR(dictionary_corrupted);
- dictPtr += 8; /* skip header = magic + dictID */
-
- {
- size_t const hSize = HUF_readDTableX4_wksp(entropy->hufTable, dictPtr, dictEnd - dictPtr, entropy->workspace, sizeof(entropy->workspace));
- if (HUF_isError(hSize))
- return ERROR(dictionary_corrupted);
- dictPtr += hSize;
- }
-
- {
- short offcodeNCount[MaxOff + 1];
- U32 offcodeMaxValue = MaxOff, offcodeLog;
- size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd - dictPtr);
- if (FSE_isError(offcodeHeaderSize))
- return ERROR(dictionary_corrupted);
- if (offcodeLog > OffFSELog)
- return ERROR(dictionary_corrupted);
- CHECK_E(FSE_buildDTable_wksp(entropy->OFTable, offcodeNCount, offcodeMaxValue, offcodeLog, entropy->workspace, sizeof(entropy->workspace)), dictionary_corrupted);
- dictPtr += offcodeHeaderSize;
- }
-
- {
- short matchlengthNCount[MaxML + 1];
- unsigned matchlengthMaxValue = MaxML, matchlengthLog;
- size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd - dictPtr);
- if (FSE_isError(matchlengthHeaderSize))
- return ERROR(dictionary_corrupted);
- if (matchlengthLog > MLFSELog)
- return ERROR(dictionary_corrupted);
- CHECK_E(FSE_buildDTable_wksp(entropy->MLTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, entropy->workspace, sizeof(entropy->workspace)), dictionary_corrupted);
- dictPtr += matchlengthHeaderSize;
- }
-
- {
- short litlengthNCount[MaxLL + 1];
- unsigned litlengthMaxValue = MaxLL, litlengthLog;
- size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd - dictPtr);
- if (FSE_isError(litlengthHeaderSize))
- return ERROR(dictionary_corrupted);
- if (litlengthLog > LLFSELog)
- return ERROR(dictionary_corrupted);
- CHECK_E(FSE_buildDTable_wksp(entropy->LLTable, litlengthNCount, litlengthMaxValue, litlengthLog, entropy->workspace, sizeof(entropy->workspace)), dictionary_corrupted);
- dictPtr += litlengthHeaderSize;
- }
-
- if (dictPtr + 12 > dictEnd)
- return ERROR(dictionary_corrupted);
- {
- int i;
- size_t const dictContentSize = (size_t)(dictEnd - (dictPtr + 12));
- for (i = 0; i < 3; i++) {
- U32 const rep = ZSTD_readLE32(dictPtr);
- dictPtr += 4;
- if (rep == 0 || rep >= dictContentSize)
- return ERROR(dictionary_corrupted);
- entropy->rep[i] = rep;
- }
- }
-
- return dictPtr - (const BYTE *)dict;
-}
-
-static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx *dctx, const void *dict, size_t dictSize)
-{
- if (dictSize < 8)
- return ZSTD_refDictContent(dctx, dict, dictSize);
- {
- U32 const magic = ZSTD_readLE32(dict);
- if (magic != ZSTD_DICT_MAGIC) {
- return ZSTD_refDictContent(dctx, dict, dictSize); /* pure content mode */
- }
- }
- dctx->dictID = ZSTD_readLE32((const char *)dict + 4);
-
- /* load entropy tables */
- {
- size_t const eSize = ZSTD_loadEntropy(&dctx->entropy, dict, dictSize);
- if (ZSTD_isError(eSize))
- return ERROR(dictionary_corrupted);
- dict = (const char *)dict + eSize;
- dictSize -= eSize;
- }
- dctx->litEntropy = dctx->fseEntropy = 1;
-
- /* reference dictionary content */
- return ZSTD_refDictContent(dctx, dict, dictSize);
-}
-
-size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx *dctx, const void *dict, size_t dictSize)
-{
- CHECK_F(ZSTD_decompressBegin(dctx));
- if (dict && dictSize)
- CHECK_E(ZSTD_decompress_insertDictionary(dctx, dict, dictSize), dictionary_corrupted);
- return 0;
-}
-
-/* ====== ZSTD_DDict ====== */
-
-struct ZSTD_DDict_s {
- void *dictBuffer;
- const void *dictContent;
- size_t dictSize;
- ZSTD_entropyTables_t entropy;
- U32 dictID;
- U32 entropyPresent;
- ZSTD_customMem cMem;
-}; /* typedef'd to ZSTD_DDict within "zstd.h" */
-
-size_t ZSTD_DDictWorkspaceBound(void) { return ZSTD_ALIGN(sizeof(ZSTD_stack)) + ZSTD_ALIGN(sizeof(ZSTD_DDict)); }
-
-static const void *ZSTD_DDictDictContent(const ZSTD_DDict *ddict) { return ddict->dictContent; }
-
-static size_t ZSTD_DDictDictSize(const ZSTD_DDict *ddict) { return ddict->dictSize; }
-
-static void ZSTD_refDDict(ZSTD_DCtx *dstDCtx, const ZSTD_DDict *ddict)
-{
- ZSTD_decompressBegin(dstDCtx); /* init */
- if (ddict) { /* support refDDict on NULL */
- dstDCtx->dictID = ddict->dictID;
- dstDCtx->base = ddict->dictContent;
- dstDCtx->vBase = ddict->dictContent;
- dstDCtx->dictEnd = (const BYTE *)ddict->dictContent + ddict->dictSize;
- dstDCtx->previousDstEnd = dstDCtx->dictEnd;
- if (ddict->entropyPresent) {
- dstDCtx->litEntropy = 1;
- dstDCtx->fseEntropy = 1;
- dstDCtx->LLTptr = ddict->entropy.LLTable;
- dstDCtx->MLTptr = ddict->entropy.MLTable;
- dstDCtx->OFTptr = ddict->entropy.OFTable;
- dstDCtx->HUFptr = ddict->entropy.hufTable;
- dstDCtx->entropy.rep[0] = ddict->entropy.rep[0];
- dstDCtx->entropy.rep[1] = ddict->entropy.rep[1];
- dstDCtx->entropy.rep[2] = ddict->entropy.rep[2];
- } else {
- dstDCtx->litEntropy = 0;
- dstDCtx->fseEntropy = 0;
- }
- }
-}
-
-static size_t ZSTD_loadEntropy_inDDict(ZSTD_DDict *ddict)
-{
- ddict->dictID = 0;
- ddict->entropyPresent = 0;
- if (ddict->dictSize < 8)
- return 0;
- {
- U32 const magic = ZSTD_readLE32(ddict->dictContent);
- if (magic != ZSTD_DICT_MAGIC)
- return 0; /* pure content mode */
- }
- ddict->dictID = ZSTD_readLE32((const char *)ddict->dictContent + 4);
-
- /* load entropy tables */
- CHECK_E(ZSTD_loadEntropy(&ddict->entropy, ddict->dictContent, ddict->dictSize), dictionary_corrupted);
- ddict->entropyPresent = 1;
- return 0;
-}
-
-static ZSTD_DDict *ZSTD_createDDict_advanced(const void *dict, size_t dictSize, unsigned byReference, ZSTD_customMem customMem)
-{
- if (!customMem.customAlloc || !customMem.customFree)
- return NULL;
-
- {
- ZSTD_DDict *const ddict = (ZSTD_DDict *)ZSTD_malloc(sizeof(ZSTD_DDict), customMem);
- if (!ddict)
- return NULL;
- ddict->cMem = customMem;
-
- if ((byReference) || (!dict) || (!dictSize)) {
- ddict->dictBuffer = NULL;
- ddict->dictContent = dict;
- } else {
- void *const internalBuffer = ZSTD_malloc(dictSize, customMem);
- if (!internalBuffer) {
- ZSTD_freeDDict(ddict);
- return NULL;
- }
- memcpy(internalBuffer, dict, dictSize);
- ddict->dictBuffer = internalBuffer;
- ddict->dictContent = internalBuffer;
- }
- ddict->dictSize = dictSize;
- ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
- /* parse dictionary content */
- {
- size_t const errorCode = ZSTD_loadEntropy_inDDict(ddict);
- if (ZSTD_isError(errorCode)) {
- ZSTD_freeDDict(ddict);
- return NULL;
- }
- }
-
- return ddict;
- }
-}
-
-/*! ZSTD_initDDict() :
-* Create a digested dictionary, to start decompression without startup delay.
-* `dict` content is referenced, not copied (the DDict is created by reference),
-* so `dict` must remain valid and unmodified for the lifetime of the `ZSTD_DDict` */
-ZSTD_DDict *ZSTD_initDDict(const void *dict, size_t dictSize, void *workspace, size_t workspaceSize)
-{
- ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
- return ZSTD_createDDict_advanced(dict, dictSize, 1, stackMem);
-}
-
-size_t ZSTD_freeDDict(ZSTD_DDict *ddict)
-{
- if (ddict == NULL)
- return 0; /* support free on NULL */
- {
- ZSTD_customMem const cMem = ddict->cMem;
- ZSTD_free(ddict->dictBuffer, cMem);
- ZSTD_free(ddict, cMem);
- return 0;
- }
-}
-
-/*! ZSTD_getDictID_fromDict() :
- * Provides the dictID stored within dictionary.
- * If @return == 0, the dictionary is not conformant with the Zstandard specification.
- * It can still be loaded, but as a content-only dictionary. */
-unsigned ZSTD_getDictID_fromDict(const void *dict, size_t dictSize)
-{
- if (dictSize < 8)
- return 0;
- if (ZSTD_readLE32(dict) != ZSTD_DICT_MAGIC)
- return 0;
- return ZSTD_readLE32((const char *)dict + 4);
-}
-
-/*! ZSTD_getDictID_fromDDict() :
- * Provides the dictID of the dictionary loaded into `ddict`.
- * If @return == 0, the dictionary is not conformant to the Zstandard specification, or is empty.
- * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
-unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict *ddict)
-{
- if (ddict == NULL)
- return 0;
- return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize);
-}
-
-/*! ZSTD_getDictID_fromFrame() :
- * Provides the dictID required to decompress the frame stored within `src`.
- * If @return == 0, the dictID could not be decoded.
 - * This could be for one of the following reasons :
 - * - The frame does not require a dictionary to be decoded (most common case).
 - * - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is hidden information.
- * Note : this use case also happens when using a non-conformant dictionary.
- * - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
- * - This is not a Zstandard frame.
 - * When identifying the exact failure cause, it's possible to use ZSTD_getFrameParams(), which will provide a more precise error code. */
-unsigned ZSTD_getDictID_fromFrame(const void *src, size_t srcSize)
-{
- ZSTD_frameParams zfp = {0, 0, 0, 0};
- size_t const hError = ZSTD_getFrameParams(&zfp, src, srcSize);
- if (ZSTD_isError(hError))
- return 0;
- return zfp.dictID;
-}
-
-/*! ZSTD_decompress_usingDDict() :
-* Decompression using a pre-digested Dictionary
-* Use dictionary without significant overhead. */
-size_t ZSTD_decompress_usingDDict(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const ZSTD_DDict *ddict)
-{
- /* pass content and size in case legacy frames are encountered */
- return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, NULL, 0, ddict);
-}
-
-/*=====================================
-* Streaming decompression
-*====================================*/
-
-typedef enum { zdss_init, zdss_loadHeader, zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage;
-
-/* *** Resource management *** */
-struct ZSTD_DStream_s {
- ZSTD_DCtx *dctx;
- ZSTD_DDict *ddictLocal;
- const ZSTD_DDict *ddict;
- ZSTD_frameParams fParams;
- ZSTD_dStreamStage stage;
- char *inBuff;
- size_t inBuffSize;
- size_t inPos;
- size_t maxWindowSize;
- char *outBuff;
- size_t outBuffSize;
- size_t outStart;
- size_t outEnd;
- size_t blockSize;
- BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX]; /* tmp buffer to store frame header */
- size_t lhSize;
- ZSTD_customMem customMem;
- void *legacyContext;
- U32 previousLegacyVersion;
- U32 legacyVersion;
- U32 hostageByte;
-}; /* typedef'd to ZSTD_DStream within "zstd.h" */
-
-size_t ZSTD_DStreamWorkspaceBound(size_t maxWindowSize)
-{
- size_t const blockSize = MIN(maxWindowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX);
- size_t const inBuffSize = blockSize;
- size_t const outBuffSize = maxWindowSize + blockSize + WILDCOPY_OVERLENGTH * 2;
- return ZSTD_DCtxWorkspaceBound() + ZSTD_ALIGN(sizeof(ZSTD_DStream)) + ZSTD_ALIGN(inBuffSize) + ZSTD_ALIGN(outBuffSize);
-}
-
-static ZSTD_DStream *ZSTD_createDStream_advanced(ZSTD_customMem customMem)
-{
- ZSTD_DStream *zds;
-
- if (!customMem.customAlloc || !customMem.customFree)
- return NULL;
-
- zds = (ZSTD_DStream *)ZSTD_malloc(sizeof(ZSTD_DStream), customMem);
- if (zds == NULL)
- return NULL;
- memset(zds, 0, sizeof(ZSTD_DStream));
- memcpy(&zds->customMem, &customMem, sizeof(ZSTD_customMem));
- zds->dctx = ZSTD_createDCtx_advanced(customMem);
- if (zds->dctx == NULL) {
- ZSTD_freeDStream(zds);
- return NULL;
- }
- zds->stage = zdss_init;
- zds->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
- return zds;
-}
-
-ZSTD_DStream *ZSTD_initDStream(size_t maxWindowSize, void *workspace, size_t workspaceSize)
-{
- ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
- ZSTD_DStream *zds = ZSTD_createDStream_advanced(stackMem);
- if (!zds) {
- return NULL;
- }
-
- zds->maxWindowSize = maxWindowSize;
- zds->stage = zdss_loadHeader;
- zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0;
- ZSTD_freeDDict(zds->ddictLocal);
- zds->ddictLocal = NULL;
- zds->ddict = zds->ddictLocal;
- zds->legacyVersion = 0;
- zds->hostageByte = 0;
-
- {
- size_t const blockSize = MIN(zds->maxWindowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX);
- size_t const neededOutSize = zds->maxWindowSize + blockSize + WILDCOPY_OVERLENGTH * 2;
-
- zds->inBuff = (char *)ZSTD_malloc(blockSize, zds->customMem);
- zds->inBuffSize = blockSize;
- zds->outBuff = (char *)ZSTD_malloc(neededOutSize, zds->customMem);
- zds->outBuffSize = neededOutSize;
- if (zds->inBuff == NULL || zds->outBuff == NULL) {
- ZSTD_freeDStream(zds);
- return NULL;
- }
- }
- return zds;
-}
-
-ZSTD_DStream *ZSTD_initDStream_usingDDict(size_t maxWindowSize, const ZSTD_DDict *ddict, void *workspace, size_t workspaceSize)
-{
- ZSTD_DStream *zds = ZSTD_initDStream(maxWindowSize, workspace, workspaceSize);
- if (zds) {
- zds->ddict = ddict;
- }
- return zds;
-}
-
-size_t ZSTD_freeDStream(ZSTD_DStream *zds)
-{
- if (zds == NULL)
- return 0; /* support free on null */
- {
- ZSTD_customMem const cMem = zds->customMem;
- ZSTD_freeDCtx(zds->dctx);
- zds->dctx = NULL;
- ZSTD_freeDDict(zds->ddictLocal);
- zds->ddictLocal = NULL;
- ZSTD_free(zds->inBuff, cMem);
- zds->inBuff = NULL;
- ZSTD_free(zds->outBuff, cMem);
- zds->outBuff = NULL;
- ZSTD_free(zds, cMem);
- return 0;
- }
-}
-
-/* *** Initialization *** */
-
-size_t ZSTD_DStreamInSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX + ZSTD_blockHeaderSize; }
-size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX; }
-
-size_t ZSTD_resetDStream(ZSTD_DStream *zds)
-{
- zds->stage = zdss_loadHeader;
- zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0;
- zds->legacyVersion = 0;
- zds->hostageByte = 0;
- return ZSTD_frameHeaderSize_prefix;
-}
-
-/* ***** Decompression ***** */
-
-ZSTD_STATIC size_t ZSTD_limitCopy(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
-{
- size_t const length = MIN(dstCapacity, srcSize);
- memcpy(dst, src, length);
- return length;
-}
-
-size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output, ZSTD_inBuffer *input)
-{
- const char *const istart = (const char *)(input->src) + input->pos;
- const char *const iend = (const char *)(input->src) + input->size;
- const char *ip = istart;
- char *const ostart = (char *)(output->dst) + output->pos;
- char *const oend = (char *)(output->dst) + output->size;
- char *op = ostart;
- U32 someMoreWork = 1;
-
- while (someMoreWork) {
- switch (zds->stage) {
- case zdss_init:
- ZSTD_resetDStream(zds); /* transparent reset on starting decoding a new frame */
- fallthrough;
-
- case zdss_loadHeader: {
- size_t const hSize = ZSTD_getFrameParams(&zds->fParams, zds->headerBuffer, zds->lhSize);
- if (ZSTD_isError(hSize))
- return hSize;
- if (hSize != 0) { /* need more input */
- size_t const toLoad = hSize - zds->lhSize; /* if hSize!=0, hSize > zds->lhSize */
- if (toLoad > (size_t)(iend - ip)) { /* not enough input to load full header */
- memcpy(zds->headerBuffer + zds->lhSize, ip, iend - ip);
- zds->lhSize += iend - ip;
- input->pos = input->size;
- return (MAX(ZSTD_frameHeaderSize_min, hSize) - zds->lhSize) +
- ZSTD_blockHeaderSize; /* remaining header bytes + next block header */
- }
- memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad);
- zds->lhSize = hSize;
- ip += toLoad;
- break;
- }
-
- /* check for single-pass mode opportunity */
- if (zds->fParams.frameContentSize && zds->fParams.windowSize /* skippable frame if == 0 */
- && (U64)(size_t)(oend - op) >= zds->fParams.frameContentSize) {
- size_t const cSize = ZSTD_findFrameCompressedSize(istart, iend - istart);
- if (cSize <= (size_t)(iend - istart)) {
- size_t const decompressedSize = ZSTD_decompress_usingDDict(zds->dctx, op, oend - op, istart, cSize, zds->ddict);
- if (ZSTD_isError(decompressedSize))
- return decompressedSize;
- ip = istart + cSize;
- op += decompressedSize;
- zds->dctx->expected = 0;
- zds->stage = zdss_init;
- someMoreWork = 0;
- break;
- }
- }
-
- /* Consume header */
- ZSTD_refDDict(zds->dctx, zds->ddict);
- {
- size_t const h1Size = ZSTD_nextSrcSizeToDecompress(zds->dctx); /* == ZSTD_frameHeaderSize_prefix */
- CHECK_F(ZSTD_decompressContinue(zds->dctx, NULL, 0, zds->headerBuffer, h1Size));
- {
- size_t const h2Size = ZSTD_nextSrcSizeToDecompress(zds->dctx);
- CHECK_F(ZSTD_decompressContinue(zds->dctx, NULL, 0, zds->headerBuffer + h1Size, h2Size));
- }
- }
-
- zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN);
- if (zds->fParams.windowSize > zds->maxWindowSize)
- return ERROR(frameParameter_windowTooLarge);
-
- /* Buffers are preallocated, but double check */
- {
- size_t const blockSize = MIN(zds->maxWindowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX);
- size_t const neededOutSize = zds->maxWindowSize + blockSize + WILDCOPY_OVERLENGTH * 2;
- if (zds->inBuffSize < blockSize) {
- return ERROR(GENERIC);
- }
- if (zds->outBuffSize < neededOutSize) {
- return ERROR(GENERIC);
- }
- zds->blockSize = blockSize;
- }
- zds->stage = zdss_read;
- }
- fallthrough;
-
- case zdss_read: {
- size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx);
- if (neededInSize == 0) { /* end of frame */
- zds->stage = zdss_init;
- someMoreWork = 0;
- break;
- }
- if ((size_t)(iend - ip) >= neededInSize) { /* decode directly from src */
- const int isSkipFrame = ZSTD_isSkipFrame(zds->dctx);
- size_t const decodedSize = ZSTD_decompressContinue(zds->dctx, zds->outBuff + zds->outStart,
- (isSkipFrame ? 0 : zds->outBuffSize - zds->outStart), ip, neededInSize);
- if (ZSTD_isError(decodedSize))
- return decodedSize;
- ip += neededInSize;
- if (!decodedSize && !isSkipFrame)
- break; /* this was just a header */
- zds->outEnd = zds->outStart + decodedSize;
- zds->stage = zdss_flush;
- break;
- }
- if (ip == iend) {
- someMoreWork = 0;
- break;
- } /* no more input */
- zds->stage = zdss_load;
- /* pass-through */
- }
- fallthrough;
-
- case zdss_load: {
- size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx);
- size_t const toLoad = neededInSize - zds->inPos; /* should always be <= remaining space within inBuff */
- size_t loadedSize;
- if (toLoad > zds->inBuffSize - zds->inPos)
- return ERROR(corruption_detected); /* should never happen */
- loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, iend - ip);
- ip += loadedSize;
- zds->inPos += loadedSize;
- if (loadedSize < toLoad) {
- someMoreWork = 0;
- break;
- } /* not enough input, wait for more */
-
- /* decode loaded input */
- {
- const int isSkipFrame = ZSTD_isSkipFrame(zds->dctx);
- size_t const decodedSize = ZSTD_decompressContinue(zds->dctx, zds->outBuff + zds->outStart, zds->outBuffSize - zds->outStart,
- zds->inBuff, neededInSize);
- if (ZSTD_isError(decodedSize))
- return decodedSize;
- zds->inPos = 0; /* input is consumed */
- if (!decodedSize && !isSkipFrame) {
- zds->stage = zdss_read;
- break;
- } /* this was just a header */
- zds->outEnd = zds->outStart + decodedSize;
- zds->stage = zdss_flush;
- /* pass-through */
- }
- }
- fallthrough;
-
- case zdss_flush: {
- size_t const toFlushSize = zds->outEnd - zds->outStart;
- size_t const flushedSize = ZSTD_limitCopy(op, oend - op, zds->outBuff + zds->outStart, toFlushSize);
- op += flushedSize;
- zds->outStart += flushedSize;
- if (flushedSize == toFlushSize) { /* flush completed */
- zds->stage = zdss_read;
- if (zds->outStart + zds->blockSize > zds->outBuffSize)
- zds->outStart = zds->outEnd = 0;
- break;
- }
- /* cannot complete flush */
- someMoreWork = 0;
- break;
- }
- default:
- return ERROR(GENERIC); /* impossible */
- }
- }
-
- /* result */
- input->pos += (size_t)(ip - istart);
- output->pos += (size_t)(op - ostart);
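-	/* Return value : 0 once a frame is fully decoded and fully flushed, otherwise
-	 * a hint of how many input bytes the next call would like to receive. The
-	 * "hostage byte" trick below keeps one input byte unconsumed while decoded
-	 * output is still waiting to be flushed, so the caller keeps calling until
-	 * the frame has been completely delivered. */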
- {
- size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds->dctx);
- if (!nextSrcSizeHint) { /* frame fully decoded */
- if (zds->outEnd == zds->outStart) { /* output fully flushed */
- if (zds->hostageByte) {
- if (input->pos >= input->size) {
- zds->stage = zdss_read;
- return 1;
- } /* can't release hostage (not present) */
- input->pos++; /* release hostage */
- }
- return 0;
- }
- if (!zds->hostageByte) { /* output not fully flushed; keep last byte as hostage; will be released when all output is flushed */
- input->pos--; /* note : pos > 0, otherwise, impossible to finish reading last block */
- zds->hostageByte = 1;
- }
- return 1;
- }
- nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds->dctx) == ZSTDnit_block); /* preload header of next block */
- if (zds->inPos > nextSrcSizeHint)
- return ERROR(GENERIC); /* should never happen */
-	nextSrcSizeHint -= zds->inPos; /* already loaded */
- return nextSrcSizeHint;
- }
-}
-
-EXPORT_SYMBOL(ZSTD_DCtxWorkspaceBound);
-EXPORT_SYMBOL(ZSTD_initDCtx);
-EXPORT_SYMBOL(ZSTD_decompressDCtx);
-EXPORT_SYMBOL(ZSTD_decompress_usingDict);
-
-EXPORT_SYMBOL(ZSTD_DDictWorkspaceBound);
-EXPORT_SYMBOL(ZSTD_initDDict);
-EXPORT_SYMBOL(ZSTD_decompress_usingDDict);
-
-EXPORT_SYMBOL(ZSTD_DStreamWorkspaceBound);
-EXPORT_SYMBOL(ZSTD_initDStream);
-EXPORT_SYMBOL(ZSTD_initDStream_usingDDict);
-EXPORT_SYMBOL(ZSTD_resetDStream);
-EXPORT_SYMBOL(ZSTD_decompressStream);
-EXPORT_SYMBOL(ZSTD_DStreamInSize);
-EXPORT_SYMBOL(ZSTD_DStreamOutSize);
-
-EXPORT_SYMBOL(ZSTD_findFrameCompressedSize);
-EXPORT_SYMBOL(ZSTD_getFrameContentSize);
-EXPORT_SYMBOL(ZSTD_findDecompressedSize);
-
-EXPORT_SYMBOL(ZSTD_isFrame);
-EXPORT_SYMBOL(ZSTD_getDictID_fromDict);
-EXPORT_SYMBOL(ZSTD_getDictID_fromDDict);
-EXPORT_SYMBOL(ZSTD_getDictID_fromFrame);
-
-EXPORT_SYMBOL(ZSTD_getFrameParams);
-EXPORT_SYMBOL(ZSTD_decompressBegin);
-EXPORT_SYMBOL(ZSTD_decompressBegin_usingDict);
-EXPORT_SYMBOL(ZSTD_copyDCtx);
-EXPORT_SYMBOL(ZSTD_nextSrcSizeToDecompress);
-EXPORT_SYMBOL(ZSTD_decompressContinue);
-EXPORT_SYMBOL(ZSTD_nextInputType);
-
-EXPORT_SYMBOL(ZSTD_decompressBlock);
-EXPORT_SYMBOL(ZSTD_insertBlock);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("Zstd Decompressor");
diff --git a/lib/zstd/decompress/huf_decompress.c b/lib/zstd/decompress/huf_decompress.c
new file mode 100644
index 00000000000000..5105e59ac04a8d
--- /dev/null
+++ b/lib/zstd/decompress/huf_decompress.c
@@ -0,0 +1,1206 @@
+/* ******************************************************************
+ * huff0 huffman decoder,
+ * part of Finite State Entropy library
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ *
+ * You can contact the author at :
+ * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */
+
+/* **************************************************************
+* Dependencies
+****************************************************************/
+#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memset */
+#include "../common/compiler.h"
+#include "../common/bitstream.h" /* BIT_* */
+#include "../common/fse.h" /* to compress headers */
+#define HUF_STATIC_LINKING_ONLY
+#include "../common/huf.h"
+#include "../common/error_private.h"
+
+/* **************************************************************
+* Macros
+****************************************************************/
+
+/* These two optional macros force the use of one or the other of the two
+ * Huffman decompression implementations. You can't force in both directions
+ * at the same time.
+ */
+#if defined(HUF_FORCE_DECOMPRESS_X1) && \
+ defined(HUF_FORCE_DECOMPRESS_X2)
+#error "Cannot force the use of the X1 and X2 decoders at the same time!"
+#endif
+
+
+/* **************************************************************
+* Error Management
+****************************************************************/
+#define HUF_isError ERR_isError
+
+
+/* **************************************************************
+* Byte alignment for workSpace management
+****************************************************************/
+#define HUF_ALIGN(x, a) HUF_ALIGN_MASK((x), (a) - 1)
+#define HUF_ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
+
+
+/* **************************************************************
+* BMI2 Variant Wrappers
+****************************************************************/
+#if DYNAMIC_BMI2
+
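+/* HUF_DGEN(fn) instantiates fn##_body twice : a portable fn##_default and a
+ * BMI2-targeted fn##_bmi2, plus a small dispatcher fn() that picks one of the
+ * two at run time based on the bmi2 flag passed by the caller. */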
+#define HUF_DGEN(fn) \
+ \
+ static size_t fn##_default( \
+ void* dst, size_t dstSize, \
+ const void* cSrc, size_t cSrcSize, \
+ const HUF_DTable* DTable) \
+ { \
+ return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \
+ } \
+ \
+ static TARGET_ATTRIBUTE("bmi2") size_t fn##_bmi2( \
+ void* dst, size_t dstSize, \
+ const void* cSrc, size_t cSrcSize, \
+ const HUF_DTable* DTable) \
+ { \
+ return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \
+ } \
+ \
+ static size_t fn(void* dst, size_t dstSize, void const* cSrc, \
+ size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \
+ { \
+ if (bmi2) { \
+ return fn##_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); \
+ } \
+ return fn##_default(dst, dstSize, cSrc, cSrcSize, DTable); \
+ }
+
+#else
+
+#define HUF_DGEN(fn) \
+ static size_t fn(void* dst, size_t dstSize, void const* cSrc, \
+ size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \
+ { \
+ (void)bmi2; \
+ return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \
+ }
+
+#endif
+
+
+/*-***************************/
+/* generic DTableDesc */
+/*-***************************/
+typedef struct { BYTE maxTableLog; BYTE tableType; BYTE tableLog; BYTE reserved; } DTableDesc;
+
+static DTableDesc HUF_getDTableDesc(const HUF_DTable* table)
+{
+ DTableDesc dtd;
+ ZSTD_memcpy(&dtd, table, sizeof(dtd));
+ return dtd;
+}
+
+
+#ifndef HUF_FORCE_DECOMPRESS_X2
+
+/*-***************************/
+/* single-symbol decoding */
+/*-***************************/
+typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX1; /* single-symbol decoding */
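+/* An X1 DTable holds one HUF_DEltX1 per possible tableLog-bit prefix : a
+ * lookup of the next tableLog bits of the stream yields the decoded byte and
+ * the number of bits actually consumed by that symbol. */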
+
+/*
+ * Packs 4 HUF_DEltX1 structs into a U64. This is used to lay down 4 entries at
+ * a time.
+ */
+static U64 HUF_DEltX1_set4(BYTE symbol, BYTE nbBits) {
+ U64 D4;
+ if (MEM_isLittleEndian()) {
+ D4 = symbol + (nbBits << 8);
+ } else {
+ D4 = (symbol << 8) + nbBits;
+ }
+ D4 *= 0x0001000100010001ULL;
+ return D4;
+}
+
+typedef struct {
+ U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1];
+ U32 rankStart[HUF_TABLELOG_ABSOLUTEMAX + 1];
+ U32 statsWksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
+ BYTE symbols[HUF_SYMBOLVALUE_MAX + 1];
+ BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];
+} HUF_ReadDTableX1_Workspace;
+
+
+size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize)
+{
+ return HUF_readDTableX1_wksp_bmi2(DTable, src, srcSize, workSpace, wkspSize, /* bmi2 */ 0);
+}
+
+size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2)
+{
+ U32 tableLog = 0;
+ U32 nbSymbols = 0;
+ size_t iSize;
+ void* const dtPtr = DTable + 1;
+ HUF_DEltX1* const dt = (HUF_DEltX1*)dtPtr;
+ HUF_ReadDTableX1_Workspace* wksp = (HUF_ReadDTableX1_Workspace*)workSpace;
+
+ DEBUG_STATIC_ASSERT(HUF_DECOMPRESS_WORKSPACE_SIZE >= sizeof(*wksp));
+ if (sizeof(*wksp) > wkspSize) return ERROR(tableLog_tooLarge);
+
+ DEBUG_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
+    /* ZSTD_memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzers complain ... */
+
+ iSize = HUF_readStats_wksp(wksp->huffWeight, HUF_SYMBOLVALUE_MAX + 1, wksp->rankVal, &nbSymbols, &tableLog, src, srcSize, wksp->statsWksp, sizeof(wksp->statsWksp), bmi2);
+ if (HUF_isError(iSize)) return iSize;
+
+ /* Table header */
+ { DTableDesc dtd = HUF_getDTableDesc(DTable);
+ if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge); /* DTable too small, Huffman tree cannot fit in */
+ dtd.tableType = 0;
+ dtd.tableLog = (BYTE)tableLog;
+ ZSTD_memcpy(DTable, &dtd, sizeof(dtd));
+ }
+
+ /* Compute symbols and rankStart given rankVal:
+ *
+ * rankVal already contains the number of values of each weight.
+ *
+ * symbols contains the symbols ordered by weight. First are the rankVal[0]
+ * weight 0 symbols, followed by the rankVal[1] weight 1 symbols, and so on.
+ * symbols[0] is filled (but unused) to avoid a branch.
+ *
+ * rankStart contains the offset where each rank belongs in the DTable.
+ * rankStart[0] is not filled because there are no entries in the table for
+ * weight 0.
+ */
+ {
+ int n;
+ int nextRankStart = 0;
+ int const unroll = 4;
+ int const nLimit = (int)nbSymbols - unroll + 1;
+ for (n=0; n<(int)tableLog+1; n++) {
+ U32 const curr = nextRankStart;
+ nextRankStart += wksp->rankVal[n];
+ wksp->rankStart[n] = curr;
+ }
+ for (n=0; n < nLimit; n += unroll) {
+ int u;
+ for (u=0; u < unroll; ++u) {
+ size_t const w = wksp->huffWeight[n+u];
+ wksp->symbols[wksp->rankStart[w]++] = (BYTE)(n+u);
+ }
+ }
+ for (; n < (int)nbSymbols; ++n) {
+ size_t const w = wksp->huffWeight[n];
+ wksp->symbols[wksp->rankStart[w]++] = (BYTE)n;
+ }
+ }
+
+ /* fill DTable
+ * We fill all entries of each weight in order.
+ * That way, length is a constant for each iteration of the outer loop.
+ * We can switch based on the length to a different inner loop which is
+ * optimized for that particular case.
+ */
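+ /* Illustrative example : with tableLog == 6, a weight-1 symbol uses
+ * nbBits == 6 and fills a single cell, while a weight-6 symbol uses
+ * nbBits == 1 and fills 32 consecutive cells, hence the switch on length. */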
+ {
+ U32 w;
+ int symbol=wksp->rankVal[0];
+ int rankStart=0;
+ for (w=1; w<tableLog+1; ++w) {
+ int const symbolCount = wksp->rankVal[w];
+ int const length = (1 << w) >> 1;
+ int uStart = rankStart;
+ BYTE const nbBits = (BYTE)(tableLog + 1 - w);
+ int s;
+ int u;
+ switch (length) {
+ case 1:
+ for (s=0; s<symbolCount; ++s) {
+ HUF_DEltX1 D;
+ D.byte = wksp->symbols[symbol + s];
+ D.nbBits = nbBits;
+ dt[uStart] = D;
+ uStart += 1;
+ }
+ break;
+ case 2:
+ for (s=0; s<symbolCount; ++s) {
+ HUF_DEltX1 D;
+ D.byte = wksp->symbols[symbol + s];
+ D.nbBits = nbBits;
+ dt[uStart+0] = D;
+ dt[uStart+1] = D;
+ uStart += 2;
+ }
+ break;
+ case 4:
+ for (s=0; s<symbolCount; ++s) {
+ U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
+ MEM_write64(dt + uStart, D4);
+ uStart += 4;
+ }
+ break;
+ case 8:
+ for (s=0; s<symbolCount; ++s) {
+ U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
+ MEM_write64(dt + uStart, D4);
+ MEM_write64(dt + uStart + 4, D4);
+ uStart += 8;
+ }
+ break;
+ default:
+ for (s=0; s<symbolCount; ++s) {
+ U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
+ for (u=0; u < length; u += 16) {
+ MEM_write64(dt + uStart + u + 0, D4);
+ MEM_write64(dt + uStart + u + 4, D4);
+ MEM_write64(dt + uStart + u + 8, D4);
+ MEM_write64(dt + uStart + u + 12, D4);
+ }
+ assert(u == length);
+ uStart += length;
+ }
+ break;
+ }
+ symbol += symbolCount;
+ rankStart += symbolCount * length;
+ }
+ }
+ return iSize;
+}
+
+FORCE_INLINE_TEMPLATE BYTE
+HUF_decodeSymbolX1(BIT_DStream_t* Dstream, const HUF_DEltX1* dt, const U32 dtLog)
+{
+ size_t const val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
+ BYTE const c = dt[val].byte;
+ BIT_skipBits(Dstream, dt[val].nbBits);
+ return c;
+}
+
+#define HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr) \
+ *ptr++ = HUF_decodeSymbolX1(DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX1_1(ptr, DStreamPtr) \
+ if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
+ HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)
+
+#define HUF_DECODE_SYMBOLX1_2(ptr, DStreamPtr) \
+ if (MEM_64bits()) \
+ HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)
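+
+/* The _1 and _2 variants are guarded so that no more symbols are decoded
+ * between two reloads than the bit container can safely hold : 4 per stream
+ * on 64-bit targets, but only 2 on 32-bit targets (assuming
+ * HUF_TABLELOG_MAX <= 12, so each symbol consumes at most 12 bits). */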
+
+HINT_INLINE size_t
+HUF_decodeStreamX1(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX1* const dt, const U32 dtLog)
+{
+ BYTE* const pStart = p;
+
+ /* up to 4 symbols at a time */
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-3)) {
+ HUF_DECODE_SYMBOLX1_2(p, bitDPtr);
+ HUF_DECODE_SYMBOLX1_1(p, bitDPtr);
+ HUF_DECODE_SYMBOLX1_2(p, bitDPtr);
+ HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
+ }
+
+ /* [0-3] symbols remaining */
+ if (MEM_32bits())
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd))
+ HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
+
+ /* no more data to retrieve from bitstream, no need to reload */
+ while (p < pEnd)
+ HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
+
+ return pEnd-pStart;
+}
+
+FORCE_INLINE_TEMPLATE size_t
+HUF_decompress1X1_usingDTable_internal_body(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ BYTE* op = (BYTE*)dst;
+ BYTE* const oend = op + dstSize;
+ const void* dtPtr = DTable + 1;
+ const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr;
+ BIT_DStream_t bitD;
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+ U32 const dtLog = dtd.tableLog;
+
+ CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );
+
+ HUF_decodeStreamX1(op, &bitD, oend, dt, dtLog);
+
+ if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
+
+ return dstSize;
+}
+
+FORCE_INLINE_TEMPLATE size_t
+HUF_decompress4X1_usingDTable_internal_body(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ /* Check */
+ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
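+ /* 4-stream layout : a 6-byte jump table (three little-endian 16-bit sizes
+ * for streams 1-3) followed by the 4 concatenated bitstreams ; the size of
+ * stream 4 is implied by cSrcSize. */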
+
+ { const BYTE* const istart = (const BYTE*) cSrc;
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+ BYTE* const olimit = oend - 3;
+ const void* const dtPtr = DTable + 1;
+ const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr;
+
+ /* Init */
+ BIT_DStream_t bitD1;
+ BIT_DStream_t bitD2;
+ BIT_DStream_t bitD3;
+ BIT_DStream_t bitD4;
+ size_t const length1 = MEM_readLE16(istart);
+ size_t const length2 = MEM_readLE16(istart+2);
+ size_t const length3 = MEM_readLE16(istart+4);
+ size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
+ const BYTE* const istart1 = istart + 6; /* jumpTable */
+ const BYTE* const istart2 = istart1 + length1;
+ const BYTE* const istart3 = istart2 + length2;
+ const BYTE* const istart4 = istart3 + length3;
+ const size_t segmentSize = (dstSize+3) / 4;
+ BYTE* const opStart2 = ostart + segmentSize;
+ BYTE* const opStart3 = opStart2 + segmentSize;
+ BYTE* const opStart4 = opStart3 + segmentSize;
+ BYTE* op1 = ostart;
+ BYTE* op2 = opStart2;
+ BYTE* op3 = opStart3;
+ BYTE* op4 = opStart4;
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+ U32 const dtLog = dtd.tableLog;
+ U32 endSignal = 1;
+
+ if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
+ CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
+ CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
+ CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
+ CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
+
+ /* up to 16 symbols per loop (4 symbols per stream) in 64-bit mode */
+ for ( ; (endSignal) & (op4 < olimit) ; ) {
+ HUF_DECODE_SYMBOLX1_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX1_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX1_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX1_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX1_1(op1, &bitD1);
+ HUF_DECODE_SYMBOLX1_1(op2, &bitD2);
+ HUF_DECODE_SYMBOLX1_1(op3, &bitD3);
+ HUF_DECODE_SYMBOLX1_1(op4, &bitD4);
+ HUF_DECODE_SYMBOLX1_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX1_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX1_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX1_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX1_0(op1, &bitD1);
+ HUF_DECODE_SYMBOLX1_0(op2, &bitD2);
+ HUF_DECODE_SYMBOLX1_0(op3, &bitD3);
+ HUF_DECODE_SYMBOLX1_0(op4, &bitD4);
+ endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished;
+ endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished;
+ endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished;
+ endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished;
+ }
+
+ /* check corruption */
+ /* note : these checks should not be necessary : the op# pointers advance in lock step, and op4 is already controlled within the main loop.
+ * Curiously, binaries generated by gcc 7.2 & 7.3 with -mbmi2 run faster when at least one test is present */
+ if (op1 > opStart2) return ERROR(corruption_detected);
+ if (op2 > opStart3) return ERROR(corruption_detected);
+ if (op3 > opStart4) return ERROR(corruption_detected);
+ /* note : op4 is assumed to have been verified within the main loop */
+
+ /* finish bitStreams one by one */
+ HUF_decodeStreamX1(op1, &bitD1, opStart2, dt, dtLog);
+ HUF_decodeStreamX1(op2, &bitD2, opStart3, dt, dtLog);
+ HUF_decodeStreamX1(op3, &bitD3, opStart4, dt, dtLog);
+ HUF_decodeStreamX1(op4, &bitD4, oend, dt, dtLog);
+
+ /* check */
+ { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
+ if (!endCheck) return ERROR(corruption_detected); }
+
+ /* decoded size */
+ return dstSize;
+ }
+}
+
+
+typedef size_t (*HUF_decompress_usingDTable_t)(void *dst, size_t dstSize,
+ const void *cSrc,
+ size_t cSrcSize,
+ const HUF_DTable *DTable);
+
+HUF_DGEN(HUF_decompress1X1_usingDTable_internal)
+HUF_DGEN(HUF_decompress4X1_usingDTable_internal)
+
+
+
+size_t HUF_decompress1X1_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ DTableDesc dtd = HUF_getDTableDesc(DTable);
+ if (dtd.tableType != 0) return ERROR(GENERIC);
+ return HUF_decompress1X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+}
+
+size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize)
+{
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t const hSize = HUF_readDTableX1_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize);
+ if (HUF_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize; cSrcSize -= hSize;
+
+ return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
+}
+
+
+size_t HUF_decompress4X1_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ DTableDesc dtd = HUF_getDTableDesc(DTable);
+ if (dtd.tableType != 0) return ERROR(GENERIC);
+ return HUF_decompress4X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+}
+
+static size_t HUF_decompress4X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize, int bmi2)
+{
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t const hSize = HUF_readDTableX1_wksp_bmi2(dctx, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
+ if (HUF_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize; cSrcSize -= hSize;
+
+ return HUF_decompress4X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
+}
+
+size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize)
+{
+ return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, 0);
+}
+
+
+#endif /* HUF_FORCE_DECOMPRESS_X2 */
+
+
+#ifndef HUF_FORCE_DECOMPRESS_X1
+
+/* *************************/
+/* double-symbols decoding */
+/* *************************/
+
+typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX2; /* double-symbols decoding */
+typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
+typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1];
+typedef rankValCol_t rankVal_t[HUF_TABLELOG_MAX];
+
+
+/* HUF_fillDTableX2Level2() :
+ * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */
+static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 sizeLog, const U32 consumed,
+ const U32* rankValOrigin, const int minWeight,
+ const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
+ U32 nbBitsBaseline, U16 baseSeq, U32* wksp, size_t wkspSize)
+{
+ HUF_DEltX2 DElt;
+ U32* rankVal = wksp;
+
+ assert(wkspSize >= HUF_TABLELOG_MAX + 1);
+ (void)wkspSize;
+ /* get pre-calculated rankVal */
+ ZSTD_memcpy(rankVal, rankValOrigin, sizeof(U32) * (HUF_TABLELOG_MAX + 1));
+
+ /* fill skipped values */
+ if (minWeight>1) {
+ U32 i, skipSize = rankVal[minWeight];
+ MEM_writeLE16(&(DElt.sequence), baseSeq);
+ DElt.nbBits = (BYTE)(consumed);
+ DElt.length = 1;
+ for (i = 0; i < skipSize; i++)
+ DTable[i] = DElt;
+ }
+
+ /* fill DTable */
+ { U32 s; for (s=0; s<sortedListSize; s++) { /* note : sortedSymbols already skipped */
+ const U32 symbol = sortedSymbols[s].symbol;
+ const U32 weight = sortedSymbols[s].weight;
+ const U32 nbBits = nbBitsBaseline - weight;
+ const U32 length = 1 << (sizeLog-nbBits);
+ const U32 start = rankVal[weight];
+ U32 i = start;
+ const U32 end = start + length;
+
+ MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
+ DElt.nbBits = (BYTE)(nbBits + consumed);
+ DElt.length = 2;
+ do { DTable[i++] = DElt; } while (i<end); /* since length >= 1 */
+
+ rankVal[weight] += length;
+ } }
+}
+
+
+static void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog,
+ const sortedSymbol_t* sortedList, const U32 sortedListSize,
+ const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
+ const U32 nbBitsBaseline, U32* wksp, size_t wkspSize)
+{
+ U32* rankVal = wksp;
+ const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */
+ const U32 minBits = nbBitsBaseline - maxWeight;
+ U32 s;
+
+ assert(wkspSize >= HUF_TABLELOG_MAX + 1);
+ wksp += HUF_TABLELOG_MAX + 1;
+ wkspSize -= HUF_TABLELOG_MAX + 1;
+
+ ZSTD_memcpy(rankVal, rankValOrigin, sizeof(U32) * (HUF_TABLELOG_MAX + 1));
+
+ /* fill DTable */
+ for (s=0; s<sortedListSize; s++) {
+ const U16 symbol = sortedList[s].symbol;
+ const U32 weight = sortedList[s].weight;
+ const U32 nbBits = nbBitsBaseline - weight;
+ const U32 start = rankVal[weight];
+ const U32 length = 1 << (targetLog-nbBits);
+
+ if (targetLog-nbBits >= minBits) { /* enough room for a second symbol */
+ U32 sortedRank;
+ int minWeight = nbBits + scaleLog;
+ if (minWeight < 1) minWeight = 1;
+ sortedRank = rankStart[minWeight];
+ HUF_fillDTableX2Level2(DTable+start, targetLog-nbBits, nbBits,
+ rankValOrigin[nbBits], minWeight,
+ sortedList+sortedRank, sortedListSize-sortedRank,
+ nbBitsBaseline, symbol, wksp, wkspSize);
+ } else {
+ HUF_DEltX2 DElt;
+ MEM_writeLE16(&(DElt.sequence), symbol);
+ DElt.nbBits = (BYTE)(nbBits);
+ DElt.length = 1;
+ { U32 const end = start + length;
+ U32 u;
+ for (u = start; u < end; u++) DTable[u] = DElt;
+ } }
+ rankVal[weight] += length;
+ }
+}
+
+typedef struct {
+ rankValCol_t rankVal[HUF_TABLELOG_MAX];
+ U32 rankStats[HUF_TABLELOG_MAX + 1];
+ U32 rankStart0[HUF_TABLELOG_MAX + 2];
+ sortedSymbol_t sortedSymbol[HUF_SYMBOLVALUE_MAX + 1];
+ BYTE weightList[HUF_SYMBOLVALUE_MAX + 1];
+ U32 calleeWksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
+} HUF_ReadDTableX2_Workspace;
+
+size_t HUF_readDTableX2_wksp(HUF_DTable* DTable,
+ const void* src, size_t srcSize,
+ void* workSpace, size_t wkspSize)
+{
+ U32 tableLog, maxW, sizeOfSort, nbSymbols;
+ DTableDesc dtd = HUF_getDTableDesc(DTable);
+ U32 const maxTableLog = dtd.maxTableLog;
+ size_t iSize;
+ void* dtPtr = DTable+1; /* force compiler to avoid strict-aliasing */
+ HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr;
+ U32 *rankStart;
+
+ HUF_ReadDTableX2_Workspace* const wksp = (HUF_ReadDTableX2_Workspace*)workSpace;
+
+ if (sizeof(*wksp) > wkspSize) return ERROR(GENERIC);
+
+ rankStart = wksp->rankStart0 + 1;
+ ZSTD_memset(wksp->rankStats, 0, sizeof(wksp->rankStats));
+ ZSTD_memset(wksp->rankStart0, 0, sizeof(wksp->rankStart0));
+
+ DEBUG_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(HUF_DTable)); /* if compiler fails here, assertion is wrong */
+ if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
+ /* ZSTD_memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzers complain ... */
+
+ iSize = HUF_readStats_wksp(wksp->weightList, HUF_SYMBOLVALUE_MAX + 1, wksp->rankStats, &nbSymbols, &tableLog, src, srcSize, wksp->calleeWksp, sizeof(wksp->calleeWksp), /* bmi2 */ 0);
+ if (HUF_isError(iSize)) return iSize;
+
+ /* check result */
+ if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */
+
+ /* find maxWeight */
+ for (maxW = tableLog; wksp->rankStats[maxW]==0; maxW--) {} /* necessarily finds a solution before 0 */
+
+ /* Get start index of each weight */
+ { U32 w, nextRankStart = 0;
+ for (w=1; w<maxW+1; w++) {
+ U32 curr = nextRankStart;
+ nextRankStart += wksp->rankStats[w];
+ rankStart[w] = curr;
+ }
+ rankStart[0] = nextRankStart; /* put all 0w symbols at the end of the sorted list */
+ sizeOfSort = nextRankStart;
+ }
+
+ /* sort symbols by weight */
+ { U32 s;
+ for (s=0; s<nbSymbols; s++) {
+ U32 const w = wksp->weightList[s];
+ U32 const r = rankStart[w]++;
+ wksp->sortedSymbol[r].symbol = (BYTE)s;
+ wksp->sortedSymbol[r].weight = (BYTE)w;
+ }
+ rankStart[0] = 0; /* forget 0w symbols; this is the beginning of weight(1) */
+ }
+
+ /* Build rankVal */
+ { U32* const rankVal0 = wksp->rankVal[0];
+ { int const rescale = (maxTableLog-tableLog) - 1; /* tableLog <= maxTableLog */
+ U32 nextRankVal = 0;
+ U32 w;
+ for (w=1; w<maxW+1; w++) {
+ U32 curr = nextRankVal;
+ nextRankVal += wksp->rankStats[w] << (w+rescale);
+ rankVal0[w] = curr;
+ } }
+ { U32 const minBits = tableLog+1 - maxW;
+ U32 consumed;
+ for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) {
+ U32* const rankValPtr = wksp->rankVal[consumed];
+ U32 w;
+ for (w = 1; w < maxW+1; w++) {
+ rankValPtr[w] = rankVal0[w] >> consumed;
+ } } } }
+
+ HUF_fillDTableX2(dt, maxTableLog,
+ wksp->sortedSymbol, sizeOfSort,
+ wksp->rankStart0, wksp->rankVal, maxW,
+ tableLog+1,
+ wksp->calleeWksp, sizeof(wksp->calleeWksp) / sizeof(U32));
+
+ dtd.tableLog = (BYTE)maxTableLog;
+ dtd.tableType = 1;
+ ZSTD_memcpy(DTable, &dtd, sizeof(dtd));
+ return iSize;
+}
+
+
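+/* HUF_decodeSymbolX2() always copies 2 bytes from the table entry, but only
+ * advances the output by dt[val].length (1 or 2) : when a single symbol is
+ * decoded, the speculatively written second byte is overwritten by the next
+ * decode (the very last symbol is handled by HUF_decodeLastSymbolX2()). */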
+FORCE_INLINE_TEMPLATE U32
+HUF_decodeSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)
+{
+ size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
+ ZSTD_memcpy(op, dt+val, 2);
+ BIT_skipBits(DStream, dt[val].nbBits);
+ return dt[val].length;
+}
+
+FORCE_INLINE_TEMPLATE U32
+HUF_decodeLastSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)
+{
+ size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
+ ZSTD_memcpy(op, dt+val, 1);
+ if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);
+ else {
+ if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
+ BIT_skipBits(DStream, dt[val].nbBits);
+ if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
+ /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
+ DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);
+ } }
+ return 1;
+}
+
+#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
+ ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
+ if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
+ ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
+ if (MEM_64bits()) \
+ ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
+
+HINT_INLINE size_t
+HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd,
+ const HUF_DEltX2* const dt, const U32 dtLog)
+{
+ BYTE* const pStart = p;
+
+ /* up to 8 symbols at a time */
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) {
+ HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+ }
+
+ /* closer to end : up to 2 symbols at a time */
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2))
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+
+ while (p <= pEnd-2)
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr); /* no need to reload : reached the end of DStream */
+
+ if (p < pEnd)
+ p += HUF_decodeLastSymbolX2(p, bitDPtr, dt, dtLog);
+
+ return p-pStart;
+}
+
+FORCE_INLINE_TEMPLATE size_t
+HUF_decompress1X2_usingDTable_internal_body(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ BIT_DStream_t bitD;
+
+ /* Init */
+ CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );
+
+ /* decode */
+ { BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+ const void* const dtPtr = DTable+1; /* force compiler to not use strict-aliasing */
+ const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+ HUF_decodeStreamX2(ostart, &bitD, oend, dt, dtd.tableLog);
+ }
+
+ /* check */
+ if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
+
+ /* decoded size */
+ return dstSize;
+}
+
+FORCE_INLINE_TEMPLATE size_t
+HUF_decompress4X2_usingDTable_internal_body(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
+
+ { const BYTE* const istart = (const BYTE*) cSrc;
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+ BYTE* const olimit = oend - (sizeof(size_t)-1);
+ const void* const dtPtr = DTable+1;
+ const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
+
+ /* Init */
+ BIT_DStream_t bitD1;
+ BIT_DStream_t bitD2;
+ BIT_DStream_t bitD3;
+ BIT_DStream_t bitD4;
+ size_t const length1 = MEM_readLE16(istart);
+ size_t const length2 = MEM_readLE16(istart+2);
+ size_t const length3 = MEM_readLE16(istart+4);
+ size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
+ const BYTE* const istart1 = istart + 6; /* jumpTable */
+ const BYTE* const istart2 = istart1 + length1;
+ const BYTE* const istart3 = istart2 + length2;
+ const BYTE* const istart4 = istart3 + length3;
+ size_t const segmentSize = (dstSize+3) / 4;
+ BYTE* const opStart2 = ostart + segmentSize;
+ BYTE* const opStart3 = opStart2 + segmentSize;
+ BYTE* const opStart4 = opStart3 + segmentSize;
+ BYTE* op1 = ostart;
+ BYTE* op2 = opStart2;
+ BYTE* op3 = opStart3;
+ BYTE* op4 = opStart4;
+ U32 endSignal = 1;
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+ U32 const dtLog = dtd.tableLog;
+
+ if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
+ CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
+ CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
+ CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
+ CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
+
+ /* 16-32 symbols per loop (4-8 symbols per stream) */
+ for ( ; (endSignal) & (op4 < olimit); ) {
+#if defined(__clang__) && (defined(__x86_64__) || defined(__i386__))
+ HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
+ endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished;
+ endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished;
+ HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
+ endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished;
+ endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished;
+#else
+ HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
+ endSignal = (U32)LIKELY((U32)
+ (BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished)
+ & (BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished)
+ & (BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished)
+ & (BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished));
+#endif
+ }
+
+ /* check corruption */
+ if (op1 > opStart2) return ERROR(corruption_detected);
+ if (op2 > opStart3) return ERROR(corruption_detected);
+ if (op3 > opStart4) return ERROR(corruption_detected);
+ /* note : op4 already verified within main loop */
+
+ /* finish bitStreams one by one */
+ HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
+ HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
+ HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
+ HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog);
+
+ /* check */
+ { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
+ if (!endCheck) return ERROR(corruption_detected); }
+
+ /* decoded size */
+ return dstSize;
+ }
+}
+
+HUF_DGEN(HUF_decompress1X2_usingDTable_internal)
+HUF_DGEN(HUF_decompress4X2_usingDTable_internal)
+
+size_t HUF_decompress1X2_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ DTableDesc dtd = HUF_getDTableDesc(DTable);
+ if (dtd.tableType != 1) return ERROR(GENERIC);
+ return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+}
+
+size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize)
+{
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize,
+ workSpace, wkspSize);
+ if (HUF_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize; cSrcSize -= hSize;
+
+ return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
+}
+
+
+size_t HUF_decompress4X2_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ DTableDesc dtd = HUF_getDTableDesc(DTable);
+ if (dtd.tableType != 1) return ERROR(GENERIC);
+ return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+}
+
+static size_t HUF_decompress4X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize, int bmi2)
+{
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize,
+ workSpace, wkspSize);
+ if (HUF_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize; cSrcSize -= hSize;
+
+ return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
+}
+
+size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize)
+{
+ return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, /* bmi2 */ 0);
+}
+
+
+#endif /* HUF_FORCE_DECOMPRESS_X1 */
+
+
+/* ***********************************/
+/* Universal decompression selectors */
+/* ***********************************/
+
+size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+ (void)dtd;
+ assert(dtd.tableType == 0);
+ return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+ (void)dtd;
+ assert(dtd.tableType == 1);
+ return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+#else
+ return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
+ HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+#endif
+}
+
+size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+ (void)dtd;
+ assert(dtd.tableType == 0);
+ return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+ (void)dtd;
+ assert(dtd.tableType == 1);
+ return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+#else
+ return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
+ HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+#endif
+}
+
+
+#if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2)
+typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
+static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
+{
+ /* single, double, quad */
+ {{0,0}, {1,1}, {2,2}}, /* Q==0 : impossible */
+ {{0,0}, {1,1}, {2,2}}, /* Q==1 : impossible */
+ {{ 38,130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */
+ {{ 448,128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */
+ {{ 556,128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */
+ {{ 714,128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */
+ {{ 883,128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */
+ {{ 897,128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */
+ {{ 926,128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */
+ {{ 947,128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */
+ {{1107,128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */
+ {{1177,128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */
+ {{1242,128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */
+ {{1349,128}, {2644,106}, {5260,106}}, /* Q ==13 : 81-87% */
+ {{1455,128}, {2422,124}, {4174,124}}, /* Q ==14 : 87-93% */
+ {{ 722,128}, {1891,145}, {1936,146}}, /* Q ==15 : 93-99% */
+};
+#endif
+
+/* HUF_selectDecoder() :
+ * Tells which decoder is likely to decode faster,
+ * based on a set of pre-computed metrics.
+ * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 .
+ * Assumption : 0 < dstSize <= 128 KB */
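+/* Illustrative example : dstSize = 64 KB and cSrcSize = 32 KB give Q == 8 and
+ * D256 == 256, so DTime0 = 926 + 128*256 = 33694 while
+ * DTime1 = 1613 + 75*256 = 20813 (23414 after the memory penalty),
+ * hence the double-symbol decoder (1) is selected. */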
+U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize)
+{
+ assert(dstSize > 0);
+ assert(dstSize <= 128*1024);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+ (void)dstSize;
+ (void)cSrcSize;
+ return 0;
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+ (void)dstSize;
+ (void)cSrcSize;
+ return 1;
+#else
+ /* decoder timing evaluation */
+ { U32 const Q = (cSrcSize >= dstSize) ? 15 : (U32)(cSrcSize * 16 / dstSize); /* Q < 16 */
+ U32 const D256 = (U32)(dstSize >> 8);
+ U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256);
+ U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
+ DTime1 += DTime1 >> 3; /* advantage to algorithm using less memory, to reduce cache eviction */
+ return DTime1 < DTime0;
+ }
+#endif
+}
+
+
+size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst,
+ size_t dstSize, const void* cSrc,
+ size_t cSrcSize, void* workSpace,
+ size_t wkspSize)
+{
+ /* validation checks */
+ if (dstSize == 0) return ERROR(dstSize_tooSmall);
+ if (cSrcSize == 0) return ERROR(corruption_detected);
+
+ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+ (void)algoNb;
+ assert(algoNb == 0);
+ return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+ (void)algoNb;
+ assert(algoNb == 1);
+ return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
+#else
+ return algoNb ? HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
+ cSrcSize, workSpace, wkspSize):
+ HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
+#endif
+ }
+}
+
+size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize)
+{
+ /* validation checks */
+ if (dstSize == 0) return ERROR(dstSize_tooSmall);
+ if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */
+ if (cSrcSize == dstSize) { ZSTD_memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
+ if (cSrcSize == 1) { ZSTD_memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
+
+ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+ (void)algoNb;
+ assert(algoNb == 0);
+ return HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
+ cSrcSize, workSpace, wkspSize);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+ (void)algoNb;
+ assert(algoNb == 1);
+ return HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
+ cSrcSize, workSpace, wkspSize);
+#else
+ return algoNb ? HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
+ cSrcSize, workSpace, wkspSize):
+ HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
+ cSrcSize, workSpace, wkspSize);
+#endif
+ }
+}
+
+
+size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
+{
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+ (void)dtd;
+ assert(dtd.tableType == 0);
+ return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+ (void)dtd;
+ assert(dtd.tableType == 1);
+ return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+#else
+ return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
+ HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+#endif
+}
+
+#ifndef HUF_FORCE_DECOMPRESS_X2
+size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
+{
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t const hSize = HUF_readDTableX1_wksp_bmi2(dctx, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
+ if (HUF_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize; cSrcSize -= hSize;
+
+ return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
+}
+#endif
+
+size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
+{
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+ (void)dtd;
+ assert(dtd.tableType == 0);
+ return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+ (void)dtd;
+ assert(dtd.tableType == 1);
+ return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+#else
+ return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
+ HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+#endif
+}
+
+size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
+{
+ /* validation checks */
+ if (dstSize == 0) return ERROR(dstSize_tooSmall);
+ if (cSrcSize == 0) return ERROR(corruption_detected);
+
+ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+ (void)algoNb;
+ assert(algoNb == 0);
+ return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+ (void)algoNb;
+ assert(algoNb == 1);
+ return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
+#else
+ return algoNb ? HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2) :
+ HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
+#endif
+ }
+}
+
diff --git a/lib/zstd/decompress/zstd_ddict.c b/lib/zstd/decompress/zstd_ddict.c
new file mode 100644
index 00000000000000..dbbc7919de534e
--- /dev/null
+++ b/lib/zstd/decompress/zstd_ddict.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* zstd_ddict.c :
+ * concentrates all logic that needs to know the internals of the ZSTD_DDict object */
+
+/*-*******************************************************
+* Dependencies
+*********************************************************/
+#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */
+#include "../common/cpu.h" /* bmi2 */
+#include "../common/mem.h" /* low level memory routines */
+#define FSE_STATIC_LINKING_ONLY
+#include "../common/fse.h"
+#define HUF_STATIC_LINKING_ONLY
+#include "../common/huf.h"
+#include "zstd_decompress_internal.h"
+#include "zstd_ddict.h"
+
+
+
+
+/*-*******************************************************
+* Types
+*********************************************************/
+struct ZSTD_DDict_s {
+ void* dictBuffer;
+ const void* dictContent;
+ size_t dictSize;
+ ZSTD_entropyDTables_t entropy;
+ U32 dictID;
+ U32 entropyPresent;
+ ZSTD_customMem cMem;
+}; /* typedef'd to ZSTD_DDict within "zstd.h" */
+
+const void* ZSTD_DDict_dictContent(const ZSTD_DDict* ddict)
+{
+ assert(ddict != NULL);
+ return ddict->dictContent;
+}
+
+size_t ZSTD_DDict_dictSize(const ZSTD_DDict* ddict)
+{
+ assert(ddict != NULL);
+ return ddict->dictSize;
+}
+
+void ZSTD_copyDDictParameters(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
+{
+ DEBUGLOG(4, "ZSTD_copyDDictParameters");
+ assert(dctx != NULL);
+ assert(ddict != NULL);
+ dctx->dictID = ddict->dictID;
+ dctx->prefixStart = ddict->dictContent;
+ dctx->virtualStart = ddict->dictContent;
+ dctx->dictEnd = (const BYTE*)ddict->dictContent + ddict->dictSize;
+ dctx->previousDstEnd = dctx->dictEnd;
+#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ dctx->dictContentBeginForFuzzing = dctx->prefixStart;
+ dctx->dictContentEndForFuzzing = dctx->previousDstEnd;
+#endif
+ if (ddict->entropyPresent) {
+ dctx->litEntropy = 1;
+ dctx->fseEntropy = 1;
+ dctx->LLTptr = ddict->entropy.LLTable;
+ dctx->MLTptr = ddict->entropy.MLTable;
+ dctx->OFTptr = ddict->entropy.OFTable;
+ dctx->HUFptr = ddict->entropy.hufTable;
+ dctx->entropy.rep[0] = ddict->entropy.rep[0];
+ dctx->entropy.rep[1] = ddict->entropy.rep[1];
+ dctx->entropy.rep[2] = ddict->entropy.rep[2];
+ } else {
+ dctx->litEntropy = 0;
+ dctx->fseEntropy = 0;
+ }
+}
+
+
+static size_t
+ZSTD_loadEntropy_intoDDict(ZSTD_DDict* ddict,
+ ZSTD_dictContentType_e dictContentType)
+{
+ ddict->dictID = 0;
+ ddict->entropyPresent = 0;
+ if (dictContentType == ZSTD_dct_rawContent) return 0;
+
+ if (ddict->dictSize < 8) {
+ if (dictContentType == ZSTD_dct_fullDict)
+ return ERROR(dictionary_corrupted); /* only accept specified dictionaries */
+ return 0; /* pure content mode */
+ }
+ { U32 const magic = MEM_readLE32(ddict->dictContent);
+ if (magic != ZSTD_MAGIC_DICTIONARY) {
+ if (dictContentType == ZSTD_dct_fullDict)
+ return ERROR(dictionary_corrupted); /* only accept specified dictionaries */
+ return 0; /* pure content mode */
+ }
+ }
+ ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + ZSTD_FRAMEIDSIZE);
+
+ /* load entropy tables */
+ RETURN_ERROR_IF(ZSTD_isError(ZSTD_loadDEntropy(
+ &ddict->entropy, ddict->dictContent, ddict->dictSize)),
+ dictionary_corrupted, "");
+ ddict->entropyPresent = 1;
+ return 0;
+}
+
+
+static size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict,
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType)
+{
+ if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dict) || (!dictSize)) {
+ ddict->dictBuffer = NULL;
+ ddict->dictContent = dict;
+ if (!dict) dictSize = 0;
+ } else {
+ void* const internalBuffer = ZSTD_customMalloc(dictSize, ddict->cMem);
+ ddict->dictBuffer = internalBuffer;
+ ddict->dictContent = internalBuffer;
+ if (!internalBuffer) return ERROR(memory_allocation);
+ ZSTD_memcpy(internalBuffer, dict, dictSize);
+ }
+ ddict->dictSize = dictSize;
+ ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
+
+ /* parse dictionary content */
+ FORWARD_IF_ERROR( ZSTD_loadEntropy_intoDDict(ddict, dictContentType) , "");
+
+ return 0;
+}
+
+ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_customMem customMem)
+{
+ if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
+
+ { ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_customMalloc(sizeof(ZSTD_DDict), customMem);
+ if (ddict == NULL) return NULL;
+ ddict->cMem = customMem;
+ { size_t const initResult = ZSTD_initDDict_internal(ddict,
+ dict, dictSize,
+ dictLoadMethod, dictContentType);
+ if (ZSTD_isError(initResult)) {
+ ZSTD_freeDDict(ddict);
+ return NULL;
+ } }
+ return ddict;
+ }
+}
+
+/*! ZSTD_createDDict() :
+* Create a digested dictionary, to start decompression without startup delay.
+* `dict` content is copied inside DDict.
+* Consequently, `dict` can be released after `ZSTD_DDict` creation */
+ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize)
+{
+ ZSTD_customMem const allocator = { NULL, NULL, NULL };
+ return ZSTD_createDDict_advanced(dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto, allocator);
+}
+
+/*! ZSTD_createDDict_byReference() :
+ * Create a digested dictionary, to start decompression without startup delay.
+ * Dictionary content is simply referenced; it will be accessed during decompression.
+ * Warning : dictBuffer must outlive DDict (DDict must be freed before dictBuffer) */
+ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize)
+{
+ ZSTD_customMem const allocator = { NULL, NULL, NULL };
+ return ZSTD_createDDict_advanced(dictBuffer, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto, allocator);
+}
+
+
+const ZSTD_DDict* ZSTD_initStaticDDict(
+ void* sBuffer, size_t sBufferSize,
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType)
+{
+ size_t const neededSpace = sizeof(ZSTD_DDict)
+ + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
+ ZSTD_DDict* const ddict = (ZSTD_DDict*)sBuffer;
+ assert(sBuffer != NULL);
+ assert(dict != NULL);
+ if ((size_t)sBuffer & 7) return NULL; /* 8-aligned */
+ if (sBufferSize < neededSpace) return NULL;
+ if (dictLoadMethod == ZSTD_dlm_byCopy) {
+ ZSTD_memcpy(ddict+1, dict, dictSize); /* local copy */
+ dict = ddict+1;
+ }
+ if (ZSTD_isError( ZSTD_initDDict_internal(ddict,
+ dict, dictSize,
+ ZSTD_dlm_byRef, dictContentType) ))
+ return NULL;
+ return ddict;
+}
+
+
+size_t ZSTD_freeDDict(ZSTD_DDict* ddict)
+{
+ if (ddict==NULL) return 0; /* support free on NULL */
+ { ZSTD_customMem const cMem = ddict->cMem;
+ ZSTD_customFree(ddict->dictBuffer, cMem);
+ ZSTD_customFree(ddict, cMem);
+ return 0;
+ }
+}
+
+/*! ZSTD_estimateDDictSize() :
+ * Estimate amount of memory that will be needed to create a dictionary for decompression.
+ * Note : dictionaries created by reference using ZSTD_dlm_byRef are smaller */
+size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod)
+{
+ return sizeof(ZSTD_DDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
+}
+
+size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict)
+{
+ if (ddict==NULL) return 0; /* support sizeof on NULL */
+ return sizeof(*ddict) + (ddict->dictBuffer ? ddict->dictSize : 0) ;
+}
+
+/*! ZSTD_getDictID_fromDDict() :
+ * Provides the dictID of the dictionary loaded into `ddict`.
+ * If @return == 0, the dictionary is not conformant to the Zstandard specification, or is empty.
+ * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
+unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict)
+{
+ if (ddict==NULL) return 0;
+ return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize);
+}
diff --git a/lib/zstd/decompress/zstd_ddict.h b/lib/zstd/decompress/zstd_ddict.h
new file mode 100644
index 00000000000000..8c1a79d666f89a
--- /dev/null
+++ b/lib/zstd/decompress/zstd_ddict.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+#ifndef ZSTD_DDICT_H
+#define ZSTD_DDICT_H
+
+/*-*******************************************************
+ * Dependencies
+ *********************************************************/
+#include "../common/zstd_deps.h" /* size_t */
+#include <linux/zstd.h> /* ZSTD_DDict, and several public functions */
+
+
+/*-*******************************************************
+ * Interface
+ *********************************************************/
+
+/* note: several prototypes are already published in `zstd.h` :
+ * ZSTD_createDDict()
+ * ZSTD_createDDict_byReference()
+ * ZSTD_createDDict_advanced()
+ * ZSTD_freeDDict()
+ * ZSTD_initStaticDDict()
+ * ZSTD_sizeof_DDict()
+ * ZSTD_estimateDDictSize()
+ * ZSTD_getDictID_fromDict()
+ */
+
+const void* ZSTD_DDict_dictContent(const ZSTD_DDict* ddict);
+size_t ZSTD_DDict_dictSize(const ZSTD_DDict* ddict);
+
+void ZSTD_copyDDictParameters(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
+
+
+
+#endif /* ZSTD_DDICT_H */
diff --git a/lib/zstd/decompress/zstd_decompress.c b/lib/zstd/decompress/zstd_decompress.c
new file mode 100644
index 00000000000000..b4d81d84479ac7
--- /dev/null
+++ b/lib/zstd/decompress/zstd_decompress.c
@@ -0,0 +1,2085 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+/* ***************************************************************
+* Tuning parameters
+*****************************************************************/
+/*!
+ * HEAPMODE :
+ * Select how default decompression function ZSTD_decompress() allocates its context,
+ * on stack (0), or into heap (1, default; requires malloc()).
+ * Note that functions with explicit context such as ZSTD_decompressDCtx() are unaffected.
+ */
+#ifndef ZSTD_HEAPMODE
+# define ZSTD_HEAPMODE 1
+#endif
+
+/*!
+* LEGACY_SUPPORT :
+* if set to 1+, ZSTD_decompress() can decode older formats (v0.1+)
+*/
+
+/*!
+ * MAXWINDOWSIZE_DEFAULT :
+ * maximum window size accepted by DStream __by default__.
+ * Frames requiring more memory will be rejected.
+ * It's possible to set a different limit using ZSTD_DCtx_setMaxWindowSize().
+ */
+#ifndef ZSTD_MAXWINDOWSIZE_DEFAULT
+# define ZSTD_MAXWINDOWSIZE_DEFAULT (((U32)1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT) + 1)
+#endif
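+/* With the usual ZSTD_WINDOWLOG_LIMIT_DEFAULT of 27, this default evaluates
+ * to 128 MiB + 1. */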
+
+/*!
+ * NO_FORWARD_PROGRESS_MAX :
+ * maximum allowed nb of calls to ZSTD_decompressStream()
+ * without any forward progress
+ * (defined as: no byte read from input, and no byte flushed to output)
+ * before triggering an error.
+ */
+#ifndef ZSTD_NO_FORWARD_PROGRESS_MAX
+# define ZSTD_NO_FORWARD_PROGRESS_MAX 16
+#endif
+
+
+/*-*******************************************************
+* Dependencies
+*********************************************************/
+#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */
+#include "../common/cpu.h" /* bmi2 */
+#include "../common/mem.h" /* low level memory routines */
+#define FSE_STATIC_LINKING_ONLY
+#include "../common/fse.h"
+#define HUF_STATIC_LINKING_ONLY
+#include "../common/huf.h"
+#include <linux/xxhash.h> /* xxh64_reset, xxh64_update, xxh64_digest, XXH64 */
+#include "../common/zstd_internal.h" /* blockProperties_t */
+#include "zstd_decompress_internal.h" /* ZSTD_DCtx */
+#include "zstd_ddict.h" /* ZSTD_DDictDictContent */
+#include "zstd_decompress_block.h" /* ZSTD_decompressBlock_internal */
+
+
+
+
+/* ***********************************
+ * Multiple DDicts Hashset internals *
+ *************************************/
+
+#define DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT 4
+#define DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT 3 /* These two constants represent SIZE_MULT/COUNT_MULT load factor without using a float.
+ * Currently, that means a 0.75 load factor.
+ * So, if count * COUNT_MULT / size * SIZE_MULT != 0, then we've exceeded
+ * the load factor of the ddict hash set.
+ */
+
+#define DDICT_HASHSET_TABLE_BASE_SIZE 64
+#define DDICT_HASHSET_RESIZE_FACTOR 2
+
+/* Hash function to determine starting position of dict insertion within the table
+ * Returns an index within [0, hashSet->ddictPtrTableSize)
+ */
+static size_t ZSTD_DDictHashSet_getIndex(const ZSTD_DDictHashSet* hashSet, U32 dictID) {
+ const U64 hash = xxh64(&dictID, sizeof(U32), 0);
+ /* DDict ptr table size is a power of 2, so use size - 1 as a mask to get an index within [0, hashSet->ddictPtrTableSize) */
+ return hash & (hashSet->ddictPtrTableSize - 1);
+}
+
+/* Adds DDict to a hashset without resizing it.
+ * If inserting a DDict with a dictID that already exists in the set, replaces the one in the set.
+ * Returns 0 if successful, or a zstd error code if something went wrong.
+ */
+static size_t ZSTD_DDictHashSet_emplaceDDict(ZSTD_DDictHashSet* hashSet, const ZSTD_DDict* ddict) {
+ const U32 dictID = ZSTD_getDictID_fromDDict(ddict);
+ size_t idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID);
+ const size_t idxRangeMask = hashSet->ddictPtrTableSize - 1;
+ RETURN_ERROR_IF(hashSet->ddictPtrCount == hashSet->ddictPtrTableSize, GENERIC, "Hash set is full!");
+ DEBUGLOG(4, "Hashed index: for dictID: %u is %zu", dictID, idx);
+ while (hashSet->ddictPtrTable[idx] != NULL) {
+ /* Replace existing ddict if inserting ddict with same dictID */
+ if (ZSTD_getDictID_fromDDict(hashSet->ddictPtrTable[idx]) == dictID) {
+ DEBUGLOG(4, "DictID already exists, replacing rather than adding");
+ hashSet->ddictPtrTable[idx] = ddict;
+ return 0;
+ }
+ idx &= idxRangeMask;
+ idx++;
+ }
+ DEBUGLOG(4, "Final idx after probing for dictID %u is: %zu", dictID, idx);
+ hashSet->ddictPtrTable[idx] = ddict;
+ hashSet->ddictPtrCount++;
+ return 0;
+}
+
+/* Expands hash table by factor of DDICT_HASHSET_RESIZE_FACTOR and
+ * rehashes all values, allocates new table, frees old table.
+ * Returns 0 on success, otherwise a zstd error code.
+ */
+static size_t ZSTD_DDictHashSet_expand(ZSTD_DDictHashSet* hashSet, ZSTD_customMem customMem) {
+ size_t newTableSize = hashSet->ddictPtrTableSize * DDICT_HASHSET_RESIZE_FACTOR;
+ const ZSTD_DDict** newTable = (const ZSTD_DDict**)ZSTD_customCalloc(sizeof(ZSTD_DDict*) * newTableSize, customMem);
+ const ZSTD_DDict** oldTable = hashSet->ddictPtrTable;
+ size_t oldTableSize = hashSet->ddictPtrTableSize;
+ size_t i;
+
+ DEBUGLOG(4, "Expanding DDict hash table! Old size: %zu new size: %zu", oldTableSize, newTableSize);
+ RETURN_ERROR_IF(!newTable, memory_allocation, "Expanded hashset allocation failed!");
+ hashSet->ddictPtrTable = newTable;
+ hashSet->ddictPtrTableSize = newTableSize;
+ hashSet->ddictPtrCount = 0;
+ for (i = 0; i < oldTableSize; ++i) {
+ if (oldTable[i] != NULL) {
+ FORWARD_IF_ERROR(ZSTD_DDictHashSet_emplaceDDict(hashSet, oldTable[i]), "");
+ }
+ }
+ ZSTD_customFree((void*)oldTable, customMem);
+ DEBUGLOG(4, "Finished re-hash");
+ return 0;
+}
+
+/* Fetches a DDict with the given dictID
+ * Returns the ZSTD_DDict* with the requested dictID. If it doesn't exist, then returns NULL.
+ */
+static const ZSTD_DDict* ZSTD_DDictHashSet_getDDict(ZSTD_DDictHashSet* hashSet, U32 dictID) {
+ size_t idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID);
+ const size_t idxRangeMask = hashSet->ddictPtrTableSize - 1;
+ DEBUGLOG(4, "Hashed index: for dictID: %u is %zu", dictID, idx);
+ for (;;) {
+ size_t currDictID = ZSTD_getDictID_fromDDict(hashSet->ddictPtrTable[idx]);
+ if (currDictID == dictID || currDictID == 0) {
+ /* currDictID == 0 implies a NULL ddict entry */
+ break;
+ } else {
+ idx &= idxRangeMask; /* Goes to start of table when we reach the end */
+ idx++;
+ }
+ }
+ DEBUGLOG(4, "Final idx after probing for dictID %u is: %zu", dictID, idx);
+ return hashSet->ddictPtrTable[idx];
+}
+
+/* Allocates space for and returns a ddict hash set
+ * The hash set's ZSTD_DDict* table has all values automatically set to NULL to begin with.
+ * Returns NULL if allocation failed.
+ */
+static ZSTD_DDictHashSet* ZSTD_createDDictHashSet(ZSTD_customMem customMem) {
+ ZSTD_DDictHashSet* ret = (ZSTD_DDictHashSet*)ZSTD_customMalloc(sizeof(ZSTD_DDictHashSet), customMem);
+ DEBUGLOG(4, "Allocating new hash set");
+ if (!ret)
+ return NULL;
+ ret->ddictPtrTable = (const ZSTD_DDict**)ZSTD_customCalloc(DDICT_HASHSET_TABLE_BASE_SIZE * sizeof(ZSTD_DDict*), customMem);
+ if (!ret->ddictPtrTable) {
+ ZSTD_customFree(ret, customMem);
+ return NULL;
+ }
+ ret->ddictPtrTableSize = DDICT_HASHSET_TABLE_BASE_SIZE;
+ ret->ddictPtrCount = 0;
+ return ret;
+}
+
+/* Frees the table of ZSTD_DDict* within a hashset, then frees the hashset itself.
+ * Note: The ZSTD_DDict* within the table are NOT freed.
+ */
+static void ZSTD_freeDDictHashSet(ZSTD_DDictHashSet* hashSet, ZSTD_customMem customMem) {
+ DEBUGLOG(4, "Freeing ddict hash set");
+ if (hashSet && hashSet->ddictPtrTable) {
+ ZSTD_customFree((void*)hashSet->ddictPtrTable, customMem);
+ }
+ if (hashSet) {
+ ZSTD_customFree(hashSet, customMem);
+ }
+}
+
+/* Public function: Adds a DDict into the ZSTD_DDictHashSet, possibly triggering a resize of the hash set.
+ * Returns 0 on success, or a ZSTD error.
+ */
+static size_t ZSTD_DDictHashSet_addDDict(ZSTD_DDictHashSet* hashSet, const ZSTD_DDict* ddict, ZSTD_customMem customMem) {
+ DEBUGLOG(4, "Adding dict ID: %u to hashset with - Count: %zu Tablesize: %zu", ZSTD_getDictID_fromDDict(ddict), hashSet->ddictPtrCount, hashSet->ddictPtrTableSize);
+ if (hashSet->ddictPtrCount * DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT / hashSet->ddictPtrTableSize * DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT != 0) {
+ FORWARD_IF_ERROR(ZSTD_DDictHashSet_expand(hashSet, customMem), "");
+ }
+ FORWARD_IF_ERROR(ZSTD_DDictHashSet_emplaceDDict(hashSet, ddict), "");
+ return 0;
+}
+
+/*-*************************************************************
+* Context management
+***************************************************************/
+size_t ZSTD_sizeof_DCtx (const ZSTD_DCtx* dctx)
+{
+ if (dctx==NULL) return 0; /* support sizeof NULL */
+ return sizeof(*dctx)
+ + ZSTD_sizeof_DDict(dctx->ddictLocal)
+ + dctx->inBuffSize + dctx->outBuffSize;
+}
+
+size_t ZSTD_estimateDCtxSize(void) { return sizeof(ZSTD_DCtx); }
+
+
+static size_t ZSTD_startingInputLength(ZSTD_format_e format)
+{
+ size_t const startingInputLength = ZSTD_FRAMEHEADERSIZE_PREFIX(format);
+ /* only supports formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless */
+ assert( (format == ZSTD_f_zstd1) || (format == ZSTD_f_zstd1_magicless) );
+ return startingInputLength;
+}
+
+static void ZSTD_DCtx_resetParameters(ZSTD_DCtx* dctx)
+{
+ assert(dctx->streamStage == zdss_init);
+ dctx->format = ZSTD_f_zstd1;
+ dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
+ dctx->outBufferMode = ZSTD_bm_buffered;
+ dctx->forceIgnoreChecksum = ZSTD_d_validateChecksum;
+ dctx->refMultipleDDicts = ZSTD_rmd_refSingleDDict;
+}
+
+static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx)
+{
+ dctx->staticSize = 0;
+ dctx->ddict = NULL;
+ dctx->ddictLocal = NULL;
+ dctx->dictEnd = NULL;
+ dctx->ddictIsCold = 0;
+ dctx->dictUses = ZSTD_dont_use;
+ dctx->inBuff = NULL;
+ dctx->inBuffSize = 0;
+ dctx->outBuffSize = 0;
+ dctx->streamStage = zdss_init;
+ dctx->legacyContext = NULL;
+ dctx->previousLegacyVersion = 0;
+ dctx->noForwardProgress = 0;
+ dctx->oversizedDuration = 0;
+ dctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
+ dctx->ddictSet = NULL;
+ ZSTD_DCtx_resetParameters(dctx);
+#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ dctx->dictContentEndForFuzzing = NULL;
+#endif
+}
+
+ZSTD_DCtx* ZSTD_initStaticDCtx(void *workspace, size_t workspaceSize)
+{
+ ZSTD_DCtx* const dctx = (ZSTD_DCtx*) workspace;
+
+ if ((size_t)workspace & 7) return NULL; /* 8-aligned */
+ if (workspaceSize < sizeof(ZSTD_DCtx)) return NULL; /* minimum size */
+
+ ZSTD_initDCtx_internal(dctx);
+ dctx->staticSize = workspaceSize;
+ dctx->inBuff = (char*)(dctx+1);
+ return dctx;
+}
+
+ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem)
+{
+ if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
+
+ { ZSTD_DCtx* const dctx = (ZSTD_DCtx*)ZSTD_customMalloc(sizeof(*dctx), customMem);
+ if (!dctx) return NULL;
+ dctx->customMem = customMem;
+ ZSTD_initDCtx_internal(dctx);
+ return dctx;
+ }
+}
+
+ZSTD_DCtx* ZSTD_createDCtx(void)
+{
+ DEBUGLOG(3, "ZSTD_createDCtx");
+ return ZSTD_createDCtx_advanced(ZSTD_defaultCMem);
+}
+
+static void ZSTD_clearDict(ZSTD_DCtx* dctx)
+{
+ ZSTD_freeDDict(dctx->ddictLocal);
+ dctx->ddictLocal = NULL;
+ dctx->ddict = NULL;
+ dctx->dictUses = ZSTD_dont_use;
+}
+
+size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
+{
+ if (dctx==NULL) return 0; /* support free on NULL */
+ RETURN_ERROR_IF(dctx->staticSize, memory_allocation, "not compatible with static DCtx");
+ { ZSTD_customMem const cMem = dctx->customMem;
+ ZSTD_clearDict(dctx);
+ ZSTD_customFree(dctx->inBuff, cMem);
+ dctx->inBuff = NULL;
+ if (dctx->ddictSet) {
+ ZSTD_freeDDictHashSet(dctx->ddictSet, cMem);
+ dctx->ddictSet = NULL;
+ }
+ ZSTD_customFree(dctx, cMem);
+ return 0;
+ }
+}
+
+/* no longer useful */
+void ZSTD_copyDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx)
+{
+ size_t const toCopy = (size_t)((char*)(&dstDCtx->inBuff) - (char*)dstDCtx);
+ ZSTD_memcpy(dstDCtx, srcDCtx, toCopy); /* no need to copy workspace */
+}
+
+/* Given a dctx with a digested frame params, re-selects the correct ZSTD_DDict based on
+ * the requested dict ID from the frame. If there exists a reference to the correct ZSTD_DDict, then
+ * accordingly sets the ddict to be used to decompress the frame.
+ *
+ * If no DDict is found, then no action is taken, and the ZSTD_DCtx::ddict remains as-is.
+ *
+ * ZSTD_d_refMultipleDDicts must be enabled for this function to be called.
+ */
+static void ZSTD_DCtx_selectFrameDDict(ZSTD_DCtx* dctx) {
+ assert(dctx->refMultipleDDicts && dctx->ddictSet);
+ DEBUGLOG(4, "Adjusting DDict based on requested dict ID from frame");
+ if (dctx->ddict) {
+ const ZSTD_DDict* frameDDict = ZSTD_DDictHashSet_getDDict(dctx->ddictSet, dctx->fParams.dictID);
+ if (frameDDict) {
+ DEBUGLOG(4, "DDict found!");
+ ZSTD_clearDict(dctx);
+ dctx->dictID = dctx->fParams.dictID;
+ dctx->ddict = frameDDict;
+ dctx->dictUses = ZSTD_use_indefinitely;
+ }
+ }
+}
+
+
+/*-*************************************************************
+ * Frame header decoding
+ ***************************************************************/
+
+/*! ZSTD_isFrame() :
+ * Tells if the content of `buffer` starts with a valid Frame Identifier.
+ * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
+ * Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
+ * Note 3 : Skippable Frame Identifiers are considered valid. */
+unsigned ZSTD_isFrame(const void* buffer, size_t size)
+{
+ if (size < ZSTD_FRAMEIDSIZE) return 0;
+ { U32 const magic = MEM_readLE32(buffer);
+ if (magic == ZSTD_MAGICNUMBER) return 1;
+ if ((magic & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) return 1;
+ }
+ return 0;
+}
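+
+/* Illustrative caller-side probe (assumed helper, not used by this file) :
+ * cheap check before attempting decompression. Skippable frames also answer 1,
+ * so a hit does not guarantee decompressible data. */
+static int example_bufferStartsWithFrame(const void* buf, size_t bufSize)
+{
+ return ZSTD_isFrame(buf, bufSize) ? 1 : 0; /* needs at least 4 bytes to decide */
+}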
+
+/* ZSTD_frameHeaderSize_internal() :
+ * srcSize must be large enough to reach header size fields.
+ * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless.
+ * @return : size of the Frame Header
+ * or an error code, which can be tested with ZSTD_isError() */
+static size_t ZSTD_frameHeaderSize_internal(const void* src, size_t srcSize, ZSTD_format_e format)
+{
+ size_t const minInputSize = ZSTD_startingInputLength(format);
+ RETURN_ERROR_IF(srcSize < minInputSize, srcSize_wrong, "");
+
+ { BYTE const fhd = ((const BYTE*)src)[minInputSize-1];
+ U32 const dictID= fhd & 3;
+ U32 const singleSegment = (fhd >> 5) & 1;
+ U32 const fcsId = fhd >> 6;
+ return minInputSize + !singleSegment
+ + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId]
+ + (singleSegment && !fcsId);
+ }
+}
+
+/* ZSTD_frameHeaderSize() :
+ * srcSize must be >= ZSTD_frameHeaderSize_prefix.
+ * @return : size of the Frame Header,
+ * or an error code (if srcSize is too small) */
+size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize)
+{
+ return ZSTD_frameHeaderSize_internal(src, srcSize, ZSTD_f_zstd1);
+}
+
+
+/* ZSTD_getFrameHeader_advanced() :
+ * decode Frame Header, or require larger `srcSize`.
+ * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless
+ * @return : 0, `zfhPtr` is correctly filled,
+ * >0, `srcSize` is too small, value is wanted `srcSize` amount,
+ * or an error code, which can be tested using ZSTD_isError() */
+size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format)
+{
+ const BYTE* ip = (const BYTE*)src;
+ size_t const minInputSize = ZSTD_startingInputLength(format);
+
+ ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr)); /* not strictly necessary, but static analyzers do not understand that zfhPtr is only read when the return value is zero, since they are two different signals */
+ if (srcSize < minInputSize) return minInputSize;
+ RETURN_ERROR_IF(src==NULL, GENERIC, "invalid parameter");
+
+ if ( (format != ZSTD_f_zstd1_magicless)
+ && (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) {
+ if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
+ /* skippable frame */
+ if (srcSize < ZSTD_SKIPPABLEHEADERSIZE)
+ return ZSTD_SKIPPABLEHEADERSIZE; /* magic number + frame length */
+ ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr));
+ zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_FRAMEIDSIZE);
+ zfhPtr->frameType = ZSTD_skippableFrame;
+ return 0;
+ }
+ RETURN_ERROR(prefix_unknown, "");
+ }
+
+ /* ensure there is enough `srcSize` to fully read/decode frame header */
+ { size_t const fhsize = ZSTD_frameHeaderSize_internal(src, srcSize, format);
+ if (srcSize < fhsize) return fhsize;
+ zfhPtr->headerSize = (U32)fhsize;
+ }
+
+ { BYTE const fhdByte = ip[minInputSize-1];
+ size_t pos = minInputSize;
+ U32 const dictIDSizeCode = fhdByte&3;
+ U32 const checksumFlag = (fhdByte>>2)&1;
+ U32 const singleSegment = (fhdByte>>5)&1;
+ U32 const fcsID = fhdByte>>6;
+ U64 windowSize = 0;
+ U32 dictID = 0;
+ U64 frameContentSize = ZSTD_CONTENTSIZE_UNKNOWN;
+ RETURN_ERROR_IF((fhdByte & 0x08) != 0, frameParameter_unsupported,
+ "reserved bits, must be zero");
+
+ if (!singleSegment) {
+ BYTE const wlByte = ip[pos++];
+ U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN;
+ RETURN_ERROR_IF(windowLog > ZSTD_WINDOWLOG_MAX, frameParameter_windowTooLarge, "");
+ windowSize = (1ULL << windowLog);
+ windowSize += (windowSize >> 3) * (wlByte&7);
+ }
+ switch(dictIDSizeCode)
+ {
+ default:
+ assert(0); /* impossible */
+ ZSTD_FALLTHROUGH;
+ case 0 : break;
+ case 1 : dictID = ip[pos]; pos++; break;
+ case 2 : dictID = MEM_readLE16(ip+pos); pos+=2; break;
+ case 3 : dictID = MEM_readLE32(ip+pos); pos+=4; break;
+ }
+ switch(fcsID)
+ {
+ default:
+ assert(0); /* impossible */
+ ZSTD_FALLTHROUGH;
+ case 0 : if (singleSegment) frameContentSize = ip[pos]; break;
+ case 1 : frameContentSize = MEM_readLE16(ip+pos)+256; break;
+ case 2 : frameContentSize = MEM_readLE32(ip+pos); break;
+ case 3 : frameContentSize = MEM_readLE64(ip+pos); break;
+ }
+ if (singleSegment) windowSize = frameContentSize;
+
+ zfhPtr->frameType = ZSTD_frame;
+ zfhPtr->frameContentSize = frameContentSize;
+ zfhPtr->windowSize = windowSize;
+ zfhPtr->blockSizeMax = (unsigned) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
+ zfhPtr->dictID = dictID;
+ zfhPtr->checksumFlag = checksumFlag;
+ }
+ return 0;
+}
+
+/* ZSTD_getFrameHeader() :
+ * decode Frame Header, or require larger `srcSize`.
+ * note : this function does not consume input, it only reads it.
+ * @return : 0, `zfhPtr` is correctly filled,
+ * >0, `srcSize` is too small, value is wanted `srcSize` amount,
+ * or an error code, which can be tested using ZSTD_isError() */
+size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize)
+{
+ return ZSTD_getFrameHeader_advanced(zfhPtr, src, srcSize, ZSTD_f_zstd1);
+}
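+
+/* Minimal sketch (assumed caller-side code, not exercised here) of the three
+ * outcomes of ZSTD_getFrameHeader() : header decoded, more input needed, or error.
+ * The helper name and dictIDOut parameter are illustrative. */
+static size_t example_readFrameHeader(const void* src, size_t srcSize, unsigned* dictIDOut)
+{
+ ZSTD_frameHeader zfh;
+ size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize);
+ if (ZSTD_isError(ret)) return ret; /* not a zstd frame, or corrupted header */
+ if (ret > 0) return ret; /* incomplete : ret is the srcSize needed to retry */
+ *dictIDOut = zfh.dictID; /* 0 when the frame does not declare a dictionary */
+ return 0;
+}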
+
+
+/* ZSTD_getFrameContentSize() :
+ * compatible with legacy mode
+ * @return : decompressed size of the single frame pointed to by `src` if known, otherwise
+ * - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
+ * - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */
+unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize)
+{
+ { ZSTD_frameHeader zfh;
+ if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0)
+ return ZSTD_CONTENTSIZE_ERROR;
+ if (zfh.frameType == ZSTD_skippableFrame) {
+ return 0;
+ } else {
+ return zfh.frameContentSize;
+ } }
+}
+
+static size_t readSkippableFrameSize(void const* src, size_t srcSize)
+{
+ size_t const skippableHeaderSize = ZSTD_SKIPPABLEHEADERSIZE;
+ U32 sizeU32;
+
+ RETURN_ERROR_IF(srcSize < ZSTD_SKIPPABLEHEADERSIZE, srcSize_wrong, "");
+
+ sizeU32 = MEM_readLE32((BYTE const*)src + ZSTD_FRAMEIDSIZE);
+ RETURN_ERROR_IF((U32)(sizeU32 + ZSTD_SKIPPABLEHEADERSIZE) < sizeU32,
+ frameParameter_unsupported, "");
+ {
+ size_t const skippableSize = skippableHeaderSize + sizeU32;
+ RETURN_ERROR_IF(skippableSize > srcSize, srcSize_wrong, "");
+ return skippableSize;
+ }
+}
+
+/* ZSTD_findDecompressedSize() :
+ * compatible with legacy mode
+ * `srcSize` must be the exact length of some number of ZSTD compressed and/or
+ * skippable frames
+ * @return : decompressed size of the frames contained */
+unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)
+{
+ unsigned long long totalDstSize = 0;
+
+ while (srcSize >= ZSTD_startingInputLength(ZSTD_f_zstd1)) {
+ U32 const magicNumber = MEM_readLE32(src);
+
+ if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
+ size_t const skippableSize = readSkippableFrameSize(src, srcSize);
+ if (ZSTD_isError(skippableSize)) {
+ return ZSTD_CONTENTSIZE_ERROR;
+ }
+ assert(skippableSize <= srcSize);
+
+ src = (const BYTE *)src + skippableSize;
+ srcSize -= skippableSize;
+ continue;
+ }
+
+ { unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
+ if (ret >= ZSTD_CONTENTSIZE_ERROR) return ret;
+
+ /* check for overflow */
+ if (totalDstSize + ret < totalDstSize) return ZSTD_CONTENTSIZE_ERROR;
+ totalDstSize += ret;
+ }
+ { size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize);
+ if (ZSTD_isError(frameSrcSize)) {
+ return ZSTD_CONTENTSIZE_ERROR;
+ }
+
+ src = (const BYTE *)src + frameSrcSize;
+ srcSize -= frameSrcSize;
+ }
+ } /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */
+
+ if (srcSize) return ZSTD_CONTENTSIZE_ERROR;
+
+ return totalDstSize;
+}
+
+/* ZSTD_getDecompressedSize() :
+ * compatible with legacy mode
+ * @return : decompressed size if known, 0 otherwise
+ note : 0 can mean any of the following :
+ - frame content is empty
+ - decompressed size field is not present in frame header
+ - frame header unknown / not supported
+ - frame header not complete (`srcSize` too small) */
+unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize)
+{
+ unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
+ ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_ERROR < ZSTD_CONTENTSIZE_UNKNOWN);
+ return (ret >= ZSTD_CONTENTSIZE_ERROR) ? 0 : ret;
+}
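+
+/* Sketch of the expected caller-side handling (illustrative only) : both sentinel
+ * values must be filtered out before the result is used as a buffer size. */
+static int example_getDstSize(const void* src, size_t srcSize, unsigned long long* dstSizeOut)
+{
+ unsigned long long const size = ZSTD_getFrameContentSize(src, srcSize);
+ if (size == ZSTD_CONTENTSIZE_ERROR) return -1; /* not a valid frame */
+ if (size == ZSTD_CONTENTSIZE_UNKNOWN) return -1; /* caller must fall back to streaming */
+ *dstSizeOut = size;
+ return 0;
+}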
+
+
+/* ZSTD_decodeFrameHeader() :
+ * `headerSize` must be the size provided by ZSTD_frameHeaderSize().
+ * If multiple DDict references are enabled, also will choose the correct DDict to use.
+ * @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */
+static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t headerSize)
+{
+ size_t const result = ZSTD_getFrameHeader_advanced(&(dctx->fParams), src, headerSize, dctx->format);
+ if (ZSTD_isError(result)) return result; /* invalid header */
+ RETURN_ERROR_IF(result>0, srcSize_wrong, "headerSize too small");
+
+ /* Reference DDict requested by frame if dctx references multiple ddicts */
+ if (dctx->refMultipleDDicts == ZSTD_rmd_refMultipleDDicts && dctx->ddictSet) {
+ ZSTD_DCtx_selectFrameDDict(dctx);
+ }
+
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ /* Skip the dictID check in fuzzing mode, because it makes the search
+ * harder.
+ */
+ RETURN_ERROR_IF(dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID),
+ dictionary_wrong, "");
+#endif
+ dctx->validateChecksum = (dctx->fParams.checksumFlag && !dctx->forceIgnoreChecksum) ? 1 : 0;
+ if (dctx->validateChecksum) xxh64_reset(&dctx->xxhState, 0);
+ dctx->processedCSize += headerSize;
+ return 0;
+}
+
+static ZSTD_frameSizeInfo ZSTD_errorFrameSizeInfo(size_t ret)
+{
+ ZSTD_frameSizeInfo frameSizeInfo;
+ frameSizeInfo.compressedSize = ret;
+ frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR;
+ return frameSizeInfo;
+}
+
+static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize)
+{
+ ZSTD_frameSizeInfo frameSizeInfo;
+ ZSTD_memset(&frameSizeInfo, 0, sizeof(ZSTD_frameSizeInfo));
+
+
+ if ((srcSize >= ZSTD_SKIPPABLEHEADERSIZE)
+ && (MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
+ frameSizeInfo.compressedSize = readSkippableFrameSize(src, srcSize);
+ assert(ZSTD_isError(frameSizeInfo.compressedSize) ||
+ frameSizeInfo.compressedSize <= srcSize);
+ return frameSizeInfo;
+ } else {
+ const BYTE* ip = (const BYTE*)src;
+ const BYTE* const ipstart = ip;
+ size_t remainingSize = srcSize;
+ size_t nbBlocks = 0;
+ ZSTD_frameHeader zfh;
+
+ /* Extract Frame Header */
+ { size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize);
+ if (ZSTD_isError(ret))
+ return ZSTD_errorFrameSizeInfo(ret);
+ if (ret > 0)
+ return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
+ }
+
+ ip += zfh.headerSize;
+ remainingSize -= zfh.headerSize;
+
+ /* Iterate over each block */
+ while (1) {
+ blockProperties_t blockProperties;
+ size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
+ if (ZSTD_isError(cBlockSize))
+ return ZSTD_errorFrameSizeInfo(cBlockSize);
+
+ if (ZSTD_blockHeaderSize + cBlockSize > remainingSize)
+ return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
+
+ ip += ZSTD_blockHeaderSize + cBlockSize;
+ remainingSize -= ZSTD_blockHeaderSize + cBlockSize;
+ nbBlocks++;
+
+ if (blockProperties.lastBlock) break;
+ }
+
+ /* Final frame content checksum */
+ if (zfh.checksumFlag) {
+ if (remainingSize < 4)
+ return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
+ ip += 4;
+ }
+
+ frameSizeInfo.compressedSize = (size_t)(ip - ipstart);
+ frameSizeInfo.decompressedBound = (zfh.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN)
+ ? zfh.frameContentSize
+ : nbBlocks * zfh.blockSizeMax;
+ return frameSizeInfo;
+ }
+}
+
+/* ZSTD_findFrameCompressedSize() :
+ * compatible with legacy mode
+ * `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame
+ * `srcSize` must be at least as large as the frame contained
+ * @return : the compressed size of the frame starting at `src` */
+size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
+{
+ ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);
+ return frameSizeInfo.compressedSize;
+}
+
+/* ZSTD_decompressBound() :
+ * compatible with legacy mode
+ * `src` must point to the start of a ZSTD frame or a skippable frame
+ * `srcSize` must be at least as large as the frame contained
+ * @return : the maximum decompressed size of the compressed source
+ */
+unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize)
+{
+ unsigned long long bound = 0;
+ /* Iterate over each frame */
+ while (srcSize > 0) {
+ ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);
+ size_t const compressedSize = frameSizeInfo.compressedSize;
+ unsigned long long const decompressedBound = frameSizeInfo.decompressedBound;
+ if (ZSTD_isError(compressedSize) || decompressedBound == ZSTD_CONTENTSIZE_ERROR)
+ return ZSTD_CONTENTSIZE_ERROR;
+ assert(srcSize >= compressedSize);
+ src = (const BYTE*)src + compressedSize;
+ srcSize -= compressedSize;
+ bound += decompressedBound;
+ }
+ return bound;
+}
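+
+/* Illustrative caller-side check (not used by this file) : validate that a
+ * destination buffer can hold the worst-case regeneration of every frame in src. */
+static int example_dstLargeEnough(const void* src, size_t srcSize, size_t dstCapacity)
+{
+ unsigned long long const bound = ZSTD_decompressBound(src, srcSize);
+ if (bound == ZSTD_CONTENTSIZE_ERROR) return 0; /* invalid input */
+ return (unsigned long long)dstCapacity >= bound;
+}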
+
+
+/*-*************************************************************
+ * Frame decoding
+ ***************************************************************/
+
+/* ZSTD_insertBlock() :
+ * insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
+size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize)
+{
+ DEBUGLOG(5, "ZSTD_insertBlock: %u bytes", (unsigned)blockSize);
+ ZSTD_checkContinuity(dctx, blockStart, blockSize);
+ dctx->previousDstEnd = (const char*)blockStart + blockSize;
+ return blockSize;
+}
+
+
+static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ DEBUGLOG(5, "ZSTD_copyRawBlock");
+ RETURN_ERROR_IF(srcSize > dstCapacity, dstSize_tooSmall, "");
+ if (dst == NULL) {
+ if (srcSize == 0) return 0;
+ RETURN_ERROR(dstBuffer_null, "");
+ }
+ ZSTD_memcpy(dst, src, srcSize);
+ return srcSize;
+}
+
+static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity,
+ BYTE b,
+ size_t regenSize)
+{
+ RETURN_ERROR_IF(regenSize > dstCapacity, dstSize_tooSmall, "");
+ if (dst == NULL) {
+ if (regenSize == 0) return 0;
+ RETURN_ERROR(dstBuffer_null, "");
+ }
+ ZSTD_memset(dst, b, regenSize);
+ return regenSize;
+}
+
+static void ZSTD_DCtx_trace_end(ZSTD_DCtx const* dctx, U64 uncompressedSize, U64 compressedSize, unsigned streaming)
+{
+ (void)dctx;
+ (void)uncompressedSize;
+ (void)compressedSize;
+ (void)streaming;
+}
+
+
+/*! ZSTD_decompressFrame() :
+ * @dctx must be properly initialized
+ * will update *srcPtr and *srcSizePtr,
+ * to make *srcPtr progress by one frame. */
+static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void** srcPtr, size_t *srcSizePtr)
+{
+ const BYTE* const istart = (const BYTE*)(*srcPtr);
+ const BYTE* ip = istart;
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = dstCapacity != 0 ? ostart + dstCapacity : ostart;
+ BYTE* op = ostart;
+ size_t remainingSrcSize = *srcSizePtr;
+
+ DEBUGLOG(4, "ZSTD_decompressFrame (srcSize:%i)", (int)*srcSizePtr);
+
+ /* check */
+ RETURN_ERROR_IF(
+ remainingSrcSize < ZSTD_FRAMEHEADERSIZE_MIN(dctx->format)+ZSTD_blockHeaderSize,
+ srcSize_wrong, "");
+
+ /* Frame Header */
+ { size_t const frameHeaderSize = ZSTD_frameHeaderSize_internal(
+ ip, ZSTD_FRAMEHEADERSIZE_PREFIX(dctx->format), dctx->format);
+ if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;
+ RETURN_ERROR_IF(remainingSrcSize < frameHeaderSize+ZSTD_blockHeaderSize,
+ srcSize_wrong, "");
+ FORWARD_IF_ERROR( ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize) , "");
+ ip += frameHeaderSize; remainingSrcSize -= frameHeaderSize;
+ }
+
+ /* Loop on each block */
+ while (1) {
+ size_t decodedSize;
+ blockProperties_t blockProperties;
+ size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSrcSize, &blockProperties);
+ if (ZSTD_isError(cBlockSize)) return cBlockSize;
+
+ ip += ZSTD_blockHeaderSize;
+ remainingSrcSize -= ZSTD_blockHeaderSize;
+ RETURN_ERROR_IF(cBlockSize > remainingSrcSize, srcSize_wrong, "");
+
+ switch(blockProperties.blockType)
+ {
+ case bt_compressed:
+ decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oend-op), ip, cBlockSize, /* frame */ 1);
+ break;
+ case bt_raw :
+ decodedSize = ZSTD_copyRawBlock(op, (size_t)(oend-op), ip, cBlockSize);
+ break;
+ case bt_rle :
+ decodedSize = ZSTD_setRleBlock(op, (size_t)(oend-op), *ip, blockProperties.origSize);
+ break;
+ case bt_reserved :
+ default:
+ RETURN_ERROR(corruption_detected, "invalid block type");
+ }
+
+ if (ZSTD_isError(decodedSize)) return decodedSize;
+ if (dctx->validateChecksum)
+ xxh64_update(&dctx->xxhState, op, decodedSize);
+ if (decodedSize != 0)
+ op += decodedSize;
+ assert(ip != NULL);
+ ip += cBlockSize;
+ remainingSrcSize -= cBlockSize;
+ if (blockProperties.lastBlock) break;
+ }
+
+ if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) {
+ RETURN_ERROR_IF((U64)(op-ostart) != dctx->fParams.frameContentSize,
+ corruption_detected, "");
+ }
+ if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */
+ RETURN_ERROR_IF(remainingSrcSize<4, checksum_wrong, "");
+ if (!dctx->forceIgnoreChecksum) {
+ U32 const checkCalc = (U32)xxh64_digest(&dctx->xxhState);
+ U32 checkRead;
+ checkRead = MEM_readLE32(ip);
+ RETURN_ERROR_IF(checkRead != checkCalc, checksum_wrong, "");
+ }
+ ip += 4;
+ remainingSrcSize -= 4;
+ }
+ ZSTD_DCtx_trace_end(dctx, (U64)(op-ostart), (U64)(ip-istart), /* streaming */ 0);
+ /* Allow caller to get size read */
+ *srcPtr = ip;
+ *srcSizePtr = remainingSrcSize;
+ return (size_t)(op-ostart);
+}
+
+static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict, size_t dictSize,
+ const ZSTD_DDict* ddict)
+{
+ void* const dststart = dst;
+ int moreThan1Frame = 0;
+
+ DEBUGLOG(5, "ZSTD_decompressMultiFrame");
+ assert(dict==NULL || ddict==NULL); /* either dict or ddict set, not both */
+
+ if (ddict) {
+ dict = ZSTD_DDict_dictContent(ddict);
+ dictSize = ZSTD_DDict_dictSize(ddict);
+ }
+
+ while (srcSize >= ZSTD_startingInputLength(dctx->format)) {
+
+
+ { U32 const magicNumber = MEM_readLE32(src);
+ DEBUGLOG(4, "reading magic number %08X (expecting %08X)",
+ (unsigned)magicNumber, ZSTD_MAGICNUMBER);
+ if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
+ size_t const skippableSize = readSkippableFrameSize(src, srcSize);
+ FORWARD_IF_ERROR(skippableSize, "readSkippableFrameSize failed");
+ assert(skippableSize <= srcSize);
+
+ src = (const BYTE *)src + skippableSize;
+ srcSize -= skippableSize;
+ continue;
+ } }
+
+ if (ddict) {
+ /* we were called from ZSTD_decompress_usingDDict */
+ FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(dctx, ddict), "");
+ } else {
+ /* this will initialize correctly with no dict if dict == NULL, so
+ * use this in all cases but ddict */
+ FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize), "");
+ }
+ ZSTD_checkContinuity(dctx, dst, dstCapacity);
+
+ { const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity,
+ &src, &srcSize);
+ RETURN_ERROR_IF(
+ (ZSTD_getErrorCode(res) == ZSTD_error_prefix_unknown)
+ && (moreThan1Frame==1),
+ srcSize_wrong,
+ "At least one frame successfully completed, "
+ "but following bytes are garbage: "
+ "it's more likely to be a srcSize error, "
+ "specifying more input bytes than size of frame(s). "
+ "Note: one could be unlucky, it might be a corruption error instead, "
+ "happening right at the place where we expect zstd magic bytes. "
+ "But this is _much_ less likely than a srcSize field error.");
+ if (ZSTD_isError(res)) return res;
+ assert(res <= dstCapacity);
+ if (res != 0)
+ dst = (BYTE*)dst + res;
+ dstCapacity -= res;
+ }
+ moreThan1Frame = 1;
+ } /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */
+
+ RETURN_ERROR_IF(srcSize, srcSize_wrong, "input not entirely consumed");
+
+ return (size_t)((BYTE*)dst - (BYTE*)dststart);
+}
+
+size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict, size_t dictSize)
+{
+ return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, NULL);
+}
+
+
+static ZSTD_DDict const* ZSTD_getDDict(ZSTD_DCtx* dctx)
+{
+ switch (dctx->dictUses) {
+ default:
+ assert(0 /* Impossible */);
+ ZSTD_FALLTHROUGH;
+ case ZSTD_dont_use:
+ ZSTD_clearDict(dctx);
+ return NULL;
+ case ZSTD_use_indefinitely:
+ return dctx->ddict;
+ case ZSTD_use_once:
+ dctx->dictUses = ZSTD_dont_use;
+ return dctx->ddict;
+ }
+}
+
+size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ return ZSTD_decompress_usingDDict(dctx, dst, dstCapacity, src, srcSize, ZSTD_getDDict(dctx));
+}
+
+
+size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+#if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE>=1)
+ size_t regenSize;
+ ZSTD_DCtx* const dctx = ZSTD_createDCtx();
+ RETURN_ERROR_IF(dctx==NULL, memory_allocation, "NULL pointer!");
+ regenSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
+ ZSTD_freeDCtx(dctx);
+ return regenSize;
+#else /* stack mode */
+ ZSTD_DCtx dctx;
+ ZSTD_initDCtx_internal(&dctx);
+ return ZSTD_decompressDCtx(&dctx, dst, dstCapacity, src, srcSize);
+#endif
+}
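+
+/* Minimal single-shot sketch (illustrative ; error handling trimmed). A caller
+ * decompressing repeatedly would keep the DCtx alive between calls rather than
+ * re-creating it each time as done here, which is also what ZSTD_decompress()
+ * does internally in heap mode. */
+static size_t example_decompressOnce(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ ZSTD_DCtx* const dctx = ZSTD_createDCtx();
+ size_t result;
+ if (dctx == NULL) return ERROR(memory_allocation);
+ result = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
+ ZSTD_freeDCtx(dctx);
+ return result; /* regenerated size, or an error checked with ZSTD_isError() */
+}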
+
+
+/*-**************************************
+* Advanced Streaming Decompression API
+* Bufferless and synchronous
+****************************************/
+size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx) { return dctx->expected; }
+
+/*
+ * Similar to ZSTD_nextSrcSizeToDecompress(), but when a block input can be streamed,
+ * we allow taking a partial block as the input. Currently only raw uncompressed blocks can
+ * be streamed.
+ *
+ * For blocks that can be streamed, this allows us to reduce the latency until we produce
+ * output, and avoid copying the input.
+ *
+ * @param inputSize - The total amount of input that the caller currently has.
+ */
+static size_t ZSTD_nextSrcSizeToDecompressWithInputSize(ZSTD_DCtx* dctx, size_t inputSize) {
+ if (!(dctx->stage == ZSTDds_decompressBlock || dctx->stage == ZSTDds_decompressLastBlock))
+ return dctx->expected;
+ if (dctx->bType != bt_raw)
+ return dctx->expected;
+ return MIN(MAX(inputSize, 1), dctx->expected);
+}
+
+ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) {
+ switch(dctx->stage)
+ {
+ default: /* should not happen */
+ assert(0);
+ ZSTD_FALLTHROUGH;
+ case ZSTDds_getFrameHeaderSize:
+ ZSTD_FALLTHROUGH;
+ case ZSTDds_decodeFrameHeader:
+ return ZSTDnit_frameHeader;
+ case ZSTDds_decodeBlockHeader:
+ return ZSTDnit_blockHeader;
+ case ZSTDds_decompressBlock:
+ return ZSTDnit_block;
+ case ZSTDds_decompressLastBlock:
+ return ZSTDnit_lastBlock;
+ case ZSTDds_checkChecksum:
+ return ZSTDnit_checksum;
+ case ZSTDds_decodeSkippableHeader:
+ ZSTD_FALLTHROUGH;
+ case ZSTDds_skipFrame:
+ return ZSTDnit_skippableFrame;
+ }
+}
+
+static int ZSTD_isSkipFrame(ZSTD_DCtx* dctx) { return dctx->stage == ZSTDds_skipFrame; }
+
+/* ZSTD_decompressContinue() :
+ * srcSize : must be the exact nb of bytes expected (see ZSTD_nextSrcSizeToDecompress())
+ * @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity`)
+ * or an error code, which can be tested using ZSTD_isError() */
+size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ DEBUGLOG(5, "ZSTD_decompressContinue (srcSize:%u)", (unsigned)srcSize);
+ /* Sanity check */
+ RETURN_ERROR_IF(srcSize != ZSTD_nextSrcSizeToDecompressWithInputSize(dctx, srcSize), srcSize_wrong, "not allowed");
+ ZSTD_checkContinuity(dctx, dst, dstCapacity);
+
+ dctx->processedCSize += srcSize;
+
+ switch (dctx->stage)
+ {
+ case ZSTDds_getFrameHeaderSize :
+ assert(src != NULL);
+ if (dctx->format == ZSTD_f_zstd1) { /* allows header */
+ assert(srcSize >= ZSTD_FRAMEIDSIZE); /* to read skippable magic number */
+ if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
+ ZSTD_memcpy(dctx->headerBuffer, src, srcSize);
+ dctx->expected = ZSTD_SKIPPABLEHEADERSIZE - srcSize; /* remaining to load to get full skippable frame header */
+ dctx->stage = ZSTDds_decodeSkippableHeader;
+ return 0;
+ } }
+ dctx->headerSize = ZSTD_frameHeaderSize_internal(src, srcSize, dctx->format);
+ if (ZSTD_isError(dctx->headerSize)) return dctx->headerSize;
+ ZSTD_memcpy(dctx->headerBuffer, src, srcSize);
+ dctx->expected = dctx->headerSize - srcSize;
+ dctx->stage = ZSTDds_decodeFrameHeader;
+ return 0;
+
+ case ZSTDds_decodeFrameHeader:
+ assert(src != NULL);
+ ZSTD_memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, srcSize);
+ FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize), "");
+ dctx->expected = ZSTD_blockHeaderSize;
+ dctx->stage = ZSTDds_decodeBlockHeader;
+ return 0;
+
+ case ZSTDds_decodeBlockHeader:
+ { blockProperties_t bp;
+ size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
+ if (ZSTD_isError(cBlockSize)) return cBlockSize;
+ RETURN_ERROR_IF(cBlockSize > dctx->fParams.blockSizeMax, corruption_detected, "Block Size Exceeds Maximum");
+ dctx->expected = cBlockSize;
+ dctx->bType = bp.blockType;
+ dctx->rleSize = bp.origSize;
+ if (cBlockSize) {
+ dctx->stage = bp.lastBlock ? ZSTDds_decompressLastBlock : ZSTDds_decompressBlock;
+ return 0;
+ }
+ /* empty block */
+ if (bp.lastBlock) {
+ if (dctx->fParams.checksumFlag) {
+ dctx->expected = 4;
+ dctx->stage = ZSTDds_checkChecksum;
+ } else {
+ dctx->expected = 0; /* end of frame */
+ dctx->stage = ZSTDds_getFrameHeaderSize;
+ }
+ } else {
+ dctx->expected = ZSTD_blockHeaderSize; /* jump to next header */
+ dctx->stage = ZSTDds_decodeBlockHeader;
+ }
+ return 0;
+ }
+
+ case ZSTDds_decompressLastBlock:
+ case ZSTDds_decompressBlock:
+ DEBUGLOG(5, "ZSTD_decompressContinue: case ZSTDds_decompressBlock");
+ { size_t rSize;
+ switch(dctx->bType)
+ {
+ case bt_compressed:
+ DEBUGLOG(5, "ZSTD_decompressContinue: case bt_compressed");
+ rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 1);
+ dctx->expected = 0; /* Streaming not supported */
+ break;
+ case bt_raw :
+ assert(srcSize <= dctx->expected);
+ rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize);
+ FORWARD_IF_ERROR(rSize, "ZSTD_copyRawBlock failed");
+ assert(rSize == srcSize);
+ dctx->expected -= rSize;
+ break;
+ case bt_rle :
+ rSize = ZSTD_setRleBlock(dst, dstCapacity, *(const BYTE*)src, dctx->rleSize);
+ dctx->expected = 0; /* Streaming not supported */
+ break;
+ case bt_reserved : /* should never happen */
+ default:
+ RETURN_ERROR(corruption_detected, "invalid block type");
+ }
+ FORWARD_IF_ERROR(rSize, "");
+ RETURN_ERROR_IF(rSize > dctx->fParams.blockSizeMax, corruption_detected, "Decompressed Block Size Exceeds Maximum");
+ DEBUGLOG(5, "ZSTD_decompressContinue: decoded size from block : %u", (unsigned)rSize);
+ dctx->decodedSize += rSize;
+ if (dctx->validateChecksum) xxh64_update(&dctx->xxhState, dst, rSize);
+ dctx->previousDstEnd = (char*)dst + rSize;
+
+ /* Stay on the same stage until we are finished streaming the block. */
+ if (dctx->expected > 0) {
+ return rSize;
+ }
+
+ if (dctx->stage == ZSTDds_decompressLastBlock) { /* end of frame */
+ DEBUGLOG(4, "ZSTD_decompressContinue: decoded size from frame : %u", (unsigned)dctx->decodedSize);
+ RETURN_ERROR_IF(
+ dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
+ && dctx->decodedSize != dctx->fParams.frameContentSize,
+ corruption_detected, "");
+ if (dctx->fParams.checksumFlag) { /* another round for frame checksum */
+ dctx->expected = 4;
+ dctx->stage = ZSTDds_checkChecksum;
+ } else {
+ ZSTD_DCtx_trace_end(dctx, dctx->decodedSize, dctx->processedCSize, /* streaming */ 1);
+ dctx->expected = 0; /* ends here */
+ dctx->stage = ZSTDds_getFrameHeaderSize;
+ }
+ } else {
+ dctx->stage = ZSTDds_decodeBlockHeader;
+ dctx->expected = ZSTD_blockHeaderSize;
+ }
+ return rSize;
+ }
+
+ case ZSTDds_checkChecksum:
+ assert(srcSize == 4); /* guaranteed by dctx->expected */
+ {
+ if (dctx->validateChecksum) {
+ U32 const h32 = (U32)xxh64_digest(&dctx->xxhState);
+ U32 const check32 = MEM_readLE32(src);
+ DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", (unsigned)h32, (unsigned)check32);
+ RETURN_ERROR_IF(check32 != h32, checksum_wrong, "");
+ }
+ ZSTD_DCtx_trace_end(dctx, dctx->decodedSize, dctx->processedCSize, /* streaming */ 1);
+ dctx->expected = 0;
+ dctx->stage = ZSTDds_getFrameHeaderSize;
+ return 0;
+ }
+
+ case ZSTDds_decodeSkippableHeader:
+ assert(src != NULL);
+ assert(srcSize <= ZSTD_SKIPPABLEHEADERSIZE);
+ ZSTD_memcpy(dctx->headerBuffer + (ZSTD_SKIPPABLEHEADERSIZE - srcSize), src, srcSize); /* complete skippable header */
+ dctx->expected = MEM_readLE32(dctx->headerBuffer + ZSTD_FRAMEIDSIZE); /* note : dctx->expected can grow seriously large, beyond local buffer size */
+ dctx->stage = ZSTDds_skipFrame;
+ return 0;
+
+ case ZSTDds_skipFrame:
+ dctx->expected = 0;
+ dctx->stage = ZSTDds_getFrameHeaderSize;
+ return 0;
+
+ default:
+ assert(0); /* impossible */
+ RETURN_ERROR(GENERIC, "impossible to reach"); /* some compiler require default to do something */
+ }
+}
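+
+/* Bufferless loop sketch (illustrative ; single frame, whole input already in
+ * memory) : feed ZSTD_decompressContinue() exactly the number of bytes it asks
+ * for and accumulate whatever it produces ; decoded data is written contiguously
+ * into dst. */
+static size_t example_bufferlessDecompress(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ const BYTE* ip = (const BYTE*)src;
+ BYTE* const op = (BYTE*)dst;
+ size_t remaining = srcSize;
+ size_t produced = 0;
+ FORWARD_IF_ERROR(ZSTD_decompressBegin(dctx), "");
+ while (1) {
+ size_t const toRead = ZSTD_nextSrcSizeToDecompress(dctx);
+ size_t generated;
+ if (toRead == 0) break; /* frame fully decoded */
+ if (toRead > remaining) return ERROR(srcSize_wrong);
+ generated = ZSTD_decompressContinue(dctx, op + produced, dstCapacity - produced, ip, toRead);
+ FORWARD_IF_ERROR(generated, "");
+ ip += toRead; remaining -= toRead;
+ produced += generated;
+ }
+ return produced;
+}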
+
+
+static size_t ZSTD_refDictContent(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ dctx->dictEnd = dctx->previousDstEnd;
+ dctx->virtualStart = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));
+ dctx->prefixStart = dict;
+ dctx->previousDstEnd = (const char*)dict + dictSize;
+#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ dctx->dictContentBeginForFuzzing = dctx->prefixStart;
+ dctx->dictContentEndForFuzzing = dctx->previousDstEnd;
+#endif
+ return 0;
+}
+
+/*! ZSTD_loadDEntropy() :
+ * dict : must point at beginning of a valid zstd dictionary.
+ * @return : size of entropy tables read */
+size_t
+ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
+ const void* const dict, size_t const dictSize)
+{
+ const BYTE* dictPtr = (const BYTE*)dict;
+ const BYTE* const dictEnd = dictPtr + dictSize;
+
+ RETURN_ERROR_IF(dictSize <= 8, dictionary_corrupted, "dict is too small");
+ assert(MEM_readLE32(dict) == ZSTD_MAGIC_DICTIONARY); /* dict must be valid */
+ dictPtr += 8; /* skip header = magic + dictID */
+
+ ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, OFTable) == offsetof(ZSTD_entropyDTables_t, LLTable) + sizeof(entropy->LLTable));
+ ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, MLTable) == offsetof(ZSTD_entropyDTables_t, OFTable) + sizeof(entropy->OFTable));
+ ZSTD_STATIC_ASSERT(sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable) >= HUF_DECOMPRESS_WORKSPACE_SIZE);
+ { void* const workspace = &entropy->LLTable; /* use fse tables as temporary workspace; implies fse tables are grouped together */
+ size_t const workspaceSize = sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable);
+#ifdef HUF_FORCE_DECOMPRESS_X1
+ /* in minimal huffman, we always use X1 variants */
+ size_t const hSize = HUF_readDTableX1_wksp(entropy->hufTable,
+ dictPtr, dictEnd - dictPtr,
+ workspace, workspaceSize);
+#else
+ size_t const hSize = HUF_readDTableX2_wksp(entropy->hufTable,
+ dictPtr, (size_t)(dictEnd - dictPtr),
+ workspace, workspaceSize);
+#endif
+ RETURN_ERROR_IF(HUF_isError(hSize), dictionary_corrupted, "");
+ dictPtr += hSize;
+ }
+
+ { short offcodeNCount[MaxOff+1];
+ unsigned offcodeMaxValue = MaxOff, offcodeLog;
+ size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, (size_t)(dictEnd-dictPtr));
+ RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, "");
+ RETURN_ERROR_IF(offcodeMaxValue > MaxOff, dictionary_corrupted, "");
+ RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, "");
+ ZSTD_buildFSETable( entropy->OFTable,
+ offcodeNCount, offcodeMaxValue,
+ OF_base, OF_bits,
+ offcodeLog,
+ entropy->workspace, sizeof(entropy->workspace),
+ /* bmi2 */0);
+ dictPtr += offcodeHeaderSize;
+ }
+
+ { short matchlengthNCount[MaxML+1];
+ unsigned matchlengthMaxValue = MaxML, matchlengthLog;
+ size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, (size_t)(dictEnd-dictPtr));
+ RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, "");
+ RETURN_ERROR_IF(matchlengthMaxValue > MaxML, dictionary_corrupted, "");
+ RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, "");
+ ZSTD_buildFSETable( entropy->MLTable,
+ matchlengthNCount, matchlengthMaxValue,
+ ML_base, ML_bits,
+ matchlengthLog,
+ entropy->workspace, sizeof(entropy->workspace),
+ /* bmi2 */ 0);
+ dictPtr += matchlengthHeaderSize;
+ }
+
+ { short litlengthNCount[MaxLL+1];
+ unsigned litlengthMaxValue = MaxLL, litlengthLog;
+ size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, (size_t)(dictEnd-dictPtr));
+ RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, "");
+ RETURN_ERROR_IF(litlengthMaxValue > MaxLL, dictionary_corrupted, "");
+ RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, "");
+ ZSTD_buildFSETable( entropy->LLTable,
+ litlengthNCount, litlengthMaxValue,
+ LL_base, LL_bits,
+ litlengthLog,
+ entropy->workspace, sizeof(entropy->workspace),
+ /* bmi2 */ 0);
+ dictPtr += litlengthHeaderSize;
+ }
+
+ RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted, "");
+ { int i;
+ size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12));
+ for (i=0; i<3; i++) {
+ U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4;
+ RETURN_ERROR_IF(rep==0 || rep > dictContentSize,
+ dictionary_corrupted, "");
+ entropy->rep[i] = rep;
+ } }
+
+ return (size_t)(dictPtr - (const BYTE*)dict);
+}
+
+static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ if (dictSize < 8) return ZSTD_refDictContent(dctx, dict, dictSize);
+ { U32 const magic = MEM_readLE32(dict);
+ if (magic != ZSTD_MAGIC_DICTIONARY) {
+ return ZSTD_refDictContent(dctx, dict, dictSize); /* pure content mode */
+ } }
+ dctx->dictID = MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE);
+
+ /* load entropy tables */
+ { size_t const eSize = ZSTD_loadDEntropy(&dctx->entropy, dict, dictSize);
+ RETURN_ERROR_IF(ZSTD_isError(eSize), dictionary_corrupted, "");
+ dict = (const char*)dict + eSize;
+ dictSize -= eSize;
+ }
+ dctx->litEntropy = dctx->fseEntropy = 1;
+
+ /* reference dictionary content */
+ return ZSTD_refDictContent(dctx, dict, dictSize);
+}
+
+size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx)
+{
+ assert(dctx != NULL);
+ dctx->expected = ZSTD_startingInputLength(dctx->format); /* dctx->format must be properly set */
+ dctx->stage = ZSTDds_getFrameHeaderSize;
+ dctx->processedCSize = 0;
+ dctx->decodedSize = 0;
+ dctx->previousDstEnd = NULL;
+ dctx->prefixStart = NULL;
+ dctx->virtualStart = NULL;
+ dctx->dictEnd = NULL;
+ dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
+ dctx->litEntropy = dctx->fseEntropy = 0;
+ dctx->dictID = 0;
+ dctx->bType = bt_reserved;
+ ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue));
+ ZSTD_memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */
+ dctx->LLTptr = dctx->entropy.LLTable;
+ dctx->MLTptr = dctx->entropy.MLTable;
+ dctx->OFTptr = dctx->entropy.OFTable;
+ dctx->HUFptr = dctx->entropy.hufTable;
+ return 0;
+}
+
+size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) , "");
+ if (dict && dictSize)
+ RETURN_ERROR_IF(
+ ZSTD_isError(ZSTD_decompress_insertDictionary(dctx, dict, dictSize)),
+ dictionary_corrupted, "");
+ return 0;
+}
+
+
+/* ====== ZSTD_DDict ====== */
+
+size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
+{
+ DEBUGLOG(4, "ZSTD_decompressBegin_usingDDict");
+ assert(dctx != NULL);
+ if (ddict) {
+ const char* const dictStart = (const char*)ZSTD_DDict_dictContent(ddict);
+ size_t const dictSize = ZSTD_DDict_dictSize(ddict);
+ const void* const dictEnd = dictStart + dictSize;
+ dctx->ddictIsCold = (dctx->dictEnd != dictEnd);
+ DEBUGLOG(4, "DDict is %s",
+ dctx->ddictIsCold ? "~cold~" : "hot!");
+ }
+ FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) , "");
+ if (ddict) { /* NULL ddict is equivalent to no dictionary */
+ ZSTD_copyDDictParameters(dctx, ddict);
+ }
+ return 0;
+}
+
+/*! ZSTD_getDictID_fromDict() :
+ * Provides the dictID stored within dictionary.
+ * if @return == 0, the dictionary is not conformant with the Zstandard specification.
+ * It can still be loaded, but as a content-only dictionary. */
+unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize)
+{
+ if (dictSize < 8) return 0;
+ if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) return 0;
+ return MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE);
+}
+
+/*! ZSTD_getDictID_fromFrame() :
+ * Provides the dictID required to decompress frame stored within `src`.
+ * If @return == 0, the dictID could not be decoded.
+ * This could be for one of the following reasons :
+ * - The frame does not require a dictionary (most common case).
+ * - The frame was built with dictID intentionally removed.
+ * The needed dictionary is hidden information.
+ * Note : this use case also happens when using a non-conformant dictionary.
+ * - `srcSize` is too small, and as a result, frame header could not be decoded.
+ * Note : possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`.
+ * - This is not a Zstandard frame.
+ * When identifying the exact failure cause, it's possible to use
+ * ZSTD_getFrameHeader(), which will provide a more precise error code. */
+unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize)
+{
+ ZSTD_frameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0 };
+ size_t const hError = ZSTD_getFrameHeader(&zfp, src, srcSize);
+ if (ZSTD_isError(hError)) return 0;
+ return zfp.dictID;
+}
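+
+/* Illustrative pre-check (assumed caller-side helper) : compare the dictID a
+ * frame requests with the dictID of a raw dictionary buffer before loading it.
+ * A frame dictID of 0 means no dictionary is required, or the ID was removed. */
+static int example_dictMatchesFrame(const void* dict, size_t dictSize,
+ const void* src, size_t srcSize)
+{
+ unsigned const frameDictID = ZSTD_getDictID_fromFrame(src, srcSize);
+ unsigned const dictID = ZSTD_getDictID_fromDict(dict, dictSize);
+ if (frameDictID == 0) return 1; /* nothing requested, or undecodable : let decompression decide */
+ return dictID == frameDictID;
+}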
+
+
+/*! ZSTD_decompress_usingDDict() :
+* Decompression using a pre-digested Dictionary
+* Uses the dictionary without significant overhead. */
+size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_DDict* ddict)
+{
+ /* pass content and size in case legacy frames are encountered */
+ return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize,
+ NULL, 0,
+ ddict);
+}
+
+
+/*=====================================
+* Streaming decompression
+*====================================*/
+
+ZSTD_DStream* ZSTD_createDStream(void)
+{
+ DEBUGLOG(3, "ZSTD_createDStream");
+ return ZSTD_createDStream_advanced(ZSTD_defaultCMem);
+}
+
+ZSTD_DStream* ZSTD_initStaticDStream(void *workspace, size_t workspaceSize)
+{
+ return ZSTD_initStaticDCtx(workspace, workspaceSize);
+}
+
+ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem)
+{
+ return ZSTD_createDCtx_advanced(customMem);
+}
+
+size_t ZSTD_freeDStream(ZSTD_DStream* zds)
+{
+ return ZSTD_freeDCtx(zds);
+}
+
+
+/* *** Initialization *** */
+
+size_t ZSTD_DStreamInSize(void) { return ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize; }
+size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_MAX; }
+
+size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx,
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType)
+{
+ RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
+ ZSTD_clearDict(dctx);
+ if (dict && dictSize != 0) {
+ dctx->ddictLocal = ZSTD_createDDict_advanced(dict, dictSize, dictLoadMethod, dictContentType, dctx->customMem);
+ RETURN_ERROR_IF(dctx->ddictLocal == NULL, memory_allocation, "NULL pointer!");
+ dctx->ddict = dctx->ddictLocal;
+ dctx->dictUses = ZSTD_use_indefinitely;
+ }
+ return 0;
+}
+
+size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
+}
+
+size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
+}
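+
+/* Dictionary sketch (illustrative ; error paths shortened) : load a raw
+ * dictionary into the DCtx, then decompress. ZSTD_DCtx_loadDictionary() copies
+ * the dictionary, so the dict buffer does not need to outlive the call. */
+static size_t example_decompressWithDict(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict, size_t dictSize)
+{
+ FORWARD_IF_ERROR(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only), "");
+ FORWARD_IF_ERROR(ZSTD_DCtx_loadDictionary(dctx, dict, dictSize), "");
+ return ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
+}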
+
+size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
+{
+ FORWARD_IF_ERROR(ZSTD_DCtx_loadDictionary_advanced(dctx, prefix, prefixSize, ZSTD_dlm_byRef, dictContentType), "");
+ dctx->dictUses = ZSTD_use_once;
+ return 0;
+}
+
+size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize)
+{
+ return ZSTD_DCtx_refPrefix_advanced(dctx, prefix, prefixSize, ZSTD_dct_rawContent);
+}
+
+
+/* ZSTD_initDStream_usingDict() :
+ * return : expected size, aka ZSTD_startingInputLength().
+ * this function cannot fail */
+size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize)
+{
+ DEBUGLOG(4, "ZSTD_initDStream_usingDict");
+ FORWARD_IF_ERROR( ZSTD_DCtx_reset(zds, ZSTD_reset_session_only) , "");
+ FORWARD_IF_ERROR( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) , "");
+ return ZSTD_startingInputLength(zds->format);
+}
+
+/* note : this variant can't fail */
+size_t ZSTD_initDStream(ZSTD_DStream* zds)
+{
+ DEBUGLOG(4, "ZSTD_initDStream");
+ return ZSTD_initDStream_usingDDict(zds, NULL);
+}
+
+/* ZSTD_initDStream_usingDDict() :
+ * ddict will just be referenced, and must outlive decompression session
+ * this function cannot fail */
+size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict)
+{
+ FORWARD_IF_ERROR( ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only) , "");
+ FORWARD_IF_ERROR( ZSTD_DCtx_refDDict(dctx, ddict) , "");
+ return ZSTD_startingInputLength(dctx->format);
+}
+
+/* ZSTD_resetDStream() :
+ * return : expected size, aka ZSTD_startingInputLength().
+ * this function cannot fail */
+size_t ZSTD_resetDStream(ZSTD_DStream* dctx)
+{
+ FORWARD_IF_ERROR(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only), "");
+ return ZSTD_startingInputLength(dctx->format);
+}
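+
+/* Streaming sketch (illustrative ; zds is assumed to come from
+ * ZSTD_createDStream(), and both buffers are fully in memory for brevity) :
+ * drive ZSTD_decompressStream() until it returns 0, meaning one frame has been
+ * fully decoded and flushed. A real pipeline would refill/drain in chunks of
+ * ZSTD_DStreamInSize()/ZSTD_DStreamOutSize(). */
+static size_t example_streamDecompress(ZSTD_DStream* zds,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ ZSTD_inBuffer input = { src, srcSize, 0 };
+ ZSTD_outBuffer output = { dst, dstCapacity, 0 };
+ size_t ret = 1; /* any non-zero value : "frame not finished yet" */
+ FORWARD_IF_ERROR(ZSTD_initDStream(zds), "");
+ while (ret != 0 && input.pos < input.size) {
+ ret = ZSTD_decompressStream(zds, &output, &input);
+ FORWARD_IF_ERROR(ret, "");
+ if (ret != 0 && output.pos == output.size)
+ return ERROR(dstSize_tooSmall); /* no room left to flush into */
+ }
+ if (ret != 0) return ERROR(srcSize_wrong); /* input ended mid-frame */
+ return output.pos; /* number of bytes written into dst */
+}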
+
+
+size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
+{
+ RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
+ ZSTD_clearDict(dctx);
+ if (ddict) {
+ dctx->ddict = ddict;
+ dctx->dictUses = ZSTD_use_indefinitely;
+ if (dctx->refMultipleDDicts == ZSTD_rmd_refMultipleDDicts) {
+ if (dctx->ddictSet == NULL) {
+ dctx->ddictSet = ZSTD_createDDictHashSet(dctx->customMem);
+ if (!dctx->ddictSet) {
+ RETURN_ERROR(memory_allocation, "Failed to allocate memory for hash set!");
+ }
+ }
+ assert(!dctx->staticSize); /* Impossible: ddictSet cannot have been allocated if static dctx */
+ FORWARD_IF_ERROR(ZSTD_DDictHashSet_addDDict(dctx->ddictSet, ddict, dctx->customMem), "");
+ }
+ }
+ return 0;
+}
+
+/* ZSTD_DCtx_setMaxWindowSize() :
+ * note : no direct equivalence in ZSTD_DCtx_setParameter,
+ * since this version sets windowSize, and the other sets windowLog */
+size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize)
+{
+ ZSTD_bounds const bounds = ZSTD_dParam_getBounds(ZSTD_d_windowLogMax);
+ size_t const min = (size_t)1 << bounds.lowerBound;
+ size_t const max = (size_t)1 << bounds.upperBound;
+ RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
+ RETURN_ERROR_IF(maxWindowSize < min, parameter_outOfBound, "");
+ RETURN_ERROR_IF(maxWindowSize > max, parameter_outOfBound, "");
+ dctx->maxWindowSize = maxWindowSize;
+ return 0;
+}
+
+size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format)
+{
+ return ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, (int)format);
+}
+
+ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam)
+{
+ ZSTD_bounds bounds = { 0, 0, 0 };
+ switch(dParam) {
+ case ZSTD_d_windowLogMax:
+ bounds.lowerBound = ZSTD_WINDOWLOG_ABSOLUTEMIN;
+ bounds.upperBound = ZSTD_WINDOWLOG_MAX;
+ return bounds;
+ case ZSTD_d_format:
+ bounds.lowerBound = (int)ZSTD_f_zstd1;
+ bounds.upperBound = (int)ZSTD_f_zstd1_magicless;
+ ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless);
+ return bounds;
+ case ZSTD_d_stableOutBuffer:
+ bounds.lowerBound = (int)ZSTD_bm_buffered;
+ bounds.upperBound = (int)ZSTD_bm_stable;
+ return bounds;
+ case ZSTD_d_forceIgnoreChecksum:
+ bounds.lowerBound = (int)ZSTD_d_validateChecksum;
+ bounds.upperBound = (int)ZSTD_d_ignoreChecksum;
+ return bounds;
+ case ZSTD_d_refMultipleDDicts:
+ bounds.lowerBound = (int)ZSTD_rmd_refSingleDDict;
+ bounds.upperBound = (int)ZSTD_rmd_refMultipleDDicts;
+ return bounds;
+ default:;
+ }
+ bounds.error = ERROR(parameter_unsupported);
+ return bounds;
+}
+
+/* ZSTD_dParam_withinBounds:
+ * @return 1 if value is within dParam bounds,
+ * 0 otherwise */
+static int ZSTD_dParam_withinBounds(ZSTD_dParameter dParam, int value)
+{
+ ZSTD_bounds const bounds = ZSTD_dParam_getBounds(dParam);
+ if (ZSTD_isError(bounds.error)) return 0;
+ if (value < bounds.lowerBound) return 0;
+ if (value > bounds.upperBound) return 0;
+ return 1;
+}
+
+#define CHECK_DBOUNDS(p,v) { \
+ RETURN_ERROR_IF(!ZSTD_dParam_withinBounds(p, v), parameter_outOfBound, ""); \
+}
+
+size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value)
+{
+ switch (param) {
+ case ZSTD_d_windowLogMax:
+ *value = (int)ZSTD_highbit32((U32)dctx->maxWindowSize);
+ return 0;
+ case ZSTD_d_format:
+ *value = (int)dctx->format;
+ return 0;
+ case ZSTD_d_stableOutBuffer:
+ *value = (int)dctx->outBufferMode;
+ return 0;
+ case ZSTD_d_forceIgnoreChecksum:
+ *value = (int)dctx->forceIgnoreChecksum;
+ return 0;
+ case ZSTD_d_refMultipleDDicts:
+ *value = (int)dctx->refMultipleDDicts;
+ return 0;
+ default:;
+ }
+ RETURN_ERROR(parameter_unsupported, "");
+}
+
+size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value)
+{
+ RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
+ switch(dParam) {
+ case ZSTD_d_windowLogMax:
+ if (value == 0) value = ZSTD_WINDOWLOG_LIMIT_DEFAULT;
+ CHECK_DBOUNDS(ZSTD_d_windowLogMax, value);
+ dctx->maxWindowSize = ((size_t)1) << value;
+ return 0;
+ case ZSTD_d_format:
+ CHECK_DBOUNDS(ZSTD_d_format, value);
+ dctx->format = (ZSTD_format_e)value;
+ return 0;
+ case ZSTD_d_stableOutBuffer:
+ CHECK_DBOUNDS(ZSTD_d_stableOutBuffer, value);
+ dctx->outBufferMode = (ZSTD_bufferMode_e)value;
+ return 0;
+ case ZSTD_d_forceIgnoreChecksum:
+ CHECK_DBOUNDS(ZSTD_d_forceIgnoreChecksum, value);
+ dctx->forceIgnoreChecksum = (ZSTD_forceIgnoreChecksum_e)value;
+ return 0;
+ case ZSTD_d_refMultipleDDicts:
+ CHECK_DBOUNDS(ZSTD_d_refMultipleDDicts, value);
+ if (dctx->staticSize != 0) {
+ RETURN_ERROR(parameter_unsupported, "Static dctx does not support multiple DDicts!");
+ }
+ dctx->refMultipleDDicts = (ZSTD_refMultipleDDicts_e)value;
+ return 0;
+ default:;
+ }
+ RETURN_ERROR(parameter_unsupported, "");
+}
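+
+/* Parameter sketch (illustrative) : cap the window a frame may request before
+ * decoding, so that hostile frames cannot force an oversized decoding buffer.
+ * The windowLogMax value is the caller's policy, e.g. 27 for a 128 MB ceiling. */
+static size_t example_capWindow(ZSTD_DCtx* dctx, int windowLogMax)
+{
+ FORWARD_IF_ERROR(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only), "");
+ return ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, windowLogMax);
+}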
+
+size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset)
+{
+ if ( (reset == ZSTD_reset_session_only)
+ || (reset == ZSTD_reset_session_and_parameters) ) {
+ dctx->streamStage = zdss_init;
+ dctx->noForwardProgress = 0;
+ }
+ if ( (reset == ZSTD_reset_parameters)
+ || (reset == ZSTD_reset_session_and_parameters) ) {
+ RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
+ ZSTD_clearDict(dctx);
+ ZSTD_DCtx_resetParameters(dctx);
+ }
+ return 0;
+}
+
+
+size_t ZSTD_sizeof_DStream(const ZSTD_DStream* dctx)
+{
+ return ZSTD_sizeof_DCtx(dctx);
+}
+
+size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize)
+{
+ size_t const blockSize = (size_t) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
+ unsigned long long const neededRBSize = windowSize + blockSize + (WILDCOPY_OVERLENGTH * 2);
+ unsigned long long const neededSize = MIN(frameContentSize, neededRBSize);
+ size_t const minRBSize = (size_t) neededSize;
+ RETURN_ERROR_IF((unsigned long long)minRBSize != neededSize,
+ frameParameter_windowTooLarge, "");
+ return minRBSize;
+}
+
+size_t ZSTD_estimateDStreamSize(size_t windowSize)
+{
+ size_t const blockSize = MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
+ size_t const inBuffSize = blockSize; /* no block can be larger */
+ size_t const outBuffSize = ZSTD_decodingBufferSize_min(windowSize, ZSTD_CONTENTSIZE_UNKNOWN);
+ return ZSTD_estimateDCtxSize() + inBuffSize + outBuffSize;
+}
+
+size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize)
+{
+ U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX; /* note : should be user-selectable, but requires an additional parameter (or a dctx) */
+ ZSTD_frameHeader zfh;
+ size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize);
+ if (ZSTD_isError(err)) return err;
+ RETURN_ERROR_IF(err>0, srcSize_wrong, "");
+ RETURN_ERROR_IF(zfh.windowSize > windowSizeMax,
+ frameParameter_windowTooLarge, "");
+ return ZSTD_estimateDStreamSize((size_t)zfh.windowSize);
+}
+
+
+/* ***** Decompression ***** */
+
+static int ZSTD_DCtx_isOverflow(ZSTD_DStream* zds, size_t const neededInBuffSize, size_t const neededOutBuffSize)
+{
+ return (zds->inBuffSize + zds->outBuffSize) >= (neededInBuffSize + neededOutBuffSize) * ZSTD_WORKSPACETOOLARGE_FACTOR;
+}
+
+static void ZSTD_DCtx_updateOversizedDuration(ZSTD_DStream* zds, size_t const neededInBuffSize, size_t const neededOutBuffSize)
+{
+ if (ZSTD_DCtx_isOverflow(zds, neededInBuffSize, neededOutBuffSize))
+ zds->oversizedDuration++;
+ else
+ zds->oversizedDuration = 0;
+}
+
+static int ZSTD_DCtx_isOversizedTooLong(ZSTD_DStream* zds)
+{
+ return zds->oversizedDuration >= ZSTD_WORKSPACETOOLARGE_MAXDURATION;
+}
+
+/* Checks that the output buffer hasn't changed if ZSTD_bm_stable is used. */
+static size_t ZSTD_checkOutBuffer(ZSTD_DStream const* zds, ZSTD_outBuffer const* output)
+{
+ ZSTD_outBuffer const expect = zds->expectedOutBuffer;
+ /* No requirement when ZSTD_bm_stable is not enabled. */
+ if (zds->outBufferMode != ZSTD_bm_stable)
+ return 0;
+ /* Any buffer is allowed in zdss_init, this must be the same for every other call until
+ * the context is reset.
+ */
+ if (zds->streamStage == zdss_init)
+ return 0;
+ /* The buffer must match our expectation exactly. */
+ if (expect.dst == output->dst && expect.pos == output->pos && expect.size == output->size)
+ return 0;
+ RETURN_ERROR(dstBuffer_wrong, "ZSTD_d_stableOutBuffer enabled but output differs!");
+}
+
+/* Calls ZSTD_decompressContinue() with the right parameters for ZSTD_decompressStream()
+ * and updates the stage and the output buffer state. This call is extracted so it can be
+ * used both when reading directly from the ZSTD_inBuffer, and in buffered input mode.
+ * NOTE: You must break after calling this function since the streamStage is modified.
+ */
+static size_t ZSTD_decompressContinueStream(
+ ZSTD_DStream* zds, char** op, char* oend,
+ void const* src, size_t srcSize) {
+ int const isSkipFrame = ZSTD_isSkipFrame(zds);
+ if (zds->outBufferMode == ZSTD_bm_buffered) {
+ size_t const dstSize = isSkipFrame ? 0 : zds->outBuffSize - zds->outStart;
+ size_t const decodedSize = ZSTD_decompressContinue(zds,
+ zds->outBuff + zds->outStart, dstSize, src, srcSize);
+ FORWARD_IF_ERROR(decodedSize, "");
+ if (!decodedSize && !isSkipFrame) {
+ zds->streamStage = zdss_read;
+ } else {
+ zds->outEnd = zds->outStart + decodedSize;
+ zds->streamStage = zdss_flush;
+ }
+ } else {
+ /* Write directly into the output buffer */
+ size_t const dstSize = isSkipFrame ? 0 : (size_t)(oend - *op);
+ size_t const decodedSize = ZSTD_decompressContinue(zds, *op, dstSize, src, srcSize);
+ FORWARD_IF_ERROR(decodedSize, "");
+ *op += decodedSize;
+ /* Flushing is not needed. */
+ zds->streamStage = zdss_read;
+ assert(*op <= oend);
+ assert(zds->outBufferMode == ZSTD_bm_stable);
+ }
+ return 0;
+}
+
+size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
+{
+ const char* const src = (const char*)input->src;
+ const char* const istart = input->pos != 0 ? src + input->pos : src;
+ const char* const iend = input->size != 0 ? src + input->size : src;
+ const char* ip = istart;
+ char* const dst = (char*)output->dst;
+ char* const ostart = output->pos != 0 ? dst + output->pos : dst;
+ char* const oend = output->size != 0 ? dst + output->size : dst;
+ char* op = ostart;
+ U32 someMoreWork = 1;
+
+ DEBUGLOG(5, "ZSTD_decompressStream");
+ RETURN_ERROR_IF(
+ input->pos > input->size,
+ srcSize_wrong,
+ "forbidden. in: pos: %u vs size: %u",
+ (U32)input->pos, (U32)input->size);
+ RETURN_ERROR_IF(
+ output->pos > output->size,
+ dstSize_tooSmall,
+ "forbidden. out: pos: %u vs size: %u",
+ (U32)output->pos, (U32)output->size);
+ DEBUGLOG(5, "input size : %u", (U32)(input->size - input->pos));
+ FORWARD_IF_ERROR(ZSTD_checkOutBuffer(zds, output), "");
+
+ while (someMoreWork) {
+ switch(zds->streamStage)
+ {
+ case zdss_init :
+ DEBUGLOG(5, "stage zdss_init => transparent reset ");
+ zds->streamStage = zdss_loadHeader;
+ zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0;
+ zds->legacyVersion = 0;
+ zds->hostageByte = 0;
+ zds->expectedOutBuffer = *output;
+ ZSTD_FALLTHROUGH;
+
+ case zdss_loadHeader :
+ DEBUGLOG(5, "stage zdss_loadHeader (srcSize : %u)", (U32)(iend - ip));
+ { size_t const hSize = ZSTD_getFrameHeader_advanced(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format);
+ if (zds->refMultipleDDicts && zds->ddictSet) {
+ ZSTD_DCtx_selectFrameDDict(zds);
+ }
+ DEBUGLOG(5, "header size : %u", (U32)hSize);
+ if (ZSTD_isError(hSize)) {
+ return hSize; /* error */
+ }
+ if (hSize != 0) { /* need more input */
+ size_t const toLoad = hSize - zds->lhSize; /* if hSize!=0, hSize > zds->lhSize */
+ size_t const remainingInput = (size_t)(iend-ip);
+ assert(iend >= ip);
+ if (toLoad > remainingInput) { /* not enough input to load full header */
+ if (remainingInput > 0) {
+ ZSTD_memcpy(zds->headerBuffer + zds->lhSize, ip, remainingInput);
+ zds->lhSize += remainingInput;
+ }
+ input->pos = input->size;
+ return (MAX((size_t)ZSTD_FRAMEHEADERSIZE_MIN(zds->format), hSize) - zds->lhSize) + ZSTD_blockHeaderSize; /* remaining header bytes + next block header */
+ }
+ assert(ip != NULL);
+ ZSTD_memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); zds->lhSize = hSize; ip += toLoad;
+ break;
+ } }
+
+ /* check for single-pass mode opportunity */
+ if (zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
+ && zds->fParams.frameType != ZSTD_skippableFrame
+ && (U64)(size_t)(oend-op) >= zds->fParams.frameContentSize) {
+ size_t const cSize = ZSTD_findFrameCompressedSize(istart, (size_t)(iend-istart));
+ if (cSize <= (size_t)(iend-istart)) {
+ /* shortcut : using single-pass mode */
+ size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, (size_t)(oend-op), istart, cSize, ZSTD_getDDict(zds));
+ if (ZSTD_isError(decompressedSize)) return decompressedSize;
+ DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()")
+ ip = istart + cSize;
+ op += decompressedSize;
+ zds->expected = 0;
+ zds->streamStage = zdss_init;
+ someMoreWork = 0;
+ break;
+ } }
+
+ /* Check output buffer is large enough for ZSTD_bm_stable. */
+ if (zds->outBufferMode == ZSTD_bm_stable
+ && zds->fParams.frameType != ZSTD_skippableFrame
+ && zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
+ && (U64)(size_t)(oend-op) < zds->fParams.frameContentSize) {
+ RETURN_ERROR(dstSize_tooSmall, "ZSTD_bm_stable passed but ZSTD_outBuffer is too small");
+ }
+
+ /* Consume header (see ZSTDds_decodeFrameHeader) */
+ DEBUGLOG(4, "Consume header");
+ FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(zds, ZSTD_getDDict(zds)), "");
+
+ if ((MEM_readLE32(zds->headerBuffer) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
+ zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_FRAMEIDSIZE);
+ zds->stage = ZSTDds_skipFrame;
+ } else {
+ FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize), "");
+ zds->expected = ZSTD_blockHeaderSize;
+ zds->stage = ZSTDds_decodeBlockHeader;
+ }
+
+ /* control buffer memory usage */
+ DEBUGLOG(4, "Control max memory usage (%u KB <= max %u KB)",
+ (U32)(zds->fParams.windowSize >>10),
+ (U32)(zds->maxWindowSize >> 10) );
+ zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN);
+ RETURN_ERROR_IF(zds->fParams.windowSize > zds->maxWindowSize,
+ frameParameter_windowTooLarge, "");
+
+ /* Adapt buffer sizes to frame header instructions */
+ { size_t const neededInBuffSize = MAX(zds->fParams.blockSizeMax, 4 /* frame checksum */);
+ size_t const neededOutBuffSize = zds->outBufferMode == ZSTD_bm_buffered
+ ? ZSTD_decodingBufferSize_min(zds->fParams.windowSize, zds->fParams.frameContentSize)
+ : 0;
+
+ ZSTD_DCtx_updateOversizedDuration(zds, neededInBuffSize, neededOutBuffSize);
+
+ { int const tooSmall = (zds->inBuffSize < neededInBuffSize) || (zds->outBuffSize < neededOutBuffSize);
+ int const tooLarge = ZSTD_DCtx_isOversizedTooLong(zds);
+
+ if (tooSmall || tooLarge) {
+ size_t const bufferSize = neededInBuffSize + neededOutBuffSize;
+ DEBUGLOG(4, "inBuff : from %u to %u",
+ (U32)zds->inBuffSize, (U32)neededInBuffSize);
+ DEBUGLOG(4, "outBuff : from %u to %u",
+ (U32)zds->outBuffSize, (U32)neededOutBuffSize);
+ if (zds->staticSize) { /* static DCtx */
+ DEBUGLOG(4, "staticSize : %u", (U32)zds->staticSize);
+ assert(zds->staticSize >= sizeof(ZSTD_DCtx)); /* controlled at init */
+ RETURN_ERROR_IF(
+ bufferSize > zds->staticSize - sizeof(ZSTD_DCtx),
+ memory_allocation, "");
+ } else {
+ ZSTD_customFree(zds->inBuff, zds->customMem);
+ zds->inBuffSize = 0;
+ zds->outBuffSize = 0;
+ zds->inBuff = (char*)ZSTD_customMalloc(bufferSize, zds->customMem);
+ RETURN_ERROR_IF(zds->inBuff == NULL, memory_allocation, "");
+ }
+ zds->inBuffSize = neededInBuffSize;
+ zds->outBuff = zds->inBuff + zds->inBuffSize;
+ zds->outBuffSize = neededOutBuffSize;
+ } } }
+ zds->streamStage = zdss_read;
+ ZSTD_FALLTHROUGH;
+
+ case zdss_read:
+ DEBUGLOG(5, "stage zdss_read");
+ { size_t const neededInSize = ZSTD_nextSrcSizeToDecompressWithInputSize(zds, (size_t)(iend - ip));
+ DEBUGLOG(5, "neededInSize = %u", (U32)neededInSize);
+ if (neededInSize==0) { /* end of frame */
+ zds->streamStage = zdss_init;
+ someMoreWork = 0;
+ break;
+ }
+ if ((size_t)(iend-ip) >= neededInSize) { /* decode directly from src */
+ FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, ip, neededInSize), "");
+ ip += neededInSize;
+ /* Function modifies the stage so we must break */
+ break;
+ } }
+ if (ip==iend) { someMoreWork = 0; break; } /* no more input */
+ zds->streamStage = zdss_load;
+ ZSTD_FALLTHROUGH;
+
+ case zdss_load:
+ { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds);
+ size_t const toLoad = neededInSize - zds->inPos;
+ int const isSkipFrame = ZSTD_isSkipFrame(zds);
+ size_t loadedSize;
+ /* At this point we shouldn't be decompressing a block that we can stream. */
+ assert(neededInSize == ZSTD_nextSrcSizeToDecompressWithInputSize(zds, iend - ip));
+ if (isSkipFrame) {
+ loadedSize = MIN(toLoad, (size_t)(iend-ip));
+ } else {
+ RETURN_ERROR_IF(toLoad > zds->inBuffSize - zds->inPos,
+ corruption_detected,
+ "should never happen");
+ loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, (size_t)(iend-ip));
+ }
+ ip += loadedSize;
+ zds->inPos += loadedSize;
+ if (loadedSize < toLoad) { someMoreWork = 0; break; } /* not enough input, wait for more */
+
+ /* decode loaded input */
+ zds->inPos = 0; /* input is consumed */
+ FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, zds->inBuff, neededInSize), "");
+ /* Function modifies the stage so we must break */
+ break;
+ }
+ case zdss_flush:
+ { size_t const toFlushSize = zds->outEnd - zds->outStart;
+ size_t const flushedSize = ZSTD_limitCopy(op, (size_t)(oend-op), zds->outBuff + zds->outStart, toFlushSize);
+ op += flushedSize;
+ zds->outStart += flushedSize;
+ if (flushedSize == toFlushSize) { /* flush completed */
+ zds->streamStage = zdss_read;
+ if ( (zds->outBuffSize < zds->fParams.frameContentSize)
+ && (zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize) ) {
+ DEBUGLOG(5, "restart filling outBuff from beginning (left:%i, needed:%u)",
+ (int)(zds->outBuffSize - zds->outStart),
+ (U32)zds->fParams.blockSizeMax);
+ zds->outStart = zds->outEnd = 0;
+ }
+ break;
+ } }
+ /* cannot complete flush */
+ someMoreWork = 0;
+ break;
+
+ default:
+ assert(0); /* impossible */
+ RETURN_ERROR(GENERIC, "impossible to reach"); /* some compiler require default to do something */
+ } }
+
+ /* result */
+ input->pos = (size_t)(ip - (const char*)(input->src));
+ output->pos = (size_t)(op - (char*)(output->dst));
+
+ /* Update the expected output buffer for ZSTD_bm_stable. */
+ zds->expectedOutBuffer = *output;
+
+ if ((ip==istart) && (op==ostart)) { /* no forward progress */
+ zds->noForwardProgress ++;
+ if (zds->noForwardProgress >= ZSTD_NO_FORWARD_PROGRESS_MAX) {
+ RETURN_ERROR_IF(op==oend, dstSize_tooSmall, "");
+ RETURN_ERROR_IF(ip==iend, srcSize_wrong, "");
+ assert(0);
+ }
+ } else {
+ zds->noForwardProgress = 0;
+ }
+ { size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds);
+ if (!nextSrcSizeHint) { /* frame fully decoded */
+ if (zds->outEnd == zds->outStart) { /* output fully flushed */
+ if (zds->hostageByte) {
+ if (input->pos >= input->size) {
+ /* can't release hostage (not present) */
+ zds->streamStage = zdss_read;
+ return 1;
+ }
+ input->pos++; /* release hostage */
+ } /* zds->hostageByte */
+ return 0;
+ } /* zds->outEnd == zds->outStart */
+ if (!zds->hostageByte) { /* output not fully flushed; keep last byte as hostage; will be released when all output is flushed */
+ input->pos--; /* note : pos > 0, otherwise, impossible to finish reading last block */
+ zds->hostageByte=1;
+ }
+ return 1;
+ } /* nextSrcSizeHint==0 */
+ nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds) == ZSTDnit_block); /* preload header of next block */
+ assert(zds->inPos <= nextSrcSizeHint);
+ nextSrcSizeHint -= zds->inPos; /* part already loaded*/
+ return nextSrcSizeHint;
+ }
+}
+
+size_t ZSTD_decompressStream_simpleArgs (
+ ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity, size_t* dstPos,
+ const void* src, size_t srcSize, size_t* srcPos)
+{
+ ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
+ ZSTD_inBuffer input = { src, srcSize, *srcPos };
+ /* ZSTD_decompressStream() will check validity of dstPos and srcPos */
+ size_t const cErr = ZSTD_decompressStream(dctx, &output, &input);
+ *dstPos = output.pos;
+ *srcPos = input.pos;
+ return cErr;
+}
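+
+/* Illustrative usage sketch (not part of the upstream sources) : given an
+ * already-initialized ZSTD_DStream* zds, a caller typically loops on
+ * ZSTD_decompressStream(), draining `output` and re-filling `input` between
+ * calls, until the function returns 0 (frame fully decoded and flushed) :
+ *
+ *   ZSTD_inBuffer  input  = { srcBuf, srcBufSize, 0 };
+ *   ZSTD_outBuffer output = { dstBuf, dstBufSize, 0 };
+ *   size_t hint;
+ *   do {
+ *       hint = ZSTD_decompressStream(zds, &output, &input);
+ *       if (ZSTD_isError(hint)) return hint;   // decoding error
+ *       // drain `output` and/or supply about `hint` more input bytes here
+ *   } while (hint != 0);
+ */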
diff --git a/lib/zstd/decompress/zstd_decompress_block.c b/lib/zstd/decompress/zstd_decompress_block.c
new file mode 100644
index 00000000000000..2d101d9a842eca
--- /dev/null
+++ b/lib/zstd/decompress/zstd_decompress_block.c
@@ -0,0 +1,1540 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* zstd_decompress_block :
+ * this module takes care of decompressing _compressed_ blocks */
+
+/*-*******************************************************
+* Dependencies
+*********************************************************/
+#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */
+#include "../common/compiler.h" /* prefetch */
+#include "../common/cpu.h" /* bmi2 */
+#include "../common/mem.h" /* low level memory routines */
+#define FSE_STATIC_LINKING_ONLY
+#include "../common/fse.h"
+#define HUF_STATIC_LINKING_ONLY
+#include "../common/huf.h"
+#include "../common/zstd_internal.h"
+#include "zstd_decompress_internal.h" /* ZSTD_DCtx */
+#include "zstd_ddict.h" /* ZSTD_DDictDictContent */
+#include "zstd_decompress_block.h"
+
+/*_*******************************************************
+* Macros
+**********************************************************/
+
+/* These two optional macros each force the use of one of the two
+ * ZSTD_decompressSequences implementations. You can't force both
+ * directions at the same time.
+ */
+#if defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
+ defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
+#error "Cannot force the use of the short and the long ZSTD_decompressSequences variants!"
+#endif
+
+
+/*_*******************************************************
+* Memory operations
+**********************************************************/
+static void ZSTD_copy4(void* dst, const void* src) { ZSTD_memcpy(dst, src, 4); }
+
+
+/*-*************************************************************
+ * Block decoding
+ ***************************************************************/
+
+/*! ZSTD_getcBlockSize() :
+ * Provides the size of compressed block from block header `src` */
+size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
+ blockProperties_t* bpPtr)
+{
+ RETURN_ERROR_IF(srcSize < ZSTD_blockHeaderSize, srcSize_wrong, "");
+
+ { U32 const cBlockHeader = MEM_readLE24(src);
+ U32 const cSize = cBlockHeader >> 3;
+ bpPtr->lastBlock = cBlockHeader & 1;
+ bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3);
+ bpPtr->origSize = cSize; /* only useful for RLE */
+ if (bpPtr->blockType == bt_rle) return 1;
+ RETURN_ERROR_IF(bpPtr->blockType == bt_reserved, corruption_detected, "");
+ return cSize;
+ }
+}
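+
+/* Worked example (illustrative, not part of the upstream sources) : a 3-byte
+ * block header read as the little-endian 24-bit value 0x000021 gives :
+ *   lastBlock = 0x000021 & 1        = 1   (this is the last block)
+ *   blockType = (0x000021 >> 1) & 3 = 0   (bt_raw)
+ *   cSize     = 0x000021 >> 3       = 4   (4 bytes of block content)
+ */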
+
+
+/* Hidden declaration for fullbench */
+size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
+ const void* src, size_t srcSize);
+/*! ZSTD_decodeLiteralsBlock() :
+ * @return : nb of bytes read from src (< srcSize )
+ * note : symbol not declared but exposed for fullbench */
+size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
+ const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */
+{
+ DEBUGLOG(5, "ZSTD_decodeLiteralsBlock");
+ RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected, "");
+
+ { const BYTE* const istart = (const BYTE*) src;
+ symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);
+
+ switch(litEncType)
+ {
+ case set_repeat:
+ DEBUGLOG(5, "set_repeat flag : re-using stats from previous compressed literals block");
+ RETURN_ERROR_IF(dctx->litEntropy==0, dictionary_corrupted, "");
+ ZSTD_FALLTHROUGH;
+
+ case set_compressed:
+ RETURN_ERROR_IF(srcSize < 5, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3");
+ { size_t lhSize, litSize, litCSize;
+ U32 singleStream=0;
+ U32 const lhlCode = (istart[0] >> 2) & 3;
+ U32 const lhc = MEM_readLE32(istart);
+ size_t hufSuccess;
+ switch(lhlCode)
+ {
+ case 0: case 1: default: /* note : default is impossible, since lhlCode is in [0..3] */
+ /* 2 - 2 - 10 - 10 */
+ singleStream = !lhlCode;
+ lhSize = 3;
+ litSize = (lhc >> 4) & 0x3FF;
+ litCSize = (lhc >> 14) & 0x3FF;
+ break;
+ case 2:
+ /* 2 - 2 - 14 - 14 */
+ lhSize = 4;
+ litSize = (lhc >> 4) & 0x3FFF;
+ litCSize = lhc >> 18;
+ break;
+ case 3:
+ /* 2 - 2 - 18 - 18 */
+ lhSize = 5;
+ litSize = (lhc >> 4) & 0x3FFFF;
+ litCSize = (lhc >> 22) + ((size_t)istart[4] << 10);
+ break;
+ }
+ RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, "");
+ RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected, "");
+
+ /* prefetch huffman table if cold */
+ if (dctx->ddictIsCold && (litSize > 768 /* heuristic */)) {
+ PREFETCH_AREA(dctx->HUFptr, sizeof(dctx->entropy.hufTable));
+ }
+
+ if (litEncType==set_repeat) {
+ if (singleStream) {
+ hufSuccess = HUF_decompress1X_usingDTable_bmi2(
+ dctx->litBuffer, litSize, istart+lhSize, litCSize,
+ dctx->HUFptr, dctx->bmi2);
+ } else {
+ hufSuccess = HUF_decompress4X_usingDTable_bmi2(
+ dctx->litBuffer, litSize, istart+lhSize, litCSize,
+ dctx->HUFptr, dctx->bmi2);
+ }
+ } else {
+ if (singleStream) {
+#if defined(HUF_FORCE_DECOMPRESS_X2)
+ hufSuccess = HUF_decompress1X_DCtx_wksp(
+ dctx->entropy.hufTable, dctx->litBuffer, litSize,
+ istart+lhSize, litCSize, dctx->workspace,
+ sizeof(dctx->workspace));
+#else
+ hufSuccess = HUF_decompress1X1_DCtx_wksp_bmi2(
+ dctx->entropy.hufTable, dctx->litBuffer, litSize,
+ istart+lhSize, litCSize, dctx->workspace,
+ sizeof(dctx->workspace), dctx->bmi2);
+#endif
+ } else {
+ hufSuccess = HUF_decompress4X_hufOnly_wksp_bmi2(
+ dctx->entropy.hufTable, dctx->litBuffer, litSize,
+ istart+lhSize, litCSize, dctx->workspace,
+ sizeof(dctx->workspace), dctx->bmi2);
+ }
+ }
+
+ RETURN_ERROR_IF(HUF_isError(hufSuccess), corruption_detected, "");
+
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litSize = litSize;
+ dctx->litEntropy = 1;
+ if (litEncType==set_compressed) dctx->HUFptr = dctx->entropy.hufTable;
+ ZSTD_memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
+ return litCSize + lhSize;
+ }
+
+ case set_basic:
+ { size_t litSize, lhSize;
+ U32 const lhlCode = ((istart[0]) >> 2) & 3;
+ switch(lhlCode)
+ {
+ case 0: case 2: default: /* note : default is impossible, since lhlCode is in [0..3] */
+ lhSize = 1;
+ litSize = istart[0] >> 3;
+ break;
+ case 1:
+ lhSize = 2;
+ litSize = MEM_readLE16(istart) >> 4;
+ break;
+ case 3:
+ lhSize = 3;
+ litSize = MEM_readLE24(istart) >> 4;
+ break;
+ }
+
+ if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */
+ RETURN_ERROR_IF(litSize+lhSize > srcSize, corruption_detected, "");
+ ZSTD_memcpy(dctx->litBuffer, istart+lhSize, litSize);
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litSize = litSize;
+ ZSTD_memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
+ return lhSize+litSize;
+ }
+ /* direct reference into compressed stream */
+ dctx->litPtr = istart+lhSize;
+ dctx->litSize = litSize;
+ return lhSize+litSize;
+ }
+
+ case set_rle:
+ { U32 const lhlCode = ((istart[0]) >> 2) & 3;
+ size_t litSize, lhSize;
+ switch(lhlCode)
+ {
+ case 0: case 2: default: /* note : default is impossible, since lhlCode is in [0..3] */
+ lhSize = 1;
+ litSize = istart[0] >> 3;
+ break;
+ case 1:
+ lhSize = 2;
+ litSize = MEM_readLE16(istart) >> 4;
+ break;
+ case 3:
+ lhSize = 3;
+ litSize = MEM_readLE24(istart) >> 4;
+ RETURN_ERROR_IF(srcSize<4, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4");
+ break;
+ }
+ RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, "");
+ ZSTD_memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litSize = litSize;
+ return lhSize+1;
+ }
+ default:
+ RETURN_ERROR(corruption_detected, "impossible");
+ }
+ }
+}
+
+/* Default FSE distribution tables.
+ * These are pre-calculated FSE decoding tables using default distributions as defined in specification :
+ * https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#default-distributions
+ * They were generated programmatically with the following method :
+ * - start from default distributions, present in /lib/common/zstd_internal.h
+ * - generate tables normally, using ZSTD_buildFSETable()
+ * - print out the content of the tables
+ * - prettify the output (reported below), then test with the fuzzer to ensure correctness */
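+
+/* Illustrative sketch (an assumption, not upstream code) : regenerating one of
+ * these tables amounts to feeding the matching default normalized counters
+ * from /lib/common/zstd_internal.h into ZSTD_buildFSETable() and printing the
+ * resulting entries, e.g. for Literal Lengths :
+ *
+ *   ZSTD_seqSymbol dt[(1 << LL_DEFAULTNORMLOG) + 1];
+ *   U32 wksp[ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32];
+ *   ZSTD_buildFSETable(dt, LL_defaultNorm, MaxLL, LL_base, LL_bits,
+ *                      LL_DEFAULTNORMLOG, wksp, sizeof(wksp), 0);
+ *
+ * (the final 0 selects the non-bmi2 code path)
+ */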
+
+/* Default FSE distribution table for Literal Lengths */
+static const ZSTD_seqSymbol LL_defaultDTable[(1<<LL_DEFAULTNORMLOG)+1] = {
+ { 1, 1, 1, LL_DEFAULTNORMLOG}, /* header : fastMode, tableLog */
+ /* nextState, nbAddBits, nbBits, baseVal */
+ { 0, 0, 4, 0}, { 16, 0, 4, 0},
+ { 32, 0, 5, 1}, { 0, 0, 5, 3},
+ { 0, 0, 5, 4}, { 0, 0, 5, 6},
+ { 0, 0, 5, 7}, { 0, 0, 5, 9},
+ { 0, 0, 5, 10}, { 0, 0, 5, 12},
+ { 0, 0, 6, 14}, { 0, 1, 5, 16},
+ { 0, 1, 5, 20}, { 0, 1, 5, 22},
+ { 0, 2, 5, 28}, { 0, 3, 5, 32},
+ { 0, 4, 5, 48}, { 32, 6, 5, 64},
+ { 0, 7, 5, 128}, { 0, 8, 6, 256},
+ { 0, 10, 6, 1024}, { 0, 12, 6, 4096},
+ { 32, 0, 4, 0}, { 0, 0, 4, 1},
+ { 0, 0, 5, 2}, { 32, 0, 5, 4},
+ { 0, 0, 5, 5}, { 32, 0, 5, 7},
+ { 0, 0, 5, 8}, { 32, 0, 5, 10},
+ { 0, 0, 5, 11}, { 0, 0, 6, 13},
+ { 32, 1, 5, 16}, { 0, 1, 5, 18},
+ { 32, 1, 5, 22}, { 0, 2, 5, 24},
+ { 32, 3, 5, 32}, { 0, 3, 5, 40},
+ { 0, 6, 4, 64}, { 16, 6, 4, 64},
+ { 32, 7, 5, 128}, { 0, 9, 6, 512},
+ { 0, 11, 6, 2048}, { 48, 0, 4, 0},
+ { 16, 0, 4, 1}, { 32, 0, 5, 2},
+ { 32, 0, 5, 3}, { 32, 0, 5, 5},
+ { 32, 0, 5, 6}, { 32, 0, 5, 8},
+ { 32, 0, 5, 9}, { 32, 0, 5, 11},
+ { 32, 0, 5, 12}, { 0, 0, 6, 15},
+ { 32, 1, 5, 18}, { 32, 1, 5, 20},
+ { 32, 2, 5, 24}, { 32, 2, 5, 28},
+ { 32, 3, 5, 40}, { 32, 4, 5, 48},
+ { 0, 16, 6,65536}, { 0, 15, 6,32768},
+ { 0, 14, 6,16384}, { 0, 13, 6, 8192},
+}; /* LL_defaultDTable */
+
+/* Default FSE distribution table for Offset Codes */
+static const ZSTD_seqSymbol OF_defaultDTable[(1<<OF_DEFAULTNORMLOG)+1] = {
+ { 1, 1, 1, OF_DEFAULTNORMLOG}, /* header : fastMode, tableLog */
+ /* nextState, nbAddBits, nbBits, baseVal */
+ { 0, 0, 5, 0}, { 0, 6, 4, 61},
+ { 0, 9, 5, 509}, { 0, 15, 5,32765},
+ { 0, 21, 5,2097149}, { 0, 3, 5, 5},
+ { 0, 7, 4, 125}, { 0, 12, 5, 4093},
+ { 0, 18, 5,262141}, { 0, 23, 5,8388605},
+ { 0, 5, 5, 29}, { 0, 8, 4, 253},
+ { 0, 14, 5,16381}, { 0, 20, 5,1048573},
+ { 0, 2, 5, 1}, { 16, 7, 4, 125},
+ { 0, 11, 5, 2045}, { 0, 17, 5,131069},
+ { 0, 22, 5,4194301}, { 0, 4, 5, 13},
+ { 16, 8, 4, 253}, { 0, 13, 5, 8189},
+ { 0, 19, 5,524285}, { 0, 1, 5, 1},
+ { 16, 6, 4, 61}, { 0, 10, 5, 1021},
+ { 0, 16, 5,65533}, { 0, 28, 5,268435453},
+ { 0, 27, 5,134217725}, { 0, 26, 5,67108861},
+ { 0, 25, 5,33554429}, { 0, 24, 5,16777213},
+}; /* OF_defaultDTable */
+
+
+/* Default FSE distribution table for Match Lengths */
+static const ZSTD_seqSymbol ML_defaultDTable[(1<<ML_DEFAULTNORMLOG)+1] = {
+ { 1, 1, 1, ML_DEFAULTNORMLOG}, /* header : fastMode, tableLog */
+ /* nextState, nbAddBits, nbBits, baseVal */
+ { 0, 0, 6, 3}, { 0, 0, 4, 4},
+ { 32, 0, 5, 5}, { 0, 0, 5, 6},
+ { 0, 0, 5, 8}, { 0, 0, 5, 9},
+ { 0, 0, 5, 11}, { 0, 0, 6, 13},
+ { 0, 0, 6, 16}, { 0, 0, 6, 19},
+ { 0, 0, 6, 22}, { 0, 0, 6, 25},
+ { 0, 0, 6, 28}, { 0, 0, 6, 31},
+ { 0, 0, 6, 34}, { 0, 1, 6, 37},
+ { 0, 1, 6, 41}, { 0, 2, 6, 47},
+ { 0, 3, 6, 59}, { 0, 4, 6, 83},
+ { 0, 7, 6, 131}, { 0, 9, 6, 515},
+ { 16, 0, 4, 4}, { 0, 0, 4, 5},
+ { 32, 0, 5, 6}, { 0, 0, 5, 7},
+ { 32, 0, 5, 9}, { 0, 0, 5, 10},
+ { 0, 0, 6, 12}, { 0, 0, 6, 15},
+ { 0, 0, 6, 18}, { 0, 0, 6, 21},
+ { 0, 0, 6, 24}, { 0, 0, 6, 27},
+ { 0, 0, 6, 30}, { 0, 0, 6, 33},
+ { 0, 1, 6, 35}, { 0, 1, 6, 39},
+ { 0, 2, 6, 43}, { 0, 3, 6, 51},
+ { 0, 4, 6, 67}, { 0, 5, 6, 99},
+ { 0, 8, 6, 259}, { 32, 0, 4, 4},
+ { 48, 0, 4, 4}, { 16, 0, 4, 5},
+ { 32, 0, 5, 7}, { 32, 0, 5, 8},
+ { 32, 0, 5, 10}, { 32, 0, 5, 11},
+ { 0, 0, 6, 14}, { 0, 0, 6, 17},
+ { 0, 0, 6, 20}, { 0, 0, 6, 23},
+ { 0, 0, 6, 26}, { 0, 0, 6, 29},
+ { 0, 0, 6, 32}, { 0, 16, 6,65539},
+ { 0, 15, 6,32771}, { 0, 14, 6,16387},
+ { 0, 13, 6, 8195}, { 0, 12, 6, 4099},
+ { 0, 11, 6, 2051}, { 0, 10, 6, 1027},
+}; /* ML_defaultDTable */
+
+
+static void ZSTD_buildSeqTable_rle(ZSTD_seqSymbol* dt, U32 baseValue, U32 nbAddBits)
+{
+ void* ptr = dt;
+ ZSTD_seqSymbol_header* const DTableH = (ZSTD_seqSymbol_header*)ptr;
+ ZSTD_seqSymbol* const cell = dt + 1;
+
+ DTableH->tableLog = 0;
+ DTableH->fastMode = 0;
+
+ cell->nbBits = 0;
+ cell->nextState = 0;
+ assert(nbAddBits < 255);
+ cell->nbAdditionalBits = (BYTE)nbAddBits;
+ cell->baseValue = baseValue;
+}
+
+
+/* ZSTD_buildFSETable() :
+ * generate FSE decoding table for one symbol (ll, ml or off)
+ * cannot fail if input is valid =>
+ * all inputs are presumed validated at this stage */
+FORCE_INLINE_TEMPLATE
+void ZSTD_buildFSETable_body(ZSTD_seqSymbol* dt,
+ const short* normalizedCounter, unsigned maxSymbolValue,
+ const U32* baseValue, const U32* nbAdditionalBits,
+ unsigned tableLog, void* wksp, size_t wkspSize)
+{
+ ZSTD_seqSymbol* const tableDecode = dt+1;
+ U32 const maxSV1 = maxSymbolValue + 1;
+ U32 const tableSize = 1 << tableLog;
+
+ U16* symbolNext = (U16*)wksp;
+ BYTE* spread = (BYTE*)(symbolNext + MaxSeq + 1);
+ U32 highThreshold = tableSize - 1;
+
+
+ /* Sanity Checks */
+ assert(maxSymbolValue <= MaxSeq);
+ assert(tableLog <= MaxFSELog);
+ assert(wkspSize >= ZSTD_BUILD_FSE_TABLE_WKSP_SIZE);
+ (void)wkspSize;
+ /* Init, lay down lowprob symbols */
+ { ZSTD_seqSymbol_header DTableH;
+ DTableH.tableLog = tableLog;
+ DTableH.fastMode = 1;
+ { S16 const largeLimit= (S16)(1 << (tableLog-1));
+ U32 s;
+ for (s=0; s<maxSV1; s++) {
+ if (normalizedCounter[s]==-1) {
+ tableDecode[highThreshold--].baseValue = s;
+ symbolNext[s] = 1;
+ } else {
+ if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
+ assert(normalizedCounter[s]>=0);
+ symbolNext[s] = (U16)normalizedCounter[s];
+ } } }
+ ZSTD_memcpy(dt, &DTableH, sizeof(DTableH));
+ }
+
+ /* Spread symbols */
+ assert(tableSize <= 512);
+ /* Specialized symbol spreading for the case when there are
+ * no low probability (-1 count) symbols. When compressing
+ * small blocks, the compressor avoids low probability symbols
+ * so that this case is hit, since header decoding speed matters more.
+ */
+ if (highThreshold == tableSize - 1) {
+ size_t const tableMask = tableSize-1;
+ size_t const step = FSE_TABLESTEP(tableSize);
+ /* First lay down the symbols in order.
+ * We use a uint64_t to lay down 8 bytes at a time. This reduces branch
+ * misses since small blocks generally have small table logs, so nearly
+ * all symbols have counts <= 8. We ensure we have 8 bytes at the end of
+ * our buffer to handle the over-write.
+ */
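+ /* Example (illustrative) : with normalized counts {3, 5} the first
+ * iteration writes 8 copies of symbol 0 but advances pos by only 3,
+ * so the next iteration's 8 copies of symbol 1 overwrite the surplus;
+ * the spare bytes at the end of `spread` absorb the final over-write. */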
+ {
+ U64 const add = 0x0101010101010101ull;
+ size_t pos = 0;
+ U64 sv = 0;
+ U32 s;
+ for (s=0; s<maxSV1; ++s, sv += add) {
+ int i;
+ int const n = normalizedCounter[s];
+ MEM_write64(spread + pos, sv);
+ for (i = 8; i < n; i += 8) {
+ MEM_write64(spread + pos + i, sv);
+ }
+ pos += n;
+ }
+ }
+ /* Now we spread those positions across the table.
+ * The benefit of doing it in two stages is that we avoid the
+ * variable size inner loop, which caused lots of branch misses.
+ * Now we can run through all the positions without any branch misses.
+ * We unroll the loop twice, since that is what empirically worked best.
+ */
+ {
+ size_t position = 0;
+ size_t s;
+ size_t const unroll = 2;
+ assert(tableSize % unroll == 0); /* FSE_MIN_TABLELOG is 5 */
+ for (s = 0; s < (size_t)tableSize; s += unroll) {
+ size_t u;
+ for (u = 0; u < unroll; ++u) {
+ size_t const uPosition = (position + (u * step)) & tableMask;
+ tableDecode[uPosition].baseValue = spread[s + u];
+ }
+ position = (position + (unroll * step)) & tableMask;
+ }
+ assert(position == 0);
+ }
+ } else {
+ U32 const tableMask = tableSize-1;
+ U32 const step = FSE_TABLESTEP(tableSize);
+ U32 s, position = 0;
+ for (s=0; s<maxSV1; s++) {
+ int i;
+ int const n = normalizedCounter[s];
+ for (i=0; i<n; i++) {
+ tableDecode[position].baseValue = s;
+ position = (position + step) & tableMask;
+ while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */
+ } }
+ assert(position == 0); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
+ }
+
+ /* Build Decoding table */
+ {
+ U32 u;
+ for (u=0; u<tableSize; u++) {
+ U32 const symbol = tableDecode[u].baseValue;
+ U32 const nextState = symbolNext[symbol]++;
+ tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
+ tableDecode[u].nextState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
+ assert(nbAdditionalBits[symbol] < 255);
+ tableDecode[u].nbAdditionalBits = (BYTE)nbAdditionalBits[symbol];
+ tableDecode[u].baseValue = baseValue[symbol];
+ }
+ }
+}
+
+/* Avoids the FORCE_INLINE of the _body() function. */
+static void ZSTD_buildFSETable_body_default(ZSTD_seqSymbol* dt,
+ const short* normalizedCounter, unsigned maxSymbolValue,
+ const U32* baseValue, const U32* nbAdditionalBits,
+ unsigned tableLog, void* wksp, size_t wkspSize)
+{
+ ZSTD_buildFSETable_body(dt, normalizedCounter, maxSymbolValue,
+ baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
+}
+
+#if DYNAMIC_BMI2
+TARGET_ATTRIBUTE("bmi2") static void ZSTD_buildFSETable_body_bmi2(ZSTD_seqSymbol* dt,
+ const short* normalizedCounter, unsigned maxSymbolValue,
+ const U32* baseValue, const U32* nbAdditionalBits,
+ unsigned tableLog, void* wksp, size_t wkspSize)
+{
+ ZSTD_buildFSETable_body(dt, normalizedCounter, maxSymbolValue,
+ baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
+}
+#endif
+
+void ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
+ const short* normalizedCounter, unsigned maxSymbolValue,
+ const U32* baseValue, const U32* nbAdditionalBits,
+ unsigned tableLog, void* wksp, size_t wkspSize, int bmi2)
+{
+#if DYNAMIC_BMI2
+ if (bmi2) {
+ ZSTD_buildFSETable_body_bmi2(dt, normalizedCounter, maxSymbolValue,
+ baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
+ return;
+ }
+#endif
+ (void)bmi2;
+ ZSTD_buildFSETable_body_default(dt, normalizedCounter, maxSymbolValue,
+ baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
+}
+
+
+/*! ZSTD_buildSeqTable() :
+ * @return : nb bytes read from src,
+ * or an error code if it fails */
+static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymbol** DTablePtr,
+ symbolEncodingType_e type, unsigned max, U32 maxLog,
+ const void* src, size_t srcSize,
+ const U32* baseValue, const U32* nbAdditionalBits,
+ const ZSTD_seqSymbol* defaultTable, U32 flagRepeatTable,
+ int ddictIsCold, int nbSeq, U32* wksp, size_t wkspSize,
+ int bmi2)
+{
+ switch(type)
+ {
+ case set_rle :
+ RETURN_ERROR_IF(!srcSize, srcSize_wrong, "");
+ RETURN_ERROR_IF((*(const BYTE*)src) > max, corruption_detected, "");
+ { U32 const symbol = *(const BYTE*)src;
+ U32 const baseline = baseValue[symbol];
+ U32 const nbBits = nbAdditionalBits[symbol];
+ ZSTD_buildSeqTable_rle(DTableSpace, baseline, nbBits);
+ }
+ *DTablePtr = DTableSpace;
+ return 1;
+ case set_basic :
+ *DTablePtr = defaultTable;
+ return 0;
+ case set_repeat:
+ RETURN_ERROR_IF(!flagRepeatTable, corruption_detected, "");
+ /* prefetch FSE table if used */
+ if (ddictIsCold && (nbSeq > 24 /* heuristic */)) {
+ const void* const pStart = *DTablePtr;
+ size_t const pSize = sizeof(ZSTD_seqSymbol) * (SEQSYMBOL_TABLE_SIZE(maxLog));
+ PREFETCH_AREA(pStart, pSize);
+ }
+ return 0;
+ case set_compressed :
+ { unsigned tableLog;
+ S16 norm[MaxSeq+1];
+ size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
+ RETURN_ERROR_IF(FSE_isError(headerSize), corruption_detected, "");
+ RETURN_ERROR_IF(tableLog > maxLog, corruption_detected, "");
+ ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog, wksp, wkspSize, bmi2);
+ *DTablePtr = DTableSpace;
+ return headerSize;
+ }
+ default :
+ assert(0);
+ RETURN_ERROR(GENERIC, "impossible");
+ }
+}
+
+size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
+ const void* src, size_t srcSize)
+{
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* ip = istart;
+ int nbSeq;
+ DEBUGLOG(5, "ZSTD_decodeSeqHeaders");
+
+ /* check */
+ RETURN_ERROR_IF(srcSize < MIN_SEQUENCES_SIZE, srcSize_wrong, "");
+
+ /* SeqHead */
+ nbSeq = *ip++;
+ if (!nbSeq) {
+ *nbSeqPtr=0;
+ RETURN_ERROR_IF(srcSize != 1, srcSize_wrong, "");
+ return 1;
+ }
+ if (nbSeq > 0x7F) {
+ if (nbSeq == 0xFF) {
+ RETURN_ERROR_IF(ip+2 > iend, srcSize_wrong, "");
+ nbSeq = MEM_readLE16(ip) + LONGNBSEQ;
+ ip+=2;
+ } else {
+ RETURN_ERROR_IF(ip >= iend, srcSize_wrong, "");
+ nbSeq = ((nbSeq-0x80)<<8) + *ip++;
+ }
+ }
+ *nbSeqPtr = nbSeq;
+
+ /* FSE table descriptors */
+ RETURN_ERROR_IF(ip+1 > iend, srcSize_wrong, ""); /* minimum possible size: 1 byte for symbol encoding types */
+ { symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
+ symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
+ symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
+ ip++;
+
+ /* Build DTables */
+ { size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr,
+ LLtype, MaxLL, LLFSELog,
+ ip, iend-ip,
+ LL_base, LL_bits,
+ LL_defaultDTable, dctx->fseEntropy,
+ dctx->ddictIsCold, nbSeq,
+ dctx->workspace, sizeof(dctx->workspace),
+ dctx->bmi2);
+ RETURN_ERROR_IF(ZSTD_isError(llhSize), corruption_detected, "ZSTD_buildSeqTable failed");
+ ip += llhSize;
+ }
+
+ { size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr,
+ OFtype, MaxOff, OffFSELog,
+ ip, iend-ip,
+ OF_base, OF_bits,
+ OF_defaultDTable, dctx->fseEntropy,
+ dctx->ddictIsCold, nbSeq,
+ dctx->workspace, sizeof(dctx->workspace),
+ dctx->bmi2);
+ RETURN_ERROR_IF(ZSTD_isError(ofhSize), corruption_detected, "ZSTD_buildSeqTable failed");
+ ip += ofhSize;
+ }
+
+ { size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr,
+ MLtype, MaxML, MLFSELog,
+ ip, iend-ip,
+ ML_base, ML_bits,
+ ML_defaultDTable, dctx->fseEntropy,
+ dctx->ddictIsCold, nbSeq,
+ dctx->workspace, sizeof(dctx->workspace),
+ dctx->bmi2);
+ RETURN_ERROR_IF(ZSTD_isError(mlhSize), corruption_detected, "ZSTD_buildSeqTable failed");
+ ip += mlhSize;
+ }
+ }
+
+ return ip-istart;
+}
+
+
+typedef struct {
+ size_t litLength;
+ size_t matchLength;
+ size_t offset;
+ const BYTE* match;
+} seq_t;
+
+typedef struct {
+ size_t state;
+ const ZSTD_seqSymbol* table;
+} ZSTD_fseState;
+
+typedef struct {
+ BIT_DStream_t DStream;
+ ZSTD_fseState stateLL;
+ ZSTD_fseState stateOffb;
+ ZSTD_fseState stateML;
+ size_t prevOffset[ZSTD_REP_NUM];
+ const BYTE* prefixStart;
+ const BYTE* dictEnd;
+ size_t pos;
+} seqState_t;
+
+/*! ZSTD_overlapCopy8() :
+ * Copies 8 bytes from ip to op and updates op and ip where ip <= op.
+ * If the offset is < 8 then the offset is spread to at least 8 bytes.
+ *
+ * Precondition: *ip <= *op
+ * Postcondition: *op - *ip >= 8
+ */
+HINT_INLINE void ZSTD_overlapCopy8(BYTE** op, BYTE const** ip, size_t offset) {
+ assert(*ip <= *op);
+ if (offset < 8) {
+ /* close range match, overlap */
+ static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */
+ static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */
+ int const sub2 = dec64table[offset];
+ (*op)[0] = (*ip)[0];
+ (*op)[1] = (*ip)[1];
+ (*op)[2] = (*ip)[2];
+ (*op)[3] = (*ip)[3];
+ *ip += dec32table[offset];
+ ZSTD_copy4(*op+4, *ip);
+ *ip -= sub2;
+ } else {
+ ZSTD_copy8(*op, *ip);
+ }
+ *ip += 8;
+ *op += 8;
+ assert(*op - *ip >= 8);
+}
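+
+/* Example (illustrative) : with offset==1, the four byte-wise copies each
+ * duplicate the byte just before *op, the dec32table[1]==1 / dec64table[1]==8
+ * adjustments realign *ip, and the function exits with 8 copies of that byte
+ * written and *op - *ip == 8, so callers can wildcopy without overlap checks. */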
+
+/*! ZSTD_safecopy() :
+ * Specialized version of memcpy() that is allowed to READ up to WILDCOPY_OVERLENGTH past the input buffer
+ * and write up to 16 bytes past oend_w (op >= oend_w is allowed).
+ * This function is only called in the uncommon case where the sequence is near the end of the block. It
+ * should be fast for a single long sequence, but can be slow for several short sequences.
+ *
+ * @param ovtype controls the overlap detection
+ * - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
+ * - ZSTD_overlap_src_before_dst: The src and dst may overlap and may be any distance apart.
+ * The src buffer must be before the dst buffer.
+ */
+static void ZSTD_safecopy(BYTE* op, BYTE* const oend_w, BYTE const* ip, ptrdiff_t length, ZSTD_overlap_e ovtype) {
+ ptrdiff_t const diff = op - ip;
+ BYTE* const oend = op + length;
+
+ assert((ovtype == ZSTD_no_overlap && (diff <= -8 || diff >= 8 || op >= oend_w)) ||
+ (ovtype == ZSTD_overlap_src_before_dst && diff >= 0));
+
+ if (length < 8) {
+ /* Handle short lengths. */
+ while (op < oend) *op++ = *ip++;
+ return;
+ }
+ if (ovtype == ZSTD_overlap_src_before_dst) {
+ /* Copy 8 bytes and ensure the offset >= 8 when there can be overlap. */
+ assert(length >= 8);
+ ZSTD_overlapCopy8(&op, &ip, diff);
+ assert(op - ip >= 8);
+ assert(op <= oend);
+ }
+
+ if (oend <= oend_w) {
+ /* No risk of overwrite. */
+ ZSTD_wildcopy(op, ip, length, ovtype);
+ return;
+ }
+ if (op <= oend_w) {
+ /* Wildcopy until we get close to the end. */
+ assert(oend > oend_w);
+ ZSTD_wildcopy(op, ip, oend_w - op, ovtype);
+ ip += oend_w - op;
+ op = oend_w;
+ }
+ /* Handle the leftovers. */
+ while (op < oend) *op++ = *ip++;
+}
+
+/* ZSTD_execSequenceEnd():
+ * This version handles cases that are near the end of the output buffer. It requires
+ * more careful checks to make sure there is no overflow. By separating out these hard
+ * and unlikely cases, we can speed up the common cases.
+ *
+ * NOTE: This function needs to be fast for a single long sequence, but doesn't need
+ * to be optimized for many small sequences, since those fall into ZSTD_execSequence().
+ */
+FORCE_NOINLINE
+size_t ZSTD_execSequenceEnd(BYTE* op,
+ BYTE* const oend, seq_t sequence,
+ const BYTE** litPtr, const BYTE* const litLimit,
+ const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
+{
+ BYTE* const oLitEnd = op + sequence.litLength;
+ size_t const sequenceLength = sequence.litLength + sequence.matchLength;
+ const BYTE* const iLitEnd = *litPtr + sequence.litLength;
+ const BYTE* match = oLitEnd - sequence.offset;
+ BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
+
+ /* bounds checks : careful of address space overflow in 32-bit mode */
+ RETURN_ERROR_IF(sequenceLength > (size_t)(oend - op), dstSize_tooSmall, "last match must fit within dstBuffer");
+ RETURN_ERROR_IF(sequence.litLength > (size_t)(litLimit - *litPtr), corruption_detected, "try to read beyond literal buffer");
+ assert(op < op + sequenceLength);
+ assert(oLitEnd < op + sequenceLength);
+
+ /* copy literals */
+ ZSTD_safecopy(op, oend_w, *litPtr, sequence.litLength, ZSTD_no_overlap);
+ op = oLitEnd;
+ *litPtr = iLitEnd;
+
+ /* copy Match */
+ if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
+ /* offset beyond prefix */
+ RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected, "");
+ match = dictEnd - (prefixStart-match);
+ if (match + sequence.matchLength <= dictEnd) {
+ ZSTD_memmove(oLitEnd, match, sequence.matchLength);
+ return sequenceLength;
+ }
+ /* span extDict & currentPrefixSegment */
+ { size_t const length1 = dictEnd - match;
+ ZSTD_memmove(oLitEnd, match, length1);
+ op = oLitEnd + length1;
+ sequence.matchLength -= length1;
+ match = prefixStart;
+ } }
+ ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst);
+ return sequenceLength;
+}
+
+HINT_INLINE
+size_t ZSTD_execSequence(BYTE* op,
+ BYTE* const oend, seq_t sequence,
+ const BYTE** litPtr, const BYTE* const litLimit,
+ const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
+{
+ BYTE* const oLitEnd = op + sequence.litLength;
+ size_t const sequenceLength = sequence.litLength + sequence.matchLength;
+ BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
+ BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH; /* risk : address space underflow on oend=NULL */
+ const BYTE* const iLitEnd = *litPtr + sequence.litLength;
+ const BYTE* match = oLitEnd - sequence.offset;
+
+ assert(op != NULL /* Precondition */);
+ assert(oend_w < oend /* No underflow */);
+ /* Handle edge cases in a slow path:
+ * - Read beyond end of literals
+ * - Match end is within WILDCOPY_OVERLENGTH of oend
+ * - 32-bit mode and the match length overflows
+ */
+ if (UNLIKELY(
+ iLitEnd > litLimit ||
+ oMatchEnd > oend_w ||
+ (MEM_32bits() && (size_t)(oend - op) < sequenceLength + WILDCOPY_OVERLENGTH)))
+ return ZSTD_execSequenceEnd(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);
+
+ /* Assumptions (everything else goes into ZSTD_execSequenceEnd()) */
+ assert(op <= oLitEnd /* No overflow */);
+ assert(oLitEnd < oMatchEnd /* Non-zero match & no overflow */);
+ assert(oMatchEnd <= oend /* No underflow */);
+ assert(iLitEnd <= litLimit /* Literal length is in bounds */);
+ assert(oLitEnd <= oend_w /* Can wildcopy literals */);
+ assert(oMatchEnd <= oend_w /* Can wildcopy matches */);
+
+ /* Copy Literals:
+ * Split out litLength <= 16 since it is nearly always true. +1.6% on gcc-9.
+ * We likely don't need the full 32-byte wildcopy.
+ */
+ assert(WILDCOPY_OVERLENGTH >= 16);
+ ZSTD_copy16(op, (*litPtr));
+ if (UNLIKELY(sequence.litLength > 16)) {
+ ZSTD_wildcopy(op+16, (*litPtr)+16, sequence.litLength-16, ZSTD_no_overlap);
+ }
+ op = oLitEnd;
+ *litPtr = iLitEnd; /* update for next sequence */
+
+ /* Copy Match */
+ if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
+ /* offset beyond prefix -> go into extDict */
+ RETURN_ERROR_IF(UNLIKELY(sequence.offset > (size_t)(oLitEnd - virtualStart)), corruption_detected, "");
+ match = dictEnd + (match - prefixStart);
+ if (match + sequence.matchLength <= dictEnd) {
+ ZSTD_memmove(oLitEnd, match, sequence.matchLength);
+ return sequenceLength;
+ }
+ /* span extDict & currentPrefixSegment */
+ { size_t const length1 = dictEnd - match;
+ ZSTD_memmove(oLitEnd, match, length1);
+ op = oLitEnd + length1;
+ sequence.matchLength -= length1;
+ match = prefixStart;
+ } }
+ /* Match within prefix of 1 or more bytes */
+ assert(op <= oMatchEnd);
+ assert(oMatchEnd <= oend_w);
+ assert(match >= prefixStart);
+ assert(sequence.matchLength >= 1);
+
+ /* Nearly all offsets are >= WILDCOPY_VECLEN bytes, which means we can use wildcopy
+ * without overlap checking.
+ */
+ if (LIKELY(sequence.offset >= WILDCOPY_VECLEN)) {
+ /* We bet on a full wildcopy for matches, since we expect matches to be
+ * longer than literals (in general). In silesia, ~10% of matches are longer
+ * than 16 bytes.
+ */
+ ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength, ZSTD_no_overlap);
+ return sequenceLength;
+ }
+ assert(sequence.offset < WILDCOPY_VECLEN);
+
+ /* Copy 8 bytes and spread the offset to be >= 8. */
+ ZSTD_overlapCopy8(&op, &match, sequence.offset);
+
+ /* If the match length is > 8 bytes, then continue with the wildcopy. */
+ if (sequence.matchLength > 8) {
+ assert(op < oMatchEnd);
+ ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst);
+ }
+ return sequenceLength;
+}
+
+static void
+ZSTD_initFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, const ZSTD_seqSymbol* dt)
+{
+ const void* ptr = dt;
+ const ZSTD_seqSymbol_header* const DTableH = (const ZSTD_seqSymbol_header*)ptr;
+ DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
+ DEBUGLOG(6, "ZSTD_initFseState : val=%u using %u bits",
+ (U32)DStatePtr->state, DTableH->tableLog);
+ BIT_reloadDStream(bitD);
+ DStatePtr->table = dt + 1;
+}
+
+FORCE_INLINE_TEMPLATE void
+ZSTD_updateFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD)
+{
+ ZSTD_seqSymbol const DInfo = DStatePtr->table[DStatePtr->state];
+ U32 const nbBits = DInfo.nbBits;
+ size_t const lowBits = BIT_readBits(bitD, nbBits);
+ DStatePtr->state = DInfo.nextState + lowBits;
+}
+
+FORCE_INLINE_TEMPLATE void
+ZSTD_updateFseStateWithDInfo(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, ZSTD_seqSymbol const DInfo)
+{
+ U32 const nbBits = DInfo.nbBits;
+ size_t const lowBits = BIT_readBits(bitD, nbBits);
+ DStatePtr->state = DInfo.nextState + lowBits;
+}
+
+/* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum
+ * offset bits. But we can only read at most (STREAM_ACCUMULATOR_MIN_32 - 1)
+ * bits before reloading. This value is the maximum number of bits we read
+ * after reloading when we are decoding long offsets.
+ */
+#define LONG_OFFSETS_MAX_EXTRA_BITS_32 \
+ (ZSTD_WINDOWLOG_MAX_32 > STREAM_ACCUMULATOR_MIN_32 \
+ ? ZSTD_WINDOWLOG_MAX_32 - STREAM_ACCUMULATOR_MIN_32 \
+ : 0)
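+/* With the current limits (ZSTD_WINDOWLOG_MAX_32 == 30 and
+ * STREAM_ACCUMULATOR_MIN_32 == 25) this evaluates to 5, which matches the
+ * static assert in ZSTD_decodeSequence(). */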
+
+typedef enum { ZSTD_lo_isRegularOffset, ZSTD_lo_isLongOffset=1 } ZSTD_longOffset_e;
+typedef enum { ZSTD_p_noPrefetch=0, ZSTD_p_prefetch=1 } ZSTD_prefetch_e;
+
+FORCE_INLINE_TEMPLATE seq_t
+ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets, const ZSTD_prefetch_e prefetch)
+{
+ seq_t seq;
+ ZSTD_seqSymbol const llDInfo = seqState->stateLL.table[seqState->stateLL.state];
+ ZSTD_seqSymbol const mlDInfo = seqState->stateML.table[seqState->stateML.state];
+ ZSTD_seqSymbol const ofDInfo = seqState->stateOffb.table[seqState->stateOffb.state];
+ U32 const llBase = llDInfo.baseValue;
+ U32 const mlBase = mlDInfo.baseValue;
+ U32 const ofBase = ofDInfo.baseValue;
+ BYTE const llBits = llDInfo.nbAdditionalBits;
+ BYTE const mlBits = mlDInfo.nbAdditionalBits;
+ BYTE const ofBits = ofDInfo.nbAdditionalBits;
+ BYTE const totalBits = llBits+mlBits+ofBits;
+
+ /* sequence */
+ { size_t offset;
+ if (ofBits > 1) {
+ ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1);
+ ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5);
+ assert(ofBits <= MaxOff);
+ if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) {
+ U32 const extraBits = ofBits - MIN(ofBits, 32 - seqState->DStream.bitsConsumed);
+ offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
+ BIT_reloadDStream(&seqState->DStream);
+ if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits);
+ assert(extraBits <= LONG_OFFSETS_MAX_EXTRA_BITS_32); /* to avoid another reload */
+ } else {
+ offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */
+ if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
+ }
+ seqState->prevOffset[2] = seqState->prevOffset[1];
+ seqState->prevOffset[1] = seqState->prevOffset[0];
+ seqState->prevOffset[0] = offset;
+ } else {
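+ /* ofBits <= 1 : the offset is a repeat code. A zero literal length (ll0)
+ * shifts the repcode index by one so rep[0] is not re-selected right after
+ * an empty literal run; index 3 with ll0 means rep[0] - 1. */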
+ U32 const ll0 = (llBase == 0);
+ if (LIKELY((ofBits == 0))) {
+ if (LIKELY(!ll0))
+ offset = seqState->prevOffset[0];
+ else {
+ offset = seqState->prevOffset[1];
+ seqState->prevOffset[1] = seqState->prevOffset[0];
+ seqState->prevOffset[0] = offset;
+ }
+ } else {
+ offset = ofBase + ll0 + BIT_readBitsFast(&seqState->DStream, 1);
+ { size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
+ temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */
+ if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
+ seqState->prevOffset[1] = seqState->prevOffset[0];
+ seqState->prevOffset[0] = offset = temp;
+ } } }
+ seq.offset = offset;
+ }
+
+ seq.matchLength = mlBase;
+ if (mlBits > 0)
+ seq.matchLength += BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/);
+
+ if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32))
+ BIT_reloadDStream(&seqState->DStream);
+ if (MEM_64bits() && UNLIKELY(totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog)))
+ BIT_reloadDStream(&seqState->DStream);
+ /* Ensure there are enough bits to read the rest of data in 64-bit mode. */
+ ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64);
+
+ seq.litLength = llBase;
+ if (llBits > 0)
+ seq.litLength += BIT_readBitsFast(&seqState->DStream, llBits/*>0*/);
+
+ if (MEM_32bits())
+ BIT_reloadDStream(&seqState->DStream);
+
+ DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u",
+ (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
+
+ if (prefetch == ZSTD_p_prefetch) {
+ size_t const pos = seqState->pos + seq.litLength;
+ const BYTE* const matchBase = (seq.offset > pos) ? seqState->dictEnd : seqState->prefixStart;
+ seq.match = matchBase + pos - seq.offset; /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted.
+ * No consequence though : no memory access will occur, offset is only used for prefetching */
+ seqState->pos = pos + seq.matchLength;
+ }
+
+ /* ANS state update
+ * gcc-9.0.0 does 2.5% worse with ZSTD_updateFseStateWithDInfo().
+ * clang-9.2.0 does 7% worse with ZSTD_updateFseState().
+ * Naturally it seems like ZSTD_updateFseStateWithDInfo() should be the
+ * better option, so it is the default for other compilers. But, if you
+ * measure that it is worse, please put up a pull request.
+ */
+ {
+#if !defined(__clang__)
+ const int kUseUpdateFseState = 1;
+#else
+ const int kUseUpdateFseState = 0;
+#endif
+ if (kUseUpdateFseState) {
+ ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */
+ ZSTD_updateFseState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */
+ if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */
+ ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */
+ } else {
+ ZSTD_updateFseStateWithDInfo(&seqState->stateLL, &seqState->DStream, llDInfo); /* <= 9 bits */
+ ZSTD_updateFseStateWithDInfo(&seqState->stateML, &seqState->DStream, mlDInfo); /* <= 9 bits */
+ if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */
+ ZSTD_updateFseStateWithDInfo(&seqState->stateOffb, &seqState->DStream, ofDInfo); /* <= 8 bits */
+ }
+ }
+
+ return seq;
+}
+
+#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+MEM_STATIC int ZSTD_dictionaryIsActive(ZSTD_DCtx const* dctx, BYTE const* prefixStart, BYTE const* oLitEnd)
+{
+ size_t const windowSize = dctx->fParams.windowSize;
+ /* No dictionary used. */
+ if (dctx->dictContentEndForFuzzing == NULL) return 0;
+ /* Dictionary is our prefix. */
+ if (prefixStart == dctx->dictContentBeginForFuzzing) return 1;
+ /* Dictionary is not our ext-dict. */
+ if (dctx->dictEnd != dctx->dictContentEndForFuzzing) return 0;
+ /* Dictionary is not within our window size. */
+ if ((size_t)(oLitEnd - prefixStart) >= windowSize) return 0;
+ /* Dictionary is active. */
+ return 1;
+}
+
+MEM_STATIC void ZSTD_assertValidSequence(
+ ZSTD_DCtx const* dctx,
+ BYTE const* op, BYTE const* oend,
+ seq_t const seq,
+ BYTE const* prefixStart, BYTE const* virtualStart)
+{
+#if DEBUGLEVEL >= 1
+ size_t const windowSize = dctx->fParams.windowSize;
+ size_t const sequenceSize = seq.litLength + seq.matchLength;
+ BYTE const* const oLitEnd = op + seq.litLength;
+ DEBUGLOG(6, "Checking sequence: litL=%u matchL=%u offset=%u",
+ (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
+ assert(op <= oend);
+ assert((size_t)(oend - op) >= sequenceSize);
+ assert(sequenceSize <= ZSTD_BLOCKSIZE_MAX);
+ if (ZSTD_dictionaryIsActive(dctx, prefixStart, oLitEnd)) {
+ size_t const dictSize = (size_t)((char const*)dctx->dictContentEndForFuzzing - (char const*)dctx->dictContentBeginForFuzzing);
+ /* Offset must be within the dictionary. */
+ assert(seq.offset <= (size_t)(oLitEnd - virtualStart));
+ assert(seq.offset <= windowSize + dictSize);
+ } else {
+ /* Offset must be within our window. */
+ assert(seq.offset <= windowSize);
+ }
+#else
+ (void)dctx, (void)op, (void)oend, (void)seq, (void)prefixStart, (void)virtualStart;
+#endif
+}
+#endif
+
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
+FORCE_INLINE_TEMPLATE size_t
+DONT_VECTORIZE
+ZSTD_decompressSequences_body( ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset,
+ const int frame)
+{
+ const BYTE* ip = (const BYTE*)seqStart;
+ const BYTE* const iend = ip + seqSize;
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = ostart + maxDstSize;
+ BYTE* op = ostart;
+ const BYTE* litPtr = dctx->litPtr;
+ const BYTE* const litEnd = litPtr + dctx->litSize;
+ const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
+ const BYTE* const vBase = (const BYTE*) (dctx->virtualStart);
+ const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
+ DEBUGLOG(5, "ZSTD_decompressSequences_body");
+ (void)frame;
+
+ /* Regen sequences */
+ if (nbSeq) {
+ seqState_t seqState;
+ size_t error = 0;
+ dctx->fseEntropy = 1;
+ { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
+ RETURN_ERROR_IF(
+ ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
+ corruption_detected, "");
+ ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
+ ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
+ ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
+ assert(dst != NULL);
+
+ ZSTD_STATIC_ASSERT(
+ BIT_DStream_unfinished < BIT_DStream_completed &&
+ BIT_DStream_endOfBuffer < BIT_DStream_completed &&
+ BIT_DStream_completed < BIT_DStream_overflow);
+
+#if defined(__x86_64__)
+ /* Align the decompression loop to 32 + 16 bytes.
+ *
+ * zstd compiled with gcc-9 on an Intel i9-9900k shows 10% decompression
+ * speed swings based on the alignment of the decompression loop. This
+ * performance swing is caused by parts of the decompression loop falling
+ * out of the DSB. The entire decompression loop should fit in the DSB,
+ * when it can't we get much worse performance. You can measure if you've
+ * hit the good case or the bad case with this perf command for some
+ * compressed file test.zst:
+ *
+ * perf stat -e cycles -e instructions -e idq.all_dsb_cycles_any_uops \
+ * -e idq.all_mite_cycles_any_uops -- ./zstd -tq test.zst
+ *
+ * If you see most cycles served out of the MITE you've hit the bad case.
+ * If you see most cycles served out of the DSB you've hit the good case.
+ * If it is pretty even then you may be in an okay case.
+ *
+ * I've been able to reproduce this issue on the following CPUs:
+ * - Kabylake: Macbook Pro (15-inch, 2019) 2.4 GHz Intel Core i9
+ * Use Instruments->Counters to get DSB/MITE cycles.
+ * I never got performance swings, but I was able to
+ * go from the good case of mostly DSB to half of the
+ * cycles served from MITE.
+ * - Coffeelake: Intel i9-9900k
+ *
+ * I haven't been able to reproduce the instability or DSB misses on any
+ * of the following CPUS:
+ * - Haswell
+ * - Broadwell: Intel(R) Xeon(R) CPU E5-2680 v4 @ 2.40GHz
+ * - Skylake
+ *
+ * If you are seeing performance instability, this script can help test.
+ * It tests on 4 commits in zstd where I saw performance change.
+ *
+ * https://gist.github.com/terrelln/9889fc06a423fd5ca6e99351564473f4
+ */
+ __asm__(".p2align 5");
+ __asm__("nop");
+ __asm__(".p2align 4");
+#endif
+ for ( ; ; ) {
+ seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, ZSTD_p_noPrefetch);
+ size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, prefixStart, vBase, dictEnd);
+#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
+ assert(!ZSTD_isError(oneSeqSize));
+ if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
+#endif
+ DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
+ BIT_reloadDStream(&(seqState.DStream));
+ op += oneSeqSize;
+ /* gcc and clang both don't like early returns in this loop.
+ * Instead break and check for an error at the end of the loop.
+ */
+ if (UNLIKELY(ZSTD_isError(oneSeqSize))) {
+ error = oneSeqSize;
+ break;
+ }
+ if (UNLIKELY(!--nbSeq)) break;
+ }
+
+ /* check if reached exact end */
+ DEBUGLOG(5, "ZSTD_decompressSequences_body: after decode loop, remaining nbSeq : %i", nbSeq);
+ if (ZSTD_isError(error)) return error;
+ RETURN_ERROR_IF(nbSeq, corruption_detected, "");
+ RETURN_ERROR_IF(BIT_reloadDStream(&seqState.DStream) < BIT_DStream_completed, corruption_detected, "");
+ /* save reps for next block */
+ { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
+ }
+
+ /* last literal segment */
+ { size_t const lastLLSize = litEnd - litPtr;
+ RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
+ if (op != NULL) {
+ ZSTD_memcpy(op, litPtr, lastLLSize);
+ op += lastLLSize;
+ }
+ }
+
+ return op-ostart;
+}
+
+static size_t
+ZSTD_decompressSequences_default(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset,
+ const int frame)
+{
+ return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+}
+#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
+
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_decompressSequencesLong_body(
+ ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset,
+ const int frame)
+{
+ const BYTE* ip = (const BYTE*)seqStart;
+ const BYTE* const iend = ip + seqSize;
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = ostart + maxDstSize;
+ BYTE* op = ostart;
+ const BYTE* litPtr = dctx->litPtr;
+ const BYTE* const litEnd = litPtr + dctx->litSize;
+ const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
+ const BYTE* const dictStart = (const BYTE*) (dctx->virtualStart);
+ const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
+ (void)frame;
+
+ /* Regen sequences */
+ if (nbSeq) {
+#define STORED_SEQS 4
+#define STORED_SEQS_MASK (STORED_SEQS-1)
+#define ADVANCED_SEQS 4
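+ /* The long variant decodes sequences a few steps ahead of execution :
+ * it keeps the last STORED_SEQS decoded sequences in a small ring and
+ * prefetches each match so that, ADVANCED_SEQS iterations later, the
+ * match data is already in cache when ZSTD_execSequence() copies it. */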
+ seq_t sequences[STORED_SEQS];
+ int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS);
+ seqState_t seqState;
+ int seqNb;
+ dctx->fseEntropy = 1;
+ { int i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
+ seqState.prefixStart = prefixStart;
+ seqState.pos = (size_t)(op-prefixStart);
+ seqState.dictEnd = dictEnd;
+ assert(dst != NULL);
+ assert(iend >= ip);
+ RETURN_ERROR_IF(
+ ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
+ corruption_detected, "");
+ ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
+ ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
+ ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
+
+ /* prepare in advance */
+ for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && (seqNb<seqAdvance); seqNb++) {
+ sequences[seqNb] = ZSTD_decodeSequence(&seqState, isLongOffset, ZSTD_p_prefetch);
+ PREFETCH_L1(sequences[seqNb].match); PREFETCH_L1(sequences[seqNb].match + sequences[seqNb].matchLength - 1); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
+ }
+ RETURN_ERROR_IF(seqNb<seqAdvance, corruption_detected, "");
+
+ /* decode and decompress */
+ for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (seqNb<nbSeq) ; seqNb++) {
+ seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, ZSTD_p_prefetch);
+ size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
+#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
+ assert(!ZSTD_isError(oneSeqSize));
+ if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb-ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart);
+#endif
+ if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+ PREFETCH_L1(sequence.match); PREFETCH_L1(sequence.match + sequence.matchLength - 1); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
+ sequences[seqNb & STORED_SEQS_MASK] = sequence;
+ op += oneSeqSize;
+ }
+ RETURN_ERROR_IF(seqNb<nbSeq, corruption_detected, "");
+
+ /* finish queue */
+ seqNb -= seqAdvance;
+ for ( ; seqNb<nbSeq ; seqNb++) {
+ size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[seqNb&STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
+#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
+ assert(!ZSTD_isError(oneSeqSize));
+ if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart);
+#endif
+ if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+ op += oneSeqSize;
+ }
+
+ /* save reps for next block */
+ { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
+ }
+
+ /* last literal segment */
+ { size_t const lastLLSize = litEnd - litPtr;
+ RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
+ if (op != NULL) {
+ ZSTD_memcpy(op, litPtr, lastLLSize);
+ op += lastLLSize;
+ }
+ }
+
+ return op-ostart;
+}
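The body above pipelines decoding ahead of execution: it keeps ADVANCED_SEQS freshly decoded sequences in a small power-of-two ring buffer, prefetches each new sequence's match while an older sequence is being copied, then drains the remaining queue. A minimal sketch of the same decode-ahead pattern, with hypothetical decode_next()/execute()/prefetch() helpers standing in for ZSTD_decodeSequence(), ZSTD_execSequence() and PREFETCH_L1():

    #define LOOKAHEAD 4                          /* power of two, like STORED_SEQS */
    seq_t ring[LOOKAHEAD];
    int n, ahead = MIN(nbSeq, LOOKAHEAD);
    for (n = 0; n < ahead; n++) {                /* prime the queue */
        ring[n] = decode_next();
        prefetch(ring[n].match);                 /* warm the cache before the copy */
    }
    for (; n < nbSeq; n++) {                     /* steady state */
        seq_t const fresh = decode_next();       /* decode sequence n ... */
        execute(ring[(n - LOOKAHEAD) & (LOOKAHEAD - 1)]);   /* ... while copying sequence n-4 */
        prefetch(fresh.match);
        ring[n & (LOOKAHEAD - 1)] = fresh;
    }
    for (n -= LOOKAHEAD; n < nbSeq; n++)         /* drain what is still queued */
        execute(ring[n & (LOOKAHEAD - 1)]);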
+
+static size_t
+ZSTD_decompressSequencesLong_default(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset,
+ const int frame)
+{
+ return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+}
+#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
+
+
+
+#if DYNAMIC_BMI2
+
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
+static TARGET_ATTRIBUTE("bmi2") size_t
+DONT_VECTORIZE
+ZSTD_decompressSequences_bmi2(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset,
+ const int frame)
+{
+ return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+}
+#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
+
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
+static TARGET_ATTRIBUTE("bmi2") size_t
+ZSTD_decompressSequencesLong_bmi2(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset,
+ const int frame)
+{
+ return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+}
+#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
+
+#endif /* DYNAMIC_BMI2 */
+
+typedef size_t (*ZSTD_decompressSequences_t)(
+ ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset,
+ const int frame);
+
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
+static size_t
+ZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset,
+ const int frame)
+{
+ DEBUGLOG(5, "ZSTD_decompressSequences");
+#if DYNAMIC_BMI2
+ if (dctx->bmi2) {
+ return ZSTD_decompressSequences_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+ }
+#endif
+ return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+}
+#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
+
+
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
+/* ZSTD_decompressSequencesLong() :
+ * decompression function triggered when a minimum share of offsets is considered "long",
+ * aka out of cache.
+ * note : "long" definition seems overloaded here, sometimes meaning "wider than bitstream register", and sometimes meaning "farther than memory cache distance".
+ * This function will try to mitigate main memory latency through the use of prefetching */
+static size_t
+ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset,
+ const int frame)
+{
+ DEBUGLOG(5, "ZSTD_decompressSequencesLong");
+#if DYNAMIC_BMI2
+ if (dctx->bmi2) {
+ return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+ }
+#endif
+ return ZSTD_decompressSequencesLong_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+}
+#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
+
+
+
+#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
+ !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
+/* ZSTD_getLongOffsetsShare() :
+ * condition : offTable must be valid
+ * @return : "share" of long offsets (arbitrarily defined as > (1<<23))
+ * compared to maximum possible of (1<<OffFSELog) */
+static unsigned
+ZSTD_getLongOffsetsShare(const ZSTD_seqSymbol* offTable)
+{
+ const void* ptr = offTable;
+ U32 const tableLog = ((const ZSTD_seqSymbol_header*)ptr)[0].tableLog;
+ const ZSTD_seqSymbol* table = offTable + 1;
+ U32 const max = 1 << tableLog;
+ U32 u, total = 0;
+ DEBUGLOG(5, "ZSTD_getLongOffsetsShare: (tableLog=%u)", tableLog);
+
+ assert(max <= (1 << OffFSELog)); /* max not too large */
+ for (u=0; u<max; u++) {
+ if (table[u].nbAdditionalBits > 22) total += 1;
+ }
+
+ assert(tableLog <= OffFSELog);
+ total <<= (OffFSELog - tableLog); /* scale to OffFSELog */
+
+ return total;
+}
+#endif
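ZSTD_getLongOffsetsShare() reports its count on a 1<<OffFSELog scale (256 with the usual OffFSELog of 8), which is how the minShare thresholds used further down (7 on 64-bit, 20 on 32-bit) translate into the 2.73% and 7.81% quoted there. A worked example under that assumption:

    /* Assuming OffFSELog == 8, so the scaled maximum is 1 << 8 == 256.       */
    /* An offset table with tableLog == 5 has 32 cells; suppose 2 of them
     * carry nbAdditionalBits > 22, i.e. offsets beyond roughly 8 MiB.        */
    total = 2;
    total <<= (8 - 5);       /* rescale 2/32 to the 256 denominator -> 16     */
    /* share 16 >= 7  : a 64-bit build switches to the prefetching decoder;
     * share 16 <  20 : a 32-bit build keeps the regular decoder.             */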
+
+size_t
+ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize, const int frame)
+{ /* blockType == blockCompressed */
+ const BYTE* ip = (const BYTE*)src;
+ /* isLongOffset must be true if there are long offsets.
+ * Offsets are long if they are larger than 2^STREAM_ACCUMULATOR_MIN.
+ * We don't expect that to be the case in 64-bit mode.
+ * In block mode, window size is not known, so we have to be conservative.
+ * (note: but it could be evaluated from current-lowLimit)
+ */
+ ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || (dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN))));
+ DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize);
+
+ RETURN_ERROR_IF(srcSize >= ZSTD_BLOCKSIZE_MAX, srcSize_wrong, "");
+
+ /* Decode literals section */
+ { size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
+ DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : %u", (U32)litCSize);
+ if (ZSTD_isError(litCSize)) return litCSize;
+ ip += litCSize;
+ srcSize -= litCSize;
+ }
+
+ /* Build Decoding Tables */
+ {
+ /* These macros control at build-time which decompressor implementation
+ * we use. If neither is defined, we do some inspection and dispatch at
+ * runtime.
+ */
+#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
+ !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
+ int usePrefetchDecoder = dctx->ddictIsCold;
+#endif
+ int nbSeq;
+ size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize);
+ if (ZSTD_isError(seqHSize)) return seqHSize;
+ ip += seqHSize;
+ srcSize -= seqHSize;
+
+ RETURN_ERROR_IF(dst == NULL && nbSeq > 0, dstSize_tooSmall, "NULL not handled");
+
+#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
+ !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
+ if ( !usePrefetchDecoder
+ && (!frame || (dctx->fParams.windowSize > (1<<24)))
+ && (nbSeq>ADVANCED_SEQS) ) { /* could probably use a larger nbSeq limit */
+ U32 const shareLongOffsets = ZSTD_getLongOffsetsShare(dctx->OFTptr);
+ U32 const minShare = MEM_64bits() ? 7 : 20; /* heuristic values; they correspond to 2.73% and 7.81% */
+ usePrefetchDecoder = (shareLongOffsets >= minShare);
+ }
+#endif
+
+ dctx->ddictIsCold = 0;
+
+#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
+ !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
+ if (usePrefetchDecoder)
+#endif
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
+ return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame);
+#endif
+
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
+ /* else */
+ return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame);
+#endif
+ }
+}
+
+
+void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst, size_t dstSize)
+{
+ if (dst != dctx->previousDstEnd && dstSize > 0) { /* not contiguous */
+ dctx->dictEnd = dctx->previousDstEnd;
+ dctx->virtualStart = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));
+ dctx->prefixStart = dst;
+ dctx->previousDstEnd = dst;
+ }
+}
+
+
+size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ size_t dSize;
+ ZSTD_checkContinuity(dctx, dst, dstCapacity);
+ dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 0);
+ dctx->previousDstEnd = (char*)dst + dSize;
+ return dSize;
+}
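ZSTD_decompressBlock() handles one raw compressed block at a time, and ZSTD_checkContinuity() lets back-references reach data produced by earlier calls as long as the caller keeps appending into the same contiguous buffer. A minimal caller sketch, assuming the dctx has already been created and reset elsewhere and that the block boundaries (blocks[]/blockSizes[]) come from the surrounding framing layer:

    BYTE* out = outBuffer;                    /* hypothetical output cursor */
    size_t i;
    for (i = 0; i < nbBlocks; i++) {
        size_t const r = ZSTD_decompressBlock(dctx, out, (size_t)(outEnd - out),
                                              blocks[i], blockSizes[i]);
        if (ZSTD_isError(r)) return r;
        out += r;   /* staying contiguous keeps the window visible to later blocks */
    }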
diff --git a/lib/zstd/decompress/zstd_decompress_block.h b/lib/zstd/decompress/zstd_decompress_block.h
new file mode 100644
index 00000000000000..e7f5f6689459f7
--- /dev/null
+++ b/lib/zstd/decompress/zstd_decompress_block.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+#ifndef ZSTD_DEC_BLOCK_H
+#define ZSTD_DEC_BLOCK_H
+
+/*-*******************************************************
+ * Dependencies
+ *********************************************************/
+#include "../common/zstd_deps.h" /* size_t */
+#include <linux/zstd.h> /* DCtx, and some public functions */
+#include "../common/zstd_internal.h" /* blockProperties_t, and some public functions */
+#include "zstd_decompress_internal.h" /* ZSTD_seqSymbol */
+
+
+/* === Prototypes === */
+
+/* note: prototypes already published within `zstd.h` :
+ * ZSTD_decompressBlock()
+ */
+
+/* note: prototypes already published within `zstd_internal.h` :
+ * ZSTD_getcBlockSize()
+ * ZSTD_decodeSeqHeaders()
+ */
+
+
+/* ZSTD_decompressBlock_internal() :
+ * decompress block, starting at `src`,
+ * into destination buffer `dst`.
+ * @return : decompressed block size,
+ * or an error code (which can be tested using ZSTD_isError())
+ */
+size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize, const int frame);
+
+/* ZSTD_buildFSETable() :
+ * generate FSE decoding table for one symbol (ll, ml or off)
+ * this function must be called with valid parameters only
+ * (dt is large enough, normalizedCounter distribution total is a power of 2, max is within range, etc.)
+ * in which case it cannot fail.
+ * The workspace must be 4-byte aligned and at least ZSTD_BUILD_FSE_TABLE_WKSP_SIZE bytes, which is
+ * defined in zstd_decompress_internal.h.
+ * Internal use only.
+ */
+void ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
+ const short* normalizedCounter, unsigned maxSymbolValue,
+ const U32* baseValue, const U32* nbAdditionalBits,
+ unsigned tableLog, void* wksp, size_t wkspSize,
+ int bmi2);
+
+
+#endif /* ZSTD_DEC_BLOCK_H */
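The workspace contract stated above (4-byte alignment, at least ZSTD_BUILD_FSE_TABLE_WKSP_SIZE bytes) is easiest to satisfy with a U32 array of ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32 elements, exactly as ZSTD_entropyDTables_t does in zstd_decompress_internal.h. A hedged sketch, with the normalized counter and the base/extra-bit tables assumed to come from the caller's own header parsing:

    U32 wksp[ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32];      /* U32 => 4-byte aligned */
    ZSTD_seqSymbol dtable[SEQSYMBOL_TABLE_SIZE(OffFSELog)];
    /* norm, maxSym, tableLog, baseValues and nbAdditionalBits are assumed inputs */
    ZSTD_buildFSETable(dtable, norm, maxSym,
                       baseValues, nbAdditionalBits,
                       tableLog, wksp, sizeof(wksp), /* bmi2 */ 0);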
diff --git a/lib/zstd/decompress/zstd_decompress_internal.h b/lib/zstd/decompress/zstd_decompress_internal.h
new file mode 100644
index 00000000000000..4b9052f687558a
--- /dev/null
+++ b/lib/zstd/decompress/zstd_decompress_internal.h
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+/* zstd_decompress_internal:
+ * objects and definitions shared within lib/decompress modules */
+
+ #ifndef ZSTD_DECOMPRESS_INTERNAL_H
+ #define ZSTD_DECOMPRESS_INTERNAL_H
+
+
+/*-*******************************************************
+ * Dependencies
+ *********************************************************/
+#include "../common/mem.h" /* BYTE, U16, U32 */
+#include "../common/zstd_internal.h" /* ZSTD_seqSymbol */
+
+
+
+/*-*******************************************************
+ * Constants
+ *********************************************************/
+static UNUSED_ATTR const U32 LL_base[MaxLL+1] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 18, 20, 22, 24, 28, 32, 40,
+ 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
+ 0x2000, 0x4000, 0x8000, 0x10000 };
+
+static UNUSED_ATTR const U32 OF_base[MaxOff+1] = {
+ 0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D,
+ 0xFD, 0x1FD, 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD,
+ 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD,
+ 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD, 0x1FFFFFFD, 0x3FFFFFFD, 0x7FFFFFFD };
+
+static UNUSED_ATTR const U32 OF_bits[MaxOff+1] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31 };
+
+static UNUSED_ATTR const U32 ML_base[MaxML+1] = {
+ 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 23, 24, 25, 26,
+ 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 37, 39, 41, 43, 47, 51, 59,
+ 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803,
+ 0x1003, 0x2003, 0x4003, 0x8003, 0x10003 };
+
+
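These tables implement the "base value plus extra bits" reconstruction: the decoded FSE symbol selects a row, and nbAdditionalBits raw bits from the stream are added to that row's base. Two values read straight from the tables above:

    /* Offset code 10 : OF_base[10] = 0x3FD = 1021, OF_bits[10] = 10,
     * so decoded offset values span 1021 + [0 .. 1023] = 1021 .. 2044.         */
    /* Match-length code 44 : ML_base[44] = 0x103 = 259; with its extra bits
     * (the ML_bits table is not part of this hunk) it covers 259 upward, while
     * codes 0..31 take no extra bits and map directly to lengths 3..34.        */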
+/*-*******************************************************
+ * Decompression types
+ *********************************************************/
+ typedef struct {
+ U32 fastMode;
+ U32 tableLog;
+ } ZSTD_seqSymbol_header;
+
+ typedef struct {
+ U16 nextState;
+ BYTE nbAdditionalBits;
+ BYTE nbBits;
+ U32 baseValue;
+ } ZSTD_seqSymbol;
+
+ #define SEQSYMBOL_TABLE_SIZE(log) (1 + (1 << (log)))
+
+#define ZSTD_BUILD_FSE_TABLE_WKSP_SIZE (sizeof(S16) * (MaxSeq + 1) + (1u << MaxFSELog) + sizeof(U64))
+#define ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32 ((ZSTD_BUILD_FSE_TABLE_WKSP_SIZE + sizeof(U32) - 1) / sizeof(U32))
+
+typedef struct {
+ ZSTD_seqSymbol LLTable[SEQSYMBOL_TABLE_SIZE(LLFSELog)]; /* Note : Space reserved for FSE Tables */
+ ZSTD_seqSymbol OFTable[SEQSYMBOL_TABLE_SIZE(OffFSELog)]; /* is also used as temporary workspace while building hufTable during DDict creation */
+ ZSTD_seqSymbol MLTable[SEQSYMBOL_TABLE_SIZE(MLFSELog)]; /* and therefore must be at least HUF_DECOMPRESS_WORKSPACE_SIZE large */
+ HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)]; /* can accommodate HUF_decompress4X */
+ U32 rep[ZSTD_REP_NUM];
+ U32 workspace[ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32];
+} ZSTD_entropyDTables_t;
+
+typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,
+ ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock,
+ ZSTDds_decompressLastBlock, ZSTDds_checkChecksum,
+ ZSTDds_decodeSkippableHeader, ZSTDds_skipFrame } ZSTD_dStage;
+
+typedef enum { zdss_init=0, zdss_loadHeader,
+ zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage;
+
+typedef enum {
+ ZSTD_use_indefinitely = -1, /* Use the dictionary indefinitely */
+ ZSTD_dont_use = 0, /* Do not use the dictionary (if one exists free it) */
+ ZSTD_use_once = 1 /* Use the dictionary once and set to ZSTD_dont_use */
+} ZSTD_dictUses_e;
+
+/* Hashset for storing references to multiple ZSTD_DDict within ZSTD_DCtx */
+typedef struct {
+ const ZSTD_DDict** ddictPtrTable;
+ size_t ddictPtrTableSize;
+ size_t ddictPtrCount;
+} ZSTD_DDictHashSet;
+
+struct ZSTD_DCtx_s
+{
+ const ZSTD_seqSymbol* LLTptr;
+ const ZSTD_seqSymbol* MLTptr;
+ const ZSTD_seqSymbol* OFTptr;
+ const HUF_DTable* HUFptr;
+ ZSTD_entropyDTables_t entropy;
+ U32 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; /* space needed when building huffman tables */
+ const void* previousDstEnd; /* detect continuity */
+ const void* prefixStart; /* start of current segment */
+ const void* virtualStart; /* virtual start of previous segment if it was just before current one */
+ const void* dictEnd; /* end of previous segment */
+ size_t expected;
+ ZSTD_frameHeader fParams;
+ U64 processedCSize;
+ U64 decodedSize;
+ blockType_e bType; /* used in ZSTD_decompressContinue(), store blockType between block header decoding and block decompression stages */
+ ZSTD_dStage stage;
+ U32 litEntropy;
+ U32 fseEntropy;
+ struct xxh64_state xxhState;
+ size_t headerSize;
+ ZSTD_format_e format;
+ ZSTD_forceIgnoreChecksum_e forceIgnoreChecksum; /* User specified: if == 1, will ignore checksums in compressed frame. Default == 0 */
+ U32 validateChecksum; /* if == 1, will validate checksum. Is == 1 if (fParams.checksumFlag == 1) and (forceIgnoreChecksum == 0). */
+ const BYTE* litPtr;
+ ZSTD_customMem customMem;
+ size_t litSize;
+ size_t rleSize;
+ size_t staticSize;
+ int bmi2; /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
+
+ /* dictionary */
+ ZSTD_DDict* ddictLocal;
+ const ZSTD_DDict* ddict; /* set by ZSTD_initDStream_usingDDict(), or ZSTD_DCtx_refDDict() */
+ U32 dictID;
+ int ddictIsCold; /* if == 1 : dictionary is "new" for working context, and presumed "cold" (not in cpu cache) */
+ ZSTD_dictUses_e dictUses;
+ ZSTD_DDictHashSet* ddictSet; /* Hash set for multiple ddicts */
+ ZSTD_refMultipleDDicts_e refMultipleDDicts; /* User specified: if == 1, will allow references to multiple DDicts. Default == 0 (disabled) */
+
+ /* streaming */
+ ZSTD_dStreamStage streamStage;
+ char* inBuff;
+ size_t inBuffSize;
+ size_t inPos;
+ size_t maxWindowSize;
+ char* outBuff;
+ size_t outBuffSize;
+ size_t outStart;
+ size_t outEnd;
+ size_t lhSize;
+ void* legacyContext;
+ U32 previousLegacyVersion;
+ U32 legacyVersion;
+ U32 hostageByte;
+ int noForwardProgress;
+ ZSTD_bufferMode_e outBufferMode;
+ ZSTD_outBuffer expectedOutBuffer;
+
+ /* workspace */
+ BYTE litBuffer[ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH];
+ BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX];
+
+ size_t oversizedDuration;
+
+#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ void const* dictContentBeginForFuzzing;
+ void const* dictContentEndForFuzzing;
+#endif
+
+ /* Tracing */
+}; /* typedef'd to ZSTD_DCtx within "zstd.h" */
+
+
+/*-*******************************************************
+ * Shared internal functions
+ *********************************************************/
+
+/*! ZSTD_loadDEntropy() :
+ * dict : must point at beginning of a valid zstd dictionary.
+ * @return : size of dictionary header (size of magic number + dict ID + entropy tables) */
+size_t ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
+ const void* const dict, size_t const dictSize);
+
+/*! ZSTD_checkContinuity() :
+ * check if next `dst` follows previous position, where decompression ended.
+ * If yes, do nothing (continue on current segment).
+ * If not, classify previous segment as "external dictionary", and start a new segment.
+ * This function cannot fail. */
+void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst, size_t dstSize);
+
+
+#endif /* ZSTD_DECOMPRESS_INTERNAL_H */
diff --git a/lib/zstd/decompress_sources.h b/lib/zstd/decompress_sources.h
new file mode 100644
index 00000000000000..0fbec508f285e3
--- /dev/null
+++ b/lib/zstd/decompress_sources.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
+/*
+ * Copyright (c) Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/*
+ * This file includes every .c file needed for decompression.
+ * It is used by lib/decompress_unzstd.c to include the decompression
+ * source into the translation-unit, so it can be used for kernel
+ * decompression.
+ */
+
+#include "common/debug.c"
+#include "common/entropy_common.c"
+#include "common/error_private.c"
+#include "common/fse_decompress.c"
+#include "common/zstd_common.c"
+#include "decompress/huf_decompress.c"
+#include "decompress/zstd_ddict.c"
+#include "decompress/zstd_decompress.c"
+#include "decompress/zstd_decompress_block.c"
+#include "zstd_decompress_module.c"
diff --git a/lib/zstd/entropy_common.c b/lib/zstd/entropy_common.c
deleted file mode 100644
index 2b0a643c32c4b2..00000000000000
--- a/lib/zstd/entropy_common.c
+++ /dev/null
@@ -1,243 +0,0 @@
-/*
- * Common functions of New Generation Entropy library
- * Copyright (C) 2016, Yann Collet.
- *
- * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation. This program is dual-licensed; you may select
- * either version 2 of the GNU General Public License ("GPL") or BSD license
- * ("BSD").
- *
- * You can contact the author at :
- * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
- */
-
-/* *************************************
-* Dependencies
-***************************************/
-#include "error_private.h" /* ERR_*, ERROR */
-#include "fse.h"
-#include "huf.h"
-#include "mem.h"
-
-/*=== Version ===*/
-unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; }
-
-/*=== Error Management ===*/
-unsigned FSE_isError(size_t code) { return ERR_isError(code); }
-
-unsigned HUF_isError(size_t code) { return ERR_isError(code); }
-
-/*-**************************************************************
-* FSE NCount encoding-decoding
-****************************************************************/
-size_t FSE_readNCount(short *normalizedCounter, unsigned *maxSVPtr, unsigned *tableLogPtr, const void *headerBuffer, size_t hbSize)
-{
- const BYTE *const istart = (const BYTE *)headerBuffer;
- const BYTE *const iend = istart + hbSize;
- const BYTE *ip = istart;
- int nbBits;
- int remaining;
- int threshold;
- U32 bitStream;
- int bitCount;
- unsigned charnum = 0;
- int previous0 = 0;
-
- if (hbSize < 4)
- return ERROR(srcSize_wrong);
- bitStream = ZSTD_readLE32(ip);
- nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */
- if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX)
- return ERROR(tableLog_tooLarge);
- bitStream >>= 4;
- bitCount = 4;
- *tableLogPtr = nbBits;
- remaining = (1 << nbBits) + 1;
- threshold = 1 << nbBits;
- nbBits++;
-
- while ((remaining > 1) & (charnum <= *maxSVPtr)) {
- if (previous0) {
- unsigned n0 = charnum;
- while ((bitStream & 0xFFFF) == 0xFFFF) {
- n0 += 24;
- if (ip < iend - 5) {
- ip += 2;
- bitStream = ZSTD_readLE32(ip) >> bitCount;
- } else {
- bitStream >>= 16;
- bitCount += 16;
- }
- }
- while ((bitStream & 3) == 3) {
- n0 += 3;
- bitStream >>= 2;
- bitCount += 2;
- }
- n0 += bitStream & 3;
- bitCount += 2;
- if (n0 > *maxSVPtr)
- return ERROR(maxSymbolValue_tooSmall);
- while (charnum < n0)
- normalizedCounter[charnum++] = 0;
- if ((ip <= iend - 7) || (ip + (bitCount >> 3) <= iend - 4)) {
- ip += bitCount >> 3;
- bitCount &= 7;
- bitStream = ZSTD_readLE32(ip) >> bitCount;
- } else {
- bitStream >>= 2;
- }
- }
- {
- int const max = (2 * threshold - 1) - remaining;
- int count;
-
- if ((bitStream & (threshold - 1)) < (U32)max) {
- count = bitStream & (threshold - 1);
- bitCount += nbBits - 1;
- } else {
- count = bitStream & (2 * threshold - 1);
- if (count >= threshold)
- count -= max;
- bitCount += nbBits;
- }
-
- count--; /* extra accuracy */
- remaining -= count < 0 ? -count : count; /* -1 means +1 */
- normalizedCounter[charnum++] = (short)count;
- previous0 = !count;
- while (remaining < threshold) {
- nbBits--;
- threshold >>= 1;
- }
-
- if ((ip <= iend - 7) || (ip + (bitCount >> 3) <= iend - 4)) {
- ip += bitCount >> 3;
- bitCount &= 7;
- } else {
- bitCount -= (int)(8 * (iend - 4 - ip));
- ip = iend - 4;
- }
- bitStream = ZSTD_readLE32(ip) >> (bitCount & 31);
- }
- } /* while ((remaining>1) & (charnum<=*maxSVPtr)) */
- if (remaining != 1)
- return ERROR(corruption_detected);
- if (bitCount > 32)
- return ERROR(corruption_detected);
- *maxSVPtr = charnum - 1;
-
- ip += (bitCount + 7) >> 3;
- return ip - istart;
-}
-
-/*! HUF_readStats() :
- Read compact Huffman tree, saved by HUF_writeCTable().
- `huffWeight` is destination buffer.
- `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32.
- @return : size read from `src` , or an error Code .
- Note : Needed by HUF_readCTable() and HUF_readDTableX?() .
-*/
-size_t HUF_readStats_wksp(BYTE *huffWeight, size_t hwSize, U32 *rankStats, U32 *nbSymbolsPtr, U32 *tableLogPtr, const void *src, size_t srcSize, void *workspace, size_t workspaceSize)
-{
- U32 weightTotal;
- const BYTE *ip = (const BYTE *)src;
- size_t iSize;
- size_t oSize;
-
- if (!srcSize)
- return ERROR(srcSize_wrong);
- iSize = ip[0];
- /* memset(huffWeight, 0, hwSize); */ /* is not necessary, even though some analyzer complain ... */
-
- if (iSize >= 128) { /* special header */
- oSize = iSize - 127;
- iSize = ((oSize + 1) / 2);
- if (iSize + 1 > srcSize)
- return ERROR(srcSize_wrong);
- if (oSize >= hwSize)
- return ERROR(corruption_detected);
- ip += 1;
- {
- U32 n;
- for (n = 0; n < oSize; n += 2) {
- huffWeight[n] = ip[n / 2] >> 4;
- huffWeight[n + 1] = ip[n / 2] & 15;
- }
- }
- } else { /* header compressed with FSE (normal case) */
- if (iSize + 1 > srcSize)
- return ERROR(srcSize_wrong);
- oSize = FSE_decompress_wksp(huffWeight, hwSize - 1, ip + 1, iSize, 6, workspace, workspaceSize); /* max (hwSize-1) values decoded, as last one is implied */
- if (FSE_isError(oSize))
- return oSize;
- }
-
- /* collect weight stats */
- memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
- weightTotal = 0;
- {
- U32 n;
- for (n = 0; n < oSize; n++) {
- if (huffWeight[n] >= HUF_TABLELOG_MAX)
- return ERROR(corruption_detected);
- rankStats[huffWeight[n]]++;
- weightTotal += (1 << huffWeight[n]) >> 1;
- }
- }
- if (weightTotal == 0)
- return ERROR(corruption_detected);
-
- /* get last non-null symbol weight (implied, total must be 2^n) */
- {
- U32 const tableLog = BIT_highbit32(weightTotal) + 1;
- if (tableLog > HUF_TABLELOG_MAX)
- return ERROR(corruption_detected);
- *tableLogPtr = tableLog;
- /* determine last weight */
- {
- U32 const total = 1 << tableLog;
- U32 const rest = total - weightTotal;
- U32 const verif = 1 << BIT_highbit32(rest);
- U32 const lastWeight = BIT_highbit32(rest) + 1;
- if (verif != rest)
- return ERROR(corruption_detected); /* last value must be a clean power of 2 */
- huffWeight[oSize] = (BYTE)lastWeight;
- rankStats[lastWeight]++;
- }
- }
-
- /* check tree construction validity */
- if ((rankStats[1] < 2) || (rankStats[1] & 1))
- return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */
-
- /* results */
- *nbSymbolsPtr = (U32)(oSize + 1);
- return iSize + 1;
-}
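The "determine last weight" step above works because the explicit weights must fall exactly one clean power of two short of 2^tableLog. A worked example with small numbers:

    /* Stored weights {2, 2, 2, 1, 1}:
     *   contributions (1<<w)>>1 : 2+2+2+1+1 = 8        -> weightTotal = 8
     *   tableLog   = BIT_highbit32(8) + 1 = 4          -> total = 16
     *   rest       = 16 - 8 = 8, a clean power of two  -> verif check passes
     *   lastWeight = BIT_highbit32(8) + 1 = 4, contributing (1<<4)>>1 = 8
     *   8 + 8 = 16 = 2^tableLog, so the implied final symbol gets weight 4.   */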
diff --git a/lib/zstd/error_private.h b/lib/zstd/error_private.h
deleted file mode 100644
index 1a60b31f706cb6..00000000000000
--- a/lib/zstd/error_private.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of https://github.com/facebook/zstd.
- * An additional grant of patent rights can be found in the PATENTS file in the
- * same directory.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation. This program is dual-licensed; you may select
- * either version 2 of the GNU General Public License ("GPL") or BSD license
- * ("BSD").
- */
-
-/* Note : this module is expected to remain private, do not expose it */
-
-#ifndef ERROR_H_MODULE
-#define ERROR_H_MODULE
-
-/* ****************************************
-* Dependencies
-******************************************/
-#include <linux/types.h> /* size_t */
-#include <linux/zstd.h> /* enum list */
-
-/* ****************************************
-* Compiler-specific
-******************************************/
-#define ERR_STATIC static __attribute__((unused))
-
-/*-****************************************
-* Customization (error_public.h)
-******************************************/
-typedef ZSTD_ErrorCode ERR_enum;
-#define PREFIX(name) ZSTD_error_##name
-
-/*-****************************************
-* Error codes handling
-******************************************/
-#define ERROR(name) ((size_t)-PREFIX(name))
-
-ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }
-
-ERR_STATIC ERR_enum ERR_getErrorCode(size_t code)
-{
- if (!ERR_isError(code))
- return (ERR_enum)0;
- return (ERR_enum)(0 - code);
-}
-
-#endif /* ERROR_H_MODULE */
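The removed header represents errors as negated enum values cast to size_t, so every error code sits at the very top of the size_t range and ERR_isError() reduces to a single comparison. A small illustration, assuming a 64-bit size_t:

    /* ERROR(srcSize_wrong) == (size_t)-ZSTD_error_srcSize_wrong, a value just
     * below SIZE_MAX; any realistic buffer length is far smaller than
     * ERROR(maxCode), so:                                                    */
    size_t const ret = ERROR(srcSize_wrong);
    if (ERR_isError(ret))                     /* ret > (size_t)-ZSTD_error_maxCode */
        return ERR_getErrorCode(ret);         /* recovers ZSTD_error_srcSize_wrong */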
diff --git a/lib/zstd/fse.h b/lib/zstd/fse.h
deleted file mode 100644
index 7460ab04b1916d..00000000000000
--- a/lib/zstd/fse.h
+++ /dev/null
@@ -1,575 +0,0 @@
-/*
- * FSE : Finite State Entropy codec
- * Public Prototypes declaration
- * Copyright (C) 2013-2016, Yann Collet.
- *
- * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation. This program is dual-licensed; you may select
- * either version 2 of the GNU General Public License ("GPL") or BSD license
- * ("BSD").
- *
- * You can contact the author at :
- * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
- */
-#ifndef FSE_H
-#define FSE_H
-
-/*-*****************************************
-* Dependencies
-******************************************/
-#include <linux/types.h> /* size_t, ptrdiff_t */
-
-/*-*****************************************
-* FSE_PUBLIC_API : control library symbols visibility
-******************************************/
-#define FSE_PUBLIC_API
-
-/*------ Version ------*/
-#define FSE_VERSION_MAJOR 0
-#define FSE_VERSION_MINOR 9
-#define FSE_VERSION_RELEASE 0
-
-#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE
-#define FSE_QUOTE(str) #str
-#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str)
-#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION)
-
-#define FSE_VERSION_NUMBER (FSE_VERSION_MAJOR * 100 * 100 + FSE_VERSION_MINOR * 100 + FSE_VERSION_RELEASE)
-FSE_PUBLIC_API unsigned FSE_versionNumber(void); /**< library version number; to be used when checking dll version */
-
-/*-*****************************************
-* Tool functions
-******************************************/
-FSE_PUBLIC_API size_t FSE_compressBound(size_t size); /* maximum compressed size */
-
-/* Error Management */
-FSE_PUBLIC_API unsigned FSE_isError(size_t code); /* tells if a return value is an error code */
-
-/*-*****************************************
-* FSE detailed API
-******************************************/
-/*!
-FSE_compress() does the following:
-1. count symbol occurrence from source[] into table count[]
-2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog)
-3. save normalized counters to memory buffer using writeNCount()
-4. build encoding table 'CTable' from normalized counters
-5. encode the data stream using encoding table 'CTable'
-
-FSE_decompress() does the following:
-1. read normalized counters with readNCount()
-2. build decoding table 'DTable' from normalized counters
-3. decode the data stream using decoding table 'DTable'
-
-The following API allows targeting specific sub-functions for advanced tasks.
-For example, it's possible to compress several blocks using the same 'CTable',
-or to save and provide normalized distribution using external method.
-*/
-
-/* *** COMPRESSION *** */
-/*! FSE_optimalTableLog():
- dynamically downsize 'tableLog' when conditions are met.
- It saves CPU time, by using smaller tables, while preserving or even improving compression ratio.
- @return : recommended tableLog (necessarily <= 'maxTableLog') */
-FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
-
-/*! FSE_normalizeCount():
- normalize counts so that sum(count[]) == Power_of_2 (2^tableLog)
- 'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1).
- @return : tableLog,
- or an errorCode, which can be tested using FSE_isError() */
-FSE_PUBLIC_API size_t FSE_normalizeCount(short *normalizedCounter, unsigned tableLog, const unsigned *count, size_t srcSize, unsigned maxSymbolValue);
-
-/*! FSE_NCountWriteBound():
- Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'.
- Typically useful for allocation purpose. */
-FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog);
-
-/*! FSE_writeNCount():
- Compactly save 'normalizedCounter' into 'buffer'.
- @return : size of the compressed table,
- or an errorCode, which can be tested using FSE_isError(). */
-FSE_PUBLIC_API size_t FSE_writeNCount(void *buffer, size_t bufferSize, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
-
-/*! Constructor and Destructor of FSE_CTable.
- Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */
-typedef unsigned FSE_CTable; /* don't allocate that. It's only meant to be more restrictive than void* */
-
-/*! FSE_compress_usingCTable():
- Compress `src` using `ct` into `dst` which must be already allocated.
- @return : size of compressed data (<= `dstCapacity`),
- or 0 if compressed data could not fit into `dst`,
- or an errorCode, which can be tested using FSE_isError() */
-FSE_PUBLIC_API size_t FSE_compress_usingCTable(void *dst, size_t dstCapacity, const void *src, size_t srcSize, const FSE_CTable *ct);
-
-/*!
-Tutorial :
-----------
-The first step is to count all symbols. FSE_count() does this job very fast.
-Result will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells.
-'src' is a table of bytes of size 'srcSize'. All values within 'src' MUST be <= maxSymbolValuePtr[0]
-maxSymbolValuePtr[0] will be updated, with its real value (necessarily <= original value)
-FSE_count() will return the number of occurrence of the most frequent symbol.
-This can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility.
-If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
-
-The next step is to normalize the frequencies.
-FSE_normalizeCount() will ensure that sum of frequencies is == 2 ^'tableLog'.
-It also guarantees a minimum of 1 to any Symbol with frequency >= 1.
-You can use 'tableLog'==0 to mean "use default tableLog value".
-If you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(),
-which will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means "default").
-
-The result of FSE_normalizeCount() will be saved into a table,
-called 'normalizedCounter', which is a table of signed short.
-'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells.
-The return value is tableLog if everything proceeded as expected.
-It is 0 if there is a single symbol within distribution.
-If there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()).
-
-'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount().
-'buffer' must be already allocated.
-For guaranteed success, buffer size must be at least FSE_headerBound().
-The result of the function is the number of bytes written into 'buffer'.
-If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small).
-
-'normalizedCounter' can then be used to create the compression table 'CTable'.
-The space required by 'CTable' must be already allocated, using FSE_createCTable().
-You can then use FSE_buildCTable() to fill 'CTable'.
-If there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()).
-
-'CTable' can then be used to compress 'src', with FSE_compress_usingCTable().
-Similar to FSE_count(), the convention is that 'src' is assumed to be a table of char of size 'srcSize'
-The function returns the size of compressed data (without header), necessarily <= `dstCapacity`.
-If it returns '0', compressed data could not fit into 'dst'.
-If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
-*/
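A minimal sketch of the pipeline this tutorial describes, limited to the workspace-based entry points the (now removed) header actually declared; src/srcSize and dst/dstCapacity are assumed caller buffers, the workspace sizes are illustrative for a 256-symbol alphabet with tableLog capped at 10, and error checks via FSE_isError() are omitted:

    unsigned count[256], wksp[1024];          /* >= 1024 unsigned, per FSE_count_wksp() */
    unsigned maxSym = 255;
    short    norm[256];
    FSE_CTable ct[FSE_CTABLE_SIZE_U32(10, 255)];
    size_t   hSize, cSize;

    FSE_count_wksp(count, &maxSym, src, srcSize, wksp);
    {   unsigned const tableLog = FSE_optimalTableLog(10, srcSize, maxSym);
        FSE_normalizeCount(norm, tableLog, count, srcSize, maxSym);
        hSize = FSE_writeNCount(dst, dstCapacity, norm, maxSym, tableLog);
        FSE_buildCTable_wksp(ct, norm, maxSym, tableLog, wksp, sizeof(wksp));
        cSize = FSE_compress_usingCTable((char*)dst + hSize, dstCapacity - hSize,
                                         src, srcSize, ct);
    }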
-
-/* *** DECOMPRESSION *** */
-
-/*! FSE_readNCount():
- Read compactly saved 'normalizedCounter' from 'rBuffer'.
- @return : size read from 'rBuffer',
- or an errorCode, which can be tested using FSE_isError().
- maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
-FSE_PUBLIC_API size_t FSE_readNCount(short *normalizedCounter, unsigned *maxSymbolValuePtr, unsigned *tableLogPtr, const void *rBuffer, size_t rBuffSize);
-
-/*! Constructor and Destructor of FSE_DTable.
- Note that its size depends on 'tableLog' */
-typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */
-
-/*! FSE_buildDTable():
- Builds 'dt', which must be already allocated, using FSE_createDTable().
- return : 0, or an errorCode, which can be tested using FSE_isError() */
-FSE_PUBLIC_API size_t FSE_buildDTable_wksp(FSE_DTable *dt, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workspace, size_t workspaceSize);
-
-/*! FSE_decompress_usingDTable():
- Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
- into `dst` which must be already allocated.
- @return : size of regenerated data (necessarily <= `dstCapacity`),
- or an errorCode, which can be tested using FSE_isError() */
-FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void *dst, size_t dstCapacity, const void *cSrc, size_t cSrcSize, const FSE_DTable *dt);
-
-/*!
-Tutorial :
-----------
-(Note : these functions only decompress FSE-compressed blocks.
- If block is uncompressed, use memcpy() instead
- If block is a single repeated byte, use memset() instead )
-
-The first step is to obtain the normalized frequencies of symbols.
-This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount().
-'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
-In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
-or size the table to handle worst case situations (typically 256).
-FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
-The result of FSE_readNCount() is the number of bytes read from 'rBuffer'.
-Note that 'rBufferSize' must be at least 4 bytes, even if useful information is less than that.
-If there is an error, the function will return an error code, which can be tested using FSE_isError().
-
-The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'.
-This is performed by the function FSE_buildDTable().
-The space required by 'FSE_DTable' must be already allocated using FSE_createDTable().
-If there is an error, the function will return an error code, which can be tested using FSE_isError().
-
-`FSE_DTable` can then be used to decompress `cSrc`, with FSE_decompress_usingDTable().
-`cSrcSize` must be strictly correct, otherwise decompression will fail.
-FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`).
-If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small)
-*/
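And the matching decode side, again restricted to functions declared in this header; cSrc/cSrcSize are assumed to hold the NCount header followed by the FSE-compressed payload, and the workspace size passed to FSE_buildDTable_wksp() is illustrative only:

    short    norm[256];
    unsigned maxSym = 255, tableLog;
    FSE_DTable dt[FSE_DTABLE_SIZE_U32(12)];
    U32      wksp[FSE_DTABLE_SIZE_U32(12)];

    size_t const hSize = FSE_readNCount(norm, &maxSym, &tableLog, cSrc, cSrcSize);
    FSE_buildDTable_wksp(dt, norm, maxSym, tableLog, wksp, sizeof(wksp));
    size_t const dSize = FSE_decompress_usingDTable(dst, dstCapacity,
                              (const char*)cSrc + hSize, cSrcSize - hSize, dt);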
-
-/* *** Dependency *** */
-#include "bitstream.h"
-
-/* *****************************************
-* Static allocation
-*******************************************/
-/* FSE buffer bounds */
-#define FSE_NCOUNTBOUND 512
-#define FSE_BLOCKBOUND(size) (size + (size >> 7))
-#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
-
-/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using below macros */
-#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1 << (maxTableLog - 1)) + ((maxSymbolValue + 1) * 2))
-#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1 << maxTableLog))
-
-/* *****************************************
-* FSE advanced API
-*******************************************/
-/* FSE_count_wksp() :
- * Same as FSE_count(), but using an externally provided scratch buffer.
- * `workSpace` size must be table of >= `1024` unsigned
- */
-size_t FSE_count_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned *workSpace);
-
-/* FSE_countFast_wksp() :
- * Same as FSE_countFast(), but using an externally provided scratch buffer.
- * `workSpace` must be a table of minimum `1024` unsigned
- */
-size_t FSE_countFast_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize, unsigned *workSpace);
-
-/*! FSE_count_simple
- * Same as FSE_countFast(), but does not use any additional memory (not even on stack).
- * This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr` (presuming it's also the size of `count`).
-*/
-size_t FSE_count_simple(unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize);
-
-unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus);
-/**< same as FSE_optimalTableLog(), which used `minus==2` */
-
-size_t FSE_buildCTable_raw(FSE_CTable *ct, unsigned nbBits);
-/**< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */
-
-size_t FSE_buildCTable_rle(FSE_CTable *ct, unsigned char symbolValue);
-/**< build a fake FSE_CTable, designed to compress always the same symbolValue */
-
-/* FSE_buildCTable_wksp() :
- * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
- * `wkspSize` must be >= `(1<<tableLog)`.
- */
-size_t FSE_buildCTable_wksp(FSE_CTable *ct, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workSpace, size_t wkspSize);
-
-size_t FSE_buildDTable_raw(FSE_DTable *dt, unsigned nbBits);
-/**< build a fake FSE_DTable, designed to read a flat distribution where each symbol uses nbBits */
-
-size_t FSE_buildDTable_rle(FSE_DTable *dt, unsigned char symbolValue);
-/**< build a fake FSE_DTable, designed to always generate the same symbolValue */
-
-size_t FSE_decompress_wksp(void *dst, size_t dstCapacity, const void *cSrc, size_t cSrcSize, unsigned maxLog, void *workspace, size_t workspaceSize);
-/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DTABLE_SIZE_U32(maxLog)` */
-
-/* *****************************************
-* FSE symbol compression API
-*******************************************/
-/*!
- This API consists of small unitary functions, which highly benefit from being inlined.
- Hence their body are included in next section.
-*/
-typedef struct {
- ptrdiff_t value;
- const void *stateTable;
- const void *symbolTT;
- unsigned stateLog;
-} FSE_CState_t;
-
-static void FSE_initCState(FSE_CState_t *CStatePtr, const FSE_CTable *ct);
-
-static void FSE_encodeSymbol(BIT_CStream_t *bitC, FSE_CState_t *CStatePtr, unsigned symbol);
-
-static void FSE_flushCState(BIT_CStream_t *bitC, const FSE_CState_t *CStatePtr);
-
-/**<
-These functions are inner components of FSE_compress_usingCTable().
-They allow the creation of custom streams, mixing multiple tables and bit sources.
-
-A key property to keep in mind is that encoding and decoding are done **in reverse direction**.
-So the first symbol you will encode is the last you will decode, like a LIFO stack.
-
-You will need a few variables to track your CStream. They are :
-
-FSE_CTable ct; // Provided by FSE_buildCTable()
-BIT_CStream_t bitStream; // bitStream tracking structure
-FSE_CState_t state; // State tracking structure (can have several)
-
-
-The first thing to do is to init bitStream and state.
- size_t errorCode = BIT_initCStream(&bitStream, dstBuffer, maxDstSize);
- FSE_initCState(&state, ct);
-
-Note that BIT_initCStream() can produce an error code, so its result should be tested, using FSE_isError();
-You can then encode your input data, byte after byte.
-FSE_encodeSymbol() outputs a maximum of 'tableLog' bits at a time.
-Remember decoding will be done in reverse direction.
- FSE_encodeByte(&bitStream, &state, symbol);
-
-At any time, you can also add any bit sequence.
-Note : maximum allowed nbBits is 25, for compatibility with 32-bits decoders
- BIT_addBits(&bitStream, bitField, nbBits);
-
-The above methods don't commit data to memory, they just store it into local register, for speed.
-Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
-Writing data to memory is a manual operation, performed by the flushBits function.
- BIT_flushBits(&bitStream);
-
-Your last FSE encoding operation shall be to flush your last state value(s).
- FSE_flushState(&bitStream, &state);
-
-Finally, you must close the bitStream.
-The function returns the size of CStream in bytes.
-If data couldn't fit into dstBuffer, it will return a 0 ( == not compressible)
-If there is an error, it returns an errorCode (which can be tested using FSE_isError()).
- size_t size = BIT_closeCStream(&bitStream);
-*/
-
-/* *****************************************
-* FSE symbol decompression API
-*******************************************/
-typedef struct {
- size_t state;
- const void *table; /* precise table may vary, depending on U16 */
-} FSE_DState_t;
-
-static void FSE_initDState(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD, const FSE_DTable *dt);
-
-static unsigned char FSE_decodeSymbol(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD);
-
-static unsigned FSE_endOfDState(const FSE_DState_t *DStatePtr);
-
-/**<
-Let's now decompose FSE_decompress_usingDTable() into its unitary components.
-You will decode FSE-encoded symbols from the bitStream,
-and also any other bitFields you put in, **in reverse order**.
-
-You will need a few variables to track your bitStream. They are :
-
-BIT_DStream_t DStream; // Stream context
-FSE_DState_t DState; // State context. Multiple ones are possible
-FSE_DTable* DTablePtr; // Decoding table, provided by FSE_buildDTable()
-
-The first thing to do is to init the bitStream.
- errorCode = BIT_initDStream(&DStream, srcBuffer, srcSize);
-
-You should then retrieve your initial state(s)
-(in reverse flushing order if you have several ones) :
- errorCode = FSE_initDState(&DState, &DStream, DTablePtr);
-
-You can then decode your data, symbol after symbol.
-For information the maximum number of bits read by FSE_decodeSymbol() is 'tableLog'.
-Keep in mind that symbols are decoded in reverse order, like a LIFO stack (last in, first out).
- unsigned char symbol = FSE_decodeSymbol(&DState, &DStream);
-
-You can retrieve any bitfield you eventually stored into the bitStream (in reverse order)
-Note : maximum allowed nbBits is 25, for 32-bits compatibility
- size_t bitField = BIT_readBits(&DStream, nbBits);
-
-All above operations only read from local register (which size depends on size_t).
-Refueling the register from memory is manually performed by the reload method.
- endSignal = FSE_reloadDStream(&DStream);
-
-BIT_reloadDStream() result tells if there is still some more data to read from DStream.
-BIT_DStream_unfinished : there is still some data left into the DStream.
-BIT_DStream_endOfBuffer : Dstream reached end of buffer. Its container may no longer be completely filled.
-BIT_DStream_completed : Dstream reached its exact end, corresponding in general to decompression completed.
-BIT_DStream_tooFar : Dstream went too far. Decompression result is corrupted.
-
-When reaching end of buffer (BIT_DStream_endOfBuffer), progress slowly, notably if you decode multiple symbols per loop,
-to properly detect the exact end of stream.
-After each decoded symbol, check if DStream is fully consumed using this simple test :
- BIT_reloadDStream(&DStream) >= BIT_DStream_completed
-
-When it's done, verify decompression is fully completed, by checking both DStream and the relevant states.
-Checking if DStream has reached its end is performed by :
- BIT_endOfDStream(&DStream);
-Check also the states. There might be some symbols left there, if some high probability ones (>50%) are possible.
- FSE_endOfDState(&DState);
-*/
-
-/* *****************************************
-* FSE unsafe API
-*******************************************/
-static unsigned char FSE_decodeSymbolFast(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD);
-/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
-
-/* *****************************************
-* Implementation of inlined functions
-*******************************************/
-typedef struct {
- int deltaFindState;
- U32 deltaNbBits;
-} FSE_symbolCompressionTransform; /* total 8 bytes */
-
-ZSTD_STATIC void FSE_initCState(FSE_CState_t *statePtr, const FSE_CTable *ct)
-{
- const void *ptr = ct;
- const U16 *u16ptr = (const U16 *)ptr;
- const U32 tableLog = ZSTD_read16(ptr);
- statePtr->value = (ptrdiff_t)1 << tableLog;
- statePtr->stateTable = u16ptr + 2;
- statePtr->symbolTT = ((const U32 *)ct + 1 + (tableLog ? (1 << (tableLog - 1)) : 1));
- statePtr->stateLog = tableLog;
-}
-
-/*! FSE_initCState2() :
-* Same as FSE_initCState(), but the first symbol to include (which will be the last to be read)
-* uses the smallest state value possible, saving the cost of this symbol */
-ZSTD_STATIC void FSE_initCState2(FSE_CState_t *statePtr, const FSE_CTable *ct, U32 symbol)
-{
- FSE_initCState(statePtr, ct);
- {
- const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform *)(statePtr->symbolTT))[symbol];
- const U16 *stateTable = (const U16 *)(statePtr->stateTable);
- U32 nbBitsOut = (U32)((symbolTT.deltaNbBits + (1 << 15)) >> 16);
- statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits;
- statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
- }
-}
-
-ZSTD_STATIC void FSE_encodeSymbol(BIT_CStream_t *bitC, FSE_CState_t *statePtr, U32 symbol)
-{
- const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform *)(statePtr->symbolTT))[symbol];
- const U16 *const stateTable = (const U16 *)(statePtr->stateTable);
- U32 nbBitsOut = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16);
- BIT_addBits(bitC, statePtr->value, nbBitsOut);
- statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
-}
-
-ZSTD_STATIC void FSE_flushCState(BIT_CStream_t *bitC, const FSE_CState_t *statePtr)
-{
- BIT_addBits(bitC, statePtr->value, statePtr->stateLog);
- BIT_flushBits(bitC);
-}
-
-/* ====== Decompression ====== */
-
-typedef struct {
- U16 tableLog;
- U16 fastMode;
-} FSE_DTableHeader; /* sizeof U32 */
-
-typedef struct {
- unsigned short newState;
- unsigned char symbol;
- unsigned char nbBits;
-} FSE_decode_t; /* size == U32 */
-
-ZSTD_STATIC void FSE_initDState(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD, const FSE_DTable *dt)
-{
- const void *ptr = dt;
- const FSE_DTableHeader *const DTableH = (const FSE_DTableHeader *)ptr;
- DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
- BIT_reloadDStream(bitD);
- DStatePtr->table = dt + 1;
-}
-
-ZSTD_STATIC BYTE FSE_peekSymbol(const FSE_DState_t *DStatePtr)
-{
- FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state];
- return DInfo.symbol;
-}
-
-ZSTD_STATIC void FSE_updateState(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD)
-{
- FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state];
- U32 const nbBits = DInfo.nbBits;
- size_t const lowBits = BIT_readBits(bitD, nbBits);
- DStatePtr->state = DInfo.newState + lowBits;
-}
-
-ZSTD_STATIC BYTE FSE_decodeSymbol(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD)
-{
- FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state];
- U32 const nbBits = DInfo.nbBits;
- BYTE const symbol = DInfo.symbol;
- size_t const lowBits = BIT_readBits(bitD, nbBits);
-
- DStatePtr->state = DInfo.newState + lowBits;
- return symbol;
-}
-
-/*! FSE_decodeSymbolFast() :
- unsafe, only works if no symbol has a probability > 50% */
-ZSTD_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD)
-{
- FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state];
- U32 const nbBits = DInfo.nbBits;
- BYTE const symbol = DInfo.symbol;
- size_t const lowBits = BIT_readBitsFast(bitD, nbBits);
-
- DStatePtr->state = DInfo.newState + lowBits;
- return symbol;
-}
-
-ZSTD_STATIC unsigned FSE_endOfDState(const FSE_DState_t *DStatePtr) { return DStatePtr->state == 0; }
-
-/* **************************************************************
-* Tuning parameters
-****************************************************************/
-/*!MEMORY_USAGE :
-* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
-* Increasing memory usage improves compression ratio
-* Reduced memory usage can improve speed, due to cache effect
-* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
-#ifndef FSE_MAX_MEMORY_USAGE
-#define FSE_MAX_MEMORY_USAGE 14
-#endif
-#ifndef FSE_DEFAULT_MEMORY_USAGE
-#define FSE_DEFAULT_MEMORY_USAGE 13
-#endif
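Spelled out for the values chosen above (with FSE_decode_t being 4 bytes, as noted later in this header, and FSE_MAX_TABLELOG defined below as FSE_MAX_MEMORY_USAGE - 2):

    2^FSE_MAX_MEMORY_USAGE     = 2^14 = 16384 B   -> the 16KB budget quoted above
    FSE_MAX_TABLELOG           = 14 - 2 = 12
    FSE_MAX_TABLESIZE          = 2^12 = 4096 cells * 4 B/cell = 16 KB
    2^FSE_DEFAULT_MEMORY_USAGE = 2^13 = 8192 B    -> default tables use half of that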
-
-/*!FSE_MAX_SYMBOL_VALUE :
-* Maximum symbol value authorized.
-* Required for proper stack allocation */
-#ifndef FSE_MAX_SYMBOL_VALUE
-#define FSE_MAX_SYMBOL_VALUE 255
-#endif
-
-/* **************************************************************
-* template functions type & suffix
-****************************************************************/
-#define FSE_FUNCTION_TYPE BYTE
-#define FSE_FUNCTION_EXTENSION
-#define FSE_DECODE_TYPE FSE_decode_t
-
-/* ***************************************************************
-* Constants
-*****************************************************************/
-#define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE - 2)
-#define FSE_MAX_TABLESIZE (1U << FSE_MAX_TABLELOG)
-#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE - 1)
-#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE - 2)
-#define FSE_MIN_TABLELOG 5
-
-#define FSE_TABLELOG_ABSOLUTE_MAX 15
-#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
-#error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
-#endif
-
-#define FSE_TABLESTEP(tableSize) ((tableSize >> 1) + (tableSize >> 3) + 3)
-
-#endif /* FSE_H */
diff --git a/lib/zstd/fse_compress.c b/lib/zstd/fse_compress.c
deleted file mode 100644
index ef3d1741d53282..00000000000000
--- a/lib/zstd/fse_compress.c
+++ /dev/null
@@ -1,795 +0,0 @@
-/*
- * FSE : Finite State Entropy encoder
- * Copyright (C) 2013-2015, Yann Collet.
- *
- * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation. This program is dual-licensed; you may select
- * either version 2 of the GNU General Public License ("GPL") or BSD license
- * ("BSD").
- *
- * You can contact the author at :
- * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
- */
-
-/* **************************************************************
-* Compiler specifics
-****************************************************************/
-#define FORCE_INLINE static __always_inline
-
-/* **************************************************************
-* Includes
-****************************************************************/
-#include "bitstream.h"
-#include "fse.h"
-#include <linux/compiler.h>
-#include <linux/kernel.h>
-#include <linux/math64.h>
-#include <linux/string.h> /* memcpy, memset */
-
-/* **************************************************************
-* Error Management
-****************************************************************/
-#define FSE_STATIC_ASSERT(c) \
- { \
- enum { FSE_static_assert = 1 / (int)(!!(c)) }; \
- } /* use only *after* variable declarations */
-
-/* **************************************************************
-* Templates
-****************************************************************/
-/*
- designed to be included
- for type-specific functions (template emulation in C)
- Objective is to write these functions only once, for improved maintenance
-*/
-
-/* safety checks */
-#ifndef FSE_FUNCTION_EXTENSION
-#error "FSE_FUNCTION_EXTENSION must be defined"
-#endif
-#ifndef FSE_FUNCTION_TYPE
-#error "FSE_FUNCTION_TYPE must be defined"
-#endif
-
-/* Function names */
-#define FSE_CAT(X, Y) X##Y
-#define FSE_FUNCTION_NAME(X, Y) FSE_CAT(X, Y)
-#define FSE_TYPE_NAME(X, Y) FSE_CAT(X, Y)
-
-/* Function templates */
-
-/* FSE_buildCTable_wksp() :
- * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
- * wkspSize should be sized to handle worst case situation, which is `1<<max_tableLog * sizeof(FSE_FUNCTION_TYPE)`
- * workSpace must also be properly aligned with FSE_FUNCTION_TYPE requirements
- */
-size_t FSE_buildCTable_wksp(FSE_CTable *ct, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workspace, size_t workspaceSize)
-{
- U32 const tableSize = 1 << tableLog;
- U32 const tableMask = tableSize - 1;
- void *const ptr = ct;
- U16 *const tableU16 = ((U16 *)ptr) + 2;
- void *const FSCT = ((U32 *)ptr) + 1 /* header */ + (tableLog ? tableSize >> 1 : 1);
- FSE_symbolCompressionTransform *const symbolTT = (FSE_symbolCompressionTransform *)(FSCT);
- U32 const step = FSE_TABLESTEP(tableSize);
- U32 highThreshold = tableSize - 1;
-
- U32 *cumul;
- FSE_FUNCTION_TYPE *tableSymbol;
- size_t spaceUsed32 = 0;
-
- cumul = (U32 *)workspace + spaceUsed32;
- spaceUsed32 += FSE_MAX_SYMBOL_VALUE + 2;
- tableSymbol = (FSE_FUNCTION_TYPE *)((U32 *)workspace + spaceUsed32);
- spaceUsed32 += ALIGN(sizeof(FSE_FUNCTION_TYPE) * ((size_t)1 << tableLog), sizeof(U32)) >> 2;
-
- if ((spaceUsed32 << 2) > workspaceSize)
- return ERROR(tableLog_tooLarge);
- workspace = (U32 *)workspace + spaceUsed32;
- workspaceSize -= (spaceUsed32 << 2);
-
- /* CTable header */
- tableU16[-2] = (U16)tableLog;
- tableU16[-1] = (U16)maxSymbolValue;
-
- /* For explanations on how to distribute symbol values over the table :
- * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */
-
- /* symbol start positions */
- {
- U32 u;
- cumul[0] = 0;
- for (u = 1; u <= maxSymbolValue + 1; u++) {
- if (normalizedCounter[u - 1] == -1) { /* Low proba symbol */
- cumul[u] = cumul[u - 1] + 1;
- tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u - 1);
- } else {
- cumul[u] = cumul[u - 1] + normalizedCounter[u - 1];
- }
- }
- cumul[maxSymbolValue + 1] = tableSize + 1;
- }
-
- /* Spread symbols */
- {
- U32 position = 0;
- U32 symbol;
- for (symbol = 0; symbol <= maxSymbolValue; symbol++) {
- int nbOccurences;
- for (nbOccurences = 0; nbOccurences < normalizedCounter[symbol]; nbOccurences++) {
- tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
- position = (position + step) & tableMask;
- while (position > highThreshold)
- position = (position + step) & tableMask; /* Low proba area */
- }
- }
-
- if (position != 0)
- return ERROR(GENERIC); /* Must have gone through all positions */
- }
-
- /* Build table */
- {
- U32 u;
- for (u = 0; u < tableSize; u++) {
- FSE_FUNCTION_TYPE s = tableSymbol[u]; /* note : static analyzer may not understand tableSymbol is properly initialized */
- tableU16[cumul[s]++] = (U16)(tableSize + u); /* TableU16 : sorted by symbol order; gives next state value */
- }
- }
-
- /* Build Symbol Transformation Table */
- {
- unsigned total = 0;
- unsigned s;
- for (s = 0; s <= maxSymbolValue; s++) {
- switch (normalizedCounter[s]) {
- case 0: break;
-
- case -1:
- case 1:
- symbolTT[s].deltaNbBits = (tableLog << 16) - (1 << tableLog);
- symbolTT[s].deltaFindState = total - 1;
- total++;
- break;
- default: {
- U32 const maxBitsOut = tableLog - BIT_highbit32(normalizedCounter[s] - 1);
- U32 const minStatePlus = normalizedCounter[s] << maxBitsOut;
- symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;
- symbolTT[s].deltaFindState = total - normalizedCounter[s];
- total += normalizedCounter[s];
- }
- }
- }
- }
-
- return 0;
-}
-
-/*-**************************************************************
-* FSE NCount encoding-decoding
-****************************************************************/
-size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
-{
- size_t const maxHeaderSize = (((maxSymbolValue + 1) * tableLog) >> 3) + 3;
- return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND; /* maxSymbolValue==0 ? use default */
-}
-
-static size_t FSE_writeNCount_generic(void *header, size_t headerBufferSize, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
- unsigned writeIsSafe)
-{
- BYTE *const ostart = (BYTE *)header;
- BYTE *out = ostart;
- BYTE *const oend = ostart + headerBufferSize;
- int nbBits;
- const int tableSize = 1 << tableLog;
- int remaining;
- int threshold;
- U32 bitStream;
- int bitCount;
- unsigned charnum = 0;
- int previous0 = 0;
-
- bitStream = 0;
- bitCount = 0;
- /* Table Size */
- bitStream += (tableLog - FSE_MIN_TABLELOG) << bitCount;
- bitCount += 4;
-
- /* Init */
- remaining = tableSize + 1; /* +1 for extra accuracy */
- threshold = tableSize;
- nbBits = tableLog + 1;
-
- while (remaining > 1) { /* stops at 1 */
- if (previous0) {
- unsigned start = charnum;
- while (!normalizedCounter[charnum])
- charnum++;
- while (charnum >= start + 24) {
- start += 24;
- bitStream += 0xFFFFU << bitCount;
- if ((!writeIsSafe) && (out > oend - 2))
- return ERROR(dstSize_tooSmall); /* Buffer overflow */
- out[0] = (BYTE)bitStream;
- out[1] = (BYTE)(bitStream >> 8);
- out += 2;
- bitStream >>= 16;
- }
- while (charnum >= start + 3) {
- start += 3;
- bitStream += 3 << bitCount;
- bitCount += 2;
- }
- bitStream += (charnum - start) << bitCount;
- bitCount += 2;
- if (bitCount > 16) {
- if ((!writeIsSafe) && (out > oend - 2))
- return ERROR(dstSize_tooSmall); /* Buffer overflow */
- out[0] = (BYTE)bitStream;
- out[1] = (BYTE)(bitStream >> 8);
- out += 2;
- bitStream >>= 16;
- bitCount -= 16;
- }
- }
- {
- int count = normalizedCounter[charnum++];
- int const max = (2 * threshold - 1) - remaining;
- remaining -= count < 0 ? -count : count;
- count++; /* +1 for extra accuracy */
- if (count >= threshold)
- count += max; /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */
- bitStream += count << bitCount;
- bitCount += nbBits;
- bitCount -= (count < max);
- previous0 = (count == 1);
- if (remaining < 1)
- return ERROR(GENERIC);
- while (remaining < threshold)
- nbBits--, threshold >>= 1;
- }
- if (bitCount > 16) {
- if ((!writeIsSafe) && (out > oend - 2))
- return ERROR(dstSize_tooSmall); /* Buffer overflow */
- out[0] = (BYTE)bitStream;
- out[1] = (BYTE)(bitStream >> 8);
- out += 2;
- bitStream >>= 16;
- bitCount -= 16;
- }
- }
-
- /* flush remaining bitStream */
- if ((!writeIsSafe) && (out > oend - 2))
- return ERROR(dstSize_tooSmall); /* Buffer overflow */
- out[0] = (BYTE)bitStream;
- out[1] = (BYTE)(bitStream >> 8);
- out += (bitCount + 7) / 8;
-
- if (charnum > maxSymbolValue + 1)
- return ERROR(GENERIC);
-
- return (out - ostart);
-}
-
-size_t FSE_writeNCount(void *buffer, size_t bufferSize, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
-{
- if (tableLog > FSE_MAX_TABLELOG)
- return ERROR(tableLog_tooLarge); /* Unsupported */
- if (tableLog < FSE_MIN_TABLELOG)
- return ERROR(GENERIC); /* Unsupported */
-
- if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog))
- return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0);
-
- return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1);
-}
-
-/*-**************************************************************
-* Counting histogram
-****************************************************************/
-/*! FSE_count_simple
- This function counts byte values within `src`, and stores the histogram into table `count`.
- It doesn't use any additional memory.
- But this function is unsafe : it doesn't check that all values within `src` can fit into `count`.
- For this reason, prefer using a table `count` with 256 elements.
- @return : count of most numerous element
-*/
-size_t FSE_count_simple(unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize)
-{
- const BYTE *ip = (const BYTE *)src;
- const BYTE *const end = ip + srcSize;
- unsigned maxSymbolValue = *maxSymbolValuePtr;
- unsigned max = 0;
-
- memset(count, 0, (maxSymbolValue + 1) * sizeof(*count));
- if (srcSize == 0) {
- *maxSymbolValuePtr = 0;
- return 0;
- }
-
- while (ip < end)
- count[*ip++]++;
-
- while (!count[maxSymbolValue])
- maxSymbolValue--;
- *maxSymbolValuePtr = maxSymbolValue;
-
- {
- U32 s;
- for (s = 0; s <= maxSymbolValue; s++)
- if (count[s] > max)
- max = count[s];
- }
-
- return (size_t)max;
-}
-
-/* FSE_count_parallel_wksp() :
- * Same as FSE_count_parallel(), but using an externally provided scratch buffer.
- * `workSpace` size must be a minimum of `1024 * sizeof(unsigned)` */
-static size_t FSE_count_parallel_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned checkMax,
- unsigned *const workSpace)
-{
- const BYTE *ip = (const BYTE *)source;
- const BYTE *const iend = ip + sourceSize;
- unsigned maxSymbolValue = *maxSymbolValuePtr;
- unsigned max = 0;
- U32 *const Counting1 = workSpace;
- U32 *const Counting2 = Counting1 + 256;
- U32 *const Counting3 = Counting2 + 256;
- U32 *const Counting4 = Counting3 + 256;
-
- memset(Counting1, 0, 4 * 256 * sizeof(unsigned));
-
- /* safety checks */
- if (!sourceSize) {
- memset(count, 0, maxSymbolValue + 1);
- *maxSymbolValuePtr = 0;
- return 0;
- }
- if (!maxSymbolValue)
- maxSymbolValue = 255; /* 0 == default */
-
- /* by stripes of 16 bytes */
- {
- U32 cached = ZSTD_read32(ip);
- ip += 4;
- while (ip < iend - 15) {
- U32 c = cached;
- cached = ZSTD_read32(ip);
- ip += 4;
- Counting1[(BYTE)c]++;
- Counting2[(BYTE)(c >> 8)]++;
- Counting3[(BYTE)(c >> 16)]++;
- Counting4[c >> 24]++;
- c = cached;
- cached = ZSTD_read32(ip);
- ip += 4;
- Counting1[(BYTE)c]++;
- Counting2[(BYTE)(c >> 8)]++;
- Counting3[(BYTE)(c >> 16)]++;
- Counting4[c >> 24]++;
- c = cached;
- cached = ZSTD_read32(ip);
- ip += 4;
- Counting1[(BYTE)c]++;
- Counting2[(BYTE)(c >> 8)]++;
- Counting3[(BYTE)(c >> 16)]++;
- Counting4[c >> 24]++;
- c = cached;
- cached = ZSTD_read32(ip);
- ip += 4;
- Counting1[(BYTE)c]++;
- Counting2[(BYTE)(c >> 8)]++;
- Counting3[(BYTE)(c >> 16)]++;
- Counting4[c >> 24]++;
- }
- ip -= 4;
- }
-
- /* finish last symbols */
- while (ip < iend)
- Counting1[*ip++]++;
-
- if (checkMax) { /* verify stats will fit into destination table */
- U32 s;
- for (s = 255; s > maxSymbolValue; s--) {
- Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
- if (Counting1[s])
- return ERROR(maxSymbolValue_tooSmall);
- }
- }
-
- {
- U32 s;
- for (s = 0; s <= maxSymbolValue; s++) {
- count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s];
- if (count[s] > max)
- max = count[s];
- }
- }
-
- while (!count[maxSymbolValue])
- maxSymbolValue--;
- *maxSymbolValuePtr = maxSymbolValue;
- return (size_t)max;
-}
-
-/* FSE_countFast_wksp() :
- * Same as FSE_countFast(), but using an externally provided scratch buffer.
- * `workSpace` size must be table of >= `1024` unsigned */
-size_t FSE_countFast_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned *workSpace)
-{
- if (sourceSize < 1500)
- return FSE_count_simple(count, maxSymbolValuePtr, source, sourceSize);
- return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 0, workSpace);
-}
-
-/* FSE_count_wksp() :
- * Same as FSE_count(), but using an externally provided scratch buffer.
- * `workSpace` size must be table of >= `1024` unsigned */
-size_t FSE_count_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned *workSpace)
-{
- if (*maxSymbolValuePtr < 255)
- return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 1, workSpace);
- *maxSymbolValuePtr = 255;
- return FSE_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace);
-}
-
-/*-**************************************************************
-* FSE Compression Code
-****************************************************************/
-/*! FSE_sizeof_CTable() :
- FSE_CTable is a variable size structure which contains :
- `U16 tableLog;`
- `U16 maxSymbolValue;`
- `U16 nextStateNumber[1 << tableLog];` // This size is variable
- `FSE_symbolCompressionTransform symbolTT[maxSymbolValue+1];` // This size is variable
-Allocation is manual (C standard does not support variable-size structures).
-*/
-size_t FSE_sizeof_CTable(unsigned maxSymbolValue, unsigned tableLog)
-{
- if (tableLog > FSE_MAX_TABLELOG)
- return ERROR(tableLog_tooLarge);
- return FSE_CTABLE_SIZE_U32(tableLog, maxSymbolValue) * sizeof(U32);
-}
-
-/* provides the minimum logSize to safely represent a distribution */
-static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
-{
- U32 minBitsSrc = BIT_highbit32((U32)(srcSize - 1)) + 1;
- U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2;
- U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
- return minBits;
-}
-
-unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus)
-{
- U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus;
- U32 tableLog = maxTableLog;
- U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue);
- if (tableLog == 0)
- tableLog = FSE_DEFAULT_TABLELOG;
- if (maxBitsSrc < tableLog)
- tableLog = maxBitsSrc; /* Accuracy can be reduced */
- if (minBits > tableLog)
- tableLog = minBits; /* Need a minimum to safely represent all symbol values */
- if (tableLog < FSE_MIN_TABLELOG)
- tableLog = FSE_MIN_TABLELOG;
- if (tableLog > FSE_MAX_TABLELOG)
- tableLog = FSE_MAX_TABLELOG;
- return tableLog;
-}
-
-unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
-{
- return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2);
-}
-
-/* Secondary normalization method.
- To be used when primary method fails. */
-
-static size_t FSE_normalizeM2(short *norm, U32 tableLog, const unsigned *count, size_t total, U32 maxSymbolValue)
-{
- short const NOT_YET_ASSIGNED = -2;
- U32 s;
- U32 distributed = 0;
- U32 ToDistribute;
-
- /* Init */
- U32 const lowThreshold = (U32)(total >> tableLog);
- U32 lowOne = (U32)((total * 3) >> (tableLog + 1));
-
- for (s = 0; s <= maxSymbolValue; s++) {
- if (count[s] == 0) {
- norm[s] = 0;
- continue;
- }
- if (count[s] <= lowThreshold) {
- norm[s] = -1;
- distributed++;
- total -= count[s];
- continue;
- }
- if (count[s] <= lowOne) {
- norm[s] = 1;
- distributed++;
- total -= count[s];
- continue;
- }
-
- norm[s] = NOT_YET_ASSIGNED;
- }
- ToDistribute = (1 << tableLog) - distributed;
-
- if ((total / ToDistribute) > lowOne) {
- /* risk of rounding to zero */
- lowOne = (U32)((total * 3) / (ToDistribute * 2));
- for (s = 0; s <= maxSymbolValue; s++) {
- if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) {
- norm[s] = 1;
- distributed++;
- total -= count[s];
- continue;
- }
- }
- ToDistribute = (1 << tableLog) - distributed;
- }
-
- if (distributed == maxSymbolValue + 1) {
- /* all values are pretty poor;
- probably incompressible data (should have already been detected);
- find max, then give all remaining points to max */
- U32 maxV = 0, maxC = 0;
- for (s = 0; s <= maxSymbolValue; s++)
- if (count[s] > maxC)
- maxV = s, maxC = count[s];
- norm[maxV] += (short)ToDistribute;
- return 0;
- }
-
- if (total == 0) {
- /* all of the symbols were low enough for the lowOne or lowThreshold */
- for (s = 0; ToDistribute > 0; s = (s + 1) % (maxSymbolValue + 1))
- if (norm[s] > 0)
- ToDistribute--, norm[s]++;
- return 0;
- }
-
- {
- U64 const vStepLog = 62 - tableLog;
- U64 const mid = (1ULL << (vStepLog - 1)) - 1;
- U64 const rStep = div_u64((((U64)1 << vStepLog) * ToDistribute) + mid, (U32)total); /* scale on remaining */
- U64 tmpTotal = mid;
- for (s = 0; s <= maxSymbolValue; s++) {
- if (norm[s] == NOT_YET_ASSIGNED) {
- U64 const end = tmpTotal + (count[s] * rStep);
- U32 const sStart = (U32)(tmpTotal >> vStepLog);
- U32 const sEnd = (U32)(end >> vStepLog);
- U32 const weight = sEnd - sStart;
- if (weight < 1)
- return ERROR(GENERIC);
- norm[s] = (short)weight;
- tmpTotal = end;
- }
- }
- }
-
- return 0;
-}
-
-size_t FSE_normalizeCount(short *normalizedCounter, unsigned tableLog, const unsigned *count, size_t total, unsigned maxSymbolValue)
-{
- /* Sanity checks */
- if (tableLog == 0)
- tableLog = FSE_DEFAULT_TABLELOG;
- if (tableLog < FSE_MIN_TABLELOG)
- return ERROR(GENERIC); /* Unsupported size */
- if (tableLog > FSE_MAX_TABLELOG)
- return ERROR(tableLog_tooLarge); /* Unsupported size */
- if (tableLog < FSE_minTableLog(total, maxSymbolValue))
- return ERROR(GENERIC); /* Too small tableLog, compression potentially impossible */
-
- {
- U32 const rtbTable[] = {0, 473195, 504333, 520860, 550000, 700000, 750000, 830000};
- U64 const scale = 62 - tableLog;
- U64 const step = div_u64((U64)1 << 62, (U32)total); /* <== here, one division ! */
- U64 const vStep = 1ULL << (scale - 20);
- int stillToDistribute = 1 << tableLog;
- unsigned s;
- unsigned largest = 0;
- short largestP = 0;
- U32 lowThreshold = (U32)(total >> tableLog);
-
- for (s = 0; s <= maxSymbolValue; s++) {
- if (count[s] == total)
- return 0; /* rle special case */
- if (count[s] == 0) {
- normalizedCounter[s] = 0;
- continue;
- }
- if (count[s] <= lowThreshold) {
- normalizedCounter[s] = -1;
- stillToDistribute--;
- } else {
- short proba = (short)((count[s] * step) >> scale);
- if (proba < 8) {
- U64 restToBeat = vStep * rtbTable[proba];
- proba += (count[s] * step) - ((U64)proba << scale) > restToBeat;
- }
- if (proba > largestP)
- largestP = proba, largest = s;
- normalizedCounter[s] = proba;
- stillToDistribute -= proba;
- }
- }
- if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) {
- /* corner case, need another normalization method */
- size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue);
- if (FSE_isError(errorCode))
- return errorCode;
- } else
- normalizedCounter[largest] += (short)stillToDistribute;
- }
-
- return tableLog;
-}
-
-/* fake FSE_CTable, for raw (uncompressed) input */
-size_t FSE_buildCTable_raw(FSE_CTable *ct, unsigned nbBits)
-{
- const unsigned tableSize = 1 << nbBits;
- const unsigned tableMask = tableSize - 1;
- const unsigned maxSymbolValue = tableMask;
- void *const ptr = ct;
- U16 *const tableU16 = ((U16 *)ptr) + 2;
- void *const FSCT = ((U32 *)ptr) + 1 /* header */ + (tableSize >> 1); /* assumption : tableLog >= 1 */
- FSE_symbolCompressionTransform *const symbolTT = (FSE_symbolCompressionTransform *)(FSCT);
- unsigned s;
-
- /* Sanity checks */
- if (nbBits < 1)
- return ERROR(GENERIC); /* min size */
-
- /* header */
- tableU16[-2] = (U16)nbBits;
- tableU16[-1] = (U16)maxSymbolValue;
-
- /* Build table */
- for (s = 0; s < tableSize; s++)
- tableU16[s] = (U16)(tableSize + s);
-
- /* Build Symbol Transformation Table */
- {
- const U32 deltaNbBits = (nbBits << 16) - (1 << nbBits);
- for (s = 0; s <= maxSymbolValue; s++) {
- symbolTT[s].deltaNbBits = deltaNbBits;
- symbolTT[s].deltaFindState = s - 1;
- }
- }
-
- return 0;
-}
-
-/* fake FSE_CTable, for rle input (always same symbol) */
-size_t FSE_buildCTable_rle(FSE_CTable *ct, BYTE symbolValue)
-{
- void *ptr = ct;
- U16 *tableU16 = ((U16 *)ptr) + 2;
- void *FSCTptr = (U32 *)ptr + 2;
- FSE_symbolCompressionTransform *symbolTT = (FSE_symbolCompressionTransform *)FSCTptr;
-
- /* header */
- tableU16[-2] = (U16)0;
- tableU16[-1] = (U16)symbolValue;
-
- /* Build table */
- tableU16[0] = 0;
- tableU16[1] = 0; /* just in case */
-
- /* Build Symbol Transformation Table */
- symbolTT[symbolValue].deltaNbBits = 0;
- symbolTT[symbolValue].deltaFindState = 0;
-
- return 0;
-}
-
-static size_t FSE_compress_usingCTable_generic(void *dst, size_t dstSize, const void *src, size_t srcSize, const FSE_CTable *ct, const unsigned fast)
-{
- const BYTE *const istart = (const BYTE *)src;
- const BYTE *const iend = istart + srcSize;
- const BYTE *ip = iend;
-
- BIT_CStream_t bitC;
- FSE_CState_t CState1, CState2;
-
- /* init */
- if (srcSize <= 2)
- return 0;
- {
- size_t const initError = BIT_initCStream(&bitC, dst, dstSize);
- if (FSE_isError(initError))
- return 0; /* not enough space available to write a bitstream */
- }
-
-#define FSE_FLUSHBITS(s) (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s))
-
- if (srcSize & 1) {
- FSE_initCState2(&CState1, ct, *--ip);
- FSE_initCState2(&CState2, ct, *--ip);
- FSE_encodeSymbol(&bitC, &CState1, *--ip);
- FSE_FLUSHBITS(&bitC);
- } else {
- FSE_initCState2(&CState2, ct, *--ip);
- FSE_initCState2(&CState1, ct, *--ip);
- }
-
- /* join to mod 4 */
- srcSize -= 2;
- if ((sizeof(bitC.bitContainer) * 8 > FSE_MAX_TABLELOG * 4 + 7) && (srcSize & 2)) { /* test bit 2 */
- FSE_encodeSymbol(&bitC, &CState2, *--ip);
- FSE_encodeSymbol(&bitC, &CState1, *--ip);
- FSE_FLUSHBITS(&bitC);
- }
-
- /* 2 or 4 encoding per loop */
- while (ip > istart) {
-
- FSE_encodeSymbol(&bitC, &CState2, *--ip);
-
- if (sizeof(bitC.bitContainer) * 8 < FSE_MAX_TABLELOG * 2 + 7) /* this test must be static */
- FSE_FLUSHBITS(&bitC);
-
- FSE_encodeSymbol(&bitC, &CState1, *--ip);
-
- if (sizeof(bitC.bitContainer) * 8 > FSE_MAX_TABLELOG * 4 + 7) { /* this test must be static */
- FSE_encodeSymbol(&bitC, &CState2, *--ip);
- FSE_encodeSymbol(&bitC, &CState1, *--ip);
- }
-
- FSE_FLUSHBITS(&bitC);
- }
-
- FSE_flushCState(&bitC, &CState2);
- FSE_flushCState(&bitC, &CState1);
- return BIT_closeCStream(&bitC);
-}
-
-size_t FSE_compress_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const FSE_CTable *ct)
-{
- unsigned const fast = (dstSize >= FSE_BLOCKBOUND(srcSize));
-
- if (fast)
- return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1);
- else
- return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0);
-}
-
-size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); }
diff --git a/lib/zstd/fse_decompress.c b/lib/zstd/fse_decompress.c
deleted file mode 100644
index 0b353530fb3f30..00000000000000
--- a/lib/zstd/fse_decompress.c
+++ /dev/null
@@ -1,325 +0,0 @@
-/*
- * FSE : Finite State Entropy decoder
- * Copyright (C) 2013-2015, Yann Collet.
- *
- * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation. This program is dual-licensed; you may select
- * either version 2 of the GNU General Public License ("GPL") or BSD license
- * ("BSD").
- *
- * You can contact the author at :
- * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
- */
-
-/* **************************************************************
-* Compiler specifics
-****************************************************************/
-#define FORCE_INLINE static __always_inline
-
-/* **************************************************************
-* Includes
-****************************************************************/
-#include "bitstream.h"
-#include "fse.h"
-#include "zstd_internal.h"
-#include <linux/compiler.h>
-#include <linux/kernel.h>
-#include <linux/string.h> /* memcpy, memset */
-
-/* **************************************************************
-* Error Management
-****************************************************************/
-#define FSE_isError ERR_isError
-#define FSE_STATIC_ASSERT(c) \
- { \
- enum { FSE_static_assert = 1 / (int)(!!(c)) }; \
- } /* use only *after* variable declarations */
-
-/* **************************************************************
-* Templates
-****************************************************************/
-/*
- designed to be included
- for type-specific functions (template emulation in C)
- Objective is to write these functions only once, for improved maintenance
-*/
-
-/* safety checks */
-#ifndef FSE_FUNCTION_EXTENSION
-#error "FSE_FUNCTION_EXTENSION must be defined"
-#endif
-#ifndef FSE_FUNCTION_TYPE
-#error "FSE_FUNCTION_TYPE must be defined"
-#endif
-
-/* Function names */
-#define FSE_CAT(X, Y) X##Y
-#define FSE_FUNCTION_NAME(X, Y) FSE_CAT(X, Y)
-#define FSE_TYPE_NAME(X, Y) FSE_CAT(X, Y)
-
-/* Function templates */
-
-size_t FSE_buildDTable_wksp(FSE_DTable *dt, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workspace, size_t workspaceSize)
-{
- void *const tdPtr = dt + 1; /* because *dt is unsigned, 32-bits aligned on 32-bits */
- FSE_DECODE_TYPE *const tableDecode = (FSE_DECODE_TYPE *)(tdPtr);
- U16 *symbolNext = (U16 *)workspace;
-
- U32 const maxSV1 = maxSymbolValue + 1;
- U32 const tableSize = 1 << tableLog;
- U32 highThreshold = tableSize - 1;
-
- /* Sanity Checks */
- if (workspaceSize < sizeof(U16) * (FSE_MAX_SYMBOL_VALUE + 1))
- return ERROR(tableLog_tooLarge);
- if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE)
- return ERROR(maxSymbolValue_tooLarge);
- if (tableLog > FSE_MAX_TABLELOG)
- return ERROR(tableLog_tooLarge);
-
- /* Init, lay down lowprob symbols */
- {
- FSE_DTableHeader DTableH;
- DTableH.tableLog = (U16)tableLog;
- DTableH.fastMode = 1;
- {
- S16 const largeLimit = (S16)(1 << (tableLog - 1));
- U32 s;
- for (s = 0; s < maxSV1; s++) {
- if (normalizedCounter[s] == -1) {
- tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
- symbolNext[s] = 1;
- } else {
- if (normalizedCounter[s] >= largeLimit)
- DTableH.fastMode = 0;
- symbolNext[s] = normalizedCounter[s];
- }
- }
- }
- memcpy(dt, &DTableH, sizeof(DTableH));
- }
-
- /* Spread symbols */
- {
- U32 const tableMask = tableSize - 1;
- U32 const step = FSE_TABLESTEP(tableSize);
- U32 s, position = 0;
- for (s = 0; s < maxSV1; s++) {
- int i;
- for (i = 0; i < normalizedCounter[s]; i++) {
- tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
- position = (position + step) & tableMask;
- while (position > highThreshold)
- position = (position + step) & tableMask; /* lowprob area */
- }
- }
- if (position != 0)
- return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
- }
-
- /* Build Decoding table */
- {
- U32 u;
- for (u = 0; u < tableSize; u++) {
- FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol);
- U16 nextState = symbolNext[symbol]++;
- tableDecode[u].nbBits = (BYTE)(tableLog - BIT_highbit32((U32)nextState));
- tableDecode[u].newState = (U16)((nextState << tableDecode[u].nbBits) - tableSize);
- }
- }
-
- return 0;
-}
-
-/*-*******************************************************
-* Decompression (Byte symbols)
-*********************************************************/
-size_t FSE_buildDTable_rle(FSE_DTable *dt, BYTE symbolValue)
-{
- void *ptr = dt;
- FSE_DTableHeader *const DTableH = (FSE_DTableHeader *)ptr;
- void *dPtr = dt + 1;
- FSE_decode_t *const cell = (FSE_decode_t *)dPtr;
-
- DTableH->tableLog = 0;
- DTableH->fastMode = 0;
-
- cell->newState = 0;
- cell->symbol = symbolValue;
- cell->nbBits = 0;
-
- return 0;
-}
-
-size_t FSE_buildDTable_raw(FSE_DTable *dt, unsigned nbBits)
-{
- void *ptr = dt;
- FSE_DTableHeader *const DTableH = (FSE_DTableHeader *)ptr;
- void *dPtr = dt + 1;
- FSE_decode_t *const dinfo = (FSE_decode_t *)dPtr;
- const unsigned tableSize = 1 << nbBits;
- const unsigned tableMask = tableSize - 1;
- const unsigned maxSV1 = tableMask + 1;
- unsigned s;
-
- /* Sanity checks */
- if (nbBits < 1)
- return ERROR(GENERIC); /* min size */
-
- /* Build Decoding Table */
- DTableH->tableLog = (U16)nbBits;
- DTableH->fastMode = 1;
- for (s = 0; s < maxSV1; s++) {
- dinfo[s].newState = 0;
- dinfo[s].symbol = (BYTE)s;
- dinfo[s].nbBits = (BYTE)nbBits;
- }
-
- return 0;
-}
-
-FORCE_INLINE size_t FSE_decompress_usingDTable_generic(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const FSE_DTable *dt,
- const unsigned fast)
-{
- BYTE *const ostart = (BYTE *)dst;
- BYTE *op = ostart;
- BYTE *const omax = op + maxDstSize;
- BYTE *const olimit = omax - 3;
-
- BIT_DStream_t bitD;
- FSE_DState_t state1;
- FSE_DState_t state2;
-
- /* Init */
- CHECK_F(BIT_initDStream(&bitD, cSrc, cSrcSize));
-
- FSE_initDState(&state1, &bitD, dt);
- FSE_initDState(&state2, &bitD, dt);
-
-#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
-
- /* 4 symbols per loop */
- for (; (BIT_reloadDStream(&bitD) == BIT_DStream_unfinished) & (op < olimit); op += 4) {
- op[0] = FSE_GETSYMBOL(&state1);
-
- if (FSE_MAX_TABLELOG * 2 + 7 > sizeof(bitD.bitContainer) * 8) /* This test must be static */
- BIT_reloadDStream(&bitD);
-
- op[1] = FSE_GETSYMBOL(&state2);
-
- if (FSE_MAX_TABLELOG * 4 + 7 > sizeof(bitD.bitContainer) * 8) /* This test must be static */
- {
- if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) {
- op += 2;
- break;
- }
- }
-
- op[2] = FSE_GETSYMBOL(&state1);
-
- if (FSE_MAX_TABLELOG * 2 + 7 > sizeof(bitD.bitContainer) * 8) /* This test must be static */
- BIT_reloadDStream(&bitD);
-
- op[3] = FSE_GETSYMBOL(&state2);
- }
-
- /* tail */
- /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */
- while (1) {
- if (op > (omax - 2))
- return ERROR(dstSize_tooSmall);
- *op++ = FSE_GETSYMBOL(&state1);
- if (BIT_reloadDStream(&bitD) == BIT_DStream_overflow) {
- *op++ = FSE_GETSYMBOL(&state2);
- break;
- }
-
- if (op > (omax - 2))
- return ERROR(dstSize_tooSmall);
- *op++ = FSE_GETSYMBOL(&state2);
- if (BIT_reloadDStream(&bitD) == BIT_DStream_overflow) {
- *op++ = FSE_GETSYMBOL(&state1);
- break;
- }
- }
-
- return op - ostart;
-}
-
-size_t FSE_decompress_usingDTable(void *dst, size_t originalSize, const void *cSrc, size_t cSrcSize, const FSE_DTable *dt)
-{
- const void *ptr = dt;
- const FSE_DTableHeader *DTableH = (const FSE_DTableHeader *)ptr;
- const U32 fastMode = DTableH->fastMode;
-
- /* select fast mode (static) */
- if (fastMode)
- return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
- return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
-}
-
-size_t FSE_decompress_wksp(void *dst, size_t dstCapacity, const void *cSrc, size_t cSrcSize, unsigned maxLog, void *workspace, size_t workspaceSize)
-{
- const BYTE *const istart = (const BYTE *)cSrc;
- const BYTE *ip = istart;
- unsigned tableLog;
- unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
- size_t NCountLength;
-
- FSE_DTable *dt;
- short *counting;
- size_t spaceUsed32 = 0;
-
- FSE_STATIC_ASSERT(sizeof(FSE_DTable) == sizeof(U32));
-
- dt = (FSE_DTable *)((U32 *)workspace + spaceUsed32);
- spaceUsed32 += FSE_DTABLE_SIZE_U32(maxLog);
- counting = (short *)((U32 *)workspace + spaceUsed32);
- spaceUsed32 += ALIGN(sizeof(short) * (FSE_MAX_SYMBOL_VALUE + 1), sizeof(U32)) >> 2;
-
- if ((spaceUsed32 << 2) > workspaceSize)
- return ERROR(tableLog_tooLarge);
- workspace = (U32 *)workspace + spaceUsed32;
- workspaceSize -= (spaceUsed32 << 2);
-
- /* normal FSE decoding mode */
- NCountLength = FSE_readNCount(counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
- if (FSE_isError(NCountLength))
- return NCountLength;
- // if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size; supposed to be already checked in NCountLength, only remaining
- // case : NCountLength==cSrcSize */
- if (tableLog > maxLog)
- return ERROR(tableLog_tooLarge);
- ip += NCountLength;
- cSrcSize -= NCountLength;
-
- CHECK_F(FSE_buildDTable_wksp(dt, counting, maxSymbolValue, tableLog, workspace, workspaceSize));
-
- return FSE_decompress_usingDTable(dst, dstCapacity, ip, cSrcSize, dt); /* always return, even if it is an error code */
-}
diff --git a/lib/zstd/huf.h b/lib/zstd/huf.h
deleted file mode 100644
index 923218d12e2893..00000000000000
--- a/lib/zstd/huf.h
+++ /dev/null
@@ -1,212 +0,0 @@
-/*
- * Huffman coder, part of New Generation Entropy library
- * header file
- * Copyright (C) 2013-2016, Yann Collet.
- *
- * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation. This program is dual-licensed; you may select
- * either version 2 of the GNU General Public License ("GPL") or BSD license
- * ("BSD").
- *
- * You can contact the author at :
- * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
- */
-#ifndef HUF_H_298734234
-#define HUF_H_298734234
-
-/* *** Dependencies *** */
-#include <linux/types.h> /* size_t */
-
-/* *** Tool functions *** */
-#define HUF_BLOCKSIZE_MAX (128 * 1024) /**< maximum input size for a single block compressed with HUF_compress */
-size_t HUF_compressBound(size_t size); /**< maximum compressed size (worst case) */
-
-/* Error Management */
-unsigned HUF_isError(size_t code); /**< tells if a return value is an error code */
-
-/* *** Advanced function *** */
-
-/** HUF_compress4X_wksp() :
-* Same as HUF_compress2(), but uses externally allocated `workSpace`, which must be a table of >= 1024 unsigned */
-size_t HUF_compress4X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace,
- size_t wkspSize); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */
-
-/* *** Dependencies *** */
-#include "mem.h" /* U32 */
-
-/* *** Constants *** */
-#define HUF_TABLELOG_MAX 12 /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
-#define HUF_TABLELOG_DEFAULT 11 /* tableLog by default, when not specified */
-#define HUF_SYMBOLVALUE_MAX 255
-
-#define HUF_TABLELOG_ABSOLUTEMAX 15 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
-#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX)
-#error "HUF_TABLELOG_MAX is too large !"
-#endif
-
-/* ****************************************
-* Static allocation
-******************************************/
-/* HUF buffer bounds */
-#define HUF_CTABLEBOUND 129
-#define HUF_BLOCKBOUND(size) (size + (size >> 8) + 8) /* only true if incompressible pre-filtered with fast heuristic */
-#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
-
-/* static allocation of HUF's Compression Table */
-#define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \
- U32 name##hb[maxSymbolValue + 1]; \
- void *name##hv = &(name##hb); \
- HUF_CElt *name = (HUF_CElt *)(name##hv) /* no final ; */
-
-/* static allocation of HUF's DTable */
-typedef U32 HUF_DTable;
-#define HUF_DTABLE_SIZE(maxTableLog) (1 + (1 << (maxTableLog)))
-#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = {((U32)((maxTableLog)-1) * 0x01000001)}
-#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = {((U32)(maxTableLog)*0x01000001)}
-
-/* The workspace must have alignment at least 4 and be at least this large */
-#define HUF_COMPRESS_WORKSPACE_SIZE (6 << 10)
-#define HUF_COMPRESS_WORKSPACE_SIZE_U32 (HUF_COMPRESS_WORKSPACE_SIZE / sizeof(U32))
-
-/* The workspace must have alignment at least 4 and be at least this large */
-#define HUF_DECOMPRESS_WORKSPACE_SIZE (3 << 10)
-#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))
-
-/* ****************************************
-* Advanced decompression functions
-******************************************/
-size_t HUF_decompress4X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize); /**< decodes RLE and uncompressed */
-size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace,
- size_t workspaceSize); /**< considers RLE and uncompressed as errors */
-size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace,
- size_t workspaceSize); /**< single-symbol decoder */
-size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace,
- size_t workspaceSize); /**< double-symbols decoder */
-
-/* ****************************************
-* HUF detailed API
-******************************************/
-/*!
-HUF_compress() does the following:
-1. count symbol occurrence from source[] into table count[] using FSE_count()
-2. (optional) refine tableLog using HUF_optimalTableLog()
-3. build Huffman table from count using HUF_buildCTable()
-4. save Huffman table to memory buffer using HUF_writeCTable_wksp()
-5. encode the data stream using HUF_compress4X_usingCTable()
-
-The following API allows targeting specific sub-functions for advanced tasks.
-For example, it's possible to compress several blocks using the same 'CTable',
-or to save and regenerate 'CTable' using external methods.
-*/
-/* FSE_count() : find it within "fse.h" */
-unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
-typedef struct HUF_CElt_s HUF_CElt; /* incomplete type */
-size_t HUF_writeCTable_wksp(void *dst, size_t maxDstSize, const HUF_CElt *CTable, unsigned maxSymbolValue, unsigned huffLog, void *workspace, size_t workspaceSize);
-size_t HUF_compress4X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable);
-
-typedef enum {
- HUF_repeat_none, /**< Cannot use the previous table */
- HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1,
- 4}X_repeat */
- HUF_repeat_valid /**< Can use the previous table and it is assumed to be valid */
-} HUF_repeat;
-/** HUF_compress4X_repeat() :
-* Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
-* If it uses hufTable it does not modify hufTable or repeat.
-* If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
-* If preferRepeat then the old table will always be used if valid. */
-size_t HUF_compress4X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace,
- size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat,
- int preferRepeat); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */
-
-/** HUF_buildCTable_wksp() :
- * Same as HUF_buildCTable(), but using externally allocated scratch buffer.
- * `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of 1024 unsigned.
- */
-size_t HUF_buildCTable_wksp(HUF_CElt *tree, const U32 *count, U32 maxSymbolValue, U32 maxNbBits, void *workSpace, size_t wkspSize);
-
-/*! HUF_readStats() :
- Read compact Huffman tree, saved by HUF_writeCTable().
- `huffWeight` is destination buffer.
- @return : size read from `src` , or an error Code .
- Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */
-size_t HUF_readStats_wksp(BYTE *huffWeight, size_t hwSize, U32 *rankStats, U32 *nbSymbolsPtr, U32 *tableLogPtr, const void *src, size_t srcSize,
- void *workspace, size_t workspaceSize);
-
-/** HUF_readCTable() :
-* Loading a CTable saved with HUF_writeCTable() */
-size_t HUF_readCTable_wksp(HUF_CElt *CTable, unsigned maxSymbolValue, const void *src, size_t srcSize, void *workspace, size_t workspaceSize);
-
-/*
-HUF_decompress() does the following:
-1. select the decompression algorithm (X2, X4) based on pre-computed heuristics
-2. build Huffman table from save, using HUF_readDTableXn()
-3. decode 1 or 4 segments in parallel using HUF_decompressSXn_usingDTable
-*/
-
-/** HUF_selectDecoder() :
-* Tells which decoder is likely to decode faster,
-* based on a set of pre-determined metrics.
-* @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 .
-* Assumption : 0 < cSrcSize < dstSize <= 128 KB */
-U32 HUF_selectDecoder(size_t dstSize, size_t cSrcSize);
-
-size_t HUF_readDTableX2_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize);
-size_t HUF_readDTableX4_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize);
-
-size_t HUF_decompress4X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
-size_t HUF_decompress4X2_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
-size_t HUF_decompress4X4_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
-
-/* single stream variants */
-
-size_t HUF_compress1X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace,
- size_t wkspSize); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */
-size_t HUF_compress1X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable);
-/** HUF_compress1X_repeat() :
-* Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
-* If it uses hufTable it does not modify hufTable or repeat.
-* If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
-* If preferRepeat then the old table will always be used if valid. */
-size_t HUF_compress1X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace,
- size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat,
- int preferRepeat); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */
-
-size_t HUF_decompress1X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize);
-size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace,
- size_t workspaceSize); /**< single-symbol decoder */
-size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace,
- size_t workspaceSize); /**< double-symbols decoder */
-
-size_t HUF_decompress1X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize,
- const HUF_DTable *DTable); /**< automatic selection of single or double symbol decoder, based on DTable */
-size_t HUF_decompress1X2_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
-size_t HUF_decompress1X4_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
-
-#endif /* HUF_H_298734234 */
diff --git a/lib/zstd/huf_compress.c b/lib/zstd/huf_compress.c
deleted file mode 100644
index fd32838c185f25..00000000000000
--- a/lib/zstd/huf_compress.c
+++ /dev/null
@@ -1,773 +0,0 @@
-/*
- * Huffman encoder, part of New Generation Entropy library
- * Copyright (C) 2013-2016, Yann Collet.
- *
- * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation. This program is dual-licensed; you may select
- * either version 2 of the GNU General Public License ("GPL") or BSD license
- * ("BSD").
- *
- * You can contact the author at :
- * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
- */
-
-/* **************************************************************
-* Includes
-****************************************************************/
-#include "bitstream.h"
-#include "fse.h" /* header compression */
-#include "huf.h"
-#include <linux/kernel.h>
-#include <linux/string.h> /* memcpy, memset */
-
-/* **************************************************************
-* Error Management
-****************************************************************/
-#define HUF_STATIC_ASSERT(c) \
- { \
- enum { HUF_static_assert = 1 / (int)(!!(c)) }; \
- } /* use only *after* variable declarations */
-#define CHECK_V_F(e, f) \
- size_t const e = f; \
- if (ERR_isError(e)) \
- return f
-#define CHECK_F(f) \
- { \
- CHECK_V_F(_var_err__, f); \
- }
-
-/* **************************************************************
-* Utils
-****************************************************************/
-unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
-{
- return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
-}
-
-/* *******************************************************
-* HUF : Huffman block compression
-*********************************************************/
-/* HUF_compressWeights() :
- * Same as FSE_compress(), but dedicated to huff0's weights compression.
- * The use case needs much less stack memory.
- * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX.
- */
-#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6
-size_t HUF_compressWeights_wksp(void *dst, size_t dstSize, const void *weightTable, size_t wtSize, void *workspace, size_t workspaceSize)
-{
- BYTE *const ostart = (BYTE *)dst;
- BYTE *op = ostart;
- BYTE *const oend = ostart + dstSize;
-
- U32 maxSymbolValue = HUF_TABLELOG_MAX;
- U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;
-
- FSE_CTable *CTable;
- U32 *count;
- S16 *norm;
- size_t spaceUsed32 = 0;
-
- HUF_STATIC_ASSERT(sizeof(FSE_CTable) == sizeof(U32));
-
- CTable = (FSE_CTable *)((U32 *)workspace + spaceUsed32);
- spaceUsed32 += FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX);
- count = (U32 *)workspace + spaceUsed32;
- spaceUsed32 += HUF_TABLELOG_MAX + 1;
- norm = (S16 *)((U32 *)workspace + spaceUsed32);
- spaceUsed32 += ALIGN(sizeof(S16) * (HUF_TABLELOG_MAX + 1), sizeof(U32)) >> 2;
-
- if ((spaceUsed32 << 2) > workspaceSize)
- return ERROR(tableLog_tooLarge);
- workspace = (U32 *)workspace + spaceUsed32;
- workspaceSize -= (spaceUsed32 << 2);
-
- /* init conditions */
- if (wtSize <= 1)
- return 0; /* Not compressible */
-
- /* Scan input and build symbol stats */
- {
- CHECK_V_F(maxCount, FSE_count_simple(count, &maxSymbolValue, weightTable, wtSize));
- if (maxCount == wtSize)
- return 1; /* only a single symbol in src : rle */
- if (maxCount == 1)
- return 0; /* each symbol present maximum once => not compressible */
- }
-
- tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue);
- CHECK_F(FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue));
-
- /* Write table description header */
- {
- CHECK_V_F(hSize, FSE_writeNCount(op, oend - op, norm, maxSymbolValue, tableLog));
- op += hSize;
- }
-
- /* Compress */
- CHECK_F(FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, workspace, workspaceSize));
- {
- CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, weightTable, wtSize, CTable));
- if (cSize == 0)
- return 0; /* not enough space for compressed data */
- op += cSize;
- }
-
- return op - ostart;
-}
-
-struct HUF_CElt_s {
- U16 val;
- BYTE nbBits;
-}; /* typedef'd to HUF_CElt within "huf.h" */
-
-/*! HUF_writeCTable_wksp() :
- `CTable` : Huffman tree to save, using huf representation.
- @return : size of saved CTable */
-size_t HUF_writeCTable_wksp(void *dst, size_t maxDstSize, const HUF_CElt *CTable, U32 maxSymbolValue, U32 huffLog, void *workspace, size_t workspaceSize)
-{
- BYTE *op = (BYTE *)dst;
- U32 n;
-
- BYTE *bitsToWeight;
- BYTE *huffWeight;
- size_t spaceUsed32 = 0;
-
- bitsToWeight = (BYTE *)((U32 *)workspace + spaceUsed32);
- spaceUsed32 += ALIGN(HUF_TABLELOG_MAX + 1, sizeof(U32)) >> 2;
- huffWeight = (BYTE *)((U32 *)workspace + spaceUsed32);
- spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX, sizeof(U32)) >> 2;
-
- if ((spaceUsed32 << 2) > workspaceSize)
- return ERROR(tableLog_tooLarge);
- workspace = (U32 *)workspace + spaceUsed32;
- workspaceSize -= (spaceUsed32 << 2);
-
- /* check conditions */
- if (maxSymbolValue > HUF_SYMBOLVALUE_MAX)
- return ERROR(maxSymbolValue_tooLarge);
-
- /* convert to weight */
- bitsToWeight[0] = 0;
- for (n = 1; n < huffLog + 1; n++)
- bitsToWeight[n] = (BYTE)(huffLog + 1 - n);
- for (n = 0; n < maxSymbolValue; n++)
- huffWeight[n] = bitsToWeight[CTable[n].nbBits];
-
- /* attempt weights compression by FSE */
- {
- CHECK_V_F(hSize, HUF_compressWeights_wksp(op + 1, maxDstSize - 1, huffWeight, maxSymbolValue, workspace, workspaceSize));
- if ((hSize > 1) & (hSize < maxSymbolValue / 2)) { /* FSE compressed */
- op[0] = (BYTE)hSize;
- return hSize + 1;
- }
- }
-
- /* write raw values as 4-bits (max : 15) */
- if (maxSymbolValue > (256 - 128))
- return ERROR(GENERIC); /* should not happen : likely means source cannot be compressed */
- if (((maxSymbolValue + 1) / 2) + 1 > maxDstSize)
- return ERROR(dstSize_tooSmall); /* not enough space within dst buffer */
- op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue - 1));
- huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause msan issue in final combination */
- for (n = 0; n < maxSymbolValue; n += 2)
- op[(n / 2) + 1] = (BYTE)((huffWeight[n] << 4) + huffWeight[n + 1]);
- return ((maxSymbolValue + 1) / 2) + 1;
-}
-
-size_t HUF_readCTable_wksp(HUF_CElt *CTable, U32 maxSymbolValue, const void *src, size_t srcSize, void *workspace, size_t workspaceSize)
-{
- U32 *rankVal;
- BYTE *huffWeight;
- U32 tableLog = 0;
- U32 nbSymbols = 0;
- size_t readSize;
- size_t spaceUsed32 = 0;
-
- rankVal = (U32 *)workspace + spaceUsed32;
- spaceUsed32 += HUF_TABLELOG_ABSOLUTEMAX + 1;
- huffWeight = (BYTE *)((U32 *)workspace + spaceUsed32);
- spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;
-
- if ((spaceUsed32 << 2) > workspaceSize)
- return ERROR(tableLog_tooLarge);
- workspace = (U32 *)workspace + spaceUsed32;
- workspaceSize -= (spaceUsed32 << 2);
-
- /* get symbol weights */
- readSize = HUF_readStats_wksp(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize, workspace, workspaceSize);
- if (ERR_isError(readSize))
- return readSize;
-
- /* check result */
- if (tableLog > HUF_TABLELOG_MAX)
- return ERROR(tableLog_tooLarge);
- if (nbSymbols > maxSymbolValue + 1)
- return ERROR(maxSymbolValue_tooSmall);
-
- /* Prepare base value per rank */
- {
- U32 n, nextRankStart = 0;
- for (n = 1; n <= tableLog; n++) {
- U32 curr = nextRankStart;
- nextRankStart += (rankVal[n] << (n - 1));
- rankVal[n] = curr;
- }
- }
-
- /* fill nbBits */
- {
- U32 n;
- for (n = 0; n < nbSymbols; n++) {
- const U32 w = huffWeight[n];
- CTable[n].nbBits = (BYTE)(tableLog + 1 - w);
- }
- }
-
- /* fill val */
- {
- U16 nbPerRank[HUF_TABLELOG_MAX + 2] = {0}; /* support w=0=>n=tableLog+1 */
- U16 valPerRank[HUF_TABLELOG_MAX + 2] = {0};
- {
- U32 n;
- for (n = 0; n < nbSymbols; n++)
- nbPerRank[CTable[n].nbBits]++;
- }
-		/* determine starting value per rank */
- valPerRank[tableLog + 1] = 0; /* for w==0 */
- {
- U16 min = 0;
- U32 n;
- for (n = tableLog; n > 0; n--) { /* start at n=tablelog <-> w=1 */
- valPerRank[n] = min; /* get starting value within each rank */
- min += nbPerRank[n];
- min >>= 1;
- }
- }
- /* assign value within rank, symbol order */
- {
- U32 n;
- for (n = 0; n <= maxSymbolValue; n++)
- CTable[n].val = valPerRank[CTable[n].nbBits]++;
- }
- }
-
- return readSize;
-}
-
-typedef struct nodeElt_s {
- U32 count;
- U16 parent;
- BYTE byte;
- BYTE nbBits;
-} nodeElt;
-
-static U32 HUF_setMaxHeight(nodeElt *huffNode, U32 lastNonNull, U32 maxNbBits)
-{
- const U32 largestBits = huffNode[lastNonNull].nbBits;
- if (largestBits <= maxNbBits)
- return largestBits; /* early exit : no elt > maxNbBits */
-
-	/* there are several elements that are too large (at least 2) */
- {
- int totalCost = 0;
- const U32 baseCost = 1 << (largestBits - maxNbBits);
- U32 n = lastNonNull;
-
- while (huffNode[n].nbBits > maxNbBits) {
- totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits));
- huffNode[n].nbBits = (BYTE)maxNbBits;
- n--;
- } /* n stops at huffNode[n].nbBits <= maxNbBits */
- while (huffNode[n].nbBits == maxNbBits)
-			n--; /* n ends at the index of the smallest symbol using < maxNbBits */
-
- /* renorm totalCost */
- totalCost >>= (largestBits - maxNbBits); /* note : totalCost is necessarily a multiple of baseCost */
-
- /* repay normalized cost */
- {
- U32 const noSymbol = 0xF0F0F0F0;
- U32 rankLast[HUF_TABLELOG_MAX + 2];
- int pos;
-
- /* Get pos of last (smallest) symbol per rank */
- memset(rankLast, 0xF0, sizeof(rankLast));
- {
- U32 currNbBits = maxNbBits;
- for (pos = n; pos >= 0; pos--) {
- if (huffNode[pos].nbBits >= currNbBits)
- continue;
- currNbBits = huffNode[pos].nbBits; /* < maxNbBits */
- rankLast[maxNbBits - currNbBits] = pos;
- }
- }
-
- while (totalCost > 0) {
- U32 nBitsToDecrease = BIT_highbit32(totalCost) + 1;
- for (; nBitsToDecrease > 1; nBitsToDecrease--) {
- U32 highPos = rankLast[nBitsToDecrease];
- U32 lowPos = rankLast[nBitsToDecrease - 1];
- if (highPos == noSymbol)
- continue;
- if (lowPos == noSymbol)
- break;
- {
- U32 const highTotal = huffNode[highPos].count;
- U32 const lowTotal = 2 * huffNode[lowPos].count;
- if (highTotal <= lowTotal)
- break;
- }
- }
-				/* only triggered when no more rank 1 symbols are left => find the closest one (note : there is necessarily at least one !) */
- /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */
- while ((nBitsToDecrease <= HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol))
- nBitsToDecrease++;
- totalCost -= 1 << (nBitsToDecrease - 1);
- if (rankLast[nBitsToDecrease - 1] == noSymbol)
- rankLast[nBitsToDecrease - 1] = rankLast[nBitsToDecrease]; /* this rank is no longer empty */
- huffNode[rankLast[nBitsToDecrease]].nbBits++;
- if (rankLast[nBitsToDecrease] == 0) /* special case, reached largest symbol */
- rankLast[nBitsToDecrease] = noSymbol;
- else {
- rankLast[nBitsToDecrease]--;
- if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits - nBitsToDecrease)
- rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */
- }
- } /* while (totalCost > 0) */
-
-			while (totalCost < 0) { /* Sometimes, the cost correction overshoots */
-				if (rankLast[1] == noSymbol) { /* special case : no rank 1 symbol (using maxNbBits-1);
-								  let's create one from largest rank 0 (using maxNbBits) */
- while (huffNode[n].nbBits == maxNbBits)
- n--;
- huffNode[n + 1].nbBits--;
- rankLast[1] = n + 1;
- totalCost++;
- continue;
- }
- huffNode[rankLast[1] + 1].nbBits--;
- rankLast[1]++;
- totalCost++;
- }
- }
-	} /* there are several elements that are too large (at least 2) */
-
- return maxNbBits;
-}
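A small worked instance (illustrative) of the cost bookkeeping above:

/* largestBits = 12, maxNbBits = 11, baseCost = 1 << (12 - 11) = 2.
 * Two leaves at depth 12 are each capped to 11 bits, adding
 * baseCost - (1 << (12 - 12)) = 1 apiece, so totalCost = 2; the renorm
 * shift leaves totalCost = 2 >> 1 = 1. Repayment then takes one symbol
 * currently at maxNbBits - 1 = 10 bits and lengthens it to 11 bits,
 * subtracting 1 << (1 - 1) = 1 and settling totalCost at 0.
 */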
-
-typedef struct {
- U32 base;
- U32 curr;
-} rankPos;
-
-static void HUF_sort(nodeElt *huffNode, const U32 *count, U32 maxSymbolValue)
-{
- rankPos rank[32];
- U32 n;
-
- memset(rank, 0, sizeof(rank));
- for (n = 0; n <= maxSymbolValue; n++) {
- U32 r = BIT_highbit32(count[n] + 1);
- rank[r].base++;
- }
- for (n = 30; n > 0; n--)
- rank[n - 1].base += rank[n].base;
- for (n = 0; n < 32; n++)
- rank[n].curr = rank[n].base;
- for (n = 0; n <= maxSymbolValue; n++) {
- U32 const c = count[n];
- U32 const r = BIT_highbit32(c + 1) + 1;
- U32 pos = rank[r].curr++;
- while ((pos > rank[r].base) && (c > huffNode[pos - 1].count))
- huffNode[pos] = huffNode[pos - 1], pos--;
- huffNode[pos].count = c;
- huffNode[pos].byte = (BYTE)n;
- }
-}
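A worked instance (illustrative) of the bucketed sort above:

/* counts = {2, 5, 1, 0}  =>  BIT_highbit32(count + 1) = {1, 2, 1, 0}.
 * Symbols are first bucketed by that value (roughly log2 of the count),
 * buckets are laid out from most to least frequent, and the insertion step
 * orders each bucket, so huffNode[] ends up sorted by decreasing count:
 * byte 1 (count 5), byte 0 (count 2), byte 2 (count 1), byte 3 (count 0).
 */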
-
-/** HUF_buildCTable_wksp() :
- * Same as HUF_buildCTable(), but using externally allocated scratch buffer.
- * `workSpace` must be aligned on 4-byte boundaries, and be at least as large as a table of 1024 unsigned.
- */
-#define STARTNODE (HUF_SYMBOLVALUE_MAX + 1)
-typedef nodeElt huffNodeTable[2 * HUF_SYMBOLVALUE_MAX + 1 + 1];
-size_t HUF_buildCTable_wksp(HUF_CElt *tree, const U32 *count, U32 maxSymbolValue, U32 maxNbBits, void *workSpace, size_t wkspSize)
-{
- nodeElt *const huffNode0 = (nodeElt *)workSpace;
- nodeElt *const huffNode = huffNode0 + 1;
- U32 n, nonNullRank;
- int lowS, lowN;
- U16 nodeNb = STARTNODE;
- U32 nodeRoot;
-
- /* safety checks */
- if (wkspSize < sizeof(huffNodeTable))
- return ERROR(GENERIC); /* workSpace is not large enough */
- if (maxNbBits == 0)
- maxNbBits = HUF_TABLELOG_DEFAULT;
- if (maxSymbolValue > HUF_SYMBOLVALUE_MAX)
- return ERROR(GENERIC);
- memset(huffNode0, 0, sizeof(huffNodeTable));
-
- /* sort, decreasing order */
- HUF_sort(huffNode, count, maxSymbolValue);
-
- /* init for parents */
- nonNullRank = maxSymbolValue;
- while (huffNode[nonNullRank].count == 0)
- nonNullRank--;
- lowS = nonNullRank;
- nodeRoot = nodeNb + lowS - 1;
- lowN = nodeNb;
- huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS - 1].count;
- huffNode[lowS].parent = huffNode[lowS - 1].parent = nodeNb;
- nodeNb++;
- lowS -= 2;
- for (n = nodeNb; n <= nodeRoot; n++)
- huffNode[n].count = (U32)(1U << 30);
- huffNode0[0].count = (U32)(1U << 31); /* fake entry, strong barrier */
-
- /* create parents */
- while (nodeNb <= nodeRoot) {
- U32 n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
- U32 n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
- huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count;
- huffNode[n1].parent = huffNode[n2].parent = nodeNb;
- nodeNb++;
- }
-
- /* distribute weights (unlimited tree height) */
- huffNode[nodeRoot].nbBits = 0;
- for (n = nodeRoot - 1; n >= STARTNODE; n--)
- huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1;
- for (n = 0; n <= nonNullRank; n++)
- huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1;
-
- /* enforce maxTableLog */
- maxNbBits = HUF_setMaxHeight(huffNode, nonNullRank, maxNbBits);
-
- /* fill result into tree (val, nbBits) */
- {
- U16 nbPerRank[HUF_TABLELOG_MAX + 1] = {0};
- U16 valPerRank[HUF_TABLELOG_MAX + 1] = {0};
- if (maxNbBits > HUF_TABLELOG_MAX)
- return ERROR(GENERIC); /* check fit into table */
- for (n = 0; n <= nonNullRank; n++)
- nbPerRank[huffNode[n].nbBits]++;
-		/* determine starting value per rank */
- {
- U16 min = 0;
- for (n = maxNbBits; n > 0; n--) {
- valPerRank[n] = min; /* get starting value within each rank */
- min += nbPerRank[n];
- min >>= 1;
- }
- }
- for (n = 0; n <= maxSymbolValue; n++)
- tree[huffNode[n].byte].nbBits = huffNode[n].nbBits; /* push nbBits per symbol, symbol order */
- for (n = 0; n <= maxSymbolValue; n++)
- tree[n].val = valPerRank[tree[n].nbBits]++; /* assign value within rank, symbol order */
- }
-
- return maxNbBits;
-}
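A worked instance (illustrative) of the canonical value assignment above:

/* Four symbols with code lengths {1, 2, 3, 3} give nbPerRank[1] = 1,
 * nbPerRank[2] = 1, nbPerRank[3] = 2. Walking n = 3..1:
 *   valPerRank[3] = 0, min = (0 + 2) >> 1 = 1
 *   valPerRank[2] = 1, min = (1 + 1) >> 1 = 1
 *   valPerRank[1] = 1
 * so the assigned codes are 1, 01, 000 and 001 - a valid prefix-free set.
 */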
-
-static size_t HUF_estimateCompressedSize(HUF_CElt *CTable, const unsigned *count, unsigned maxSymbolValue)
-{
- size_t nbBits = 0;
- int s;
- for (s = 0; s <= (int)maxSymbolValue; ++s) {
- nbBits += CTable[s].nbBits * count[s];
- }
- return nbBits >> 3;
-}
-
-static int HUF_validateCTable(const HUF_CElt *CTable, const unsigned *count, unsigned maxSymbolValue)
-{
- int bad = 0;
- int s;
- for (s = 0; s <= (int)maxSymbolValue; ++s) {
- bad |= (count[s] != 0) & (CTable[s].nbBits == 0);
- }
- return !bad;
-}
-
-static void HUF_encodeSymbol(BIT_CStream_t *bitCPtr, U32 symbol, const HUF_CElt *CTable)
-{
- BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits);
-}
-
-size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }
-
-#define HUF_FLUSHBITS(s) BIT_flushBits(s)
-
-#define HUF_FLUSHBITS_1(stream) \
- if (sizeof((stream)->bitContainer) * 8 < HUF_TABLELOG_MAX * 2 + 7) \
- HUF_FLUSHBITS(stream)
-
-#define HUF_FLUSHBITS_2(stream) \
- if (sizeof((stream)->bitContainer) * 8 < HUF_TABLELOG_MAX * 4 + 7) \
- HUF_FLUSHBITS(stream)
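A rough reading of the conditional flushes above, assuming HUF_TABLELOG_MAX is 12 as in upstream zstd:

/* 64-bit bitContainer: 64 < 12*2+7 (31) and 64 < 12*4+7 (55) are both false,
 * so only the unconditional HUF_FLUSHBITS runs, i.e. a flush every 4 symbols.
 * 32-bit bitContainer: 32 < 55 holds, so HUF_FLUSHBITS_2 also flushes,
 * i.e. every 2 symbols - keeping the worst-case accumulation of
 * symbols * HUF_TABLELOG_MAX bits safely inside the container.
 */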
-
-size_t HUF_compress1X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable)
-{
- const BYTE *ip = (const BYTE *)src;
- BYTE *const ostart = (BYTE *)dst;
- BYTE *const oend = ostart + dstSize;
- BYTE *op = ostart;
- size_t n;
- BIT_CStream_t bitC;
-
- /* init */
- if (dstSize < 8)
- return 0; /* not enough space to compress */
- {
- size_t const initErr = BIT_initCStream(&bitC, op, oend - op);
- if (HUF_isError(initErr))
- return 0;
- }
-
-	n = srcSize & ~3; /* round srcSize down to a multiple of 4 */
- switch (srcSize & 3) {
- case 3: HUF_encodeSymbol(&bitC, ip[n + 2], CTable); HUF_FLUSHBITS_2(&bitC);
- fallthrough;
- case 2: HUF_encodeSymbol(&bitC, ip[n + 1], CTable); HUF_FLUSHBITS_1(&bitC);
- fallthrough;
- case 1: HUF_encodeSymbol(&bitC, ip[n + 0], CTable); HUF_FLUSHBITS(&bitC);
- fallthrough;
- case 0:
- default:;
- }
-
- for (; n > 0; n -= 4) { /* note : n&3==0 at this stage */
- HUF_encodeSymbol(&bitC, ip[n - 1], CTable);
- HUF_FLUSHBITS_1(&bitC);
- HUF_encodeSymbol(&bitC, ip[n - 2], CTable);
- HUF_FLUSHBITS_2(&bitC);
- HUF_encodeSymbol(&bitC, ip[n - 3], CTable);
- HUF_FLUSHBITS_1(&bitC);
- HUF_encodeSymbol(&bitC, ip[n - 4], CTable);
- HUF_FLUSHBITS(&bitC);
- }
-
- return BIT_closeCStream(&bitC);
-}
-
-size_t HUF_compress4X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable)
-{
- size_t const segmentSize = (srcSize + 3) / 4; /* first 3 segments */
- const BYTE *ip = (const BYTE *)src;
- const BYTE *const iend = ip + srcSize;
- BYTE *const ostart = (BYTE *)dst;
- BYTE *const oend = ostart + dstSize;
- BYTE *op = ostart;
-
- if (dstSize < 6 + 1 + 1 + 1 + 8)
- return 0; /* minimum space to compress successfully */
- if (srcSize < 12)
- return 0; /* no saving possible : too small input */
- op += 6; /* jumpTable */
-
- {
- CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, segmentSize, CTable));
- if (cSize == 0)
- return 0;
- ZSTD_writeLE16(ostart, (U16)cSize);
- op += cSize;
- }
-
- ip += segmentSize;
- {
- CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, segmentSize, CTable));
- if (cSize == 0)
- return 0;
- ZSTD_writeLE16(ostart + 2, (U16)cSize);
- op += cSize;
- }
-
- ip += segmentSize;
- {
- CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, segmentSize, CTable));
- if (cSize == 0)
- return 0;
- ZSTD_writeLE16(ostart + 4, (U16)cSize);
- op += cSize;
- }
-
- ip += segmentSize;
- {
- CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, iend - ip, CTable));
- if (cSize == 0)
- return 0;
- op += cSize;
- }
-
- return op - ostart;
-}
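The 6 bytes reserved above form the jump table; its layout, as the 4-stream decoder also assumes, is:

/*  bytes 0-1 : LE16 compressed size of stream 1
 *  bytes 2-3 : LE16 compressed size of stream 2
 *  bytes 4-5 : LE16 compressed size of stream 3
 *  bytes 6.. : the four compressed streams back to back; the size of
 *              stream 4 is whatever remains of the block.
 */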
-
-static size_t HUF_compressCTable_internal(BYTE *const ostart, BYTE *op, BYTE *const oend, const void *src, size_t srcSize, unsigned singleStream,
- const HUF_CElt *CTable)
-{
- size_t const cSize =
- singleStream ? HUF_compress1X_usingCTable(op, oend - op, src, srcSize, CTable) : HUF_compress4X_usingCTable(op, oend - op, src, srcSize, CTable);
- if (HUF_isError(cSize)) {
- return cSize;
- }
- if (cSize == 0) {
- return 0;
- } /* uncompressible */
- op += cSize;
- /* check compressibility */
- if ((size_t)(op - ostart) >= srcSize - 1) {
- return 0;
- }
- return op - ostart;
-}
-
-/* `workSpace` must be a table of at least 1024 unsigned */
-static size_t HUF_compress_internal(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog,
- unsigned singleStream, void *workSpace, size_t wkspSize, HUF_CElt *oldHufTable, HUF_repeat *repeat, int preferRepeat)
-{
- BYTE *const ostart = (BYTE *)dst;
- BYTE *const oend = ostart + dstSize;
- BYTE *op = ostart;
-
- U32 *count;
- size_t const countSize = sizeof(U32) * (HUF_SYMBOLVALUE_MAX + 1);
- HUF_CElt *CTable;
- size_t const CTableSize = sizeof(HUF_CElt) * (HUF_SYMBOLVALUE_MAX + 1);
-
- /* checks & inits */
- if (wkspSize < sizeof(huffNodeTable) + countSize + CTableSize)
- return ERROR(GENERIC);
- if (!srcSize)
- return 0; /* Uncompressed (note : 1 means rle, so first byte must be correct) */
- if (!dstSize)
- return 0; /* cannot fit within dst budget */
- if (srcSize > HUF_BLOCKSIZE_MAX)
-		return ERROR(srcSize_wrong); /* current block size limit */
- if (huffLog > HUF_TABLELOG_MAX)
- return ERROR(tableLog_tooLarge);
- if (!maxSymbolValue)
- maxSymbolValue = HUF_SYMBOLVALUE_MAX;
- if (!huffLog)
- huffLog = HUF_TABLELOG_DEFAULT;
-
- count = (U32 *)workSpace;
- workSpace = (BYTE *)workSpace + countSize;
- wkspSize -= countSize;
- CTable = (HUF_CElt *)workSpace;
- workSpace = (BYTE *)workSpace + CTableSize;
- wkspSize -= CTableSize;
-
-	/* Heuristic : If we don't need to check the validity of the old table, use the old table for small inputs */
- if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {
- return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable);
- }
-
- /* Scan input and build symbol stats */
- {
- CHECK_V_F(largest, FSE_count_wksp(count, &maxSymbolValue, (const BYTE *)src, srcSize, (U32 *)workSpace));
- if (largest == srcSize) {
- *ostart = ((const BYTE *)src)[0];
- return 1;
- } /* single symbol, rle */
- if (largest <= (srcSize >> 7) + 1)
- return 0; /* Fast heuristic : not compressible enough */
- }
-
- /* Check validity of previous table */
- if (repeat && *repeat == HUF_repeat_check && !HUF_validateCTable(oldHufTable, count, maxSymbolValue)) {
- *repeat = HUF_repeat_none;
- }
- /* Heuristic : use existing table for small inputs */
- if (preferRepeat && repeat && *repeat != HUF_repeat_none) {
- return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable);
- }
-
- /* Build Huffman Tree */
- huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
- {
- CHECK_V_F(maxBits, HUF_buildCTable_wksp(CTable, count, maxSymbolValue, huffLog, workSpace, wkspSize));
- huffLog = (U32)maxBits;
- /* Zero the unused symbols so we can check it for validity */
- memset(CTable + maxSymbolValue + 1, 0, CTableSize - (maxSymbolValue + 1) * sizeof(HUF_CElt));
- }
-
- /* Write table description header */
- {
- CHECK_V_F(hSize, HUF_writeCTable_wksp(op, dstSize, CTable, maxSymbolValue, huffLog, workSpace, wkspSize));
- /* Check if using the previous table will be beneficial */
- if (repeat && *repeat != HUF_repeat_none) {
- size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, count, maxSymbolValue);
- size_t const newSize = HUF_estimateCompressedSize(CTable, count, maxSymbolValue);
- if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
- return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable);
- }
- }
- /* Use the new table */
- if (hSize + 12ul >= srcSize) {
- return 0;
- }
- op += hSize;
- if (repeat) {
- *repeat = HUF_repeat_none;
- }
- if (oldHufTable) {
- memcpy(oldHufTable, CTable, CTableSize);
- } /* Save the new table */
- }
- return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, CTable);
-}
-
-size_t HUF_compress1X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace,
- size_t wkspSize)
-{
- return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, NULL, NULL, 0);
-}
-
-size_t HUF_compress1X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace,
- size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat, int preferRepeat)
-{
- return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, hufTable, repeat,
- preferRepeat);
-}
-
-size_t HUF_compress4X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace,
- size_t wkspSize)
-{
- return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, NULL, NULL, 0);
-}
-
-size_t HUF_compress4X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace,
- size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat, int preferRepeat)
-{
- return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, hufTable, repeat,
- preferRepeat);
-}
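For orientation, a hedged usage sketch of the 4-stream entry point above; the workspace is sized generously to satisfy the explicit size check in HUF_compress_internal() (huffNodeTable plus count and CTable scratch), and every name outside the HUF_* calls is illustrative only.

static U32 huf_demo_wksp[2048];	/* 8 KiB scratch, static to stay off the kernel stack */

static size_t huf_compress_demo(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
{
	size_t const cSize = HUF_compress4X_wksp(dst, dstCapacity, src, srcSize,
						 255 /* maxSymbolValue */, 11 /* huffLog */,
						 huf_demo_wksp, sizeof(huf_demo_wksp));
	if (HUF_isError(cSize))
		return cSize;	/* propagate the error code */
	return cSize;		/* 0 : not compressible, 1 : single repeated symbol */
}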
diff --git a/lib/zstd/huf_decompress.c b/lib/zstd/huf_decompress.c
deleted file mode 100644
index 6526482047dce9..00000000000000
--- a/lib/zstd/huf_decompress.c
+++ /dev/null
@@ -1,960 +0,0 @@
-/*
- * Huffman decoder, part of New Generation Entropy library
- * Copyright (C) 2013-2016, Yann Collet.
- *
- * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation. This program is dual-licensed; you may select
- * either version 2 of the GNU General Public License ("GPL") or BSD license
- * ("BSD").
- *
- * You can contact the author at :
- * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
- */
-
-/* **************************************************************
-* Compiler specifics
-****************************************************************/
-#define FORCE_INLINE static __always_inline
-
-/* **************************************************************
-* Dependencies
-****************************************************************/
-#include "bitstream.h" /* BIT_* */
-#include "fse.h" /* header compression */
-#include "huf.h"
-#include <linux/compiler.h>
-#include <linux/kernel.h>
-#include <linux/string.h> /* memcpy, memset */
-
-/* **************************************************************
-* Error Management
-****************************************************************/
-#define HUF_STATIC_ASSERT(c) \
- { \
- enum { HUF_static_assert = 1 / (int)(!!(c)) }; \
- } /* use only *after* variable declarations */
-
-/*-***************************/
-/* generic DTableDesc */
-/*-***************************/
-
-typedef struct {
- BYTE maxTableLog;
- BYTE tableType;
- BYTE tableLog;
- BYTE reserved;
-} DTableDesc;
-
-static DTableDesc HUF_getDTableDesc(const HUF_DTable *table)
-{
- DTableDesc dtd;
- memcpy(&dtd, table, sizeof(dtd));
- return dtd;
-}
-
-/*-***************************/
-/* single-symbol decoding */
-/*-***************************/
-
-typedef struct {
- BYTE byte;
- BYTE nbBits;
-} HUF_DEltX2; /* single-symbol decoding */
-
-size_t HUF_readDTableX2_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize)
-{
- U32 tableLog = 0;
- U32 nbSymbols = 0;
- size_t iSize;
- void *const dtPtr = DTable + 1;
- HUF_DEltX2 *const dt = (HUF_DEltX2 *)dtPtr;
-
- U32 *rankVal;
- BYTE *huffWeight;
- size_t spaceUsed32 = 0;
-
- rankVal = (U32 *)workspace + spaceUsed32;
- spaceUsed32 += HUF_TABLELOG_ABSOLUTEMAX + 1;
- huffWeight = (BYTE *)((U32 *)workspace + spaceUsed32);
- spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;
-
- if ((spaceUsed32 << 2) > workspaceSize)
- return ERROR(tableLog_tooLarge);
- workspace = (U32 *)workspace + spaceUsed32;
- workspaceSize -= (spaceUsed32 << 2);
-
- HUF_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
-	/* memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzers complain ... */
-
- iSize = HUF_readStats_wksp(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize, workspace, workspaceSize);
- if (HUF_isError(iSize))
- return iSize;
-
- /* Table header */
- {
- DTableDesc dtd = HUF_getDTableDesc(DTable);
- if (tableLog > (U32)(dtd.maxTableLog + 1))
- return ERROR(tableLog_tooLarge); /* DTable too small, Huffman tree cannot fit in */
- dtd.tableType = 0;
- dtd.tableLog = (BYTE)tableLog;
- memcpy(DTable, &dtd, sizeof(dtd));
- }
-
- /* Calculate starting value for each rank */
- {
- U32 n, nextRankStart = 0;
- for (n = 1; n < tableLog + 1; n++) {
- U32 const curr = nextRankStart;
- nextRankStart += (rankVal[n] << (n - 1));
- rankVal[n] = curr;
- }
- }
-
- /* fill DTable */
- {
- U32 n;
- for (n = 0; n < nbSymbols; n++) {
- U32 const w = huffWeight[n];
- U32 const length = (1 << w) >> 1;
- U32 u;
- HUF_DEltX2 D;
- D.byte = (BYTE)n;
- D.nbBits = (BYTE)(tableLog + 1 - w);
- for (u = rankVal[w]; u < rankVal[w] + length; u++)
- dt[u] = D;
- rankVal[w] += length;
- }
- }
-
- return iSize;
-}
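A worked instance (illustrative) of the table fill above:

/* With tableLog = 3, a symbol of weight w = 3 has nbBits = 3 + 1 - 3 = 1 and
 * occupies (1 << 3) >> 1 = 4 consecutive DTable cells, i.e.
 * 2^(tableLog - nbBits) entries, so every 3-bit lookup whose leading bit
 * matches its code decodes to that symbol.
 */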
-
-static BYTE HUF_decodeSymbolX2(BIT_DStream_t *Dstream, const HUF_DEltX2 *dt, const U32 dtLog)
-{
- size_t const val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
- BYTE const c = dt[val].byte;
- BIT_skipBits(Dstream, dt[val].nbBits);
- return c;
-}
-
-#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog)
-
-#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
- if (ZSTD_64bits() || (HUF_TABLELOG_MAX <= 12)) \
- HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
-
-#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
- if (ZSTD_64bits()) \
- HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
-
-FORCE_INLINE size_t HUF_decodeStreamX2(BYTE *p, BIT_DStream_t *const bitDPtr, BYTE *const pEnd, const HUF_DEltX2 *const dt, const U32 dtLog)
-{
- BYTE *const pStart = p;
-
- /* up to 4 symbols at a time */
- while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd - 4)) {
- HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
- HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
- HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
- HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
- }
-
- /* closer to the end */
- while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd))
- HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
-
- /* no more data to retrieve from bitstream, hence no need to reload */
- while (p < pEnd)
- HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
-
- return pEnd - pStart;
-}
-
-static size_t HUF_decompress1X2_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
-{
- BYTE *op = (BYTE *)dst;
- BYTE *const oend = op + dstSize;
- const void *dtPtr = DTable + 1;
- const HUF_DEltX2 *const dt = (const HUF_DEltX2 *)dtPtr;
- BIT_DStream_t bitD;
- DTableDesc const dtd = HUF_getDTableDesc(DTable);
- U32 const dtLog = dtd.tableLog;
-
- {
- size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);
- if (HUF_isError(errorCode))
- return errorCode;
- }
-
- HUF_decodeStreamX2(op, &bitD, oend, dt, dtLog);
-
- /* check */
- if (!BIT_endOfDStream(&bitD))
- return ERROR(corruption_detected);
-
- return dstSize;
-}
-
-size_t HUF_decompress1X2_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
-{
- DTableDesc dtd = HUF_getDTableDesc(DTable);
- if (dtd.tableType != 0)
- return ERROR(GENERIC);
- return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
-}
-
-size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable *DCtx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
-{
- const BYTE *ip = (const BYTE *)cSrc;
-
- size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize, workspace, workspaceSize);
- if (HUF_isError(hSize))
- return hSize;
- if (hSize >= cSrcSize)
- return ERROR(srcSize_wrong);
- ip += hSize;
- cSrcSize -= hSize;
-
- return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx);
-}
-
-static size_t HUF_decompress4X2_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
-{
- /* Check */
- if (cSrcSize < 10)
- return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
-
- {
- const BYTE *const istart = (const BYTE *)cSrc;
- BYTE *const ostart = (BYTE *)dst;
- BYTE *const oend = ostart + dstSize;
- const void *const dtPtr = DTable + 1;
- const HUF_DEltX2 *const dt = (const HUF_DEltX2 *)dtPtr;
-
- /* Init */
- BIT_DStream_t bitD1;
- BIT_DStream_t bitD2;
- BIT_DStream_t bitD3;
- BIT_DStream_t bitD4;
- size_t const length1 = ZSTD_readLE16(istart);
- size_t const length2 = ZSTD_readLE16(istart + 2);
- size_t const length3 = ZSTD_readLE16(istart + 4);
- size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
- const BYTE *const istart1 = istart + 6; /* jumpTable */
- const BYTE *const istart2 = istart1 + length1;
- const BYTE *const istart3 = istart2 + length2;
- const BYTE *const istart4 = istart3 + length3;
- const size_t segmentSize = (dstSize + 3) / 4;
- BYTE *const opStart2 = ostart + segmentSize;
- BYTE *const opStart3 = opStart2 + segmentSize;
- BYTE *const opStart4 = opStart3 + segmentSize;
- BYTE *op1 = ostart;
- BYTE *op2 = opStart2;
- BYTE *op3 = opStart3;
- BYTE *op4 = opStart4;
- U32 endSignal;
- DTableDesc const dtd = HUF_getDTableDesc(DTable);
- U32 const dtLog = dtd.tableLog;
-
- if (length4 > cSrcSize)
- return ERROR(corruption_detected); /* overflow */
- {
- size_t const errorCode = BIT_initDStream(&bitD1, istart1, length1);
- if (HUF_isError(errorCode))
- return errorCode;
- }
- {
- size_t const errorCode = BIT_initDStream(&bitD2, istart2, length2);
- if (HUF_isError(errorCode))
- return errorCode;
- }
- {
- size_t const errorCode = BIT_initDStream(&bitD3, istart3, length3);
- if (HUF_isError(errorCode))
- return errorCode;
- }
- {
- size_t const errorCode = BIT_initDStream(&bitD4, istart4, length4);
- if (HUF_isError(errorCode))
- return errorCode;
- }
-
- /* 16-32 symbols per loop (4-8 symbols per stream) */
- endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
- for (; (endSignal == BIT_DStream_unfinished) && (op4 < (oend - 7));) {
- HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
- HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
- HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
- HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
- HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
- HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
- HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
- HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
- HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
- HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
- HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
- HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
- HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
- HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
- HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
- HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
- endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
- }
-
- /* check corruption */
- if (op1 > opStart2)
- return ERROR(corruption_detected);
- if (op2 > opStart3)
- return ERROR(corruption_detected);
- if (op3 > opStart4)
- return ERROR(corruption_detected);
-		/* note : op4 is assumed to be already verified within the main loop */
-
- /* finish bitStreams one by one */
- HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
- HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
- HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
- HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog);
-
- /* check */
- endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
- if (!endSignal)
- return ERROR(corruption_detected);
-
- /* decoded size */
- return dstSize;
- }
-}
-
-size_t HUF_decompress4X2_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
-{
- DTableDesc dtd = HUF_getDTableDesc(DTable);
- if (dtd.tableType != 0)
- return ERROR(GENERIC);
- return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
-}
-
-size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
-{
- const BYTE *ip = (const BYTE *)cSrc;
-
- size_t const hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize, workspace, workspaceSize);
- if (HUF_isError(hSize))
- return hSize;
- if (hSize >= cSrcSize)
- return ERROR(srcSize_wrong);
- ip += hSize;
- cSrcSize -= hSize;
-
- return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx);
-}
-
-/* *************************/
-/* double-symbols decoding */
-/* *************************/
-typedef struct {
- U16 sequence;
- BYTE nbBits;
- BYTE length;
-} HUF_DEltX4; /* double-symbols decoding */
-
-typedef struct {
- BYTE symbol;
- BYTE weight;
-} sortedSymbol_t;
-
-/* HUF_fillDTableX4Level2() :
- * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */
-static void HUF_fillDTableX4Level2(HUF_DEltX4 *DTable, U32 sizeLog, const U32 consumed, const U32 *rankValOrigin, const int minWeight,
- const sortedSymbol_t *sortedSymbols, const U32 sortedListSize, U32 nbBitsBaseline, U16 baseSeq)
-{
- HUF_DEltX4 DElt;
- U32 rankVal[HUF_TABLELOG_MAX + 1];
-
- /* get pre-calculated rankVal */
- memcpy(rankVal, rankValOrigin, sizeof(rankVal));
-
- /* fill skipped values */
- if (minWeight > 1) {
- U32 i, skipSize = rankVal[minWeight];
- ZSTD_writeLE16(&(DElt.sequence), baseSeq);
- DElt.nbBits = (BYTE)(consumed);
- DElt.length = 1;
- for (i = 0; i < skipSize; i++)
- DTable[i] = DElt;
- }
-
- /* fill DTable */
- {
- U32 s;
- for (s = 0; s < sortedListSize; s++) { /* note : sortedSymbols already skipped */
- const U32 symbol = sortedSymbols[s].symbol;
- const U32 weight = sortedSymbols[s].weight;
- const U32 nbBits = nbBitsBaseline - weight;
- const U32 length = 1 << (sizeLog - nbBits);
- const U32 start = rankVal[weight];
- U32 i = start;
- const U32 end = start + length;
-
- ZSTD_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
- DElt.nbBits = (BYTE)(nbBits + consumed);
- DElt.length = 2;
- do {
- DTable[i++] = DElt;
- } while (i < end); /* since length >= 1 */
-
- rankVal[weight] += length;
- }
- }
-}
-
-typedef U32 rankVal_t[HUF_TABLELOG_MAX][HUF_TABLELOG_MAX + 1];
-typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1];
-
-static void HUF_fillDTableX4(HUF_DEltX4 *DTable, const U32 targetLog, const sortedSymbol_t *sortedList, const U32 sortedListSize, const U32 *rankStart,
- rankVal_t rankValOrigin, const U32 maxWeight, const U32 nbBitsBaseline)
-{
- U32 rankVal[HUF_TABLELOG_MAX + 1];
- const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */
- const U32 minBits = nbBitsBaseline - maxWeight;
- U32 s;
-
- memcpy(rankVal, rankValOrigin, sizeof(rankVal));
-
- /* fill DTable */
- for (s = 0; s < sortedListSize; s++) {
- const U16 symbol = sortedList[s].symbol;
- const U32 weight = sortedList[s].weight;
- const U32 nbBits = nbBitsBaseline - weight;
- const U32 start = rankVal[weight];
- const U32 length = 1 << (targetLog - nbBits);
-
- if (targetLog - nbBits >= minBits) { /* enough room for a second symbol */
- U32 sortedRank;
- int minWeight = nbBits + scaleLog;
- if (minWeight < 1)
- minWeight = 1;
- sortedRank = rankStart[minWeight];
- HUF_fillDTableX4Level2(DTable + start, targetLog - nbBits, nbBits, rankValOrigin[nbBits], minWeight, sortedList + sortedRank,
- sortedListSize - sortedRank, nbBitsBaseline, symbol);
- } else {
- HUF_DEltX4 DElt;
- ZSTD_writeLE16(&(DElt.sequence), symbol);
- DElt.nbBits = (BYTE)(nbBits);
- DElt.length = 1;
- {
- U32 const end = start + length;
- U32 u;
- for (u = start; u < end; u++)
- DTable[u] = DElt;
- }
- }
- rankVal[weight] += length;
- }
-}
-
-size_t HUF_readDTableX4_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize)
-{
- U32 tableLog, maxW, sizeOfSort, nbSymbols;
- DTableDesc dtd = HUF_getDTableDesc(DTable);
- U32 const maxTableLog = dtd.maxTableLog;
- size_t iSize;
- void *dtPtr = DTable + 1; /* force compiler to avoid strict-aliasing */
- HUF_DEltX4 *const dt = (HUF_DEltX4 *)dtPtr;
- U32 *rankStart;
-
- rankValCol_t *rankVal;
- U32 *rankStats;
- U32 *rankStart0;
- sortedSymbol_t *sortedSymbol;
- BYTE *weightList;
- size_t spaceUsed32 = 0;
-
- HUF_STATIC_ASSERT((sizeof(rankValCol_t) & 3) == 0);
-
- rankVal = (rankValCol_t *)((U32 *)workspace + spaceUsed32);
- spaceUsed32 += (sizeof(rankValCol_t) * HUF_TABLELOG_MAX) >> 2;
- rankStats = (U32 *)workspace + spaceUsed32;
- spaceUsed32 += HUF_TABLELOG_MAX + 1;
- rankStart0 = (U32 *)workspace + spaceUsed32;
- spaceUsed32 += HUF_TABLELOG_MAX + 2;
- sortedSymbol = (sortedSymbol_t *)((U32 *)workspace + spaceUsed32);
- spaceUsed32 += ALIGN(sizeof(sortedSymbol_t) * (HUF_SYMBOLVALUE_MAX + 1), sizeof(U32)) >> 2;
- weightList = (BYTE *)((U32 *)workspace + spaceUsed32);
- spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;
-
- if ((spaceUsed32 << 2) > workspaceSize)
- return ERROR(tableLog_tooLarge);
- workspace = (U32 *)workspace + spaceUsed32;
- workspaceSize -= (spaceUsed32 << 2);
-
- rankStart = rankStart0 + 1;
- memset(rankStats, 0, sizeof(U32) * (2 * HUF_TABLELOG_MAX + 2 + 1));
-
- HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(HUF_DTable)); /* if compiler fails here, assertion is wrong */
- if (maxTableLog > HUF_TABLELOG_MAX)
- return ERROR(tableLog_tooLarge);
-	/* memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzers complain ... */
-
- iSize = HUF_readStats_wksp(weightList, HUF_SYMBOLVALUE_MAX + 1, rankStats, &nbSymbols, &tableLog, src, srcSize, workspace, workspaceSize);
- if (HUF_isError(iSize))
- return iSize;
-
- /* check result */
- if (tableLog > maxTableLog)
- return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */
-
- /* find maxWeight */
- for (maxW = tableLog; rankStats[maxW] == 0; maxW--) {
- } /* necessarily finds a solution before 0 */
-
- /* Get start index of each weight */
- {
- U32 w, nextRankStart = 0;
- for (w = 1; w < maxW + 1; w++) {
- U32 curr = nextRankStart;
- nextRankStart += rankStats[w];
- rankStart[w] = curr;
- }
-		rankStart[0] = nextRankStart; /* put all weight-0 symbols at the end of the sorted list */
- sizeOfSort = nextRankStart;
- }
-
- /* sort symbols by weight */
- {
- U32 s;
- for (s = 0; s < nbSymbols; s++) {
- U32 const w = weightList[s];
- U32 const r = rankStart[w]++;
- sortedSymbol[r].symbol = (BYTE)s;
- sortedSymbol[r].weight = (BYTE)w;
- }
- rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */
- }
-
- /* Build rankVal */
- {
- U32 *const rankVal0 = rankVal[0];
- {
- int const rescale = (maxTableLog - tableLog) - 1; /* tableLog <= maxTableLog */
- U32 nextRankVal = 0;
- U32 w;
- for (w = 1; w < maxW + 1; w++) {
- U32 curr = nextRankVal;
- nextRankVal += rankStats[w] << (w + rescale);
- rankVal0[w] = curr;
- }
- }
- {
- U32 const minBits = tableLog + 1 - maxW;
- U32 consumed;
- for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) {
- U32 *const rankValPtr = rankVal[consumed];
- U32 w;
- for (w = 1; w < maxW + 1; w++) {
- rankValPtr[w] = rankVal0[w] >> consumed;
- }
- }
- }
- }
-
- HUF_fillDTableX4(dt, maxTableLog, sortedSymbol, sizeOfSort, rankStart0, rankVal, maxW, tableLog + 1);
-
- dtd.tableLog = (BYTE)maxTableLog;
- dtd.tableType = 1;
- memcpy(DTable, &dtd, sizeof(dtd));
- return iSize;
-}
-
-static U32 HUF_decodeSymbolX4(void *op, BIT_DStream_t *DStream, const HUF_DEltX4 *dt, const U32 dtLog)
-{
- size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
- memcpy(op, dt + val, 2);
- BIT_skipBits(DStream, dt[val].nbBits);
- return dt[val].length;
-}
-
-static U32 HUF_decodeLastSymbolX4(void *op, BIT_DStream_t *DStream, const HUF_DEltX4 *dt, const U32 dtLog)
-{
- size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
- memcpy(op, dt + val, 1);
- if (dt[val].length == 1)
- BIT_skipBits(DStream, dt[val].nbBits);
- else {
- if (DStream->bitsConsumed < (sizeof(DStream->bitContainer) * 8)) {
- BIT_skipBits(DStream, dt[val].nbBits);
- if (DStream->bitsConsumed > (sizeof(DStream->bitContainer) * 8))
- /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
- DStream->bitsConsumed = (sizeof(DStream->bitContainer) * 8);
- }
- }
- return 1;
-}
-
-#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
-
-#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
- if (ZSTD_64bits() || (HUF_TABLELOG_MAX <= 12)) \
- ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
-
-#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
- if (ZSTD_64bits()) \
- ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
-
-FORCE_INLINE size_t HUF_decodeStreamX4(BYTE *p, BIT_DStream_t *bitDPtr, BYTE *const pEnd, const HUF_DEltX4 *const dt, const U32 dtLog)
-{
- BYTE *const pStart = p;
-
- /* up to 8 symbols at a time */
- while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd - (sizeof(bitDPtr->bitContainer) - 1))) {
- HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
- HUF_DECODE_SYMBOLX4_1(p, bitDPtr);
- HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
- HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
- }
-
- /* closer to end : up to 2 symbols at a time */
- while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd - 2))
- HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
-
- while (p <= pEnd - 2)
- HUF_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */
-
- if (p < pEnd)
- p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
-
- return p - pStart;
-}
-
-static size_t HUF_decompress1X4_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
-{
- BIT_DStream_t bitD;
-
- /* Init */
- {
- size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);
- if (HUF_isError(errorCode))
- return errorCode;
- }
-
- /* decode */
- {
- BYTE *const ostart = (BYTE *)dst;
- BYTE *const oend = ostart + dstSize;
- const void *const dtPtr = DTable + 1; /* force compiler to not use strict-aliasing */
- const HUF_DEltX4 *const dt = (const HUF_DEltX4 *)dtPtr;
- DTableDesc const dtd = HUF_getDTableDesc(DTable);
- HUF_decodeStreamX4(ostart, &bitD, oend, dt, dtd.tableLog);
- }
-
- /* check */
- if (!BIT_endOfDStream(&bitD))
- return ERROR(corruption_detected);
-
- /* decoded size */
- return dstSize;
-}
-
-size_t HUF_decompress1X4_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
-{
- DTableDesc dtd = HUF_getDTableDesc(DTable);
- if (dtd.tableType != 1)
- return ERROR(GENERIC);
- return HUF_decompress1X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
-}
-
-size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable *DCtx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
-{
- const BYTE *ip = (const BYTE *)cSrc;
-
- size_t const hSize = HUF_readDTableX4_wksp(DCtx, cSrc, cSrcSize, workspace, workspaceSize);
- if (HUF_isError(hSize))
- return hSize;
- if (hSize >= cSrcSize)
- return ERROR(srcSize_wrong);
- ip += hSize;
- cSrcSize -= hSize;
-
- return HUF_decompress1X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx);
-}
-
-static size_t HUF_decompress4X4_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
-{
- if (cSrcSize < 10)
- return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
-
- {
- const BYTE *const istart = (const BYTE *)cSrc;
- BYTE *const ostart = (BYTE *)dst;
- BYTE *const oend = ostart + dstSize;
- const void *const dtPtr = DTable + 1;
- const HUF_DEltX4 *const dt = (const HUF_DEltX4 *)dtPtr;
-
- /* Init */
- BIT_DStream_t bitD1;
- BIT_DStream_t bitD2;
- BIT_DStream_t bitD3;
- BIT_DStream_t bitD4;
- size_t const length1 = ZSTD_readLE16(istart);
- size_t const length2 = ZSTD_readLE16(istart + 2);
- size_t const length3 = ZSTD_readLE16(istart + 4);
- size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
- const BYTE *const istart1 = istart + 6; /* jumpTable */
- const BYTE *const istart2 = istart1 + length1;
- const BYTE *const istart3 = istart2 + length2;
- const BYTE *const istart4 = istart3 + length3;
- size_t const segmentSize = (dstSize + 3) / 4;
- BYTE *const opStart2 = ostart + segmentSize;
- BYTE *const opStart3 = opStart2 + segmentSize;
- BYTE *const opStart4 = opStart3 + segmentSize;
- BYTE *op1 = ostart;
- BYTE *op2 = opStart2;
- BYTE *op3 = opStart3;
- BYTE *op4 = opStart4;
- U32 endSignal;
- DTableDesc const dtd = HUF_getDTableDesc(DTable);
- U32 const dtLog = dtd.tableLog;
-
- if (length4 > cSrcSize)
- return ERROR(corruption_detected); /* overflow */
- {
- size_t const errorCode = BIT_initDStream(&bitD1, istart1, length1);
- if (HUF_isError(errorCode))
- return errorCode;
- }
- {
- size_t const errorCode = BIT_initDStream(&bitD2, istart2, length2);
- if (HUF_isError(errorCode))
- return errorCode;
- }
- {
- size_t const errorCode = BIT_initDStream(&bitD3, istart3, length3);
- if (HUF_isError(errorCode))
- return errorCode;
- }
- {
- size_t const errorCode = BIT_initDStream(&bitD4, istart4, length4);
- if (HUF_isError(errorCode))
- return errorCode;
- }
-
- /* 16-32 symbols per loop (4-8 symbols per stream) */
- endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
- for (; (endSignal == BIT_DStream_unfinished) & (op4 < (oend - (sizeof(bitD4.bitContainer) - 1)));) {
- HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
- HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
- HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
- HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
- HUF_DECODE_SYMBOLX4_1(op1, &bitD1);
- HUF_DECODE_SYMBOLX4_1(op2, &bitD2);
- HUF_DECODE_SYMBOLX4_1(op3, &bitD3);
- HUF_DECODE_SYMBOLX4_1(op4, &bitD4);
- HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
- HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
- HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
- HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
- HUF_DECODE_SYMBOLX4_0(op1, &bitD1);
- HUF_DECODE_SYMBOLX4_0(op2, &bitD2);
- HUF_DECODE_SYMBOLX4_0(op3, &bitD3);
- HUF_DECODE_SYMBOLX4_0(op4, &bitD4);
-
- endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
- }
-
- /* check corruption */
- if (op1 > opStart2)
- return ERROR(corruption_detected);
- if (op2 > opStart3)
- return ERROR(corruption_detected);
- if (op3 > opStart4)
- return ERROR(corruption_detected);
- /* note : op4 already verified within main loop */
-
- /* finish bitStreams one by one */
- HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
- HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
- HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
- HUF_decodeStreamX4(op4, &bitD4, oend, dt, dtLog);
-
- /* check */
- {
- U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
- if (!endCheck)
- return ERROR(corruption_detected);
- }
-
- /* decoded size */
- return dstSize;
- }
-}
-
-size_t HUF_decompress4X4_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
-{
- DTableDesc dtd = HUF_getDTableDesc(DTable);
- if (dtd.tableType != 1)
- return ERROR(GENERIC);
- return HUF_decompress4X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
-}
-
-size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
-{
- const BYTE *ip = (const BYTE *)cSrc;
-
- size_t hSize = HUF_readDTableX4_wksp(dctx, cSrc, cSrcSize, workspace, workspaceSize);
- if (HUF_isError(hSize))
- return hSize;
- if (hSize >= cSrcSize)
- return ERROR(srcSize_wrong);
- ip += hSize;
- cSrcSize -= hSize;
-
- return HUF_decompress4X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx);
-}
-
-/* ********************************/
-/* Generic decompression selector */
-/* ********************************/
-
-size_t HUF_decompress1X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
-{
- DTableDesc const dtd = HUF_getDTableDesc(DTable);
- return dtd.tableType ? HUF_decompress1X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable)
- : HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable);
-}
-
-size_t HUF_decompress4X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
-{
- DTableDesc const dtd = HUF_getDTableDesc(DTable);
- return dtd.tableType ? HUF_decompress4X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable)
- : HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable);
-}
-
-typedef struct {
- U32 tableTime;
- U32 decode256Time;
-} algo_time_t;
-static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] = {
- /* single, double, quad */
- {{0, 0}, {1, 1}, {2, 2}}, /* Q==0 : impossible */
- {{0, 0}, {1, 1}, {2, 2}}, /* Q==1 : impossible */
- {{38, 130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */
- {{448, 128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */
- {{556, 128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */
- {{714, 128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */
- {{883, 128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */
- {{897, 128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */
- {{926, 128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */
- {{947, 128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */
- {{1107, 128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */
- {{1177, 128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */
- {{1242, 128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */
- {{1349, 128}, {2644, 106}, {5260, 106}}, /* Q ==13 : 81-87% */
- {{1455, 128}, {2422, 124}, {4174, 124}}, /* Q ==14 : 87-93% */
- {{722, 128}, {1891, 145}, {1936, 146}}, /* Q ==15 : 93-99% */
-};
-
-/** HUF_selectDecoder() :
-* Tells which decoder is likely to decode faster,
-* based on a set of pre-determined metrics.
-* @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 .
-* Assumption : 0 < cSrcSize < dstSize <= 128 KB */
-U32 HUF_selectDecoder(size_t dstSize, size_t cSrcSize)
-{
- /* decoder timing evaluation */
- U32 const Q = (U32)(cSrcSize * 16 / dstSize); /* Q < 16 since dstSize > cSrcSize */
- U32 const D256 = (U32)(dstSize >> 8);
- U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256);
- U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
- DTime1 += DTime1 >> 3; /* advantage to algorithm using less memory, for cache eviction */
-
- return DTime1 < DTime0;
-}
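A worked instance (illustrative) of the heuristic above:

/* dstSize = 64 KB, cSrcSize = 32 KB  =>  Q = 8, D256 = 256.
 * DTime0 = 926 + 128 * 256 = 33694 (single-symbol decoder)
 * DTime1 = 1613 + 75 * 256 = 20813, plus the >>3 penalty (2601) = 23414.
 * 23414 < 33694, so the function returns 1 and the X4 decoder is selected.
 */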
-
-typedef size_t (*decompressionAlgo)(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize);
-
-size_t HUF_decompress4X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
-{
- /* validation checks */
- if (dstSize == 0)
- return ERROR(dstSize_tooSmall);
- if (cSrcSize > dstSize)
- return ERROR(corruption_detected); /* invalid */
- if (cSrcSize == dstSize) {
- memcpy(dst, cSrc, dstSize);
- return dstSize;
- } /* not compressed */
- if (cSrcSize == 1) {
- memset(dst, *(const BYTE *)cSrc, dstSize);
- return dstSize;
- } /* RLE */
-
- {
- U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
- return algoNb ? HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize)
- : HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize);
- }
-}
-
-size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
-{
- /* validation checks */
- if (dstSize == 0)
- return ERROR(dstSize_tooSmall);
- if ((cSrcSize >= dstSize) || (cSrcSize <= 1))
- return ERROR(corruption_detected); /* invalid */
-
- {
- U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
- return algoNb ? HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize)
- : HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize);
- }
-}
-
-size_t HUF_decompress1X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
-{
- /* validation checks */
- if (dstSize == 0)
- return ERROR(dstSize_tooSmall);
- if (cSrcSize > dstSize)
- return ERROR(corruption_detected); /* invalid */
- if (cSrcSize == dstSize) {
- memcpy(dst, cSrc, dstSize);
- return dstSize;
- } /* not compressed */
- if (cSrcSize == 1) {
- memset(dst, *(const BYTE *)cSrc, dstSize);
- return dstSize;
- } /* RLE */
-
- {
- U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
- return algoNb ? HUF_decompress1X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize)
- : HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize);
- }
-}
diff --git a/lib/zstd/mem.h b/lib/zstd/mem.h
deleted file mode 100644
index 93d7a2c377fe3c..00000000000000
--- a/lib/zstd/mem.h
+++ /dev/null
@@ -1,151 +0,0 @@
-/**
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of https://github.com/facebook/zstd.
- * An additional grant of patent rights can be found in the PATENTS file in the
- * same directory.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation. This program is dual-licensed; you may select
- * either version 2 of the GNU General Public License ("GPL") or BSD license
- * ("BSD").
- */
-
-#ifndef MEM_H_MODULE
-#define MEM_H_MODULE
-
-/*-****************************************
-* Dependencies
-******************************************/
-#include <asm/unaligned.h>
-#include <linux/string.h> /* memcpy */
-#include <linux/types.h> /* size_t, ptrdiff_t */
-
-/*-****************************************
-* Compiler specifics
-******************************************/
-#define ZSTD_STATIC static inline
-
-/*-**************************************************************
-* Basic Types
-*****************************************************************/
-typedef uint8_t BYTE;
-typedef uint16_t U16;
-typedef int16_t S16;
-typedef uint32_t U32;
-typedef int32_t S32;
-typedef uint64_t U64;
-typedef int64_t S64;
-typedef ptrdiff_t iPtrDiff;
-typedef uintptr_t uPtrDiff;
-
-/*-**************************************************************
-* Memory I/O
-*****************************************************************/
-ZSTD_STATIC unsigned ZSTD_32bits(void) { return sizeof(size_t) == 4; }
-ZSTD_STATIC unsigned ZSTD_64bits(void) { return sizeof(size_t) == 8; }
-
-#if defined(__LITTLE_ENDIAN)
-#define ZSTD_LITTLE_ENDIAN 1
-#else
-#define ZSTD_LITTLE_ENDIAN 0
-#endif
-
-ZSTD_STATIC unsigned ZSTD_isLittleEndian(void) { return ZSTD_LITTLE_ENDIAN; }
-
-ZSTD_STATIC U16 ZSTD_read16(const void *memPtr) { return get_unaligned((const U16 *)memPtr); }
-
-ZSTD_STATIC U32 ZSTD_read32(const void *memPtr) { return get_unaligned((const U32 *)memPtr); }
-
-ZSTD_STATIC U64 ZSTD_read64(const void *memPtr) { return get_unaligned((const U64 *)memPtr); }
-
-ZSTD_STATIC size_t ZSTD_readST(const void *memPtr) { return get_unaligned((const size_t *)memPtr); }
-
-ZSTD_STATIC void ZSTD_write16(void *memPtr, U16 value) { put_unaligned(value, (U16 *)memPtr); }
-
-ZSTD_STATIC void ZSTD_write32(void *memPtr, U32 value) { put_unaligned(value, (U32 *)memPtr); }
-
-ZSTD_STATIC void ZSTD_write64(void *memPtr, U64 value) { put_unaligned(value, (U64 *)memPtr); }
-
-/*=== Little endian r/w ===*/
-
-ZSTD_STATIC U16 ZSTD_readLE16(const void *memPtr) { return get_unaligned_le16(memPtr); }
-
-ZSTD_STATIC void ZSTD_writeLE16(void *memPtr, U16 val) { put_unaligned_le16(val, memPtr); }
-
-ZSTD_STATIC U32 ZSTD_readLE24(const void *memPtr) { return ZSTD_readLE16(memPtr) + (((const BYTE *)memPtr)[2] << 16); }
-
-ZSTD_STATIC void ZSTD_writeLE24(void *memPtr, U32 val)
-{
- ZSTD_writeLE16(memPtr, (U16)val);
- ((BYTE *)memPtr)[2] = (BYTE)(val >> 16);
-}
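A quick round-trip check (illustrative) of the 24-bit helpers above:

/* ZSTD_writeLE24(p, 0x00ABCDEF) stores the bytes {0xEF, 0xCD, 0xAB};
 * ZSTD_readLE24(p) then returns 0xCDEF + (0xAB << 16) = 0x00ABCDEF.
 */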
-
-ZSTD_STATIC U32 ZSTD_readLE32(const void *memPtr) { return get_unaligned_le32(memPtr); }
-
-ZSTD_STATIC void ZSTD_writeLE32(void *memPtr, U32 val32) { put_unaligned_le32(val32, memPtr); }
-
-ZSTD_STATIC U64 ZSTD_readLE64(const void *memPtr) { return get_unaligned_le64(memPtr); }
-
-ZSTD_STATIC void ZSTD_writeLE64(void *memPtr, U64 val64) { put_unaligned_le64(val64, memPtr); }
-
-ZSTD_STATIC size_t ZSTD_readLEST(const void *memPtr)
-{
- if (ZSTD_32bits())
- return (size_t)ZSTD_readLE32(memPtr);
- else
- return (size_t)ZSTD_readLE64(memPtr);
-}
-
-ZSTD_STATIC void ZSTD_writeLEST(void *memPtr, size_t val)
-{
- if (ZSTD_32bits())
- ZSTD_writeLE32(memPtr, (U32)val);
- else
- ZSTD_writeLE64(memPtr, (U64)val);
-}
-
-/*=== Big endian r/w ===*/
-
-ZSTD_STATIC U32 ZSTD_readBE32(const void *memPtr) { return get_unaligned_be32(memPtr); }
-
-ZSTD_STATIC void ZSTD_writeBE32(void *memPtr, U32 val32) { put_unaligned_be32(val32, memPtr); }
-
-ZSTD_STATIC U64 ZSTD_readBE64(const void *memPtr) { return get_unaligned_be64(memPtr); }
-
-ZSTD_STATIC void ZSTD_writeBE64(void *memPtr, U64 val64) { put_unaligned_be64(val64, memPtr); }
-
-ZSTD_STATIC size_t ZSTD_readBEST(const void *memPtr)
-{
- if (ZSTD_32bits())
- return (size_t)ZSTD_readBE32(memPtr);
- else
- return (size_t)ZSTD_readBE64(memPtr);
-}
-
-ZSTD_STATIC void ZSTD_writeBEST(void *memPtr, size_t val)
-{
- if (ZSTD_32bits())
- ZSTD_writeBE32(memPtr, (U32)val);
- else
- ZSTD_writeBE64(memPtr, (U64)val);
-}
-
-/* function safe only for comparisons */
-ZSTD_STATIC U32 ZSTD_readMINMATCH(const void *memPtr, U32 length)
-{
- switch (length) {
- default:
- case 4: return ZSTD_read32(memPtr);
- case 3:
- if (ZSTD_isLittleEndian())
- return ZSTD_read32(memPtr) << 8;
- else
- return ZSTD_read32(memPtr) >> 8;
- }
-}
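The 3-byte case above keeps exactly the first three bytes in memory, which is why the result is only safe for equality tests:

/* Little-endian: ZSTD_read32() places byte 0 in the low bits, so "<< 8"
 * shifts byte 3 out and keeps bytes 0-2. Big-endian: byte 0 sits in the
 * high bits, so ">> 8" drops byte 3 instead. Either way, two 3-byte
 * sequences compare equal iff their first three bytes match.
 */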
-
-#endif /* MEM_H_MODULE */
diff --git a/lib/zstd/zstd_common.c b/lib/zstd/zstd_common.c
deleted file mode 100644
index a282624ee1553c..00000000000000
--- a/lib/zstd/zstd_common.c
+++ /dev/null
@@ -1,75 +0,0 @@
-/**
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of https://github.com/facebook/zstd.
- * An additional grant of patent rights can be found in the PATENTS file in the
- * same directory.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation. This program is dual-licensed; you may select
- * either version 2 of the GNU General Public License ("GPL") or BSD license
- * ("BSD").
- */
-
-/*-*************************************
-* Dependencies
-***************************************/
-#include "error_private.h"
-#include "zstd_internal.h" /* declaration of ZSTD_isError, ZSTD_getErrorName, ZSTD_getErrorCode, ZSTD_getErrorString, ZSTD_versionNumber */
-#include <linux/kernel.h>
-
-/*=**************************************************************
-* Custom allocator
-****************************************************************/
-
-#define stack_push(stack, size) \
- ({ \
- void *const ptr = ZSTD_PTR_ALIGN((stack)->ptr); \
- (stack)->ptr = (char *)ptr + (size); \
- (stack)->ptr <= (stack)->end ? ptr : NULL; \
- })
-
-ZSTD_customMem ZSTD_initStack(void *workspace, size_t workspaceSize)
-{
- ZSTD_customMem stackMem = {ZSTD_stackAlloc, ZSTD_stackFree, workspace};
- ZSTD_stack *stack = (ZSTD_stack *)workspace;
- /* Verify preconditions */
- if (!workspace || workspaceSize < sizeof(ZSTD_stack) || workspace != ZSTD_PTR_ALIGN(workspace)) {
- ZSTD_customMem error = {NULL, NULL, NULL};
- return error;
- }
- /* Initialize the stack */
- stack->ptr = workspace;
- stack->end = (char *)workspace + workspaceSize;
- stack_push(stack, sizeof(ZSTD_stack));
- return stackMem;
-}
-
-void *ZSTD_stackAllocAll(void *opaque, size_t *size)
-{
- ZSTD_stack *stack = (ZSTD_stack *)opaque;
- *size = (BYTE const *)stack->end - (BYTE *)ZSTD_PTR_ALIGN(stack->ptr);
- return stack_push(stack, *size);
-}
-
-void *ZSTD_stackAlloc(void *opaque, size_t size)
-{
- ZSTD_stack *stack = (ZSTD_stack *)opaque;
- return stack_push(stack, size);
-}
-void ZSTD_stackFree(void *opaque, void *address)
-{
- (void)opaque;
- (void)address;
-}
-
-void *ZSTD_malloc(size_t size, ZSTD_customMem customMem) { return customMem.customAlloc(customMem.opaque, size); }
-
-void ZSTD_free(void *ptr, ZSTD_customMem customMem)
-{
- if (ptr != NULL)
- customMem.customFree(customMem.opaque, ptr);
-}
diff --git a/lib/zstd/zstd_compress_module.c b/lib/zstd/zstd_compress_module.c
new file mode 100644
index 00000000000000..65548a4bb93410
--- /dev/null
+++ b/lib/zstd/zstd_compress_module.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
+/*
+ * Copyright (c) Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/zstd.h>
+
+#include "common/zstd_deps.h"
+#include "common/zstd_internal.h"
+
+#define ZSTD_FORWARD_IF_ERR(ret) \
+ do { \
+ size_t const __ret = (ret); \
+ if (ZSTD_isError(__ret)) \
+ return __ret; \
+ } while (0)
+
+static size_t zstd_cctx_init(zstd_cctx *cctx, const zstd_parameters *parameters,
+ unsigned long long pledged_src_size)
+{
+ ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_reset(
+ cctx, ZSTD_reset_session_and_parameters));
+ ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setPledgedSrcSize(
+ cctx, pledged_src_size));
+ ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
+ cctx, ZSTD_c_windowLog, parameters->cParams.windowLog));
+ ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
+ cctx, ZSTD_c_hashLog, parameters->cParams.hashLog));
+ ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
+ cctx, ZSTD_c_chainLog, parameters->cParams.chainLog));
+ ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
+ cctx, ZSTD_c_searchLog, parameters->cParams.searchLog));
+ ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
+ cctx, ZSTD_c_minMatch, parameters->cParams.minMatch));
+ ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
+ cctx, ZSTD_c_targetLength, parameters->cParams.targetLength));
+ ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
+ cctx, ZSTD_c_strategy, parameters->cParams.strategy));
+ ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
+ cctx, ZSTD_c_contentSizeFlag, parameters->fParams.contentSizeFlag));
+ ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
+ cctx, ZSTD_c_checksumFlag, parameters->fParams.checksumFlag));
+ ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
+ cctx, ZSTD_c_dictIDFlag, !parameters->fParams.noDictIDFlag));
+ return 0;
+}
+
+int zstd_min_clevel(void)
+{
+ return ZSTD_minCLevel();
+}
+EXPORT_SYMBOL(zstd_min_clevel);
+
+int zstd_max_clevel(void)
+{
+ return ZSTD_maxCLevel();
+}
+EXPORT_SYMBOL(zstd_max_clevel);
+
+size_t zstd_compress_bound(size_t src_size)
+{
+ return ZSTD_compressBound(src_size);
+}
+EXPORT_SYMBOL(zstd_compress_bound);
+
+zstd_parameters zstd_get_params(int level,
+ unsigned long long estimated_src_size)
+{
+ return ZSTD_getParams(level, estimated_src_size, 0);
+}
+EXPORT_SYMBOL(zstd_get_params);
+
+size_t zstd_cctx_workspace_bound(const zstd_compression_parameters *cparams)
+{
+ return ZSTD_estimateCCtxSize_usingCParams(*cparams);
+}
+EXPORT_SYMBOL(zstd_cctx_workspace_bound);
+
+zstd_cctx *zstd_init_cctx(void *workspace, size_t workspace_size)
+{
+ if (workspace == NULL)
+ return NULL;
+ return ZSTD_initStaticCCtx(workspace, workspace_size);
+}
+EXPORT_SYMBOL(zstd_init_cctx);
+
+size_t zstd_compress_cctx(zstd_cctx *cctx, void *dst, size_t dst_capacity,
+ const void *src, size_t src_size, const zstd_parameters *parameters)
+{
+ ZSTD_FORWARD_IF_ERR(zstd_cctx_init(cctx, parameters, src_size));
+ return ZSTD_compress2(cctx, dst, dst_capacity, src, src_size);
+}
+EXPORT_SYMBOL(zstd_compress_cctx);
+
+size_t zstd_cstream_workspace_bound(const zstd_compression_parameters *cparams)
+{
+ return ZSTD_estimateCStreamSize_usingCParams(*cparams);
+}
+EXPORT_SYMBOL(zstd_cstream_workspace_bound);
+
+zstd_cstream *zstd_init_cstream(const zstd_parameters *parameters,
+ unsigned long long pledged_src_size, void *workspace, size_t workspace_size)
+{
+ zstd_cstream *cstream;
+
+ if (workspace == NULL)
+ return NULL;
+
+ cstream = ZSTD_initStaticCStream(workspace, workspace_size);
+ if (cstream == NULL)
+ return NULL;
+
+ /* 0 means unknown in linux zstd API but means 0 in new zstd API */
+ if (pledged_src_size == 0)
+ pledged_src_size = ZSTD_CONTENTSIZE_UNKNOWN;
+
+ if (ZSTD_isError(zstd_cctx_init(cstream, parameters, pledged_src_size)))
+ return NULL;
+
+ return cstream;
+}
+EXPORT_SYMBOL(zstd_init_cstream);
+
+size_t zstd_reset_cstream(zstd_cstream *cstream,
+ unsigned long long pledged_src_size)
+{
+ return ZSTD_resetCStream(cstream, pledged_src_size);
+}
+EXPORT_SYMBOL(zstd_reset_cstream);
+
+size_t zstd_compress_stream(zstd_cstream *cstream, zstd_out_buffer *output,
+ zstd_in_buffer *input)
+{
+ return ZSTD_compressStream(cstream, output, input);
+}
+EXPORT_SYMBOL(zstd_compress_stream);
+
+size_t zstd_flush_stream(zstd_cstream *cstream, zstd_out_buffer *output)
+{
+ return ZSTD_flushStream(cstream, output);
+}
+EXPORT_SYMBOL(zstd_flush_stream);
+
+size_t zstd_end_stream(zstd_cstream *cstream, zstd_out_buffer *output)
+{
+ return ZSTD_endStream(cstream, output);
+}
+EXPORT_SYMBOL(zstd_end_stream);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Zstd Compressor");
diff --git a/lib/zstd/zstd_decompress_module.c b/lib/zstd/zstd_decompress_module.c
new file mode 100644
index 00000000000000..f4ed952ed4852a
--- /dev/null
+++ b/lib/zstd/zstd_decompress_module.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
+/*
+ * Copyright (c) Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/zstd.h>
+
+#include "common/zstd_deps.h"
+
+/* Common symbols. zstd_compress must depend on zstd_decompress. */
+
+unsigned int zstd_is_error(size_t code)
+{
+ return ZSTD_isError(code);
+}
+EXPORT_SYMBOL(zstd_is_error);
+
+zstd_error_code zstd_get_error_code(size_t code)
+{
+ return ZSTD_getErrorCode(code);
+}
+EXPORT_SYMBOL(zstd_get_error_code);
+
+const char *zstd_get_error_name(size_t code)
+{
+ return ZSTD_getErrorName(code);
+}
+EXPORT_SYMBOL(zstd_get_error_name);
+
+/* Decompression symbols. */
+
+size_t zstd_dctx_workspace_bound(void)
+{
+ return ZSTD_estimateDCtxSize();
+}
+EXPORT_SYMBOL(zstd_dctx_workspace_bound);
+
+zstd_dctx *zstd_init_dctx(void *workspace, size_t workspace_size)
+{
+ if (workspace == NULL)
+ return NULL;
+ return ZSTD_initStaticDCtx(workspace, workspace_size);
+}
+EXPORT_SYMBOL(zstd_init_dctx);
+
+size_t zstd_decompress_dctx(zstd_dctx *dctx, void *dst, size_t dst_capacity,
+ const void *src, size_t src_size)
+{
+ return ZSTD_decompressDCtx(dctx, dst, dst_capacity, src, src_size);
+}
+EXPORT_SYMBOL(zstd_decompress_dctx);
+
+size_t zstd_dstream_workspace_bound(size_t max_window_size)
+{
+ return ZSTD_estimateDStreamSize(max_window_size);
+}
+EXPORT_SYMBOL(zstd_dstream_workspace_bound);
+
+zstd_dstream *zstd_init_dstream(size_t max_window_size, void *workspace,
+ size_t workspace_size)
+{
+ if (workspace == NULL)
+ return NULL;
+ (void)max_window_size;
+ return ZSTD_initStaticDStream(workspace, workspace_size);
+}
+EXPORT_SYMBOL(zstd_init_dstream);
+
+size_t zstd_reset_dstream(zstd_dstream *dstream)
+{
+ return ZSTD_resetDStream(dstream);
+}
+EXPORT_SYMBOL(zstd_reset_dstream);
+
+size_t zstd_decompress_stream(zstd_dstream *dstream, zstd_out_buffer *output,
+ zstd_in_buffer *input)
+{
+ return ZSTD_decompressStream(dstream, output, input);
+}
+EXPORT_SYMBOL(zstd_decompress_stream);
+
+size_t zstd_find_frame_compressed_size(const void *src, size_t src_size)
+{
+ return ZSTD_findFrameCompressedSize(src, src_size);
+}
+EXPORT_SYMBOL(zstd_find_frame_compressed_size);
+
+size_t zstd_get_frame_header(zstd_frame_header *header, const void *src,
+ size_t src_size)
+{
+ return ZSTD_getFrameHeader(header, src, src_size);
+}
+EXPORT_SYMBOL(zstd_get_frame_header);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Zstd Decompressor");
diff --git a/lib/zstd/zstd_internal.h b/lib/zstd/zstd_internal.h
deleted file mode 100644
index dac753397f868d..00000000000000
--- a/lib/zstd/zstd_internal.h
+++ /dev/null
@@ -1,273 +0,0 @@
-/**
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of https://github.com/facebook/zstd.
- * An additional grant of patent rights can be found in the PATENTS file in the
- * same directory.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation. This program is dual-licensed; you may select
- * either version 2 of the GNU General Public License ("GPL") or BSD license
- * ("BSD").
- */
-
-#ifndef ZSTD_CCOMMON_H_MODULE
-#define ZSTD_CCOMMON_H_MODULE
-
-/*-*******************************************************
-* Compiler specifics
-*********************************************************/
-#define FORCE_INLINE static __always_inline
-#define FORCE_NOINLINE static noinline
-
-/*-*************************************
-* Dependencies
-***************************************/
-#include "error_private.h"
-#include "mem.h"
-#include <linux/compiler.h>
-#include <linux/kernel.h>
-#include <linux/xxhash.h>
-#include <linux/zstd.h>
-
-/*-*************************************
-* shared macros
-***************************************/
-#define MIN(a, b) ((a) < (b) ? (a) : (b))
-#define MAX(a, b) ((a) > (b) ? (a) : (b))
-#define CHECK_F(f) \
- { \
- size_t const errcod = f; \
- if (ERR_isError(errcod)) \
- return errcod; \
- } /* check and Forward error code */
-#define CHECK_E(f, e) \
- { \
- size_t const errcod = f; \
- if (ERR_isError(errcod)) \
- return ERROR(e); \
- } /* check and send Error code */
-#define ZSTD_STATIC_ASSERT(c) \
- { \
- enum { ZSTD_static_assert = 1 / (int)(!!(c)) }; \
- }
-
-/*-*************************************
-* Common constants
-***************************************/
-#define ZSTD_OPT_NUM (1 << 12)
-#define ZSTD_DICT_MAGIC 0xEC30A437 /* v0.7+ */
-
-#define ZSTD_REP_NUM 3 /* number of repcodes */
-#define ZSTD_REP_CHECK (ZSTD_REP_NUM) /* number of repcodes to check by the optimal parser */
-#define ZSTD_REP_MOVE (ZSTD_REP_NUM - 1)
-#define ZSTD_REP_MOVE_OPT (ZSTD_REP_NUM)
-static const U32 repStartValue[ZSTD_REP_NUM] = {1, 4, 8};
-
-#define KB *(1 << 10)
-#define MB *(1 << 20)
-#define GB *(1U << 30)
-
-#define BIT7 128
-#define BIT6 64
-#define BIT5 32
-#define BIT4 16
-#define BIT1 2
-#define BIT0 1
-
-#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10
-static const size_t ZSTD_fcs_fieldSize[4] = {0, 2, 4, 8};
-static const size_t ZSTD_did_fieldSize[4] = {0, 1, 2, 4};
-
-#define ZSTD_BLOCKHEADERSIZE 3 /* C standard doesn't allow `static const` variable to be init using another `static const` variable */
-static const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
-typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;
-
-#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
-#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */
-
-#define HufLog 12
-typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e;
-
-#define LONGNBSEQ 0x7F00
-
-#define MINMATCH 3
-#define EQUAL_READ32 4
-
-#define Litbits 8
-#define MaxLit ((1 << Litbits) - 1)
-#define MaxML 52
-#define MaxLL 35
-#define MaxOff 28
-#define MaxSeq MAX(MaxLL, MaxML) /* Assumption : MaxOff < MaxLL,MaxML */
-#define MLFSELog 9
-#define LLFSELog 9
-#define OffFSELog 8
-
-static const U32 LL_bits[MaxLL + 1] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
-static const S16 LL_defaultNorm[MaxLL + 1] = {4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, -1, -1, -1, -1};
-#define LL_DEFAULTNORMLOG 6 /* for static allocation */
-static const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG;
-
-static const U32 ML_bits[MaxML + 1] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
-static const S16 ML_defaultNorm[MaxML + 1] = {1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1};
-#define ML_DEFAULTNORMLOG 6 /* for static allocation */
-static const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG;
-
-static const S16 OF_defaultNorm[MaxOff + 1] = {1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1};
-#define OF_DEFAULTNORMLOG 5 /* for static allocation */
-static const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;
-
-/*-*******************************************
-* Shared functions to include for inlining
-*********************************************/
-ZSTD_STATIC void ZSTD_copy8(void *dst, const void *src) {
- /*
- * zstd relies heavily on gcc being able to analyze and inline this
- * memcpy() call, since it is called in a tight loop. Preboot mode
- * is compiled in freestanding mode, which stops gcc from analyzing
- * memcpy(). Use __builtin_memcpy() to tell gcc to analyze this as a
- * regular memcpy().
- */
- __builtin_memcpy(dst, src, 8);
-}
-/*! ZSTD_wildcopy() :
-* custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */
-#define WILDCOPY_OVERLENGTH 8
-ZSTD_STATIC void ZSTD_wildcopy(void *dst, const void *src, ptrdiff_t length)
-{
- const BYTE* ip = (const BYTE*)src;
- BYTE* op = (BYTE*)dst;
- BYTE* const oend = op + length;
-#if defined(GCC_VERSION) && GCC_VERSION >= 70000 && GCC_VERSION < 70200
- /*
- * Work around https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81388.
- * Avoid the bad case where the loop only runs once by handling the
- * special case separately. This doesn't trigger the bug because it
- * doesn't involve pointer/integer overflow.
- */
- if (length <= 8)
- return ZSTD_copy8(dst, src);
-#endif
- do {
- ZSTD_copy8(op, ip);
- op += 8;
- ip += 8;
- } while (op < oend);
-}
-
-/*-*******************************************
-* Private interfaces
-*********************************************/
-typedef struct ZSTD_stats_s ZSTD_stats_t;
-
-typedef struct {
- U32 off;
- U32 len;
-} ZSTD_match_t;
-
-typedef struct {
- U32 price;
- U32 off;
- U32 mlen;
- U32 litlen;
- U32 rep[ZSTD_REP_NUM];
-} ZSTD_optimal_t;
-
-typedef struct seqDef_s {
- U32 offset;
- U16 litLength;
- U16 matchLength;
-} seqDef;
-
-typedef struct {
- seqDef *sequencesStart;
- seqDef *sequences;
- BYTE *litStart;
- BYTE *lit;
- BYTE *llCode;
- BYTE *mlCode;
- BYTE *ofCode;
- U32 longLengthID; /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */
- U32 longLengthPos;
- /* opt */
- ZSTD_optimal_t *priceTable;
- ZSTD_match_t *matchTable;
- U32 *matchLengthFreq;
- U32 *litLengthFreq;
- U32 *litFreq;
- U32 *offCodeFreq;
- U32 matchLengthSum;
- U32 matchSum;
- U32 litLengthSum;
- U32 litSum;
- U32 offCodeSum;
- U32 log2matchLengthSum;
- U32 log2matchSum;
- U32 log2litLengthSum;
- U32 log2litSum;
- U32 log2offCodeSum;
- U32 factor;
- U32 staticPrices;
- U32 cachedPrice;
- U32 cachedLitLength;
- const BYTE *cachedLiterals;
-} seqStore_t;
-
-const seqStore_t *ZSTD_getSeqStore(const ZSTD_CCtx *ctx);
-void ZSTD_seqToCodes(const seqStore_t *seqStorePtr);
-int ZSTD_isSkipFrame(ZSTD_DCtx *dctx);
-
-/*= Custom memory allocation functions */
-typedef void *(*ZSTD_allocFunction)(void *opaque, size_t size);
-typedef void (*ZSTD_freeFunction)(void *opaque, void *address);
-typedef struct {
- ZSTD_allocFunction customAlloc;
- ZSTD_freeFunction customFree;
- void *opaque;
-} ZSTD_customMem;
-
-void *ZSTD_malloc(size_t size, ZSTD_customMem customMem);
-void ZSTD_free(void *ptr, ZSTD_customMem customMem);
-
-/*====== stack allocation ======*/
-
-typedef struct {
- void *ptr;
- const void *end;
-} ZSTD_stack;
-
-#define ZSTD_ALIGN(x) ALIGN(x, sizeof(size_t))
-#define ZSTD_PTR_ALIGN(p) PTR_ALIGN(p, sizeof(size_t))
-
-ZSTD_customMem ZSTD_initStack(void *workspace, size_t workspaceSize);
-
-void *ZSTD_stackAllocAll(void *opaque, size_t *size);
-void *ZSTD_stackAlloc(void *opaque, size_t size);
-void ZSTD_stackFree(void *opaque, void *address);
-
-/*====== common function ======*/
-
-ZSTD_STATIC U32 ZSTD_highbit32(U32 val) { return 31 - __builtin_clz(val); }
-
-/* hidden functions */
-
-/* ZSTD_invalidateRepCodes() :
- * ensures next compression will not use repcodes from previous block.
- * Note : only works with regular variant;
- * do not use with extDict variant ! */
-void ZSTD_invalidateRepCodes(ZSTD_CCtx *cctx);
-
-size_t ZSTD_freeCCtx(ZSTD_CCtx *cctx);
-size_t ZSTD_freeDCtx(ZSTD_DCtx *dctx);
-size_t ZSTD_freeCDict(ZSTD_CDict *cdict);
-size_t ZSTD_freeDDict(ZSTD_DDict *cdict);
-size_t ZSTD_freeCStream(ZSTD_CStream *zcs);
-size_t ZSTD_freeDStream(ZSTD_DStream *zds);
-
-#endif /* ZSTD_CCOMMON_H_MODULE */
diff --git a/lib/zstd/zstd_opt.h b/lib/zstd/zstd_opt.h
deleted file mode 100644
index 55e1b4cba8088c..00000000000000
--- a/lib/zstd/zstd_opt.h
+++ /dev/null
@@ -1,1014 +0,0 @@
-/**
- * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of https://github.com/facebook/zstd.
- * An additional grant of patent rights can be found in the PATENTS file in the
- * same directory.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation. This program is dual-licensed; you may select
- * either version 2 of the GNU General Public License ("GPL") or BSD license
- * ("BSD").
- */
-
-/* Note : this file is intended to be included within zstd_compress.c */
-
-#ifndef ZSTD_OPT_H_91842398743
-#define ZSTD_OPT_H_91842398743
-
-#define ZSTD_LITFREQ_ADD 2
-#define ZSTD_FREQ_DIV 4
-#define ZSTD_MAX_PRICE (1 << 30)
-
-/*-*************************************
-* Price functions for optimal parser
-***************************************/
-FORCE_INLINE void ZSTD_setLog2Prices(seqStore_t *ssPtr)
-{
- ssPtr->log2matchLengthSum = ZSTD_highbit32(ssPtr->matchLengthSum + 1);
- ssPtr->log2litLengthSum = ZSTD_highbit32(ssPtr->litLengthSum + 1);
- ssPtr->log2litSum = ZSTD_highbit32(ssPtr->litSum + 1);
- ssPtr->log2offCodeSum = ZSTD_highbit32(ssPtr->offCodeSum + 1);
- ssPtr->factor = 1 + ((ssPtr->litSum >> 5) / ssPtr->litLengthSum) + ((ssPtr->litSum << 1) / (ssPtr->litSum + ssPtr->matchSum));
-}
-
-ZSTD_STATIC void ZSTD_rescaleFreqs(seqStore_t *ssPtr, const BYTE *src, size_t srcSize)
-{
- unsigned u;
-
- ssPtr->cachedLiterals = NULL;
- ssPtr->cachedPrice = ssPtr->cachedLitLength = 0;
- ssPtr->staticPrices = 0;
-
- if (ssPtr->litLengthSum == 0) {
- if (srcSize <= 1024)
- ssPtr->staticPrices = 1;
-
- for (u = 0; u <= MaxLit; u++)
- ssPtr->litFreq[u] = 0;
- for (u = 0; u < srcSize; u++)
- ssPtr->litFreq[src[u]]++;
-
- ssPtr->litSum = 0;
- ssPtr->litLengthSum = MaxLL + 1;
- ssPtr->matchLengthSum = MaxML + 1;
- ssPtr->offCodeSum = (MaxOff + 1);
- ssPtr->matchSum = (ZSTD_LITFREQ_ADD << Litbits);
-
- for (u = 0; u <= MaxLit; u++) {
- ssPtr->litFreq[u] = 1 + (ssPtr->litFreq[u] >> ZSTD_FREQ_DIV);
- ssPtr->litSum += ssPtr->litFreq[u];
- }
- for (u = 0; u <= MaxLL; u++)
- ssPtr->litLengthFreq[u] = 1;
- for (u = 0; u <= MaxML; u++)
- ssPtr->matchLengthFreq[u] = 1;
- for (u = 0; u <= MaxOff; u++)
- ssPtr->offCodeFreq[u] = 1;
- } else {
- ssPtr->matchLengthSum = 0;
- ssPtr->litLengthSum = 0;
- ssPtr->offCodeSum = 0;
- ssPtr->matchSum = 0;
- ssPtr->litSum = 0;
-
- for (u = 0; u <= MaxLit; u++) {
- ssPtr->litFreq[u] = 1 + (ssPtr->litFreq[u] >> (ZSTD_FREQ_DIV + 1));
- ssPtr->litSum += ssPtr->litFreq[u];
- }
- for (u = 0; u <= MaxLL; u++) {
- ssPtr->litLengthFreq[u] = 1 + (ssPtr->litLengthFreq[u] >> (ZSTD_FREQ_DIV + 1));
- ssPtr->litLengthSum += ssPtr->litLengthFreq[u];
- }
- for (u = 0; u <= MaxML; u++) {
- ssPtr->matchLengthFreq[u] = 1 + (ssPtr->matchLengthFreq[u] >> ZSTD_FREQ_DIV);
- ssPtr->matchLengthSum += ssPtr->matchLengthFreq[u];
- ssPtr->matchSum += ssPtr->matchLengthFreq[u] * (u + 3);
- }
- ssPtr->matchSum *= ZSTD_LITFREQ_ADD;
- for (u = 0; u <= MaxOff; u++) {
- ssPtr->offCodeFreq[u] = 1 + (ssPtr->offCodeFreq[u] >> ZSTD_FREQ_DIV);
- ssPtr->offCodeSum += ssPtr->offCodeFreq[u];
- }
- }
-
- ZSTD_setLog2Prices(ssPtr);
-}
-
-FORCE_INLINE U32 ZSTD_getLiteralPrice(seqStore_t *ssPtr, U32 litLength, const BYTE *literals)
-{
- U32 price, u;
-
- if (ssPtr->staticPrices)
- return ZSTD_highbit32((U32)litLength + 1) + (litLength * 6);
-
- if (litLength == 0)
- return ssPtr->log2litLengthSum - ZSTD_highbit32(ssPtr->litLengthFreq[0] + 1);
-
- /* literals */
- if (ssPtr->cachedLiterals == literals) {
- U32 const additional = litLength - ssPtr->cachedLitLength;
- const BYTE *literals2 = ssPtr->cachedLiterals + ssPtr->cachedLitLength;
- price = ssPtr->cachedPrice + additional * ssPtr->log2litSum;
- for (u = 0; u < additional; u++)
- price -= ZSTD_highbit32(ssPtr->litFreq[literals2[u]] + 1);
- ssPtr->cachedPrice = price;
- ssPtr->cachedLitLength = litLength;
- } else {
- price = litLength * ssPtr->log2litSum;
- for (u = 0; u < litLength; u++)
- price -= ZSTD_highbit32(ssPtr->litFreq[literals[u]] + 1);
-
- if (litLength >= 12) {
- ssPtr->cachedLiterals = literals;
- ssPtr->cachedPrice = price;
- ssPtr->cachedLitLength = litLength;
- }
- }
-
- /* literal Length */
- {
- const BYTE LL_deltaCode = 19;
- const BYTE llCode = (litLength > 63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
- price += LL_bits[llCode] + ssPtr->log2litLengthSum - ZSTD_highbit32(ssPtr->litLengthFreq[llCode] + 1);
- }
-
- return price;
-}
-
-FORCE_INLINE U32 ZSTD_getPrice(seqStore_t *seqStorePtr, U32 litLength, const BYTE *literals, U32 offset, U32 matchLength, const int ultra)
-{
- /* offset */
- U32 price;
- BYTE const offCode = (BYTE)ZSTD_highbit32(offset + 1);
-
- if (seqStorePtr->staticPrices)
- return ZSTD_getLiteralPrice(seqStorePtr, litLength, literals) + ZSTD_highbit32((U32)matchLength + 1) + 16 + offCode;
-
- price = offCode + seqStorePtr->log2offCodeSum - ZSTD_highbit32(seqStorePtr->offCodeFreq[offCode] + 1);
- if (!ultra && offCode >= 20)
- price += (offCode - 19) * 2;
-
- /* match Length */
- {
- const BYTE ML_deltaCode = 36;
- const BYTE mlCode = (matchLength > 127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength];
- price += ML_bits[mlCode] + seqStorePtr->log2matchLengthSum - ZSTD_highbit32(seqStorePtr->matchLengthFreq[mlCode] + 1);
- }
-
- return price + ZSTD_getLiteralPrice(seqStorePtr, litLength, literals) + seqStorePtr->factor;
-}
-
-ZSTD_STATIC void ZSTD_updatePrice(seqStore_t *seqStorePtr, U32 litLength, const BYTE *literals, U32 offset, U32 matchLength)
-{
- U32 u;
-
- /* literals */
- seqStorePtr->litSum += litLength * ZSTD_LITFREQ_ADD;
- for (u = 0; u < litLength; u++)
- seqStorePtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
-
- /* literal Length */
- {
- const BYTE LL_deltaCode = 19;
- const BYTE llCode = (litLength > 63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
- seqStorePtr->litLengthFreq[llCode]++;
- seqStorePtr->litLengthSum++;
- }
-
- /* match offset */
- {
- BYTE const offCode = (BYTE)ZSTD_highbit32(offset + 1);
- seqStorePtr->offCodeSum++;
- seqStorePtr->offCodeFreq[offCode]++;
- }
-
- /* match Length */
- {
- const BYTE ML_deltaCode = 36;
- const BYTE mlCode = (matchLength > 127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength];
- seqStorePtr->matchLengthFreq[mlCode]++;
- seqStorePtr->matchLengthSum++;
- }
-
- ZSTD_setLog2Prices(seqStorePtr);
-}
-
-#define SET_PRICE(pos, mlen_, offset_, litlen_, price_) \
- { \
- while (last_pos < pos) { \
- opt[last_pos + 1].price = ZSTD_MAX_PRICE; \
- last_pos++; \
- } \
- opt[pos].mlen = mlen_; \
- opt[pos].off = offset_; \
- opt[pos].litlen = litlen_; \
- opt[pos].price = price_; \
- }
-
-/* Update hashTable3 up to ip (excluded)
- Assumption : always within prefix (i.e. not within extDict) */
-FORCE_INLINE
-U32 ZSTD_insertAndFindFirstIndexHash3(ZSTD_CCtx *zc, const BYTE *ip)
-{
- U32 *const hashTable3 = zc->hashTable3;
- U32 const hashLog3 = zc->hashLog3;
- const BYTE *const base = zc->base;
- U32 idx = zc->nextToUpdate3;
- const U32 target = zc->nextToUpdate3 = (U32)(ip - base);
- const size_t hash3 = ZSTD_hash3Ptr(ip, hashLog3);
-
- while (idx < target) {
- hashTable3[ZSTD_hash3Ptr(base + idx, hashLog3)] = idx;
- idx++;
- }
-
- return hashTable3[hash3];
-}
-
-/*-*************************************
-* Binary Tree search
-***************************************/
-static U32 ZSTD_insertBtAndGetAllMatches(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, U32 nbCompares, const U32 mls, U32 extDict,
- ZSTD_match_t *matches, const U32 minMatchLen)
-{
- const BYTE *const base = zc->base;
- const U32 curr = (U32)(ip - base);
- const U32 hashLog = zc->params.cParams.hashLog;
- const size_t h = ZSTD_hashPtr(ip, hashLog, mls);
- U32 *const hashTable = zc->hashTable;
- U32 matchIndex = hashTable[h];
- U32 *const bt = zc->chainTable;
- const U32 btLog = zc->params.cParams.chainLog - 1;
- const U32 btMask = (1U << btLog) - 1;
- size_t commonLengthSmaller = 0, commonLengthLarger = 0;
- const BYTE *const dictBase = zc->dictBase;
- const U32 dictLimit = zc->dictLimit;
- const BYTE *const dictEnd = dictBase + dictLimit;
- const BYTE *const prefixStart = base + dictLimit;
- const U32 btLow = btMask >= curr ? 0 : curr - btMask;
- const U32 windowLow = zc->lowLimit;
- U32 *smallerPtr = bt + 2 * (curr & btMask);
- U32 *largerPtr = bt + 2 * (curr & btMask) + 1;
- U32 matchEndIdx = curr + 8;
- U32 dummy32; /* to be nullified at the end */
- U32 mnum = 0;
-
- const U32 minMatch = (mls == 3) ? 3 : 4;
- size_t bestLength = minMatchLen - 1;
-
- if (minMatch == 3) { /* HC3 match finder */
- U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(zc, ip);
- if (matchIndex3 > windowLow && (curr - matchIndex3 < (1 << 18))) {
- const BYTE *match;
- size_t currMl = 0;
- if ((!extDict) || matchIndex3 >= dictLimit) {
- match = base + matchIndex3;
- if (match[bestLength] == ip[bestLength])
- currMl = ZSTD_count(ip, match, iLimit);
- } else {
- match = dictBase + matchIndex3;
- if (ZSTD_readMINMATCH(match, MINMATCH) ==
- ZSTD_readMINMATCH(ip, MINMATCH)) /* assumption : matchIndex3 <= dictLimit-4 (by table construction) */
- currMl = ZSTD_count_2segments(ip + MINMATCH, match + MINMATCH, iLimit, dictEnd, prefixStart) + MINMATCH;
- }
-
- /* save best solution */
- if (currMl > bestLength) {
- bestLength = currMl;
- matches[mnum].off = ZSTD_REP_MOVE_OPT + curr - matchIndex3;
- matches[mnum].len = (U32)currMl;
- mnum++;
- if (currMl > ZSTD_OPT_NUM)
- goto update;
- if (ip + currMl == iLimit)
- goto update; /* best possible, and avoid read overflow*/
- }
- }
- }
-
- hashTable[h] = curr; /* Update Hash Table */
-
- while (nbCompares-- && (matchIndex > windowLow)) {
- U32 *nextPtr = bt + 2 * (matchIndex & btMask);
- size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
- const BYTE *match;
-
- if ((!extDict) || (matchIndex + matchLength >= dictLimit)) {
- match = base + matchIndex;
- if (match[matchLength] == ip[matchLength]) {
- matchLength += ZSTD_count(ip + matchLength + 1, match + matchLength + 1, iLimit) + 1;
- }
- } else {
- match = dictBase + matchIndex;
- matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iLimit, dictEnd, prefixStart);
- if (matchIndex + matchLength >= dictLimit)
- match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
- }
-
- if (matchLength > bestLength) {
- if (matchLength > matchEndIdx - matchIndex)
- matchEndIdx = matchIndex + (U32)matchLength;
- bestLength = matchLength;
- matches[mnum].off = ZSTD_REP_MOVE_OPT + curr - matchIndex;
- matches[mnum].len = (U32)matchLength;
- mnum++;
- if (matchLength > ZSTD_OPT_NUM)
- break;
- if (ip + matchLength == iLimit) /* equal : no way to know if inf or sup */
- break; /* drop, to guarantee consistency (miss a little bit of compression) */
- }
-
- if (match[matchLength] < ip[matchLength]) {
- /* match is smaller than curr */
- *smallerPtr = matchIndex; /* update smaller idx */
- commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
- if (matchIndex <= btLow) {
- smallerPtr = &dummy32;
- break;
- } /* beyond tree size, stop the search */
- smallerPtr = nextPtr + 1; /* new "smaller" => larger of match */
- matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to curr) */
- } else {
- /* match is larger than curr */
- *largerPtr = matchIndex;
- commonLengthLarger = matchLength;
- if (matchIndex <= btLow) {
- largerPtr = &dummy32;
- break;
- } /* beyond tree size, stop the search */
- largerPtr = nextPtr;
- matchIndex = nextPtr[0];
- }
- }
-
- *smallerPtr = *largerPtr = 0;
-
-update:
- zc->nextToUpdate = (matchEndIdx > curr + 8) ? matchEndIdx - 8 : curr + 1;
- return mnum;
-}
-
-/** Tree updater, providing best match */
-static U32 ZSTD_BtGetAllMatches(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, const U32 maxNbAttempts, const U32 mls, ZSTD_match_t *matches,
- const U32 minMatchLen)
-{
- if (ip < zc->base + zc->nextToUpdate)
- return 0; /* skipped area */
- ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls);
- return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 0, matches, minMatchLen);
-}
-
-static U32 ZSTD_BtGetAllMatches_selectMLS(ZSTD_CCtx *zc, /* Index table will be updated */
- const BYTE *ip, const BYTE *const iHighLimit, const U32 maxNbAttempts, const U32 matchLengthSearch,
- ZSTD_match_t *matches, const U32 minMatchLen)
-{
- switch (matchLengthSearch) {
- case 3: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen);
- default:
- case 4: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen);
- case 5: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen);
- case 7:
- case 6: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen);
- }
-}
-
-/** Tree updater, providing best match */
-static U32 ZSTD_BtGetAllMatches_extDict(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, const U32 maxNbAttempts, const U32 mls,
- ZSTD_match_t *matches, const U32 minMatchLen)
-{
- if (ip < zc->base + zc->nextToUpdate)
- return 0; /* skipped area */
- ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls);
- return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 1, matches, minMatchLen);
-}
-
-static U32 ZSTD_BtGetAllMatches_selectMLS_extDict(ZSTD_CCtx *zc, /* Index table will be updated */
- const BYTE *ip, const BYTE *const iHighLimit, const U32 maxNbAttempts, const U32 matchLengthSearch,
- ZSTD_match_t *matches, const U32 minMatchLen)
-{
- switch (matchLengthSearch) {
- case 3: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen);
- default:
- case 4: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen);
- case 5: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen);
- case 7:
- case 6: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen);
- }
-}
-
-/*-*******************************
-* Optimal parser
-*********************************/
-FORCE_INLINE
-void ZSTD_compressBlock_opt_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const int ultra)
-{
- seqStore_t *seqStorePtr = &(ctx->seqStore);
- const BYTE *const istart = (const BYTE *)src;
- const BYTE *ip = istart;
- const BYTE *anchor = istart;
- const BYTE *const iend = istart + srcSize;
- const BYTE *const ilimit = iend - 8;
- const BYTE *const base = ctx->base;
- const BYTE *const prefixStart = base + ctx->dictLimit;
-
- const U32 maxSearches = 1U << ctx->params.cParams.searchLog;
- const U32 sufficient_len = ctx->params.cParams.targetLength;
- const U32 mls = ctx->params.cParams.searchLength;
- const U32 minMatch = (ctx->params.cParams.searchLength == 3) ? 3 : 4;
-
- ZSTD_optimal_t *opt = seqStorePtr->priceTable;
- ZSTD_match_t *matches = seqStorePtr->matchTable;
- const BYTE *inr;
- U32 offset, rep[ZSTD_REP_NUM];
-
- /* init */
- ctx->nextToUpdate3 = ctx->nextToUpdate;
- ZSTD_rescaleFreqs(seqStorePtr, (const BYTE *)src, srcSize);
- ip += (ip == prefixStart);
- {
- U32 i;
- for (i = 0; i < ZSTD_REP_NUM; i++)
- rep[i] = ctx->rep[i];
- }
-
- /* Match Loop */
- while (ip < ilimit) {
- U32 cur, match_num, last_pos, litlen, price;
- U32 u, mlen, best_mlen, best_off, litLength;
- memset(opt, 0, sizeof(ZSTD_optimal_t));
- last_pos = 0;
- litlen = (U32)(ip - anchor);
-
- /* check repCode */
- {
- U32 i, last_i = ZSTD_REP_CHECK + (ip == anchor);
- for (i = (ip == anchor); i < last_i; i++) {
- const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i];
- if ((repCur > 0) && (repCur < (S32)(ip - prefixStart)) &&
- (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repCur, minMatch))) {
- mlen = (U32)ZSTD_count(ip + minMatch, ip + minMatch - repCur, iend) + minMatch;
- if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) {
- best_mlen = mlen;
- best_off = i;
- cur = 0;
- last_pos = 1;
- goto _storeSequence;
- }
- best_off = i - (ip == anchor);
- do {
- price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
- if (mlen > last_pos || price < opt[mlen].price)
- SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */
- mlen--;
- } while (mlen >= minMatch);
- }
- }
- }
-
- match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, ip, iend, maxSearches, mls, matches, minMatch);
-
- if (!last_pos && !match_num) {
- ip++;
- continue;
- }
-
- if (match_num && (matches[match_num - 1].len > sufficient_len || matches[match_num - 1].len >= ZSTD_OPT_NUM)) {
- best_mlen = matches[match_num - 1].len;
- best_off = matches[match_num - 1].off;
- cur = 0;
- last_pos = 1;
- goto _storeSequence;
- }
-
- /* set prices using matches at position = 0 */
- best_mlen = (last_pos) ? last_pos : minMatch;
- for (u = 0; u < match_num; u++) {
- mlen = (u > 0) ? matches[u - 1].len + 1 : best_mlen;
- best_mlen = matches[u].len;
- while (mlen <= best_mlen) {
- price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra);
- if (mlen > last_pos || price < opt[mlen].price)
- SET_PRICE(mlen, mlen, matches[u].off, litlen, price); /* note : macro modifies last_pos */
- mlen++;
- }
- }
-
- if (last_pos < minMatch) {
- ip++;
- continue;
- }
-
- /* initialize opt[0] */
- {
- U32 i;
- for (i = 0; i < ZSTD_REP_NUM; i++)
- opt[0].rep[i] = rep[i];
- }
- opt[0].mlen = 1;
- opt[0].litlen = litlen;
-
- /* check further positions */
- for (cur = 1; cur <= last_pos; cur++) {
- inr = ip + cur;
-
- if (opt[cur - 1].mlen == 1) {
- litlen = opt[cur - 1].litlen + 1;
- if (cur > litlen) {
- price = opt[cur - litlen].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - litlen);
- } else
- price = ZSTD_getLiteralPrice(seqStorePtr, litlen, anchor);
- } else {
- litlen = 1;
- price = opt[cur - 1].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - 1);
- }
-
- if (cur > last_pos || price <= opt[cur].price)
- SET_PRICE(cur, 1, 0, litlen, price);
-
- if (cur == last_pos)
- break;
-
- if (inr > ilimit) /* last match must start at a minimum distance of 8 from oend */
- continue;
-
- mlen = opt[cur].mlen;
- if (opt[cur].off > ZSTD_REP_MOVE_OPT) {
- opt[cur].rep[2] = opt[cur - mlen].rep[1];
- opt[cur].rep[1] = opt[cur - mlen].rep[0];
- opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT;
- } else {
- opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur - mlen].rep[1] : opt[cur - mlen].rep[2];
- opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur - mlen].rep[0] : opt[cur - mlen].rep[1];
- opt[cur].rep[0] =
- ((opt[cur].off == ZSTD_REP_MOVE_OPT) && (mlen != 1)) ? (opt[cur - mlen].rep[0] - 1) : (opt[cur - mlen].rep[opt[cur].off]);
- }
-
- best_mlen = minMatch;
- {
- U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1);
- for (i = (opt[cur].mlen != 1); i < last_i; i++) { /* check rep */
- const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i];
- if ((repCur > 0) && (repCur < (S32)(inr - prefixStart)) &&
- (ZSTD_readMINMATCH(inr, minMatch) == ZSTD_readMINMATCH(inr - repCur, minMatch))) {
- mlen = (U32)ZSTD_count(inr + minMatch, inr + minMatch - repCur, iend) + minMatch;
-
- if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) {
- best_mlen = mlen;
- best_off = i;
- last_pos = cur + 1;
- goto _storeSequence;
- }
-
- best_off = i - (opt[cur].mlen != 1);
- if (mlen > best_mlen)
- best_mlen = mlen;
-
- do {
- if (opt[cur].mlen == 1) {
- litlen = opt[cur].litlen;
- if (cur > litlen) {
- price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, inr - litlen,
- best_off, mlen - MINMATCH, ultra);
- } else
- price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
- } else {
- litlen = 0;
- price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, best_off, mlen - MINMATCH, ultra);
- }
-
- if (cur + mlen > last_pos || price <= opt[cur + mlen].price)
- SET_PRICE(cur + mlen, mlen, i, litlen, price);
- mlen--;
- } while (mlen >= minMatch);
- }
- }
- }
-
- match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, inr, iend, maxSearches, mls, matches, best_mlen);
-
- if (match_num > 0 && (matches[match_num - 1].len > sufficient_len || cur + matches[match_num - 1].len >= ZSTD_OPT_NUM)) {
- best_mlen = matches[match_num - 1].len;
- best_off = matches[match_num - 1].off;
- last_pos = cur + 1;
- goto _storeSequence;
- }
-
- /* set prices using matches at position = cur */
- for (u = 0; u < match_num; u++) {
- mlen = (u > 0) ? matches[u - 1].len + 1 : best_mlen;
- best_mlen = matches[u].len;
-
- while (mlen <= best_mlen) {
- if (opt[cur].mlen == 1) {
- litlen = opt[cur].litlen;
- if (cur > litlen)
- price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, ip + cur - litlen,
- matches[u].off - 1, mlen - MINMATCH, ultra);
- else
- price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra);
- } else {
- litlen = 0;
- price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, matches[u].off - 1, mlen - MINMATCH, ultra);
- }
-
- if (cur + mlen > last_pos || (price < opt[cur + mlen].price))
- SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price);
-
- mlen++;
- }
- }
- }
-
- best_mlen = opt[last_pos].mlen;
- best_off = opt[last_pos].off;
- cur = last_pos - best_mlen;
-
- /* store sequence */
-_storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */
- opt[0].mlen = 1;
-
- while (1) {
- mlen = opt[cur].mlen;
- offset = opt[cur].off;
- opt[cur].mlen = best_mlen;
- opt[cur].off = best_off;
- best_mlen = mlen;
- best_off = offset;
- if (mlen > cur)
- break;
- cur -= mlen;
- }
-
- for (u = 0; u <= last_pos;) {
- u += opt[u].mlen;
- }
-
- for (cur = 0; cur < last_pos;) {
- mlen = opt[cur].mlen;
- if (mlen == 1) {
- ip++;
- cur++;
- continue;
- }
- offset = opt[cur].off;
- cur += mlen;
- litLength = (U32)(ip - anchor);
-
- if (offset > ZSTD_REP_MOVE_OPT) {
- rep[2] = rep[1];
- rep[1] = rep[0];
- rep[0] = offset - ZSTD_REP_MOVE_OPT;
- offset--;
- } else {
- if (offset != 0) {
- best_off = (offset == ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : (rep[offset]);
- if (offset != 1)
- rep[2] = rep[1];
- rep[1] = rep[0];
- rep[0] = best_off;
- }
- if (litLength == 0)
- offset--;
- }
-
- ZSTD_updatePrice(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH);
- ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH);
- anchor = ip = ip + mlen;
- }
- } /* for (cur=0; cur < last_pos; ) */
-
- /* Save reps for next block */
- {
- int i;
- for (i = 0; i < ZSTD_REP_NUM; i++)
- ctx->repToConfirm[i] = rep[i];
- }
-
- /* Last Literals */
- {
- size_t const lastLLSize = iend - anchor;
- memcpy(seqStorePtr->lit, anchor, lastLLSize);
- seqStorePtr->lit += lastLLSize;
- }
-}
-
-FORCE_INLINE
-void ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const int ultra)
-{
- seqStore_t *seqStorePtr = &(ctx->seqStore);
- const BYTE *const istart = (const BYTE *)src;
- const BYTE *ip = istart;
- const BYTE *anchor = istart;
- const BYTE *const iend = istart + srcSize;
- const BYTE *const ilimit = iend - 8;
- const BYTE *const base = ctx->base;
- const U32 lowestIndex = ctx->lowLimit;
- const U32 dictLimit = ctx->dictLimit;
- const BYTE *const prefixStart = base + dictLimit;
- const BYTE *const dictBase = ctx->dictBase;
- const BYTE *const dictEnd = dictBase + dictLimit;
-
- const U32 maxSearches = 1U << ctx->params.cParams.searchLog;
- const U32 sufficient_len = ctx->params.cParams.targetLength;
- const U32 mls = ctx->params.cParams.searchLength;
- const U32 minMatch = (ctx->params.cParams.searchLength == 3) ? 3 : 4;
-
- ZSTD_optimal_t *opt = seqStorePtr->priceTable;
- ZSTD_match_t *matches = seqStorePtr->matchTable;
- const BYTE *inr;
-
- /* init */
- U32 offset, rep[ZSTD_REP_NUM];
- {
- U32 i;
- for (i = 0; i < ZSTD_REP_NUM; i++)
- rep[i] = ctx->rep[i];
- }
-
- ctx->nextToUpdate3 = ctx->nextToUpdate;
- ZSTD_rescaleFreqs(seqStorePtr, (const BYTE *)src, srcSize);
- ip += (ip == prefixStart);
-
- /* Match Loop */
- while (ip < ilimit) {
- U32 cur, match_num, last_pos, litlen, price;
- U32 u, mlen, best_mlen, best_off, litLength;
- U32 curr = (U32)(ip - base);
- memset(opt, 0, sizeof(ZSTD_optimal_t));
- last_pos = 0;
- opt[0].litlen = (U32)(ip - anchor);
-
- /* check repCode */
- {
- U32 i, last_i = ZSTD_REP_CHECK + (ip == anchor);
- for (i = (ip == anchor); i < last_i; i++) {
- const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i];
- const U32 repIndex = (U32)(curr - repCur);
- const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
- const BYTE *const repMatch = repBase + repIndex;
- if ((repCur > 0 && repCur <= (S32)curr) &&
- (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
- && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch))) {
- /* repcode detected we should take it */
- const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
- mlen = (U32)ZSTD_count_2segments(ip + minMatch, repMatch + minMatch, iend, repEnd, prefixStart) + minMatch;
-
- if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) {
- best_mlen = mlen;
- best_off = i;
- cur = 0;
- last_pos = 1;
- goto _storeSequence;
- }
-
- best_off = i - (ip == anchor);
- litlen = opt[0].litlen;
- do {
- price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
- if (mlen > last_pos || price < opt[mlen].price)
- SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */
- mlen--;
- } while (mlen >= minMatch);
- }
- }
- }
-
- match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, ip, iend, maxSearches, mls, matches, minMatch); /* first search (depth 0) */
-
- if (!last_pos && !match_num) {
- ip++;
- continue;
- }
-
- {
- U32 i;
- for (i = 0; i < ZSTD_REP_NUM; i++)
- opt[0].rep[i] = rep[i];
- }
- opt[0].mlen = 1;
-
- if (match_num && (matches[match_num - 1].len > sufficient_len || matches[match_num - 1].len >= ZSTD_OPT_NUM)) {
- best_mlen = matches[match_num - 1].len;
- best_off = matches[match_num - 1].off;
- cur = 0;
- last_pos = 1;
- goto _storeSequence;
- }
-
- best_mlen = (last_pos) ? last_pos : minMatch;
-
- /* set prices using matches at position = 0 */
- for (u = 0; u < match_num; u++) {
- mlen = (u > 0) ? matches[u - 1].len + 1 : best_mlen;
- best_mlen = matches[u].len;
- litlen = opt[0].litlen;
- while (mlen <= best_mlen) {
- price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra);
- if (mlen > last_pos || price < opt[mlen].price)
- SET_PRICE(mlen, mlen, matches[u].off, litlen, price);
- mlen++;
- }
- }
-
- if (last_pos < minMatch) {
- ip++;
- continue;
- }
-
- /* check further positions */
- for (cur = 1; cur <= last_pos; cur++) {
- inr = ip + cur;
-
- if (opt[cur - 1].mlen == 1) {
- litlen = opt[cur - 1].litlen + 1;
- if (cur > litlen) {
- price = opt[cur - litlen].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - litlen);
- } else
- price = ZSTD_getLiteralPrice(seqStorePtr, litlen, anchor);
- } else {
- litlen = 1;
- price = opt[cur - 1].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - 1);
- }
-
- if (cur > last_pos || price <= opt[cur].price)
- SET_PRICE(cur, 1, 0, litlen, price);
-
- if (cur == last_pos)
- break;
-
- if (inr > ilimit) /* last match must start at a minimum distance of 8 from oend */
- continue;
-
- mlen = opt[cur].mlen;
- if (opt[cur].off > ZSTD_REP_MOVE_OPT) {
- opt[cur].rep[2] = opt[cur - mlen].rep[1];
- opt[cur].rep[1] = opt[cur - mlen].rep[0];
- opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT;
- } else {
- opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur - mlen].rep[1] : opt[cur - mlen].rep[2];
- opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur - mlen].rep[0] : opt[cur - mlen].rep[1];
- opt[cur].rep[0] =
- ((opt[cur].off == ZSTD_REP_MOVE_OPT) && (mlen != 1)) ? (opt[cur - mlen].rep[0] - 1) : (opt[cur - mlen].rep[opt[cur].off]);
- }
-
- best_mlen = minMatch;
- {
- U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1);
- for (i = (mlen != 1); i < last_i; i++) {
- const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i];
- const U32 repIndex = (U32)(curr + cur - repCur);
- const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
- const BYTE *const repMatch = repBase + repIndex;
- if ((repCur > 0 && repCur <= (S32)(curr + cur)) &&
- (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
- && (ZSTD_readMINMATCH(inr, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch))) {
- /* repcode detected */
- const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
- mlen = (U32)ZSTD_count_2segments(inr + minMatch, repMatch + minMatch, iend, repEnd, prefixStart) + minMatch;
-
- if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) {
- best_mlen = mlen;
- best_off = i;
- last_pos = cur + 1;
- goto _storeSequence;
- }
-
- best_off = i - (opt[cur].mlen != 1);
- if (mlen > best_mlen)
- best_mlen = mlen;
-
- do {
- if (opt[cur].mlen == 1) {
- litlen = opt[cur].litlen;
- if (cur > litlen) {
- price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, inr - litlen,
- best_off, mlen - MINMATCH, ultra);
- } else
- price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
- } else {
- litlen = 0;
- price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, best_off, mlen - MINMATCH, ultra);
- }
-
- if (cur + mlen > last_pos || price <= opt[cur + mlen].price)
- SET_PRICE(cur + mlen, mlen, i, litlen, price);
- mlen--;
- } while (mlen >= minMatch);
- }
- }
- }
-
- match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, inr, iend, maxSearches, mls, matches, minMatch);
-
- if (match_num > 0 && (matches[match_num - 1].len > sufficient_len || cur + matches[match_num - 1].len >= ZSTD_OPT_NUM)) {
- best_mlen = matches[match_num - 1].len;
- best_off = matches[match_num - 1].off;
- last_pos = cur + 1;
- goto _storeSequence;
- }
-
- /* set prices using matches at position = cur */
- for (u = 0; u < match_num; u++) {
- mlen = (u > 0) ? matches[u - 1].len + 1 : best_mlen;
- best_mlen = matches[u].len;
-
- while (mlen <= best_mlen) {
- if (opt[cur].mlen == 1) {
- litlen = opt[cur].litlen;
- if (cur > litlen)
- price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, ip + cur - litlen,
- matches[u].off - 1, mlen - MINMATCH, ultra);
- else
- price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra);
- } else {
- litlen = 0;
- price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, matches[u].off - 1, mlen - MINMATCH, ultra);
- }
-
- if (cur + mlen > last_pos || (price < opt[cur + mlen].price))
- SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price);
-
- mlen++;
- }
- }
- } /* for (cur = 1; cur <= last_pos; cur++) */
-
- best_mlen = opt[last_pos].mlen;
- best_off = opt[last_pos].off;
- cur = last_pos - best_mlen;
-
- /* store sequence */
-_storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */
- opt[0].mlen = 1;
-
- while (1) {
- mlen = opt[cur].mlen;
- offset = opt[cur].off;
- opt[cur].mlen = best_mlen;
- opt[cur].off = best_off;
- best_mlen = mlen;
- best_off = offset;
- if (mlen > cur)
- break;
- cur -= mlen;
- }
-
- for (u = 0; u <= last_pos;) {
- u += opt[u].mlen;
- }
-
- for (cur = 0; cur < last_pos;) {
- mlen = opt[cur].mlen;
- if (mlen == 1) {
- ip++;
- cur++;
- continue;
- }
- offset = opt[cur].off;
- cur += mlen;
- litLength = (U32)(ip - anchor);
-
- if (offset > ZSTD_REP_MOVE_OPT) {
- rep[2] = rep[1];
- rep[1] = rep[0];
- rep[0] = offset - ZSTD_REP_MOVE_OPT;
- offset--;
- } else {
- if (offset != 0) {
- best_off = (offset == ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : (rep[offset]);
- if (offset != 1)
- rep[2] = rep[1];
- rep[1] = rep[0];
- rep[0] = best_off;
- }
-
- if (litLength == 0)
- offset--;
- }
-
- ZSTD_updatePrice(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH);
- ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH);
- anchor = ip = ip + mlen;
- }
- } /* for (cur=0; cur < last_pos; ) */
-
- /* Save reps for next block */
- {
- int i;
- for (i = 0; i < ZSTD_REP_NUM; i++)
- ctx->repToConfirm[i] = rep[i];
- }
-
- /* Last Literals */
- {
- size_t lastLLSize = iend - anchor;
- memcpy(seqStorePtr->lit, anchor, lastLLSize);
- seqStorePtr->lit += lastLLSize;
- }
-}
-
-#endif /* ZSTD_OPT_H_91842398743 */
diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
index dc892119e88f48..7355cb534e4f8a 100644
--- a/mm/kasan/hw_tags.c
+++ b/mm/kasan/hw_tags.c
@@ -106,6 +106,16 @@ static int __init early_kasan_flag_stacktrace(char *arg)
}
early_param("kasan.stacktrace", early_kasan_flag_stacktrace);
+static inline const char *kasan_mode_info(void)
+{
+ if (kasan_mode == KASAN_MODE_ASYNC)
+ return "async";
+ else if (kasan_mode == KASAN_MODE_ASYMM)
+ return "asymm";
+ else
+ return "sync";
+}
+
/* kasan_init_hw_tags_cpu() is called for each CPU. */
void kasan_init_hw_tags_cpu(void)
{
@@ -177,7 +187,9 @@ void __init kasan_init_hw_tags(void)
break;
}
- pr_info("KernelAddressSanitizer initialized\n");
+ pr_info("KernelAddressSanitizer initialized (hw-tags, mode=%s, stacktrace=%s)\n",
+ kasan_mode_info(),
+ kasan_stack_collection_enabled() ? "on" : "off");
}
void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags)
diff --git a/mm/kasan/sw_tags.c b/mm/kasan/sw_tags.c
index bd3f540feb4728..77f13f391b577a 100644
--- a/mm/kasan/sw_tags.c
+++ b/mm/kasan/sw_tags.c
@@ -42,7 +42,7 @@ void __init kasan_init_sw_tags(void)
for_each_possible_cpu(cpu)
per_cpu(prng_state, cpu) = (u32)get_cycles();
- pr_info("KernelAddressSanitizer initialized\n");
+ pr_info("KernelAddressSanitizer initialized (sw-tags)\n");
}
/*
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 508bcea7df5601..781605e920153c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2058,13 +2058,11 @@ again:
memcg->move_lock_task = current;
memcg->move_lock_flags = flags;
}
-EXPORT_SYMBOL(folio_memcg_lock);
void lock_page_memcg(struct page *page)
{
folio_memcg_lock(page_folio(page));
}
-EXPORT_SYMBOL(lock_page_memcg);
static void __folio_memcg_unlock(struct mem_cgroup *memcg)
{
@@ -2092,13 +2090,11 @@ void folio_memcg_unlock(struct folio *folio)
{
__folio_memcg_unlock(folio_memcg(folio));
}
-EXPORT_SYMBOL(folio_memcg_unlock);
void unlock_page_memcg(struct page *page)
{
folio_memcg_unlock(page_folio(page));
}
-EXPORT_SYMBOL(unlock_page_memcg);
struct obj_stock {
#ifdef CONFIG_MEMCG_KMEM
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index f64ebb6226cbf6..07c875fdeaf0ca 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -58,7 +58,6 @@
#include <linux/ratelimit.h>
#include <linux/page-isolation.h>
#include <linux/pagewalk.h>
-#include <linux/shmem_fs.h>
#include "internal.h"
#include "ras/ras_event.h"
@@ -868,7 +867,6 @@ static int me_pagecache_clean(struct page_state *ps, struct page *p)
{
int ret;
struct address_space *mapping;
- bool extra_pins;
delete_from_lru_cache(p);
@@ -898,23 +896,17 @@ static int me_pagecache_clean(struct page_state *ps, struct page *p)
}
/*
- * The shmem page is kept in page cache instead of truncating
- * so is expected to have an extra refcount after error-handling.
- */
- extra_pins = shmem_mapping(mapping);
-
- /*
* Truncation is a bit tricky. Enable it per file system for now.
*
* Open: to take i_rwsem or not for this? Right now we don't.
*/
ret = truncate_error_page(p, page_to_pfn(p), mapping);
- if (has_extra_refcount(ps, p, extra_pins))
- ret = MF_FAILED;
-
out:
unlock_page(p);
+ if (has_extra_refcount(ps, p, false))
+ ret = MF_FAILED;
+
return ret;
}
diff --git a/mm/migrate.c b/mm/migrate.c
index a11e948593df02..cf25b00f03c8e9 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -404,12 +404,6 @@ int folio_migrate_mapping(struct address_space *mapping,
newzone = folio_zone(newfolio);
xas_lock_irq(&xas);
- if (folio_ref_count(folio) != expected_count ||
- xas_load(&xas) != folio) {
- xas_unlock_irq(&xas);
- return -EAGAIN;
- }
-
if (!folio_ref_freeze(folio, expected_count)) {
xas_unlock_irq(&xas);
return -EAGAIN;
@@ -2368,7 +2362,6 @@ again:
* can't be dropped from it).
*/
get_page(page);
- migrate->cpages++;
/*
* Optimize for the common case where page is only mapped once
@@ -2378,7 +2371,7 @@ again:
if (trylock_page(page)) {
pte_t swp_pte;
- mpfn |= MIGRATE_PFN_LOCKED;
+ migrate->cpages++;
ptep_get_and_clear(mm, addr, ptep);
/* Setup special migration page table entry */
@@ -2412,6 +2405,9 @@ again:
if (pte_present(pte))
unmapped++;
+ } else {
+ put_page(page);
+ mpfn = 0;
}
next:
@@ -2516,15 +2512,17 @@ static bool migrate_vma_check_page(struct page *page)
}
/*
- * migrate_vma_prepare() - lock pages and isolate them from the lru
+ * migrate_vma_unmap() - replace page mapping with special migration pte entry
* @migrate: migrate struct containing all migration information
*
- * This locks pages that have been collected by migrate_vma_collect(). Once each
- * page is locked it is isolated from the lru (for non-device pages). Finally,
- * the ref taken by migrate_vma_collect() is dropped, as locked pages cannot be
- * migrated by concurrent kernel threads.
+ * Isolate pages from the LRU and replace mappings (CPU page table pte) with a
+ * special migration pte entry and check if it has been pinned. Pinned pages are
+ * restored because we cannot migrate them.
+ *
+ * This is the last step before we call the device driver callback to allocate
+ * destination memory and copy contents of original page over to new page.
*/
-static void migrate_vma_prepare(struct migrate_vma *migrate)
+static void migrate_vma_unmap(struct migrate_vma *migrate)
{
const unsigned long npages = migrate->npages;
const unsigned long start = migrate->start;
@@ -2533,32 +2531,12 @@ static void migrate_vma_prepare(struct migrate_vma *migrate)
lru_add_drain();
- for (i = 0; (i < npages) && migrate->cpages; i++) {
+ for (i = 0; i < npages; i++) {
struct page *page = migrate_pfn_to_page(migrate->src[i]);
- bool remap = true;
if (!page)
continue;
- if (!(migrate->src[i] & MIGRATE_PFN_LOCKED)) {
- /*
- * Because we are migrating several pages there can be
- * a deadlock between 2 concurrent migration where each
- * are waiting on each other page lock.
- *
- * Make migrate_vma() a best effort thing and backoff
- * for any page we can not lock right away.
- */
- if (!trylock_page(page)) {
- migrate->src[i] = 0;
- migrate->cpages--;
- put_page(page);
- continue;
- }
- remap = false;
- migrate->src[i] |= MIGRATE_PFN_LOCKED;
- }
-
/* ZONE_DEVICE pages are not on LRU */
if (!is_zone_device_page(page)) {
if (!PageLRU(page) && allow_drain) {
@@ -2568,16 +2546,9 @@ static void migrate_vma_prepare(struct migrate_vma *migrate)
}
if (isolate_lru_page(page)) {
- if (remap) {
- migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
- migrate->cpages--;
- restore++;
- } else {
- migrate->src[i] = 0;
- unlock_page(page);
- migrate->cpages--;
- put_page(page);
- }
+ migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
+ migrate->cpages--;
+ restore++;
continue;
}
@@ -2585,80 +2556,20 @@ static void migrate_vma_prepare(struct migrate_vma *migrate)
put_page(page);
}
- if (!migrate_vma_check_page(page)) {
- if (remap) {
- migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
- migrate->cpages--;
- restore++;
-
- if (!is_zone_device_page(page)) {
- get_page(page);
- putback_lru_page(page);
- }
- } else {
- migrate->src[i] = 0;
- unlock_page(page);
- migrate->cpages--;
+ if (page_mapped(page))
+ try_to_migrate(page, 0);
- if (!is_zone_device_page(page))
- putback_lru_page(page);
- else
- put_page(page);
+ if (page_mapped(page) || !migrate_vma_check_page(page)) {
+ if (!is_zone_device_page(page)) {
+ get_page(page);
+ putback_lru_page(page);
}
- }
- }
-
- for (i = 0, addr = start; i < npages && restore; i++, addr += PAGE_SIZE) {
- struct page *page = migrate_pfn_to_page(migrate->src[i]);
-
- if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
- continue;
-
- remove_migration_pte(page, migrate->vma, addr, page);
-
- migrate->src[i] = 0;
- unlock_page(page);
- put_page(page);
- restore--;
- }
-}
-
-/*
- * migrate_vma_unmap() - replace page mapping with special migration pte entry
- * @migrate: migrate struct containing all migration information
- *
- * Replace page mapping (CPU page table pte) with a special migration pte entry
- * and check again if it has been pinned. Pinned pages are restored because we
- * cannot migrate them.
- *
- * This is the last step before we call the device driver callback to allocate
- * destination memory and copy contents of original page over to new page.
- */
-static void migrate_vma_unmap(struct migrate_vma *migrate)
-{
- const unsigned long npages = migrate->npages;
- const unsigned long start = migrate->start;
- unsigned long addr, i, restore = 0;
-
- for (i = 0; i < npages; i++) {
- struct page *page = migrate_pfn_to_page(migrate->src[i]);
- if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
+ migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
+ migrate->cpages--;
+ restore++;
continue;
-
- if (page_mapped(page)) {
- try_to_migrate(page, 0);
- if (page_mapped(page))
- goto restore;
}
-
- if (migrate_vma_check_page(page))
- continue;
-
-restore:
- migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
- migrate->cpages--;
- restore++;
}
for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) {
@@ -2671,12 +2582,8 @@ restore:
migrate->src[i] = 0;
unlock_page(page);
+ put_page(page);
restore--;
-
- if (is_zone_device_page(page))
- put_page(page);
- else
- putback_lru_page(page);
}
}
@@ -2699,8 +2606,8 @@ restore:
* it for all those entries (ie with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
* flag set). Once these are allocated and copied, the caller must update each
* corresponding entry in the dst array with the pfn value of the destination
- * page and with the MIGRATE_PFN_VALID and MIGRATE_PFN_LOCKED flags set
- * (destination pages must have their struct pages locked, via lock_page()).
+ * page and with MIGRATE_PFN_VALID. Destination pages must be locked via
+ * lock_page().
*
* Note that the caller does not have to migrate all the pages that are marked
* with MIGRATE_PFN_MIGRATE flag in src array unless this is a migration from
@@ -2770,8 +2677,6 @@ int migrate_vma_setup(struct migrate_vma *args)
migrate_vma_collect(args);
if (args->cpages)
- migrate_vma_prepare(args);
- if (args->cpages)
migrate_vma_unmap(args);
/*
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 2d498bb622484a..a613f8ef6a0209 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2967,7 +2967,7 @@ EXPORT_SYMBOL_GPL(folio_wait_writeback_killable);
*/
void folio_wait_stable(struct folio *folio)
{
- if (folio->mapping->host->i_sb->s_iflags & SB_I_STABLE_WRITES)
+ if (folio_inode(folio)->i_sb->s_iflags & SB_I_STABLE_WRITES)
folio_wait_writeback(folio);
}
EXPORT_SYMBOL_GPL(folio_wait_stable);
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 79936db598593a..4f924957ce7a09 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -125,7 +125,7 @@ static noinline depot_stack_handle_t save_stack(gfp_t flags)
return handle;
}
-void __reset_page_owner(struct page *page, unsigned int order)
+void __reset_page_owner(struct page *page, unsigned short order)
{
int i;
struct page_ext *page_ext;
@@ -149,7 +149,7 @@ void __reset_page_owner(struct page *page, unsigned int order)
static inline void __set_page_owner_handle(struct page_ext *page_ext,
depot_stack_handle_t handle,
- unsigned int order, gfp_t gfp_mask)
+ unsigned short order, gfp_t gfp_mask)
{
struct page_owner *page_owner;
int i;
@@ -169,7 +169,7 @@ static inline void __set_page_owner_handle(struct page_ext *page_ext,
}
}
-noinline void __set_page_owner(struct page *page, unsigned int order,
+noinline void __set_page_owner(struct page *page, unsigned short order,
gfp_t gfp_mask)
{
struct page_ext *page_ext = lookup_page_ext(page);
diff --git a/mm/shmem.c b/mm/shmem.c
index 23c91a8beb781d..dc038ce78700cc 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2456,7 +2456,6 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
struct inode *inode = mapping->host;
struct shmem_inode_info *info = SHMEM_I(inode);
pgoff_t index = pos >> PAGE_SHIFT;
- int ret = 0;
/* i_rwsem is held by caller */
if (unlikely(info->seals & (F_SEAL_GROW |
@@ -2467,15 +2466,7 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
return -EPERM;
}
- ret = shmem_getpage(inode, index, pagep, SGP_WRITE);
-
- if (*pagep && PageHWPoison(*pagep)) {
- unlock_page(*pagep);
- put_page(*pagep);
- ret = -EIO;
- }
-
- return ret;
+ return shmem_getpage(inode, index, pagep, SGP_WRITE);
}
static int
@@ -2562,12 +2553,6 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
if (sgp == SGP_CACHE)
set_page_dirty(page);
unlock_page(page);
-
- if (PageHWPoison(page)) {
- put_page(page);
- error = -EIO;
- break;
- }
}
/*
@@ -2960,28 +2945,6 @@ static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
return shmem_unlink(dir, dentry);
}
-static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
-{
- bool old_is_dir = d_is_dir(old_dentry);
- bool new_is_dir = d_is_dir(new_dentry);
-
- if (old_dir != new_dir && old_is_dir != new_is_dir) {
- if (old_is_dir) {
- drop_nlink(old_dir);
- inc_nlink(new_dir);
- } else {
- drop_nlink(new_dir);
- inc_nlink(old_dir);
- }
- }
- old_dir->i_ctime = old_dir->i_mtime =
- new_dir->i_ctime = new_dir->i_mtime =
- d_inode(old_dentry)->i_ctime =
- d_inode(new_dentry)->i_ctime = current_time(old_dir);
-
- return 0;
-}
-
static int shmem_whiteout(struct user_namespace *mnt_userns,
struct inode *old_dir, struct dentry *old_dentry)
{
@@ -3027,7 +2990,7 @@ static int shmem_rename2(struct user_namespace *mnt_userns,
return -EINVAL;
if (flags & RENAME_EXCHANGE)
- return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry);
+ return simple_rename_exchange(old_dir, old_dentry, new_dir, new_dentry);
if (!simple_empty(new_dentry))
return -ENOTEMPTY;
@@ -3129,8 +3092,7 @@ static const char *shmem_get_link(struct dentry *dentry,
page = find_get_page(inode->i_mapping, 0);
if (!page)
return ERR_PTR(-ECHILD);
- if (PageHWPoison(page) ||
- !PageUptodate(page)) {
+ if (!PageUptodate(page)) {
put_page(page);
return ERR_PTR(-ECHILD);
}
@@ -3138,11 +3100,6 @@ static const char *shmem_get_link(struct dentry *dentry,
error = shmem_getpage(inode, 0, &page, SGP_READ);
if (error)
return ERR_PTR(error);
- if (page && PageHWPoison(page)) {
- unlock_page(page);
- put_page(page);
- return ERR_PTR(-ECHILD);
- }
unlock_page(page);
}
set_delayed_call(done, shmem_put_link, page);
@@ -3793,13 +3750,6 @@ static void shmem_destroy_inodecache(void)
kmem_cache_destroy(shmem_inode_cachep);
}
-/* Keep the page in page cache instead of truncating it */
-static int shmem_error_remove_page(struct address_space *mapping,
- struct page *page)
-{
- return 0;
-}
-
const struct address_space_operations shmem_aops = {
.writepage = shmem_writepage,
.set_page_dirty = __set_page_dirty_no_writeback,
@@ -3810,7 +3760,7 @@ const struct address_space_operations shmem_aops = {
#ifdef CONFIG_MIGRATION
.migratepage = migrate_page,
#endif
- .error_remove_page = shmem_error_remove_page,
+ .error_remove_page = generic_error_remove_page,
};
EXPORT_SYMBOL(shmem_aops);
@@ -4221,10 +4171,6 @@ struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
page = ERR_PTR(error);
else
unlock_page(page);
-
- if (PageHWPoison(page))
- page = ERR_PTR(-EIO);
-
return page;
#else
/*
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 0780c2a57ff11b..ac6f036298cd6f 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -232,11 +232,6 @@ static int mcontinue_atomic_pte(struct mm_struct *dst_mm,
goto out;
}
- if (PageHWPoison(page)) {
- ret = -EIO;
- goto out_release;
- }
-
ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
page, false, wp_copy);
if (ret)
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 55275ef9a31a7c..a3a0a5e994f5ae 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -123,9 +123,6 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
}
vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
-
- /* Get rid of the vlan's reference to real_dev */
- dev_put(real_dev);
}
int vlan_check_real_dev(struct net_device *real_dev,
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 90330b893134f8..ab6dee28536daa 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -843,6 +843,9 @@ static void vlan_dev_free(struct net_device *dev)
free_percpu(vlan->vlan_pcpu_stats);
vlan->vlan_pcpu_stats = NULL;
+
+ /* Get rid of the vlan's reference to real_dev */
+ dev_put(vlan->real_dev);
}
void vlan_setup(struct net_device *dev)
diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c
index 9bc55ecb37f9fa..8452b0fbb78c9a 100644
--- a/net/can/j1939/main.c
+++ b/net/can/j1939/main.c
@@ -75,6 +75,13 @@ static void j1939_can_recv(struct sk_buff *iskb, void *data)
skcb->addr.pgn = (cf->can_id >> 8) & J1939_PGN_MAX;
/* set default message type */
skcb->addr.type = J1939_TP;
+
+ if (!j1939_address_is_valid(skcb->addr.sa)) {
+ netdev_err_once(priv->ndev, "%s: sa is broadcast address, ignoring!\n",
+ __func__);
+ goto done;
+ }
+
if (j1939_pgn_is_pdu1(skcb->addr.pgn)) {
/* Type 1: with destination address */
skcb->addr.da = skcb->addr.pgn;
diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
index 6c0a0ebdd024c2..a271688780a2c1 100644
--- a/net/can/j1939/transport.c
+++ b/net/can/j1939/transport.c
@@ -2023,6 +2023,11 @@ static void j1939_tp_cmd_recv(struct j1939_priv *priv, struct sk_buff *skb)
extd = J1939_ETP;
fallthrough;
case J1939_TP_CMD_BAM:
+ if (cmd == J1939_TP_CMD_BAM && !j1939_cb_is_broadcast(skcb)) {
+ netdev_err_once(priv->ndev, "%s: BAM to unicast (%02x), ignoring!\n",
+ __func__, skcb->addr.sa);
+ return;
+ }
fallthrough;
case J1939_TP_CMD_RTS:
if (skcb->addr.type != extd)
@@ -2085,6 +2090,12 @@ static void j1939_tp_cmd_recv(struct j1939_priv *priv, struct sk_buff *skb)
break;
case J1939_ETP_CMD_ABORT: /* && J1939_TP_CMD_ABORT */
+ if (j1939_cb_is_broadcast(skcb)) {
+ netdev_err_once(priv->ndev, "%s: abort to broadcast (%02x), ignoring!\n",
+ __func__, skcb->addr.sa);
+ return;
+ }
+
if (j1939_tp_im_transmitter(skcb))
j1939_xtp_rx_abort(priv, skb, true);
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 013cbdb6cfe26b..6a6898ee40495d 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -1153,12 +1153,11 @@ static int build_initial_monmap(struct ceph_mon_client *monc)
int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
{
- int err = 0;
+ int err;
dout("init\n");
memset(monc, 0, sizeof(*monc));
monc->client = cl;
- monc->monmap = NULL;
mutex_init(&monc->mutex);
err = build_initial_monmap(monc);
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index ff8624a7c96438..1c5815530e0dd0 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -5310,14 +5310,14 @@ void ceph_osdc_stop(struct ceph_osd_client *osdc)
ceph_msgpool_destroy(&osdc->msgpool_op_reply);
}
-static int osd_req_op_copy_from_init(struct ceph_osd_request *req,
- u64 src_snapid, u64 src_version,
- struct ceph_object_id *src_oid,
- struct ceph_object_locator *src_oloc,
- u32 src_fadvise_flags,
- u32 dst_fadvise_flags,
- u32 truncate_seq, u64 truncate_size,
- u8 copy_from_flags)
+int osd_req_op_copy_from_init(struct ceph_osd_request *req,
+ u64 src_snapid, u64 src_version,
+ struct ceph_object_id *src_oid,
+ struct ceph_object_locator *src_oloc,
+ u32 src_fadvise_flags,
+ u32 dst_fadvise_flags,
+ u32 truncate_seq, u64 truncate_size,
+ u8 copy_from_flags)
{
struct ceph_osd_req_op *op;
struct page **pages;
@@ -5346,49 +5346,7 @@ static int osd_req_op_copy_from_init(struct ceph_osd_request *req,
op->indata_len, 0, false, true);
return 0;
}
-
-int ceph_osdc_copy_from(struct ceph_osd_client *osdc,
- u64 src_snapid, u64 src_version,
- struct ceph_object_id *src_oid,
- struct ceph_object_locator *src_oloc,
- u32 src_fadvise_flags,
- struct ceph_object_id *dst_oid,
- struct ceph_object_locator *dst_oloc,
- u32 dst_fadvise_flags,
- u32 truncate_seq, u64 truncate_size,
- u8 copy_from_flags)
-{
- struct ceph_osd_request *req;
- int ret;
-
- req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
- if (!req)
- return -ENOMEM;
-
- req->r_flags = CEPH_OSD_FLAG_WRITE;
-
- ceph_oloc_copy(&req->r_t.base_oloc, dst_oloc);
- ceph_oid_copy(&req->r_t.base_oid, dst_oid);
-
- ret = osd_req_op_copy_from_init(req, src_snapid, src_version, src_oid,
- src_oloc, src_fadvise_flags,
- dst_fadvise_flags, truncate_seq,
- truncate_size, copy_from_flags);
- if (ret)
- goto out;
-
- ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
- if (ret)
- goto out;
-
- ceph_osdc_start_request(osdc, req, false);
- ret = ceph_osdc_wait_request(osdc, req);
-
-out:
- ceph_osdc_put_request(req);
- return ret;
-}
-EXPORT_SYMBOL(ceph_osdc_copy_from);
+EXPORT_SYMBOL(osd_req_op_copy_from_init);
int __init ceph_osdc_setup(void)
{
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 15ab9ffb27fe99..ee290776c661d0 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -646,7 +646,8 @@ int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb,
skb->truesize += truesize;
if (sk && sk->sk_type == SOCK_STREAM) {
sk_wmem_queued_add(sk, truesize);
- sk_mem_charge(sk, truesize);
+ if (!skb_zcopy_pure(skb))
+ sk_mem_charge(sk, truesize);
} else {
refcount_add(truesize, &skb->sk->sk_wmem_alloc);
}
diff --git a/net/core/dev.c b/net/core/dev.c
index edeb811c454e6c..15ac064b5562d7 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6928,7 +6928,7 @@ void napi_disable(struct napi_struct *n)
might_sleep();
set_bit(NAPI_STATE_DISABLE, &n->state);
- do {
+ for ( ; ; ) {
val = READ_ONCE(n->state);
if (val & (NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC)) {
usleep_range(20, 200);
@@ -6937,7 +6937,10 @@ void napi_disable(struct napi_struct *n)
new = val | NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC;
new &= ~(NAPIF_STATE_THREADED | NAPIF_STATE_PREFER_BUSY_POLL);
- } while (cmpxchg(&n->state, val, new) != val);
+
+ if (cmpxchg(&n->state, val, new) == val)
+ break;
+ }
hrtimer_cancel(&n->timer);
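The hunk above turns napi_disable()'s do/while cmpxchg loop into an explicit for (;;) so the state word is re-read after the usleep_range() backoff before the next compare-and-swap attempt. A minimal userspace sketch of that retry shape, using C11 atomics as a stand-in for the kernel's cmpxchg(); all names and flags below are illustrative, not kernel API:

#include <stdatomic.h>

#define FLAG_BUSY   (1u << 0)
#define FLAG_EXTRA  (1u << 1)

/* Set FLAG_BUSY and clear FLAG_EXTRA atomically, re-reading the current
 * state on every pass, mirroring the for (;;) { ... cmpxchg ... } shape. */
static void set_busy(_Atomic unsigned int *state)
{
	unsigned int val, new;

	for (;;) {
		val = atomic_load_explicit(state, memory_order_relaxed);
		new = (val | FLAG_BUSY) & ~FLAG_EXTRA;
		if (atomic_compare_exchange_strong(state, &val, new))
			break;
		/* a real caller could back off here before retrying */
	}
}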
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 6b5ee862429ebe..5ba4f9434acda1 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -66,7 +66,7 @@ struct devlink {
u8 reload_failed:1;
refcount_t refcount;
struct completion comp;
- char priv[0] __aligned(NETDEV_ALIGN);
+ char priv[] __aligned(NETDEV_ALIGN);
};
void *devlink_priv(struct devlink *devlink)
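The devlink change above replaces the GNU zero-length array 'char priv[0]' with a C99 flexible array member, the form the kernel has been converging on. A minimal sketch of how such a trailing member is declared and allocated; the struct and helper below are illustrative only, not the devlink API:

#include <stdlib.h>
#include <string.h>

struct obj {
	unsigned int refcount;
	char priv[];		/* C99 flexible array member */
};

static struct obj *obj_alloc(size_t priv_size)
{
	/* allocate the header plus the trailing private area in one block */
	struct obj *o = malloc(sizeof(*o) + priv_size);

	if (!o)
		return NULL;
	o->refcount = 1;
	memset(o->priv, 0, priv_size);
	return o;
}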
diff --git a/net/core/filter.c b/net/core/filter.c
index 8e8d3b49c29767..e471c9b0967057 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -9756,22 +9756,46 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
static struct bpf_insn *bpf_convert_data_end_access(const struct bpf_insn *si,
struct bpf_insn *insn)
{
- /* si->dst_reg = skb->data */
+ int reg;
+ int temp_reg_off = offsetof(struct sk_buff, cb) +
+ offsetof(struct sk_skb_cb, temp_reg);
+
+ if (si->src_reg == si->dst_reg) {
+ /* We need an extra register, choose and save a register. */
+ reg = BPF_REG_9;
+ if (si->src_reg == reg || si->dst_reg == reg)
+ reg--;
+ if (si->src_reg == reg || si->dst_reg == reg)
+ reg--;
+ *insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg, temp_reg_off);
+ } else {
+ reg = si->dst_reg;
+ }
+
+ /* reg = skb->data */
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
- si->dst_reg, si->src_reg,
+ reg, si->src_reg,
offsetof(struct sk_buff, data));
/* AX = skb->len */
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, len),
BPF_REG_AX, si->src_reg,
offsetof(struct sk_buff, len));
- /* si->dst_reg = skb->data + skb->len */
- *insn++ = BPF_ALU64_REG(BPF_ADD, si->dst_reg, BPF_REG_AX);
+ /* reg = skb->data + skb->len */
+ *insn++ = BPF_ALU64_REG(BPF_ADD, reg, BPF_REG_AX);
/* AX = skb->data_len */
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data_len),
BPF_REG_AX, si->src_reg,
offsetof(struct sk_buff, data_len));
- /* si->dst_reg = skb->data + skb->len - skb->data_len */
- *insn++ = BPF_ALU64_REG(BPF_SUB, si->dst_reg, BPF_REG_AX);
+
+ /* reg = skb->data + skb->len - skb->data_len */
+ *insn++ = BPF_ALU64_REG(BPF_SUB, reg, BPF_REG_AX);
+
+ if (si->src_reg == si->dst_reg) {
+ /* Restore the saved register */
+ *insn++ = BPF_MOV64_REG(BPF_REG_AX, si->src_reg);
+ *insn++ = BPF_MOV64_REG(si->dst_reg, reg);
+ *insn++ = BPF_LDX_MEM(BPF_DW, reg, BPF_REG_AX, temp_reg_off);
+ }
return insn;
}
@@ -9782,11 +9806,33 @@ static u32 sk_skb_convert_ctx_access(enum bpf_access_type type,
struct bpf_prog *prog, u32 *target_size)
{
struct bpf_insn *insn = insn_buf;
+ int off;
switch (si->off) {
case offsetof(struct __sk_buff, data_end):
insn = bpf_convert_data_end_access(si, insn);
break;
+ case offsetof(struct __sk_buff, cb[0]) ...
+ offsetofend(struct __sk_buff, cb[4]) - 1:
+ BUILD_BUG_ON(sizeof_field(struct sk_skb_cb, data) < 20);
+ BUILD_BUG_ON((offsetof(struct sk_buff, cb) +
+ offsetof(struct sk_skb_cb, data)) %
+ sizeof(__u64));
+
+ prog->cb_access = 1;
+ off = si->off;
+ off -= offsetof(struct __sk_buff, cb[0]);
+ off += offsetof(struct sk_buff, cb);
+ off += offsetof(struct sk_skb_cb, data);
+ if (type == BPF_WRITE)
+ *insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg,
+ si->src_reg, off);
+ else
+ *insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg,
+ si->src_reg, off);
+ break;
+
+
default:
return bpf_convert_ctx_access(type, si, insn_buf, prog,
target_size);
@@ -10423,8 +10469,10 @@ BPF_CALL_3(bpf_sk_lookup_assign, struct bpf_sk_lookup_kern *, ctx,
return -EINVAL;
if (unlikely(sk && sk_is_refcounted(sk)))
return -ESOCKTNOSUPPORT; /* reject non-RCU freed sockets */
- if (unlikely(sk && sk->sk_state == TCP_ESTABLISHED))
- return -ESOCKTNOSUPPORT; /* reject connected sockets */
+ if (unlikely(sk && sk_is_tcp(sk) && sk->sk_state != TCP_LISTEN))
+ return -ESOCKTNOSUPPORT; /* only accept TCP socket in LISTEN */
+ if (unlikely(sk && sk_is_udp(sk) && sk->sk_state != TCP_CLOSE))
+ return -ESOCKTNOSUPPORT; /* only accept UDP socket in CLOSE */
/* Check if socket is suitable for packet L3/L4 protocol */
if (sk && sk->sk_protocol != ctx->protocol)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 67a9188d8a49c8..ba2f38246f07e5 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3433,8 +3433,9 @@ static inline void skb_split_no_header(struct sk_buff *skb,
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
{
int pos = skb_headlen(skb);
+ const int zc_flags = SKBFL_SHARED_FRAG | SKBFL_PURE_ZEROCOPY;
- skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & SKBFL_SHARED_FRAG;
+ skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & zc_flags;
skb_zerocopy_clone(skb1, skb, 0);
if (len < pos) /* Split line is inside header. */
skb_split_inside_header(skb, skb1, len, pos);
@@ -3449,19 +3450,7 @@ EXPORT_SYMBOL(skb_split);
*/
static int skb_prepare_for_shift(struct sk_buff *skb)
{
- int ret = 0;
-
- if (skb_cloned(skb)) {
- /* Save and restore truesize: pskb_expand_head() may reallocate
- * memory where ksize(kmalloc(S)) != ksize(kmalloc(S)), but we
- * cannot change truesize at this point.
- */
- unsigned int save_truesize = skb->truesize;
-
- ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- skb->truesize = save_truesize;
- }
- return ret;
+ return skb_unclone_keeptruesize(skb, GFP_ATOMIC);
}
/**
diff --git a/net/core/sock.c b/net/core/sock.c
index 9862eefce21ede..8f2b2f2c0e7b1d 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -976,7 +976,7 @@ static int sock_reserve_memory(struct sock *sk, int bytes)
bool charged;
int pages;
- if (!mem_cgroup_sockets_enabled || !sk->sk_memcg)
+ if (!mem_cgroup_sockets_enabled || !sk->sk_memcg || !sk_has_account(sk))
return -EOPNOTSUPP;
if (!bytes)
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index e252b8ec2b85e9..f39ef79ced679d 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -511,12 +511,6 @@ static bool sock_map_op_okay(const struct bpf_sock_ops_kern *ops)
ops->op == BPF_SOCK_OPS_TCP_LISTEN_CB;
}
-static bool sk_is_tcp(const struct sock *sk)
-{
- return sk->sk_type == SOCK_STREAM &&
- sk->sk_protocol == IPPROTO_TCP;
-}
-
static bool sock_map_redirect_allowed(const struct sock *sk)
{
if (sk_is_tcp(sk))
diff --git a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c
index cd60b94fc175f7..de1c849a0a7055 100644
--- a/net/dsa/tag_ocelot.c
+++ b/net/dsa/tag_ocelot.c
@@ -101,6 +101,7 @@ static struct sk_buff *ocelot_rcv(struct sk_buff *skb,
struct dsa_port *dp;
u8 *extraction;
u16 vlan_tpid;
+ u64 rew_val;
/* Revert skb->data by the amount consumed by the DSA master,
* so it points to the beginning of the frame.
@@ -130,6 +131,7 @@ static struct sk_buff *ocelot_rcv(struct sk_buff *skb,
ocelot_xfh_get_qos_class(extraction, &qos_class);
ocelot_xfh_get_tag_type(extraction, &tag_type);
ocelot_xfh_get_vlan_tci(extraction, &vlan_tci);
+ ocelot_xfh_get_rew_val(extraction, &rew_val);
skb->dev = dsa_master_find_slave(netdev, 0, src_port);
if (!skb->dev)
@@ -143,6 +145,7 @@ static struct sk_buff *ocelot_rcv(struct sk_buff *skb,
dsa_default_offload_fwd_mark(skb);
skb->priority = qos_class;
+ OCELOT_SKB_CB(skb)->tstamp_lo = rew_val;
/* Ocelot switches copy frames unmodified to the CPU. However, it is
* possible for the user to request a VLAN modification through
diff --git a/net/ethtool/pause.c b/net/ethtool/pause.c
index 9009f412151e78..ee1e5806bc93a4 100644
--- a/net/ethtool/pause.c
+++ b/net/ethtool/pause.c
@@ -56,8 +56,7 @@ static int pause_reply_size(const struct ethnl_req_info *req_base,
if (req_base->flags & ETHTOOL_FLAG_STATS)
n += nla_total_size(0) + /* _PAUSE_STATS */
- nla_total_size_64bit(sizeof(u64)) *
- (ETHTOOL_A_PAUSE_STAT_MAX - 2);
+ nla_total_size_64bit(sizeof(u64)) * ETHTOOL_PAUSE_STAT_CNT;
return n;
}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 3c6498dab6bd5c..b7796b4cf0a099 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -862,6 +862,7 @@ struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
if (likely(skb)) {
bool mem_scheduled;
+ skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
if (force_schedule) {
mem_scheduled = true;
sk_forced_mem_schedule(sk, skb->truesize);
@@ -1318,6 +1319,15 @@ new_segment:
copy = min_t(int, copy, pfrag->size - pfrag->offset);
+ /* skb changing from pure zc to mixed, must charge zc */
+ if (unlikely(skb_zcopy_pure(skb))) {
+ if (!sk_wmem_schedule(sk, skb->data_len))
+ goto wait_for_space;
+
+ sk_mem_charge(sk, skb->data_len);
+ skb_shinfo(skb)->flags &= ~SKBFL_PURE_ZEROCOPY;
+ }
+
if (!sk_wmem_schedule(sk, copy))
goto wait_for_space;
@@ -1338,8 +1348,16 @@ new_segment:
}
pfrag->offset += copy;
} else {
- if (!sk_wmem_schedule(sk, copy))
- goto wait_for_space;
+ /* First append to a fragless skb builds initial
+ * pure zerocopy skb
+ */
+ if (!skb->len)
+ skb_shinfo(skb)->flags |= SKBFL_PURE_ZEROCOPY;
+
+ if (!skb_zcopy_pure(skb)) {
+ if (!sk_wmem_schedule(sk, copy))
+ goto wait_for_space;
+ }
err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg);
if (err == -EMSGSIZE || err == -EEXIST) {
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index 5f4d6f45d87f78..f70aa0932bd6c9 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -172,6 +172,41 @@ static int tcp_msg_wait_data(struct sock *sk, struct sk_psock *psock,
return ret;
}
+static int tcp_bpf_recvmsg_parser(struct sock *sk,
+ struct msghdr *msg,
+ size_t len,
+ int nonblock,
+ int flags,
+ int *addr_len)
+{
+ struct sk_psock *psock;
+ int copied;
+
+ if (unlikely(flags & MSG_ERRQUEUE))
+ return inet_recv_error(sk, msg, len, addr_len);
+
+ psock = sk_psock_get(sk);
+ if (unlikely(!psock))
+ return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
+
+ lock_sock(sk);
+msg_bytes_ready:
+ copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
+ if (!copied) {
+ long timeo;
+ int data;
+
+ timeo = sock_rcvtimeo(sk, nonblock);
+ data = tcp_msg_wait_data(sk, psock, timeo);
+ if (data && !sk_psock_queue_empty(psock))
+ goto msg_bytes_ready;
+ copied = -EAGAIN;
+ }
+ release_sock(sk);
+ sk_psock_put(sk, psock);
+ return copied;
+}
+
static int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int nonblock, int flags, int *addr_len)
{
@@ -464,6 +499,8 @@ enum {
enum {
TCP_BPF_BASE,
TCP_BPF_TX,
+ TCP_BPF_RX,
+ TCP_BPF_TXRX,
TCP_BPF_NUM_CFGS,
};
@@ -475,7 +512,6 @@ static void tcp_bpf_rebuild_protos(struct proto prot[TCP_BPF_NUM_CFGS],
struct proto *base)
{
prot[TCP_BPF_BASE] = *base;
- prot[TCP_BPF_BASE].unhash = sock_map_unhash;
prot[TCP_BPF_BASE].close = sock_map_close;
prot[TCP_BPF_BASE].recvmsg = tcp_bpf_recvmsg;
prot[TCP_BPF_BASE].sock_is_readable = sk_msg_is_readable;
@@ -483,6 +519,12 @@ static void tcp_bpf_rebuild_protos(struct proto prot[TCP_BPF_NUM_CFGS],
prot[TCP_BPF_TX] = prot[TCP_BPF_BASE];
prot[TCP_BPF_TX].sendmsg = tcp_bpf_sendmsg;
prot[TCP_BPF_TX].sendpage = tcp_bpf_sendpage;
+
+ prot[TCP_BPF_RX] = prot[TCP_BPF_BASE];
+ prot[TCP_BPF_RX].recvmsg = tcp_bpf_recvmsg_parser;
+
+ prot[TCP_BPF_TXRX] = prot[TCP_BPF_TX];
+ prot[TCP_BPF_TXRX].recvmsg = tcp_bpf_recvmsg_parser;
}
static void tcp_bpf_check_v6_needs_rebuild(struct proto *ops)
@@ -520,6 +562,10 @@ int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
int config = psock->progs.msg_parser ? TCP_BPF_TX : TCP_BPF_BASE;
+ if (psock->progs.stream_verdict || psock->progs.skb_verdict) {
+ config = (config == TCP_BPF_TX) ? TCP_BPF_TXRX : TCP_BPF_RX;
+ }
+
if (restore) {
if (inet_csk_has_ulp(sk)) {
/* TLS does not have an unhash proto in SW cases,
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 6fbbf155803372..2e6e5a70168ebd 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -408,13 +408,13 @@ static inline bool tcp_urg_mode(const struct tcp_sock *tp)
return tp->snd_una != tp->snd_up;
}
-#define OPTION_SACK_ADVERTISE (1 << 0)
-#define OPTION_TS (1 << 1)
-#define OPTION_MD5 (1 << 2)
-#define OPTION_WSCALE (1 << 3)
-#define OPTION_FAST_OPEN_COOKIE (1 << 8)
-#define OPTION_SMC (1 << 9)
-#define OPTION_MPTCP (1 << 10)
+#define OPTION_SACK_ADVERTISE BIT(0)
+#define OPTION_TS BIT(1)
+#define OPTION_MD5 BIT(2)
+#define OPTION_WSCALE BIT(3)
+#define OPTION_FAST_OPEN_COOKIE BIT(8)
+#define OPTION_SMC BIT(9)
+#define OPTION_MPTCP BIT(10)
static void smc_options_write(__be32 *ptr, u16 *options)
{
@@ -1559,7 +1559,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
return -ENOMEM;
}
- if (skb_unclone(skb, gfp))
+ if (skb_unclone_keeptruesize(skb, gfp))
return -ENOMEM;
/* Get a new skb... force flag on. */
@@ -1667,7 +1667,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
{
u32 delta_truesize;
- if (skb_unclone(skb, GFP_ATOMIC))
+ if (skb_unclone_keeptruesize(skb, GFP_ATOMIC))
return -ENOMEM;
delta_truesize = __pskb_trim_head(skb, len);
@@ -1677,7 +1677,8 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
if (delta_truesize) {
skb->truesize -= delta_truesize;
sk_wmem_queued_add(sk, -delta_truesize);
- sk_mem_uncharge(sk, delta_truesize);
+ if (!skb_zcopy_pure(skb))
+ sk_mem_uncharge(sk, delta_truesize);
}
/* Any change of skb->len requires recalculation of tso factor. */
@@ -2295,7 +2296,9 @@ static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
if (len <= skb->len)
break;
- if (unlikely(TCP_SKB_CB(skb)->eor) || tcp_has_tx_tstamp(skb))
+ if (unlikely(TCP_SKB_CB(skb)->eor) ||
+ tcp_has_tx_tstamp(skb) ||
+ !skb_pure_zcopy_same(skb, next))
return false;
len -= skb->len;
@@ -3166,7 +3169,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
cur_mss, GFP_ATOMIC))
return -ENOMEM; /* We'll try again later. */
} else {
- if (skb_unclone(skb, GFP_ATOMIC))
+ if (skb_unclone_keeptruesize(skb, GFP_ATOMIC))
return -ENOMEM;
diff = tcp_skb_pcount(skb);
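The OPTION_* hunk earlier in this file swaps open-coded (1 << n) shifts for BIT(n), which in the kernel comes from <linux/bits.h>. A small standalone sketch of the same flag-word pattern; the macro definition and flag names here are illustrative, not the TCP code:

#include <stdio.h>

#define BIT(nr)		(1UL << (nr))

#define OPT_SACK	BIT(0)
#define OPT_TS		BIT(1)
#define OPT_MPTCP	BIT(10)

int main(void)
{
	unsigned long options = OPT_TS | OPT_MPTCP;

	if (options & OPT_MPTCP)
		printf("MPTCP option set (options=%#lx)\n", options);
	return 0;
}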
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
index 5daa1c3ed83ba8..a8b5784afb1ae1 100644
--- a/net/ipv6/seg6.c
+++ b/net/ipv6/seg6.c
@@ -378,7 +378,7 @@ static int __net_init seg6_net_init(struct net *net)
kfree(rcu_dereference_raw(sdata->tun_src));
kfree(sdata);
return -ENOMEM;
- };
+ }
#endif
return 0;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 2cc9b0e53ad1c8..551fce49841d7f 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1263,7 +1263,6 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
- newinet = inet_sk(newsk);
newnp = tcp_inet6_sk(newsk);
newtp = tcp_sk(newsk);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 12c12619ee357d..e43b31d25fb61c 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -700,9 +700,9 @@ static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
ret = encap_rcv(sk, skb);
if (ret <= 0) {
- __UDP_INC_STATS(sock_net(sk),
- UDP_MIB_INDATAGRAMS,
- is_udplite);
+ __UDP6_INC_STATS(sock_net(sk),
+ UDP_MIB_INDATAGRAMS,
+ is_udplite);
return -ret;
}
}
diff --git a/net/mctp/af_mctp.c b/net/mctp/af_mctp.c
index d344b02a1cde68..871cf62661258f 100644
--- a/net/mctp/af_mctp.c
+++ b/net/mctp/af_mctp.c
@@ -33,6 +33,19 @@ static int mctp_release(struct socket *sock)
return 0;
}
+/* Generic sockaddr checks, padding checks only so far */
+static bool mctp_sockaddr_is_ok(const struct sockaddr_mctp *addr)
+{
+ return !addr->__smctp_pad0 && !addr->__smctp_pad1;
+}
+
+static bool mctp_sockaddr_ext_is_ok(const struct sockaddr_mctp_ext *addr)
+{
+ return !addr->__smctp_pad0[0] &&
+ !addr->__smctp_pad0[1] &&
+ !addr->__smctp_pad0[2];
+}
+
static int mctp_bind(struct socket *sock, struct sockaddr *addr, int addrlen)
{
struct sock *sk = sock->sk;
@@ -52,6 +65,9 @@ static int mctp_bind(struct socket *sock, struct sockaddr *addr, int addrlen)
/* it's a valid sockaddr for MCTP, cast and do protocol checks */
smctp = (struct sockaddr_mctp *)addr;
+ if (!mctp_sockaddr_is_ok(smctp))
+ return -EINVAL;
+
lock_sock(sk);
/* TODO: allow rebind */
@@ -87,6 +103,8 @@ static int mctp_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
return -EINVAL;
if (addr->smctp_family != AF_MCTP)
return -EINVAL;
+ if (!mctp_sockaddr_is_ok(addr))
+ return -EINVAL;
if (addr->smctp_tag & ~(MCTP_TAG_MASK | MCTP_TAG_OWNER))
return -EINVAL;
@@ -124,7 +142,8 @@ static int mctp_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
DECLARE_SOCKADDR(struct sockaddr_mctp_ext *,
extaddr, msg->msg_name);
- if (extaddr->smctp_halen > sizeof(cb->haddr)) {
+ if (!mctp_sockaddr_ext_is_ok(extaddr) ||
+ extaddr->smctp_halen > sizeof(cb->haddr)) {
rc = -EINVAL;
goto err_free;
}
@@ -198,11 +217,13 @@ static int mctp_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
addr = msg->msg_name;
addr->smctp_family = AF_MCTP;
+ addr->__smctp_pad0 = 0;
addr->smctp_network = cb->net;
addr->smctp_addr.s_addr = hdr->src;
addr->smctp_type = type;
addr->smctp_tag = hdr->flags_seq_tag &
(MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO);
+ addr->__smctp_pad1 = 0;
msg->msg_namelen = sizeof(*addr);
if (msk->addr_ext) {
@@ -211,6 +232,7 @@ static int mctp_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
msg->msg_namelen = sizeof(*ae);
ae->smctp_ifindex = cb->ifindex;
ae->smctp_halen = cb->halen;
+ memset(ae->__smctp_pad0, 0x0, sizeof(ae->__smctp_pad0));
memset(ae->smctp_haddr, 0x0, sizeof(ae->smctp_haddr));
memcpy(ae->smctp_haddr, cb->haddr, cb->halen);
}
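The af_mctp.c hunks above reject sockaddrs whose padding bytes are non-zero and explicitly zero the padding before copying addresses back to userspace, so the reserved fields stay usable later and no stale kernel bytes leak out. A minimal sketch of that pattern with a made-up address struct (not the real struct sockaddr_mctp layout):

#include <string.h>

struct addr_example {
	unsigned short family;
	unsigned char  __pad0[2];	/* reserved, must be zero */
	unsigned int   id;
};

/* Reject input whose reserved padding is not zeroed. */
static int addr_is_ok(const struct addr_example *a)
{
	return a->__pad0[0] == 0 && a->__pad0[1] == 0;
}

/* Clear the whole struct before filling it for userspace so no stale
 * bytes escape through the padding. */
static void addr_fill(struct addr_example *a, unsigned int id)
{
	memset(a, 0, sizeof(*a));
	a->family = 1;
	a->id = id;
}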
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 816f74dadfa385..39c523bd775c3a 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -47,6 +47,8 @@
#include <net/ip_vs.h>
+MODULE_ALIAS_GENL_FAMILY(IPVS_GENL_NAME);
+
/* semaphore for IPVS sockopts. And, [gs]etsockopt may sleep. */
static DEFINE_MUTEX(__ip_vs_mutex);
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 4c3fbaaeb10303..4acc4b8e9fe5a0 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -560,7 +560,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
goto nla_put_failure;
if (indev && entskb->dev &&
- entskb->mac_header != entskb->network_header) {
+ skb_mac_header_was_set(entskb)) {
struct nfqnl_msg_packet_hw phw;
int len;
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 49089c50872e68..334f63c9529efa 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -1664,31 +1664,37 @@ static const struct genl_ops nfc_genl_ops[] = {
.cmd = NFC_CMD_DEV_UP,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nfc_genl_dev_up,
+ .flags = GENL_ADMIN_PERM,
},
{
.cmd = NFC_CMD_DEV_DOWN,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nfc_genl_dev_down,
+ .flags = GENL_ADMIN_PERM,
},
{
.cmd = NFC_CMD_START_POLL,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nfc_genl_start_poll,
+ .flags = GENL_ADMIN_PERM,
},
{
.cmd = NFC_CMD_STOP_POLL,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nfc_genl_stop_poll,
+ .flags = GENL_ADMIN_PERM,
},
{
.cmd = NFC_CMD_DEP_LINK_UP,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nfc_genl_dep_link_up,
+ .flags = GENL_ADMIN_PERM,
},
{
.cmd = NFC_CMD_DEP_LINK_DOWN,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nfc_genl_dep_link_down,
+ .flags = GENL_ADMIN_PERM,
},
{
.cmd = NFC_CMD_GET_TARGET,
@@ -1706,26 +1712,31 @@ static const struct genl_ops nfc_genl_ops[] = {
.cmd = NFC_CMD_LLC_SET_PARAMS,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nfc_genl_llc_set_params,
+ .flags = GENL_ADMIN_PERM,
},
{
.cmd = NFC_CMD_LLC_SDREQ,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nfc_genl_llc_sdreq,
+ .flags = GENL_ADMIN_PERM,
},
{
.cmd = NFC_CMD_FW_DOWNLOAD,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nfc_genl_fw_download,
+ .flags = GENL_ADMIN_PERM,
},
{
.cmd = NFC_CMD_ENABLE_SE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nfc_genl_enable_se,
+ .flags = GENL_ADMIN_PERM,
},
{
.cmd = NFC_CMD_DISABLE_SE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nfc_genl_disable_se,
+ .flags = GENL_ADMIN_PERM,
},
{
.cmd = NFC_CMD_GET_SE,
@@ -1737,21 +1748,25 @@ static const struct genl_ops nfc_genl_ops[] = {
.cmd = NFC_CMD_SE_IO,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nfc_genl_se_io,
+ .flags = GENL_ADMIN_PERM,
},
{
.cmd = NFC_CMD_ACTIVATE_TARGET,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nfc_genl_activate_target,
+ .flags = GENL_ADMIN_PERM,
},
{
.cmd = NFC_CMD_VENDOR,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nfc_genl_vendor_cmd,
+ .flags = GENL_ADMIN_PERM,
},
{
.cmd = NFC_CMD_DEACTIVATE_TARGET,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nfc_genl_deactivate_target,
+ .flags = GENL_ADMIN_PERM,
},
};
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 9ab068fa2672be..377f896bdedc45 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -95,18 +95,22 @@ static ktime_t sched_base_time(const struct sched_gate_list *sched)
return ns_to_ktime(sched->base_time);
}
-static ktime_t taprio_get_time(struct taprio_sched *q)
+static ktime_t taprio_mono_to_any(const struct taprio_sched *q, ktime_t mono)
{
- ktime_t mono = ktime_get();
+ /* This pairs with WRITE_ONCE() in taprio_parse_clockid() */
+ enum tk_offsets tk_offset = READ_ONCE(q->tk_offset);
- switch (q->tk_offset) {
+ switch (tk_offset) {
case TK_OFFS_MAX:
return mono;
default:
- return ktime_mono_to_any(mono, q->tk_offset);
+ return ktime_mono_to_any(mono, tk_offset);
}
+}
- return KTIME_MAX;
+static ktime_t taprio_get_time(const struct taprio_sched *q)
+{
+ return taprio_mono_to_any(q, ktime_get());
}
static void taprio_free_sched_cb(struct rcu_head *head)
@@ -319,7 +323,7 @@ static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)
return 0;
}
- return ktime_mono_to_any(skb->skb_mstamp_ns, q->tk_offset);
+ return taprio_mono_to_any(q, skb->skb_mstamp_ns);
}
/* There are a few scenarios where we will have to modify the txtime from
@@ -1352,6 +1356,7 @@ static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb,
}
} else if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
int clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]);
+ enum tk_offsets tk_offset;
/* We only support static clockids and we don't allow
* for it to be modified after the first init.
@@ -1366,22 +1371,24 @@ static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb,
switch (clockid) {
case CLOCK_REALTIME:
- q->tk_offset = TK_OFFS_REAL;
+ tk_offset = TK_OFFS_REAL;
break;
case CLOCK_MONOTONIC:
- q->tk_offset = TK_OFFS_MAX;
+ tk_offset = TK_OFFS_MAX;
break;
case CLOCK_BOOTTIME:
- q->tk_offset = TK_OFFS_BOOT;
+ tk_offset = TK_OFFS_BOOT;
break;
case CLOCK_TAI:
- q->tk_offset = TK_OFFS_TAI;
+ tk_offset = TK_OFFS_TAI;
break;
default:
NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
err = -EINVAL;
goto out;
}
+ /* This pairs with READ_ONCE() in taprio_mono_to_any */
+ WRITE_ONCE(q->tk_offset, tk_offset);
q->clockid = clockid;
} else {
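The taprio hunks above publish tk_offset with WRITE_ONCE() and read it back through a single READ_ONCE(), so a reader racing with taprio_parse_clockid() sees one coherent value rather than a torn or re-loaded one. A userspace analogue of that pairing using C11 relaxed atomics; the names are illustrative and the kernel macros themselves are not reproduced here:

#include <stdatomic.h>

static _Atomic int tk_offset;

/* writer side: pairs with the single load in read_offset() */
void publish_offset(int new_offset)
{
	atomic_store_explicit(&tk_offset, new_offset, memory_order_relaxed);
}

/* reader side: load once and act on the local snapshot */
int read_offset(void)
{
	int off = atomic_load_explicit(&tk_offset, memory_order_relaxed);

	return off;
}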
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index fb3da4d8f4a340..354c1c4de19bdc 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -326,11 +326,6 @@ enum sctp_disposition sctp_sf_do_5_1B_init(struct net *net,
struct sctp_packet *packet;
int len;
- /* Update socket peer label if first association. */
- if (security_sctp_assoc_request((struct sctp_endpoint *)ep,
- chunk->skb))
- return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
-
/* 6.10 Bundling
* An endpoint MUST NOT bundle INIT, INIT ACK or
* SHUTDOWN COMPLETE with any other chunks.
@@ -415,6 +410,12 @@ enum sctp_disposition sctp_sf_do_5_1B_init(struct net *net,
if (!new_asoc)
goto nomem;
+ /* Update socket peer label if first association. */
+ if (security_sctp_assoc_request(new_asoc, chunk->skb)) {
+ sctp_association_free(new_asoc);
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ }
+
if (sctp_assoc_set_bind_addr_from_ep(new_asoc,
sctp_scope(sctp_source(chunk)),
GFP_ATOMIC) < 0)
@@ -780,6 +781,10 @@ enum sctp_disposition sctp_sf_do_5_1D_ce(struct net *net,
}
}
+ if (security_sctp_assoc_request(new_asoc, chunk->skb)) {
+ sctp_association_free(new_asoc);
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ }
/* Delay state machine commands until later.
*
@@ -1517,11 +1522,6 @@ static enum sctp_disposition sctp_sf_do_unexpected_init(
struct sctp_packet *packet;
int len;
- /* Update socket peer label if first association. */
- if (security_sctp_assoc_request((struct sctp_endpoint *)ep,
- chunk->skb))
- return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
-
/* 6.10 Bundling
* An endpoint MUST NOT bundle INIT, INIT ACK or
* SHUTDOWN COMPLETE with any other chunks.
@@ -1594,6 +1594,12 @@ static enum sctp_disposition sctp_sf_do_unexpected_init(
if (!new_asoc)
goto nomem;
+ /* Update socket peer label if first association. */
+ if (security_sctp_assoc_request(new_asoc, chunk->skb)) {
+ sctp_association_free(new_asoc);
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ }
+
if (sctp_assoc_set_bind_addr_from_ep(new_asoc,
sctp_scope(sctp_source(chunk)), GFP_ATOMIC) < 0)
goto nomem;
@@ -2255,8 +2261,7 @@ enum sctp_disposition sctp_sf_do_5_2_4_dupcook(
}
/* Update socket peer label if first association. */
- if (security_sctp_assoc_request((struct sctp_endpoint *)ep,
- chunk->skb)) {
+ if (security_sctp_assoc_request(new_asoc, chunk->skb)) {
sctp_association_free(new_asoc);
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
@@ -4893,9 +4898,6 @@ static enum sctp_disposition sctp_sf_violation_chunk(
{
static const char err_str[] = "The following chunk violates protocol:";
- if (!asoc)
- return sctp_sf_violation(net, ep, asoc, type, arg, commands);
-
return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str,
sizeof(err_str));
}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 6b937bfd475159..33391254fa82b2 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -9412,7 +9412,6 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
struct inet_sock *inet = inet_sk(sk);
struct inet_sock *newinet;
struct sctp_sock *sp = sctp_sk(sk);
- struct sctp_endpoint *ep = sp->ep;
newsk->sk_type = sk->sk_type;
newsk->sk_bound_dev_if = sk->sk_bound_dev_if;
@@ -9457,9 +9456,9 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
net_enable_timestamp();
/* Set newsk security attributes from original sk and connection
- * security attribute from ep.
+ * security attribute from asoc.
*/
- security_sctp_sk_clone(ep, sk, newsk);
+ security_sctp_sk_clone(asoc, sk, newsk);
}
static inline void sctp_copy_descendant(struct sock *sk_to,
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 0cf7ed2f5d41b5..59284da9116d78 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -149,14 +149,18 @@ static int __smc_release(struct smc_sock *smc)
sock_set_flag(sk, SOCK_DEAD);
sk->sk_shutdown |= SHUTDOWN_MASK;
} else {
- if (sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_INIT)
- sock_put(sk); /* passive closing */
- if (sk->sk_state == SMC_LISTEN) {
- /* wake up clcsock accept */
- rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
+ if (sk->sk_state != SMC_CLOSED) {
+ if (sk->sk_state != SMC_LISTEN &&
+ sk->sk_state != SMC_INIT)
+ sock_put(sk); /* passive closing */
+ if (sk->sk_state == SMC_LISTEN) {
+ /* wake up clcsock accept */
+ rc = kernel_sock_shutdown(smc->clcsock,
+ SHUT_RDWR);
+ }
+ sk->sk_state = SMC_CLOSED;
+ sk->sk_state_change(sk);
}
- sk->sk_state = SMC_CLOSED;
- sk->sk_state_change(sk);
smc_restore_fallback_changes(smc);
}
diff --git a/net/smc/smc_tracepoint.h b/net/smc/smc_tracepoint.h
index b4c36795a9280f..ec17f29646f570 100644
--- a/net/smc/smc_tracepoint.h
+++ b/net/smc/smc_tracepoint.h
@@ -99,7 +99,7 @@ TRACE_EVENT(smcr_link_down,
__entry->location = location;
),
- TP_printk("lnk=%p lgr=%p state=%d dev=%s location=%p",
+ TP_printk("lnk=%p lgr=%p state=%d dev=%s location=%pS",
__entry->lnk, __entry->lgr,
__entry->state, __get_str(name),
__entry->location)
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
index 9c0343568d2a06..1a72c67afed5e6 100644
--- a/net/strparser/strparser.c
+++ b/net/strparser/strparser.c
@@ -27,18 +27,10 @@
static struct workqueue_struct *strp_wq;
-struct _strp_msg {
- /* Internal cb structure. struct strp_msg must be first for passing
- * to upper layer.
- */
- struct strp_msg strp;
- int accum_len;
-};
-
static inline struct _strp_msg *_strp_msg(struct sk_buff *skb)
{
return (struct _strp_msg *)((void *)skb->cb +
- offsetof(struct qdisc_skb_cb, data));
+ offsetof(struct sk_skb_cb, strp));
}
/* Lower lock held */
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 7d851eb3a68307..ed0df839c38ce6 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1322,6 +1322,8 @@ static int vsock_connect(struct socket *sock, struct sockaddr *addr,
* non-blocking call.
*/
err = -EALREADY;
+ if (flags & O_NONBLOCK)
+ goto out;
break;
default:
if ((sk->sk_state == TCP_LISTEN) ||
diff --git a/scripts/coccinelle/misc/do_div.cocci b/scripts/coccinelle/misc/do_div.cocci
new file mode 100644
index 00000000000000..79db083c520870
--- /dev/null
+++ b/scripts/coccinelle/misc/do_div.cocci
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/// do_div() does a 64-by-32 division.
+/// When the divisor is long, unsigned long, u64, or s64,
+/// do_div() truncates it to 32 bits; this means it can test
+/// non-zero yet be truncated to 0 for division on 64-bit platforms.
+///
+//# This makes an effort to find those inappropriate do_div() calls.
+//
+// Confidence: Moderate
+// Copyright: (C) 2020 Wen Yang, Alibaba.
+// Comments:
+// Options: --no-includes --include-headers
+
+virtual context
+virtual org
+virtual report
+
+@initialize:python@
+@@
+
+def get_digit_type_and_value(str):
+ is_digit = False
+ value = 0
+
+ try:
+ if (str.isdigit()):
+ is_digit = True
+ value = int(str, 0)
+ elif (str.upper().endswith('ULL')):
+ is_digit = True
+ value = int(str[:-3], 0)
+ elif (str.upper().endswith('LL')):
+ is_digit = True
+ value = int(str[:-2], 0)
+ elif (str.upper().endswith('UL')):
+ is_digit = True
+ value = int(str[:-2], 0)
+ elif (str.upper().endswith('L')):
+ is_digit = True
+ value = int(str[:-1], 0)
+ elif (str.upper().endswith('U')):
+ is_digit = True
+ value = int(str[:-1], 0)
+ except Exception as e:
+ print('Error:',e)
+ is_digit = False
+ value = 0
+ finally:
+ return is_digit, value
+
+def filter_out_safe_constants(str):
+ is_digit, value = get_digit_type_and_value(str)
+ if (is_digit):
+ if (value >= 0x100000000):
+ return True
+ else:
+ return False
+ else:
+ return True
+
+def construct_warnings(suggested_fun):
+ msg="WARNING: do_div() does a 64-by-32 division, please consider using %s instead."
+ return msg % suggested_fun
+
+@depends on context@
+expression f;
+long l: script:python() { filter_out_safe_constants(l) };
+unsigned long ul : script:python() { filter_out_safe_constants(ul) };
+u64 ul64 : script:python() { filter_out_safe_constants(ul64) };
+s64 sl64 : script:python() { filter_out_safe_constants(sl64) };
+
+@@
+(
+* do_div(f, l);
+|
+* do_div(f, ul);
+|
+* do_div(f, ul64);
+|
+* do_div(f, sl64);
+)
+
+@r depends on (org || report)@
+expression f;
+position p;
+long l: script:python() { filter_out_safe_constants(l) };
+unsigned long ul : script:python() { filter_out_safe_constants(ul) };
+u64 ul64 : script:python() { filter_out_safe_constants(ul64) };
+s64 sl64 : script:python() { filter_out_safe_constants(sl64) };
+@@
+(
+do_div@p(f, l);
+|
+do_div@p(f, ul);
+|
+do_div@p(f, ul64);
+|
+do_div@p(f, sl64);
+)
+
+@script:python depends on org@
+p << r.p;
+ul << r.ul;
+@@
+
+coccilib.org.print_todo(p[0], construct_warnings("div64_ul"))
+
+@script:python depends on org@
+p << r.p;
+l << r.l;
+@@
+
+coccilib.org.print_todo(p[0], construct_warnings("div64_long"))
+
+@script:python depends on org@
+p << r.p;
+ul64 << r.ul64;
+@@
+
+coccilib.org.print_todo(p[0], construct_warnings("div64_u64"))
+
+@script:python depends on org@
+p << r.p;
+sl64 << r.sl64;
+@@
+
+coccilib.org.print_todo(p[0], construct_warnings("div64_s64"))
+
+@script:python depends on report@
+p << r.p;
+ul << r.ul;
+@@
+
+coccilib.report.print_report(p[0], construct_warnings("div64_ul"))
+
+@script:python depends on report@
+p << r.p;
+l << r.l;
+@@
+
+coccilib.report.print_report(p[0], construct_warnings("div64_long"))
+
+@script:python depends on report@
+p << r.p;
+sl64 << r.sl64;
+@@
+
+coccilib.report.print_report(p[0], construct_warnings("div64_s64"))
+
+@script:python depends on report@
+p << r.p;
+ul64 << r.ul64;
+@@
+
+coccilib.report.print_report(p[0], construct_warnings("div64_u64"))
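The new Coccinelle script above reports do_div() calls whose divisor has a 64-bit (or long) type, because do_div() performs a 64-by-32 division and silently truncates such a divisor. A sketch of the pattern it flags and the usual replacement; the helper comes from <linux/math64.h>, while the surrounding function is illustrative only:

#include <linux/types.h>
#include <linux/math64.h>

static u64 bytes_per_interval(u64 total_bytes, u64 interval_ns)
{
	/* BAD: do_div() truncates a 64-bit interval_ns to 32 bits,
	 * possibly all the way to 0:
	 *	do_div(total_bytes, interval_ns);
	 */

	/* GOOD: full 64-by-64 division */
	return div64_u64(total_bytes, interval_ns);
}

The script is normally driven through the coccicheck machinery, e.g. something like "make coccicheck MODE=report COCCI=scripts/coccinelle/misc/do_div.cocci M=drivers/"; treat the exact variables as an assumption, since the invocation depends on the tree's Makefile plumbing.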
diff --git a/scripts/remove-stale-files b/scripts/remove-stale-files
index c3eb81c3f7de1c..0114c41e69388b 100755
--- a/scripts/remove-stale-files
+++ b/scripts/remove-stale-files
@@ -28,4 +28,9 @@ if [ -n "${building_out_of_srctree}" ]; then
do
rm -f arch/arm/boot/compressed/${f}
done
+
+ for f in uart-ath79.c ashldi3.c bswapdi.c bswapsi.c
+ do
+ rm -f arch/mips/boot/compressed/${f}
+ done
fi
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index 2ee3b3d29f10b4..0797edb2fb3dc6 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -812,8 +812,6 @@ struct multi_transaction {
};
#define MULTI_TRANSACTION_LIMIT (PAGE_SIZE - sizeof(struct multi_transaction))
-/* TODO: replace with per file lock */
-static DEFINE_SPINLOCK(multi_transaction_lock);
static void multi_transaction_kref(struct kref *kref)
{
@@ -847,10 +845,10 @@ static void multi_transaction_set(struct file *file,
AA_BUG(n > MULTI_TRANSACTION_LIMIT);
new->size = n;
- spin_lock(&multi_transaction_lock);
+ spin_lock(&file->f_lock);
old = (struct multi_transaction *) file->private_data;
file->private_data = new;
- spin_unlock(&multi_transaction_lock);
+ spin_unlock(&file->f_lock);
put_multi_transaction(old);
}
@@ -879,9 +877,10 @@ static ssize_t multi_transaction_read(struct file *file, char __user *buf,
struct multi_transaction *t;
ssize_t ret;
- spin_lock(&multi_transaction_lock);
+ spin_lock(&file->f_lock);
t = get_multi_transaction(file->private_data);
- spin_unlock(&multi_transaction_lock);
+ spin_unlock(&file->f_lock);
+
if (!t)
return 0;
@@ -1358,7 +1357,7 @@ static int rawdata_open(struct inode *inode, struct file *file)
struct aa_loaddata *loaddata;
struct rawdata_f_data *private;
- if (!policy_view_capable(NULL))
+ if (!aa_current_policy_view_capable(NULL))
return -EACCES;
loaddata = __aa_get_loaddata(inode->i_private);
@@ -2114,7 +2113,7 @@ static struct aa_profile *__first_profile(struct aa_ns *root,
/**
* __next_profile - step to the next profile in a profile tree
- * @profile: current profile in tree (NOT NULL)
+ * @p: current profile in tree (NOT NULL)
*
* Perform a depth first traversal on the profile tree in a namespace
*
@@ -2265,7 +2264,7 @@ static const struct seq_operations aa_sfs_profiles_op = {
static int profiles_open(struct inode *inode, struct file *file)
{
- if (!policy_view_capable(NULL))
+ if (!aa_current_policy_view_capable(NULL))
return -EACCES;
return seq_open(file, &aa_sfs_profiles_op);
diff --git a/security/apparmor/include/file.h b/security/apparmor/include/file.h
index d4f8948517d99f..7517605a183d3d 100644
--- a/security/apparmor/include/file.h
+++ b/security/apparmor/include/file.h
@@ -167,7 +167,7 @@ int aa_audit_file(struct aa_profile *profile, struct aa_perms *perms,
* @perms: permission table indexed by the matched state accept entry of @dfa
* @trans: transition table for indexed by named x transitions
*
- * File permission are determined by matching a path against @dfa and then
+ * File permission are determined by matching a path against @dfa and
* then using the value of the accept entry for the matching state as
* an index into @perms. If a named exec transition is required it is
* looked up in the transition table.
diff --git a/security/apparmor/include/label.h b/security/apparmor/include/label.h
index 1e90384b1523ac..9101c2c76d9e7e 100644
--- a/security/apparmor/include/label.h
+++ b/security/apparmor/include/label.h
@@ -77,10 +77,6 @@ struct aa_labelset {
#define __labelset_for_each(LS, N) \
for ((N) = rb_first(&(LS)->root); (N); (N) = rb_next(N))
-void aa_labelset_destroy(struct aa_labelset *ls);
-void aa_labelset_init(struct aa_labelset *ls);
-
-
enum label_flags {
FLAG_HAT = 1, /* profile is a hat */
FLAG_UNCONFINED = 2, /* label unconfined only if all */
@@ -148,6 +144,7 @@ do { \
#define __label_make_stale(X) ((X)->flags |= FLAG_STALE)
#define labels_ns(X) (vec_ns(&((X)->vec[0]), (X)->size))
#define labels_set(X) (&labels_ns(X)->labels)
+#define labels_view(X) labels_ns(X)
#define labels_profile(X) ((X)->vec[(X)->size - 1])
diff --git a/security/apparmor/include/lib.h b/security/apparmor/include/lib.h
index 7d27db740bc2f1..e2e8df0c6f1c93 100644
--- a/security/apparmor/include/lib.h
+++ b/security/apparmor/include/lib.h
@@ -31,12 +31,17 @@
#define AA_WARN(X) WARN((X), "APPARMOR WARN %s: %s\n", __func__, #X)
-#define AA_BUG(X, args...) AA_BUG_FMT((X), "" args)
+#define AA_BUG(X, args...) \
+ do { \
+ _Pragma("GCC diagnostic ignored \"-Wformat-zero-length\""); \
+ AA_BUG_FMT((X), "" args); \
+ _Pragma("GCC diagnostic warning \"-Wformat-zero-length\""); \
+ } while (0)
#ifdef CONFIG_SECURITY_APPARMOR_DEBUG_ASSERTS
#define AA_BUG_FMT(X, fmt, args...) \
WARN((X), "AppArmor WARN %s: (" #X "): " fmt, __func__, ##args)
#else
-#define AA_BUG_FMT(X, fmt, args...)
+#define AA_BUG_FMT(X, fmt, args...) no_printk(fmt, ##args)
#endif
#define AA_ERROR(fmt, args...) \
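With the change above, the non-debug AA_BUG_FMT() expands to no_printk() instead of nothing at all, so the format string and arguments are still type-checked in every configuration even though no code is emitted. A standalone sketch of the same trick; the names below (my_dbg, MY_DEBUG, the local no_printk) are illustrative, not the kernel definitions:

#include <stdio.h>

__attribute__((format(printf, 1, 2)))
static inline int no_printk(const char *fmt, ...)
{
	(void)fmt;
	return 0;		/* never prints, but still checks the format */
}

#ifdef MY_DEBUG
#define my_dbg(fmt, ...)	printf(fmt, ##__VA_ARGS__)
#else
#define my_dbg(fmt, ...)	no_printk(fmt, ##__VA_ARGS__)
#endif

int main(void)
{
	my_dbg("value=%d\n", 42);	/* mismatches are caught even without MY_DEBUG */
	return 0;
}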
diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h
index b5b4b8190e654e..cb5ef21991b72c 100644
--- a/security/apparmor/include/policy.h
+++ b/security/apparmor/include/policy.h
@@ -301,9 +301,11 @@ static inline int AUDIT_MODE(struct aa_profile *profile)
return profile->audit;
}
-bool policy_view_capable(struct aa_ns *ns);
-bool policy_admin_capable(struct aa_ns *ns);
+bool aa_policy_view_capable(struct aa_label *label, struct aa_ns *ns);
+bool aa_policy_admin_capable(struct aa_label *label, struct aa_ns *ns);
int aa_may_manage_policy(struct aa_label *label, struct aa_ns *ns,
u32 mask);
+bool aa_current_policy_view_capable(struct aa_ns *ns);
+bool aa_current_policy_admin_capable(struct aa_ns *ns);
#endif /* __AA_POLICY_H */
diff --git a/security/apparmor/label.c b/security/apparmor/label.c
index e68bcedca976b9..0b0265da19267a 100644
--- a/security/apparmor/label.c
+++ b/security/apparmor/label.c
@@ -425,8 +425,7 @@ struct aa_label *aa_label_alloc(int size, struct aa_proxy *proxy, gfp_t gfp)
AA_BUG(size < 1);
/* + 1 for null terminator entry on vec */
- new = kzalloc(sizeof(*new) + sizeof(struct aa_profile *) * (size + 1),
- gfp);
+ new = kzalloc(struct_size(new, vec, size + 1), gfp);
AA_DEBUG("%s (%p)\n", __func__, new);
if (!new)
goto fail;
@@ -1454,7 +1453,7 @@ bool aa_update_label_name(struct aa_ns *ns, struct aa_label *label, gfp_t gfp)
if (label->hname || labels_ns(label) != ns)
return res;
- if (aa_label_acntsxprint(&name, ns, label, FLAGS_NONE, gfp) == -1)
+ if (aa_label_acntsxprint(&name, ns, label, FLAGS_NONE, gfp) < 0)
return res;
ls = labels_set(label);
@@ -1704,7 +1703,7 @@ int aa_label_asxprint(char **strp, struct aa_ns *ns, struct aa_label *label,
/**
* aa_label_acntsxprint - allocate a __counted string buffer and print label
- * @strp: buffer to write to. (MAY BE NULL if @size == 0)
+ * @strp: buffer to write to.
* @ns: namespace profile is being viewed from
* @label: label to view (NOT NULL)
* @flags: flags controlling what label info is printed
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index f72406fe1bf273..0d6585056f3df5 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -1402,7 +1402,7 @@ static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp
{
if (!apparmor_enabled)
return -EINVAL;
- if (apparmor_initialized && !policy_admin_capable(NULL))
+ if (apparmor_initialized && !aa_current_policy_admin_capable(NULL))
return -EPERM;
return param_set_bool(val, kp);
}
@@ -1411,7 +1411,7 @@ static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp)
{
if (!apparmor_enabled)
return -EINVAL;
- if (apparmor_initialized && !policy_view_capable(NULL))
+ if (apparmor_initialized && !aa_current_policy_view_capable(NULL))
return -EPERM;
return param_get_bool(buffer, kp);
}
@@ -1420,7 +1420,7 @@ static int param_set_aabool(const char *val, const struct kernel_param *kp)
{
if (!apparmor_enabled)
return -EINVAL;
- if (apparmor_initialized && !policy_admin_capable(NULL))
+ if (apparmor_initialized && !aa_current_policy_admin_capable(NULL))
return -EPERM;
return param_set_bool(val, kp);
}
@@ -1429,7 +1429,7 @@ static int param_get_aabool(char *buffer, const struct kernel_param *kp)
{
if (!apparmor_enabled)
return -EINVAL;
- if (apparmor_initialized && !policy_view_capable(NULL))
+ if (apparmor_initialized && !aa_current_policy_view_capable(NULL))
return -EPERM;
return param_get_bool(buffer, kp);
}
@@ -1455,7 +1455,7 @@ static int param_get_aauint(char *buffer, const struct kernel_param *kp)
{
if (!apparmor_enabled)
return -EINVAL;
- if (apparmor_initialized && !policy_view_capable(NULL))
+ if (apparmor_initialized && !aa_current_policy_view_capable(NULL))
return -EPERM;
return param_get_uint(buffer, kp);
}
@@ -1526,7 +1526,7 @@ static int param_get_aacompressionlevel(char *buffer,
{
if (!apparmor_enabled)
return -EINVAL;
- if (apparmor_initialized && !policy_view_capable(NULL))
+ if (apparmor_initialized && !aa_current_policy_view_capable(NULL))
return -EPERM;
return param_get_int(buffer, kp);
}
@@ -1535,7 +1535,7 @@ static int param_get_audit(char *buffer, const struct kernel_param *kp)
{
if (!apparmor_enabled)
return -EINVAL;
- if (apparmor_initialized && !policy_view_capable(NULL))
+ if (apparmor_initialized && !aa_current_policy_view_capable(NULL))
return -EPERM;
return sprintf(buffer, "%s", audit_mode_names[aa_g_audit]);
}
@@ -1548,7 +1548,7 @@ static int param_set_audit(const char *val, const struct kernel_param *kp)
return -EINVAL;
if (!val)
return -EINVAL;
- if (apparmor_initialized && !policy_admin_capable(NULL))
+ if (apparmor_initialized && !aa_current_policy_admin_capable(NULL))
return -EPERM;
i = match_string(audit_mode_names, AUDIT_MAX_INDEX, val);
@@ -1563,7 +1563,7 @@ static int param_get_mode(char *buffer, const struct kernel_param *kp)
{
if (!apparmor_enabled)
return -EINVAL;
- if (apparmor_initialized && !policy_view_capable(NULL))
+ if (apparmor_initialized && !aa_current_policy_view_capable(NULL))
return -EPERM;
return sprintf(buffer, "%s", aa_profile_mode_names[aa_g_profile_mode]);
@@ -1577,7 +1577,7 @@ static int param_set_mode(const char *val, const struct kernel_param *kp)
return -EINVAL;
if (!val)
return -EINVAL;
- if (apparmor_initialized && !policy_admin_capable(NULL))
+ if (apparmor_initialized && !aa_current_policy_admin_capable(NULL))
return -EPERM;
i = match_string(aa_profile_mode_names, APPARMOR_MODE_NAMES_MAX_INDEX,
@@ -1713,7 +1713,7 @@ static int __init alloc_buffers(void)
static int apparmor_dointvec(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
- if (!policy_admin_capable(NULL))
+ if (!aa_current_policy_admin_capable(NULL))
return -EPERM;
if (!apparmor_enabled)
return -EINVAL;
@@ -1773,32 +1773,16 @@ static unsigned int apparmor_ip_postroute(void *priv,
}
-static unsigned int apparmor_ipv4_postroute(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- return apparmor_ip_postroute(priv, skb, state);
-}
-
-#if IS_ENABLED(CONFIG_IPV6)
-static unsigned int apparmor_ipv6_postroute(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- return apparmor_ip_postroute(priv, skb, state);
-}
-#endif
-
static const struct nf_hook_ops apparmor_nf_ops[] = {
{
- .hook = apparmor_ipv4_postroute,
+ .hook = apparmor_ip_postroute,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP_PRI_SELINUX_FIRST,
},
#if IS_ENABLED(CONFIG_IPV6)
{
- .hook = apparmor_ipv6_postroute,
+ .hook = apparmor_ip_postroute,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP6_PRI_SELINUX_FIRST,
diff --git a/security/apparmor/path.c b/security/apparmor/path.c
index b02dfdbff7cd6b..45ec994b558d7b 100644
--- a/security/apparmor/path.c
+++ b/security/apparmor/path.c
@@ -83,7 +83,7 @@ static int disconnect(const struct path *path, char *buf, char **name,
*
* Returns: %0 else error code if path lookup fails
* When no error the path name is returned in @name which points to
- * to a position in @buf
+ * a position in @buf
*/
static int d_namespace_path(const struct path *path, char *buf, char **name,
int flags, const char *disconnected)
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
index 4c010c9a6af1d3..b0cbc4906cb3b1 100644
--- a/security/apparmor/policy.c
+++ b/security/apparmor/policy.c
@@ -260,8 +260,7 @@ struct aa_profile *aa_alloc_profile(const char *hname, struct aa_proxy *proxy,
struct aa_profile *profile;
/* freed by free_profile - usually through aa_put_profile */
- profile = kzalloc(sizeof(*profile) + sizeof(struct aa_profile *) * 2,
- gfp);
+ profile = kzalloc(struct_size(profile, label.vec, 2), gfp);
if (!profile)
return NULL;
@@ -632,18 +631,35 @@ static int audit_policy(struct aa_label *label, const char *op,
return error;
}
+/* don't call out to other LSMs in the stack for apparmor policy admin
+ * permissions
+ */
+static int policy_ns_capable(struct aa_label *label,
+ struct user_namespace *userns, int cap)
+{
+ int err;
+
+ /* check for MAC_ADMIN cap in cred */
+ err = cap_capable(current_cred(), userns, cap, CAP_OPT_NONE);
+ if (!err)
+ err = aa_capable(label, cap, CAP_OPT_NONE);
+
+ return err;
+}
+
/**
- * policy_view_capable - check if viewing policy in at @ns is allowed
- * ns: namespace being viewed by current task (may be NULL)
+ * aa_policy_view_capable - check if viewing policy at @ns is allowed
+ * @label: label that is trying to view policy in @ns
+ * @ns: namespace being viewed by @label (may be NULL, meaning @label's own ns)
* Returns: true if viewing policy is allowed
*
* If @ns is NULL then the namespace being viewed is assumed to be the
* tasks current namespace.
*/
-bool policy_view_capable(struct aa_ns *ns)
+bool aa_policy_view_capable(struct aa_label *label, struct aa_ns *ns)
{
struct user_namespace *user_ns = current_user_ns();
- struct aa_ns *view_ns = aa_get_current_ns();
+ struct aa_ns *view_ns = labels_view(label);
bool root_in_user_ns = uid_eq(current_euid(), make_kuid(user_ns, 0)) ||
in_egroup_p(make_kgid(user_ns, 0));
bool response = false;
@@ -655,20 +671,44 @@ bool policy_view_capable(struct aa_ns *ns)
(unprivileged_userns_apparmor_policy != 0 &&
user_ns->level == view_ns->level)))
response = true;
- aa_put_ns(view_ns);
return response;
}
-bool policy_admin_capable(struct aa_ns *ns)
+bool aa_policy_admin_capable(struct aa_label *label, struct aa_ns *ns)
{
struct user_namespace *user_ns = current_user_ns();
- bool capable = ns_capable(user_ns, CAP_MAC_ADMIN);
+ bool capable = policy_ns_capable(label, user_ns, CAP_MAC_ADMIN) == 0;
AA_DEBUG("cap_mac_admin? %d\n", capable);
AA_DEBUG("policy locked? %d\n", aa_g_lock_policy);
- return policy_view_capable(ns) && capable && !aa_g_lock_policy;
+ return aa_policy_view_capable(label, ns) && capable &&
+ !aa_g_lock_policy;
+}
+
+bool aa_current_policy_view_capable(struct aa_ns *ns)
+{
+ struct aa_label *label;
+ bool res;
+
+ label = __begin_current_label_crit_section();
+ res = aa_policy_view_capable(label, ns);
+ __end_current_label_crit_section(label);
+
+ return res;
+}
+
+bool aa_current_policy_admin_capable(struct aa_ns *ns)
+{
+ struct aa_label *label;
+ bool res;
+
+ label = __begin_current_label_crit_section();
+ res = aa_policy_admin_capable(label, ns);
+ __end_current_label_crit_section(label);
+
+ return res;
}
/**
@@ -694,7 +734,7 @@ int aa_may_manage_policy(struct aa_label *label, struct aa_ns *ns, u32 mask)
return audit_policy(label, op, NULL, NULL, "policy_locked",
-EACCES);
- if (!policy_admin_capable(ns))
+ if (!aa_policy_admin_capable(label, ns))
return audit_policy(label, op, NULL, NULL, "not policy admin",
-EACCES);
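
A hedged sketch of the split introduced above: the aa_policy_*_capable() checks now take an explicit label, while the new aa_current_policy_*_capable() wrappers pin the current task's label around the check and drop the reference afterwards. The types and helpers below are simplified stand-ins, not AppArmor's real API.

#include <stdbool.h>
#include <stdio.h>

struct label {
	int refcount;
	bool confined;
};

static struct label task_label = { .refcount = 1, .confined = false };

static struct label *begin_current_label_section(void)
{
	task_label.refcount++;		/* keep the label alive for the check */
	return &task_label;
}

static void end_current_label_section(struct label *l)
{
	l->refcount--;			/* drop the temporary reference */
}

/* label-based check, usable when the caller already holds a label */
static bool policy_view_capable(const struct label *l)
{
	return !l->confined;
}

/* convenience wrapper for "check against whatever the current task runs as" */
static bool current_policy_view_capable(void)
{
	struct label *l = begin_current_label_section();
	bool res = policy_view_capable(l);

	end_current_label_section(l);
	return res;
}

int main(void)
{
	printf("current task may view policy: %s\n",
	       current_policy_view_capable() ? "yes" : "no");
	return 0;
}
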
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index 4e1f96b216a8ba..0acca6f2a93fcb 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -39,7 +39,7 @@
/*
* The AppArmor interface treats data as a type byte followed by the
- * actual data. The interface has the notion of a a named entry
+ * actual data. The interface has the notion of a named entry
* which has a name (AA_NAME typecode followed by name string) followed by
* the entries typecode and data. Named types allow for optional
* elements and extensions to be added and tested for without breaking
diff --git a/security/apparmor/procattr.c b/security/apparmor/procattr.c
index c929bf4a3df1c0..fde332e0ea7da3 100644
--- a/security/apparmor/procattr.c
+++ b/security/apparmor/procattr.c
@@ -21,8 +21,6 @@
* @profile: the profile to print profile info about (NOT NULL)
* @string: Returns - string containing the profile info (NOT NULL)
*
- * Returns: length of @string on success else error on failure
- *
* Requires: profile != NULL
*
* Creates a string containing the namespace_name://profile_name for
diff --git a/security/security.c b/security/security.c
index 95e30fadba7895..c88167a414b411 100644
--- a/security/security.c
+++ b/security/security.c
@@ -2367,9 +2367,9 @@ int security_tun_dev_open(void *security)
}
EXPORT_SYMBOL(security_tun_dev_open);
-int security_sctp_assoc_request(struct sctp_endpoint *ep, struct sk_buff *skb)
+int security_sctp_assoc_request(struct sctp_association *asoc, struct sk_buff *skb)
{
- return call_int_hook(sctp_assoc_request, 0, ep, skb);
+ return call_int_hook(sctp_assoc_request, 0, asoc, skb);
}
EXPORT_SYMBOL(security_sctp_assoc_request);
@@ -2381,10 +2381,10 @@ int security_sctp_bind_connect(struct sock *sk, int optname,
}
EXPORT_SYMBOL(security_sctp_bind_connect);
-void security_sctp_sk_clone(struct sctp_endpoint *ep, struct sock *sk,
+void security_sctp_sk_clone(struct sctp_association *asoc, struct sock *sk,
struct sock *newsk)
{
- call_void_hook(sctp_sk_clone, ep, sk, newsk);
+ call_void_hook(sctp_sk_clone, asoc, sk, newsk);
}
EXPORT_SYMBOL(security_sctp_sk_clone);
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index ea7b2876a5ae26..62d30c0a30c291 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -5339,10 +5339,10 @@ static void selinux_sock_graft(struct sock *sk, struct socket *parent)
* connect(2), sctp_connectx(3) or sctp_sendmsg(3) (with no association
* already present).
*/
-static int selinux_sctp_assoc_request(struct sctp_endpoint *ep,
+static int selinux_sctp_assoc_request(struct sctp_association *asoc,
struct sk_buff *skb)
{
- struct sk_security_struct *sksec = ep->base.sk->sk_security;
+ struct sk_security_struct *sksec = asoc->base.sk->sk_security;
struct common_audit_data ad;
struct lsm_network_audit net = {0,};
u8 peerlbl_active;
@@ -5359,7 +5359,7 @@ static int selinux_sctp_assoc_request(struct sctp_endpoint *ep,
/* This will return peer_sid = SECSID_NULL if there are
* no peer labels, see security_net_peersid_resolve().
*/
- err = selinux_skb_peerlbl_sid(skb, ep->base.sk->sk_family,
+ err = selinux_skb_peerlbl_sid(skb, asoc->base.sk->sk_family,
&peer_sid);
if (err)
return err;
@@ -5383,7 +5383,7 @@ static int selinux_sctp_assoc_request(struct sctp_endpoint *ep,
*/
ad.type = LSM_AUDIT_DATA_NET;
ad.u.net = &net;
- ad.u.net->sk = ep->base.sk;
+ ad.u.net->sk = asoc->base.sk;
err = avc_has_perm(&selinux_state,
sksec->peer_sid, peer_sid, sksec->sclass,
SCTP_SOCKET__ASSOCIATION, &ad);
@@ -5392,7 +5392,7 @@ static int selinux_sctp_assoc_request(struct sctp_endpoint *ep,
}
/* Compute the MLS component for the connection and store
- * the information in ep. This will be used by SCTP TCP type
+ * the information in asoc. This will be used by SCTP TCP type
* sockets and peeled off connections as they cause a new
* socket to be generated. selinux_sctp_sk_clone() will then
* plug this into the new socket.
@@ -5401,11 +5401,11 @@ static int selinux_sctp_assoc_request(struct sctp_endpoint *ep,
if (err)
return err;
- ep->secid = conn_sid;
- ep->peer_secid = peer_sid;
+ asoc->secid = conn_sid;
+ asoc->peer_secid = peer_sid;
/* Set any NetLabel labels including CIPSO/CALIPSO options. */
- return selinux_netlbl_sctp_assoc_request(ep, skb);
+ return selinux_netlbl_sctp_assoc_request(asoc, skb);
}
/* Check if sctp IPv4/IPv6 addresses are valid for binding or connecting
@@ -5490,7 +5490,7 @@ static int selinux_sctp_bind_connect(struct sock *sk, int optname,
}
/* Called whenever a new socket is created by accept(2) or sctp_peeloff(3). */
-static void selinux_sctp_sk_clone(struct sctp_endpoint *ep, struct sock *sk,
+static void selinux_sctp_sk_clone(struct sctp_association *asoc, struct sock *sk,
struct sock *newsk)
{
struct sk_security_struct *sksec = sk->sk_security;
@@ -5502,8 +5502,8 @@ static void selinux_sctp_sk_clone(struct sctp_endpoint *ep, struct sock *sk,
if (!selinux_policycap_extsockclass())
return selinux_sk_clone_security(sk, newsk);
- newsksec->sid = ep->secid;
- newsksec->peer_sid = ep->peer_secid;
+ newsksec->sid = asoc->secid;
+ newsksec->peer_sid = asoc->peer_secid;
newsksec->sclass = sksec->sclass;
selinux_netlbl_sctp_sk_clone(sk, newsk);
}
diff --git a/security/selinux/include/netlabel.h b/security/selinux/include/netlabel.h
index 0c58f62dc6abf8..4d0456d3d45933 100644
--- a/security/selinux/include/netlabel.h
+++ b/security/selinux/include/netlabel.h
@@ -39,7 +39,7 @@ int selinux_netlbl_skbuff_getsid(struct sk_buff *skb,
int selinux_netlbl_skbuff_setsid(struct sk_buff *skb,
u16 family,
u32 sid);
-int selinux_netlbl_sctp_assoc_request(struct sctp_endpoint *ep,
+int selinux_netlbl_sctp_assoc_request(struct sctp_association *asoc,
struct sk_buff *skb);
int selinux_netlbl_inet_conn_request(struct request_sock *req, u16 family);
void selinux_netlbl_inet_csk_clone(struct sock *sk, u16 family);
@@ -98,7 +98,7 @@ static inline int selinux_netlbl_skbuff_setsid(struct sk_buff *skb,
return 0;
}
-static inline int selinux_netlbl_sctp_assoc_request(struct sctp_endpoint *ep,
+static inline int selinux_netlbl_sctp_assoc_request(struct sctp_association *asoc,
struct sk_buff *skb)
{
return 0;
diff --git a/security/selinux/netlabel.c b/security/selinux/netlabel.c
index 29b88e81869beb..1321f15799e2f8 100644
--- a/security/selinux/netlabel.c
+++ b/security/selinux/netlabel.c
@@ -261,30 +261,30 @@ skbuff_setsid_return:
/**
* selinux_netlbl_sctp_assoc_request - Label an incoming sctp association.
- * @ep: incoming association endpoint.
+ * @asoc: incoming association.
* @skb: the packet.
*
* Description:
- * A new incoming connection is represented by @ep, ......
+ * A new incoming connection is represented by @asoc, ......
* Returns zero on success, negative values on failure.
*
*/
-int selinux_netlbl_sctp_assoc_request(struct sctp_endpoint *ep,
+int selinux_netlbl_sctp_assoc_request(struct sctp_association *asoc,
struct sk_buff *skb)
{
int rc;
struct netlbl_lsm_secattr secattr;
- struct sk_security_struct *sksec = ep->base.sk->sk_security;
+ struct sk_security_struct *sksec = asoc->base.sk->sk_security;
struct sockaddr_in addr4;
struct sockaddr_in6 addr6;
- if (ep->base.sk->sk_family != PF_INET &&
- ep->base.sk->sk_family != PF_INET6)
+ if (asoc->base.sk->sk_family != PF_INET &&
+ asoc->base.sk->sk_family != PF_INET6)
return 0;
netlbl_secattr_init(&secattr);
rc = security_netlbl_sid_to_secattr(&selinux_state,
- ep->secid, &secattr);
+ asoc->secid, &secattr);
if (rc != 0)
goto assoc_request_return;
@@ -294,11 +294,11 @@ int selinux_netlbl_sctp_assoc_request(struct sctp_endpoint *ep,
if (ip_hdr(skb)->version == 4) {
addr4.sin_family = AF_INET;
addr4.sin_addr.s_addr = ip_hdr(skb)->saddr;
- rc = netlbl_conn_setattr(ep->base.sk, (void *)&addr4, &secattr);
+ rc = netlbl_conn_setattr(asoc->base.sk, (void *)&addr4, &secattr);
} else if (IS_ENABLED(CONFIG_IPV6) && ip_hdr(skb)->version == 6) {
addr6.sin6_family = AF_INET6;
addr6.sin6_addr = ipv6_hdr(skb)->saddr;
- rc = netlbl_conn_setattr(ep->base.sk, (void *)&addr6, &secattr);
+ rc = netlbl_conn_setattr(asoc->base.sk, (void *)&addr6, &secattr);
} else {
rc = -EAFNOSUPPORT;
}
diff --git a/sound/core/Makefile b/sound/core/Makefile
index 350d704ced9846..79e1407cd0de79 100644
--- a/sound/core/Makefile
+++ b/sound/core/Makefile
@@ -19,6 +19,7 @@ snd-$(CONFIG_SND_JACK) += ctljack.o jack.o
snd-pcm-y := pcm.o pcm_native.o pcm_lib.o pcm_misc.o \
pcm_memory.o memalloc.o
snd-pcm-$(CONFIG_SND_PCM_TIMER) += pcm_timer.o
+snd-pcm-$(CONFIG_SND_DMA_SGBUF) += sgbuf.o
snd-pcm-$(CONFIG_SND_PCM_ELD) += pcm_drm_eld.o
snd-pcm-$(CONFIG_SND_PCM_IEC958) += pcm_iec958.o
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index 99cd0f67daa1e7..9fc971a704a9ea 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -183,8 +183,11 @@ EXPORT_SYMBOL_GPL(snd_devm_alloc_dir_pages);
int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
struct vm_area_struct *area)
{
- const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
+ const struct snd_malloc_ops *ops;
+ if (!dmab)
+ return -ENOENT;
+ ops = snd_dma_get_ops(dmab);
if (ops && ops->mmap)
return ops->mmap(dmab, area);
else
@@ -549,60 +552,73 @@ static void snd_dma_noncontig_sync(struct snd_dma_buffer *dmab,
}
}
-static const struct snd_malloc_ops snd_dma_noncontig_ops = {
- .alloc = snd_dma_noncontig_alloc,
- .free = snd_dma_noncontig_free,
- .mmap = snd_dma_noncontig_mmap,
- .sync = snd_dma_noncontig_sync,
- /* re-use vmalloc helpers for get_* ops */
- .get_addr = snd_dma_vmalloc_get_addr,
- .get_page = snd_dma_vmalloc_get_page,
- .get_chunk_size = snd_dma_vmalloc_get_chunk_size,
-};
+static inline void snd_dma_noncontig_iter_set(struct snd_dma_buffer *dmab,
+ struct sg_page_iter *piter,
+ size_t offset)
+{
+ struct sg_table *sgt = dmab->private_data;
-/* x86-specific SG-buffer with WC pages */
-#ifdef CONFIG_SND_DMA_SGBUF
-#define vmalloc_to_virt(v) (unsigned long)page_to_virt(vmalloc_to_page(v))
+ __sg_page_iter_start(piter, sgt->sgl, sgt->orig_nents,
+ offset >> PAGE_SHIFT);
+}
-static void *snd_dma_sg_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
+static dma_addr_t snd_dma_noncontig_get_addr(struct snd_dma_buffer *dmab,
+ size_t offset)
{
- void *p = snd_dma_noncontig_alloc(dmab, size);
- size_t ofs;
+ struct sg_dma_page_iter iter;
- if (!p)
- return NULL;
- for (ofs = 0; ofs < size; ofs += PAGE_SIZE)
- set_memory_uc(vmalloc_to_virt(p + ofs), 1);
- return p;
+ snd_dma_noncontig_iter_set(dmab, &iter.base, offset);
+ __sg_page_iter_dma_next(&iter);
+ return sg_page_iter_dma_address(&iter) + offset % PAGE_SIZE;
}
-static void snd_dma_sg_wc_free(struct snd_dma_buffer *dmab)
+static struct page *snd_dma_noncontig_get_page(struct snd_dma_buffer *dmab,
+ size_t offset)
{
- size_t ofs;
+ struct sg_page_iter iter;
- for (ofs = 0; ofs < dmab->bytes; ofs += PAGE_SIZE)
- set_memory_wb(vmalloc_to_virt(dmab->area + ofs), 1);
- snd_dma_noncontig_free(dmab);
+ snd_dma_noncontig_iter_set(dmab, &iter, offset);
+ __sg_page_iter_next(&iter);
+ return sg_page_iter_page(&iter);
}
-static int snd_dma_sg_wc_mmap(struct snd_dma_buffer *dmab,
- struct vm_area_struct *area)
+static unsigned int
+snd_dma_noncontig_get_chunk_size(struct snd_dma_buffer *dmab,
+ unsigned int ofs, unsigned int size)
{
- area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
- /* FIXME: dma_mmap_noncontiguous() works? */
- return -ENOENT; /* continue with the default mmap handler */
+ struct sg_dma_page_iter iter;
+ unsigned int start, end;
+ unsigned long addr;
+
+ start = ALIGN_DOWN(ofs, PAGE_SIZE);
+ end = ofs + size - 1; /* the last byte address */
+ snd_dma_noncontig_iter_set(dmab, &iter.base, start);
+ if (!__sg_page_iter_dma_next(&iter))
+ return 0;
+ /* check page continuity */
+ addr = sg_page_iter_dma_address(&iter);
+ for (;;) {
+ start += PAGE_SIZE;
+ if (start > end)
+ break;
+ addr += PAGE_SIZE;
+ if (!__sg_page_iter_dma_next(&iter) ||
+ sg_page_iter_dma_address(&iter) != addr)
+ return start - ofs;
+ }
+ /* ok, all on continuous pages */
+ return size;
}
-const struct snd_malloc_ops snd_dma_sg_wc_ops = {
- .alloc = snd_dma_sg_wc_alloc,
- .free = snd_dma_sg_wc_free,
- .mmap = snd_dma_sg_wc_mmap,
+static const struct snd_malloc_ops snd_dma_noncontig_ops = {
+ .alloc = snd_dma_noncontig_alloc,
+ .free = snd_dma_noncontig_free,
+ .mmap = snd_dma_noncontig_mmap,
.sync = snd_dma_noncontig_sync,
- .get_addr = snd_dma_vmalloc_get_addr,
- .get_page = snd_dma_vmalloc_get_page,
- .get_chunk_size = snd_dma_vmalloc_get_chunk_size,
+ .get_addr = snd_dma_noncontig_get_addr,
+ .get_page = snd_dma_noncontig_get_page,
+ .get_chunk_size = snd_dma_noncontig_get_chunk_size,
};
-#endif /* CONFIG_SND_DMA_SGBUF */
/*
* Non-coherent pages allocator
@@ -663,17 +679,20 @@ static const struct snd_malloc_ops *dma_ops[] = {
[SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops,
[SNDRV_DMA_TYPE_NONCONTIG] = &snd_dma_noncontig_ops,
[SNDRV_DMA_TYPE_NONCOHERENT] = &snd_dma_noncoherent_ops,
-#ifdef CONFIG_SND_DMA_SGBUF
- [SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_wc_ops,
-#endif
#ifdef CONFIG_GENERIC_ALLOCATOR
[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
#endif /* CONFIG_GENERIC_ALLOCATOR */
#endif /* CONFIG_HAS_DMA */
+#ifdef CONFIG_SND_DMA_SGBUF
+ [SNDRV_DMA_TYPE_DEV_SG] = &snd_dma_sg_ops,
+ [SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_ops,
+#endif
};
static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
{
+ if (WARN_ON_ONCE(!dmab))
+ return NULL;
if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
dmab->dev.type >= ARRAY_SIZE(dma_ops)))
return NULL;
diff --git a/sound/core/sgbuf.c b/sound/core/sgbuf.c
new file mode 100644
index 00000000000000..8352a5cdb19f57
--- /dev/null
+++ b/sound/core/sgbuf.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Scatter-Gather buffer
+ *
+ * Copyright (c) by Takashi Iwai <tiwai@suse.de>
+ */
+
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/export.h>
+#include <sound/memalloc.h>
+#include "memalloc_local.h"
+
+struct snd_sg_page {
+ void *buf;
+ dma_addr_t addr;
+};
+
+struct snd_sg_buf {
+ int size; /* allocated byte size */
+ int pages; /* allocated pages */
+ int tblsize; /* allocated table size */
+ struct snd_sg_page *table; /* address table */
+ struct page **page_table; /* page table (for vmap/vunmap) */
+ struct device *dev;
+};
+
+/* table entries are aligned to 32 */
+#define SGBUF_TBL_ALIGN 32
+#define sgbuf_align_table(tbl) ALIGN((tbl), SGBUF_TBL_ALIGN)
+
+static void snd_dma_sg_free(struct snd_dma_buffer *dmab)
+{
+ struct snd_sg_buf *sgbuf = dmab->private_data;
+ struct snd_dma_buffer tmpb;
+ int i;
+
+ if (!sgbuf)
+ return;
+
+ vunmap(dmab->area);
+ dmab->area = NULL;
+
+ tmpb.dev.type = SNDRV_DMA_TYPE_DEV;
+ if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
+ tmpb.dev.type = SNDRV_DMA_TYPE_DEV_WC;
+ tmpb.dev.dev = sgbuf->dev;
+ for (i = 0; i < sgbuf->pages; i++) {
+ if (!(sgbuf->table[i].addr & ~PAGE_MASK))
+ continue; /* continuous pages */
+ tmpb.area = sgbuf->table[i].buf;
+ tmpb.addr = sgbuf->table[i].addr & PAGE_MASK;
+ tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT;
+ snd_dma_free_pages(&tmpb);
+ }
+
+ kfree(sgbuf->table);
+ kfree(sgbuf->page_table);
+ kfree(sgbuf);
+ dmab->private_data = NULL;
+}
+
+#define MAX_ALLOC_PAGES 32
+
+static void *snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+ struct snd_sg_buf *sgbuf;
+ unsigned int i, pages, chunk, maxpages;
+ struct snd_dma_buffer tmpb;
+ struct snd_sg_page *table;
+ struct page **pgtable;
+ int type = SNDRV_DMA_TYPE_DEV;
+ pgprot_t prot = PAGE_KERNEL;
+ void *area;
+
+ dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
+ if (!sgbuf)
+ return NULL;
+ if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG) {
+ type = SNDRV_DMA_TYPE_DEV_WC;
+#ifdef pgprot_noncached
+ prot = pgprot_noncached(PAGE_KERNEL);
+#endif
+ }
+ sgbuf->dev = dmab->dev.dev;
+ pages = snd_sgbuf_aligned_pages(size);
+ sgbuf->tblsize = sgbuf_align_table(pages);
+ table = kcalloc(sgbuf->tblsize, sizeof(*table), GFP_KERNEL);
+ if (!table)
+ goto _failed;
+ sgbuf->table = table;
+ pgtable = kcalloc(sgbuf->tblsize, sizeof(*pgtable), GFP_KERNEL);
+ if (!pgtable)
+ goto _failed;
+ sgbuf->page_table = pgtable;
+
+ /* allocate pages */
+ maxpages = MAX_ALLOC_PAGES;
+ while (pages > 0) {
+ chunk = pages;
+ /* don't be too eager to take a huge chunk */
+ if (chunk > maxpages)
+ chunk = maxpages;
+ chunk <<= PAGE_SHIFT;
+ if (snd_dma_alloc_pages_fallback(type, dmab->dev.dev,
+ chunk, &tmpb) < 0) {
+ if (!sgbuf->pages)
+ goto _failed;
+ size = sgbuf->pages * PAGE_SIZE;
+ break;
+ }
+ chunk = tmpb.bytes >> PAGE_SHIFT;
+ for (i = 0; i < chunk; i++) {
+ table->buf = tmpb.area;
+ table->addr = tmpb.addr;
+ if (!i)
+ table->addr |= chunk; /* mark head */
+ table++;
+ *pgtable++ = virt_to_page(tmpb.area);
+ tmpb.area += PAGE_SIZE;
+ tmpb.addr += PAGE_SIZE;
+ }
+ sgbuf->pages += chunk;
+ pages -= chunk;
+ if (chunk < maxpages)
+ maxpages = chunk;
+ }
+
+ sgbuf->size = size;
+ area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, prot);
+ if (!area)
+ goto _failed;
+ return area;
+
+ _failed:
+ snd_dma_sg_free(dmab); /* free the table */
+ return NULL;
+}
+
+static dma_addr_t snd_dma_sg_get_addr(struct snd_dma_buffer *dmab,
+ size_t offset)
+{
+ struct snd_sg_buf *sgbuf = dmab->private_data;
+ dma_addr_t addr;
+
+ addr = sgbuf->table[offset >> PAGE_SHIFT].addr;
+ addr &= ~((dma_addr_t)PAGE_SIZE - 1);
+ return addr + offset % PAGE_SIZE;
+}
+
+static struct page *snd_dma_sg_get_page(struct snd_dma_buffer *dmab,
+ size_t offset)
+{
+ struct snd_sg_buf *sgbuf = dmab->private_data;
+ unsigned int idx = offset >> PAGE_SHIFT;
+
+ if (idx >= (unsigned int)sgbuf->pages)
+ return NULL;
+ return sgbuf->page_table[idx];
+}
+
+static unsigned int snd_dma_sg_get_chunk_size(struct snd_dma_buffer *dmab,
+ unsigned int ofs,
+ unsigned int size)
+{
+ struct snd_sg_buf *sg = dmab->private_data;
+ unsigned int start, end, pg;
+
+ start = ofs >> PAGE_SHIFT;
+ end = (ofs + size - 1) >> PAGE_SHIFT;
+ /* check page continuity */
+ pg = sg->table[start].addr >> PAGE_SHIFT;
+ for (;;) {
+ start++;
+ if (start > end)
+ break;
+ pg++;
+ if ((sg->table[start].addr >> PAGE_SHIFT) != pg)
+ return (start << PAGE_SHIFT) - ofs;
+ }
+ /* ok, all on continuous pages */
+ return size;
+}
+
+static int snd_dma_sg_mmap(struct snd_dma_buffer *dmab,
+ struct vm_area_struct *area)
+{
+ if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
+ area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
+ return -ENOENT; /* continue with the default mmap handler */
+}
+
+const struct snd_malloc_ops snd_dma_sg_ops = {
+ .alloc = snd_dma_sg_alloc,
+ .free = snd_dma_sg_free,
+ .get_addr = snd_dma_sg_get_addr,
+ .get_page = snd_dma_sg_get_page,
+ .get_chunk_size = snd_dma_sg_get_chunk_size,
+ .mmap = snd_dma_sg_mmap,
+};
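
A hedged note on the table layout used by the restored allocator above: each page gets an entry carrying its DMA address, and the first entry of every allocated chunk also stores the chunk's page count in the low, sub-page bits of that address; snd_dma_sg_free() uses those bits to find chunk heads and derives the chunk size in bytes by shifting them by PAGE_SHIFT. The standalone sketch below only demonstrates that encoding, with simplified constants and types.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1ULL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	uint64_t table[8];
	uint64_t dma = 0x40000000ULL;	/* pretend DMA address of one chunk */
	unsigned int chunk = 4, i;

	/* fill per-page entries; mark the head with the chunk page count */
	for (i = 0; i < chunk; i++) {
		table[i] = dma + (uint64_t)i * PAGE_SIZE;
		if (i == 0)
			table[i] |= chunk;
	}

	/* walk the table the way the free path does: heads have low bits set */
	for (i = 0; i < chunk; i++) {
		if (!(table[i] & ~PAGE_MASK))
			continue;	/* not a chunk head */
		printf("chunk head: addr=%#llx, pages=%llu, bytes=%llu\n",
		       (unsigned long long)(table[i] & PAGE_MASK),
		       (unsigned long long)(table[i] & ~PAGE_MASK),
		       (unsigned long long)((table[i] & ~PAGE_MASK) << PAGE_SHIFT));
	}
	return 0;
}
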
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 92b7008fcdb86a..b3214baa89193a 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -624,13 +624,13 @@ static int snd_timer_stop1(struct snd_timer_instance *timeri, bool stop)
if (!timer)
return -EINVAL;
spin_lock_irqsave(&timer->lock, flags);
+ list_del_init(&timeri->ack_list);
+ list_del_init(&timeri->active_list);
if (!(timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
SNDRV_TIMER_IFLG_START))) {
result = -EBUSY;
goto unlock;
}
- list_del_init(&timeri->ack_list);
- list_del_init(&timeri->active_list);
if (timer->card && timer->card->shutdown)
goto unlock;
if (stop) {
@@ -665,23 +665,22 @@ static int snd_timer_stop1(struct snd_timer_instance *timeri, bool stop)
static int snd_timer_stop_slave(struct snd_timer_instance *timeri, bool stop)
{
unsigned long flags;
+ bool running;
spin_lock_irqsave(&slave_active_lock, flags);
- if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) {
- spin_unlock_irqrestore(&slave_active_lock, flags);
- return -EBUSY;
- }
+ running = timeri->flags & SNDRV_TIMER_IFLG_RUNNING;
timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
if (timeri->timer) {
spin_lock(&timeri->timer->lock);
list_del_init(&timeri->ack_list);
list_del_init(&timeri->active_list);
- snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
- SNDRV_TIMER_EVENT_PAUSE);
+ if (running)
+ snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
+ SNDRV_TIMER_EVENT_PAUSE);
spin_unlock(&timeri->timer->lock);
}
spin_unlock_irqrestore(&slave_active_lock, flags);
- return 0;
+ return running ? 0 : -EBUSY;
}
/*
diff --git a/sound/firewire/Kconfig b/sound/firewire/Kconfig
index fd109bea4c53b4..22b6c779682a3e 100644
--- a/sound/firewire/Kconfig
+++ b/sound/firewire/Kconfig
@@ -169,6 +169,7 @@ config SND_FIREWIRE_MOTU
* 828
* 896
* 828mk2
+ * 896hd
* Traveler
* Ultralite
* 8pre
@@ -176,7 +177,9 @@ config SND_FIREWIRE_MOTU
* 828mk3 (Hybrid)
* Ultralite mk3 (FireWire only)
* Ultralite mk3 (Hybrid)
+ * Traveler mk3
* Audio Express
+ * Track 16
* 4pre
To compile this driver as a module, choose M here: the module
diff --git a/sound/firewire/fireworks/fireworks_stream.c b/sound/firewire/fireworks/fireworks_stream.c
index ac66f08acd6bce..53dbd4d4b0d02b 100644
--- a/sound/firewire/fireworks/fireworks_stream.c
+++ b/sound/firewire/fireworks/fireworks_stream.c
@@ -50,8 +50,9 @@ static int init_stream(struct snd_efw *efw, struct amdtp_stream *stream)
efw->firmware_version == 0x5070300 ||
efw->firmware_version == 0x5080000))
efw->tx_stream.flags |= CIP_UNALIGHED_DBC;
- // AudioFire9 always reports wrong dbs.
- if (efw->is_af9)
+ // AudioFire9 always reports wrong dbs. Onyx 1200F with the latest firmware (v4.6.0)
+ // also reports wrong dbs at 88.2 kHz or greater.
+ if (efw->is_af9 || efw->firmware_version == 0x4060000)
efw->tx_stream.flags |= CIP_WRONG_DBS;
// Firmware version 5.5 reports fixed interval for dbc.
if (efw->firmware_version == 0x5050000)
diff --git a/sound/firewire/motu/motu-protocol-v3.c b/sound/firewire/motu/motu-protocol-v3.c
index 05608e8ca0bcce..8a0426920a760b 100644
--- a/sound/firewire/motu/motu-protocol-v3.c
+++ b/sound/firewire/motu/motu-protocol-v3.c
@@ -16,6 +16,7 @@
#define V3_CLOCK_SRC_INTERNAL 0x00
#define V3_CLOCK_SRC_WORD_ON_BNC 0x01
#define V3_CLOCK_SRC_SPH 0x02
+#define V3_CLOCK_SRC_AESEBU_ON_XLR 0x08
#define V3_CLOCK_SRC_SPDIF_ON_COAX 0x10
#define V3_CLOCK_SRC_OPT_IFACE_A 0x18
#define V3_CLOCK_SRC_OPT_IFACE_B 0x19
@@ -126,6 +127,9 @@ int snd_motu_protocol_v3_get_clock_source(struct snd_motu *motu,
case V3_CLOCK_SRC_SPH:
*src = SND_MOTU_CLOCK_SOURCE_SPH;
break;
+ case V3_CLOCK_SRC_AESEBU_ON_XLR:
+ *src = SND_MOTU_CLOCK_SOURCE_AESEBU_ON_XLR;
+ break;
case V3_CLOCK_SRC_SPDIF_ON_COAX:
*src = SND_MOTU_CLOCK_SOURCE_SPDIF_ON_COAX;
break;
@@ -185,7 +189,7 @@ int snd_motu_protocol_v3_switch_fetching_mode(struct snd_motu *motu,
sizeof(reg));
}
-static int detect_packet_formats_828mk3(struct snd_motu *motu, u32 data)
+static int detect_packet_formats_with_opt_ifaces(struct snd_motu *motu, u32 data)
{
if (data & V3_ENABLE_OPT_IN_IFACE_A) {
if (data & V3_NO_ADAT_OPT_IN_IFACE_A) {
@@ -255,8 +259,11 @@ int snd_motu_protocol_v3_cache_packet_formats(struct snd_motu *motu)
motu->spec->rx_fixed_pcm_chunks,
sizeof(motu->rx_packet_formats.pcm_chunks));
- if (motu->spec == &snd_motu_spec_828mk3_fw || motu->spec == &snd_motu_spec_828mk3_hybrid)
- return detect_packet_formats_828mk3(motu, data);
+ if (motu->spec == &snd_motu_spec_828mk3_fw ||
+ motu->spec == &snd_motu_spec_828mk3_hybrid ||
+ motu->spec == &snd_motu_spec_traveler_mk3 ||
+ motu->spec == &snd_motu_spec_track16)
+ return detect_packet_formats_with_opt_ifaces(motu, data);
else
return 0;
}
@@ -281,6 +288,16 @@ const struct snd_motu_spec snd_motu_spec_828mk3_hybrid = {
.rx_fixed_pcm_chunks = {14, 14, 14}, // Additional 4 dummy chunks at higher rate.
};
+const struct snd_motu_spec snd_motu_spec_traveler_mk3 = {
+ .name = "TravelerMk3",
+ .protocol_version = SND_MOTU_PROTOCOL_V3,
+ .flags = SND_MOTU_SPEC_RX_MIDI_3RD_Q |
+ SND_MOTU_SPEC_TX_MIDI_3RD_Q |
+ SND_MOTU_SPEC_COMMAND_DSP,
+ .tx_fixed_pcm_chunks = {18, 14, 10},
+ .rx_fixed_pcm_chunks = {14, 14, 10},
+};
+
const struct snd_motu_spec snd_motu_spec_ultralite_mk3 = {
.name = "UltraLiteMk3",
.protocol_version = SND_MOTU_PROTOCOL_V3,
@@ -301,6 +318,16 @@ const struct snd_motu_spec snd_motu_spec_audio_express = {
.rx_fixed_pcm_chunks = {10, 10, 0},
};
+const struct snd_motu_spec snd_motu_spec_track16 = {
+ .name = "Track16",
+ .protocol_version = SND_MOTU_PROTOCOL_V3,
+ .flags = SND_MOTU_SPEC_RX_MIDI_3RD_Q |
+ SND_MOTU_SPEC_TX_MIDI_3RD_Q |
+ SND_MOTU_SPEC_COMMAND_DSP,
+ .tx_fixed_pcm_chunks = {14, 14, 14},
+ .rx_fixed_pcm_chunks = {6, 6, 6},
+};
+
const struct snd_motu_spec snd_motu_spec_4pre = {
.name = "4pre",
.protocol_version = SND_MOTU_PROTOCOL_V3,
diff --git a/sound/firewire/motu/motu.c b/sound/firewire/motu/motu.c
index 5fc7ae47553736..f8b7fe38751c64 100644
--- a/sound/firewire/motu/motu.c
+++ b/sound/firewire/motu/motu.c
@@ -169,9 +169,11 @@ static const struct ieee1394_device_id motu_id_table[] = {
SND_MOTU_DEV_ENTRY(0x00000f, &snd_motu_spec_8pre),
SND_MOTU_DEV_ENTRY(0x000015, &snd_motu_spec_828mk3_fw), // FireWire only.
SND_MOTU_DEV_ENTRY(0x000019, &snd_motu_spec_ultralite_mk3), // FireWire only.
+ SND_MOTU_DEV_ENTRY(0x00001b, &snd_motu_spec_traveler_mk3),
SND_MOTU_DEV_ENTRY(0x000030, &snd_motu_spec_ultralite_mk3), // Hybrid.
SND_MOTU_DEV_ENTRY(0x000035, &snd_motu_spec_828mk3_hybrid), // Hybrid.
SND_MOTU_DEV_ENTRY(0x000033, &snd_motu_spec_audio_express),
+ SND_MOTU_DEV_ENTRY(0x000039, &snd_motu_spec_track16),
SND_MOTU_DEV_ENTRY(0x000045, &snd_motu_spec_4pre),
{ }
};
diff --git a/sound/firewire/motu/motu.h b/sound/firewire/motu/motu.h
index 79704ae6a73e3b..4189f2192284ab 100644
--- a/sound/firewire/motu/motu.h
+++ b/sound/firewire/motu/motu.h
@@ -138,8 +138,10 @@ extern const struct snd_motu_spec snd_motu_spec_8pre;
extern const struct snd_motu_spec snd_motu_spec_828mk3_fw;
extern const struct snd_motu_spec snd_motu_spec_828mk3_hybrid;
+extern const struct snd_motu_spec snd_motu_spec_traveler_mk3;
extern const struct snd_motu_spec snd_motu_spec_ultralite_mk3;
extern const struct snd_motu_spec snd_motu_spec_audio_express;
+extern const struct snd_motu_spec snd_motu_spec_track16;
extern const struct snd_motu_spec snd_motu_spec_4pre;
int amdtp_motu_init(struct amdtp_stream *s, struct fw_unit *unit,
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 7762718cf42936..fe51163f2d82df 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2327,7 +2327,8 @@ static int azx_probe_continue(struct azx *chip)
out_free:
if (err < 0) {
- azx_free(chip);
+ pci_set_drvdata(pci, NULL);
+ snd_card_free(chip->card);
return err;
}
@@ -2364,6 +2365,7 @@ static void azx_remove(struct pci_dev *pci)
cancel_delayed_work_sync(&hda->probe_work);
device_lock(&pci->dev);
+ pci_set_drvdata(pci, NULL);
snd_card_free(card);
}
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 8a3e2fe4210625..2f1727faec698c 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6739,6 +6739,7 @@ enum {
ALC287_FIXUP_YOGA7_14ITL_SPEAKERS,
ALC287_FIXUP_13S_GEN2_SPEAKERS,
ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS,
+ ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -8450,6 +8451,15 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc245_fixup_hp_gpio_led,
},
+ [ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x19, 0x03a11120 }, /* use as headset mic, without its own jack detect */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC,
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -8486,6 +8496,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC),
SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1025, 0x141f, "Acer Spin SP513-54N", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1025, 0x142b, "Acer Swift SF314-42", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1025, 0x1430, "Acer TravelMate B311R-31", ALC256_FIXUP_ACER_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1025, 0x1466, "Acer Aspire A515-56", ALC255_FIXUP_ACER_HEADPHONE_AND_MIC),
@@ -8625,6 +8636,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8716, "HP Elite Dragonfly G2 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
SND_PCI_QUIRK(0x103c, 0x8720, "HP EliteBook x360 1040 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
SND_PCI_QUIRK(0x103c, 0x8724, "HP EliteBook 850 G7", ALC285_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8728, "HP EliteBook 840 G7", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8730, "HP ProBook 445 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT),
@@ -8687,6 +8699,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x194e, "ASUS UX563FD", ALC294_FIXUP_ASUS_HPE),
+ SND_PCI_QUIRK(0x1043, 0x1970, "ASUS UX550VE", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x1982, "ASUS B1400CEPE", ALC256_FIXUP_ASUS_HPE),
SND_PCI_QUIRK(0x1043, 0x19ce, "ASUS B9450FA", ALC294_FIXUP_ASUS_HPE),
SND_PCI_QUIRK(0x1043, 0x19e1, "ASUS UX581LV", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE),
@@ -8750,11 +8763,15 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1558, 0x40a1, "Clevo NL40GU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x40c1, "Clevo NL40[CZ]U", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x40d1, "Clevo NL41DU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x5015, "Clevo NH5[58]H[HJK]Q", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x5017, "Clevo NH7[79]H[HJK]Q", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x50a3, "Clevo NJ51GU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x50b3, "Clevo NK50S[BEZ]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x50b6, "Clevo NK50S5", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x50b8, "Clevo NK50SZ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x50d5, "Clevo NP50D5", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x50e1, "Clevo NH5[58]HPQ", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x50e2, "Clevo NH7[79]HPQ", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x50f0, "Clevo NH50A[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x50f2, "Clevo NH50E[PR]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x50f3, "Clevo NH58DPQ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index 75aa2ea733a59f..96c12dfb24cf9d 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -468,8 +468,11 @@ struct hdsp {
unsigned char ss_out_channels;
u32 io_loopback; /* output loopback channel states*/
- struct snd_dma_buffer *capture_dma_buf;
- struct snd_dma_buffer *playback_dma_buf;
+ /* DMA buffers; these are copies of the original snd_dma_buffer objects
+ * (which are managed via devres), adjusted for address alignment
+ */
+ struct snd_dma_buffer capture_dma_buf;
+ struct snd_dma_buffer playback_dma_buf;
unsigned char *capture_buffer; /* suitably aligned address */
unsigned char *playback_buffer; /* suitably aligned address */
@@ -3764,30 +3767,32 @@ static void snd_hdsp_proc_init(struct hdsp *hdsp)
static int snd_hdsp_initialize_memory(struct hdsp *hdsp)
{
- unsigned long pb_bus, cb_bus;
+ struct snd_dma_buffer *capture_dma, *playback_dma;
- hdsp->capture_dma_buf =
- snd_hammerfall_get_buffer(hdsp->pci, HDSP_DMA_AREA_BYTES);
- hdsp->playback_dma_buf =
- snd_hammerfall_get_buffer(hdsp->pci, HDSP_DMA_AREA_BYTES);
- if (!hdsp->capture_dma_buf || !hdsp->playback_dma_buf) {
+ capture_dma = snd_hammerfall_get_buffer(hdsp->pci, HDSP_DMA_AREA_BYTES);
+ playback_dma = snd_hammerfall_get_buffer(hdsp->pci, HDSP_DMA_AREA_BYTES);
+ if (!capture_dma || !playback_dma) {
dev_err(hdsp->card->dev,
"%s: no buffers available\n", hdsp->card_name);
return -ENOMEM;
}
- /* Align to bus-space 64K boundary */
+ /* copy to the own data for alignment */
+ hdsp->capture_dma_buf = *capture_dma;
+ hdsp->playback_dma_buf = *playback_dma;
- cb_bus = ALIGN(hdsp->capture_dma_buf->addr, 0x10000ul);
- pb_bus = ALIGN(hdsp->playback_dma_buf->addr, 0x10000ul);
+ /* Align to bus-space 64K boundary */
+ hdsp->capture_dma_buf.addr = ALIGN(capture_dma->addr, 0x10000ul);
+ hdsp->playback_dma_buf.addr = ALIGN(playback_dma->addr, 0x10000ul);
/* Tell the card where it is */
+ hdsp_write(hdsp, HDSP_inputBufferAddress, hdsp->capture_dma_buf.addr);
+ hdsp_write(hdsp, HDSP_outputBufferAddress, hdsp->playback_dma_buf.addr);
- hdsp_write(hdsp, HDSP_inputBufferAddress, cb_bus);
- hdsp_write(hdsp, HDSP_outputBufferAddress, pb_bus);
-
- hdsp->capture_buffer = hdsp->capture_dma_buf->area + (cb_bus - hdsp->capture_dma_buf->addr);
- hdsp->playback_buffer = hdsp->playback_dma_buf->area + (pb_bus - hdsp->playback_dma_buf->addr);
+ hdsp->capture_dma_buf.area += hdsp->capture_dma_buf.addr - capture_dma->addr;
+ hdsp->playback_dma_buf.area += hdsp->playback_dma_buf.addr - playback_dma->addr;
+ hdsp->capture_buffer = hdsp->capture_dma_buf.area;
+ hdsp->playback_buffer = hdsp->playback_dma_buf.area;
return 0;
}
@@ -4507,7 +4512,7 @@ static int snd_hdsp_playback_open(struct snd_pcm_substream *substream)
snd_pcm_set_sync(substream);
runtime->hw = snd_hdsp_playback_subinfo;
- snd_pcm_set_runtime_buffer(substream, hdsp->playback_dma_buf);
+ snd_pcm_set_runtime_buffer(substream, &hdsp->playback_dma_buf);
hdsp->playback_pid = current->pid;
hdsp->playback_substream = substream;
@@ -4583,7 +4588,7 @@ static int snd_hdsp_capture_open(struct snd_pcm_substream *substream)
snd_pcm_set_sync(substream);
runtime->hw = snd_hdsp_capture_subinfo;
- snd_pcm_set_runtime_buffer(substream, hdsp->capture_dma_buf);
+ snd_pcm_set_runtime_buffer(substream, &hdsp->capture_dma_buf);
hdsp->capture_pid = current->pid;
hdsp->capture_substream = substream;
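
A hedged sketch of the alignment step performed in snd_hdsp_initialize_memory() above (and mirrored in the rme9652 driver below): the driver copies the devres-managed buffer descriptor, rounds the bus address up to a 64 KiB boundary, and advances the CPU pointer by the same offset so both views still describe the same bytes. Everything here is a simplified stand-in, not the ALSA API.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define ALIGN_UP(x, a)	(((x) + ((uint64_t)(a) - 1)) & ~((uint64_t)(a) - 1))

struct dma_buf {
	unsigned char *area;	/* CPU-visible address */
	uint64_t addr;		/* bus/DMA address */
	size_t bytes;
};

int main(void)
{
	static unsigned char backing[3 * 0x10000];
	struct dma_buf orig = {
		.area = backing,
		.addr = 0x12345678ULL,	/* arbitrary, unaligned bus address */
		.bytes = sizeof(backing),
	};
	struct dma_buf aligned = orig;	/* private copy, as the drivers now keep */
	uint64_t delta;

	aligned.addr = ALIGN_UP(orig.addr, 0x10000);
	delta = aligned.addr - orig.addr;
	aligned.area += delta;		/* keep CPU and bus views in sync */

	printf("bus %#llx -> %#llx (skipped %llu bytes)\n",
	       (unsigned long long)orig.addr,
	       (unsigned long long)aligned.addr,
	       (unsigned long long)delta);
	return 0;
}
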
diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
index e76f737ac9e8e9..7755e19aa77617 100644
--- a/sound/pci/rme9652/rme9652.c
+++ b/sound/pci/rme9652/rme9652.c
@@ -208,8 +208,11 @@ struct snd_rme9652 {
unsigned char ds_channels;
unsigned char ss_channels; /* different for hammerfall/hammerfall-light */
- struct snd_dma_buffer *playback_dma_buf;
- struct snd_dma_buffer *capture_dma_buf;
+ /* DMA buffers; these are copies of the original snd_dma_buffer objects
+ * (which are managed via devres), adjusted for address alignment
+ */
+ struct snd_dma_buffer playback_dma_buf;
+ struct snd_dma_buffer capture_dma_buf;
unsigned char *capture_buffer; /* suitably aligned address */
unsigned char *playback_buffer; /* suitably aligned address */
@@ -1719,30 +1722,32 @@ static void snd_rme9652_card_free(struct snd_card *card)
static int snd_rme9652_initialize_memory(struct snd_rme9652 *rme9652)
{
- unsigned long pb_bus, cb_bus;
+ struct snd_dma_buffer *capture_dma, *playback_dma;
- rme9652->capture_dma_buf =
- snd_hammerfall_get_buffer(rme9652->pci, RME9652_DMA_AREA_BYTES);
- rme9652->playback_dma_buf =
- snd_hammerfall_get_buffer(rme9652->pci, RME9652_DMA_AREA_BYTES);
- if (!rme9652->capture_dma_buf || !rme9652->playback_dma_buf) {
+ capture_dma = snd_hammerfall_get_buffer(rme9652->pci, RME9652_DMA_AREA_BYTES);
+ playback_dma = snd_hammerfall_get_buffer(rme9652->pci, RME9652_DMA_AREA_BYTES);
+ if (!capture_dma || !playback_dma) {
dev_err(rme9652->card->dev,
"%s: no buffers available\n", rme9652->card_name);
return -ENOMEM;
}
- /* Align to bus-space 64K boundary */
+ /* copy to the own data for alignment */
+ rme9652->capture_dma_buf = *capture_dma;
+ rme9652->playback_dma_buf = *playback_dma;
- cb_bus = ALIGN(rme9652->capture_dma_buf->addr, 0x10000ul);
- pb_bus = ALIGN(rme9652->playback_dma_buf->addr, 0x10000ul);
+ /* Align to bus-space 64K boundary */
+ rme9652->capture_dma_buf.addr = ALIGN(capture_dma->addr, 0x10000ul);
+ rme9652->playback_dma_buf.addr = ALIGN(playback_dma->addr, 0x10000ul);
/* Tell the card where it is */
+ rme9652_write(rme9652, RME9652_rec_buffer, rme9652->capture_dma_buf.addr);
+ rme9652_write(rme9652, RME9652_play_buffer, rme9652->playback_dma_buf.addr);
- rme9652_write(rme9652, RME9652_rec_buffer, cb_bus);
- rme9652_write(rme9652, RME9652_play_buffer, pb_bus);
-
- rme9652->capture_buffer = rme9652->capture_dma_buf->area + (cb_bus - rme9652->capture_dma_buf->addr);
- rme9652->playback_buffer = rme9652->playback_dma_buf->area + (pb_bus - rme9652->playback_dma_buf->addr);
+ rme9652->capture_dma_buf.area += rme9652->capture_dma_buf.addr - capture_dma->addr;
+ rme9652->playback_dma_buf.area += rme9652->playback_dma_buf.addr - playback_dma->addr;
+ rme9652->capture_buffer = rme9652->capture_dma_buf.area;
+ rme9652->playback_buffer = rme9652->playback_dma_buf.area;
return 0;
}
@@ -2259,7 +2264,7 @@ static int snd_rme9652_playback_open(struct snd_pcm_substream *substream)
snd_pcm_set_sync(substream);
runtime->hw = snd_rme9652_playback_subinfo;
- snd_pcm_set_runtime_buffer(substream, rme9652->playback_dma_buf);
+ snd_pcm_set_runtime_buffer(substream, &rme9652->playback_dma_buf);
if (rme9652->capture_substream == NULL) {
rme9652_stop(rme9652);
@@ -2318,7 +2323,7 @@ static int snd_rme9652_capture_open(struct snd_pcm_substream *substream)
snd_pcm_set_sync(substream);
runtime->hw = snd_rme9652_capture_subinfo;
- snd_pcm_set_runtime_buffer(substream, rme9652->capture_dma_buf);
+ snd_pcm_set_runtime_buffer(substream, &rme9652->capture_dma_buf);
if (rme9652->playback_substream == NULL) {
rme9652_stop(rme9652);
diff --git a/sound/synth/emux/emux.c b/sound/synth/emux/emux.c
index 49d1976a132c02..5ed8e36d2e043d 100644
--- a/sound/synth/emux/emux.c
+++ b/sound/synth/emux/emux.c
@@ -88,7 +88,7 @@ int snd_emux_register(struct snd_emux *emu, struct snd_card *card, int index, ch
emu->name = kstrdup(name, GFP_KERNEL);
emu->voices = kcalloc(emu->max_voices, sizeof(struct snd_emux_voice),
GFP_KERNEL);
- if (emu->voices == NULL)
+ if (emu->name == NULL || emu->voices == NULL)
return -ENOMEM;
/* create soundfont list */
diff --git a/tools/Makefile b/tools/Makefile
index 5da1fde03a9a31..db2f7b8ebed59a 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -24,7 +24,6 @@ help:
@echo ' intel-speed-select - Intel Speed Select tool'
@echo ' kvm_stat - top-like utility for displaying kvm statistics'
@echo ' leds - LEDs tools'
- @echo ' liblockdep - user-space wrapper for kernel locking-validator'
@echo ' objtool - an ELF object analysis tool'
@echo ' pci - PCI tools'
@echo ' perf - Linux performance measurement and analysis tool'
@@ -72,9 +71,6 @@ cgroup counter firewire hv guest bootconfig spi usb virtio vm bpf iio gpio objto
bpf/%: FORCE
$(call descend,$@)
-liblockdep: FORCE
- $(call descend,lib/lockdep)
-
libapi: FORCE
$(call descend,lib/api)
@@ -101,7 +97,7 @@ freefall: FORCE
kvm_stat: FORCE
$(call descend,kvm/$@)
-all: acpi cgroup counter cpupower gpio hv firewire liblockdep \
+all: acpi cgroup counter cpupower gpio hv firewire \
perf selftests bootconfig spi turbostat usb \
virtio vm bpf x86_energy_perf_policy \
tmon freefall iio objtool kvm_stat wmi \
@@ -116,9 +112,6 @@ cpupower_install:
cgroup_install counter_install firewire_install gpio_install hv_install iio_install perf_install bootconfig_install spi_install usb_install virtio_install vm_install bpf_install objtool_install wmi_install pci_install debugging_install tracing_install:
$(call descend,$(@:_install=),install)
-liblockdep_install:
- $(call descend,lib/lockdep,install)
-
selftests_install:
$(call descend,testing/$(@:_install=),install)
@@ -135,7 +128,7 @@ kvm_stat_install:
$(call descend,kvm/$(@:_install=),install)
install: acpi_install cgroup_install counter_install cpupower_install gpio_install \
- hv_install firewire_install iio_install liblockdep_install \
+ hv_install firewire_install iio_install \
perf_install selftests_install turbostat_install usb_install \
virtio_install vm_install bpf_install x86_energy_perf_policy_install \
tmon_install freefall_install objtool_install kvm_stat_install \
@@ -151,9 +144,6 @@ cpupower_clean:
cgroup_clean counter_clean hv_clean firewire_clean bootconfig_clean spi_clean usb_clean virtio_clean vm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean pci_clean firmware_clean debugging_clean tracing_clean:
$(call descend,$(@:_clean=),clean)
-liblockdep_clean:
- $(call descend,lib/lockdep,clean)
-
libapi_clean:
$(call descend,lib/api,clean)
@@ -185,7 +175,7 @@ build_clean:
clean: acpi_clean cgroup_clean counter_clean cpupower_clean hv_clean firewire_clean \
perf_clean selftests_clean turbostat_clean bootconfig_clean spi_clean usb_clean virtio_clean \
vm_clean bpf_clean iio_clean x86_energy_perf_policy_clean tmon_clean \
- freefall_clean build_clean libbpf_clean libsubcmd_clean liblockdep_clean \
+ freefall_clean build_clean libbpf_clean libsubcmd_clean \
gpio_clean objtool_clean leds_clean wmi_clean pci_clean firmware_clean debugging_clean \
intel-speed-select_clean tracing_clean
diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h
index a7c413432b33d8..01e2650b958591 100644
--- a/tools/arch/x86/include/asm/msr-index.h
+++ b/tools/arch/x86/include/asm/msr-index.h
@@ -625,6 +625,8 @@
#define MSR_IA32_BNDCFGS_RSVD 0x00000ffc
+#define MSR_IA32_XFD 0x000001c4
+#define MSR_IA32_XFD_ERR 0x000001c5
#define MSR_IA32_XSS 0x00000da0
#define MSR_IA32_APICBASE 0x0000001b
diff --git a/tools/arch/x86/include/uapi/asm/prctl.h b/tools/arch/x86/include/uapi/asm/prctl.h
index 5a6aac9fa41f7b..754a0785681714 100644
--- a/tools/arch/x86/include/uapi/asm/prctl.h
+++ b/tools/arch/x86/include/uapi/asm/prctl.h
@@ -10,6 +10,10 @@
#define ARCH_GET_CPUID 0x1011
#define ARCH_SET_CPUID 0x1012
+#define ARCH_GET_XCOMP_SUPP 0x1021
+#define ARCH_GET_XCOMP_PERM 0x1022
+#define ARCH_REQ_XCOMP_PERM 0x1023
+
#define ARCH_MAP_VDSO_X32 0x2001
#define ARCH_MAP_VDSO_32 0x2002
#define ARCH_MAP_VDSO_64 0x2003
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index c0c30e56988f2c..7cfba11c30146d 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -22,24 +22,29 @@ else
_OUTPUT := $(CURDIR)
endif
BOOTSTRAP_OUTPUT := $(_OUTPUT)/bootstrap/
+
LIBBPF_OUTPUT := $(_OUTPUT)/libbpf/
LIBBPF_DESTDIR := $(LIBBPF_OUTPUT)
LIBBPF_INCLUDE := $(LIBBPF_DESTDIR)/include
LIBBPF_HDRS_DIR := $(LIBBPF_INCLUDE)/bpf
+LIBBPF := $(LIBBPF_OUTPUT)libbpf.a
-LIBBPF = $(LIBBPF_OUTPUT)libbpf.a
-LIBBPF_BOOTSTRAP_OUTPUT = $(BOOTSTRAP_OUTPUT)libbpf/
-LIBBPF_BOOTSTRAP = $(LIBBPF_BOOTSTRAP_OUTPUT)libbpf.a
+LIBBPF_BOOTSTRAP_OUTPUT := $(BOOTSTRAP_OUTPUT)libbpf/
+LIBBPF_BOOTSTRAP_DESTDIR := $(LIBBPF_BOOTSTRAP_OUTPUT)
+LIBBPF_BOOTSTRAP_INCLUDE := $(LIBBPF_BOOTSTRAP_DESTDIR)/include
+LIBBPF_BOOTSTRAP_HDRS_DIR := $(LIBBPF_BOOTSTRAP_INCLUDE)/bpf
+LIBBPF_BOOTSTRAP := $(LIBBPF_BOOTSTRAP_OUTPUT)libbpf.a
# We need to copy hashmap.h and nlattr.h which is not otherwise exported by
# libbpf, but still required by bpftool.
LIBBPF_INTERNAL_HDRS := $(addprefix $(LIBBPF_HDRS_DIR)/,hashmap.h nlattr.h)
+LIBBPF_BOOTSTRAP_INTERNAL_HDRS := $(addprefix $(LIBBPF_BOOTSTRAP_HDRS_DIR)/,hashmap.h)
ifeq ($(BPFTOOL_VERSION),)
BPFTOOL_VERSION := $(shell make -rR --no-print-directory -sC ../../.. kernelversion)
endif
-$(LIBBPF_OUTPUT) $(BOOTSTRAP_OUTPUT) $(LIBBPF_BOOTSTRAP_OUTPUT) $(LIBBPF_HDRS_DIR):
+$(LIBBPF_OUTPUT) $(BOOTSTRAP_OUTPUT) $(LIBBPF_BOOTSTRAP_OUTPUT) $(LIBBPF_HDRS_DIR) $(LIBBPF_BOOTSTRAP_HDRS_DIR):
$(QUIET_MKDIR)mkdir -p $@
$(LIBBPF): $(wildcard $(BPF_DIR)/*.[ch] $(BPF_DIR)/Makefile) | $(LIBBPF_OUTPUT)
@@ -52,7 +57,12 @@ $(LIBBPF_INTERNAL_HDRS): $(LIBBPF_HDRS_DIR)/%.h: $(BPF_DIR)/%.h | $(LIBBPF_HDRS_
$(LIBBPF_BOOTSTRAP): $(wildcard $(BPF_DIR)/*.[ch] $(BPF_DIR)/Makefile) | $(LIBBPF_BOOTSTRAP_OUTPUT)
$(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(LIBBPF_BOOTSTRAP_OUTPUT) \
- ARCH= CC=$(HOSTCC) LD=$(HOSTLD) $@
+ DESTDIR=$(LIBBPF_BOOTSTRAP_DESTDIR) prefix= \
+ ARCH= CC=$(HOSTCC) LD=$(HOSTLD) $@ install_headers
+
+$(LIBBPF_BOOTSTRAP_INTERNAL_HDRS): $(LIBBPF_BOOTSTRAP_HDRS_DIR)/%.h: $(BPF_DIR)/%.h | $(LIBBPF_BOOTSTRAP_HDRS_DIR)
+ $(call QUIET_INSTALL, $@)
+ $(Q)install -m 644 -t $(LIBBPF_BOOTSTRAP_HDRS_DIR) $<
$(LIBBPF)-clean: FORCE | $(LIBBPF_OUTPUT)
$(call QUIET_CLEAN, libbpf)
@@ -172,11 +182,11 @@ else
$(Q)cp "$(VMLINUX_H)" $@
endif
-$(OUTPUT)%.bpf.o: skeleton/%.bpf.c $(OUTPUT)vmlinux.h $(LIBBPF)
+$(OUTPUT)%.bpf.o: skeleton/%.bpf.c $(OUTPUT)vmlinux.h $(LIBBPF_BOOTSTRAP)
$(QUIET_CLANG)$(CLANG) \
-I$(if $(OUTPUT),$(OUTPUT),.) \
-I$(srctree)/tools/include/uapi/ \
- -I$(LIBBPF_INCLUDE) \
+ -I$(LIBBPF_BOOTSTRAP_INCLUDE) \
-g -O2 -Wall -target bpf -c $< -o $@ && $(LLVM_STRIP) -g $@
$(OUTPUT)%.skel.h: $(OUTPUT)%.bpf.o $(BPFTOOL_BOOTSTRAP)
@@ -209,8 +219,10 @@ $(BPFTOOL_BOOTSTRAP): $(BOOTSTRAP_OBJS) $(LIBBPF_BOOTSTRAP)
$(OUTPUT)bpftool: $(OBJS) $(LIBBPF)
$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(OBJS) $(LIBS)
-$(BOOTSTRAP_OUTPUT)%.o: %.c $(LIBBPF_INTERNAL_HDRS) | $(BOOTSTRAP_OUTPUT)
- $(QUIET_CC)$(HOSTCC) $(CFLAGS) -c -MMD -o $@ $<
+$(BOOTSTRAP_OUTPUT)%.o: %.c $(LIBBPF_BOOTSTRAP_INTERNAL_HDRS) | $(BOOTSTRAP_OUTPUT)
+ $(QUIET_CC)$(HOSTCC) \
+ $(subst -I$(LIBBPF_INCLUDE),-I$(LIBBPF_BOOTSTRAP_INCLUDE),$(CFLAGS)) \
+ -c -MMD -o $@ $<
$(OUTPUT)%.o: %.c
$(QUIET_CC)$(CC) $(CFLAGS) -c -MMD -o $@ $<
@@ -257,6 +269,6 @@ doc-uninstall:
FORCE:
.SECONDARY:
-.PHONY: all FORCE clean install-bin install uninstall
+.PHONY: all FORCE bootstrap clean install-bin install uninstall
.PHONY: doc doc-clean doc-install doc-uninstall
.DEFAULT_GOAL := all
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
index 1c5fb86d455aba..4557a8b6086f4f 100644
--- a/tools/include/uapi/asm-generic/unistd.h
+++ b/tools/include/uapi/asm-generic/unistd.h
@@ -880,8 +880,11 @@ __SYSCALL(__NR_memfd_secret, sys_memfd_secret)
#define __NR_process_mrelease 448
__SYSCALL(__NR_process_mrelease, sys_process_mrelease)
+#define __NR_futex_waitv 449
+__SYSCALL(__NR_futex_waitv, sys_futex_waitv)
+
#undef __NR_syscalls
-#define __NR_syscalls 449
+#define __NR_syscalls 450
/*
* 32 bit systems traditionally used different
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h
index bde5860b368626..914ebd9290e519 100644
--- a/tools/include/uapi/drm/i915_drm.h
+++ b/tools/include/uapi/drm/i915_drm.h
@@ -1522,6 +1522,12 @@ struct drm_i915_gem_caching {
#define I915_TILING_NONE 0
#define I915_TILING_X 1
#define I915_TILING_Y 2
+/*
+ * Do not add new tiling types here. The I915_TILING_* values are for
+ * de-tiling fence registers that no longer exist on modern platforms. Although
+ * the hardware may support new types of tiling in general (e.g., Tile4), we
+ * do not need to add them to the uapi that is specific to now-defunct ioctls.
+ */
#define I915_TILING_LAST I915_TILING_Y
#define I915_BIT_6_SWIZZLE_NONE 0
@@ -1824,6 +1830,7 @@ struct drm_i915_gem_context_param {
* Extensions:
* i915_context_engines_load_balance (I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE)
* i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND)
+ * i915_context_engines_parallel_submit (I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT)
*/
#define I915_CONTEXT_PARAM_ENGINES 0xa
@@ -1846,6 +1853,55 @@ struct drm_i915_gem_context_param {
* attempted to use it, never re-use this context param number.
*/
#define I915_CONTEXT_PARAM_RINGSIZE 0xc
+
+/*
+ * I915_CONTEXT_PARAM_PROTECTED_CONTENT:
+ *
+ * Mark that the context makes use of protected content, which will result
+ * in the context being invalidated when the protected content session is.
+ * Given that the protected content session is killed on suspend, the device
+ * is kept awake for the lifetime of a protected context, so the user should
+ * make sure to dispose of them once done.
+ * This flag can only be set at context creation time and, when set to true,
+ * must be preceded by an explicit setting of I915_CONTEXT_PARAM_RECOVERABLE
+ * to false. This flag can't be set to true in conjunction with setting the
+ * I915_CONTEXT_PARAM_BANNABLE flag to false. Creation example:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_gem_context_create_ext_setparam p_protected = {
+ * .base = {
+ * .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
+ * },
+ * .param = {
+ * .param = I915_CONTEXT_PARAM_PROTECTED_CONTENT,
+ * .value = 1,
+ * }
+ * };
+ * struct drm_i915_gem_context_create_ext_setparam p_norecover = {
+ * .base = {
+ * .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
+ * .next_extension = to_user_pointer(&p_protected),
+ * },
+ * .param = {
+ * .param = I915_CONTEXT_PARAM_RECOVERABLE,
+ * .value = 0,
+ * }
+ * };
+ * struct drm_i915_gem_context_create_ext create = {
+ * .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
+ * .extensions = to_user_pointer(&p_norecover);
+ * };
+ *
+ * ctx_id = gem_context_create_ext(drm_fd, &create);
+ *
+ * In addition to the normal failure cases, setting this flag during context
+ * creation can result in the following errors:
+ *
+ * -ENODEV: feature not available
+ * -EPERM: trying to mark a recoverable or not bannable context as protected
+ */
+#define I915_CONTEXT_PARAM_PROTECTED_CONTENT 0xd
/* Must be kept compact -- no holes and well documented */
__u64 value;
@@ -2050,6 +2106,135 @@ struct i915_context_engines_bond {
} __attribute__((packed)) name__
/**
+ * struct i915_context_engines_parallel_submit - Configure engine for
+ * parallel submission.
+ *
+ * Set up a slot in the context engine map to allow multiple BBs to be submitted
+ * in a single execbuf IOCTL. Those BBs will then be scheduled to run on the GPU
+ * in parallel. Multiple hardware contexts are created internally in the i915 to
+ * run these BBs. Once a slot is configured for N BBs, only N BBs can be
+ * submitted in each execbuf IOCTL, and this is implicit behavior: the user
+ * doesn't tell the execbuf IOCTL there are N BBs; the execbuf IOCTL knows how
+ * many BBs there are based on the slot's configuration. The N BBs are the last
+ * N buffer objects, or the first N if I915_EXEC_BATCH_FIRST is set.
+ *
+ * The default placement behavior is to create implicit bonds between each
+ * context if each context maps to more than 1 physical engine (e.g. context is
+ * a virtual engine). Also, we only allow contexts of the same engine class, and
+ * these contexts must be in logically contiguous order. Examples of the placement
+ * behavior are described below. Lastly, the default is to not allow BBs to be
+ * preempted mid-batch. Rather, coordinated preemption points are inserted on all
+ * hardware contexts between each set of BBs. Flags could be added in the future
+ * to change both of these default behaviors.
+ *
+ * Returns -EINVAL if hardware context placement configuration is invalid or if
+ * the placement configuration isn't supported on the platform / submission
+ * interface.
+ * Returns -ENODEV if extension isn't supported on the platform / submission
+ * interface.
+ *
+ * .. code-block:: none
+ *
+ * Example syntax:
+ * CS[X] = generic engine of same class, logical instance X
+ * INVALID = I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE
+ *
+ * Example 1 pseudo code:
+ * set_engines(INVALID)
+ * set_parallel(engine_index=0, width=2, num_siblings=1,
+ * engines=CS[0],CS[1])
+ *
+ * Results in the following valid placement:
+ * CS[0], CS[1]
+ *
+ * Example 2 pseudo code:
+ * set_engines(INVALID)
+ * set_parallel(engine_index=0, width=2, num_siblings=2,
+ * engines=CS[0],CS[2],CS[1],CS[3])
+ *
+ * Results in the following valid placements:
+ * CS[0], CS[1]
+ * CS[2], CS[3]
+ *
+ * This can be thought of as two virtual engines, each containing two
+ * engines, thereby making a 2D array. However, there are bonds tying the
+ * entries together and placing restrictions on how they can be scheduled.
+ * Specifically, the scheduler can choose only vertical columns from the 2D
+ * array. That is, CS[0] is bonded to CS[1] and CS[2] to CS[3]. So if the
+ * scheduler wants to submit to CS[0], it must also choose CS[1], and vice
+ * versa. Likewise, submitting to CS[2] also requires using CS[3].
+ * VE[0] = CS[0], CS[2]
+ * VE[1] = CS[1], CS[3]
+ *
+ * Example 3 pseudo code:
+ * set_engines(INVALID)
+ * set_parallel(engine_index=0, width=2, num_siblings=2,
+ * engines=CS[0],CS[1],CS[1],CS[3])
+ *
+ * Results in the following valid and invalid placements:
+ * CS[0], CS[1]
+ * CS[1], CS[3] - Not logically contiguous, return -EINVAL
+ */
+struct i915_context_engines_parallel_submit {
+ /**
+ * @base: base user extension.
+ */
+ struct i915_user_extension base;
+
+ /**
+ * @engine_index: slot for parallel engine
+ */
+ __u16 engine_index;
+
+ /**
+ * @width: number of contexts per parallel engine or in other words the
+ * number of batches in each submission
+ */
+ __u16 width;
+
+ /**
+ * @num_siblings: number of siblings per context or in other words the
+ * number of possible placements for each submission
+ */
+ __u16 num_siblings;
+
+ /**
+ * @mbz16: reserved for future use; must be zero
+ */
+ __u16 mbz16;
+
+ /**
+ * @flags: all undefined flags must be zero; currently no flags are defined
+ */
+ __u64 flags;
+
+ /**
+ * @mbz64: reserved for future use; must be zero
+ */
+ __u64 mbz64[3];
+
+ /**
+ * @engines: 2-d array of engine instances to configure parallel engine
+ *
+ * length = width (i) * num_siblings (j)
+ * index = j + i * num_siblings
+ */
+ struct i915_engine_class_instance engines[0];
+
+} __packed;
+
+#define I915_DEFINE_CONTEXT_ENGINES_PARALLEL_SUBMIT(name__, N__) struct { \
+ struct i915_user_extension base; \
+ __u16 engine_index; \
+ __u16 width; \
+ __u16 num_siblings; \
+ __u16 mbz16; \
+ __u64 flags; \
+ __u64 mbz64[3]; \
+ struct i915_engine_class_instance engines[N__]; \
+} __attribute__((packed)) name__
+
+/**
* DOC: Context Engine Map uAPI
*
* Context engine map is a new way of addressing engines when submitting batch-
@@ -2108,6 +2293,7 @@ struct i915_context_param_engines {
__u64 extensions; /* linked chain of extension blocks, 0 terminates */
#define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
#define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */
+#define I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT 2 /* see i915_context_engines_parallel_submit */
struct i915_engine_class_instance engines[0];
} __attribute__((packed));
@@ -2726,14 +2912,20 @@ struct drm_i915_engine_info {
/** @flags: Engine flags. */
__u64 flags;
+#define I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE (1 << 0)
/** @capabilities: Capabilities of this engine. */
__u64 capabilities;
#define I915_VIDEO_CLASS_CAPABILITY_HEVC (1 << 0)
#define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC (1 << 1)
+ /** @logical_instance: Logical instance of engine */
+ __u16 logical_instance;
+
/** @rsvd1: Reserved fields. */
- __u64 rsvd1[4];
+ __u16 rsvd1[3];
+ /** @rsvd2: Reserved fields. */
+ __u64 rsvd2[3];
};
/**
@@ -2979,8 +3171,12 @@ struct drm_i915_gem_create_ext {
*
* For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
* struct drm_i915_gem_create_ext_memory_regions.
+ *
+ * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
+ * struct drm_i915_gem_create_ext_protected_content.
*/
#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
+#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
__u64 extensions;
};
@@ -3038,6 +3234,50 @@ struct drm_i915_gem_create_ext_memory_regions {
__u64 regions;
};
+/**
+ * struct drm_i915_gem_create_ext_protected_content - The
+ * I915_OBJECT_PARAM_PROTECTED_CONTENT extension.
+ *
+ * If this extension is provided, buffer contents are expected to be protected
+ * by PXP encryption and require decryption for scan out and processing. This
+ * is only possible on platforms that have PXP enabled; in all other scenarios
+ * using this extension will cause the ioctl to fail and return -ENODEV. The
+ * flags parameter is reserved for future expansion and must currently be set
+ * to zero.
+ *
+ * The buffer contents are considered invalid after a PXP session teardown.
+ *
+ * The encryption is guaranteed to be processed correctly only if the object
+ * is submitted with a context created using the
+ * I915_CONTEXT_PARAM_PROTECTED_CONTENT flag. This will also enable extra checks
+ * at submission time on the validity of the objects involved.
+ *
+ * Below is an example on how to create a protected object:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_gem_create_ext_protected_content protected_ext = {
+ * .base = { .name = I915_GEM_CREATE_EXT_PROTECTED_CONTENT },
+ * .flags = 0,
+ * };
+ * struct drm_i915_gem_create_ext create_ext = {
+ * .size = PAGE_SIZE,
+ * .extensions = (uintptr_t)&protected_ext,
+ * };
+ *
+ * int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
+ * if (err) ...
+ */
+struct drm_i915_gem_create_ext_protected_content {
+ /** @base: Extension link. See struct i915_user_extension. */
+ struct i915_user_extension base;
+ /** @flags: reserved for future usage, currently MBZ */
+ __u32 flags;
+};
+
+/* ID of the protected content session managed by i915 when PXP is active */
+#define I915_PROTECTED_CONTENT_DEFAULT_SESSION 0xf
+
#if defined(__cplusplus)
}
#endif
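The I915_DEFINE_CONTEXT_ENGINES_PARALLEL_SUBMIT() helper added above mirrors the other i915 engine-map macros and lets user space declare the extension with a fixed-size engines[] array. A hedged sketch of a width=2, num_siblings=1 configuration (matching Example 1 in the comment; the engine class/instance values are placeholders, and chaining the extension into I915_CONTEXT_PARAM_ENGINES is left out):

	/* Sketch only: one parallel slot, two batches per execbuf, one placement each. */
	I915_DEFINE_CONTEXT_ENGINES_PARALLEL_SUBMIT(parallel_cfg, 2) = {
		.base = { .name = I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT },
		.engine_index = 0,	/* slot 0 in the engine map */
		.width = 2,		/* two batch buffers per execbuf */
		.num_siblings = 1,	/* one possible placement per batch */
		.engines = {
			/* Placeholder instances: CS[0], CS[1] of the same class. */
			{ .engine_class = I915_ENGINE_CLASS_RENDER, .engine_instance = 0 },
			{ .engine_class = I915_ENGINE_CLASS_RENDER, .engine_instance = 1 },
		},
	};

The object is then linked into the context's engine map through the extensions chain of struct i915_context_param_engines, in the same way as the load-balance and bond extensions.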
diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h
index de45fcd2dcbe61..bb73e9a0b24fc1 100644
--- a/tools/include/uapi/linux/prctl.h
+++ b/tools/include/uapi/linux/prctl.h
@@ -268,5 +268,8 @@ struct prctl_mm_map {
# define PR_SCHED_CORE_SHARE_TO 2 /* push core_sched cookie to pid */
# define PR_SCHED_CORE_SHARE_FROM 3 /* pull core_sched cookie to pid */
# define PR_SCHED_CORE_MAX 4
+# define PR_SCHED_CORE_SCOPE_THREAD 0
+# define PR_SCHED_CORE_SCOPE_THREAD_GROUP 1
+# define PR_SCHED_CORE_SCOPE_PROCESS_GROUP 2
#endif /* _LINUX_PRCTL_H */
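The three PR_SCHED_CORE_SCOPE_* constants added above select how widely a core-scheduling cookie operation applies: a single task, its whole thread group, or its process group. A hedged sketch of creating a cookie for a thread group (PR_SCHED_CORE and PR_SCHED_CORE_CREATE are defined earlier in this header; the error handling is illustrative):

	#include <sys/prctl.h>
	#include <linux/prctl.h>
	#include <sys/types.h>
	#include <stdio.h>

	/* Give every thread of 'pid' the same new core-scheduling cookie. */
	static int create_core_cookie(pid_t pid)
	{
		if (prctl(PR_SCHED_CORE, PR_SCHED_CORE_CREATE, pid,
			  PR_SCHED_CORE_SCOPE_THREAD_GROUP, 0)) {
			perror("PR_SCHED_CORE_CREATE");
			return -1;
		}
		return 0;
	}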
diff --git a/tools/include/uapi/sound/asound.h b/tools/include/uapi/sound/asound.h
index 5859ca0a1439be..5fbb79e30819af 100644
--- a/tools/include/uapi/sound/asound.h
+++ b/tools/include/uapi/sound/asound.h
@@ -1002,7 +1002,7 @@ typedef int __bitwise snd_ctl_elem_iface_t;
#define SNDRV_CTL_ELEM_ACCESS_WRITE (1<<1)
#define SNDRV_CTL_ELEM_ACCESS_READWRITE (SNDRV_CTL_ELEM_ACCESS_READ|SNDRV_CTL_ELEM_ACCESS_WRITE)
#define SNDRV_CTL_ELEM_ACCESS_VOLATILE (1<<2) /* control value may be changed without a notification */
-// (1 << 3) is unused.
+/* (1 << 3) is unused. */
#define SNDRV_CTL_ELEM_ACCESS_TLV_READ (1<<4) /* TLV read is possible */
#define SNDRV_CTL_ELEM_ACCESS_TLV_WRITE (1<<5) /* TLV write is possible */
#define SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE (SNDRV_CTL_ELEM_ACCESS_TLV_READ|SNDRV_CTL_ELEM_ACCESS_TLV_WRITE)
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index c09cbb868c9f12..725701235fd855 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -515,6 +515,7 @@ int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value)
int bpf_map_lookup_and_delete_elem_flags(int fd, const void *key, void *value, __u64 flags)
{
union bpf_attr attr;
+ int ret;
memset(&attr, 0, sizeof(attr));
attr.map_fd = fd;
@@ -522,7 +523,8 @@ int bpf_map_lookup_and_delete_elem_flags(int fd, const void *key, void *value, _
attr.value = ptr_to_u64(value);
attr.flags = flags;
- return sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr));
+ ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr));
+ return libbpf_err_errno(ret);
}
int bpf_map_delete_elem(int fd, const void *key)
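With the change above, bpf_map_lookup_and_delete_elem_flags() now runs its result through libbpf_err_errno(), so a failing call returns a negative error code and leaves the same code in errno, consistent with the rest of libbpf. A hedged caller-side sketch (map creation is omitted; BPF_F_LOCK is only legal for maps built with spin-lock support):

	#include <errno.h>
	#include <stdio.h>
	#include <bpf/bpf.h>

	/* Atomically read and remove one map entry under the element lock. */
	static int pop_entry(int map_fd, const void *key, void *value)
	{
		int err = bpf_map_lookup_and_delete_elem_flags(map_fd, key, value, BPF_F_LOCK);

		if (err) {
			/* err is negative; errno now carries the same error code. */
			fprintf(stderr, "lookup_and_delete: %d (errno %d)\n", err, errno);
			return err;
		}
		return 0;
	}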
diff --git a/tools/lib/lockdep/.gitignore b/tools/lib/lockdep/.gitignore
deleted file mode 100644
index 6c308ac4388cf3..00000000000000
--- a/tools/lib/lockdep/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-liblockdep.so.*
diff --git a/tools/lib/lockdep/Build b/tools/lib/lockdep/Build
deleted file mode 100644
index 6f667355b06879..00000000000000
--- a/tools/lib/lockdep/Build
+++ /dev/null
@@ -1 +0,0 @@
-liblockdep-y += common.o lockdep.o preload.o rbtree.o
diff --git a/tools/lib/lockdep/Makefile b/tools/lib/lockdep/Makefile
deleted file mode 100644
index 9dafb8cb752fe0..00000000000000
--- a/tools/lib/lockdep/Makefile
+++ /dev/null
@@ -1,162 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# file format version
-FILE_VERSION = 1
-
-LIBLOCKDEP_VERSION=$(shell make --no-print-directory -sC ../../.. kernelversion)
-
-# Makefiles suck: This macro sets a default value of $(2) for the
-# variable named by $(1), unless the variable has been set by
-# environment or command line. This is necessary for CC and AR
-# because make sets default values, so the simpler ?= approach
-# won't work as expected.
-define allow-override
- $(if $(or $(findstring environment,$(origin $(1))),\
- $(findstring command line,$(origin $(1)))),,\
- $(eval $(1) = $(2)))
-endef
-
-# Allow setting CC and AR and LD, or setting CROSS_COMPILE as a prefix.
-$(call allow-override,CC,$(CROSS_COMPILE)gcc)
-$(call allow-override,AR,$(CROSS_COMPILE)ar)
-$(call allow-override,LD,$(CROSS_COMPILE)ld)
-
-INSTALL = install
-
-# Use DESTDIR for installing into a different root directory.
-# This is useful for building a package. The program will be
-# installed in this directory as if it was the root directory.
-# Then the build tool can move it later.
-DESTDIR ?=
-DESTDIR_SQ = '$(subst ','\'',$(DESTDIR))'
-
-prefix ?= /usr/local
-libdir_relative = lib
-libdir = $(prefix)/$(libdir_relative)
-bindir_relative = bin
-bindir = $(prefix)/$(bindir_relative)
-
-export DESTDIR DESTDIR_SQ INSTALL
-
-MAKEFLAGS += --no-print-directory
-
-include ../../scripts/Makefile.include
-
-# copy a bit from Linux kbuild
-
-ifeq ("$(origin V)", "command line")
- VERBOSE = $(V)
-endif
-ifndef VERBOSE
- VERBOSE = 0
-endif
-
-ifeq ($(srctree),)
-srctree := $(patsubst %/,%,$(dir $(CURDIR)))
-srctree := $(patsubst %/,%,$(dir $(srctree)))
-srctree := $(patsubst %/,%,$(dir $(srctree)))
-#$(info Determined 'srctree' to be $(srctree))
-endif
-
-# Shell quotes
-libdir_SQ = $(subst ','\'',$(libdir))
-bindir_SQ = $(subst ','\'',$(bindir))
-
-LIB_IN := $(OUTPUT)liblockdep-in.o
-
-BIN_FILE = lockdep
-LIB_FILE = $(OUTPUT)liblockdep.a $(OUTPUT)liblockdep.so.$(LIBLOCKDEP_VERSION)
-
-CONFIG_INCLUDES =
-CONFIG_LIBS =
-CONFIG_FLAGS =
-
-OBJ = $@
-N =
-
-export Q VERBOSE
-
-INCLUDES = -I. -I./uinclude -I./include -I../../include $(CONFIG_INCLUDES)
-
-# Set compile option CFLAGS if not set elsewhere
-CFLAGS ?= -g -DCONFIG_LOCKDEP -DCONFIG_STACKTRACE -DCONFIG_PROVE_LOCKING -DBITS_PER_LONG=__WORDSIZE -DLIBLOCKDEP_VERSION='"$(LIBLOCKDEP_VERSION)"' -rdynamic -O0 -g
-CFLAGS += -fPIC
-CFLAGS += -Wall
-
-override CFLAGS += $(CONFIG_FLAGS) $(INCLUDES) $(PLUGIN_DIR_SQ)
-
-ifeq ($(VERBOSE),1)
- Q =
- print_shared_lib_compile =
- print_install =
-else
- Q = @
- print_shared_lib_compile = echo ' LD '$(OBJ);
- print_static_lib_build = echo ' LD '$(OBJ);
- print_install = echo ' INSTALL '$1' to $(DESTDIR_SQ)$2';
-endif
-
-all:
-
-export srctree OUTPUT CC LD CFLAGS V
-include $(srctree)/tools/build/Makefile.include
-
-do_compile_shared_library = \
- ($(print_shared_lib_compile) \
- $(CC) $(LDFLAGS) --shared $^ -o $@ -lpthread -ldl -Wl,-soname='$(@F)';$(shell ln -sf $(@F) $(@D)/liblockdep.so))
-
-do_build_static_lib = \
- ($(print_static_lib_build) \
- $(RM) $@; $(AR) rcs $@ $^)
-
-CMD_TARGETS = $(LIB_FILE)
-
-TARGETS = $(CMD_TARGETS)
-
-
-all: fixdep all_cmd
-
-all_cmd: $(CMD_TARGETS)
-
-$(LIB_IN): force
- $(Q)$(MAKE) $(build)=liblockdep
-
-$(OUTPUT)liblockdep.so.$(LIBLOCKDEP_VERSION): $(LIB_IN)
- $(Q)$(do_compile_shared_library)
-
-$(OUTPUT)liblockdep.a: $(LIB_IN)
- $(Q)$(do_build_static_lib)
-
-tags: force
- $(RM) tags
- find . -name '*.[ch]' | xargs ctags --extra=+f --c-kinds=+px \
- --regex-c++='/_PE\(([^,)]*).*/TEP_ERRNO__\1/'
-
-TAGS: force
- $(RM) TAGS
- find . -name '*.[ch]' | xargs etags \
- --regex='/_PE(\([^,)]*\).*/TEP_ERRNO__\1/'
-
-define do_install
- $(print_install) \
- if [ ! -d '$(DESTDIR_SQ)$2' ]; then \
- $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \
- fi; \
- $(INSTALL) $1 '$(DESTDIR_SQ)$2'
-endef
-
-install_lib: all_cmd
- $(Q)$(call do_install,$(LIB_FILE),$(libdir_SQ))
- $(Q)$(call do_install,$(BIN_FILE),$(bindir_SQ))
-
-install: install_lib
-
-clean:
- $(RM) $(OUTPUT)*.o *~ $(TARGETS) $(OUTPUT)*.a $(OUTPUT)*liblockdep*.so* $(VERSION_FILES) $(OUTPUT).*.d $(OUTPUT).*.cmd
- $(RM) tags TAGS
-
-PHONY += force
-force:
-
-# Declare the contents of the .PHONY variable as phony. We keep that
-# information in a variable so we can use it in if_changed and friends.
-.PHONY: $(PHONY)
diff --git a/tools/lib/lockdep/common.c b/tools/lib/lockdep/common.c
deleted file mode 100644
index 5c3b58cce8a9b9..00000000000000
--- a/tools/lib/lockdep/common.c
+++ /dev/null
@@ -1,29 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <stddef.h>
-#include <stdbool.h>
-#include <linux/compiler.h>
-#include <linux/lockdep.h>
-#include <unistd.h>
-#include <sys/syscall.h>
-
-static __thread struct task_struct current_obj;
-
-/* lockdep wants these */
-bool debug_locks = true;
-bool debug_locks_silent;
-
-__attribute__((destructor)) static void liblockdep_exit(void)
-{
- debug_check_no_locks_held();
-}
-
-struct task_struct *__curr(void)
-{
- if (current_obj.pid == 0) {
- /* Makes lockdep output pretty */
- prctl(PR_GET_NAME, current_obj.comm);
- current_obj.pid = syscall(__NR_gettid);
- }
-
- return &current_obj;
-}
diff --git a/tools/lib/lockdep/include/liblockdep/common.h b/tools/lib/lockdep/include/liblockdep/common.h
deleted file mode 100644
index a6d7ee5f18ba94..00000000000000
--- a/tools/lib/lockdep/include/liblockdep/common.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_COMMON_H
-#define _LIBLOCKDEP_COMMON_H
-
-#include <pthread.h>
-
-#define NR_LOCKDEP_CACHING_CLASSES 2
-#define MAX_LOCKDEP_SUBCLASSES 8UL
-
-#ifndef CALLER_ADDR0
-#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
-#endif
-
-#ifndef _RET_IP_
-#define _RET_IP_ CALLER_ADDR0
-#endif
-
-#ifndef _THIS_IP_
-#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
-#endif
-
-struct lockdep_subclass_key {
- char __one_byte;
-};
-
-struct lock_class_key {
- struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
-};
-
-struct lockdep_map {
- struct lock_class_key *key;
- struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES];
- const char *name;
-#ifdef CONFIG_LOCK_STAT
- int cpu;
- unsigned long ip;
-#endif
-};
-
-void lockdep_init_map(struct lockdep_map *lock, const char *name,
- struct lock_class_key *key, int subclass);
-void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
- int trylock, int read, int check,
- struct lockdep_map *nest_lock, unsigned long ip);
-void lock_release(struct lockdep_map *lock, unsigned long ip);
-void lockdep_reset_lock(struct lockdep_map *lock);
-void lockdep_register_key(struct lock_class_key *key);
-void lockdep_unregister_key(struct lock_class_key *key);
-extern void debug_check_no_locks_freed(const void *from, unsigned long len);
-
-#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
- { .name = (_name), .key = (void *)(_key), }
-
-#endif
diff --git a/tools/lib/lockdep/include/liblockdep/mutex.h b/tools/lib/lockdep/include/liblockdep/mutex.h
deleted file mode 100644
index bd106b82759b76..00000000000000
--- a/tools/lib/lockdep/include/liblockdep/mutex.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_MUTEX_H
-#define _LIBLOCKDEP_MUTEX_H
-
-#include <pthread.h>
-#include "common.h"
-
-struct liblockdep_pthread_mutex {
- pthread_mutex_t mutex;
- struct lock_class_key key;
- struct lockdep_map dep_map;
-};
-
-typedef struct liblockdep_pthread_mutex liblockdep_pthread_mutex_t;
-
-#define LIBLOCKDEP_PTHREAD_MUTEX_INITIALIZER(mtx) \
- (const struct liblockdep_pthread_mutex) { \
- .mutex = PTHREAD_MUTEX_INITIALIZER, \
- .dep_map = STATIC_LOCKDEP_MAP_INIT(#mtx, &((&(mtx))->dep_map)), \
-}
-
-static inline int __mutex_init(liblockdep_pthread_mutex_t *lock,
- const char *name,
- struct lock_class_key *key,
- const pthread_mutexattr_t *__mutexattr)
-{
- lockdep_init_map(&lock->dep_map, name, key, 0);
- return pthread_mutex_init(&lock->mutex, __mutexattr);
-}
-
-#define liblockdep_pthread_mutex_init(mutex, mutexattr) \
-({ \
- lockdep_register_key(&(mutex)->key); \
- __mutex_init((mutex), #mutex, &(mutex)->key, (mutexattr)); \
-})
-
-static inline int liblockdep_pthread_mutex_lock(liblockdep_pthread_mutex_t *lock)
-{
- lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
- return pthread_mutex_lock(&lock->mutex);
-}
-
-static inline int liblockdep_pthread_mutex_unlock(liblockdep_pthread_mutex_t *lock)
-{
- lock_release(&lock->dep_map, (unsigned long)_RET_IP_);
- return pthread_mutex_unlock(&lock->mutex);
-}
-
-static inline int liblockdep_pthread_mutex_trylock(liblockdep_pthread_mutex_t *lock)
-{
- lock_acquire(&lock->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
- return pthread_mutex_trylock(&lock->mutex) == 0 ? 1 : 0;
-}
-
-static inline int liblockdep_pthread_mutex_destroy(liblockdep_pthread_mutex_t *lock)
-{
- lockdep_reset_lock(&lock->dep_map);
- lockdep_unregister_key(&lock->key);
- return pthread_mutex_destroy(&lock->mutex);
-}
-
-#ifdef __USE_LIBLOCKDEP
-
-#define pthread_mutex_t liblockdep_pthread_mutex_t
-#define pthread_mutex_init liblockdep_pthread_mutex_init
-#define pthread_mutex_lock liblockdep_pthread_mutex_lock
-#define pthread_mutex_unlock liblockdep_pthread_mutex_unlock
-#define pthread_mutex_trylock liblockdep_pthread_mutex_trylock
-#define pthread_mutex_destroy liblockdep_pthread_mutex_destroy
-
-#endif
-
-#endif
diff --git a/tools/lib/lockdep/include/liblockdep/rwlock.h b/tools/lib/lockdep/include/liblockdep/rwlock.h
deleted file mode 100644
index 6d5d2932bf4d99..00000000000000
--- a/tools/lib/lockdep/include/liblockdep/rwlock.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_RWLOCK_H
-#define _LIBLOCKDEP_RWLOCK_H
-
-#include <pthread.h>
-#include "common.h"
-
-struct liblockdep_pthread_rwlock {
- pthread_rwlock_t rwlock;
- struct lockdep_map dep_map;
-};
-
-typedef struct liblockdep_pthread_rwlock liblockdep_pthread_rwlock_t;
-
-#define LIBLOCKDEP_PTHREAD_RWLOCK_INITIALIZER(rwl) \
- (struct liblockdep_pthread_rwlock) { \
- .rwlock = PTHREAD_RWLOCK_INITIALIZER, \
- .dep_map = STATIC_LOCKDEP_MAP_INIT(#rwl, &((&(rwl))->dep_map)), \
-}
-
-static inline int __rwlock_init(liblockdep_pthread_rwlock_t *lock,
- const char *name,
- struct lock_class_key *key,
- const pthread_rwlockattr_t *attr)
-{
- lockdep_init_map(&lock->dep_map, name, key, 0);
-
- return pthread_rwlock_init(&lock->rwlock, attr);
-}
-
-#define liblockdep_pthread_rwlock_init(lock, attr) \
-({ \
- static struct lock_class_key __key; \
- \
- __rwlock_init((lock), #lock, &__key, (attr)); \
-})
-
-static inline int liblockdep_pthread_rwlock_rdlock(liblockdep_pthread_rwlock_t *lock)
-{
- lock_acquire(&lock->dep_map, 0, 0, 2, 1, NULL, (unsigned long)_RET_IP_);
- return pthread_rwlock_rdlock(&lock->rwlock);
-
-}
-
-static inline int liblockdep_pthread_rwlock_unlock(liblockdep_pthread_rwlock_t *lock)
-{
- lock_release(&lock->dep_map, (unsigned long)_RET_IP_);
- return pthread_rwlock_unlock(&lock->rwlock);
-}
-
-static inline int liblockdep_pthread_rwlock_wrlock(liblockdep_pthread_rwlock_t *lock)
-{
- lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
- return pthread_rwlock_wrlock(&lock->rwlock);
-}
-
-static inline int liblockdep_pthread_rwlock_tryrdlock(liblockdep_pthread_rwlock_t *lock)
-{
- lock_acquire(&lock->dep_map, 0, 1, 2, 1, NULL, (unsigned long)_RET_IP_);
- return pthread_rwlock_tryrdlock(&lock->rwlock) == 0 ? 1 : 0;
-}
-
-static inline int liblockdep_pthread_rwlock_trywrlock(liblockdep_pthread_rwlock_t *lock)
-{
- lock_acquire(&lock->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
- return pthread_rwlock_trywrlock(&lock->rwlock) == 0 ? 1 : 0;
-}
-
-static inline int liblockdep_rwlock_destroy(liblockdep_pthread_rwlock_t *lock)
-{
- return pthread_rwlock_destroy(&lock->rwlock);
-}
-
-#ifdef __USE_LIBLOCKDEP
-
-#define pthread_rwlock_t liblockdep_pthread_rwlock_t
-#define pthread_rwlock_init liblockdep_pthread_rwlock_init
-#define pthread_rwlock_rdlock liblockdep_pthread_rwlock_rdlock
-#define pthread_rwlock_unlock liblockdep_pthread_rwlock_unlock
-#define pthread_rwlock_wrlock liblockdep_pthread_rwlock_wrlock
-#define pthread_rwlock_tryrdlock liblockdep_pthread_rwlock_tryrdlock
-#define pthread_rwlock_trywrlock liblockdep_pthread_rwlock_trywrlock
-#define pthread_rwlock_destroy liblockdep_rwlock_destroy
-
-#endif
-
-#endif
diff --git a/tools/lib/lockdep/lockdep b/tools/lib/lockdep/lockdep
deleted file mode 100755
index 49af9fe19f5b05..00000000000000
--- a/tools/lib/lockdep/lockdep
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/bash
-
-LD_PRELOAD="./liblockdep.so $LD_PRELOAD" "$@"
diff --git a/tools/lib/lockdep/lockdep.c b/tools/lib/lockdep/lockdep.c
deleted file mode 100644
index 348a9d0fb766a4..00000000000000
--- a/tools/lib/lockdep/lockdep.c
+++ /dev/null
@@ -1,33 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/lockdep.h>
-#include <stdlib.h>
-
-/* Trivial API wrappers, we don't (yet) have RCU in user-space: */
-#define hlist_for_each_entry_rcu hlist_for_each_entry
-#define hlist_add_head_rcu hlist_add_head
-#define hlist_del_rcu hlist_del
-#define list_for_each_entry_rcu list_for_each_entry
-#define list_add_tail_rcu list_add_tail
-
-u32 prandom_u32(void)
-{
- /* Used only by lock_pin_lock() which is dead code */
- abort();
-}
-
-void print_irqtrace_events(struct task_struct *curr)
-{
- abort();
-}
-
-static struct new_utsname *init_utsname(void)
-{
- static struct new_utsname n = (struct new_utsname) {
- .release = "liblockdep",
- .version = LIBLOCKDEP_VERSION,
- };
-
- return &n;
-}
-
-#include "../../../kernel/locking/lockdep.c"
diff --git a/tools/lib/lockdep/lockdep_internals.h b/tools/lib/lockdep/lockdep_internals.h
deleted file mode 100644
index 29d0c954cc24a7..00000000000000
--- a/tools/lib/lockdep/lockdep_internals.h
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../kernel/locking/lockdep_internals.h"
diff --git a/tools/lib/lockdep/lockdep_states.h b/tools/lib/lockdep/lockdep_states.h
deleted file mode 100644
index 248d235efda92e..00000000000000
--- a/tools/lib/lockdep/lockdep_states.h
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../kernel/locking/lockdep_states.h"
diff --git a/tools/lib/lockdep/preload.c b/tools/lib/lockdep/preload.c
deleted file mode 100644
index 8f1adbe887b2f3..00000000000000
--- a/tools/lib/lockdep/preload.c
+++ /dev/null
@@ -1,443 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#define _GNU_SOURCE
-#include <pthread.h>
-#include <stdio.h>
-#include <dlfcn.h>
-#include <stdlib.h>
-#include <sysexits.h>
-#include <unistd.h>
-#include "include/liblockdep/mutex.h"
-#include "../../include/linux/rbtree.h"
-
-/**
- * struct lock_lookup - liblockdep's view of a single unique lock
- * @orig: pointer to the original pthread lock, used for lookups
- * @dep_map: lockdep's dep_map structure
- * @key: lockdep's key structure
- * @node: rb-tree node used to store the lock in a global tree
- * @name: a unique name for the lock
- */
-struct lock_lookup {
- void *orig; /* Original pthread lock, used for lookups */
- struct lockdep_map dep_map; /* Since all locks are dynamic, we need
- * a dep_map and a key for each lock */
- /*
- * Wait, there's no support for key classes? Yup :(
- * Most big projects wrap the pthread api with their own calls to
- * be compatible with different locking methods. This means that
- * "classes" will be brokes since the function that creates all
- * locks will point to a generic locking function instead of the
- * actual code that wants to do the locking.
- */
- struct lock_class_key key;
- struct rb_node node;
-#define LIBLOCKDEP_MAX_LOCK_NAME 22
- char name[LIBLOCKDEP_MAX_LOCK_NAME];
-};
-
-/* This is where we store our locks */
-static struct rb_root locks = RB_ROOT;
-static pthread_rwlock_t locks_rwlock = PTHREAD_RWLOCK_INITIALIZER;
-
-/* pthread mutex API */
-
-#ifdef __GLIBC__
-extern int __pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr);
-extern int __pthread_mutex_lock(pthread_mutex_t *mutex);
-extern int __pthread_mutex_trylock(pthread_mutex_t *mutex);
-extern int __pthread_mutex_unlock(pthread_mutex_t *mutex);
-extern int __pthread_mutex_destroy(pthread_mutex_t *mutex);
-#else
-#define __pthread_mutex_init NULL
-#define __pthread_mutex_lock NULL
-#define __pthread_mutex_trylock NULL
-#define __pthread_mutex_unlock NULL
-#define __pthread_mutex_destroy NULL
-#endif
-static int (*ll_pthread_mutex_init)(pthread_mutex_t *mutex,
- const pthread_mutexattr_t *attr) = __pthread_mutex_init;
-static int (*ll_pthread_mutex_lock)(pthread_mutex_t *mutex) = __pthread_mutex_lock;
-static int (*ll_pthread_mutex_trylock)(pthread_mutex_t *mutex) = __pthread_mutex_trylock;
-static int (*ll_pthread_mutex_unlock)(pthread_mutex_t *mutex) = __pthread_mutex_unlock;
-static int (*ll_pthread_mutex_destroy)(pthread_mutex_t *mutex) = __pthread_mutex_destroy;
-
-/* pthread rwlock API */
-
-#ifdef __GLIBC__
-extern int __pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr);
-extern int __pthread_rwlock_destroy(pthread_rwlock_t *rwlock);
-extern int __pthread_rwlock_wrlock(pthread_rwlock_t *rwlock);
-extern int __pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock);
-extern int __pthread_rwlock_rdlock(pthread_rwlock_t *rwlock);
-extern int __pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock);
-extern int __pthread_rwlock_unlock(pthread_rwlock_t *rwlock);
-#else
-#define __pthread_rwlock_init NULL
-#define __pthread_rwlock_destroy NULL
-#define __pthread_rwlock_wrlock NULL
-#define __pthread_rwlock_trywrlock NULL
-#define __pthread_rwlock_rdlock NULL
-#define __pthread_rwlock_tryrdlock NULL
-#define __pthread_rwlock_unlock NULL
-#endif
-
-static int (*ll_pthread_rwlock_init)(pthread_rwlock_t *rwlock,
- const pthread_rwlockattr_t *attr) = __pthread_rwlock_init;
-static int (*ll_pthread_rwlock_destroy)(pthread_rwlock_t *rwlock) = __pthread_rwlock_destroy;
-static int (*ll_pthread_rwlock_rdlock)(pthread_rwlock_t *rwlock) = __pthread_rwlock_rdlock;
-static int (*ll_pthread_rwlock_tryrdlock)(pthread_rwlock_t *rwlock) = __pthread_rwlock_tryrdlock;
-static int (*ll_pthread_rwlock_trywrlock)(pthread_rwlock_t *rwlock) = __pthread_rwlock_trywrlock;
-static int (*ll_pthread_rwlock_wrlock)(pthread_rwlock_t *rwlock) = __pthread_rwlock_wrlock;
-static int (*ll_pthread_rwlock_unlock)(pthread_rwlock_t *rwlock) = __pthread_rwlock_unlock;
-
-enum { none, prepare, done, } __init_state;
-static void init_preload(void);
-static void try_init_preload(void)
-{
- if (__init_state != done)
- init_preload();
-}
-
-static struct rb_node **__get_lock_node(void *lock, struct rb_node **parent)
-{
- struct rb_node **node = &locks.rb_node;
- struct lock_lookup *l;
-
- *parent = NULL;
-
- while (*node) {
- l = rb_entry(*node, struct lock_lookup, node);
-
- *parent = *node;
- if (lock < l->orig)
- node = &l->node.rb_left;
- else if (lock > l->orig)
- node = &l->node.rb_right;
- else
- return node;
- }
-
- return node;
-}
-
-#ifndef LIBLOCKDEP_STATIC_ENTRIES
-#define LIBLOCKDEP_STATIC_ENTRIES 1024
-#endif
-
-static struct lock_lookup __locks[LIBLOCKDEP_STATIC_ENTRIES];
-static int __locks_nr;
-
-static inline bool is_static_lock(struct lock_lookup *lock)
-{
- return lock >= __locks && lock < __locks + ARRAY_SIZE(__locks);
-}
-
-static struct lock_lookup *alloc_lock(void)
-{
- if (__init_state != done) {
- /*
- * Some programs attempt to initialize and use locks in their
- * allocation path. This means that a call to malloc() would
- * result in locks being initialized and locked.
- *
- * Why is it an issue for us? dlsym() below will try allocating
- * to give us the original function. Since this allocation will
- * result in a locking operations, we have to let pthread deal
- * with it, but we can't! we don't have the pointer to the
- * original API since we're inside dlsym() trying to get it
- */
-
- int idx = __locks_nr++;
- if (idx >= ARRAY_SIZE(__locks)) {
- dprintf(STDERR_FILENO,
- "LOCKDEP error: insufficient LIBLOCKDEP_STATIC_ENTRIES\n");
- exit(EX_UNAVAILABLE);
- }
- return __locks + idx;
- }
-
- return malloc(sizeof(struct lock_lookup));
-}
-
-static inline void free_lock(struct lock_lookup *lock)
-{
- if (likely(!is_static_lock(lock)))
- free(lock);
-}
-
-/**
- * __get_lock - find or create a lock instance
- * @lock: pointer to a pthread lock function
- *
- * Try to find an existing lock in the rbtree using the provided pointer. If
- * one wasn't found - create it.
- */
-static struct lock_lookup *__get_lock(void *lock)
-{
- struct rb_node **node, *parent;
- struct lock_lookup *l;
-
- ll_pthread_rwlock_rdlock(&locks_rwlock);
- node = __get_lock_node(lock, &parent);
- ll_pthread_rwlock_unlock(&locks_rwlock);
- if (*node) {
- return rb_entry(*node, struct lock_lookup, node);
- }
-
- /* We didn't find the lock, let's create it */
- l = alloc_lock();
- if (l == NULL)
- return NULL;
-
- l->orig = lock;
- /*
- * Currently the name of the lock is the ptr value of the pthread lock,
- * while not optimal, it makes debugging a bit easier.
- *
- * TODO: Get the real name of the lock using libdwarf
- */
- sprintf(l->name, "%p", lock);
- lockdep_init_map(&l->dep_map, l->name, &l->key, 0);
-
- ll_pthread_rwlock_wrlock(&locks_rwlock);
- /* This might have changed since the last time we fetched it */
- node = __get_lock_node(lock, &parent);
- rb_link_node(&l->node, parent, node);
- rb_insert_color(&l->node, &locks);
- ll_pthread_rwlock_unlock(&locks_rwlock);
-
- return l;
-}
-
-static void __del_lock(struct lock_lookup *lock)
-{
- ll_pthread_rwlock_wrlock(&locks_rwlock);
- rb_erase(&lock->node, &locks);
- ll_pthread_rwlock_unlock(&locks_rwlock);
- free_lock(lock);
-}
-
-int pthread_mutex_init(pthread_mutex_t *mutex,
- const pthread_mutexattr_t *attr)
-{
- int r;
-
- /*
- * We keep trying to init our preload module because there might be
- * code in init sections that tries to touch locks before we are
- * initialized, in that case we'll need to manually call preload
- * to get us going.
- *
- * Funny enough, kernel's lockdep had the same issue, and used
- * (almost) the same solution. See look_up_lock_class() in
- * kernel/locking/lockdep.c for details.
- */
- try_init_preload();
-
- r = ll_pthread_mutex_init(mutex, attr);
- if (r == 0)
- /*
- * We do a dummy initialization here so that lockdep could
- * warn us if something fishy is going on - such as
- * initializing a held lock.
- */
- __get_lock(mutex);
-
- return r;
-}
-
-int pthread_mutex_lock(pthread_mutex_t *mutex)
-{
- int r;
-
- try_init_preload();
-
- lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 1, NULL,
- (unsigned long)_RET_IP_);
- /*
- * Here's the thing with pthread mutexes: unlike the kernel variant,
- * they can fail.
- *
- * This means that the behaviour here is a bit different from what's
- * going on in the kernel: there we just tell lockdep that we took the
- * lock before actually taking it, but here we must deal with the case
- * that locking failed.
- *
- * To do that we'll "release" the lock if locking failed - this way
- * we'll get lockdep doing the correct checks when we try to take
- * the lock, and if that fails - we'll be back to the correct
- * state by releasing it.
- */
- r = ll_pthread_mutex_lock(mutex);
- if (r)
- lock_release(&__get_lock(mutex)->dep_map, (unsigned long)_RET_IP_);
-
- return r;
-}
-
-int pthread_mutex_trylock(pthread_mutex_t *mutex)
-{
- int r;
-
- try_init_preload();
-
- lock_acquire(&__get_lock(mutex)->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
- r = ll_pthread_mutex_trylock(mutex);
- if (r)
- lock_release(&__get_lock(mutex)->dep_map, (unsigned long)_RET_IP_);
-
- return r;
-}
-
-int pthread_mutex_unlock(pthread_mutex_t *mutex)
-{
- int r;
-
- try_init_preload();
-
- lock_release(&__get_lock(mutex)->dep_map, (unsigned long)_RET_IP_);
- /*
- * Just like taking a lock, only in reverse!
- *
- * If we fail releasing the lock, tell lockdep we're holding it again.
- */
- r = ll_pthread_mutex_unlock(mutex);
- if (r)
- lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
-
- return r;
-}
-
-int pthread_mutex_destroy(pthread_mutex_t *mutex)
-{
- try_init_preload();
-
- /*
- * Let's see if we're releasing a lock that's held.
- *
- * TODO: Hook into free() and add that check there as well.
- */
- debug_check_no_locks_freed(mutex, sizeof(*mutex));
- __del_lock(__get_lock(mutex));
- return ll_pthread_mutex_destroy(mutex);
-}
-
-/* This is the rwlock part, very similar to what happened with mutex above */
-int pthread_rwlock_init(pthread_rwlock_t *rwlock,
- const pthread_rwlockattr_t *attr)
-{
- int r;
-
- try_init_preload();
-
- r = ll_pthread_rwlock_init(rwlock, attr);
- if (r == 0)
- __get_lock(rwlock);
-
- return r;
-}
-
-int pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
-{
- try_init_preload();
-
- debug_check_no_locks_freed(rwlock, sizeof(*rwlock));
- __del_lock(__get_lock(rwlock));
- return ll_pthread_rwlock_destroy(rwlock);
-}
-
-int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
-{
- int r;
-
- init_preload();
-
- lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 2, 1, NULL, (unsigned long)_RET_IP_);
- r = ll_pthread_rwlock_rdlock(rwlock);
- if (r)
- lock_release(&__get_lock(rwlock)->dep_map, (unsigned long)_RET_IP_);
-
- return r;
-}
-
-int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
-{
- int r;
-
- init_preload();
-
- lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 2, 1, NULL, (unsigned long)_RET_IP_);
- r = ll_pthread_rwlock_tryrdlock(rwlock);
- if (r)
- lock_release(&__get_lock(rwlock)->dep_map, (unsigned long)_RET_IP_);
-
- return r;
-}
-
-int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
-{
- int r;
-
- init_preload();
-
- lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
- r = ll_pthread_rwlock_trywrlock(rwlock);
- if (r)
- lock_release(&__get_lock(rwlock)->dep_map, (unsigned long)_RET_IP_);
-
- return r;
-}
-
-int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
-{
- int r;
-
- init_preload();
-
- lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
- r = ll_pthread_rwlock_wrlock(rwlock);
- if (r)
- lock_release(&__get_lock(rwlock)->dep_map, (unsigned long)_RET_IP_);
-
- return r;
-}
-
-int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
-{
- int r;
-
- init_preload();
-
- lock_release(&__get_lock(rwlock)->dep_map, (unsigned long)_RET_IP_);
- r = ll_pthread_rwlock_unlock(rwlock);
- if (r)
- lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
-
- return r;
-}
-
-__attribute__((constructor)) static void init_preload(void)
-{
- if (__init_state == done)
- return;
-
-#ifndef __GLIBC__
- __init_state = prepare;
-
- ll_pthread_mutex_init = dlsym(RTLD_NEXT, "pthread_mutex_init");
- ll_pthread_mutex_lock = dlsym(RTLD_NEXT, "pthread_mutex_lock");
- ll_pthread_mutex_trylock = dlsym(RTLD_NEXT, "pthread_mutex_trylock");
- ll_pthread_mutex_unlock = dlsym(RTLD_NEXT, "pthread_mutex_unlock");
- ll_pthread_mutex_destroy = dlsym(RTLD_NEXT, "pthread_mutex_destroy");
-
- ll_pthread_rwlock_init = dlsym(RTLD_NEXT, "pthread_rwlock_init");
- ll_pthread_rwlock_destroy = dlsym(RTLD_NEXT, "pthread_rwlock_destroy");
- ll_pthread_rwlock_rdlock = dlsym(RTLD_NEXT, "pthread_rwlock_rdlock");
- ll_pthread_rwlock_tryrdlock = dlsym(RTLD_NEXT, "pthread_rwlock_tryrdlock");
- ll_pthread_rwlock_wrlock = dlsym(RTLD_NEXT, "pthread_rwlock_wrlock");
- ll_pthread_rwlock_trywrlock = dlsym(RTLD_NEXT, "pthread_rwlock_trywrlock");
- ll_pthread_rwlock_unlock = dlsym(RTLD_NEXT, "pthread_rwlock_unlock");
-#endif
-
- __init_state = done;
-}
diff --git a/tools/lib/lockdep/rbtree.c b/tools/lib/lockdep/rbtree.c
deleted file mode 100644
index 297c304571f8b8..00000000000000
--- a/tools/lib/lockdep/rbtree.c
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../lib/rbtree.c"
diff --git a/tools/lib/lockdep/run_tests.sh b/tools/lib/lockdep/run_tests.sh
deleted file mode 100755
index 11f425662b432c..00000000000000
--- a/tools/lib/lockdep/run_tests.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#! /bin/bash
-# SPDX-License-Identifier: GPL-2.0
-
-if ! make >/dev/null; then
- echo "Building liblockdep failed."
- echo "FAILED!"
- exit 1
-fi
-
-find tests -name '*.c' | sort | while read -r i; do
- testname=$(basename "$i" .c)
- echo -ne "$testname... "
- if gcc -o "tests/$testname" -pthread "$i" liblockdep.a -Iinclude -D__USE_LIBLOCKDEP &&
- timeout 1 "tests/$testname" 2>&1 | /bin/bash "tests/${testname}.sh"; then
- echo "PASSED!"
- else
- echo "FAILED!"
- fi
- rm -f "tests/$testname"
-done
-
-find tests -name '*.c' | sort | while read -r i; do
- testname=$(basename "$i" .c)
- echo -ne "(PRELOAD) $testname... "
- if gcc -o "tests/$testname" -pthread -Iinclude "$i" &&
- timeout 1 ./lockdep "tests/$testname" 2>&1 |
- /bin/bash "tests/${testname}.sh"; then
- echo "PASSED!"
- else
- echo "FAILED!"
- fi
- rm -f "tests/$testname"
-done
-
-find tests -name '*.c' | sort | while read -r i; do
- testname=$(basename "$i" .c)
- echo -ne "(PRELOAD + Valgrind) $testname... "
- if gcc -o "tests/$testname" -pthread -Iinclude "$i" &&
- { timeout 10 valgrind --read-var-info=yes ./lockdep "./tests/$testname" >& "tests/${testname}.vg.out"; true; } &&
- /bin/bash "tests/${testname}.sh" < "tests/${testname}.vg.out" &&
- ! grep -Eq '(^==[0-9]*== (Invalid |Uninitialised ))|Mismatched free|Source and destination overlap| UME ' "tests/${testname}.vg.out"; then
- echo "PASSED!"
- else
- echo "FAILED!"
- fi
- rm -f "tests/$testname"
-done
diff --git a/tools/lib/lockdep/tests/AA.c b/tools/lib/lockdep/tests/AA.c
deleted file mode 100644
index 63c7ce97bda3a9..00000000000000
--- a/tools/lib/lockdep/tests/AA.c
+++ /dev/null
@@ -1,14 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <liblockdep/mutex.h>
-
-int main(void)
-{
- pthread_mutex_t a;
-
- pthread_mutex_init(&a, NULL);
-
- pthread_mutex_lock(&a);
- pthread_mutex_lock(&a);
-
- return 0;
-}
diff --git a/tools/lib/lockdep/tests/AA.sh b/tools/lib/lockdep/tests/AA.sh
deleted file mode 100644
index f39b3286507412..00000000000000
--- a/tools/lib/lockdep/tests/AA.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-grep -q 'WARNING: possible recursive locking detected'
diff --git a/tools/lib/lockdep/tests/ABA.c b/tools/lib/lockdep/tests/ABA.c
deleted file mode 100644
index efa39b23f05d0c..00000000000000
--- a/tools/lib/lockdep/tests/ABA.c
+++ /dev/null
@@ -1,14 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <liblockdep/mutex.h>
-
-void main(void)
-{
- pthread_mutex_t a, b;
-
- pthread_mutex_init(&a, NULL);
- pthread_mutex_init(&b, NULL);
-
- pthread_mutex_lock(&a);
- pthread_mutex_lock(&b);
- pthread_mutex_lock(&a);
-}
diff --git a/tools/lib/lockdep/tests/ABA.sh b/tools/lib/lockdep/tests/ABA.sh
deleted file mode 100644
index f39b3286507412..00000000000000
--- a/tools/lib/lockdep/tests/ABA.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-grep -q 'WARNING: possible recursive locking detected'
diff --git a/tools/lib/lockdep/tests/ABBA.c b/tools/lib/lockdep/tests/ABBA.c
deleted file mode 100644
index 543789bc3e375c..00000000000000
--- a/tools/lib/lockdep/tests/ABBA.c
+++ /dev/null
@@ -1,26 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <liblockdep/mutex.h>
-#include "common.h"
-
-void main(void)
-{
- pthread_mutex_t a, b;
-
- pthread_mutex_init(&a, NULL);
- pthread_mutex_init(&b, NULL);
-
- LOCK_UNLOCK_2(a, b);
- LOCK_UNLOCK_2(b, a);
-
- pthread_mutex_destroy(&b);
- pthread_mutex_destroy(&a);
-
- pthread_mutex_init(&a, NULL);
- pthread_mutex_init(&b, NULL);
-
- LOCK_UNLOCK_2(a, b);
- LOCK_UNLOCK_2(b, a);
-
- pthread_mutex_destroy(&b);
- pthread_mutex_destroy(&a);
-}
diff --git a/tools/lib/lockdep/tests/ABBA.sh b/tools/lib/lockdep/tests/ABBA.sh
deleted file mode 100644
index fc31c607a5a8bf..00000000000000
--- a/tools/lib/lockdep/tests/ABBA.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-grep -q 'WARNING: possible circular locking dependency detected'
diff --git a/tools/lib/lockdep/tests/ABBA_2threads.c b/tools/lib/lockdep/tests/ABBA_2threads.c
deleted file mode 100644
index 39325ef8a2ac17..00000000000000
--- a/tools/lib/lockdep/tests/ABBA_2threads.c
+++ /dev/null
@@ -1,47 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <stdio.h>
-#include <pthread.h>
-
-pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER;
-pthread_mutex_t b = PTHREAD_MUTEX_INITIALIZER;
-pthread_barrier_t bar;
-
-void *ba_lock(void *arg)
-{
- int ret, i;
-
- pthread_mutex_lock(&b);
-
- if (pthread_barrier_wait(&bar) == PTHREAD_BARRIER_SERIAL_THREAD)
- pthread_barrier_destroy(&bar);
-
- pthread_mutex_lock(&a);
-
- pthread_mutex_unlock(&a);
- pthread_mutex_unlock(&b);
-}
-
-int main(void)
-{
- pthread_t t;
-
- pthread_barrier_init(&bar, NULL, 2);
-
- if (pthread_create(&t, NULL, ba_lock, NULL)) {
- fprintf(stderr, "pthread_create() failed\n");
- return 1;
- }
- pthread_mutex_lock(&a);
-
- if (pthread_barrier_wait(&bar) == PTHREAD_BARRIER_SERIAL_THREAD)
- pthread_barrier_destroy(&bar);
-
- pthread_mutex_lock(&b);
-
- pthread_mutex_unlock(&b);
- pthread_mutex_unlock(&a);
-
- pthread_join(t, NULL);
-
- return 0;
-}
diff --git a/tools/lib/lockdep/tests/ABBA_2threads.sh b/tools/lib/lockdep/tests/ABBA_2threads.sh
deleted file mode 100644
index fc31c607a5a8bf..00000000000000
--- a/tools/lib/lockdep/tests/ABBA_2threads.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-grep -q 'WARNING: possible circular locking dependency detected'
diff --git a/tools/lib/lockdep/tests/ABBCCA.c b/tools/lib/lockdep/tests/ABBCCA.c
deleted file mode 100644
index 48446129d49662..00000000000000
--- a/tools/lib/lockdep/tests/ABBCCA.c
+++ /dev/null
@@ -1,20 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <liblockdep/mutex.h>
-#include "common.h"
-
-void main(void)
-{
- pthread_mutex_t a, b, c;
-
- pthread_mutex_init(&a, NULL);
- pthread_mutex_init(&b, NULL);
- pthread_mutex_init(&c, NULL);
-
- LOCK_UNLOCK_2(a, b);
- LOCK_UNLOCK_2(b, c);
- LOCK_UNLOCK_2(c, a);
-
- pthread_mutex_destroy(&c);
- pthread_mutex_destroy(&b);
- pthread_mutex_destroy(&a);
-}
diff --git a/tools/lib/lockdep/tests/ABBCCA.sh b/tools/lib/lockdep/tests/ABBCCA.sh
deleted file mode 100644
index fc31c607a5a8bf..00000000000000
--- a/tools/lib/lockdep/tests/ABBCCA.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-grep -q 'WARNING: possible circular locking dependency detected'
diff --git a/tools/lib/lockdep/tests/ABBCCDDA.c b/tools/lib/lockdep/tests/ABBCCDDA.c
deleted file mode 100644
index 3570bf7b38042f..00000000000000
--- a/tools/lib/lockdep/tests/ABBCCDDA.c
+++ /dev/null
@@ -1,23 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <liblockdep/mutex.h>
-#include "common.h"
-
-void main(void)
-{
- pthread_mutex_t a, b, c, d;
-
- pthread_mutex_init(&a, NULL);
- pthread_mutex_init(&b, NULL);
- pthread_mutex_init(&c, NULL);
- pthread_mutex_init(&d, NULL);
-
- LOCK_UNLOCK_2(a, b);
- LOCK_UNLOCK_2(b, c);
- LOCK_UNLOCK_2(c, d);
- LOCK_UNLOCK_2(d, a);
-
- pthread_mutex_destroy(&d);
- pthread_mutex_destroy(&c);
- pthread_mutex_destroy(&b);
- pthread_mutex_destroy(&a);
-}
diff --git a/tools/lib/lockdep/tests/ABBCCDDA.sh b/tools/lib/lockdep/tests/ABBCCDDA.sh
deleted file mode 100644
index fc31c607a5a8bf..00000000000000
--- a/tools/lib/lockdep/tests/ABBCCDDA.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-grep -q 'WARNING: possible circular locking dependency detected'
diff --git a/tools/lib/lockdep/tests/ABCABC.c b/tools/lib/lockdep/tests/ABCABC.c
deleted file mode 100644
index a1c4659894cd6a..00000000000000
--- a/tools/lib/lockdep/tests/ABCABC.c
+++ /dev/null
@@ -1,20 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <liblockdep/mutex.h>
-#include "common.h"
-
-void main(void)
-{
- pthread_mutex_t a, b, c;
-
- pthread_mutex_init(&a, NULL);
- pthread_mutex_init(&b, NULL);
- pthread_mutex_init(&c, NULL);
-
- LOCK_UNLOCK_2(a, b);
- LOCK_UNLOCK_2(c, a);
- LOCK_UNLOCK_2(b, c);
-
- pthread_mutex_destroy(&c);
- pthread_mutex_destroy(&b);
- pthread_mutex_destroy(&a);
-}
diff --git a/tools/lib/lockdep/tests/ABCABC.sh b/tools/lib/lockdep/tests/ABCABC.sh
deleted file mode 100644
index fc31c607a5a8bf..00000000000000
--- a/tools/lib/lockdep/tests/ABCABC.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-grep -q 'WARNING: possible circular locking dependency detected'
diff --git a/tools/lib/lockdep/tests/ABCDBCDA.c b/tools/lib/lockdep/tests/ABCDBCDA.c
deleted file mode 100644
index 335af1c90ab538..00000000000000
--- a/tools/lib/lockdep/tests/ABCDBCDA.c
+++ /dev/null
@@ -1,23 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <liblockdep/mutex.h>
-#include "common.h"
-
-void main(void)
-{
- pthread_mutex_t a, b, c, d;
-
- pthread_mutex_init(&a, NULL);
- pthread_mutex_init(&b, NULL);
- pthread_mutex_init(&c, NULL);
- pthread_mutex_init(&d, NULL);
-
- LOCK_UNLOCK_2(a, b);
- LOCK_UNLOCK_2(c, d);
- LOCK_UNLOCK_2(b, c);
- LOCK_UNLOCK_2(d, a);
-
- pthread_mutex_destroy(&d);
- pthread_mutex_destroy(&c);
- pthread_mutex_destroy(&b);
- pthread_mutex_destroy(&a);
-}
diff --git a/tools/lib/lockdep/tests/ABCDBCDA.sh b/tools/lib/lockdep/tests/ABCDBCDA.sh
deleted file mode 100644
index fc31c607a5a8bf..00000000000000
--- a/tools/lib/lockdep/tests/ABCDBCDA.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-grep -q 'WARNING: possible circular locking dependency detected'
diff --git a/tools/lib/lockdep/tests/ABCDBDDA.c b/tools/lib/lockdep/tests/ABCDBDDA.c
deleted file mode 100644
index 3c59728630493f..00000000000000
--- a/tools/lib/lockdep/tests/ABCDBDDA.c
+++ /dev/null
@@ -1,23 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <liblockdep/mutex.h>
-#include "common.h"
-
-void main(void)
-{
- pthread_mutex_t a, b, c, d;
-
- pthread_mutex_init(&a, NULL);
- pthread_mutex_init(&b, NULL);
- pthread_mutex_init(&c, NULL);
- pthread_mutex_init(&d, NULL);
-
- LOCK_UNLOCK_2(a, b);
- LOCK_UNLOCK_2(c, d);
- LOCK_UNLOCK_2(b, d);
- LOCK_UNLOCK_2(d, a);
-
- pthread_mutex_destroy(&d);
- pthread_mutex_destroy(&c);
- pthread_mutex_destroy(&b);
- pthread_mutex_destroy(&a);
-}
diff --git a/tools/lib/lockdep/tests/ABCDBDDA.sh b/tools/lib/lockdep/tests/ABCDBDDA.sh
deleted file mode 100644
index fc31c607a5a8bf..00000000000000
--- a/tools/lib/lockdep/tests/ABCDBDDA.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-grep -q 'WARNING: possible circular locking dependency detected'
diff --git a/tools/lib/lockdep/tests/WW.c b/tools/lib/lockdep/tests/WW.c
deleted file mode 100644
index eee88df7fc41b9..00000000000000
--- a/tools/lib/lockdep/tests/WW.c
+++ /dev/null
@@ -1,14 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <liblockdep/rwlock.h>
-
-void main(void)
-{
- pthread_rwlock_t a, b;
-
- pthread_rwlock_init(&a, NULL);
- pthread_rwlock_init(&b, NULL);
-
- pthread_rwlock_wrlock(&a);
- pthread_rwlock_rdlock(&b);
- pthread_rwlock_wrlock(&a);
-}
diff --git a/tools/lib/lockdep/tests/WW.sh b/tools/lib/lockdep/tests/WW.sh
deleted file mode 100644
index f39b3286507412..00000000000000
--- a/tools/lib/lockdep/tests/WW.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-grep -q 'WARNING: possible recursive locking detected'
diff --git a/tools/lib/lockdep/tests/common.h b/tools/lib/lockdep/tests/common.h
deleted file mode 100644
index 3026c29ccb5c36..00000000000000
--- a/tools/lib/lockdep/tests/common.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_TEST_COMMON_H
-#define _LIBLOCKDEP_TEST_COMMON_H
-
-#define LOCK_UNLOCK_2(a, b) \
- do { \
- pthread_mutex_lock(&(a)); \
- pthread_mutex_lock(&(b)); \
- pthread_mutex_unlock(&(b)); \
- pthread_mutex_unlock(&(a)); \
- } while(0)
-
-#endif
diff --git a/tools/lib/lockdep/tests/unlock_balance.c b/tools/lib/lockdep/tests/unlock_balance.c
deleted file mode 100644
index dba25064b50a83..00000000000000
--- a/tools/lib/lockdep/tests/unlock_balance.c
+++ /dev/null
@@ -1,15 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <liblockdep/mutex.h>
-
-void main(void)
-{
- pthread_mutex_t a;
-
- pthread_mutex_init(&a, NULL);
-
- pthread_mutex_lock(&a);
- pthread_mutex_unlock(&a);
- pthread_mutex_unlock(&a);
-
- pthread_mutex_destroy(&a);
-}
diff --git a/tools/lib/lockdep/tests/unlock_balance.sh b/tools/lib/lockdep/tests/unlock_balance.sh
deleted file mode 100644
index c6e3952303fe38..00000000000000
--- a/tools/lib/lockdep/tests/unlock_balance.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-grep -q 'WARNING: bad unlock balance detected'
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index add39902166d6b..21735829b860ca 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -3310,6 +3310,9 @@ static bool ignore_unreachable_insn(struct objtool_file *file, struct instructio
if (!insn->func)
return false;
+ if (insn->func->static_call_tramp)
+ return true;
+
/*
* CONFIG_UBSAN_TRAP inserts a UD2 when it sees
* __builtin_unreachable(). The BUG() macro has an unreachable() after
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 2d7df8703cf207..3cf7bac67239cd 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -469,7 +469,7 @@ This option sets the time out limit. The default value is 500 ms.
--switch-events::
Record context switch events i.e. events of type PERF_RECORD_SWITCH or
-PERF_RECORD_SWITCH_CPU_WIDE. In some cases (e.g. Intel PT or CoreSight)
+PERF_RECORD_SWITCH_CPU_WIDE. In some cases (e.g. Intel PT, CoreSight or Arm SPE)
switch events will be enabled automatically, which can be suppressed by
the option --no-switch-events.
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 0777748b6da8f2..80522bcfafe069 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -516,17 +516,17 @@ kvm_ioctl_tbl := $(srctree)/tools/perf/trace/beauty/kvm_ioctl.sh
$(kvm_ioctl_array): $(kvm_hdr_dir)/kvm.h $(kvm_ioctl_tbl)
$(Q)$(SHELL) '$(kvm_ioctl_tbl)' $(kvm_hdr_dir) > $@
-socket_ipproto_array := $(beauty_outdir)/socket_ipproto_array.c
-socket_ipproto_tbl := $(srctree)/tools/perf/trace/beauty/socket_ipproto.sh
+socket_arrays := $(beauty_outdir)/socket.c
+socket_tbl := $(srctree)/tools/perf/trace/beauty/socket.sh
-$(socket_ipproto_array): $(linux_uapi_dir)/in.h $(socket_ipproto_tbl)
- $(Q)$(SHELL) '$(socket_ipproto_tbl)' $(linux_uapi_dir) > $@
+$(socket_arrays): $(linux_uapi_dir)/in.h $(beauty_linux_dir)/socket.h $(socket_tbl)
+ $(Q)$(SHELL) '$(socket_tbl)' $(linux_uapi_dir) $(beauty_linux_dir) > $@
-socket_arrays := $(beauty_outdir)/socket_arrays.c
-socket_tbl := $(srctree)/tools/perf/trace/beauty/socket.sh
+sockaddr_arrays := $(beauty_outdir)/sockaddr.c
+sockaddr_tbl := $(srctree)/tools/perf/trace/beauty/sockaddr.sh
-$(socket_arrays): $(beauty_linux_dir)/socket.h $(socket_tbl)
- $(Q)$(SHELL) '$(socket_tbl)' $(beauty_linux_dir) > $@
+$(sockaddr_arrays): $(beauty_linux_dir)/socket.h $(sockaddr_tbl)
+ $(Q)$(SHELL) '$(sockaddr_tbl)' $(beauty_linux_dir) > $@
vhost_virtio_ioctl_array := $(beauty_ioctl_outdir)/vhost_virtio_ioctl_array.c
vhost_virtio_hdr_dir := $(srctree)/tools/include/uapi/linux
@@ -736,8 +736,8 @@ prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h archheaders $(drm_ioc
$(sndrv_ctl_ioctl_array) \
$(kcmp_type_array) \
$(kvm_ioctl_array) \
- $(socket_ipproto_array) \
$(socket_arrays) \
+ $(sockaddr_arrays) \
$(vhost_virtio_ioctl_array) \
$(madvise_behavior_array) \
$(mmap_flags_array) \
@@ -1113,8 +1113,8 @@ clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clea
$(OUTPUT)$(sndrv_pcm_ioctl_array) \
$(OUTPUT)$(kvm_ioctl_array) \
$(OUTPUT)$(kcmp_type_array) \
- $(OUTPUT)$(socket_ipproto_array) \
$(OUTPUT)$(socket_arrays) \
+ $(OUTPUT)$(sockaddr_arrays) \
$(OUTPUT)$(vhost_virtio_ioctl_array) \
$(OUTPUT)$(perf_ioctl_array) \
$(OUTPUT)$(prctl_option_array) \
diff --git a/tools/perf/arch/arm/include/arch-tests.h b/tools/perf/arch/arm/include/arch-tests.h
index c62538052404d7..452b3d90452181 100644
--- a/tools/perf/arch/arm/include/arch-tests.h
+++ b/tools/perf/arch/arm/include/arch-tests.h
@@ -2,6 +2,6 @@
#ifndef ARCH_TESTS_H
#define ARCH_TESTS_H
-extern struct test arch_tests[];
+extern struct test_suite *arch_tests[];
#endif
diff --git a/tools/perf/arch/arm/tests/arch-tests.c b/tools/perf/arch/arm/tests/arch-tests.c
index 6848101a855fb4..69561111cc6f76 100644
--- a/tools/perf/arch/arm/tests/arch-tests.c
+++ b/tools/perf/arch/arm/tests/arch-tests.c
@@ -3,18 +3,10 @@
#include "tests/tests.h"
#include "arch-tests.h"
-struct test arch_tests[] = {
+struct test_suite *arch_tests[] = {
#ifdef HAVE_DWARF_UNWIND_SUPPORT
- {
- .desc = "DWARF unwind",
- .func = test__dwarf_unwind,
- },
+ &suite__dwarf_unwind,
#endif
- {
- .desc = "Vectors page",
- .func = test__vectors_page,
- },
- {
- .func = NULL,
- },
+ &suite__vectors_page,
+ NULL,
};
diff --git a/tools/perf/arch/arm/tests/vectors-page.c b/tools/perf/arch/arm/tests/vectors-page.c
index 7ffdd79971c892..55a8358374666e 100644
--- a/tools/perf/arch/arm/tests/vectors-page.c
+++ b/tools/perf/arch/arm/tests/vectors-page.c
@@ -9,8 +9,7 @@
#define VECTORS__MAP_NAME "[vectors]"
-int test__vectors_page(struct test *test __maybe_unused,
- int subtest __maybe_unused)
+static int test__vectors_page(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
void *start, *end;
@@ -22,3 +21,5 @@ int test__vectors_page(struct test *test __maybe_unused,
return TEST_OK;
}
+
+DEFINE_SUITE("Vectors page", vectors_page);
diff --git a/tools/perf/arch/arm64/include/arch-tests.h b/tools/perf/arch/arm64/include/arch-tests.h
index c62538052404d7..452b3d90452181 100644
--- a/tools/perf/arch/arm64/include/arch-tests.h
+++ b/tools/perf/arch/arm64/include/arch-tests.h
@@ -2,6 +2,6 @@
#ifndef ARCH_TESTS_H
#define ARCH_TESTS_H
-extern struct test arch_tests[];
+extern struct test_suite *arch_tests[];
#endif
diff --git a/tools/perf/arch/arm64/tests/arch-tests.c b/tools/perf/arch/arm64/tests/arch-tests.c
index 5b1543c980223d..ad16b4f8f63e9d 100644
--- a/tools/perf/arch/arm64/tests/arch-tests.c
+++ b/tools/perf/arch/arm64/tests/arch-tests.c
@@ -3,14 +3,9 @@
#include "tests/tests.h"
#include "arch-tests.h"
-struct test arch_tests[] = {
+struct test_suite *arch_tests[] = {
#ifdef HAVE_DWARF_UNWIND_SUPPORT
- {
- .desc = "DWARF unwind",
- .func = test__dwarf_unwind,
- },
+ &suite__dwarf_unwind,
#endif
- {
- .func = NULL,
- },
+ NULL,
};
diff --git a/tools/perf/arch/arm64/util/arm-spe.c b/tools/perf/arch/arm64/util/arm-spe.c
index a4420d4df50338..2100d46ccf5e64 100644
--- a/tools/perf/arch/arm64/util/arm-spe.c
+++ b/tools/perf/arch/arm64/util/arm-spe.c
@@ -23,6 +23,7 @@
#include "../../../util/auxtrace.h"
#include "../../../util/record.h"
#include "../../../util/arm-spe.h"
+#include <tools/libc_compat.h> // reallocarray
#define KiB(x) ((x) * 1024)
#define MiB(x) ((x) * 1024 * 1024)
@@ -31,6 +32,8 @@ struct arm_spe_recording {
struct auxtrace_record itr;
struct perf_pmu *arm_spe_pmu;
struct evlist *evlist;
+ int wrapped_cnt;
+ bool *wrapped;
};
static void arm_spe_set_timestamp(struct auxtrace_record *itr,
@@ -84,6 +87,55 @@ static int arm_spe_info_fill(struct auxtrace_record *itr,
return 0;
}
+static void
+arm_spe_snapshot_resolve_auxtrace_defaults(struct record_opts *opts,
+ bool privileged)
+{
+ /*
+ * The default snapshot size is the auxtrace mmap size. If neither auxtrace mmap size nor
+ * snapshot size is specified, then the default is 4MiB for privileged users, 128KiB for
+ * unprivileged users.
+ *
+ * The default auxtrace mmap size is 4MiB/page_size for privileged users, 128KiB for
+ * unprivileged users. If an unprivileged user does not specify mmap pages, the mmap pages
+ * will be reduced from the default 512KiB/page_size to 256KiB/page_size, otherwise the
+ * user is likely to get an error as they exceed their mlock limit.
+ */
+
+ /*
+ * No size was given to '-S' or '-m,', so go with the default
+ */
+ if (!opts->auxtrace_snapshot_size && !opts->auxtrace_mmap_pages) {
+ if (privileged) {
+ opts->auxtrace_mmap_pages = MiB(4) / page_size;
+ } else {
+ opts->auxtrace_mmap_pages = KiB(128) / page_size;
+ if (opts->mmap_pages == UINT_MAX)
+ opts->mmap_pages = KiB(256) / page_size;
+ }
+ } else if (!opts->auxtrace_mmap_pages && !privileged && opts->mmap_pages == UINT_MAX) {
+ opts->mmap_pages = KiB(256) / page_size;
+ }
+
+ /*
+ * '-m,xyz' was specified but no snapshot size, so make the snapshot size as big as the
+ * auxtrace mmap area.
+ */
+ if (!opts->auxtrace_snapshot_size)
+ opts->auxtrace_snapshot_size = opts->auxtrace_mmap_pages * (size_t)page_size;
+
+ /*
+ * '-Sxyz' was specified but no auxtrace mmap area, so make the auxtrace mmap area big
+ * enough to fit the requested snapshot size.
+ */
+ if (!opts->auxtrace_mmap_pages) {
+ size_t sz = opts->auxtrace_snapshot_size;
+
+ sz = round_up(sz, page_size) / page_size;
+ opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
+ }
+}
+
static int arm_spe_recording_options(struct auxtrace_record *itr,
struct evlist *evlist,
struct record_opts *opts)
@@ -115,6 +167,36 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
if (!opts->full_auxtrace)
return 0;
+ /*
+ * We are in snapshot mode.
+ */
+ if (opts->auxtrace_snapshot_mode) {
+ /*
+ * Command arguments '-Sxyz' and/or '-m,xyz' are missing, so fill those in with
+ * default values.
+ */
+ if (!opts->auxtrace_snapshot_size || !opts->auxtrace_mmap_pages)
+ arm_spe_snapshot_resolve_auxtrace_defaults(opts, privileged);
+
+ /*
+ * Snapshot size can't be bigger than the auxtrace area.
+ */
+ if (opts->auxtrace_snapshot_size > opts->auxtrace_mmap_pages * (size_t)page_size) {
+ pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
+ opts->auxtrace_snapshot_size,
+ opts->auxtrace_mmap_pages * (size_t)page_size);
+ return -EINVAL;
+ }
+
+ /*
+ * Something went wrong somewhere - this shouldn't happen.
+ */
+ if (!opts->auxtrace_snapshot_size || !opts->auxtrace_mmap_pages) {
+ pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
+ return -EINVAL;
+ }
+ }
+
/* We are in full trace mode but '-m,xyz' wasn't specified */
if (!opts->auxtrace_mmap_pages) {
if (privileged) {
@@ -138,6 +220,9 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
}
}
+ if (opts->auxtrace_snapshot_mode)
+ pr_debug2("%sx snapshot size: %zu\n", ARM_SPE_PMU_NAME,
+ opts->auxtrace_snapshot_size);
/*
* To obtain the auxtrace buffer file descriptor, the auxtrace event
@@ -166,8 +251,199 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
tracking_evsel->core.attr.sample_period = 1;
/* In per-cpu case, always need the time of mmap events etc */
- if (!perf_cpu_map__empty(cpus))
+ if (!perf_cpu_map__empty(cpus)) {
evsel__set_sample_bit(tracking_evsel, TIME);
+ evsel__set_sample_bit(tracking_evsel, CPU);
+
+ /* also track task context switch */
+ if (!record_opts__no_switch_events(opts))
+ tracking_evsel->core.attr.context_switch = 1;
+ }
+
+ return 0;
+}
+
+static int arm_spe_parse_snapshot_options(struct auxtrace_record *itr __maybe_unused,
+ struct record_opts *opts,
+ const char *str)
+{
+ unsigned long long snapshot_size = 0;
+ char *endptr;
+
+ if (str) {
+ snapshot_size = strtoull(str, &endptr, 0);
+ if (*endptr || snapshot_size > SIZE_MAX)
+ return -1;
+ }
+
+ opts->auxtrace_snapshot_mode = true;
+ opts->auxtrace_snapshot_size = snapshot_size;
+
+ return 0;
+}
+
+static int arm_spe_snapshot_start(struct auxtrace_record *itr)
+{
+ struct arm_spe_recording *ptr =
+ container_of(itr, struct arm_spe_recording, itr);
+ struct evsel *evsel;
+
+ evlist__for_each_entry(ptr->evlist, evsel) {
+ if (evsel->core.attr.type == ptr->arm_spe_pmu->type)
+ return evsel__disable(evsel);
+ }
+ return -EINVAL;
+}
+
+static int arm_spe_snapshot_finish(struct auxtrace_record *itr)
+{
+ struct arm_spe_recording *ptr =
+ container_of(itr, struct arm_spe_recording, itr);
+ struct evsel *evsel;
+
+ evlist__for_each_entry(ptr->evlist, evsel) {
+ if (evsel->core.attr.type == ptr->arm_spe_pmu->type)
+ return evsel__enable(evsel);
+ }
+ return -EINVAL;
+}
+
+static int arm_spe_alloc_wrapped_array(struct arm_spe_recording *ptr, int idx)
+{
+ bool *wrapped;
+ int cnt = ptr->wrapped_cnt, new_cnt, i;
+
+ /*
+ * No need to allocate, so return early.
+ */
+ if (idx < cnt)
+ return 0;
+
+ /*
+ * Make ptr->wrapped as big as idx.
+ */
+ new_cnt = idx + 1;
+
+ /*
+ * Freed in arm_spe_recording_free().
+ */
+ wrapped = reallocarray(ptr->wrapped, new_cnt, sizeof(bool));
+ if (!wrapped)
+ return -ENOMEM;
+
+ /*
+ * Initialize the newly allocated values.
+ */
+ for (i = cnt; i < new_cnt; i++)
+ wrapped[i] = false;
+
+ ptr->wrapped_cnt = new_cnt;
+ ptr->wrapped = wrapped;
+
+ return 0;
+}
+
+static bool arm_spe_buffer_has_wrapped(unsigned char *buffer,
+ size_t buffer_size, u64 head)
+{
+ u64 i, watermark;
+ u64 *buf = (u64 *)buffer;
+ size_t buf_size = buffer_size;
+
+ /*
+ * Defensively handle the case where head might be continually increasing - if its value is
+ * equal to or greater than the size of the ring buffer, then we can safely determine it has
+ * wrapped around. Otherwise, continue to detect if head might have wrapped.
+ */
+ if (head >= buffer_size)
+ return true;
+
+ /*
+ * We want to look at the very last 512 bytes (chosen arbitrarily) in the ring buffer.
+ */
+ watermark = buf_size - 512;
+
+ /*
+ * The value of head is somewhere within the size of the ring buffer. This can mean that there
+ * hasn't been enough data to fill the ring buffer yet, or that the trace time was so long that
+ * head has numerically wrapped around. To find out, we need to check whether there is data at the
+ * very end of the ring buffer. We can reliably do this because mmap'ed pages are zeroed
+ * out and there is a fresh mapping with every new session.
+ */
+
+ /*
+ * head is less than 512 bytes from the end of the ring buffer.
+ */
+ if (head > watermark)
+ watermark = head;
+
+ /*
+ * Speed things up by using 64 bit transactions (see "u64 *buf" above)
+ */
+ watermark /= sizeof(u64);
+ buf_size /= sizeof(u64);
+
+ /*
+ * If we find trace data at the end of the ring buffer, head has been there and has
+ * numerically wrapped around at least once.
+ */
+ for (i = watermark; i < buf_size; i++)
+ if (buf[i])
+ return true;
+
+ return false;
+}
+
+static int arm_spe_find_snapshot(struct auxtrace_record *itr, int idx,
+ struct auxtrace_mmap *mm, unsigned char *data,
+ u64 *head, u64 *old)
+{
+ int err;
+ bool wrapped;
+ struct arm_spe_recording *ptr =
+ container_of(itr, struct arm_spe_recording, itr);
+
+ /*
+ * Allocate memory to keep track of wrapping if this is the first
+ * time we deal with this *mm.
+ */
+ if (idx >= ptr->wrapped_cnt) {
+ err = arm_spe_alloc_wrapped_array(ptr, idx);
+ if (err)
+ return err;
+ }
+
+ /*
+ * Check to see if *head has wrapped around. If it hasn't, only the
+ * amount of data between *head and *old is snapshotted to avoid
+ * bloating the perf.data file with zeros. But as soon as *head has
+ * wrapped around, the entire size of the AUX ring buffer is taken.
+ */
+ wrapped = ptr->wrapped[idx];
+ if (!wrapped && arm_spe_buffer_has_wrapped(data, mm->len, *head)) {
+ wrapped = true;
+ ptr->wrapped[idx] = true;
+ }
+
+ pr_debug3("%s: mmap index %d old head %zu new head %zu size %zu\n",
+ __func__, idx, (size_t)*old, (size_t)*head, mm->len);
+
+ /*
+ * No wrap has occurred, we can just use *head and *old.
+ */
+ if (!wrapped)
+ return 0;
+
+ /*
+ * *head has wrapped around - adjust *head and *old to pick up the
+ * entire content of the AUX buffer.
+ */
+ if (*head >= mm->len) {
+ *old = *head - mm->len;
+ } else {
+ *head += mm->len;
+ *old = *head - mm->len;
+ }
return 0;
}
@@ -186,6 +462,7 @@ static void arm_spe_recording_free(struct auxtrace_record *itr)
struct arm_spe_recording *sper =
container_of(itr, struct arm_spe_recording, itr);
+ free(sper->wrapped);
free(sper);
}
@@ -207,6 +484,10 @@ struct auxtrace_record *arm_spe_recording_init(int *err,
sper->arm_spe_pmu = arm_spe_pmu;
sper->itr.pmu = arm_spe_pmu;
+ sper->itr.snapshot_start = arm_spe_snapshot_start;
+ sper->itr.snapshot_finish = arm_spe_snapshot_finish;
+ sper->itr.find_snapshot = arm_spe_find_snapshot;
+ sper->itr.parse_snapshot_options = arm_spe_parse_snapshot_options;
sper->itr.recording_options = arm_spe_recording_options;
sper->itr.info_priv_size = arm_spe_info_priv_size;
sper->itr.info_fill = arm_spe_info_fill;
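Beyond the option plumbing, the core of the snapshot support added above is the wrap-detection heuristic in arm_spe_buffer_has_wrapped(): AUX ring-buffer pages are zero-filled when mmap'ed and the mapping is fresh for every session, so non-zero data in the last 512 bytes means the write head has already passed the end of the buffer at least once. A self-contained sketch of that idea, assuming (as the hunk does) a buffer of at least 512 bytes:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Illustration only: mmap'ed AUX pages start out zeroed, so non-zero data
 * near the end of the buffer means the head has already been there, i.e. it
 * has wrapped at least once. */
static bool aux_buffer_has_wrapped(const unsigned char *buf, size_t len, uint64_t head)
{
	const uint64_t *words = (const uint64_t *)buf;
	size_t tail = len > 512 ? len - 512 : 0;	/* last 512 bytes, chosen arbitrarily */
	size_t i, nwords = len / sizeof(uint64_t);

	/* head only ever grows; a value past the buffer size means it wrapped */
	if (head >= len)
		return true;

	/* don't scan bytes the head has not reached yet */
	if (head > tail)
		tail = head;

	for (i = tail / sizeof(uint64_t); i < nwords; i++)
		if (words[i])
			return true;

	return false;
}

With this wired up, an invocation along the lines of 'perf record -e arm_spe_0// -a -S -- sleep 1' should be able to take AUX area snapshots, though the exact event name depends on the PMU the system exposes.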
diff --git a/tools/perf/arch/powerpc/include/arch-tests.h b/tools/perf/arch/powerpc/include/arch-tests.h
index c62538052404d7..452b3d90452181 100644
--- a/tools/perf/arch/powerpc/include/arch-tests.h
+++ b/tools/perf/arch/powerpc/include/arch-tests.h
@@ -2,6 +2,6 @@
#ifndef ARCH_TESTS_H
#define ARCH_TESTS_H
-extern struct test arch_tests[];
+extern struct test_suite *arch_tests[];
#endif
diff --git a/tools/perf/arch/powerpc/tests/arch-tests.c b/tools/perf/arch/powerpc/tests/arch-tests.c
index 8c3fbd4af8171c..eb98c57b5aeba3 100644
--- a/tools/perf/arch/powerpc/tests/arch-tests.c
+++ b/tools/perf/arch/powerpc/tests/arch-tests.c
@@ -3,14 +3,10 @@
#include "tests/tests.h"
#include "arch-tests.h"
-struct test arch_tests[] = {
+
+struct test_suite *arch_tests[] = {
#ifdef HAVE_DWARF_UNWIND_SUPPORT
- {
- .desc = "Test dwarf unwind",
- .func = test__dwarf_unwind,
- },
+ &suite__dwarf_unwind,
#endif
- {
- .func = NULL,
- },
+ NULL,
};
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
index 18b5500ea8bfd5..fe8f8dd157b4d4 100644
--- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
@@ -370,6 +370,7 @@
446 common landlock_restrict_self sys_landlock_restrict_self
447 common memfd_secret sys_memfd_secret
448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
#
# Due to a historical design error, certain syscalls are numbered differently
diff --git a/tools/perf/arch/x86/include/arch-tests.h b/tools/perf/arch/x86/include/arch-tests.h
index 9599e7a3f1af27..6a1a1b3c082759 100644
--- a/tools/perf/arch/x86/include/arch-tests.h
+++ b/tools/perf/arch/x86/include/arch-tests.h
@@ -2,15 +2,15 @@
#ifndef ARCH_TESTS_H
#define ARCH_TESTS_H
-struct test;
+struct test_suite;
/* Tests */
-int test__rdpmc(struct test *test, int subtest);
-int test__insn_x86(struct test *test, int subtest);
-int test__intel_pt_pkt_decoder(struct test *test, int subtest);
-int test__bp_modify(struct test *test, int subtest);
-int test__x86_sample_parsing(struct test *test, int subtest);
+int test__rdpmc(struct test_suite *test, int subtest);
+int test__insn_x86(struct test_suite *test, int subtest);
+int test__intel_pt_pkt_decoder(struct test_suite *test, int subtest);
+int test__bp_modify(struct test_suite *test, int subtest);
+int test__x86_sample_parsing(struct test_suite *test, int subtest);
-extern struct test arch_tests[];
+extern struct test_suite *arch_tests[];
#endif
diff --git a/tools/perf/arch/x86/tests/arch-tests.c b/tools/perf/arch/x86/tests/arch-tests.c
index 71aa67367ad6bd..64fb73d14d2f88 100644
--- a/tools/perf/arch/x86/tests/arch-tests.c
+++ b/tools/perf/arch/x86/tests/arch-tests.c
@@ -3,39 +3,28 @@
#include "tests/tests.h"
#include "arch-tests.h"
-struct test arch_tests[] = {
- {
- .desc = "x86 rdpmc",
- .func = test__rdpmc,
- },
+DEFINE_SUITE("x86 rdpmc", rdpmc);
+#ifdef HAVE_AUXTRACE_SUPPORT
+DEFINE_SUITE("x86 instruction decoder - new instructions", insn_x86);
+DEFINE_SUITE("Intel PT packet decoder", intel_pt_pkt_decoder);
+#endif
+#if defined(__x86_64__)
+DEFINE_SUITE("x86 bp modify", bp_modify);
+#endif
+DEFINE_SUITE("x86 Sample parsing", x86_sample_parsing);
+
+struct test_suite *arch_tests[] = {
+ &suite__rdpmc,
#ifdef HAVE_DWARF_UNWIND_SUPPORT
- {
- .desc = "DWARF unwind",
- .func = test__dwarf_unwind,
- },
+ &suite__dwarf_unwind,
#endif
#ifdef HAVE_AUXTRACE_SUPPORT
- {
- .desc = "x86 instruction decoder - new instructions",
- .func = test__insn_x86,
- },
- {
- .desc = "Intel PT packet decoder",
- .func = test__intel_pt_pkt_decoder,
- },
+ &suite__insn_x86,
+ &suite__intel_pt_pkt_decoder,
#endif
#if defined(__x86_64__)
- {
- .desc = "x86 bp modify",
- .func = test__bp_modify,
- },
+ &suite__bp_modify,
#endif
- {
- .desc = "x86 Sample parsing",
- .func = test__x86_sample_parsing,
- },
- {
- .func = NULL,
- },
-
+ &suite__x86_sample_parsing,
+ NULL,
};
diff --git a/tools/perf/arch/x86/tests/bp-modify.c b/tools/perf/arch/x86/tests/bp-modify.c
index dffcf9b52153e5..0924ccd9e36d5b 100644
--- a/tools/perf/arch/x86/tests/bp-modify.c
+++ b/tools/perf/arch/x86/tests/bp-modify.c
@@ -204,7 +204,7 @@ out:
return rip == (unsigned long) bp_1 ? TEST_OK : TEST_FAIL;
}
-int test__bp_modify(struct test *test __maybe_unused,
+int test__bp_modify(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
TEST_ASSERT_VAL("modify test 1 failed\n", !bp_modify1());
diff --git a/tools/perf/arch/x86/tests/insn-x86.c b/tools/perf/arch/x86/tests/insn-x86.c
index 0262b0d8ccf517..94b490c434d07c 100644
--- a/tools/perf/arch/x86/tests/insn-x86.c
+++ b/tools/perf/arch/x86/tests/insn-x86.c
@@ -173,7 +173,7 @@ static int test_data_set(struct test_data *dat_set, int x86_64)
* verbose (-v) option to see all the instructions and whether or not they
* decoded successfully.
*/
-int test__insn_x86(struct test *test __maybe_unused, int subtest __maybe_unused)
+int test__insn_x86(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
int ret = 0;
diff --git a/tools/perf/arch/x86/tests/intel-cqm.c b/tools/perf/arch/x86/tests/intel-cqm.c
index 27dd8cf9e0609a..cb5b2c6c3b3b75 100644
--- a/tools/perf/arch/x86/tests/intel-cqm.c
+++ b/tools/perf/arch/x86/tests/intel-cqm.c
@@ -37,7 +37,7 @@ static pid_t spawn(void)
* the last read counter value to avoid triggering a WARN_ON_ONCE() in
* smp_call_function_many() caused by sending IPIs from NMI context.
*/
-int test__intel_cqm_count_nmi_context(struct test *test __maybe_unused, int subtest __maybe_unused)
+int test__intel_cqm_count_nmi_context(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
struct evlist *evlist = NULL;
struct evsel *evsel = NULL;
diff --git a/tools/perf/arch/x86/tests/intel-pt-pkt-decoder-test.c b/tools/perf/arch/x86/tests/intel-pt-pkt-decoder-test.c
index c933e3dcd0a82e..2fc882ab24c1c9 100644
--- a/tools/perf/arch/x86/tests/intel-pt-pkt-decoder-test.c
+++ b/tools/perf/arch/x86/tests/intel-pt-pkt-decoder-test.c
@@ -289,7 +289,7 @@ static int test_one(struct test_data *d)
* This test feeds byte sequences to the Intel PT packet decoder and checks the
* results. Changes to the packet context are also checked.
*/
-int test__intel_pt_pkt_decoder(struct test *test __maybe_unused, int subtest __maybe_unused)
+int test__intel_pt_pkt_decoder(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
struct test_data *d = data;
int ret;
diff --git a/tools/perf/arch/x86/tests/rdpmc.c b/tools/perf/arch/x86/tests/rdpmc.c
index 1ea916656a2dd4..498413ad9c976a 100644
--- a/tools/perf/arch/x86/tests/rdpmc.c
+++ b/tools/perf/arch/x86/tests/rdpmc.c
@@ -157,7 +157,7 @@ out_close:
return 0;
}
-int test__rdpmc(struct test *test __maybe_unused, int subtest __maybe_unused)
+int test__rdpmc(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
int status = 0;
int wret = 0;
diff --git a/tools/perf/arch/x86/tests/sample-parsing.c b/tools/perf/arch/x86/tests/sample-parsing.c
index c92db87e4479e5..bfbd3662b69e8d 100644
--- a/tools/perf/arch/x86/tests/sample-parsing.c
+++ b/tools/perf/arch/x86/tests/sample-parsing.c
@@ -115,7 +115,7 @@ out_free:
* For now, the PERF_SAMPLE_WEIGHT_STRUCT is the only X86 specific sample type.
* The test only checks the PERF_SAMPLE_WEIGHT_STRUCT type.
*/
-int test__x86_sample_parsing(struct test *test __maybe_unused, int subtest __maybe_unused)
+int test__x86_sample_parsing(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
return do_test(PERF_SAMPLE_WEIGHT_STRUCT);
}
diff --git a/tools/perf/bench/futex-lock-pi.c b/tools/perf/bench/futex-lock-pi.c
index 5d1fe9c35807a4..137890f78e17ae 100644
--- a/tools/perf/bench/futex-lock-pi.c
+++ b/tools/perf/bench/futex-lock-pi.c
@@ -233,6 +233,7 @@ int bench_futex_lock_pi(int argc, const char **argv)
print_summary();
free(worker);
+ perf_cpu_map__put(cpu);
return ret;
err:
usage_with_options(bench_futex_lock_pi_usage, options);
diff --git a/tools/perf/bench/futex-requeue.c b/tools/perf/bench/futex-requeue.c
index 97fe31fd3a236b..f7a5ffebb9408b 100644
--- a/tools/perf/bench/futex-requeue.c
+++ b/tools/perf/bench/futex-requeue.c
@@ -294,6 +294,7 @@ int bench_futex_requeue(int argc, const char **argv)
print_summary();
free(worker);
+ perf_cpu_map__put(cpu);
return ret;
err:
usage_with_options(bench_futex_requeue_usage, options);
diff --git a/tools/perf/bench/futex-wake-parallel.c b/tools/perf/bench/futex-wake-parallel.c
index e970e6b9ad535a..0983f40b4b408d 100644
--- a/tools/perf/bench/futex-wake-parallel.c
+++ b/tools/perf/bench/futex-wake-parallel.c
@@ -329,6 +329,7 @@ int bench_futex_wake_parallel(int argc, const char **argv)
print_summary();
free(blocked_worker);
+ perf_cpu_map__put(cpu);
return ret;
}
#endif /* HAVE_PTHREAD_BARRIER */
diff --git a/tools/perf/bench/futex-wake.c b/tools/perf/bench/futex-wake.c
index 77f058a4779034..2226a475e782b6 100644
--- a/tools/perf/bench/futex-wake.c
+++ b/tools/perf/bench/futex-wake.c
@@ -222,5 +222,6 @@ int bench_futex_wake(int argc, const char **argv)
print_summary();
free(worker);
+ perf_cpu_map__put(cpu);
return ret;
}
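The four bench/futex hunks above all plug the same leak: each benchmark creates a perf_cpu_map at startup but never dropped its reference on the success path. A minimal sketch of the libperf get/put pairing they rely on; the surrounding function is hypothetical:

#include <perf/cpumap.h>

/* Count online CPUs via libperf, releasing the map before returning. */
static int count_online_cpus(void)
{
	struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);	/* holds a reference */
	int n;

	if (!cpus)
		return -1;

	n = perf_cpu_map__nr(cpus);
	perf_cpu_map__put(cpus);	/* drop the reference, freeing the map */
	return n;
}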
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 624ea12ce5ca92..0b52e08e558e3c 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -979,6 +979,8 @@ static struct syscall_fmt syscall_fmts[] = {
.arg = { [2] = { .scnprintf = SCA_GETRANDOM_FLAGS, /* flags */ }, }, },
{ .name = "getrlimit",
.arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
+ { .name = "getsockopt",
+ .arg = { [1] = STRARRAY(level, socket_level), }, },
{ .name = "gettid", .errpid = true, },
{ .name = "ioctl",
.arg = {
@@ -1121,6 +1123,8 @@ static struct syscall_fmt syscall_fmts[] = {
.arg = { [0] = STRARRAY(which, itimers), }, },
{ .name = "setrlimit",
.arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
+ { .name = "setsockopt",
+ .arg = { [1] = STRARRAY(level, socket_level), }, },
{ .name = "socket",
.arg = { [0] = STRARRAY(family, socket_families),
[1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
diff --git a/tools/perf/design.txt b/tools/perf/design.txt
index a42fab308ff6a2..aa8cfeabb7432d 100644
--- a/tools/perf/design.txt
+++ b/tools/perf/design.txt
@@ -106,6 +106,9 @@ enum perf_hw_id {
PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4,
PERF_COUNT_HW_BRANCH_MISSES = 5,
PERF_COUNT_HW_BUS_CYCLES = 6,
+ PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7,
+ PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8,
+ PERF_COUNT_HW_REF_CPU_CYCLES = 9,
};
These are standardized types of events that work relatively uniformly
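The three entries added above bring design.txt in line with the generalized hardware events the kernel already exports. As a hedged, self-contained example of counting one of them, PERF_COUNT_HW_REF_CPU_CYCLES, through the raw syscall (the busy loop is an arbitrary stand-in for real work):

#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_REF_CPU_CYCLES;
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	/* measure the calling thread on any CPU */
	fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	for (volatile int i = 0; i < 1000000; i++)
		;				/* some work to measure */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == (ssize_t)sizeof(count))
		printf("ref cycles: %llu\n", (unsigned long long)count);

	close(fd);
	return 0;
}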
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/metrics.json b/tools/perf/pmu-events/arch/powerpc/power10/metrics.json
new file mode 100644
index 00000000000000..b57526fa44f2df
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power10/metrics.json
@@ -0,0 +1,676 @@
+[
+ {
+ "BriefDescription": "Percentage of cycles that are run cycles",
+ "MetricExpr": "PM_RUN_CYC / PM_CYC * 100",
+ "MetricGroup": "General",
+ "MetricName": "RUN_CYCLES_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction",
+ "MetricExpr": "PM_CYC / PM_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "CYCLES_PER_INSTRUCTION"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when dispatch was stalled for any reason",
+ "MetricExpr": "PM_DISP_STALL_CYC / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when dispatch was stalled because there was a flush",
+ "MetricExpr": "PM_DISP_STALL_FLUSH / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_FLUSH_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when dispatch was stalled because the MMU was handling a translation miss",
+ "MetricExpr": "PM_DISP_STALL_TRANSLATION / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_TRANSLATION_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when dispatch was stalled waiting to resolve an instruction ERAT miss",
+ "MetricExpr": "PM_DISP_STALL_IERAT_ONLY_MISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_IERAT_ONLY_MISS_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when dispatch was stalled waiting to resolve an instruction TLB miss",
+ "MetricExpr": "PM_DISP_STALL_ITLB_MISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_ITLB_MISS_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when dispatch was stalled due to an icache miss",
+ "MetricExpr": "PM_DISP_STALL_IC_MISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_IC_MISS_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when dispatch was stalled while the instruction was fetched from the local L2",
+ "MetricExpr": "PM_DISP_STALL_IC_L2 / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_IC_L2_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when dispatch was stalled while the instruction was fetched from the local L3",
+ "MetricExpr": "PM_DISP_STALL_IC_L3 / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_IC_L3_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when dispatch was stalled while the instruction was fetched from any source beyond the local L3",
+ "MetricExpr": "PM_DISP_STALL_IC_L3MISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_IC_L3MISS_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when dispatch was stalled due to an icache miss after a branch mispredict",
+ "MetricExpr": "PM_DISP_STALL_BR_MPRED_ICMISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_BR_MPRED_ICMISS_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when dispatch was stalled while instruction was fetched from the local L2 after suffering a branch mispredict",
+ "MetricExpr": "PM_DISP_STALL_BR_MPRED_IC_L2 / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_BR_MPRED_IC_L2_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when dispatch was stalled while instruction was fetched from the local L3 after suffering a branch mispredict",
+ "MetricExpr": "PM_DISP_STALL_BR_MPRED_IC_L3 / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_BR_MPRED_IC_L3_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when dispatch was stalled while instruction was fetched from any source beyond the local L3 after suffering a branch mispredict",
+ "MetricExpr": "PM_DISP_STALL_BR_MPRED_IC_L3MISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_BR_MPRED_IC_L3MISS_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when dispatch was stalled due to a branch mispredict",
+ "MetricExpr": "PM_DISP_STALL_BR_MPRED / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_BR_MPRED_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction was held at dispatch for any reason",
+ "MetricExpr": "PM_DISP_STALL_HELD_CYC / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_HELD_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction was held at dispatch because of a synchronizing instruction that requires the ICT to be empty before dispatch",
+ "MetricExpr": "PM_DISP_STALL_HELD_SYNC_CYC / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISP_HELD_STALL_SYNC_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction was held at dispatch while waiting on the scoreboard",
+ "MetricExpr": "PM_DISP_STALL_HELD_SCOREBOARD_CYC / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISP_HELD_STALL_SCOREBOARD_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction was held at dispatch due to issue queue full",
+ "MetricExpr": "PM_DISP_STALL_HELD_ISSQ_FULL_CYC / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISP_HELD_STALL_ISSQ_FULL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction was held at dispatch because the mapper/SRB was full",
+ "MetricExpr": "PM_DISP_STALL_HELD_RENAME_CYC / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_HELD_RENAME_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction was held at dispatch because the STF mapper/SRB was full",
+ "MetricExpr": "PM_DISP_STALL_HELD_STF_MAPPER_CYC / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_HELD_STF_MAPPER_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction was held at dispatch because the XVFC mapper/SRB was full",
+ "MetricExpr": "PM_DISP_STALL_HELD_XVFC_MAPPER_CYC / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_HELD_XVFC_MAPPER_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction was held at dispatch for any other reason",
+ "MetricExpr": "PM_DISP_STALL_HELD_OTHER_CYC / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_HELD_OTHER_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction has been dispatched but not issued for any reason",
+ "MetricExpr": "PM_ISSUE_STALL / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "ISSUE_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is waiting to be finished in one of the execution units",
+ "MetricExpr": "PM_EXEC_STALL / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "EXECUTION_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction spent executing an NTC instruction that gets flushed some time after dispatch",
+ "MetricExpr": "PM_EXEC_STALL_NTC_FLUSH / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "NTC_FLUSH_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTF instruction finishes at dispatch",
+ "MetricExpr": "PM_EXEC_STALL_FIN_AT_DISP / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "FIN_AT_DISP_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is executing in the branch unit",
+ "MetricExpr": "PM_EXEC_STALL_BRU / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "BRU_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is a simple fixed point instruction that is executing in the LSU",
+ "MetricExpr": "PM_EXEC_STALL_SIMPLE_FX / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "SIMPLE_FX_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is executing in the VSU",
+ "MetricExpr": "PM_EXEC_STALL_VSU / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "VSU_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is waiting to be finished in one of the execution units",
+ "MetricExpr": "PM_EXEC_STALL_TRANSLATION / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "TRANSLATION_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is a load or store that suffered a translation miss",
+ "MetricExpr": "PM_EXEC_STALL_DERAT_ONLY_MISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DERAT_ONLY_MISS_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is recovering from a TLB miss",
+ "MetricExpr": "PM_EXEC_STALL_DERAT_DTLB_MISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DERAT_DTLB_MISS_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is executing in the LSU",
+ "MetricExpr": "PM_EXEC_STALL_LSU / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "LSU_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is a load that is executing in the LSU",
+ "MetricExpr": "PM_EXEC_STALL_LOAD / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "LOAD_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is waiting for a load miss to resolve from either the local L2 or local L3",
+ "MetricExpr": "PM_EXEC_STALL_DMISS_L2L3 / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DMISS_L2L3_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is waiting for a load miss to resolve from either the local L2 or local L3, with an RC dispatch conflict",
+ "MetricExpr": "PM_EXEC_STALL_DMISS_L2L3_CONFLICT / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DMISS_L2L3_CONFLICT_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is waiting for a load miss to resolve from either the local L2 or local L3, without an RC dispatch conflict",
+ "MetricExpr": "PM_EXEC_STALL_DMISS_L2L3_NOCONFLICT / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DMISS_L2L3_NOCONFLICT_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is waiting for a load miss to resolve from a source beyond the local L2 and local L3",
+ "MetricExpr": "PM_EXEC_STALL_DMISS_L3MISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DMISS_L3MISS_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is waiting for a load miss to resolve from a neighbor chiplet's L2 or L3 in the same chip",
+ "MetricExpr": "PM_EXEC_STALL_DMISS_L21_L31 / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DMISS_L21_L31_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is waiting for a load miss to resolve from L4, local memory or OpenCAPI chip",
+ "MetricExpr": "PM_EXEC_STALL_DMISS_LMEM / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DMISS_LMEM_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is waiting for a load miss to resolve from a remote chip (cache, L4, memory or OpenCAPI) in the same group",
+ "MetricExpr": "PM_EXEC_STALL_DMISS_OFF_CHIP / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DMISS_OFF_CHIP_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is waiting for a load miss to resolve from a distant chip (cache, L4, memory or OpenCAPI chip)",
+ "MetricExpr": "PM_EXEC_STALL_DMISS_OFF_NODE / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DMISS_OFF_NODE_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is executing a TLBIEL instruction",
+ "MetricExpr": "PM_EXEC_STALL_TLBIEL / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "TLBIEL_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is finishing a load after its data has been reloaded from a data source beyond the local L1, OR when the LSU is processing an L1-hit, OR when the NTF instruction merged with another load in the LMQ",
+ "MetricExpr": "PM_EXEC_STALL_LOAD_FINISH / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "LOAD_FINISH_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is a store that is executing in the LSU",
+ "MetricExpr": "PM_EXEC_STALL_STORE / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "STORE_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is in the store unit outside of handling store misses or other special store operations",
+ "MetricExpr": "PM_EXEC_STALL_STORE_PIPE / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "STORE_PIPE_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is a store whose cache line was not resident in the L1 and had to wait for allocation of the missing line into the L1",
+ "MetricExpr": "PM_EXEC_STALL_STORE_MISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "STORE_MISS_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is a TLBIE instruction waiting for a response from the L2",
+ "MetricExpr": "PM_EXEC_STALL_TLBIE / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "TLBIE_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is executing a PTESYNC instruction",
+ "MetricExpr": "PM_EXEC_STALL_PTESYNC / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "PTESYNC_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction cannot complete because the thread was blocked",
+ "MetricExpr": "PM_CMPL_STALL / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "COMPLETION_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction cannot complete because it was interrupted by ANY exception",
+ "MetricExpr": "PM_CMPL_STALL_EXCEPTION / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "EXCEPTION_COMPLETION_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is stuck at finish waiting for the non-speculative finish of either a STCX instruction waiting for its result or a load waiting for non-critical sectors of data and ECC",
+ "MetricExpr": "PM_CMPL_STALL_MEM_ECC / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "MEM_ECC_COMPLETION_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is a STCX instruction waiting for resolution from the nest",
+ "MetricExpr": "PM_CMPL_STALL_STCX / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "STCX_COMPLETION_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is a LWSYNC instruction waiting to complete",
+ "MetricExpr": "PM_CMPL_STALL_LWSYNC / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "LWSYNC_COMPLETION_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is a HWSYNC instruction stuck at finish waiting for a response from the L2",
+ "MetricExpr": "PM_CMPL_STALL_HWSYNC / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "HWSYNC_COMPLETION_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction required special handling before completion",
+ "MetricExpr": "PM_CMPL_STALL_SPECIAL / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "SPECIAL_COMPLETION_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when dispatch was stalled because fetch was being held, so there was nothing in the pipeline for this thread",
+ "MetricExpr": "PM_DISP_STALL_FETCH / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_FETCH_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction was held at dispatch because of power management",
+ "MetricExpr": "PM_DISP_STALL_HELD_HALT_CYC / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_HELD_HALT_CPI"
+ },
+ {
+ "BriefDescription": "Percentage of flushes per completed instruction",
+ "MetricExpr": "PM_FLUSH / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "Others",
+ "MetricName": "FLUSH_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of flushes due to a branch mispredict per completed instruction",
+ "MetricExpr": "PM_FLUSH_MPRED / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "Others",
+ "MetricName": "BR_MPRED_FLUSH_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of branch mispredictions per completed instruction",
+ "MetricExpr": "PM_BR_MPRED_CMPL / PM_RUN_INST_CMPL",
+ "MetricGroup": "Others",
+ "MetricName": "BRANCH_MISPREDICTION_RATE"
+ },
+ {
+ "BriefDescription": "Percentage of finished loads that missed in the L1",
+ "MetricExpr": "PM_LD_MISS_L1 / PM_LD_REF_L1 * 100",
+ "MetricGroup": "Others",
+ "MetricName": "L1_LD_MISS_RATIO",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of completed instructions that were loads that missed the L1",
+ "MetricExpr": "PM_LD_MISS_L1 / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "Others",
+ "MetricName": "L1_LD_MISS_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of completed instructions when the DPTEG required for the load/store instruction in execution was missing from the TLB",
+ "MetricExpr": "PM_DTLB_MISS / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "Others",
+ "MetricName": "DTLB_MISS_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Average number of completed instructions dispatched per instruction completed",
+ "MetricExpr": "PM_INST_DISP / PM_RUN_INST_CMPL",
+ "MetricGroup": "General",
+ "MetricName": "DISPATCH_PER_INST_CMPL"
+ },
+ {
+ "BriefDescription": "Percentage of completed instructions that were a demand load that did not hit in the L1 or L2",
+ "MetricExpr": "PM_DATA_FROM_L2MISS / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "General",
+ "MetricName": "L2_LD_MISS_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of completed instructions that were demand fetches that missed the L1 icache",
+ "MetricExpr": "PM_L1_ICACHE_MISS / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "Instruction_Misses",
+ "MetricName": "L1_INST_MISS_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of completed instructions that were demand fetches that reloaded from beyond the L3 icache",
+ "MetricExpr": "PM_INST_FROM_L3MISS / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "General",
+ "MetricName": "L3_INST_MISS_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Average number of completed instructions per cycle",
+ "MetricExpr": "PM_INST_CMPL / PM_CYC",
+ "MetricGroup": "General",
+ "MetricName": "IPC"
+ },
+ {
+ "BriefDescription": "Average number of cycles per completed instruction group",
+ "MetricExpr": "PM_CYC / PM_1PLUS_PPC_CMPL",
+ "MetricGroup": "General",
+ "MetricName": "CYCLES_PER_COMPLETED_INSTRUCTIONS_SET"
+ },
+ {
+ "BriefDescription": "Percentage of cycles when at least 1 instruction dispatched",
+ "MetricExpr": "PM_1PLUS_PPC_DISP / PM_RUN_CYC * 100",
+ "MetricGroup": "General",
+ "MetricName": "CYCLES_ATLEAST_ONE_INST_DISPATCHED",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Average number of finished loads per completed instruction",
+ "MetricExpr": "PM_LD_REF_L1 / PM_RUN_INST_CMPL",
+ "MetricGroup": "General",
+ "MetricName": "LOADS_PER_INST"
+ },
+ {
+ "BriefDescription": "Average number of finished stores per completed instruction",
+ "MetricExpr": "PM_ST_FIN / PM_RUN_INST_CMPL",
+ "MetricGroup": "General",
+ "MetricName": "STORES_PER_INST"
+ },
+ {
+ "BriefDescription": "Percentage of demand loads that reloaded from beyond the L2 per completed instruction",
+ "MetricExpr": "PM_DATA_FROM_L2MISS / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_L2_MISS_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of demand loads that reloaded from beyond the L3 per completed instruction",
+ "MetricExpr": "PM_DATA_FROM_L3MISS / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_L3_MISS_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of DERAT misses with 4k page size per completed instruction",
+ "MetricExpr": "PM_DERAT_MISS_4K / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "Translation",
+ "MetricName": "DERAT_4K_MISS_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of DERAT misses with 64k page size per completed instruction",
+ "MetricExpr": "PM_DERAT_MISS_64K / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "Translation",
+ "MetricName": "DERAT_64K_MISS_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Average number of run cycles per completed instruction",
+ "MetricExpr": "PM_RUN_CYC / PM_RUN_INST_CMPL",
+ "MetricGroup": "General",
+ "MetricName": "RUN_CPI"
+ },
+ {
+ "BriefDescription": "Percentage of DERAT misses per completed instruction",
+ "MetricExpr": "PM_DERAT_MISS / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "Translation",
+ "MetricName": "DERAT_MISS_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Average number of completed instructions per run cycle",
+ "MetricExpr": "PM_RUN_INST_CMPL / PM_RUN_CYC",
+ "MetricGroup": "General",
+ "MetricName": "RUN_IPC"
+ },
+ {
+ "BriefDescription": "Average number of completed instructions per instruction group",
+ "MetricExpr": "PM_RUN_INST_CMPL / PM_1PLUS_PPC_CMPL",
+ "MetricGroup": "General",
+ "MetricName": "AVERAGE_COMPLETED_INSTRUCTION_SET_SIZE"
+ },
+ {
+ "BriefDescription": "Average number of finished instructions per completed instructions",
+ "MetricExpr": "PM_INST_FIN / PM_RUN_INST_CMPL",
+ "MetricGroup": "General",
+ "MetricName": "INST_FIN_PER_CMPL"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTF instruction is completing and the finish was overlooked",
+ "MetricExpr": "PM_EXEC_STALL_UNKNOWN / PM_RUN_INST_CMPL",
+ "MetricGroup": "General",
+ "MetricName": "EXEC_STALL_UNKOWN_CPI"
+ },
+ {
+ "BriefDescription": "Percentage of finished branches that were taken",
+ "MetricExpr": "PM_BR_TAKEN_CMPL / PM_BR_FIN * 100",
+ "MetricGroup": "General",
+ "MetricName": "TAKEN_BRANCHES",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of completed instructions that were a demand load that did not hit in the L1, L2, or the L3",
+ "MetricExpr": "PM_DATA_FROM_L3MISS / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "General",
+ "MetricName": "L3_LD_MISS_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Average number of finished branches per completed instruction",
+ "MetricExpr": "PM_BR_FIN / PM_RUN_INST_CMPL",
+ "MetricGroup": "General",
+ "MetricName": "BRANCHES_PER_INST"
+ },
+ {
+ "BriefDescription": "Average number of instructions finished in the LSU per completed instruction",
+ "MetricExpr": "PM_LSU_FIN / PM_RUN_INST_CMPL",
+ "MetricGroup": "General",
+ "MetricName": "LSU_PER_INST"
+ },
+ {
+ "BriefDescription": "Average number of instructions finished in the VSU per completed instruction",
+ "MetricExpr": "PM_VSU_FIN / PM_RUN_INST_CMPL",
+ "MetricGroup": "General",
+ "MetricName": "VSU_PER_INST"
+ },
+ {
+ "BriefDescription": "Average number of TLBIE instructions finished in the LSU per completed instruction",
+ "MetricExpr": "PM_TLBIE_FIN / PM_RUN_INST_CMPL",
+ "MetricGroup": "General",
+ "MetricName": "TLBIE_PER_INST"
+ },
+ {
+ "BriefDescription": "Average number of STCX instructions finshed per completed instruction",
+ "MetricExpr": "PM_STCX_FIN / PM_RUN_INST_CMPL",
+ "MetricGroup": "General",
+ "MetricName": "STXC_PER_INST"
+ },
+ {
+ "BriefDescription": "Average number of LARX instructions finshed per completed instruction",
+ "MetricExpr": "PM_LARX_FIN / PM_RUN_INST_CMPL",
+ "MetricGroup": "General",
+ "MetricName": "LARX_PER_INST"
+ },
+ {
+ "BriefDescription": "Average number of PTESYNC instructions finshed per completed instruction",
+ "MetricExpr": "PM_PTESYNC_FIN / PM_RUN_INST_CMPL",
+ "MetricGroup": "General",
+ "MetricName": "PTESYNC_PER_INST"
+ },
+ {
+ "BriefDescription": "Average number of simple fixed-point instructions finshed in the store unit per completed instruction",
+ "MetricExpr": "PM_FX_LSU_FIN / PM_RUN_INST_CMPL",
+ "MetricGroup": "General",
+ "MetricName": "FX_PER_INST"
+ },
+ {
+ "BriefDescription": "Percentage of demand load misses that reloaded the L1 cache",
+ "MetricExpr": "PM_LD_DEMAND_MISS_L1 / PM_LD_MISS_L1 * 100",
+ "MetricGroup": "General",
+ "MetricName": "DL1_MISS_RELOADS",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of demand load misses that reloaded from beyond the local L2",
+ "MetricExpr": "PM_DATA_FROM_L2MISS / PM_LD_DEMAND_MISS_L1 * 100",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_L2_MISS",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of demand load misses that reloaded from beyond the local L3",
+ "MetricExpr": "PM_DATA_FROM_L3MISS / PM_LD_DEMAND_MISS_L1 * 100",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_L3_MISS",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of cycles stalled due to the NTC instruction waiting for a load miss to resolve from a source beyond the local L2 and local L3",
+ "MetricExpr": "DMISS_L3MISS_STALL_CPI / RUN_CPI * 100",
+ "MetricGroup": "General",
+ "MetricName": "DCACHE_MISS_CPI",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of DERAT misses with 2M page size per completed instruction",
+ "MetricExpr": "PM_DERAT_MISS_2M / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "Translation",
+ "MetricName": "DERAT_2M_MISS_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of DERAT misses with 16M page size per completed instruction",
+ "MetricExpr": "PM_DERAT_MISS_16M / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "Translation",
+ "MetricName": "DERAT_16M_MISS_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "DERAT miss ratio for 4K page size",
+ "MetricExpr": "PM_DERAT_MISS_4K / PM_DERAT_MISS",
+ "MetricGroup": "Translation",
+ "MetricName": "DERAT_4K_MISS_RATIO"
+ },
+ {
+ "BriefDescription": "DERAT miss ratio for 2M page size",
+ "MetricExpr": "PM_DERAT_MISS_2M / PM_DERAT_MISS",
+ "MetricGroup": "Translation",
+ "MetricName": "DERAT_2M_MISS_RATIO"
+ },
+ {
+ "BriefDescription": "DERAT miss ratio for 16M page size",
+ "MetricExpr": "PM_DERAT_MISS_16M / PM_DERAT_MISS",
+ "MetricGroup": "Translation",
+ "MetricName": "DERAT_16M_MISS_RATIO"
+ },
+ {
+ "BriefDescription": "DERAT miss ratio for 64K page size",
+ "MetricExpr": "PM_DERAT_MISS_64K / PM_DERAT_MISS",
+ "MetricGroup": "Translation",
+ "MetricName": "DERAT_64K_MISS_RATIO"
+ },
+ {
+ "BriefDescription": "Percentage of DERAT misses that resulted in TLB reloads",
+ "MetricExpr": "PM_DTLB_MISS / PM_DERAT_MISS * 100",
+ "MetricGroup": "Translation",
+ "MetricName": "DERAT_MISS_RELOAD",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of icache misses that were reloaded from beyond the local L3",
+ "MetricExpr": "PM_INST_FROM_L3MISS / PM_L1_ICACHE_MISS * 100",
+ "MetricGroup": "Instruction_Misses",
+ "MetricName": "INST_FROM_L3_MISS",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of icache reloads from the beyond the L3 per completed instruction",
+ "MetricExpr": "PM_INST_FROM_L3MISS / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "Instruction_Misses",
+ "MetricName": "INST_FROM_L3_MISS_RATE",
+ "ScaleUnit": "1%"
+ }
+]
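Each entry above is just a ratio of the raw PMU counts named in MetricExpr, with ScaleUnit applied for display. A small worked example with made-up counter values (the numbers are purely illustrative, not measurements):

#include <stdio.h>

int main(void)
{
	/* hypothetical counts read from the PMU */
	double pm_cyc = 2000000, pm_run_cyc = 1800000, pm_inst_cmpl = 1000000;

	/* "PM_CYC / PM_INST_CMPL" -> CYCLES_PER_INSTRUCTION */
	printf("CYCLES_PER_INSTRUCTION = %.2f\n", pm_cyc / pm_inst_cmpl);

	/* "PM_RUN_CYC / PM_CYC * 100" with ScaleUnit 1%, i.e. RUN_CYCLES_RATE */
	printf("RUN_CYCLES_RATE = %.1f%%\n", pm_run_cyc / pm_cyc * 100.0);

	return 0;
}

In use, the metric engine evaluates these when a metric or group is requested, e.g. something like 'perf stat -M CPI -- <workload>', though the exact group spelling accepted on the command line may vary by perf version.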
diff --git a/tools/perf/tests/api-io.c b/tools/perf/tests/api-io.c
index 2ada86ad608409..e91cf2c127f166 100644
--- a/tools/perf/tests/api-io.c
+++ b/tools/perf/tests/api-io.c
@@ -289,8 +289,8 @@ static int test_get_dec(void)
return ret;
}
-int test__api_io(struct test *test __maybe_unused,
- int subtest __maybe_unused)
+static int test__api_io(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
{
int ret = 0;
@@ -302,3 +302,5 @@ int test__api_io(struct test *test __maybe_unused,
ret = TEST_FAIL;
return ret;
}
+
+DEFINE_SUITE("Test api io", api_io);
diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c
index 9b40a25376ae5d..0f73e300f207fc 100644
--- a/tools/perf/tests/attr.c
+++ b/tools/perf/tests/attr.c
@@ -178,7 +178,7 @@ static int run_dir(const char *d, const char *perf)
return system(cmd) ? TEST_FAIL : TEST_OK;
}
-int test__attr(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__attr(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
struct stat st;
char path_perf[PATH_MAX];
@@ -207,3 +207,5 @@ int test__attr(struct test *test __maybe_unused, int subtest __maybe_unused)
return TEST_SKIP;
}
+
+DEFINE_SUITE("Setup struct perf_event_attr", attr);
diff --git a/tools/perf/tests/backward-ring-buffer.c b/tools/perf/tests/backward-ring-buffer.c
index 7447a44789919f..79a980b1e78693 100644
--- a/tools/perf/tests/backward-ring-buffer.c
+++ b/tools/perf/tests/backward-ring-buffer.c
@@ -82,7 +82,7 @@ static int do_test(struct evlist *evlist, int mmap_pages,
}
-int test__backward_ring_buffer(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__backward_ring_buffer(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
int ret = TEST_SKIP, err, sample_count = 0, comm_count = 0;
char pid[16], sbuf[STRERR_BUFSIZE];
@@ -167,3 +167,5 @@ out_delete_evlist:
evlist__delete(evlist);
return ret;
}
+
+DEFINE_SUITE("Read backward ring buffer", backward_ring_buffer);
diff --git a/tools/perf/tests/bitmap.c b/tools/perf/tests/bitmap.c
index 12b805efdca0d9..38485634723686 100644
--- a/tools/perf/tests/bitmap.c
+++ b/tools/perf/tests/bitmap.c
@@ -40,7 +40,7 @@ static int test_bitmap(const char *str)
return ret;
}
-int test__bitmap_print(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__bitmap_print(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
TEST_ASSERT_VAL("failed to convert map", test_bitmap("1"));
TEST_ASSERT_VAL("failed to convert map", test_bitmap("1,5"));
@@ -51,3 +51,5 @@ int test__bitmap_print(struct test *test __maybe_unused, int subtest __maybe_unu
TEST_ASSERT_VAL("failed to convert map", test_bitmap("1-10,12-20,22-30,32-40"));
return 0;
}
+
+DEFINE_SUITE("Print bitmap", bitmap_print);
diff --git a/tools/perf/tests/bp_account.c b/tools/perf/tests/bp_account.c
index 489b50604cf274..d1ebb5561e5b31 100644
--- a/tools/perf/tests/bp_account.c
+++ b/tools/perf/tests/bp_account.c
@@ -19,6 +19,19 @@
#include "../perf-sys.h"
#include "cloexec.h"
+/*
+ * PowerPC and S390 do not support creation of instruction breakpoints using the
+ * perf_event interface.
+ *
+ * Just disable the test for these architectures until these issues are
+ * resolved.
+ */
+#if defined(__powerpc__) || defined(__s390x__)
+#define BP_ACCOUNT_IS_SUPPORTED 0
+#else
+#define BP_ACCOUNT_IS_SUPPORTED 1
+#endif
+
static volatile long the_var;
static noinline int test_function(void)
@@ -173,13 +186,18 @@ static int detect_share(int wp_cnt, int bp_cnt)
* we create another watchpoint to ensure
* the slot accounting is correct
*/
-int test__bp_accounting(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__bp_accounting(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
int has_ioctl = detect_ioctl();
int wp_cnt = detect_cnt(false);
int bp_cnt = detect_cnt(true);
int share = detect_share(wp_cnt, bp_cnt);
+ if (!BP_ACCOUNT_IS_SUPPORTED) {
+ pr_debug("Test not supported on this architecture");
+ return TEST_SKIP;
+ }
+
pr_debug("watchpoints count %d, breakpoints count %d, has_ioctl %d, share %d\n",
wp_cnt, bp_cnt, has_ioctl, share);
@@ -189,18 +207,4 @@ int test__bp_accounting(struct test *test __maybe_unused, int subtest __maybe_un
return bp_accounting(wp_cnt, share);
}
-bool test__bp_account_is_supported(void)
-{
- /*
- * PowerPC and S390 do not support creation of instruction
- * breakpoints using the perf_event interface.
- *
- * Just disable the test for these architectures until these
- * issues are resolved.
- */
-#if defined(__powerpc__) || defined(__s390x__)
- return false;
-#else
- return true;
-#endif
-}
+DEFINE_SUITE("Breakpoint accounting", bp_accounting);
diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c
index ef37353636d878..1f2908f02389a8 100644
--- a/tools/perf/tests/bp_signal.c
+++ b/tools/perf/tests/bp_signal.c
@@ -161,11 +161,16 @@ static long long bp_count(int fd)
return count;
}
-int test__bp_signal(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__bp_signal(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
struct sigaction sa;
long long count1, count2, count3;
+ if (!BP_SIGNAL_IS_SUPPORTED) {
+ pr_debug("Test not supported on this architecture");
+ return TEST_SKIP;
+ }
+
/* setup SIGIO signal handler */
memset(&sa, 0, sizeof(struct sigaction));
sa.sa_sigaction = (void *) sig_handler;
@@ -285,29 +290,4 @@ int test__bp_signal(struct test *test __maybe_unused, int subtest __maybe_unused
TEST_OK : TEST_FAIL;
}
-bool test__bp_signal_is_supported(void)
-{
- /*
- * PowerPC and S390 do not support creation of instruction
- * breakpoints using the perf_event interface.
- *
- * ARM requires explicit rounding down of the instruction
- * pointer in Thumb mode, and then requires the single-step
- * to be handled explicitly in the overflow handler to avoid
- * stepping into the SIGIO handler and getting stuck on the
- * breakpointed instruction.
- *
- * Since arm64 has the same issue with arm for the single-step
- * handling, this case also gets stuck on the breakpointed
- * instruction.
- *
- * Just disable the test for these architectures until these
- * issues are resolved.
- */
-#if defined(__powerpc__) || defined(__s390x__) || defined(__arm__) || \
- defined(__aarch64__)
- return false;
-#else
- return true;
-#endif
-}
+DEFINE_SUITE("Breakpoint overflow signal handler", bp_signal);
diff --git a/tools/perf/tests/bp_signal_overflow.c b/tools/perf/tests/bp_signal_overflow.c
index eb4dbbddf4ffcf..4e897c2cf26b88 100644
--- a/tools/perf/tests/bp_signal_overflow.c
+++ b/tools/perf/tests/bp_signal_overflow.c
@@ -59,13 +59,18 @@ static long long bp_count(int fd)
#define EXECUTIONS 10000
#define THRESHOLD 100
-int test__bp_signal_overflow(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__bp_signal_overflow(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
struct perf_event_attr pe;
struct sigaction sa;
long long count;
int fd, i, fails = 0;
+ if (!BP_SIGNAL_IS_SUPPORTED) {
+ pr_debug("Test not supported on this architecture");
+ return TEST_SKIP;
+ }
+
/* setup SIGIO signal handler */
memset(&sa, 0, sizeof(struct sigaction));
sa.sa_sigaction = (void *) sig_handler;
@@ -133,3 +138,5 @@ int test__bp_signal_overflow(struct test *test __maybe_unused, int subtest __may
return fails ? TEST_FAIL : TEST_OK;
}
+
+DEFINE_SUITE("Breakpoint overflow sampling", bp_signal_overflow);
diff --git a/tools/perf/tests/bpf.c b/tools/perf/tests/bpf.c
index 2bf146e49ce886..329f77f592f421 100644
--- a/tools/perf/tests/bpf.c
+++ b/tools/perf/tests/bpf.c
@@ -62,7 +62,6 @@ static int llseek_loop(void)
static struct {
enum test_llvm__testcase prog_id;
- const char *desc;
const char *name;
const char *msg_compile_fail;
const char *msg_load_fail;
@@ -72,7 +71,6 @@ static struct {
} bpf_testcase_table[] = {
{
.prog_id = LLVM_TESTCASE_BASE,
- .desc = "Basic BPF filtering",
.name = "[basic_bpf_test]",
.msg_compile_fail = "fix 'perf test LLVM' first",
.msg_load_fail = "load bpf object failed",
@@ -81,7 +79,6 @@ static struct {
},
{
.prog_id = LLVM_TESTCASE_BASE,
- .desc = "BPF pinning",
.name = "[bpf_pinning]",
.msg_compile_fail = "fix kbuild first",
.msg_load_fail = "check your vmlinux setting?",
@@ -92,7 +89,6 @@ static struct {
#ifdef HAVE_BPF_PROLOGUE
{
.prog_id = LLVM_TESTCASE_BPF_PROLOGUE,
- .desc = "BPF prologue generation",
.name = "[bpf_prologue_test]",
.msg_compile_fail = "fix kbuild first",
.msg_load_fail = "check your vmlinux setting?",
@@ -283,18 +279,6 @@ out:
return ret;
}
-int test__bpf_subtest_get_nr(void)
-{
- return (int)ARRAY_SIZE(bpf_testcase_table);
-}
-
-const char *test__bpf_subtest_get_desc(int i)
-{
- if (i < 0 || i >= (int)ARRAY_SIZE(bpf_testcase_table))
- return NULL;
- return bpf_testcase_table[i].desc;
-}
-
static int check_env(void)
{
int err;
@@ -313,7 +297,7 @@ static int check_env(void)
}
err = bpf_load_program(BPF_PROG_TYPE_KPROBE, insns,
- sizeof(insns) / sizeof(insns[0]),
+ ARRAY_SIZE(insns),
license, kver_int, NULL, 0);
if (err < 0) {
pr_err("Missing basic BPF support, skip this test: %s\n",
@@ -325,7 +309,7 @@ static int check_env(void)
return 0;
}
-int test__bpf(struct test *test __maybe_unused, int i)
+static int test__bpf(int i)
{
int err;
@@ -343,21 +327,60 @@ int test__bpf(struct test *test __maybe_unused, int i)
err = __test__bpf(i);
return err;
}
+#endif
-#else
-int test__bpf_subtest_get_nr(void)
+static int test__basic_bpf_test(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
{
- return 0;
+#ifdef HAVE_LIBBPF_SUPPORT
+ return test__bpf(0);
+#else
+ pr_debug("Skip BPF test because BPF support is not compiled\n");
+ return TEST_SKIP;
+#endif
}
-const char *test__bpf_subtest_get_desc(int i __maybe_unused)
+static int test__bpf_pinning(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
{
- return NULL;
+#ifdef HAVE_LIBBPF_SUPPORT
+ return test__bpf(1);
+#else
+ pr_debug("Skip BPF test because BPF support is not compiled\n");
+ return TEST_SKIP;
+#endif
}
-int test__bpf(struct test *test __maybe_unused, int i __maybe_unused)
+static int test__bpf_prologue_test(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
{
+#if defined(HAVE_LIBBPF_SUPPORT) && defined(HAVE_BPF_PROLOGUE)
+ return test__bpf(2);
+#else
pr_debug("Skip BPF test because BPF support is not compiled\n");
return TEST_SKIP;
+#endif
}
+
+
+static struct test_case bpf_tests[] = {
+#ifdef HAVE_LIBBPF_SUPPORT
+ TEST_CASE("Basic BPF filtering", basic_bpf_test),
+ TEST_CASE("BPF pinning", bpf_pinning),
+#ifdef HAVE_BPF_PROLOGUE
+ TEST_CASE("BPF prologue generation", bpf_prologue_test),
+#else
+ TEST_CASE_REASON("BPF prologue generation", bpf_prologue_test, "not compiled in"),
+#endif
+#else
+ TEST_CASE_REASON("Basic BPF filtering", basic_bpf_test, "not compiled in"),
+ TEST_CASE_REASON("BPF pinning", bpf_pinning, "not compiled in"),
+ TEST_CASE_REASON("BPF prologue generation", bpf_prologue_test, "not compiled in"),
#endif
+ { .name = NULL, }
+};
+
+struct test_suite suite__bpf = {
+ .desc = "BPF filter",
+ .test_cases = bpf_tests,
+};
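
The bpf.c rework also shows the pattern this series uses for optional build features: every case keeps a thin wrapper function, so the entry always exists in the table and stays listed, and TEST_CASE_REASON records why it will be skipped when the feature is compiled out. A hypothetical reduced example of the same shape, using an imaginary HAVE_FEATURE_X and helper run_feature_x_checks():

static int test__feature_x(struct test_suite *test __maybe_unused,
			   int subtest __maybe_unused)
{
#ifdef HAVE_FEATURE_X
	return run_feature_x_checks();	/* hypothetical helper */
#else
	pr_debug("Skip feature X test because support is not compiled\n");
	return TEST_SKIP;
#endif
}

static struct test_case feature_x_tests[] = {
#ifdef HAVE_FEATURE_X
	TEST_CASE("Feature X basic test", feature_x),
#else
	TEST_CASE_REASON("Feature X basic test", feature_x, "not compiled in"),
#endif
	{ .name = NULL, }
};

struct test_suite suite__feature_x = {
	.desc = "Feature X",
	.test_cases = feature_x_tests,
};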
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index da7dc5e45d0cf4..8cb5a1c3489eaf 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -30,350 +30,134 @@
static bool dont_fork;
-struct test __weak arch_tests[] = {
- {
- .func = NULL,
- },
+struct test_suite *__weak arch_tests[] = {
+ NULL,
};
-static struct test generic_tests[] = {
- {
- .desc = "vmlinux symtab matches kallsyms",
- .func = test__vmlinux_matches_kallsyms,
- },
- {
- .desc = "Detect openat syscall event",
- .func = test__openat_syscall_event,
- },
- {
- .desc = "Detect openat syscall event on all cpus",
- .func = test__openat_syscall_event_on_all_cpus,
- },
- {
- .desc = "Read samples using the mmap interface",
- .func = test__basic_mmap,
- },
- {
- .desc = "Test data source output",
- .func = test__mem,
- },
- {
- .desc = "Parse event definition strings",
- .func = test__parse_events,
- },
- {
- .desc = "Simple expression parser",
- .func = test__expr,
- },
- {
- .desc = "PERF_RECORD_* events & perf_sample fields",
- .func = test__PERF_RECORD,
- },
- {
- .desc = "Parse perf pmu format",
- .func = test__pmu,
- },
- {
- .desc = "PMU events",
- .func = test__pmu_events,
- .subtest = {
- .skip_if_fail = false,
- .get_nr = test__pmu_events_subtest_get_nr,
- .get_desc = test__pmu_events_subtest_get_desc,
- .skip_reason = test__pmu_events_subtest_skip_reason,
- },
-
- },
- {
- .desc = "DSO data read",
- .func = test__dso_data,
- },
- {
- .desc = "DSO data cache",
- .func = test__dso_data_cache,
- },
- {
- .desc = "DSO data reopen",
- .func = test__dso_data_reopen,
- },
- {
- .desc = "Roundtrip evsel->name",
- .func = test__perf_evsel__roundtrip_name_test,
- },
- {
- .desc = "Parse sched tracepoints fields",
- .func = test__perf_evsel__tp_sched_test,
- },
- {
- .desc = "syscalls:sys_enter_openat event fields",
- .func = test__syscall_openat_tp_fields,
- },
- {
- .desc = "Setup struct perf_event_attr",
- .func = test__attr,
- },
- {
- .desc = "Match and link multiple hists",
- .func = test__hists_link,
- },
- {
- .desc = "'import perf' in python",
- .func = test__python_use,
- },
- {
- .desc = "Breakpoint overflow signal handler",
- .func = test__bp_signal,
- .is_supported = test__bp_signal_is_supported,
- },
- {
- .desc = "Breakpoint overflow sampling",
- .func = test__bp_signal_overflow,
- .is_supported = test__bp_signal_is_supported,
- },
- {
- .desc = "Breakpoint accounting",
- .func = test__bp_accounting,
- .is_supported = test__bp_account_is_supported,
- },
- {
- .desc = "Watchpoint",
- .func = test__wp,
- .is_supported = test__wp_is_supported,
- .subtest = {
- .skip_if_fail = false,
- .get_nr = test__wp_subtest_get_nr,
- .get_desc = test__wp_subtest_get_desc,
- .skip_reason = test__wp_subtest_skip_reason,
- },
- },
- {
- .desc = "Number of exit events of a simple workload",
- .func = test__task_exit,
- },
- {
- .desc = "Software clock events period values",
- .func = test__sw_clock_freq,
- },
- {
- .desc = "Object code reading",
- .func = test__code_reading,
- },
- {
- .desc = "Sample parsing",
- .func = test__sample_parsing,
- },
- {
- .desc = "Use a dummy software event to keep tracking",
- .func = test__keep_tracking,
- },
- {
- .desc = "Parse with no sample_id_all bit set",
- .func = test__parse_no_sample_id_all,
- },
- {
- .desc = "Filter hist entries",
- .func = test__hists_filter,
- },
- {
- .desc = "Lookup mmap thread",
- .func = test__mmap_thread_lookup,
- },
- {
- .desc = "Share thread maps",
- .func = test__thread_maps_share,
- },
- {
- .desc = "Sort output of hist entries",
- .func = test__hists_output,
- },
- {
- .desc = "Cumulate child hist entries",
- .func = test__hists_cumulate,
- },
- {
- .desc = "Track with sched_switch",
- .func = test__switch_tracking,
- },
- {
- .desc = "Filter fds with revents mask in a fdarray",
- .func = test__fdarray__filter,
- },
- {
- .desc = "Add fd to a fdarray, making it autogrow",
- .func = test__fdarray__add,
- },
- {
- .desc = "kmod_path__parse",
- .func = test__kmod_path__parse,
- },
- {
- .desc = "Thread map",
- .func = test__thread_map,
- },
- {
- .desc = "LLVM search and compile",
- .func = test__llvm,
- .subtest = {
- .skip_if_fail = true,
- .get_nr = test__llvm_subtest_get_nr,
- .get_desc = test__llvm_subtest_get_desc,
- },
- },
- {
- .desc = "Session topology",
- .func = test__session_topology,
- },
- {
- .desc = "BPF filter",
- .func = test__bpf,
- .subtest = {
- .skip_if_fail = true,
- .get_nr = test__bpf_subtest_get_nr,
- .get_desc = test__bpf_subtest_get_desc,
- },
- },
- {
- .desc = "Synthesize thread map",
- .func = test__thread_map_synthesize,
- },
- {
- .desc = "Remove thread map",
- .func = test__thread_map_remove,
- },
- {
- .desc = "Synthesize cpu map",
- .func = test__cpu_map_synthesize,
- },
- {
- .desc = "Synthesize stat config",
- .func = test__synthesize_stat_config,
- },
- {
- .desc = "Synthesize stat",
- .func = test__synthesize_stat,
- },
- {
- .desc = "Synthesize stat round",
- .func = test__synthesize_stat_round,
- },
- {
- .desc = "Synthesize attr update",
- .func = test__event_update,
- },
- {
- .desc = "Event times",
- .func = test__event_times,
- },
- {
- .desc = "Read backward ring buffer",
- .func = test__backward_ring_buffer,
- },
- {
- .desc = "Print cpu map",
- .func = test__cpu_map_print,
- },
- {
- .desc = "Merge cpu map",
- .func = test__cpu_map_merge,
- },
-
- {
- .desc = "Probe SDT events",
- .func = test__sdt_event,
- },
- {
- .desc = "is_printable_array",
- .func = test__is_printable_array,
- },
- {
- .desc = "Print bitmap",
- .func = test__bitmap_print,
- },
- {
- .desc = "perf hooks",
- .func = test__perf_hooks,
- },
- {
- .desc = "builtin clang support",
- .func = test__clang,
- .subtest = {
- .skip_if_fail = true,
- .get_nr = test__clang_subtest_get_nr,
- .get_desc = test__clang_subtest_get_desc,
- }
- },
- {
- .desc = "unit_number__scnprintf",
- .func = test__unit_number__scnprint,
- },
- {
- .desc = "mem2node",
- .func = test__mem2node,
- },
- {
- .desc = "time utils",
- .func = test__time_utils,
- },
- {
- .desc = "Test jit_write_elf",
- .func = test__jit_write_elf,
- },
- {
- .desc = "Test libpfm4 support",
- .func = test__pfm,
- .subtest = {
- .skip_if_fail = true,
- .get_nr = test__pfm_subtest_get_nr,
- .get_desc = test__pfm_subtest_get_desc,
- }
- },
- {
- .desc = "Test api io",
- .func = test__api_io,
- },
- {
- .desc = "maps__merge_in",
- .func = test__maps__merge_in,
- },
- {
- .desc = "Demangle Java",
- .func = test__demangle_java,
- },
- {
- .desc = "Demangle OCaml",
- .func = test__demangle_ocaml,
- },
- {
- .desc = "Parse and process metrics",
- .func = test__parse_metric,
- },
- {
- .desc = "PE file support",
- .func = test__pe_file_parsing,
- },
- {
- .desc = "Event expansion for cgroups",
- .func = test__expand_cgroup_events,
- },
- {
- .desc = "Convert perf time to TSC",
- .func = test__perf_time_to_tsc,
- .is_supported = test__tsc_is_supported,
- },
- {
- .desc = "dlfilter C API",
- .func = test__dlfilter,
- },
- {
- .func = NULL,
- },
+static struct test_suite *generic_tests[] = {
+ &suite__vmlinux_matches_kallsyms,
+ &suite__openat_syscall_event,
+ &suite__openat_syscall_event_on_all_cpus,
+ &suite__basic_mmap,
+ &suite__mem,
+ &suite__parse_events,
+ &suite__expr,
+ &suite__PERF_RECORD,
+ &suite__pmu,
+ &suite__pmu_events,
+ &suite__dso_data,
+ &suite__dso_data_cache,
+ &suite__dso_data_reopen,
+ &suite__perf_evsel__roundtrip_name_test,
+ &suite__perf_evsel__tp_sched_test,
+ &suite__syscall_openat_tp_fields,
+ &suite__attr,
+ &suite__hists_link,
+ &suite__python_use,
+ &suite__bp_signal,
+ &suite__bp_signal_overflow,
+ &suite__bp_accounting,
+ &suite__wp,
+ &suite__task_exit,
+ &suite__sw_clock_freq,
+ &suite__code_reading,
+ &suite__sample_parsing,
+ &suite__keep_tracking,
+ &suite__parse_no_sample_id_all,
+ &suite__hists_filter,
+ &suite__mmap_thread_lookup,
+ &suite__thread_maps_share,
+ &suite__hists_output,
+ &suite__hists_cumulate,
+ &suite__switch_tracking,
+ &suite__fdarray__filter,
+ &suite__fdarray__add,
+ &suite__kmod_path__parse,
+ &suite__thread_map,
+ &suite__llvm,
+ &suite__session_topology,
+ &suite__bpf,
+ &suite__thread_map_synthesize,
+ &suite__thread_map_remove,
+ &suite__cpu_map_synthesize,
+ &suite__synthesize_stat_config,
+ &suite__synthesize_stat,
+ &suite__synthesize_stat_round,
+ &suite__event_update,
+ &suite__event_times,
+ &suite__backward_ring_buffer,
+ &suite__cpu_map_print,
+ &suite__cpu_map_merge,
+ &suite__sdt_event,
+ &suite__is_printable_array,
+ &suite__bitmap_print,
+ &suite__perf_hooks,
+ &suite__clang,
+ &suite__unit_number__scnprint,
+ &suite__mem2node,
+ &suite__time_utils,
+ &suite__jit_write_elf,
+ &suite__pfm,
+ &suite__api_io,
+ &suite__maps__merge_in,
+ &suite__demangle_java,
+ &suite__demangle_ocaml,
+ &suite__parse_metric,
+ &suite__pe_file_parsing,
+ &suite__expand_cgroup_events,
+ &suite__perf_time_to_tsc,
+ &suite__dlfilter,
+ NULL,
};
-static struct test *tests[] = {
+static struct test_suite **tests[] = {
generic_tests,
arch_tests,
};
+static int num_subtests(const struct test_suite *t)
+{
+ int num;
+
+ if (!t->test_cases)
+ return 0;
+
+ num = 0;
+ while (t->test_cases[num].name)
+ num++;
+
+ return num;
+}
+
+static bool has_subtests(const struct test_suite *t)
+{
+ return num_subtests(t) > 1;
+}
+
+static const char *skip_reason(const struct test_suite *t, int subtest)
+{
+ if (t->test_cases && subtest >= 0)
+ return t->test_cases[subtest].skip_reason;
+
+ return NULL;
+}
+
+static const char *test_description(const struct test_suite *t, int subtest)
+{
+ if (t->test_cases && subtest >= 0)
+ return t->test_cases[subtest].desc;
+
+ return t->desc;
+}
+
+static test_fnptr test_function(const struct test_suite *t, int subtest)
+{
+ if (subtest <= 0)
+ return t->test_cases[0].run_case;
+
+ return t->test_cases[subtest].run_case;
+}
+
static bool perf_test__matches(const char *desc, int curr, int argc, const char *argv[])
{
int i;
@@ -398,7 +182,7 @@ static bool perf_test__matches(const char *desc, int curr, int argc, const char
return false;
}
-static int run_test(struct test *test, int subtest)
+static int run_test(struct test_suite *test, int subtest)
{
int status, err = -1, child = dont_fork ? 0 : fork();
char sbuf[STRERR_BUFSIZE];
@@ -430,7 +214,7 @@ static int run_test(struct test *test, int subtest)
}
}
- err = test->func(test, subtest);
+ err = test_function(test, subtest)(test, subtest);
if (!dont_fork)
exit(err);
}
@@ -450,24 +234,19 @@ static int run_test(struct test *test, int subtest)
return err;
}
-#define for_each_test(j, t) \
+#define for_each_test(j, k, t) \
for (j = 0; j < ARRAY_SIZE(tests); j++) \
- for (t = &tests[j][0]; t->func; t++)
+ for (k = 0, t = tests[j][k]; tests[j][k]; k++, t = tests[j][k])
-static int test_and_print(struct test *t, bool force_skip, int subtest)
+static int test_and_print(struct test_suite *t, int subtest)
{
int err;
- if (!force_skip) {
- pr_debug("\n--- start ---\n");
- err = run_test(t, subtest);
- pr_debug("---- end ----\n");
- } else {
- pr_debug("\n--- force skipped ---\n");
- err = TEST_SKIP;
- }
+ pr_debug("\n--- start ---\n");
+ err = run_test(t, subtest);
+ pr_debug("---- end ----\n");
- if (!t->subtest.get_nr)
+ if (!has_subtests(t))
pr_debug("%s:", t->desc);
else
pr_debug("%s subtest %d:", t->desc, subtest + 1);
@@ -477,11 +256,10 @@ static int test_and_print(struct test *t, bool force_skip, int subtest)
pr_info(" Ok\n");
break;
case TEST_SKIP: {
- const char *skip_reason = NULL;
- if (t->subtest.skip_reason)
- skip_reason = t->subtest.skip_reason(subtest);
- if (skip_reason)
- color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (%s)\n", skip_reason);
+ const char *reason = skip_reason(t, subtest);
+
+ if (reason)
+ color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (%s)\n", reason);
else
color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip\n");
}
@@ -580,7 +358,7 @@ struct shell_test {
const char *file;
};
-static int shell_test__run(struct test *test, int subdir __maybe_unused)
+static int shell_test__run(struct test_suite *test, int subdir __maybe_unused)
{
int err;
char script[PATH_MAX];
@@ -622,24 +400,34 @@ static int run_shell_tests(int argc, const char *argv[], int i, int width,
for_each_shell_test(entlist, n_dirs, st.dir, ent) {
int curr = i++;
char desc[256];
- struct test test = {
- .desc = shell_test__description(desc, sizeof(desc), st.dir, ent->d_name),
- .func = shell_test__run,
+ struct test_case test_cases[] = {
+ {
+ .desc = shell_test__description(desc,
+ sizeof(desc),
+ st.dir,
+ ent->d_name),
+ .run_case = shell_test__run,
+ },
+ { .name = NULL, }
+ };
+ struct test_suite test_suite = {
+ .desc = test_cases[0].desc,
+ .test_cases = test_cases,
.priv = &st,
};
- if (!perf_test__matches(test.desc, curr, argc, argv))
+ if (!perf_test__matches(test_suite.desc, curr, argc, argv))
continue;
st.file = ent->d_name;
- pr_info("%2d: %-*s:", i, width, test.desc);
+ pr_info("%2d: %-*s:", i, width, test_suite.desc);
if (intlist__find(skiplist, i)) {
color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (user override)\n");
continue;
}
- test_and_print(&test, false, -1);
+ test_and_print(&test_suite, 0);
}
for (e = 0; e < n_dirs; e++)
@@ -650,33 +438,31 @@ static int run_shell_tests(int argc, const char *argv[], int i, int width,
static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
{
- struct test *t;
- unsigned int j;
+ struct test_suite *t;
+ unsigned int j, k;
int i = 0;
int width = shell_tests__max_desc_width();
- for_each_test(j, t) {
- int len = strlen(t->desc);
+ for_each_test(j, k, t) {
+ int len = strlen(test_description(t, -1));
if (width < len)
width = len;
}
- for_each_test(j, t) {
- int curr = i++, err;
+ for_each_test(j, k, t) {
+ int curr = i++;
int subi;
- if (!perf_test__matches(t->desc, curr, argc, argv)) {
+ if (!perf_test__matches(test_description(t, -1), curr, argc, argv)) {
bool skip = true;
int subn;
- if (!t->subtest.get_nr)
- continue;
-
- subn = t->subtest.get_nr();
+ subn = num_subtests(t);
for (subi = 0; subi < subn; subi++) {
- if (perf_test__matches(t->subtest.get_desc(subi), curr, argc, argv))
+ if (perf_test__matches(test_description(t, subi),
+ curr, argc, argv))
skip = false;
}
@@ -684,22 +470,17 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
continue;
}
- if (t->is_supported && !t->is_supported()) {
- pr_debug("%2d: %-*s: Disabled\n", i, width, t->desc);
- continue;
- }
-
- pr_info("%2d: %-*s:", i, width, t->desc);
+ pr_info("%2d: %-*s:", i, width, test_description(t, -1));
if (intlist__find(skiplist, i)) {
color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (user override)\n");
continue;
}
- if (!t->subtest.get_nr) {
- test_and_print(t, false, -1);
+ if (!has_subtests(t)) {
+ test_and_print(t, -1);
} else {
- int subn = t->subtest.get_nr();
+ int subn = num_subtests(t);
/*
* minus 2 to align with normal testcases.
* For subtest we print additional '.x' in number.
@@ -709,7 +490,6 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
* 35.1: Basic BPF llvm compiling test : Ok
*/
int subw = width > 2 ? width - 2 : width;
- bool skip = false;
if (subn <= 0) {
color_fprintf(stderr, PERF_COLOR_YELLOW,
@@ -719,21 +499,20 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
pr_info("\n");
for (subi = 0; subi < subn; subi++) {
- int len = strlen(t->subtest.get_desc(subi));
+ int len = strlen(test_description(t, subi));
if (subw < len)
subw = len;
}
for (subi = 0; subi < subn; subi++) {
- if (!perf_test__matches(t->subtest.get_desc(subi), curr, argc, argv))
+ if (!perf_test__matches(test_description(t, subi),
+ curr, argc, argv))
continue;
pr_info("%2d.%1d: %-*s:", i, subi + 1, subw,
- t->subtest.get_desc(subi));
- err = test_and_print(t, skip, subi);
- if (err != TEST_OK && t->subtest.skip_if_fail)
- skip = true;
+ test_description(t, subi));
+ test_and_print(t, subi);
}
}
}
@@ -759,7 +538,7 @@ static int perf_test__list_shell(int argc, const char **argv, int i)
for_each_shell_test(entlist, n_dirs, path, ent) {
int curr = i++;
char bf[256];
- struct test t = {
+ struct test_suite t = {
.desc = shell_test__description(bf, sizeof(bf), path, ent->d_name),
};
@@ -778,26 +557,25 @@ static int perf_test__list_shell(int argc, const char **argv, int i)
static int perf_test__list(int argc, const char **argv)
{
- unsigned int j;
- struct test *t;
+ unsigned int j, k;
+ struct test_suite *t;
int i = 0;
- for_each_test(j, t) {
+ for_each_test(j, k, t) {
int curr = i++;
- if (!perf_test__matches(t->desc, curr, argc, argv) ||
- (t->is_supported && !t->is_supported()))
+ if (!perf_test__matches(test_description(t, -1), curr, argc, argv))
continue;
- pr_info("%2d: %s\n", i, t->desc);
+ pr_info("%2d: %s\n", i, test_description(t, -1));
- if (t->subtest.get_nr) {
- int subn = t->subtest.get_nr();
+ if (has_subtests(t)) {
+ int subn = num_subtests(t);
int subi;
for (subi = 0; subi < subn; subi++)
pr_info("%2d:%1d: %s\n", i, subi + 1,
- t->subtest.get_desc(subi));
+ test_description(t, subi));
}
}
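
With this change the per-test get_nr()/get_desc()/skip_reason() callbacks are gone: builtin-test.c derives everything from the NULL-terminated test_cases array, and a suite with exactly one case is treated as having no subtests (has_subtests() requires more than one). An illustrative caller of those helpers, under the same assumed layout as the sketch near the top of this patch, with printf standing in for the pr_info() calls used in the file:

static void describe_suite(const struct test_suite *t)
{
	int subi;

	if (!has_subtests(t)) {
		/* single-case suites show only the suite description */
		printf("%s\n", test_description(t, -1));
		return;
	}

	for (subi = 0; subi < num_subtests(t); subi++)
		printf("  %d: %s\n", subi + 1, test_description(t, subi));
}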
diff --git a/tools/perf/tests/clang.c b/tools/perf/tests/clang.c
index 2577d3ed153152..a7111005d5b9f4 100644
--- a/tools/perf/tests/clang.c
+++ b/tools/perf/tests/clang.c
@@ -3,44 +3,30 @@
#include "c++/clang-c.h"
#include <linux/kernel.h>
-static struct {
- int (*func)(void);
- const char *desc;
-} clang_testcase_table[] = {
-#ifdef HAVE_LIBCLANGLLVM_SUPPORT
- {
- .func = test__clang_to_IR,
- .desc = "builtin clang compile C source to IR",
- },
- {
- .func = test__clang_to_obj,
- .desc = "builtin clang compile C source to ELF object",
- },
-#endif
-};
-
-int test__clang_subtest_get_nr(void)
-{
- return (int)ARRAY_SIZE(clang_testcase_table);
-}
-
-const char *test__clang_subtest_get_desc(int i)
-{
- if (i < 0 || i >= (int)ARRAY_SIZE(clang_testcase_table))
- return NULL;
- return clang_testcase_table[i].desc;
-}
-
#ifndef HAVE_LIBCLANGLLVM_SUPPORT
-int test__clang(struct test *test __maybe_unused, int i __maybe_unused)
+static int test__clang_to_IR(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
{
return TEST_SKIP;
}
-#else
-int test__clang(struct test *test __maybe_unused, int i)
+
+static int test__clang_to_obj(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
{
- if (i < 0 || i >= (int)ARRAY_SIZE(clang_testcase_table))
- return TEST_FAIL;
- return clang_testcase_table[i].func();
+ return TEST_SKIP;
}
#endif
+
+static struct test_case clang_tests[] = {
+ TEST_CASE_REASON("builtin clang compile C source to IR", clang_to_IR,
+ "not compiled in"),
+ TEST_CASE_REASON("builtin clang compile C source to ELF object",
+ clang_to_obj,
+ "not compiled in"),
+ { .name = NULL, }
+};
+
+struct test_suite suite__clang = {
+ .desc = "builtin clang support",
+ .test_cases = clang_tests,
+};
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index f439bd49da1953..5610767b407f01 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -716,7 +716,7 @@ out_err:
return err;
}
-int test__code_reading(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__code_reading(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
int ret;
@@ -743,3 +743,5 @@ int test__code_reading(struct test *test __maybe_unused, int subtest __maybe_unu
return -1;
};
}
+
+DEFINE_SUITE("Object code reading", code_reading);
diff --git a/tools/perf/tests/cpumap.c b/tools/perf/tests/cpumap.c
index 0472b110fe651f..89a155092f853b 100644
--- a/tools/perf/tests/cpumap.c
+++ b/tools/perf/tests/cpumap.c
@@ -75,7 +75,7 @@ static int process_event_cpus(struct perf_tool *tool __maybe_unused,
}
-int test__cpu_map_synthesize(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__cpu_map_synthesize(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
struct perf_cpu_map *cpus;
@@ -111,7 +111,7 @@ static int cpu_map_print(const char *str)
return !strcmp(buf, str);
}
-int test__cpu_map_print(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__cpu_map_print(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
TEST_ASSERT_VAL("failed to convert map", cpu_map_print("1"));
TEST_ASSERT_VAL("failed to convert map", cpu_map_print("1,5"));
@@ -123,7 +123,7 @@ int test__cpu_map_print(struct test *test __maybe_unused, int subtest __maybe_un
return 0;
}
-int test__cpu_map_merge(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__cpu_map_merge(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
struct perf_cpu_map *a = perf_cpu_map__new("4,2,1");
struct perf_cpu_map *b = perf_cpu_map__new("4,5,7");
@@ -137,3 +137,7 @@ int test__cpu_map_merge(struct test *test __maybe_unused, int subtest __maybe_un
perf_cpu_map__put(c);
return 0;
}
+
+DEFINE_SUITE("Synthesize cpu map", cpu_map_synthesize);
+DEFINE_SUITE("Print cpu map", cpu_map_print);
+DEFINE_SUITE("Merge cpu map", cpu_map_merge);
diff --git a/tools/perf/tests/demangle-java-test.c b/tools/perf/tests/demangle-java-test.c
index 8f3b90832fb0f9..44d1be303b67ae 100644
--- a/tools/perf/tests/demangle-java-test.c
+++ b/tools/perf/tests/demangle-java-test.c
@@ -7,7 +7,7 @@
#include "debug.h"
#include "demangle-java.h"
-int test__demangle_java(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__demangle_java(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
int ret = TEST_OK;
char *buf = NULL;
@@ -40,3 +40,5 @@ int test__demangle_java(struct test *test __maybe_unused, int subtest __maybe_un
return ret;
}
+
+DEFINE_SUITE("Demangle Java", demangle_java);
diff --git a/tools/perf/tests/demangle-ocaml-test.c b/tools/perf/tests/demangle-ocaml-test.c
index 0043be812355a9..90a4285e2ad547 100644
--- a/tools/perf/tests/demangle-ocaml-test.c
+++ b/tools/perf/tests/demangle-ocaml-test.c
@@ -7,7 +7,7 @@
#include "debug.h"
#include "demangle-ocaml.h"
-int test__demangle_ocaml(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__demangle_ocaml(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
int ret = TEST_OK;
char *buf = NULL;
@@ -41,3 +41,5 @@ int test__demangle_ocaml(struct test *test __maybe_unused, int subtest __maybe_u
return ret;
}
+
+DEFINE_SUITE("Demangle OCaml", demangle_ocaml);
diff --git a/tools/perf/tests/dlfilter-test.c b/tools/perf/tests/dlfilter-test.c
index bc03b5df6828d6..84352d55347d56 100644
--- a/tools/perf/tests/dlfilter-test.c
+++ b/tools/perf/tests/dlfilter-test.c
@@ -398,7 +398,7 @@ static void test_data__free(struct test_data *td)
}
}
-int test__dlfilter(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__dlfilter(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
struct test_data td = {.fd = -1};
int pid = getpid();
@@ -414,3 +414,5 @@ int test__dlfilter(struct test *test __maybe_unused, int subtest __maybe_unused)
test_data__free(&td);
return err;
}
+
+DEFINE_SUITE("dlfilter C API", dlfilter);
diff --git a/tools/perf/tests/dso-data.c b/tools/perf/tests/dso-data.c
index 43e1b01e5afcb7..3419a4ab5590f5 100644
--- a/tools/perf/tests/dso-data.c
+++ b/tools/perf/tests/dso-data.c
@@ -113,7 +113,7 @@ static int dso__data_fd(struct dso *dso, struct machine *machine)
return fd;
}
-int test__dso_data(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__dso_data(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
struct machine machine;
struct dso *dso;
@@ -248,7 +248,7 @@ static int set_fd_limit(int n)
return setrlimit(RLIMIT_NOFILE, &rlim);
}
-int test__dso_data_cache(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__dso_data_cache(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
struct machine machine;
long nr_end, nr = open_files_cnt();
@@ -318,7 +318,7 @@ static long new_limit(int count)
return ret;
}
-int test__dso_data_reopen(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__dso_data_reopen(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
struct machine machine;
long nr_end, nr = open_files_cnt(), lim = new_limit(3);
@@ -393,3 +393,7 @@ int test__dso_data_reopen(struct test *test __maybe_unused, int subtest __maybe_
TEST_ASSERT_VAL("failed leaking files", nr == nr_end);
return 0;
}
+
+DEFINE_SUITE("DSO data read", dso_data);
+DEFINE_SUITE("DSO data cache", dso_data_cache);
+DEFINE_SUITE("DSO data reopen", dso_data_reopen);
diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
index c756284b3b1353..2dab2d2620608b 100644
--- a/tools/perf/tests/dwarf-unwind.c
+++ b/tools/perf/tests/dwarf-unwind.c
@@ -195,7 +195,8 @@ NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_1(struct thread *th
return ret;
}
-int test__dwarf_unwind(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__dwarf_unwind(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
{
struct machine *machine;
struct thread *thread;
@@ -237,3 +238,5 @@ int test__dwarf_unwind(struct test *test __maybe_unused, int subtest __maybe_unu
machine__delete(machine);
return err;
}
+
+DEFINE_SUITE("Test dwarf unwind", dwarf_unwind);
diff --git a/tools/perf/tests/event-times.c b/tools/perf/tests/event-times.c
index 04ce4401f77528..7606eb3df92f06 100644
--- a/tools/perf/tests/event-times.c
+++ b/tools/perf/tests/event-times.c
@@ -216,7 +216,7 @@ out_err:
* and checks that enabled and running times
* match.
*/
-int test__event_times(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__event_times(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
int err, ret = 0;
@@ -239,3 +239,5 @@ int test__event_times(struct test *test __maybe_unused, int subtest __maybe_unus
#undef _T
return ret;
}
+
+DEFINE_SUITE("Event times", event_times);
diff --git a/tools/perf/tests/event_update.c b/tools/perf/tests/event_update.c
index 44a50527f9d951..fbb68deba59f22 100644
--- a/tools/perf/tests/event_update.c
+++ b/tools/perf/tests/event_update.c
@@ -83,7 +83,7 @@ static int process_event_cpus(struct perf_tool *tool __maybe_unused,
return 0;
}
-int test__event_update(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__event_update(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
struct evsel *evsel;
struct event_name tmp;
@@ -123,3 +123,5 @@ int test__event_update(struct test *test __maybe_unused, int subtest __maybe_unu
evlist__delete(evlist);
return 0;
}
+
+DEFINE_SUITE("Synthesize attr update", event_update);
diff --git a/tools/perf/tests/evsel-roundtrip-name.c b/tools/perf/tests/evsel-roundtrip-name.c
index 4e09f0a312afc3..fdbf17642e45c0 100644
--- a/tools/perf/tests/evsel-roundtrip-name.c
+++ b/tools/perf/tests/evsel-roundtrip-name.c
@@ -99,7 +99,8 @@ out_delete_evlist:
#define perf_evsel__name_array_test(names, distance) \
__perf_evsel__name_array_test(names, ARRAY_SIZE(names), distance)
-int test__perf_evsel__roundtrip_name_test(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__perf_evsel__roundtrip_name_test(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
{
int err = 0, ret = 0;
@@ -120,3 +121,5 @@ int test__perf_evsel__roundtrip_name_test(struct test *test __maybe_unused, int
return ret;
}
+
+DEFINE_SUITE("Roundtrip evsel->name", perf_evsel__roundtrip_name_test);
diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c
index f9e34bd26cf335..cf4da3d748c29f 100644
--- a/tools/perf/tests/evsel-tp-sched.c
+++ b/tools/perf/tests/evsel-tp-sched.c
@@ -32,7 +32,8 @@ static int evsel__test_field(struct evsel *evsel, const char *name, int size, bo
return ret;
}
-int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__perf_evsel__tp_sched_test(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
{
struct evsel *evsel = evsel__newtp("sched", "sched_switch");
int ret = 0;
@@ -87,3 +88,5 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
evsel__delete(evsel);
return ret;
}
+
+DEFINE_SUITE("Parse sched tracepoints fields", perf_evsel__tp_sched_test);
diff --git a/tools/perf/tests/expand-cgroup.c b/tools/perf/tests/expand-cgroup.c
index 80cff8a3558cfb..dfefe5b60eb23b 100644
--- a/tools/perf/tests/expand-cgroup.c
+++ b/tools/perf/tests/expand-cgroup.c
@@ -221,8 +221,8 @@ out:
return ret;
}
-int test__expand_cgroup_events(struct test *test __maybe_unused,
- int subtest __maybe_unused)
+static int test__expand_cgroup_events(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
{
int ret;
@@ -240,3 +240,5 @@ int test__expand_cgroup_events(struct test *test __maybe_unused,
return ret;
}
+
+DEFINE_SUITE("Event expansion for cgroups", expand_cgroup_events);
diff --git a/tools/perf/tests/expr.c b/tools/perf/tests/expr.c
index 077783223ce076..c895de481fe10d 100644
--- a/tools/perf/tests/expr.c
+++ b/tools/perf/tests/expr.c
@@ -62,11 +62,11 @@ static int test(struct expr_parse_ctx *ctx, const char *e, double val2)
return 0;
}
-int test__expr(struct test *t __maybe_unused, int subtest __maybe_unused)
+static int test__expr(struct test_suite *t __maybe_unused, int subtest __maybe_unused)
{
struct expr_id_data *val_ptr;
const char *p;
- double val;
+ double val, num_cpus, num_cores, num_dies, num_packages;
int ret;
struct expr_parse_ctx *ctx;
@@ -134,6 +134,16 @@ int test__expr(struct test *t __maybe_unused, int subtest __maybe_unused)
TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "EVENT2,param=3@",
(void **)&val_ptr));
+ expr__ctx_clear(ctx);
+ TEST_ASSERT_VAL("find ids",
+ expr__find_ids("dash\\-event1 - dash\\-event2",
+ NULL, ctx) == 0);
+ TEST_ASSERT_VAL("find ids", hashmap__size(ctx->ids) == 2);
+ TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "dash-event1",
+ (void **)&val_ptr));
+ TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "dash-event2",
+ (void **)&val_ptr));
+
/* Only EVENT1 or EVENT2 need be measured depending on the value of smt_on. */
expr__ctx_clear(ctx);
TEST_ASSERT_VAL("find ids",
@@ -151,7 +161,31 @@ int test__expr(struct test *t __maybe_unused, int subtest __maybe_unused)
NULL, ctx) == 0);
TEST_ASSERT_VAL("find ids", hashmap__size(ctx->ids) == 0);
+ /* Test topology constants appear well ordered. */
+ expr__ctx_clear(ctx);
+ TEST_ASSERT_VAL("#num_cpus", expr__parse(&num_cpus, ctx, "#num_cpus") == 0);
+ TEST_ASSERT_VAL("#num_cores", expr__parse(&num_cores, ctx, "#num_cores") == 0);
+ TEST_ASSERT_VAL("#num_cpus >= #num_cores", num_cpus >= num_cores);
+ TEST_ASSERT_VAL("#num_dies", expr__parse(&num_dies, ctx, "#num_dies") == 0);
+ TEST_ASSERT_VAL("#num_cores >= #num_dies", num_cores >= num_dies);
+ TEST_ASSERT_VAL("#num_packages", expr__parse(&num_packages, ctx, "#num_packages") == 0);
+ TEST_ASSERT_VAL("#num_dies >= #num_packages", num_dies >= num_packages);
+
+ /*
+ * Source count returns the number of events aggregating in a leader
+ * event including the leader. Check parsing yields an id.
+ */
+ expr__ctx_clear(ctx);
+ TEST_ASSERT_VAL("source count",
+ expr__find_ids("source_count(EVENT1)",
+ NULL, ctx) == 0);
+ TEST_ASSERT_VAL("source count", hashmap__size(ctx->ids) == 1);
+ TEST_ASSERT_VAL("source count", hashmap__find(ctx->ids, "EVENT1",
+ (void **)&val_ptr));
+
expr__ctx_free(ctx);
return 0;
}
+
+DEFINE_SUITE("Simple expression parser", expr);
diff --git a/tools/perf/tests/fdarray.c b/tools/perf/tests/fdarray.c
index d9eca8e86a6b0b..40983c3574b120 100644
--- a/tools/perf/tests/fdarray.c
+++ b/tools/perf/tests/fdarray.c
@@ -28,7 +28,7 @@ static int fdarray__fprintf_prefix(struct fdarray *fda, const char *prefix, FILE
return printed + fdarray__fprintf(fda, fp);
}
-int test__fdarray__filter(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__fdarray__filter(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
int nr_fds, err = TEST_FAIL;
struct fdarray *fda = fdarray__new(5, 5);
@@ -89,7 +89,7 @@ out:
return err;
}
-int test__fdarray__add(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__fdarray__add(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
int err = TEST_FAIL;
struct fdarray *fda = fdarray__new(2, 2);
@@ -158,3 +158,6 @@ out_delete:
out:
return err;
}
+
+DEFINE_SUITE("Filter fds with revents mask in a fdarray", fdarray__filter);
+DEFINE_SUITE("Add fd to a fdarray, making it autogrow", fdarray__add);
diff --git a/tools/perf/tests/genelf.c b/tools/perf/tests/genelf.c
index f797f9823e8917..95f3be1b683a02 100644
--- a/tools/perf/tests/genelf.c
+++ b/tools/perf/tests/genelf.c
@@ -16,8 +16,8 @@
#define TEMPL "/tmp/perf-test-XXXXXX"
-int test__jit_write_elf(struct test *test __maybe_unused,
- int subtest __maybe_unused)
+static int test__jit_write_elf(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
{
#ifdef HAVE_JITDUMP
static unsigned char x86_code[] = {
@@ -49,3 +49,5 @@ int test__jit_write_elf(struct test *test __maybe_unused,
return TEST_SKIP;
#endif
}
+
+DEFINE_SUITE("Test jit_write_elf", jit_write_elf);
diff --git a/tools/perf/tests/hists_cumulate.c b/tools/perf/tests/hists_cumulate.c
index 890cb1f5bf53d3..17f4fcd6bdcebc 100644
--- a/tools/perf/tests/hists_cumulate.c
+++ b/tools/perf/tests/hists_cumulate.c
@@ -689,7 +689,7 @@ out:
return err;
}
-int test__hists_cumulate(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__hists_cumulate(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
int err = TEST_FAIL;
struct machines machines;
@@ -736,3 +736,5 @@ out:
return err;
}
+
+DEFINE_SUITE("Cumulate child hist entries", hists_cumulate);
diff --git a/tools/perf/tests/hists_filter.c b/tools/perf/tests/hists_filter.c
index ca6120cd1d90c0..08cbeb9e39ae1b 100644
--- a/tools/perf/tests/hists_filter.c
+++ b/tools/perf/tests/hists_filter.c
@@ -101,7 +101,7 @@ out:
return TEST_FAIL;
}
-int test__hists_filter(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__hists_filter(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
int err = TEST_FAIL;
struct machines machines;
@@ -325,3 +325,5 @@ out:
return err;
}
+
+DEFINE_SUITE("Filter hist entries", hists_filter);
diff --git a/tools/perf/tests/hists_link.c b/tools/perf/tests/hists_link.c
index a024d3f3a4123d..c575e13a850dc6 100644
--- a/tools/perf/tests/hists_link.c
+++ b/tools/perf/tests/hists_link.c
@@ -264,7 +264,7 @@ static int validate_link(struct hists *leader, struct hists *other)
return __validate_link(leader, 0) || __validate_link(other, 1);
}
-int test__hists_link(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__hists_link(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
int err = -1;
struct hists *hists, *first_hists;
@@ -339,3 +339,5 @@ out:
return err;
}
+
+DEFINE_SUITE("Match and link multiple hists", hists_link);
diff --git a/tools/perf/tests/hists_output.c b/tools/perf/tests/hists_output.c
index 8973f35df6042b..0bde4a768c1599 100644
--- a/tools/perf/tests/hists_output.c
+++ b/tools/perf/tests/hists_output.c
@@ -575,7 +575,7 @@ out:
return err;
}
-int test__hists_output(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__hists_output(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
int err = TEST_FAIL;
struct machines machines;
@@ -623,3 +623,5 @@ out:
return err;
}
+
+DEFINE_SUITE("Sort output of hist entries", hists_output);
diff --git a/tools/perf/tests/is_printable_array.c b/tools/perf/tests/is_printable_array.c
index 9c7b3baca4fed6..f72de2457ff164 100644
--- a/tools/perf/tests/is_printable_array.c
+++ b/tools/perf/tests/is_printable_array.c
@@ -5,7 +5,7 @@
#include "debug.h"
#include "print_binary.h"
-int test__is_printable_array(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__is_printable_array(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
char buf1[] = { 'k', 'r', 4, 'v', 'a', 0 };
char buf2[] = { 'k', 'r', 'a', 'v', 4, 0 };
@@ -36,3 +36,5 @@ int test__is_printable_array(struct test *test __maybe_unused, int subtest __may
return TEST_OK;
}
+
+DEFINE_SUITE("is_printable_array", is_printable_array);
diff --git a/tools/perf/tests/keep-tracking.c b/tools/perf/tests/keep-tracking.c
index a0438b0f080526..dd2067312452cc 100644
--- a/tools/perf/tests/keep-tracking.c
+++ b/tools/perf/tests/keep-tracking.c
@@ -61,7 +61,7 @@ static int find_comm(struct evlist *evlist, const char *comm)
* when an event is disabled but a dummy software event is not disabled. If the
* test passes %0 is returned, otherwise %-1 is returned.
*/
-int test__keep_tracking(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__keep_tracking(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
struct record_opts opts = {
.mmap_pages = UINT_MAX,
@@ -160,3 +160,5 @@ out_err:
return err;
}
+
+DEFINE_SUITE("Use a dummy software event to keep tracking", keep_tracking);
diff --git a/tools/perf/tests/kmod-path.c b/tools/perf/tests/kmod-path.c
index e483210b176bf0..dfe1bd5dabaa8b 100644
--- a/tools/perf/tests/kmod-path.c
+++ b/tools/perf/tests/kmod-path.c
@@ -47,7 +47,7 @@ static int test_is_kernel_module(const char *path, int cpumode, bool expect)
#define M(path, c, e) \
TEST_ASSERT_VAL("failed", !test_is_kernel_module(path, c, e))
-int test__kmod_path__parse(struct test *t __maybe_unused, int subtest __maybe_unused)
+static int test__kmod_path__parse(struct test_suite *t __maybe_unused, int subtest __maybe_unused)
{
/* path alloc_name kmod comp name */
T("/xxxx/xxxx/x-x.ko", true , true, 0 , "[x_x]");
@@ -159,3 +159,5 @@ int test__kmod_path__parse(struct test *t __maybe_unused, int subtest __maybe_un
return 0;
}
+
+DEFINE_SUITE("kmod_path__parse", kmod_path__parse);
diff --git a/tools/perf/tests/llvm.c b/tools/perf/tests/llvm.c
index 33e43cce90640f..8ac0a3a457ef53 100644
--- a/tools/perf/tests/llvm.c
+++ b/tools/perf/tests/llvm.c
@@ -124,7 +124,7 @@ out:
return ret;
}
-int test__llvm(struct test *test __maybe_unused, int subtest)
+static int test__llvm(int subtest)
{
int ret;
void *obj_buf = NULL;
@@ -148,32 +148,72 @@ int test__llvm(struct test *test __maybe_unused, int subtest)
return ret;
}
+#endif //HAVE_LIBBPF_SUPPORT
-int test__llvm_subtest_get_nr(void)
+static int test__llvm__bpf_base_prog(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
{
- return __LLVM_TESTCASE_MAX;
+#ifdef HAVE_LIBBPF_SUPPORT
+ return test__llvm(LLVM_TESTCASE_BASE);
+#else
+ pr_debug("Skip LLVM test because BPF support is not compiled\n");
+ return TEST_SKIP;
+#endif
}
-const char *test__llvm_subtest_get_desc(int subtest)
-{
- if ((subtest < 0) || (subtest >= __LLVM_TESTCASE_MAX))
- return NULL;
-
- return bpf_source_table[subtest].desc;
-}
-#else //HAVE_LIBBPF_SUPPORT
-int test__llvm(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__llvm__bpf_test_kbuild_prog(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
{
+#ifdef HAVE_LIBBPF_SUPPORT
+ return test__llvm(LLVM_TESTCASE_KBUILD);
+#else
+ pr_debug("Skip LLVM test because BPF support is not compiled\n");
return TEST_SKIP;
+#endif
}
-int test__llvm_subtest_get_nr(void)
+static int test__llvm__bpf_test_prologue_prog(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
{
- return 0;
+#ifdef HAVE_LIBBPF_SUPPORT
+ return test__llvm(LLVM_TESTCASE_BPF_PROLOGUE);
+#else
+ pr_debug("Skip LLVM test because BPF support is not compiled\n");
+ return TEST_SKIP;
+#endif
}
-const char *test__llvm_subtest_get_desc(int subtest __maybe_unused)
+static int test__llvm__bpf_test_relocation(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
{
- return NULL;
+#ifdef HAVE_LIBBPF_SUPPORT
+ return test__llvm(LLVM_TESTCASE_BPF_RELOCATION);
+#else
+ pr_debug("Skip LLVM test because BPF support is not compiled\n");
+ return TEST_SKIP;
+#endif
}
-#endif // HAVE_LIBBPF_SUPPORT
+
+
+static struct test_case llvm_tests[] = {
+#ifdef HAVE_LIBBPF_SUPPORT
+ TEST_CASE("Basic BPF llvm compile", llvm__bpf_base_prog),
+ TEST_CASE("kbuild searching", llvm__bpf_test_kbuild_prog),
+ TEST_CASE("Compile source for BPF prologue generation",
+ llvm__bpf_test_prologue_prog),
+ TEST_CASE("Compile source for BPF relocation", llvm__bpf_test_relocation),
+#else
+ TEST_CASE_REASON("Basic BPF llvm compile", llvm__bpf_base_prog, "not compiled in"),
+ TEST_CASE_REASON("kbuild searching", llvm__bpf_test_kbuild_prog, "not compiled in"),
+ TEST_CASE_REASON("Compile source for BPF prologue generation",
+ llvm__bpf_test_prologue_prog, "not compiled in"),
+ TEST_CASE_REASON("Compile source for BPF relocation",
+ llvm__bpf_test_relocation, "not compiled in"),
+#endif
+ { .name = NULL, }
+};
+
+struct test_suite suite__llvm = {
+ .desc = "LLVM search and compile",
+ .test_cases = llvm_tests,
+};
diff --git a/tools/perf/tests/maps.c b/tools/perf/tests/maps.c
index 1ac72919fa3586..e308a3296cefe4 100644
--- a/tools/perf/tests/maps.c
+++ b/tools/perf/tests/maps.c
@@ -33,7 +33,7 @@ static int check_maps(struct map_def *merged, unsigned int size, struct maps *ma
return TEST_OK;
}
-int test__maps__merge_in(struct test *t __maybe_unused, int subtest __maybe_unused)
+static int test__maps__merge_in(struct test_suite *t __maybe_unused, int subtest __maybe_unused)
{
struct maps maps;
unsigned int i;
@@ -120,3 +120,5 @@ int test__maps__merge_in(struct test *t __maybe_unused, int subtest __maybe_unus
maps__exit(&maps);
return TEST_OK;
}
+
+DEFINE_SUITE("maps__merge_in", maps__merge_in);
diff --git a/tools/perf/tests/mem.c b/tools/perf/tests/mem.c
index 673a11a6cd1b14..56014ec7d49da4 100644
--- a/tools/perf/tests/mem.c
+++ b/tools/perf/tests/mem.c
@@ -23,7 +23,7 @@ static int check(union perf_mem_data_src data_src,
return 0;
}
-int test__mem(struct test *text __maybe_unused, int subtest __maybe_unused)
+static int test__mem(struct test_suite *text __maybe_unused, int subtest __maybe_unused)
{
int ret = 0;
union perf_mem_data_src src;
@@ -56,3 +56,5 @@ int test__mem(struct test *text __maybe_unused, int subtest __maybe_unused)
return ret;
}
+
+DEFINE_SUITE("Test data source output", mem);
diff --git a/tools/perf/tests/mem2node.c b/tools/perf/tests/mem2node.c
index e4d0d58b97f81a..b17b86391383c0 100644
--- a/tools/perf/tests/mem2node.c
+++ b/tools/perf/tests/mem2node.c
@@ -43,7 +43,7 @@ static unsigned long *get_bitmap(const char *str, int nbits)
return bm && map ? bm : NULL;
}
-int test__mem2node(struct test *t __maybe_unused, int subtest __maybe_unused)
+static int test__mem2node(struct test_suite *t __maybe_unused, int subtest __maybe_unused)
{
struct mem2node map;
struct memory_node nodes[3];
@@ -77,3 +77,5 @@ int test__mem2node(struct test *t __maybe_unused, int subtest __maybe_unused)
mem2node__exit(&map);
return 0;
}
+
+DEFINE_SUITE("mem2node", mem2node);
diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c
index d38757db2dc2c0..90b2feda31acb4 100644
--- a/tools/perf/tests/mmap-basic.c
+++ b/tools/perf/tests/mmap-basic.c
@@ -29,7 +29,7 @@
* Then it checks if the number of syscalls reported as perf events by
* the kernel corresponds to the number of syscalls made.
*/
-int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__basic_mmap(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
int err = -1;
union perf_event *event;
@@ -164,3 +164,5 @@ out_free_threads:
perf_thread_map__put(threads);
return err;
}
+
+DEFINE_SUITE("Read samples using the mmap interface", basic_mmap);
diff --git a/tools/perf/tests/mmap-thread-lookup.c b/tools/perf/tests/mmap-thread-lookup.c
index 6f2da7a72f67e7..a4301fc7b7705c 100644
--- a/tools/perf/tests/mmap-thread-lookup.c
+++ b/tools/perf/tests/mmap-thread-lookup.c
@@ -224,7 +224,7 @@ static int mmap_events(synth_cb synth)
*
* by using all thread objects.
*/
-int test__mmap_thread_lookup(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__mmap_thread_lookup(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
/* perf_event__synthesize_threads synthesize */
TEST_ASSERT_VAL("failed with sythesizing all",
@@ -236,3 +236,5 @@ int test__mmap_thread_lookup(struct test *test __maybe_unused, int subtest __may
return 0;
}
+
+DEFINE_SUITE("Lookup mmap thread", mmap_thread_lookup);
diff --git a/tools/perf/tests/openat-syscall-all-cpus.c b/tools/perf/tests/openat-syscall-all-cpus.c
index f7dd6c463f04fe..cd3dd463783fd0 100644
--- a/tools/perf/tests/openat-syscall-all-cpus.c
+++ b/tools/perf/tests/openat-syscall-all-cpus.c
@@ -19,7 +19,8 @@
#include "stat.h"
#include "util/counts.h"
-int test__openat_syscall_event_on_all_cpus(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__openat_syscall_event_on_all_cpus(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
{
int err = -1, fd, cpu;
struct perf_cpu_map *cpus;
@@ -127,3 +128,5 @@ out_thread_map_delete:
perf_thread_map__put(threads);
return err;
}
+
+DEFINE_SUITE("Detect openat syscall event on all cpus", openat_syscall_event_on_all_cpus);
diff --git a/tools/perf/tests/openat-syscall-tp-fields.c b/tools/perf/tests/openat-syscall-tp-fields.c
index 5e4af2f0f14ae9..a7b2800652e4c7 100644
--- a/tools/perf/tests/openat-syscall-tp-fields.c
+++ b/tools/perf/tests/openat-syscall-tp-fields.c
@@ -22,7 +22,8 @@
#define AT_FDCWD -100
#endif
-int test__syscall_openat_tp_fields(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__syscall_openat_tp_fields(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
{
struct record_opts opts = {
.target = {
@@ -142,3 +143,5 @@ out_delete_evlist:
out:
return err;
}
+
+DEFINE_SUITE("syscalls:sys_enter_openat event fields", syscall_openat_tp_fields);
diff --git a/tools/perf/tests/openat-syscall.c b/tools/perf/tests/openat-syscall.c
index 85a8f0fe7aea00..7f4c13c4b14d06 100644
--- a/tools/perf/tests/openat-syscall.c
+++ b/tools/perf/tests/openat-syscall.c
@@ -13,7 +13,8 @@
#include "tests.h"
#include "util/counts.h"
-int test__openat_syscall_event(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__openat_syscall_event(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
{
int err = -1, fd;
struct evsel *evsel;
@@ -66,3 +67,5 @@ out_thread_map_delete:
perf_thread_map__put(threads);
return err;
}
+
+DEFINE_SUITE("Detect openat syscall event", openat_syscall_event);
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index 6af94639b14a8f..a508f1dbcb2ad4 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -2276,7 +2276,7 @@ static int test_pmu_events_alias(char *event, char *alias)
return test_event(&e);
}
-int test__parse_events(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__parse_events(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
int ret1, ret2 = 0;
char *event, *alias;
@@ -2319,3 +2319,5 @@ do { \
return ret2;
}
+
+DEFINE_SUITE("Parse event definition strings", parse_events);
diff --git a/tools/perf/tests/parse-metric.c b/tools/perf/tests/parse-metric.c
index dfc797ecc75095..574b7e4efd3a5a 100644
--- a/tools/perf/tests/parse-metric.c
+++ b/tools/perf/tests/parse-metric.c
@@ -369,7 +369,7 @@ static int test_metric_group(void)
return 0;
}
-int test__parse_metric(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__parse_metric(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
TEST_ASSERT_VAL("IPC failed", test_ipc() == 0);
TEST_ASSERT_VAL("frontend failed", test_frontend() == 0);
@@ -383,3 +383,5 @@ int test__parse_metric(struct test *test __maybe_unused, int subtest __maybe_unu
}
return 0;
}
+
+DEFINE_SUITE("Parse and process metrics", parse_metric);
diff --git a/tools/perf/tests/parse-no-sample-id-all.c b/tools/perf/tests/parse-no-sample-id-all.c
index 471273676701ba..d62e31595ab210 100644
--- a/tools/perf/tests/parse-no-sample-id-all.c
+++ b/tools/perf/tests/parse-no-sample-id-all.c
@@ -67,7 +67,8 @@ struct test_attr_event {
*
* Return: %0 on success, %-1 if the test fails.
*/
-int test__parse_no_sample_id_all(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__parse_no_sample_id_all(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
{
int err;
@@ -103,3 +104,5 @@ int test__parse_no_sample_id_all(struct test *test __maybe_unused, int subtest _
return 0;
}
+
+DEFINE_SUITE("Parse with no sample_id_all bit set", parse_no_sample_id_all);
diff --git a/tools/perf/tests/pe-file-parsing.c b/tools/perf/tests/pe-file-parsing.c
index 58b90c42eb38c1..c09a9fae16897d 100644
--- a/tools/perf/tests/pe-file-parsing.c
+++ b/tools/perf/tests/pe-file-parsing.c
@@ -68,7 +68,7 @@ static int run_dir(const char *d)
return TEST_OK;
}
-int test__pe_file_parsing(struct test *test __maybe_unused,
+static int test__pe_file_parsing(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
struct stat st;
@@ -89,10 +89,12 @@ int test__pe_file_parsing(struct test *test __maybe_unused,
#else
-int test__pe_file_parsing(struct test *test __maybe_unused,
+static int test__pe_file_parsing(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
return TEST_SKIP;
}
#endif
+
+DEFINE_SUITE("PE file support", pe_file_parsing);
diff --git a/tools/perf/tests/perf-hooks.c b/tools/perf/tests/perf-hooks.c
index dd865e0bea12d4..78cdeb89645ef6 100644
--- a/tools/perf/tests/perf-hooks.c
+++ b/tools/perf/tests/perf-hooks.c
@@ -26,7 +26,7 @@ static void the_hook(void *_hook_flags)
raise(SIGSEGV);
}
-int test__perf_hooks(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__perf_hooks(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
int hook_flags = 0;
@@ -45,3 +45,5 @@ int test__perf_hooks(struct test *test __maybe_unused, int subtest __maybe_unuse
return TEST_FAIL;
return TEST_OK;
}
+
+DEFINE_SUITE("perf hooks", perf_hooks);
diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c
index 0df471bf1590ee..6354465067b87e 100644
--- a/tools/perf/tests/perf-record.c
+++ b/tools/perf/tests/perf-record.c
@@ -41,7 +41,7 @@ realloc:
return cpu;
}
-int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__PERF_RECORD(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
struct record_opts opts = {
.target = {
@@ -332,3 +332,5 @@ out_delete_evlist:
out:
return (err < 0 || errs > 0) ? -1 : 0;
}
+
+DEFINE_SUITE("PERF_RECORD_* events & perf_sample fields", PERF_RECORD);
diff --git a/tools/perf/tests/perf-time-to-tsc.c b/tools/perf/tests/perf-time-to-tsc.c
index 7c56bc1f4cff0d..d12d0ad8180107 100644
--- a/tools/perf/tests/perf-time-to-tsc.c
+++ b/tools/perf/tests/perf-time-to-tsc.c
@@ -23,6 +23,16 @@
#include "pmu.h"
#include "pmu-hybrid.h"
+/*
+ * Except x86_64/i386 and Arm64, other archs don't support TSC in perf. Just
+ * enable the test for x86_64/i386 and Arm64 archs.
+ */
+#if defined(__x86_64__) || defined(__i386__) || defined(__aarch64__)
+#define TSC_IS_SUPPORTED 1
+#else
+#define TSC_IS_SUPPORTED 0
+#endif
+
#define CHECK__(x) { \
while ((x) < 0) { \
pr_debug(#x " failed!\n"); \
@@ -45,7 +55,7 @@
* %0 is returned, otherwise %-1 is returned. If TSC conversion is not
* supported then the test passes but " (not supported)" is printed.
*/
-int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
struct record_opts opts = {
.mmap_pages = UINT_MAX,
@@ -69,6 +79,11 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
u64 test_time, comm1_time = 0, comm2_time = 0;
struct mmap *md;
+ if (!TSC_IS_SUPPORTED) {
+ pr_debug("Test not supported on this architecture");
+ return TEST_SKIP;
+ }
+
threads = thread_map__new(-1, getpid(), UINT_MAX);
CHECK_NOT_NULL__(threads);
@@ -185,15 +200,4 @@ out_err:
return err;
}
-bool test__tsc_is_supported(void)
-{
- /*
- * Except x86_64/i386 and Arm64, other archs don't support TSC in perf.
- * Just enable the test for x86_64/i386 and Arm64 archs.
- */
-#if defined(__x86_64__) || defined(__i386__) || defined(__aarch64__)
- return true;
-#else
- return false;
-#endif
-}
+DEFINE_SUITE("Convert perf time to TSC", perf_time_to_tsc);
diff --git a/tools/perf/tests/pfm.c b/tools/perf/tests/pfm.c
index e8fd0da0762b81..71b76deb1f9277 100644
--- a/tools/perf/tests/pfm.c
+++ b/tools/perf/tests/pfm.c
@@ -12,27 +12,6 @@
#include <linux/kernel.h>
#ifdef HAVE_LIBPFM
-static int test__pfm_events(void);
-static int test__pfm_group(void);
-#endif
-
-static const struct {
- int (*func)(void);
- const char *desc;
-} pfm_testcase_table[] = {
-#ifdef HAVE_LIBPFM
- {
- .func = test__pfm_events,
- .desc = "test of individual --pfm-events",
- },
- {
- .func = test__pfm_group,
- .desc = "test groups of --pfm-events",
- },
-#endif
-};
-
-#ifdef HAVE_LIBPFM
static int count_pfm_events(struct perf_evlist *evlist)
{
struct perf_evsel *evsel;
@@ -44,7 +23,8 @@ static int count_pfm_events(struct perf_evlist *evlist)
return count;
}
-static int test__pfm_events(void)
+static int test__pfm_events(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
{
struct evlist *evlist;
struct option opt;
@@ -104,7 +84,8 @@ static int test__pfm_events(void)
return 0;
}
-static int test__pfm_group(void)
+static int test__pfm_group(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
{
struct evlist *evlist;
struct option opt;
@@ -187,27 +168,27 @@ static int test__pfm_group(void)
}
return 0;
}
-#endif
-
-const char *test__pfm_subtest_get_desc(int i)
-{
- if (i < 0 || i >= (int)ARRAY_SIZE(pfm_testcase_table))
- return NULL;
- return pfm_testcase_table[i].desc;
-}
-
-int test__pfm_subtest_get_nr(void)
+#else
+static int test__pfm_events(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
{
- return (int)ARRAY_SIZE(pfm_testcase_table);
+ return TEST_SKIP;
}
-int test__pfm(struct test *test __maybe_unused, int i __maybe_unused)
+static int test__pfm_group(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
{
-#ifdef HAVE_LIBPFM
- if (i < 0 || i >= (int)ARRAY_SIZE(pfm_testcase_table))
- return TEST_FAIL;
- return pfm_testcase_table[i].func();
-#else
return TEST_SKIP;
-#endif
}
+#endif
+
+static struct test_case pfm_tests[] = {
+ TEST_CASE_REASON("test of individual --pfm-events", pfm_events, "not compiled in"),
+ TEST_CASE_REASON("test groups of --pfm-events", pfm_group, "not compiled in"),
+ { .name = NULL, }
+};
+
+struct test_suite suite__pfm = {
+ .desc = "Test libpfm4 support",
+ .test_cases = pfm_tests,
+};
diff --git a/tools/perf/tests/pmu-events.c b/tools/perf/tests/pmu-events.c
index 9ae894c406d823..df1c9a3cc05bf3 100644
--- a/tools/perf/tests/pmu-events.c
+++ b/tools/perf/tests/pmu-events.c
@@ -418,7 +418,8 @@ static int compare_alias_to_test_event(struct perf_pmu_alias *alias,
}
/* Verify generated events from pmu-events.c are as expected */
-static int test_pmu_event_table(void)
+static int test__pmu_event_table(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
{
const struct pmu_event *sys_event_tables = __test_pmu_get_sys_events_table();
const struct pmu_events_map *map = __test_pmu_get_events_map();
@@ -705,7 +706,8 @@ static struct perf_pmu_test_pmu test_pmus[] = {
};
/* Test that aliases generated are as expected */
-static int test_aliases(void)
+static int test__aliases(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
{
struct perf_pmu *pmu = NULL;
unsigned long i;
@@ -892,7 +894,8 @@ out_err:
}
-static int test_parsing(void)
+static int test__parsing(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
{
const struct pmu_events_map *cpus_map = pmu_events_map__find();
const struct pmu_events_map *map;
@@ -1034,7 +1037,8 @@ out:
* or all defined cpus via the 'fake_pmu'
* in parse_events.
*/
-static int test_parsing_fake(void)
+static int test__parsing_fake(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
{
const struct pmu_events_map *map;
const struct pmu_event *pe;
@@ -1068,55 +1072,16 @@ static int test_parsing_fake(void)
return 0;
}
-static const struct {
- int (*func)(void);
- const char *desc;
-} pmu_events_testcase_table[] = {
- {
- .func = test_pmu_event_table,
- .desc = "PMU event table sanity",
- },
- {
- .func = test_aliases,
- .desc = "PMU event map aliases",
- },
- {
- .func = test_parsing,
- .desc = "Parsing of PMU event table metrics",
- },
- {
- .func = test_parsing_fake,
- .desc = "Parsing of PMU event table metrics with fake PMUs",
- },
+static struct test_case pmu_events_tests[] = {
+ TEST_CASE("PMU event table sanity", pmu_event_table),
+ TEST_CASE("PMU event map aliases", aliases),
+ TEST_CASE_REASON("Parsing of PMU event table metrics", parsing,
+ "some metrics failed"),
+ TEST_CASE("Parsing of PMU event table metrics with fake PMUs", parsing_fake),
+ { .name = NULL, }
};
-const char *test__pmu_events_subtest_get_desc(int subtest)
-{
- if (subtest < 0 ||
- subtest >= (int)ARRAY_SIZE(pmu_events_testcase_table))
- return NULL;
- return pmu_events_testcase_table[subtest].desc;
-}
-
-const char *test__pmu_events_subtest_skip_reason(int subtest)
-{
- if (subtest < 0 ||
- subtest >= (int)ARRAY_SIZE(pmu_events_testcase_table))
- return NULL;
- if (pmu_events_testcase_table[subtest].func != test_parsing)
- return NULL;
- return "some metrics failed";
-}
-
-int test__pmu_events_subtest_get_nr(void)
-{
- return (int)ARRAY_SIZE(pmu_events_testcase_table);
-}
-
-int test__pmu_events(struct test *test __maybe_unused, int subtest)
-{
- if (subtest < 0 ||
- subtest >= (int)ARRAY_SIZE(pmu_events_testcase_table))
- return TEST_FAIL;
- return pmu_events_testcase_table[subtest].func();
-}
+struct test_suite suite__pmu_events = {
+ .desc = "PMU events",
+ .test_cases = pmu_events_tests,
+};
diff --git a/tools/perf/tests/pmu.c b/tools/perf/tests/pmu.c
index 714e6830a758fa..8507bd615e97ef 100644
--- a/tools/perf/tests/pmu.c
+++ b/tools/perf/tests/pmu.c
@@ -137,7 +137,7 @@ static struct list_head *test_terms_list(void)
return &terms;
}
-int test__pmu(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__pmu(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
char *format = test_format_dir_get();
LIST_HEAD(formats);
@@ -177,3 +177,5 @@ int test__pmu(struct test *test __maybe_unused, int subtest __maybe_unused)
test_format_dir_put(format);
return ret;
}
+
+DEFINE_SUITE("Parse perf pmu format", pmu);
diff --git a/tools/perf/tests/python-use.c b/tools/perf/tests/python-use.c
index 98c6d474aa6f20..6b990ee385756b 100644
--- a/tools/perf/tests/python-use.c
+++ b/tools/perf/tests/python-use.c
@@ -9,7 +9,7 @@
#include "tests.h"
#include "util/debug.h"
-int test__python_use(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__python_use(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
char *cmd;
int ret;
@@ -23,3 +23,5 @@ int test__python_use(struct test *test __maybe_unused, int subtest __maybe_unuse
free(cmd);
return ret;
}
+
+DEFINE_SUITE("'import perf' in python", python_use);
diff --git a/tools/perf/tests/sample-parsing.c b/tools/perf/tests/sample-parsing.c
index c83a1151412914..b669d22f2b1361 100644
--- a/tools/perf/tests/sample-parsing.c
+++ b/tools/perf/tests/sample-parsing.c
@@ -368,7 +368,7 @@ out_free:
* checks sample format bits separately and together. If the test passes %0 is
* returned, otherwise %-1 is returned.
*/
-int test__sample_parsing(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__sample_parsing(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
const u64 rf[] = {4, 5, 6, 7, 12, 13, 14, 15};
u64 sample_type;
@@ -426,3 +426,5 @@ int test__sample_parsing(struct test *test __maybe_unused, int subtest __maybe_u
return 0;
}
+
+DEFINE_SUITE("Sample parsing", sample_parsing);
diff --git a/tools/perf/tests/sdt.c b/tools/perf/tests/sdt.c
index ed76c693f65ecf..91971289925102 100644
--- a/tools/perf/tests/sdt.c
+++ b/tools/perf/tests/sdt.c
@@ -76,7 +76,7 @@ static int search_cached_probe(const char *target,
return ret;
}
-int test__sdt_event(struct test *test __maybe_unused, int subtests __maybe_unused)
+static int test__sdt_event(struct test_suite *test __maybe_unused, int subtests __maybe_unused)
{
int ret = TEST_FAIL;
char __tempdir[] = "./test-buildid-XXXXXX";
@@ -114,9 +114,11 @@ error:
return ret;
}
#else
-int test__sdt_event(struct test *test __maybe_unused, int subtests __maybe_unused)
+static int test__sdt_event(struct test_suite *test __maybe_unused, int subtests __maybe_unused)
{
pr_debug("Skip SDT event test because SDT support is not compiled\n");
return TEST_SKIP;
}
#endif
+
+DEFINE_SUITE("Probe SDT events", sdt_event);
diff --git a/tools/perf/tests/shell/record+zstd_comp_decomp.sh b/tools/perf/tests/shell/record+zstd_comp_decomp.sh
index 8a168cf8bacca2..49bd875d512278 100755
--- a/tools/perf/tests/shell/record+zstd_comp_decomp.sh
+++ b/tools/perf/tests/shell/record+zstd_comp_decomp.sh
@@ -12,7 +12,7 @@ skip_if_no_z_record() {
collect_z_record() {
echo "Collecting compressed record file:"
- [[ "$(uname -m)" != s390x ]] && gflag='-g'
+ [ "$(uname -m)" != s390x ] && gflag='-g'
$perf_tool record -o $trace_file $gflag -z -F 5000 -- \
dd count=500 if=/dev/urandom of=/dev/null
}
diff --git a/tools/perf/tests/shell/stat_all_pmu.sh b/tools/perf/tests/shell/stat_all_pmu.sh
index 2de7fd0394fdfe..b30dba455f36c6 100755
--- a/tools/perf/tests/shell/stat_all_pmu.sh
+++ b/tools/perf/tests/shell/stat_all_pmu.sh
@@ -7,11 +7,11 @@ set -e
for p in $(perf list --raw-dump pmu); do
echo "Testing $p"
result=$(perf stat -e "$p" true 2>&1)
- if [[ ! "$result" =~ "$p" ]] && [[ ! "$result" =~ "<not supported>" ]]; then
+ if ! echo "$result" | grep -q "$p" && ! echo "$result" | grep -q "<not supported>" ; then
# We failed to see the event and it is supported. Possibly the workload was
# too small so retry with something longer.
result=$(perf stat -e "$p" perf bench internals synthesize 2>&1)
- if [[ ! "$result" =~ "$p" ]]; then
+ if ! echo "$result" | grep -q "$p" ; then
echo "Event '$p' not printed in:"
echo "$result"
exit 1
diff --git a/tools/perf/tests/shell/stat_bpf_counters.sh b/tools/perf/tests/shell/stat_bpf_counters.sh
index 2aed20dc226254..13473aeba489c3 100755
--- a/tools/perf/tests/shell/stat_bpf_counters.sh
+++ b/tools/perf/tests/shell/stat_bpf_counters.sh
@@ -23,7 +23,7 @@ compare_number()
# skip if --bpf-counters is not supported
if ! perf stat --bpf-counters true > /dev/null 2>&1; then
- if [ "$1" == "-v" ]; then
+ if [ "$1" = "-v" ]; then
echo "Skipping: --bpf-counters not supported"
perf --no-pager stat --bpf-counters true || true
fi
diff --git a/tools/perf/tests/shell/test_arm_spe.sh b/tools/perf/tests/shell/test_arm_spe.sh
new file mode 100755
index 00000000000000..e59044edc406a1
--- /dev/null
+++ b/tools/perf/tests/shell/test_arm_spe.sh
@@ -0,0 +1,89 @@
+#!/bin/sh
+# Check Arm SPE trace data recording and synthesized samples
+
+# Uses 'perf record' to record trace data of Arm SPE events;
+# then verifies whether any SPE event samples are generated by SPE with
+# the 'perf script' and 'perf report' commands.
+
+# SPDX-License-Identifier: GPL-2.0
+# German Gomez <german.gomez@arm.com>, 2021
+
+skip_if_no_arm_spe_event() {
+ perf list | egrep -q 'arm_spe_[0-9]+//' && return 0
+
+ # arm_spe event doesn't exist
+ return 2
+}
+
+skip_if_no_arm_spe_event || exit 2
+
+perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+glb_err=0
+
+cleanup_files()
+{
+ rm -f ${perfdata}
+ exit $glb_err
+}
+
+trap cleanup_files exit term int
+
+arm_spe_report() {
+ if [ $2 != 0 ]; then
+ echo "$1: FAIL"
+ glb_err=$2
+ else
+ echo "$1: PASS"
+ fi
+}
+
+perf_script_samples() {
+ echo "Looking at perf.data file for dumping samples:"
+
+ # from arm-spe.c/arm_spe_synth_events()
+ events="(ld1-miss|ld1-access|llc-miss|lld-access|tlb-miss|tlb-access|branch-miss|remote-access|memory)"
+
+ # Below is an example of the samples dumping:
+ # dd 3048 [002] 1 l1d-access: ffffaa64999c __GI___libc_write+0x3c (/lib/aarch64-linux-gnu/libc-2.27.so)
+ # dd 3048 [002] 1 tlb-access: ffffaa64999c __GI___libc_write+0x3c (/lib/aarch64-linux-gnu/libc-2.27.so)
+ # dd 3048 [002] 1 memory: ffffaa64999c __GI___libc_write+0x3c (/lib/aarch64-linux-gnu/libc-2.27.so)
+ perf script -F,-time -i ${perfdata} 2>&1 | \
+ egrep " +$1 +[0-9]+ .* +${events}:(.*:)? +" > /dev/null 2>&1
+}
+
+perf_report_samples() {
+ echo "Looking at perf.data file for reporting samples:"
+
+ # Below is an example of the samples reporting:
+ # 73.04% 73.04% dd libc-2.27.so [.] _dl_addr
+ # 7.71% 7.71% dd libc-2.27.so [.] getenv
+ # 2.59% 2.59% dd ld-2.27.so [.] strcmp
+ perf report --stdio -i ${perfdata} 2>&1 | \
+ egrep " +[0-9]+\.[0-9]+% +[0-9]+\.[0-9]+% +$1 " > /dev/null 2>&1
+}
+
+arm_spe_snapshot_test() {
+ echo "Recording trace with snapshot mode $perfdata"
+ perf record -o ${perfdata} -e arm_spe// -S \
+ -- dd if=/dev/zero of=/dev/null > /dev/null 2>&1 &
+ PERFPID=$!
+
+ # Wait for perf program
+ sleep 1
+
+ # Send signal to snapshot trace data
+ kill -USR2 $PERFPID
+
+ # Stop perf program
+ kill $PERFPID
+ wait $PERFPID
+
+ perf_script_samples dd &&
+ perf_report_samples dd
+
+ err=$?
+ arm_spe_report "SPE snapshot testing" $err
+}
+
+arm_spe_snapshot_test
+exit $glb_err
diff --git a/tools/perf/tests/stat.c b/tools/perf/tests/stat.c
index c1911501c39c30..2eb096b5e6dab6 100644
--- a/tools/perf/tests/stat.c
+++ b/tools/perf/tests/stat.c
@@ -47,7 +47,8 @@ static int process_stat_config_event(struct perf_tool *tool __maybe_unused,
return 0;
}
-int test__synthesize_stat_config(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__synthesize_stat_config(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
{
struct perf_stat_config stat_config = {
.aggr_mode = AGGR_CORE,
@@ -77,7 +78,7 @@ static int process_stat_event(struct perf_tool *tool __maybe_unused,
return 0;
}
-int test__synthesize_stat(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__synthesize_stat(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
struct perf_counts_values count;
@@ -103,7 +104,7 @@ static int process_stat_round_event(struct perf_tool *tool __maybe_unused,
return 0;
}
-int test__synthesize_stat_round(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__synthesize_stat_round(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
TEST_ASSERT_VAL("failed to synthesize stat_config",
!perf_event__synthesize_stat_round(NULL, 0xdeadbeef, PERF_STAT_ROUND_TYPE__INTERVAL,
@@ -111,3 +112,7 @@ int test__synthesize_stat_round(struct test *test __maybe_unused, int subtest __
return 0;
}
+
+DEFINE_SUITE("Synthesize stat config", synthesize_stat_config);
+DEFINE_SUITE("Synthesize stat", synthesize_stat);
+DEFINE_SUITE("Synthesize stat round", synthesize_stat_round);
diff --git a/tools/perf/tests/sw-clock.c b/tools/perf/tests/sw-clock.c
index 74988846be1da9..9cd6fec375eeb7 100644
--- a/tools/perf/tests/sw-clock.c
+++ b/tools/perf/tests/sw-clock.c
@@ -133,7 +133,7 @@ out_delete_evlist:
return err;
}
-int test__sw_clock_freq(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__sw_clock_freq(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
int ret;
@@ -143,3 +143,5 @@ int test__sw_clock_freq(struct test *test __maybe_unused, int subtest __maybe_un
return ret;
}
+
+DEFINE_SUITE("Software clock events period values", sw_clock_freq);
diff --git a/tools/perf/tests/switch-tracking.c b/tools/perf/tests/switch-tracking.c
index 62c0ec21aaa86e..0c0c2328bf4e6e 100644
--- a/tools/perf/tests/switch-tracking.c
+++ b/tools/perf/tests/switch-tracking.c
@@ -321,7 +321,7 @@ out_free_nodes:
* evsel->core.system_wide and evsel->tracking flags (respectively) with other events
* sometimes enabled or disabled.
*/
-int test__switch_tracking(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__switch_tracking(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
const char *sched_switch = "sched:sched_switch";
struct switch_tracking switch_tracking = { .tids = NULL, };
@@ -588,3 +588,5 @@ out_err:
err = -1;
goto out;
}
+
+DEFINE_SUITE("Track with sched_switch", switch_tracking);
diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c
index 4c2969db59b077..25f075fa9125bd 100644
--- a/tools/perf/tests/task-exit.c
+++ b/tools/perf/tests/task-exit.c
@@ -39,7 +39,7 @@ static void workload_exec_failed_signal(int signo __maybe_unused,
* if the number of exit event reported by the kernel is 1 or not
* in order to check the kernel returns correct number of event.
*/
-int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__task_exit(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
int err = -1;
union perf_event *event;
@@ -151,3 +151,5 @@ out_delete_evlist:
evlist__delete(evlist);
return err;
}
+
+DEFINE_SUITE("Number of exit events of a simple workload", task_exit);
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
index fe1306f584958d..8f65098110fcf1 100644
--- a/tools/perf/tests/tests.h
+++ b/tools/perf/tests/tests.h
@@ -27,112 +27,146 @@ enum {
TEST_SKIP = -2,
};
-struct test {
+struct test_suite;
+
+typedef int (*test_fnptr)(struct test_suite *, int);
+
+struct test_case {
+ const char *name;
+ const char *desc;
+ const char *skip_reason;
+ test_fnptr run_case;
+};
+
+struct test_suite {
const char *desc;
- int (*func)(struct test *test, int subtest);
- struct {
- bool skip_if_fail;
- int (*get_nr)(void);
- const char *(*get_desc)(int subtest);
- const char *(*skip_reason)(int subtest);
- } subtest;
- bool (*is_supported)(void);
+ struct test_case *test_cases;
void *priv;
};
+#define DECLARE_SUITE(name) \
+ extern struct test_suite suite__##name;
+
+#define TEST_CASE(description, _name) \
+ { \
+ .name = #_name, \
+ .desc = description, \
+ .run_case = test__##_name, \
+ }
+
+#define TEST_CASE_REASON(description, _name, _reason) \
+ { \
+ .name = #_name, \
+ .desc = description, \
+ .run_case = test__##_name, \
+ .skip_reason = _reason, \
+ }
+
+#define DEFINE_SUITE(description, _name) \
+ struct test_case tests__##_name[] = { \
+ TEST_CASE(description, _name), \
+ { .name = NULL, } \
+ }; \
+ struct test_suite suite__##_name = { \
+ .desc = description, \
+ .test_cases = tests__##_name, \
+ }
+
/* Tests */
-int test__vmlinux_matches_kallsyms(struct test *test, int subtest);
-int test__openat_syscall_event(struct test *test, int subtest);
-int test__openat_syscall_event_on_all_cpus(struct test *test, int subtest);
-int test__basic_mmap(struct test *test, int subtest);
-int test__PERF_RECORD(struct test *test, int subtest);
-int test__perf_evsel__roundtrip_name_test(struct test *test, int subtest);
-int test__perf_evsel__tp_sched_test(struct test *test, int subtest);
-int test__syscall_openat_tp_fields(struct test *test, int subtest);
-int test__pmu(struct test *test, int subtest);
-int test__pmu_events(struct test *test, int subtest);
-const char *test__pmu_events_subtest_get_desc(int subtest);
-const char *test__pmu_events_subtest_skip_reason(int subtest);
-int test__pmu_events_subtest_get_nr(void);
-int test__attr(struct test *test, int subtest);
-int test__dso_data(struct test *test, int subtest);
-int test__dso_data_cache(struct test *test, int subtest);
-int test__dso_data_reopen(struct test *test, int subtest);
-int test__parse_events(struct test *test, int subtest);
-int test__hists_link(struct test *test, int subtest);
-int test__python_use(struct test *test, int subtest);
-int test__bp_signal(struct test *test, int subtest);
-int test__bp_signal_overflow(struct test *test, int subtest);
-int test__bp_accounting(struct test *test, int subtest);
-int test__wp(struct test *test, int subtest);
-const char *test__wp_subtest_get_desc(int subtest);
-const char *test__wp_subtest_skip_reason(int subtest);
-int test__wp_subtest_get_nr(void);
-int test__task_exit(struct test *test, int subtest);
-int test__mem(struct test *test, int subtest);
-int test__sw_clock_freq(struct test *test, int subtest);
-int test__code_reading(struct test *test, int subtest);
-int test__sample_parsing(struct test *test, int subtest);
-int test__keep_tracking(struct test *test, int subtest);
-int test__parse_no_sample_id_all(struct test *test, int subtest);
-int test__dwarf_unwind(struct test *test, int subtest);
-int test__expr(struct test *test, int subtest);
-int test__hists_filter(struct test *test, int subtest);
-int test__mmap_thread_lookup(struct test *test, int subtest);
-int test__thread_maps_share(struct test *test, int subtest);
-int test__hists_output(struct test *test, int subtest);
-int test__hists_cumulate(struct test *test, int subtest);
-int test__switch_tracking(struct test *test, int subtest);
-int test__fdarray__filter(struct test *test, int subtest);
-int test__fdarray__add(struct test *test, int subtest);
-int test__kmod_path__parse(struct test *test, int subtest);
-int test__thread_map(struct test *test, int subtest);
-int test__llvm(struct test *test, int subtest);
-const char *test__llvm_subtest_get_desc(int subtest);
-int test__llvm_subtest_get_nr(void);
-int test__bpf(struct test *test, int subtest);
-const char *test__bpf_subtest_get_desc(int subtest);
-int test__bpf_subtest_get_nr(void);
-int test__session_topology(struct test *test, int subtest);
-int test__thread_map_synthesize(struct test *test, int subtest);
-int test__thread_map_remove(struct test *test, int subtest);
-int test__cpu_map_synthesize(struct test *test, int subtest);
-int test__synthesize_stat_config(struct test *test, int subtest);
-int test__synthesize_stat(struct test *test, int subtest);
-int test__synthesize_stat_round(struct test *test, int subtest);
-int test__event_update(struct test *test, int subtest);
-int test__event_times(struct test *test, int subtest);
-int test__backward_ring_buffer(struct test *test, int subtest);
-int test__cpu_map_print(struct test *test, int subtest);
-int test__cpu_map_merge(struct test *test, int subtest);
-int test__sdt_event(struct test *test, int subtest);
-int test__is_printable_array(struct test *test, int subtest);
-int test__bitmap_print(struct test *test, int subtest);
-int test__perf_hooks(struct test *test, int subtest);
-int test__clang(struct test *test, int subtest);
-const char *test__clang_subtest_get_desc(int subtest);
-int test__clang_subtest_get_nr(void);
-int test__unit_number__scnprint(struct test *test, int subtest);
-int test__mem2node(struct test *t, int subtest);
-int test__maps__merge_in(struct test *t, int subtest);
-int test__time_utils(struct test *t, int subtest);
-int test__jit_write_elf(struct test *test, int subtest);
-int test__api_io(struct test *test, int subtest);
-int test__demangle_java(struct test *test, int subtest);
-int test__demangle_ocaml(struct test *test, int subtest);
-int test__pfm(struct test *test, int subtest);
-const char *test__pfm_subtest_get_desc(int subtest);
-int test__pfm_subtest_get_nr(void);
-int test__parse_metric(struct test *test, int subtest);
-int test__pe_file_parsing(struct test *test, int subtest);
-int test__expand_cgroup_events(struct test *test, int subtest);
-int test__perf_time_to_tsc(struct test *test, int subtest);
-int test__dlfilter(struct test *test, int subtest);
-
-bool test__bp_signal_is_supported(void);
-bool test__bp_account_is_supported(void);
-bool test__wp_is_supported(void);
-bool test__tsc_is_supported(void);
+DECLARE_SUITE(vmlinux_matches_kallsyms);
+DECLARE_SUITE(openat_syscall_event);
+DECLARE_SUITE(openat_syscall_event_on_all_cpus);
+DECLARE_SUITE(basic_mmap);
+DECLARE_SUITE(PERF_RECORD);
+DECLARE_SUITE(perf_evsel__roundtrip_name_test);
+DECLARE_SUITE(perf_evsel__tp_sched_test);
+DECLARE_SUITE(syscall_openat_tp_fields);
+DECLARE_SUITE(pmu);
+DECLARE_SUITE(pmu_events);
+DECLARE_SUITE(attr);
+DECLARE_SUITE(dso_data);
+DECLARE_SUITE(dso_data_cache);
+DECLARE_SUITE(dso_data_reopen);
+DECLARE_SUITE(parse_events);
+DECLARE_SUITE(hists_link);
+DECLARE_SUITE(python_use);
+DECLARE_SUITE(bp_signal);
+DECLARE_SUITE(bp_signal_overflow);
+DECLARE_SUITE(bp_accounting);
+DECLARE_SUITE(wp);
+DECLARE_SUITE(task_exit);
+DECLARE_SUITE(mem);
+DECLARE_SUITE(sw_clock_freq);
+DECLARE_SUITE(code_reading);
+DECLARE_SUITE(sample_parsing);
+DECLARE_SUITE(keep_tracking);
+DECLARE_SUITE(parse_no_sample_id_all);
+DECLARE_SUITE(dwarf_unwind);
+DECLARE_SUITE(expr);
+DECLARE_SUITE(hists_filter);
+DECLARE_SUITE(mmap_thread_lookup);
+DECLARE_SUITE(thread_maps_share);
+DECLARE_SUITE(hists_output);
+DECLARE_SUITE(hists_cumulate);
+DECLARE_SUITE(switch_tracking);
+DECLARE_SUITE(fdarray__filter);
+DECLARE_SUITE(fdarray__add);
+DECLARE_SUITE(kmod_path__parse);
+DECLARE_SUITE(thread_map);
+DECLARE_SUITE(llvm);
+DECLARE_SUITE(bpf);
+DECLARE_SUITE(session_topology);
+DECLARE_SUITE(thread_map_synthesize);
+DECLARE_SUITE(thread_map_remove);
+DECLARE_SUITE(cpu_map_synthesize);
+DECLARE_SUITE(synthesize_stat_config);
+DECLARE_SUITE(synthesize_stat);
+DECLARE_SUITE(synthesize_stat_round);
+DECLARE_SUITE(event_update);
+DECLARE_SUITE(event_times);
+DECLARE_SUITE(backward_ring_buffer);
+DECLARE_SUITE(cpu_map_print);
+DECLARE_SUITE(cpu_map_merge);
+DECLARE_SUITE(sdt_event);
+DECLARE_SUITE(is_printable_array);
+DECLARE_SUITE(bitmap_print);
+DECLARE_SUITE(perf_hooks);
+DECLARE_SUITE(clang);
+DECLARE_SUITE(unit_number__scnprint);
+DECLARE_SUITE(mem2node);
+DECLARE_SUITE(maps__merge_in);
+DECLARE_SUITE(time_utils);
+DECLARE_SUITE(jit_write_elf);
+DECLARE_SUITE(api_io);
+DECLARE_SUITE(demangle_java);
+DECLARE_SUITE(demangle_ocaml);
+DECLARE_SUITE(pfm);
+DECLARE_SUITE(parse_metric);
+DECLARE_SUITE(pe_file_parsing);
+DECLARE_SUITE(expand_cgroup_events);
+DECLARE_SUITE(perf_time_to_tsc);
+DECLARE_SUITE(dlfilter);
+
+/*
+ * PowerPC and S390 do not support creation of instruction breakpoints using the
+ * perf_event interface.
+ *
+ * ARM requires explicit rounding down of the instruction pointer in Thumb mode,
+ * and then requires the single-step to be handled explicitly in the overflow
+ * handler to avoid stepping into the SIGIO handler and getting stuck on the
+ * breakpointed instruction.
+ *
+ * Since arm64 has the same issue as arm for the single-step handling, this
+ * case also gets stuck on the breakpointed instruction.
+ *
+ * Just disable the test for these architectures until these issues are
+ * resolved.
+ */
+#if defined(__powerpc__) || defined(__s390x__) || defined(__arm__) || defined(__aarch64__)
+#define BP_SIGNAL_IS_SUPPORTED 0
+#else
+#define BP_SIGNAL_IS_SUPPORTED 1
+#endif
#ifdef HAVE_DWARF_UNWIND_SUPPORT
struct thread;
@@ -142,7 +176,7 @@ int test__arch_unwind_sample(struct perf_sample *sample,
#endif
#if defined(__arm__)
-int test__vectors_page(struct test *test, int subtest);
+DECLARE_SUITE(vectors_page);
#endif
#endif /* TESTS_H */
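
For reference, a single-function test converted with these macros ends up defining a one-entry case array plus a suite. A minimal manual expansion of DEFINE_SUITE("Thread map", thread_map), using only the TEST_CASE() and DEFINE_SUITE() definitions above (the test__thread_map() function itself is still provided separately in thread-map.c), looks roughly like:

	struct test_case tests__thread_map[] = {
		{
			.name = "thread_map",
			.desc = "Thread map",
			.run_case = test__thread_map,
		},
		{ .name = NULL, },
	};

	struct test_suite suite__thread_map = {
		.desc = "Thread map",
		.test_cases = tests__thread_map,
	};

Multi-case suites such as suite__pfm and suite__wp skip DEFINE_SUITE and build the test_case array by hand with TEST_CASE()/TEST_CASE_REASON(), as shown in pfm.c and wp.c above.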
diff --git a/tools/perf/tests/thread-map.c b/tools/perf/tests/thread-map.c
index d1e208b4a571c9..e413c1387fcba3 100644
--- a/tools/perf/tests/thread-map.c
+++ b/tools/perf/tests/thread-map.c
@@ -19,7 +19,7 @@ struct machine;
#define NAME (const char *) "perf"
#define NAMEUL (unsigned long) NAME
-int test__thread_map(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__thread_map(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
struct perf_thread_map *map;
@@ -86,7 +86,7 @@ static int process_event(struct perf_tool *tool __maybe_unused,
return 0;
}
-int test__thread_map_synthesize(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__thread_map_synthesize(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
struct perf_thread_map *threads;
@@ -106,7 +106,7 @@ int test__thread_map_synthesize(struct test *test __maybe_unused, int subtest __
return 0;
}
-int test__thread_map_remove(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__thread_map_remove(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
struct perf_thread_map *threads;
char *str;
@@ -145,3 +145,7 @@ int test__thread_map_remove(struct test *test __maybe_unused, int subtest __mayb
perf_thread_map__put(threads);
return 0;
}
+
+DEFINE_SUITE("Thread map", thread_map);
+DEFINE_SUITE("Synthesize thread map", thread_map_synthesize);
+DEFINE_SUITE("Remove thread map", thread_map_remove);
diff --git a/tools/perf/tests/thread-maps-share.c b/tools/perf/tests/thread-maps-share.c
index 9371484973f24d..84edd82c519e2f 100644
--- a/tools/perf/tests/thread-maps-share.c
+++ b/tools/perf/tests/thread-maps-share.c
@@ -4,7 +4,7 @@
#include "thread.h"
#include "debug.h"
-int test__thread_maps_share(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__thread_maps_share(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
struct machines machines;
struct machine *machine;
@@ -96,3 +96,5 @@ int test__thread_maps_share(struct test *test __maybe_unused, int subtest __mayb
machines__exit(&machines);
return 0;
}
+
+DEFINE_SUITE("Share thread maps", thread_maps_share);
diff --git a/tools/perf/tests/time-utils-test.c b/tools/perf/tests/time-utils-test.c
index fe57ca3b6e543c..38df10373c1e17 100644
--- a/tools/perf/tests/time-utils-test.c
+++ b/tools/perf/tests/time-utils-test.c
@@ -131,7 +131,7 @@ out:
return pass;
}
-int test__time_utils(struct test *t __maybe_unused, int subtest __maybe_unused)
+static int test__time_utils(struct test_suite *t __maybe_unused, int subtest __maybe_unused)
{
bool pass = true;
@@ -249,3 +249,5 @@ int test__time_utils(struct test *t __maybe_unused, int subtest __maybe_unused)
return pass ? 0 : TEST_FAIL;
}
+
+DEFINE_SUITE("time utils", time_utils);
diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c
index 4574c46260d9a7..86998613914606 100644
--- a/tools/perf/tests/topology.c
+++ b/tools/perf/tests/topology.c
@@ -175,7 +175,7 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map)
return 0;
}
-int test__session_topology(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__session_topology(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
char path[PATH_MAX];
struct perf_cpu_map *map;
@@ -201,3 +201,5 @@ free_path:
unlink(path);
return ret;
}
+
+DEFINE_SUITE("Session topology", session_topology);
diff --git a/tools/perf/tests/unit_number__scnprintf.c b/tools/perf/tests/unit_number__scnprintf.c
index 3721757435da5e..88bcada1c78fa0 100644
--- a/tools/perf/tests/unit_number__scnprintf.c
+++ b/tools/perf/tests/unit_number__scnprintf.c
@@ -7,7 +7,7 @@
#include "units.h"
#include "debug.h"
-int test__unit_number__scnprint(struct test *t __maybe_unused, int subtest __maybe_unused)
+static int test__unit_number__scnprint(struct test_suite *t __maybe_unused, int subtest __maybe_unused)
{
struct {
u64 n;
@@ -38,3 +38,5 @@ int test__unit_number__scnprint(struct test *t __maybe_unused, int subtest __may
return TEST_OK;
}
+
+DEFINE_SUITE("unit_number__scnprintf", unit_number__scnprint);
diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
index 4f884aabc7f4ba..e80df13c04208d 100644
--- a/tools/perf/tests/vmlinux-kallsyms.c
+++ b/tools/perf/tests/vmlinux-kallsyms.c
@@ -111,7 +111,8 @@ static bool is_ignored_symbol(const char *name, char type)
return false;
}
-int test__vmlinux_matches_kallsyms(struct test *test __maybe_unused, int subtest __maybe_unused)
+static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
{
int err = -1;
struct rb_node *nd;
@@ -352,3 +353,5 @@ out:
machine__exit(&vmlinux);
return err;
}
+
+DEFINE_SUITE("vmlinux symtab matches kallsyms", vmlinux_matches_kallsyms);
diff --git a/tools/perf/tests/wp.c b/tools/perf/tests/wp.c
index 9387fa76faa503..820d942b30c390 100644
--- a/tools/perf/tests/wp.c
+++ b/tools/perf/tests/wp.c
@@ -48,6 +48,7 @@ static void get__perf_event_attr(struct perf_event_attr *attr, int wp_type,
attr->exclude_hv = 1;
}
+#ifndef __s390x__
static int __event(int wp_type, void *wp_addr, unsigned long wp_len)
{
int fd;
@@ -61,9 +62,14 @@ static int __event(int wp_type, void *wp_addr, unsigned long wp_len)
return fd;
}
+#endif
-static int wp_ro_test(void)
+static int test__wp_ro(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
{
+#if defined(__s390x__) || defined(__x86_64__) || defined(__i386__)
+ return TEST_SKIP;
+#else
int fd;
unsigned long tmp, tmp1 = rand();
@@ -79,10 +85,15 @@ static int wp_ro_test(void)
close(fd);
return 0;
+#endif
}
-static int wp_wo_test(void)
+static int test__wp_wo(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
{
+#if defined(__s390x__)
+ return TEST_SKIP;
+#else
int fd;
unsigned long tmp, tmp1 = rand();
@@ -98,10 +109,15 @@ static int wp_wo_test(void)
close(fd);
return 0;
+#endif
}
-static int wp_rw_test(void)
+static int test__wp_rw(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
{
+#if defined(__s390x__)
+ return TEST_SKIP;
+#else
int fd;
unsigned long tmp, tmp1 = rand();
@@ -118,10 +134,15 @@ static int wp_rw_test(void)
close(fd);
return 0;
+#endif
}
-static int wp_modify_test(void)
+static int test__wp_modify(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
{
+#if defined(__s390x__)
+ return TEST_SKIP;
+#else
int fd, ret;
unsigned long tmp = rand();
struct perf_event_attr new_attr;
@@ -163,93 +184,18 @@ static int wp_modify_test(void)
close(fd);
return 0;
-}
-
-static bool wp_ro_supported(void)
-{
-#if defined (__x86_64__) || defined (__i386__)
- return false;
-#else
- return true;
-#endif
-}
-
-static const char *wp_ro_skip_msg(void)
-{
-#if defined (__x86_64__) || defined (__i386__)
- return "missing hardware support";
-#else
- return NULL;
#endif
}
-static struct {
- const char *desc;
- int (*target_func)(void);
- bool (*is_supported)(void);
- const char *(*skip_msg)(void);
-} wp_testcase_table[] = {
- {
- .desc = "Read Only Watchpoint",
- .target_func = &wp_ro_test,
- .is_supported = &wp_ro_supported,
- .skip_msg = &wp_ro_skip_msg,
- },
- {
- .desc = "Write Only Watchpoint",
- .target_func = &wp_wo_test,
- },
- {
- .desc = "Read / Write Watchpoint",
- .target_func = &wp_rw_test,
- },
- {
- .desc = "Modify Watchpoint",
- .target_func = &wp_modify_test,
- },
+static struct test_case wp_tests[] = {
+ TEST_CASE_REASON("Read Only Watchpoint", wp_ro, "missing hardware support"),
+ TEST_CASE_REASON("Write Only Watchpoint", wp_wo, "missing hardware support"),
+ TEST_CASE_REASON("Read / Write Watchpoint", wp_rw, "missing hardware support"),
+ TEST_CASE_REASON("Modify Watchpoint", wp_modify, "missing hardware support"),
+ { .name = NULL, }
};
-int test__wp_subtest_get_nr(void)
-{
- return (int)ARRAY_SIZE(wp_testcase_table);
-}
-
-const char *test__wp_subtest_get_desc(int i)
-{
- if (i < 0 || i >= (int)ARRAY_SIZE(wp_testcase_table))
- return NULL;
- return wp_testcase_table[i].desc;
-}
-
-const char *test__wp_subtest_skip_reason(int i)
-{
- if (i < 0 || i >= (int)ARRAY_SIZE(wp_testcase_table))
- return NULL;
- if (!wp_testcase_table[i].skip_msg)
- return NULL;
- return wp_testcase_table[i].skip_msg();
-}
-
-int test__wp(struct test *test __maybe_unused, int i)
-{
- if (i < 0 || i >= (int)ARRAY_SIZE(wp_testcase_table))
- return TEST_FAIL;
-
- if (wp_testcase_table[i].is_supported &&
- !wp_testcase_table[i].is_supported())
- return TEST_SKIP;
-
- return !wp_testcase_table[i].target_func() ? TEST_OK : TEST_FAIL;
-}
-
-/* The s390 so far does not have support for
- * instruction breakpoint using the perf_event_open() system call.
- */
-bool test__wp_is_supported(void)
-{
-#if defined(__s390x__)
- return false;
-#else
- return true;
-#endif
-}
+struct test_suite suite__wp = {
+ .desc = "Watchpoint",
+ .test_cases = wp_tests,
+};
diff --git a/tools/perf/trace/beauty/beauty.h b/tools/perf/trace/beauty/beauty.h
index d6dfe68a761255..f527a46ab4e728 100644
--- a/tools/perf/trace/beauty/beauty.h
+++ b/tools/perf/trace/beauty/beauty.h
@@ -62,6 +62,8 @@ size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_
extern struct strarray strarray__socket_families;
+extern struct strarray strarray__socket_level;
+
/**
* augmented_arg: extra payload for syscall pointer arguments
@@ -230,6 +232,9 @@ size_t syscall_arg__scnprintf_sockaddr(char *bf, size_t size, struct syscall_arg
size_t syscall_arg__scnprintf_socket_protocol(char *bf, size_t size, struct syscall_arg *arg);
#define SCA_SK_PROTO syscall_arg__scnprintf_socket_protocol
+size_t syscall_arg__scnprintf_socket_level(char *bf, size_t size, struct syscall_arg *arg);
+#define SCA_SK_LEVEL syscall_arg__scnprintf_socket_level
+
size_t syscall_arg__scnprintf_statx_flags(char *bf, size_t size, struct syscall_arg *arg);
#define SCA_STATX_FLAGS syscall_arg__scnprintf_statx_flags
diff --git a/tools/perf/trace/beauty/sockaddr.c b/tools/perf/trace/beauty/sockaddr.c
index cd110634ab0994..2e0e867c0c1b87 100644
--- a/tools/perf/trace/beauty/sockaddr.c
+++ b/tools/perf/trace/beauty/sockaddr.c
@@ -7,7 +7,7 @@
#include <sys/un.h>
#include <arpa/inet.h>
-#include "trace/beauty/generated/socket_arrays.c"
+#include "trace/beauty/generated/sockaddr.c"
DEFINE_STRARRAY(socket_families, "PF_");
static size_t af_inet__scnprintf(struct sockaddr *sa, char *bf, size_t size)
diff --git a/tools/perf/trace/beauty/sockaddr.sh b/tools/perf/trace/beauty/sockaddr.sh
new file mode 100755
index 00000000000000..3820e5c8229313
--- /dev/null
+++ b/tools/perf/trace/beauty/sockaddr.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+# SPDX-License-Identifier: LGPL-2.1
+
+# This one uses a copy of the kernel source headers that is kept in a
+# place used just for these tools/perf/trace/beauty/ scripts; we shouldn't
+# put it in tools/include/linux, otherwise it would be used in the
+# normal compiler build process and would drag needless stuff from the
+# kernel.
+
+# When what these scripts need is already in tools/include/, use it;
+# otherwise grab and check the copy from the kernel sources just for these
+# string table building scripts.
+
+[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/perf/trace/beauty/include/linux/
+
+printf "static const char *socket_families[] = {\n"
+# #define AF_LOCAL 1 /* POSIX name for AF_UNIX */
+regex='^#define[[:space:]]+AF_(\w+)[[:space:]]+([[:digit:]]+).*'
+
+egrep $regex ${header_dir}/socket.h | \
+ sed -r "s/$regex/\2 \1/g" | \
+ xargs printf "\t[%s] = \"%s\",\n" | \
+ egrep -v "\"(UNIX|MAX)\""
+printf "};\n"
diff --git a/tools/perf/trace/beauty/socket.c b/tools/perf/trace/beauty/socket.c
index f23a3dda29026b..b0870c7b48e521 100644
--- a/tools/perf/trace/beauty/socket.c
+++ b/tools/perf/trace/beauty/socket.c
@@ -9,9 +9,10 @@
#include <sys/types.h>
#include <sys/socket.h>
+#include "trace/beauty/generated/socket.c"
+
static size_t socket__scnprintf_ipproto(int protocol, char *bf, size_t size, bool show_prefix)
{
-#include "trace/beauty/generated/socket_ipproto_array.c"
static DEFINE_STRARRAY(socket_ipproto, "IPPROTO_");
return strarray__scnprintf(&strarray__socket_ipproto, bf, size, "%d", show_prefix, protocol);
@@ -26,3 +27,21 @@ size_t syscall_arg__scnprintf_socket_protocol(char *bf, size_t size, struct sysc
return syscall_arg__scnprintf_int(bf, size, arg);
}
+
+static size_t socket__scnprintf_level(int level, char *bf, size_t size, bool show_prefix)
+{
+#if defined(__alpha__) || defined(__hppa__) || defined(__mips__) || defined(__sparc__)
+ const int sol_socket = 0xffff;
+#else
+ const int sol_socket = 1;
+#endif
+ if (level == sol_socket)
+ return scnprintf(bf, size, "%sSOCKET", show_prefix ? "SOL_" : "");
+
+ return strarray__scnprintf(&strarray__socket_level, bf, size, "%d", show_prefix, level);
+}
+
+size_t syscall_arg__scnprintf_socket_level(char *bf, size_t size, struct syscall_arg *arg)
+{
+ return socket__scnprintf_level(arg->val, bf, size, arg->show_string_prefix);
+}
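
A rough usage sketch of the new level formatter follows (hypothetical buffer and values; struct syscall_arg is only partially initialized here for illustration):

	struct syscall_arg arg = {
		.val = 1,			/* SOL_SOCKET on most architectures */
		.show_string_prefix = true,
	};
	char bf[32];

	syscall_arg__scnprintf_socket_level(bf, sizeof(bf), &arg);
	/* On e.g. x86_64, bf now holds "SOL_SOCKET"; other levels fall back
	 * to the socket_level[] string table generated by socket.sh. */

Whatever consumer wires SCA_SK_LEVEL into a syscall argument table (not shown in this diff) gets the same formatting, for instance for setsockopt's level argument.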
diff --git a/tools/perf/trace/beauty/socket.sh b/tools/perf/trace/beauty/socket.sh
index 3820e5c8229313..76330acb27e585 100755
--- a/tools/perf/trace/beauty/socket.sh
+++ b/tools/perf/trace/beauty/socket.sh
@@ -1,24 +1,28 @@
#!/bin/sh
# SPDX-License-Identifier: LGPL-2.1
-# This one uses a copy from the kernel sources headers that is in a
-# place used just for these tools/perf/beauty/ usage, we shouldn't not
-# put it in tools/include/linux otherwise they would be used in the
-# normal compiler building process and would drag needless stuff from the
-# kernel.
+if [ $# -gt 0 ] ; then
+ uapi_header_dir=$1
+ beauty_header_dir=$2
+else
+ uapi_header_dir=tools/include/uapi/linux/
+ beauty_header_dir=tools/perf/trace/beauty/include/linux/
+fi
-# When what these scripts need is already in tools/include/ then use it,
-# otherwise grab and check the copy from the kernel sources just for these
-# string table building scripts.
+printf "static const char *socket_ipproto[] = {\n"
+ipproto_regex='^[[:space:]]+IPPROTO_(\w+)[[:space:]]+=[[:space:]]+([[:digit:]]+),.*'
-[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/perf/trace/beauty/include/linux/
+egrep $ipproto_regex ${uapi_header_dir}/in.h | \
+ sed -r "s/$ipproto_regex/\2 \1/g" | \
+ sort -n | xargs printf "\t[%s] = \"%s\",\n"
+printf "};\n\n"
-printf "static const char *socket_families[] = {\n"
-# #define AF_LOCAL 1 /* POSIX name for AF_UNIX */
-regex='^#define[[:space:]]+AF_(\w+)[[:space:]]+([[:digit:]]+).*'
+printf "static const char *socket_level[] = {\n"
+socket_level_regex='^#define[[:space:]]+SOL_(\w+)[[:space:]]+([[:digit:]]+)([[:space:]]+\/.*)?'
-egrep $regex ${header_dir}/socket.h | \
- sed -r "s/$regex/\2 \1/g" | \
- xargs printf "\t[%s] = \"%s\",\n" | \
- egrep -v "\"(UNIX|MAX)\""
-printf "};\n"
+egrep $socket_level_regex ${beauty_header_dir}/socket.h | \
+ sed -r "s/$socket_level_regex/\2 \1/g" | \
+ sort -n | xargs printf "\t[%s] = \"%s\",\n"
+printf "};\n\n"
+
+printf 'DEFINE_STRARRAY(socket_level, "SOL_");\n'
diff --git a/tools/perf/trace/beauty/socket_ipproto.sh b/tools/perf/trace/beauty/socket_ipproto.sh
deleted file mode 100755
index de0f2f29017f2e..00000000000000
--- a/tools/perf/trace/beauty/socket_ipproto.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: LGPL-2.1
-
-[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
-
-printf "static const char *socket_ipproto[] = {\n"
-regex='^[[:space:]]+IPPROTO_(\w+)[[:space:]]+=[[:space:]]+([[:digit:]]+),.*'
-
-egrep $regex ${header_dir}/in.h | \
- sed -r "s/$regex/\2 \1/g" | \
- sort | xargs printf "\t[%s] = \"%s\",\n"
-printf "};\n"
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 8511af55fc3a2a..01900689dc0017 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -1255,6 +1255,17 @@ int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool r
return ins__scnprintf(&dl->ins, bf, size, &dl->ops, max_ins_name);
}
+void annotation__init(struct annotation *notes)
+{
+ pthread_mutex_init(&notes->lock, NULL);
+}
+
+void annotation__exit(struct annotation *notes)
+{
+ annotated_source__delete(notes->src);
+ pthread_mutex_destroy(&notes->lock);
+}
+
static void annotation_line__add(struct annotation_line *al, struct list_head *head)
{
list_add_tail(&al->node, head);
@@ -3132,7 +3143,7 @@ int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel,
notes->nr_events = nr_pcnt;
annotation__update_column_widths(notes);
- sym->annotate2 = true;
+ sym->annotate2 = 1;
return 0;
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index 3757416bcf46b0..986f2bbe4870ae 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -299,6 +299,9 @@ struct annotation {
struct annotated_source *src;
};
+void annotation__init(struct annotation *notes);
+void annotation__exit(struct annotation *notes);
+
static inline int annotation__cycles_width(struct annotation *notes)
{
if (notes->have_cycles && notes->options->show_minmax_cycle)
diff --git a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c
index 32fe41835fa686..3fc528c9270c2b 100644
--- a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c
+++ b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c
@@ -151,6 +151,7 @@ static int arm_spe_read_record(struct arm_spe_decoder *decoder)
u64 payload, ip;
memset(&decoder->record, 0x0, sizeof(decoder->record));
+ decoder->record.context_id = (u64)-1;
while (1) {
err = arm_spe_get_next_packet(decoder);
@@ -180,6 +181,7 @@ static int arm_spe_read_record(struct arm_spe_decoder *decoder)
case ARM_SPE_COUNTER:
break;
case ARM_SPE_CONTEXT:
+ decoder->record.context_id = payload;
break;
case ARM_SPE_OP_TYPE:
if (idx == SPE_OP_PKT_HDR_CLASS_LD_ST_ATOMIC) {
diff --git a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
index 59bdb730967413..46a8556a9e956a 100644
--- a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
+++ b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
@@ -38,6 +38,7 @@ struct arm_spe_record {
u64 timestamp;
u64 virt_addr;
u64 phys_addr;
+ u64 context_id;
};
struct arm_spe_insn;
diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c
index 58b7069c5a5f83..4748bcfe61de4e 100644
--- a/tools/perf/util/arm-spe.c
+++ b/tools/perf/util/arm-spe.c
@@ -71,6 +71,7 @@ struct arm_spe {
u64 kernel_start;
unsigned long num_events;
+ u8 use_ctx_pkt_for_pid;
};
struct arm_spe_queue {
@@ -100,7 +101,7 @@ static void arm_spe_dump(struct arm_spe *spe __maybe_unused,
const char *color = PERF_COLOR_BLUE;
color_fprintf(stdout, color,
- ". ... ARM SPE data: size %zu bytes\n",
+ ". ... ARM SPE data: size %#zx bytes\n",
len);
while (len) {
@@ -226,6 +227,44 @@ static inline u8 arm_spe_cpumode(struct arm_spe *spe, u64 ip)
PERF_RECORD_MISC_USER;
}
+static void arm_spe_set_pid_tid_cpu(struct arm_spe *spe,
+ struct auxtrace_queue *queue)
+{
+ struct arm_spe_queue *speq = queue->priv;
+ pid_t tid;
+
+ tid = machine__get_current_tid(spe->machine, speq->cpu);
+ if (tid != -1) {
+ speq->tid = tid;
+ thread__zput(speq->thread);
+ } else
+ speq->tid = queue->tid;
+
+ if ((!speq->thread) && (speq->tid != -1)) {
+ speq->thread = machine__find_thread(spe->machine, -1,
+ speq->tid);
+ }
+
+ if (speq->thread) {
+ speq->pid = speq->thread->pid_;
+ if (queue->cpu == -1)
+ speq->cpu = speq->thread->cpu;
+ }
+}
+
+static int arm_spe_set_tid(struct arm_spe_queue *speq, pid_t tid)
+{
+ struct arm_spe *spe = speq->spe;
+ int err = machine__set_current_tid(spe->machine, speq->cpu, -1, tid);
+
+ if (err)
+ return err;
+
+ arm_spe_set_pid_tid_cpu(spe, &spe->queues.queue_array[speq->queue_nr]);
+
+ return 0;
+}
+
static void arm_spe_prep_sample(struct arm_spe *spe,
struct arm_spe_queue *speq,
union perf_event *event,
@@ -460,6 +499,19 @@ static int arm_spe_run_decoder(struct arm_spe_queue *speq, u64 *timestamp)
* can correlate samples between Arm SPE trace data and other
* perf events with correct time ordering.
*/
+
+ /*
+ * Update pid/tid info.
+ */
+ record = &speq->decoder->record;
+ if (!spe->timeless_decoding && record->context_id != (u64)-1) {
+ ret = arm_spe_set_tid(speq, record->context_id);
+ if (ret)
+ return ret;
+
+ spe->use_ctx_pkt_for_pid = true;
+ }
+
ret = arm_spe_sample(speq);
if (ret)
return ret;
@@ -586,31 +638,6 @@ static bool arm_spe__is_timeless_decoding(struct arm_spe *spe)
return timeless_decoding;
}
-static void arm_spe_set_pid_tid_cpu(struct arm_spe *spe,
- struct auxtrace_queue *queue)
-{
- struct arm_spe_queue *speq = queue->priv;
- pid_t tid;
-
- tid = machine__get_current_tid(spe->machine, speq->cpu);
- if (tid != -1) {
- speq->tid = tid;
- thread__zput(speq->thread);
- } else
- speq->tid = queue->tid;
-
- if ((!speq->thread) && (speq->tid != -1)) {
- speq->thread = machine__find_thread(spe->machine, -1,
- speq->tid);
- }
-
- if (speq->thread) {
- speq->pid = speq->thread->pid_;
- if (queue->cpu == -1)
- speq->cpu = speq->thread->cpu;
- }
-}
-
static int arm_spe_process_queues(struct arm_spe *spe, u64 timestamp)
{
unsigned int queue_nr;
@@ -641,7 +668,12 @@ static int arm_spe_process_queues(struct arm_spe *spe, u64 timestamp)
ts = timestamp;
}
- arm_spe_set_pid_tid_cpu(spe, queue);
+ /*
+ * A previous context-switch event has set pid/tid in the machine's context, so
+ * here we need to update the pid/tid in the thread and SPE queue.
+ */
+ if (!spe->use_ctx_pkt_for_pid)
+ arm_spe_set_pid_tid_cpu(spe, queue);
ret = arm_spe_run_decoder(speq, &ts);
if (ret < 0) {
@@ -681,6 +713,25 @@ static int arm_spe_process_timeless_queues(struct arm_spe *spe, pid_t tid,
return 0;
}
+static int arm_spe_context_switch(struct arm_spe *spe, union perf_event *event,
+ struct perf_sample *sample)
+{
+ pid_t pid, tid;
+ int cpu;
+
+ if (!(event->header.misc & PERF_RECORD_MISC_SWITCH_OUT))
+ return 0;
+
+ pid = event->context_switch.next_prev_pid;
+ tid = event->context_switch.next_prev_tid;
+ cpu = sample->cpu;
+
+ if (tid == -1)
+ pr_warning("context_switch event has no tid\n");
+
+ return machine__set_current_tid(spe->machine, cpu, pid, tid);
+}
+
static int arm_spe_process_event(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample,
@@ -718,6 +769,13 @@ static int arm_spe_process_event(struct perf_session *session,
}
} else if (timestamp) {
err = arm_spe_process_queues(spe, timestamp);
+ if (err)
+ return err;
+
+ if (!spe->use_ctx_pkt_for_pid &&
+ (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE ||
+ event->header.type == PERF_RECORD_SWITCH))
+ err = arm_spe_context_switch(spe, event, sample);
}
return err;
@@ -783,7 +841,15 @@ static int arm_spe_flush(struct perf_session *session __maybe_unused,
return arm_spe_process_timeless_queues(spe, -1,
MAX_TIMESTAMP - 1);
- return arm_spe_process_queues(spe, MAX_TIMESTAMP);
+ ret = arm_spe_process_queues(spe, MAX_TIMESTAMP);
+ if (ret)
+ return ret;
+
+ if (!spe->use_ctx_pkt_for_pid)
+ ui__warning("Arm SPE CONTEXT packets not found in the traces.\n"
+ "Matching of TIDs to SPE events could be inaccurate.\n");
+
+ return 0;
}
static void arm_spe_free_queue(void *priv)
diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
index 4d3b4cdce17633..a517eaa51eb37f 100644
--- a/tools/perf/util/bpf-event.c
+++ b/tools/perf/util/bpf-event.c
@@ -33,6 +33,33 @@ struct btf * __weak btf__load_from_kernel_by_id(__u32 id)
return err ? ERR_PTR(err) : btf;
}
+struct bpf_program * __weak
+bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prev)
+{
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+ return bpf_program__next(prev, obj);
+#pragma GCC diagnostic pop
+}
+
+struct bpf_map * __weak
+bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
+{
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+ return bpf_map__next(prev, obj);
+#pragma GCC diagnostic pop
+}
+
+const void * __weak
+btf__raw_data(const struct btf *btf_ro, __u32 *size)
+{
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+ return btf__get_raw_data(btf_ro, size);
+#pragma GCC diagnostic pop
+}
+
static int snprintf_hex(char *buf, size_t size, unsigned char *data, size_t len)
{
int ret = 0;
@@ -119,7 +146,11 @@ static int perf_env__fetch_btf(struct perf_env *env,
node->data_size = data_size;
memcpy(node->data, data, data_size);
- perf_env__insert_btf(env, node);
+ if (!perf_env__insert_btf(env, node)) {
+ /* Insertion failed because of a duplicate. */
+ free(node);
+ return -1;
+ }
return 0;
}
diff --git a/tools/perf/util/c++/clang-c.h b/tools/perf/util/c++/clang-c.h
index 2df8a45bd0888f..d3731a876b6c10 100644
--- a/tools/perf/util/c++/clang-c.h
+++ b/tools/perf/util/c++/clang-c.h
@@ -12,8 +12,9 @@ extern "C" {
extern void perf_clang__init(void);
extern void perf_clang__cleanup(void);
-extern int test__clang_to_IR(void);
-extern int test__clang_to_obj(void);
+struct test_suite;
+extern int test__clang_to_IR(struct test_suite *test, int subtest);
+extern int test__clang_to_obj(struct test_suite *test, int subtest);
extern int perf_clang__compile_bpf(const char *filename,
void **p_obj_buf,
@@ -26,9 +27,6 @@ extern int perf_clang__compile_bpf(const char *filename,
static inline void perf_clang__init(void) { }
static inline void perf_clang__cleanup(void) { }
-static inline int test__clang_to_IR(void) { return -1; }
-static inline int test__clang_to_obj(void) { return -1;}
-
static inline int
perf_clang__compile_bpf(const char *filename __maybe_unused,
void **p_obj_buf __maybe_unused,
diff --git a/tools/perf/util/c++/clang-test.cpp b/tools/perf/util/c++/clang-test.cpp
index 21b23605f78b74..a4683ca536973c 100644
--- a/tools/perf/util/c++/clang-test.cpp
+++ b/tools/perf/util/c++/clang-test.cpp
@@ -35,7 +35,8 @@ __test__clang_to_IR(void)
}
extern "C" {
-int test__clang_to_IR(void)
+int test__clang_to_IR(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
{
perf_clang_scope _scope;
@@ -48,7 +49,8 @@ int test__clang_to_IR(void)
return -1;
}
-int test__clang_to_obj(void)
+int test__clang_to_obj(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
{
perf_clang_scope _scope;
diff --git a/tools/perf/util/cputopo.c b/tools/perf/util/cputopo.c
index ec77e2a7b3ca18..51b429c86f9808 100644
--- a/tools/perf/util/cputopo.c
+++ b/tools/perf/util/cputopo.c
@@ -14,14 +14,16 @@
#include "env.h"
#include "pmu-hybrid.h"
-#define CORE_SIB_FMT \
+#define PACKAGE_CPUS_FMT \
+ "%s/devices/system/cpu/cpu%d/topology/package_cpus_list"
+#define PACKAGE_CPUS_FMT_OLD \
"%s/devices/system/cpu/cpu%d/topology/core_siblings_list"
-#define DIE_SIB_FMT \
+#define DIE_CPUS_FMT \
"%s/devices/system/cpu/cpu%d/topology/die_cpus_list"
-#define THRD_SIB_FMT \
- "%s/devices/system/cpu/cpu%d/topology/thread_siblings_list"
-#define THRD_SIB_FMT_NEW \
+#define CORE_CPUS_FMT \
"%s/devices/system/cpu/cpu%d/topology/core_cpus_list"
+#define CORE_CPUS_FMT_OLD \
+ "%s/devices/system/cpu/cpu%d/topology/thread_siblings_list"
#define NODE_ONLINE_FMT \
"%s/devices/system/node/online"
#define NODE_MEMINFO_FMT \
@@ -39,8 +41,12 @@ static int build_cpu_topology(struct cpu_topology *tp, int cpu)
u32 i = 0;
int ret = -1;
- scnprintf(filename, MAXPATHLEN, CORE_SIB_FMT,
+ scnprintf(filename, MAXPATHLEN, PACKAGE_CPUS_FMT,
sysfs__mountpoint(), cpu);
+ if (access(filename, F_OK) == -1) {
+ scnprintf(filename, MAXPATHLEN, PACKAGE_CPUS_FMT_OLD,
+ sysfs__mountpoint(), cpu);
+ }
fp = fopen(filename, "r");
if (!fp)
goto try_dies;
@@ -54,23 +60,23 @@ static int build_cpu_topology(struct cpu_topology *tp, int cpu)
if (p)
*p = '\0';
- for (i = 0; i < tp->core_sib; i++) {
- if (!strcmp(buf, tp->core_siblings[i]))
+ for (i = 0; i < tp->package_cpus_lists; i++) {
+ if (!strcmp(buf, tp->package_cpus_list[i]))
break;
}
- if (i == tp->core_sib) {
- tp->core_siblings[i] = buf;
- tp->core_sib++;
+ if (i == tp->package_cpus_lists) {
+ tp->package_cpus_list[i] = buf;
+ tp->package_cpus_lists++;
buf = NULL;
len = 0;
}
ret = 0;
try_dies:
- if (!tp->die_siblings)
+ if (!tp->die_cpus_list)
goto try_threads;
- scnprintf(filename, MAXPATHLEN, DIE_SIB_FMT,
+ scnprintf(filename, MAXPATHLEN, DIE_CPUS_FMT,
sysfs__mountpoint(), cpu);
fp = fopen(filename, "r");
if (!fp)
@@ -85,23 +91,23 @@ try_dies:
if (p)
*p = '\0';
- for (i = 0; i < tp->die_sib; i++) {
- if (!strcmp(buf, tp->die_siblings[i]))
+ for (i = 0; i < tp->die_cpus_lists; i++) {
+ if (!strcmp(buf, tp->die_cpus_list[i]))
break;
}
- if (i == tp->die_sib) {
- tp->die_siblings[i] = buf;
- tp->die_sib++;
+ if (i == tp->die_cpus_lists) {
+ tp->die_cpus_list[i] = buf;
+ tp->die_cpus_lists++;
buf = NULL;
len = 0;
}
ret = 0;
try_threads:
- scnprintf(filename, MAXPATHLEN, THRD_SIB_FMT_NEW,
+ scnprintf(filename, MAXPATHLEN, CORE_CPUS_FMT,
sysfs__mountpoint(), cpu);
if (access(filename, F_OK) == -1) {
- scnprintf(filename, MAXPATHLEN, THRD_SIB_FMT,
+ scnprintf(filename, MAXPATHLEN, CORE_CPUS_FMT_OLD,
sysfs__mountpoint(), cpu);
}
fp = fopen(filename, "r");
@@ -115,13 +121,13 @@ try_threads:
if (p)
*p = '\0';
- for (i = 0; i < tp->thread_sib; i++) {
- if (!strcmp(buf, tp->thread_siblings[i]))
+ for (i = 0; i < tp->core_cpus_lists; i++) {
+ if (!strcmp(buf, tp->core_cpus_list[i]))
break;
}
- if (i == tp->thread_sib) {
- tp->thread_siblings[i] = buf;
- tp->thread_sib++;
+ if (i == tp->core_cpus_lists) {
+ tp->core_cpus_list[i] = buf;
+ tp->core_cpus_lists++;
buf = NULL;
}
ret = 0;
@@ -139,16 +145,14 @@ void cpu_topology__delete(struct cpu_topology *tp)
if (!tp)
return;
- for (i = 0 ; i < tp->core_sib; i++)
- zfree(&tp->core_siblings[i]);
+ for (i = 0 ; i < tp->package_cpus_lists; i++)
+ zfree(&tp->package_cpus_list[i]);
- if (tp->die_sib) {
- for (i = 0 ; i < tp->die_sib; i++)
- zfree(&tp->die_siblings[i]);
- }
+ for (i = 0 ; i < tp->die_cpus_lists; i++)
+ zfree(&tp->die_cpus_list[i]);
- for (i = 0 ; i < tp->thread_sib; i++)
- zfree(&tp->thread_siblings[i]);
+ for (i = 0 ; i < tp->core_cpus_lists; i++)
+ zfree(&tp->core_cpus_list[i]);
free(tp);
}
@@ -164,7 +168,7 @@ static bool has_die_topology(void)
if (strncmp(uts.machine, "x86_64", 6))
return false;
- scnprintf(filename, MAXPATHLEN, DIE_SIB_FMT,
+ scnprintf(filename, MAXPATHLEN, DIE_CPUS_FMT,
sysfs__mountpoint(), 0);
if (access(filename, F_OK) == -1)
return false;
@@ -205,13 +209,13 @@ struct cpu_topology *cpu_topology__new(void)
tp = addr;
addr += sizeof(*tp);
- tp->core_siblings = addr;
+ tp->package_cpus_list = addr;
addr += sz;
if (has_die) {
- tp->die_siblings = addr;
+ tp->die_cpus_list = addr;
addr += sz;
}
- tp->thread_siblings = addr;
+ tp->core_cpus_list = addr;
for (i = 0; i < nr; i++) {
if (!cpu_map__has(map, i))
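As an aside for readers tracing the fallback logic in build_cpu_topology() above: the pattern is simply "try the new sysfs name first, fall back to the legacy name if it is absent". The following is a minimal standalone sketch of that pattern, assuming cpu0 and using only standard libc calls rather than the perf helpers; the function name and buffer sizes are illustrative.

#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Illustrative only: read cpu0's package CPU list, preferring the new sysfs name. */
static int read_package_cpus(char *out, int len)
{
        const char *path = "/sys/devices/system/cpu/cpu0/topology/package_cpus_list";
        FILE *fp;

        if (access(path, F_OK) == -1)   /* older kernels only expose the legacy name */
                path = "/sys/devices/system/cpu/cpu0/topology/core_siblings_list";

        fp = fopen(path, "r");
        if (!fp)
                return -1;
        if (!fgets(out, len, fp)) {
                fclose(fp);
                return -1;
        }
        fclose(fp);
        out[strcspn(out, "\n")] = '\0'; /* strip the trailing newline, as the hunk above does */
        return 0;
}

int main(void)
{
        char buf[256];

        if (read_package_cpus(buf, sizeof(buf)) == 0)
                printf("package_cpus_list: %s\n", buf);
        return 0;
}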
diff --git a/tools/perf/util/cputopo.h b/tools/perf/util/cputopo.h
index d9af97177068bb..854e18f9041e8e 100644
--- a/tools/perf/util/cputopo.h
+++ b/tools/perf/util/cputopo.h
@@ -5,12 +5,33 @@
#include <linux/types.h>
struct cpu_topology {
- u32 core_sib;
- u32 die_sib;
- u32 thread_sib;
- char **core_siblings;
- char **die_siblings;
- char **thread_siblings;
+ /* The number of unique package_cpus_lists below. */
+ u32 package_cpus_lists;
+ /* The number of unique die_cpus_lists below. */
+ u32 die_cpus_lists;
+ /* The number of unique core_cpus_lists below. */
+ u32 core_cpus_lists;
+ /*
+ * An array of strings where each string is unique and read from
+ * /sys/devices/system/cpu/cpuX/topology/package_cpus_list. From the ABI
+ * each of these is a human-readable list of CPUs sharing the same
+ * physical_package_id. The format is like 0-3, 8-11, 14,17.
+ */
+ const char **package_cpus_list;
+ /*
+ * An array of strings where each string is unique and read from
+ * /sys/devices/system/cpu/cpuX/topology/die_cpus_list. From the ABI
+ * each of these is a human-readable list of CPUs within the same die.
+ * The format is like 0-3, 8-11, 14,17.
+ */
+ const char **die_cpus_list;
+ /*
+ * An array of strings where each string is unique and read from
+ * /sys/devices/system/cpu/cpuX/topology/core_cpus_list. From the ABI
+ * each of these is a human-readable list of CPUs within the same
+ * core. The format is like 0-3, 8-11, 14,17.
+ */
+ const char **core_cpus_list;
};
struct numa_topology_node {
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index f323adb1af8552..4f672f7d008c53 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -537,7 +537,7 @@ static void cs_etm__dump_event(struct cs_etm_queue *etmq,
fprintf(stdout, "\n");
color_fprintf(stdout, color,
- ". ... CoreSight %s Trace data: size %zu bytes\n",
+ ". ... CoreSight %s Trace data: size %#zx bytes\n",
cs_etm_decoder__get_name(etmq->decoder), buffer->size);
do {
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index 17f1dd0680b424..b9904896eb9746 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -75,12 +75,13 @@ out:
return node;
}
-void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
+bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
{
struct rb_node *parent = NULL;
__u32 btf_id = btf_node->id;
struct btf_node *node;
struct rb_node **p;
+ bool ret = true;
down_write(&env->bpf_progs.lock);
p = &env->bpf_progs.btfs.rb_node;
@@ -94,6 +95,7 @@ void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
p = &(*p)->rb_right;
} else {
pr_debug("duplicated btf %u\n", btf_id);
+ ret = false;
goto out;
}
}
@@ -103,6 +105,7 @@ void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
env->bpf_progs.btfs_cnt++;
out:
up_write(&env->bpf_progs.lock);
+ return ret;
}
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
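The point of the new bool return is that a caller can distinguish a rejected duplicate from a successful insert and free the node it still owns. Below is a hedged, self-contained sketch of that caller-side idiom; the toy table-based insert stands in for perf_env__insert_btf() and is not the actual perf call site.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct btf_node { unsigned int id; };

/* Toy stand-in for perf_env__insert_btf(): refuse duplicates and return false. */
static struct btf_node *table[16];
static int table_len;

static bool insert_btf(struct btf_node *node)
{
        for (int i = 0; i < table_len; i++)
                if (table[i]->id == node->id)
                        return false;   /* already present, caller keeps ownership */
        table[table_len++] = node;      /* inserted, the table owns it now */
        return true;
}

static void fetch_btf(unsigned int id)
{
        struct btf_node *node = calloc(1, sizeof(*node));

        if (!node)
                return;
        node->id = id;
        if (!insert_btf(node))
                free(node);     /* duplicate: avoid the leak the bool return exists to prevent */
}

int main(void)
{
        fetch_btf(1);
        fetch_btf(1);   /* second insert is rejected and freed */
        printf("unique BTF ids stored: %d\n", table_len);
        return 0;
}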
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
index 1383876f72b377..163e5ec503a263 100644
--- a/tools/perf/util/env.h
+++ b/tools/perf/util/env.h
@@ -167,7 +167,7 @@ void perf_env__insert_bpf_prog_info(struct perf_env *env,
struct bpf_prog_info_node *info_node);
struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
__u32 prog_id);
-void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
+bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
int perf_env__numa_node(struct perf_env *env, int cpu);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index ec967fb8d7d9f0..a59fb2ecb84ee4 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -3037,3 +3037,15 @@ void evsel__set_leader(struct evsel *evsel, struct evsel *leader)
{
evsel->core.leader = &leader->core;
}
+
+int evsel__source_count(const struct evsel *evsel)
+{
+ struct evsel *pos;
+ int count = 0;
+
+ evlist__for_each_entry(evsel->evlist, pos) {
+ if (pos->metric_leader == evsel)
+ count++;
+ }
+ return count;
+}
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 3ea687141afa68..29d49a8c1e92dd 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -489,6 +489,7 @@ struct evsel *evsel__leader(struct evsel *evsel);
bool evsel__has_leader(struct evsel *evsel, struct evsel *leader);
bool evsel__is_leader(struct evsel *evsel);
void evsel__set_leader(struct evsel *evsel, struct evsel *leader);
+int evsel__source_count(const struct evsel *evsel);
/*
* Macro to swap the bit-field postition and size.
diff --git a/tools/perf/util/expr.c b/tools/perf/util/expr.c
index 77c6ad81a923ea..1d532b9fed29c2 100644
--- a/tools/perf/util/expr.c
+++ b/tools/perf/util/expr.c
@@ -5,13 +5,17 @@
#include <stdlib.h>
#include <string.h>
#include "metricgroup.h"
+#include "cpumap.h"
+#include "cputopo.h"
#include "debug.h"
#include "expr.h"
#include "expr-bison.h"
#include "expr-flex.h"
+#include "smt.h"
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <ctype.h>
+#include <math.h>
#ifdef PARSER_DEBUG
extern int expr_debug;
@@ -19,7 +23,10 @@ extern int expr_debug;
struct expr_id_data {
union {
- double val;
+ struct {
+ double val;
+ int source_count;
+ } val;
struct {
double val;
const char *metric_name;
@@ -137,6 +144,13 @@ int expr__add_id(struct expr_parse_ctx *ctx, const char *id)
/* Caller must make sure id is allocated */
int expr__add_id_val(struct expr_parse_ctx *ctx, const char *id, double val)
{
+ return expr__add_id_val_source_count(ctx, id, val, /*source_count=*/1);
+}
+
+/* Caller must make sure id is allocated */
+int expr__add_id_val_source_count(struct expr_parse_ctx *ctx, const char *id,
+ double val, int source_count)
+{
struct expr_id_data *data_ptr = NULL, *old_data = NULL;
char *old_key = NULL;
int ret;
@@ -144,7 +158,8 @@ int expr__add_id_val(struct expr_parse_ctx *ctx, const char *id, double val)
data_ptr = malloc(sizeof(*data_ptr));
if (!data_ptr)
return -ENOMEM;
- data_ptr->val = val;
+ data_ptr->val.val = val;
+ data_ptr->val.source_count = source_count;
data_ptr->kind = EXPR_ID_DATA__VALUE;
ret = hashmap__set(ctx->ids, id, data_ptr,
@@ -240,7 +255,7 @@ int expr__resolve_id(struct expr_parse_ctx *ctx, const char *id,
switch (data->kind) {
case EXPR_ID_DATA__VALUE:
- pr_debug2("lookup(%s): val %f\n", id, data->val);
+ pr_debug2("lookup(%s): val %f\n", id, data->val.val);
break;
case EXPR_ID_DATA__REF:
pr_debug2("lookup(%s): ref metric name %s\n", id,
@@ -251,7 +266,7 @@ int expr__resolve_id(struct expr_parse_ctx *ctx, const char *id,
pr_debug("%s failed to count\n", id);
return -1;
}
- pr_debug("processing metric: %s EXIT: %f\n", id, data->val);
+ pr_debug("processing metric: %s EXIT: %f\n", id, data->ref.val);
break;
case EXPR_ID_DATA__REF_VALUE:
pr_debug2("lookup(%s): ref val %f metric name %s\n", id,
@@ -366,7 +381,47 @@ int expr__find_ids(const char *expr, const char *one,
double expr_id_data__value(const struct expr_id_data *data)
{
if (data->kind == EXPR_ID_DATA__VALUE)
- return data->val;
+ return data->val.val;
assert(data->kind == EXPR_ID_DATA__REF_VALUE);
return data->ref.val;
}
+
+double expr_id_data__source_count(const struct expr_id_data *data)
+{
+ assert(data->kind == EXPR_ID_DATA__VALUE);
+ return data->val.source_count;
+}
+
+double expr__get_literal(const char *literal)
+{
+ static struct cpu_topology *topology;
+
+ if (!strcmp("#smt_on", literal))
+ return smt_on() > 0 ? 1.0 : 0.0;
+
+ if (!strcmp("#num_cpus", literal))
+ return cpu__max_present_cpu();
+
+ /*
+ * Assume that topology strings are consistent, e.g. CPUs "0-1" would
+ * never also appear as "0,1"; after deduplication, the number of these
+ * strings therefore indicates the number of packages, dies, etc.
+ */
+ if (!topology) {
+ topology = cpu_topology__new();
+ if (!topology) {
+ pr_err("Error creating CPU topology");
+ return NAN;
+ }
+ }
+ if (!strcmp("#num_packages", literal))
+ return topology->package_cpus_lists;
+ if (!strcmp("#num_dies", literal))
+ return topology->die_cpus_lists;
+ if (!strcmp("#num_cores", literal))
+ return topology->core_cpus_lists;
+
+ pr_err("Unrecognized literal '%s'", literal);
+ return NAN;
+}
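The contract of expr__get_literal() above is: a known '#'-prefixed name maps to a number, an unknown name logs an error and yields NAN, which the lexer then turns into EXPR_ERROR. Here is a small standalone sketch of the same dispatch-or-NAN contract; the returned values are made up, and only the literal names mirror the real ones.

#include <math.h>
#include <stdio.h>
#include <string.h>

/* Toy version of the dispatch above: known literals get a value, unknown names -> NAN. */
static double get_literal(const char *name)
{
        if (!strcmp(name, "#smt_on"))
                return 1.0;             /* pretend SMT is enabled */
        if (!strcmp(name, "#num_cpus"))
                return 8.0;             /* pretend 8 CPUs are present */
        fprintf(stderr, "Unrecognized literal '%s'\n", name);
        return NAN;
}

int main(void)
{
        const char *names[] = { "#smt_on", "#num_cpus", "#num_bogus" };

        for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
                double v = get_literal(names[i]);

                /* The lexer maps NAN to EXPR_ERROR; here we just report it. */
                if (isnan(v))
                        printf("%s -> parse error\n", names[i]);
                else
                        printf("%s -> %g\n", names[i], v);
        }
        return 0;
}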
diff --git a/tools/perf/util/expr.h b/tools/perf/util/expr.h
index cf81f9166dbb49..bd2116983bbb52 100644
--- a/tools/perf/util/expr.h
+++ b/tools/perf/util/expr.h
@@ -40,6 +40,8 @@ void expr__ctx_free(struct expr_parse_ctx *ctx);
void expr__del_id(struct expr_parse_ctx *ctx, const char *id);
int expr__add_id(struct expr_parse_ctx *ctx, const char *id);
int expr__add_id_val(struct expr_parse_ctx *ctx, const char *id, double val);
+int expr__add_id_val_source_count(struct expr_parse_ctx *ctx, const char *id,
+ double val, int source_count);
int expr__add_ref(struct expr_parse_ctx *ctx, struct metric_ref *ref);
int expr__get_id(struct expr_parse_ctx *ctx, const char *id,
struct expr_id_data **data);
@@ -55,5 +57,7 @@ int expr__find_ids(const char *expr, const char *one,
struct expr_parse_ctx *ids);
double expr_id_data__value(const struct expr_id_data *data);
+double expr_id_data__source_count(const struct expr_id_data *data);
+double expr__get_literal(const char *literal);
#endif
diff --git a/tools/perf/util/expr.l b/tools/perf/util/expr.l
index bd20f33418bac0..0a13eb20c81470 100644
--- a/tools/perf/util/expr.l
+++ b/tools/perf/util/expr.l
@@ -6,6 +6,7 @@
#include <linux/compiler.h>
#include "expr.h"
#include "expr-bison.h"
+#include <math.h>
char *expr_get_text(yyscan_t yyscanner);
YYSTYPE *expr_get_lval(yyscan_t yyscanner);
@@ -77,6 +78,17 @@ static int str(yyscan_t scanner, int token, int runtime)
yylval->str = normalize(yylval->str, runtime);
return token;
}
+
+static int literal(yyscan_t scanner)
+{
+ YYSTYPE *yylval = expr_get_lval(scanner);
+
+ yylval->num = expr__get_literal(expr_get_text(scanner));
+ if (isnan(yylval->num))
+ return EXPR_ERROR;
+
+ return LITERAL;
+}
%}
number ([0-9]+\.?[0-9]*|[0-9]*\.?[0-9]+)
@@ -85,6 +97,7 @@ sch [-,=]
spec \\{sch}
sym [0-9a-zA-Z_\.:@?]+
symbol ({spec}|{sym})+
+literal #[0-9a-zA-Z_\.\-]+
%%
struct expr_scanner_ctx *sctx = expr_get_extra(yyscanner);
@@ -94,7 +107,8 @@ max { return MAX; }
min { return MIN; }
if { return IF; }
else { return ELSE; }
-#smt_on { return SMT_ON; }
+source_count { return SOURCE_COUNT; }
+{literal} { return literal(yyscanner); }
{number} { return value(yyscanner); }
{symbol} { return str(yyscanner, ID, sctx->runtime); }
"|" { return '|'; }
diff --git a/tools/perf/util/expr.y b/tools/perf/util/expr.y
index f969dfa525bd8a..a30b825adb7ba6 100644
--- a/tools/perf/util/expr.y
+++ b/tools/perf/util/expr.y
@@ -3,8 +3,8 @@
#define YYDEBUG 1
#include <assert.h>
#include <math.h>
+#include <stdlib.h>
#include "util/debug.h"
-#include "smt.h"
#define IN_EXPR_Y 1
#include "expr.h"
%}
@@ -37,7 +37,7 @@
} ids;
}
-%token ID NUMBER MIN MAX IF ELSE SMT_ON D_RATIO EXPR_ERROR
+%token ID NUMBER MIN MAX IF ELSE LITERAL D_RATIO SOURCE_COUNT EXPR_ERROR
%left MIN MAX IF
%left '|'
%left '^'
@@ -46,7 +46,7 @@
%left '-' '+'
%left '*' '/' '%'
%left NEG NOT
-%type <num> NUMBER
+%type <num> NUMBER LITERAL
%type <str> ID
%destructor { free ($$); } <str>
%type <ids> expr if_expr
@@ -83,6 +83,41 @@ static struct ids union_expr(struct ids ids1, struct ids ids2)
return result;
}
+static struct ids handle_id(struct expr_parse_ctx *ctx, char *id,
+ bool compute_ids, bool source_count)
+{
+ struct ids result;
+
+ if (!compute_ids) {
+ /*
+ * Compute the event's value from ID. If the ID isn't known then
+ * it isn't used to compute the formula so set to NAN.
+ */
+ struct expr_id_data *data;
+
+ result.val = NAN;
+ if (expr__resolve_id(ctx, id, &data) == 0) {
+ result.val = source_count
+ ? expr_id_data__source_count(data)
+ : expr_id_data__value(data);
+ }
+ result.ids = NULL;
+ free(id);
+ } else {
+ /*
+ * Set the value to BOTTOM to show that any value is possible
+ * when the event is computed. Create a set of just the ID.
+ */
+ result.val = BOTTOM;
+ result.ids = ids__new();
+ if (!result.ids || ids__insert(result.ids, id)) {
+ pr_err("Error creating IDs for '%s'", id);
+ free(id);
+ }
+ }
+ return result;
+}
+
/*
* If we're not computing ids or $1 and $3 are constants, compute the new
* constant value using OP. Its invariant that there are no ids. If computing
@@ -168,32 +203,8 @@ expr: NUMBER
$$.val = $1;
$$.ids = NULL;
}
-| ID
-{
- if (!compute_ids) {
- /*
- * Compute the event's value from ID. If the ID isn't known then
- * it isn't used to compute the formula so set to NAN.
- */
- struct expr_id_data *data;
-
- $$.val = NAN;
- if (expr__resolve_id(ctx, $1, &data) == 0)
- $$.val = expr_id_data__value(data);
-
- $$.ids = NULL;
- free($1);
- } else {
- /*
- * Set the value to BOTTOM to show that any value is possible
- * when the event is computed. Create a set of just the ID.
- */
- $$.val = BOTTOM;
- $$.ids = ids__new();
- if (!$$.ids || ids__insert($$.ids, $1))
- YYABORT;
- }
-}
+| ID { $$ = handle_id(ctx, $1, compute_ids, /*source_count=*/false); }
+| SOURCE_COUNT '(' ID ')' { $$ = handle_id(ctx, $3, compute_ids, /*source_count=*/true); }
| expr '|' expr { BINARY_LONG_OP($$, |, $1, $3); }
| expr '&' expr { BINARY_LONG_OP($$, &, $1, $3); }
| expr '^' expr { BINARY_LONG_OP($$, ^, $1, $3); }
@@ -280,9 +291,9 @@ expr: NUMBER
$$ = union_expr($3, $5);
}
}
-| SMT_ON
+| LITERAL
{
- $$.val = smt_on() > 0 ? 1.0 : 0.0;
+ $$.val = $1;
$$.ids = NULL;
}
;
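To make the grammar change concrete, here are a few expression strings the updated parser accepts with the LITERAL and SOURCE_COUNT productions in place. They are illustrative samples only, not taken from any shipped metric definition.

#include <stdio.h>

/* Illustrative expression strings the updated grammar accepts (not from real metric JSON). */
static const char * const sample_exprs[] = {
        "instructions / cycles",                        /* plain IDs, unchanged */
        "event / source_count(event)",                  /* average across merged event instances */
        "cycles * #num_packages",                       /* literal resolved via expr__get_literal() */
        "1 - #smt_on",                                  /* the old SMT_ON token is now an ordinary literal */
};

int main(void)
{
        for (size_t i = 0; i < sizeof(sample_exprs) / sizeof(sample_exprs[0]); i++)
                puts(sample_exprs[i]);
        return 0;
}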
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 56511db8fa0397..fda8d14c891f89 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -583,21 +583,21 @@ static int write_cpu_topology(struct feat_fd *ff,
if (!tp)
return -1;
- ret = do_write(ff, &tp->core_sib, sizeof(tp->core_sib));
+ ret = do_write(ff, &tp->package_cpus_lists, sizeof(tp->package_cpus_lists));
if (ret < 0)
goto done;
- for (i = 0; i < tp->core_sib; i++) {
- ret = do_write_string(ff, tp->core_siblings[i]);
+ for (i = 0; i < tp->package_cpus_lists; i++) {
+ ret = do_write_string(ff, tp->package_cpus_list[i]);
if (ret < 0)
goto done;
}
- ret = do_write(ff, &tp->thread_sib, sizeof(tp->thread_sib));
+ ret = do_write(ff, &tp->core_cpus_lists, sizeof(tp->core_cpus_lists));
if (ret < 0)
goto done;
- for (i = 0; i < tp->thread_sib; i++) {
- ret = do_write_string(ff, tp->thread_siblings[i]);
+ for (i = 0; i < tp->core_cpus_lists; i++) {
+ ret = do_write_string(ff, tp->core_cpus_list[i]);
if (ret < 0)
break;
}
@@ -617,15 +617,15 @@ static int write_cpu_topology(struct feat_fd *ff,
return ret;
}
- if (!tp->die_sib)
+ if (!tp->die_cpus_lists)
goto done;
- ret = do_write(ff, &tp->die_sib, sizeof(tp->die_sib));
+ ret = do_write(ff, &tp->die_cpus_lists, sizeof(tp->die_cpus_lists));
if (ret < 0)
goto done;
- for (i = 0; i < tp->die_sib; i++) {
- ret = do_write_string(ff, tp->die_siblings[i]);
+ for (i = 0; i < tp->die_cpus_lists; i++) {
+ ret = do_write_string(ff, tp->die_cpus_list[i]);
if (ret < 0)
goto done;
}
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index e4fb02b0513048..5c7308efa768a7 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -829,10 +829,12 @@ static int prepare_metric(struct evsel **metric_events,
struct saved_value *v;
struct stats *stats;
u64 metric_total = 0;
+ int source_count;
if (!strcmp(metric_events[i]->name, "duration_time")) {
stats = &walltime_nsecs_stats;
scale = 1e-9;
+ source_count = 1;
} else {
v = saved_value_lookup(metric_events[i], cpu, false,
STAT_NONE, 0, st,
@@ -841,6 +843,7 @@ static int prepare_metric(struct evsel **metric_events,
break;
stats = &v->stats;
scale = 1.0;
+ source_count = evsel__source_count(metric_events[i]);
if (v->metric_other)
metric_total = v->metric_total;
@@ -849,7 +852,9 @@ static int prepare_metric(struct evsel **metric_events,
if (!n)
return -ENOMEM;
- expr__add_id_val(pctx, n, metric_total ? : avg_stats(stats) * scale);
+ expr__add_id_val_source_count(pctx, n,
+ metric_total ? : avg_stats(stats) * scale,
+ source_count);
}
for (j = 0; metric_refs && metric_refs[j].metric_name; j++) {
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index aa1b7c12fd61da..b2ed3140a1faaa 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -274,7 +274,7 @@ struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *
if (symbol_conf.priv_size) {
if (symbol_conf.init_annotation) {
struct annotation *notes = (void *)sym;
- pthread_mutex_init(&notes->lock, NULL);
+ annotation__init(notes);
}
sym = ((void *)sym) + symbol_conf.priv_size;
}
@@ -294,6 +294,13 @@ struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *
void symbol__delete(struct symbol *sym)
{
+ if (symbol_conf.priv_size) {
+ if (symbol_conf.init_annotation) {
+ struct annotation *notes = symbol__annotation(sym);
+
+ annotation__exit(notes);
+ }
+ }
free(((void *)sym) - symbol_conf.priv_size);
}
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 166196686f2e63..fbf866d82dccdd 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -40,22 +40,33 @@ Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
GElf_Shdr *shp, const char *name, size_t *idx);
#endif
-/** struct symbol - symtab entry
- *
- * @ignore - resolvable but tools ignore it (e.g. idle routines)
+/**
+ * A symtab entry. When allocated this may be preceded by an annotation (see
+ * symbol__annotation), a browser_index (see symbol__browser_index) and rb_node
+ * to sort by name (see struct symbol_name_rb_node).
*/
struct symbol {
struct rb_node rb_node;
+ /** Range of symbol [start, end). */
u64 start;
u64 end;
+ /** Length of the string name. */
u16 namelen;
+ /** ELF symbol type as defined for st_info. E.g. STT_OBJECT or STT_FUNC. */
u8 type:4;
+ /** ELF binding type as defined for st_info. E.g. STB_WEAK or STB_GLOBAL. */
u8 binding:4;
+ /** Set true for kernel symbols of idle routines. */
u8 idle:1;
+ /** Resolvable but tools ignore it (e.g. idle routines). */
u8 ignore:1;
+ /** Symbol for an inlined function. */
u8 inlined:1;
+ /** Has symbol__annotate2 been performed. */
+ u8 annotate2:1;
+ /** Architecture specific. Unused except on PPC where it holds st_other. */
u8 arch_sym;
- bool annotate2;
+ /** The name of length namelen associated with the symbol. */
char name[];
};
diff --git a/tools/testing/selftests/bpf/prog_tests/netcnt.c b/tools/testing/selftests/bpf/prog_tests/netcnt.c
index 6ede48bde91b74..954964f0ac3db8 100644
--- a/tools/testing/selftests/bpf/prog_tests/netcnt.c
+++ b/tools/testing/selftests/bpf/prog_tests/netcnt.c
@@ -8,7 +8,7 @@
#define CG_NAME "/netcnt"
-void test_netcnt(void)
+void serial_test_netcnt(void)
{
union percpu_net_cnt *percpu_netcnt = NULL;
struct bpf_cgroup_storage_key key;
diff --git a/tools/testing/selftests/bpf/prog_tests/test_bpffs.c b/tools/testing/selftests/bpf/prog_tests/test_bpffs.c
index 172c999e523c10..d29ebfeef9c546 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_bpffs.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_bpffs.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#define _GNU_SOURCE
+#include <stdio.h>
#include <sched.h>
#include <sys/mount.h>
#include <sys/stat.h>
@@ -29,44 +30,106 @@ static int read_iter(char *file)
static int fn(void)
{
- int err, duration = 0;
+ struct stat a, b, c;
+ int err, map;
err = unshare(CLONE_NEWNS);
- if (CHECK(err, "unshare", "failed: %d\n", errno))
+ if (!ASSERT_OK(err, "unshare"))
goto out;
err = mount("", "/", "", MS_REC | MS_PRIVATE, NULL);
- if (CHECK(err, "mount /", "failed: %d\n", errno))
+ if (!ASSERT_OK(err, "mount /"))
goto out;
err = umount(TDIR);
- if (CHECK(err, "umount " TDIR, "failed: %d\n", errno))
+ if (!ASSERT_OK(err, "umount " TDIR))
goto out;
err = mount("none", TDIR, "tmpfs", 0, NULL);
- if (CHECK(err, "mount", "mount root failed: %d\n", errno))
+ if (!ASSERT_OK(err, "mount tmpfs"))
goto out;
err = mkdir(TDIR "/fs1", 0777);
- if (CHECK(err, "mkdir "TDIR"/fs1", "failed: %d\n", errno))
+ if (!ASSERT_OK(err, "mkdir " TDIR "/fs1"))
goto out;
err = mkdir(TDIR "/fs2", 0777);
- if (CHECK(err, "mkdir "TDIR"/fs2", "failed: %d\n", errno))
+ if (!ASSERT_OK(err, "mkdir " TDIR "/fs2"))
goto out;
err = mount("bpf", TDIR "/fs1", "bpf", 0, NULL);
- if (CHECK(err, "mount bpffs "TDIR"/fs1", "failed: %d\n", errno))
+ if (!ASSERT_OK(err, "mount bpffs " TDIR "/fs1"))
goto out;
err = mount("bpf", TDIR "/fs2", "bpf", 0, NULL);
- if (CHECK(err, "mount bpffs " TDIR "/fs2", "failed: %d\n", errno))
+ if (!ASSERT_OK(err, "mount bpffs " TDIR "/fs2"))
goto out;
err = read_iter(TDIR "/fs1/maps.debug");
- if (CHECK(err, "reading " TDIR "/fs1/maps.debug", "failed\n"))
+ if (!ASSERT_OK(err, "reading " TDIR "/fs1/maps.debug"))
goto out;
err = read_iter(TDIR "/fs2/progs.debug");
- if (CHECK(err, "reading " TDIR "/fs2/progs.debug", "failed\n"))
+ if (!ASSERT_OK(err, "reading " TDIR "/fs2/progs.debug"))
goto out;
+
+ err = mkdir(TDIR "/fs1/a", 0777);
+ if (!ASSERT_OK(err, "creating " TDIR "/fs1/a"))
+ goto out;
+ err = mkdir(TDIR "/fs1/a/1", 0777);
+ if (!ASSERT_OK(err, "creating " TDIR "/fs1/a/1"))
+ goto out;
+ err = mkdir(TDIR "/fs1/b", 0777);
+ if (!ASSERT_OK(err, "creating " TDIR "/fs1/b"))
+ goto out;
+
+ map = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 4, 1, 0);
+ if (!ASSERT_GT(map, 0, "create_map(ARRAY)"))
+ goto out;
+ err = bpf_obj_pin(map, TDIR "/fs1/c");
+ if (!ASSERT_OK(err, "pin map"))
+ goto out;
+ close(map);
+
+ /* Check that RENAME_EXCHANGE works for directories. */
+ err = stat(TDIR "/fs1/a", &a);
+ if (!ASSERT_OK(err, "stat(" TDIR "/fs1/a)"))
+ goto out;
+ err = renameat2(0, TDIR "/fs1/a", 0, TDIR "/fs1/b", RENAME_EXCHANGE);
+ if (!ASSERT_OK(err, "renameat2(/fs1/a, /fs1/b, RENAME_EXCHANGE)"))
+ goto out;
+ err = stat(TDIR "/fs1/b", &b);
+ if (!ASSERT_OK(err, "stat(" TDIR "/fs1/b)"))
+ goto out;
+ if (!ASSERT_EQ(a.st_ino, b.st_ino, "b should have a's inode"))
+ goto out;
+ err = access(TDIR "/fs1/b/1", F_OK);
+ if (!ASSERT_OK(err, "access(" TDIR "/fs1/b/1)"))
+ goto out;
+
+ /* Check that RENAME_EXCHANGE works for mixed file types. */
+ err = stat(TDIR "/fs1/c", &c);
+ if (!ASSERT_OK(err, "stat(" TDIR "/fs1/c)"))
+ goto out;
+ err = renameat2(0, TDIR "/fs1/c", 0, TDIR "/fs1/b", RENAME_EXCHANGE);
+ if (!ASSERT_OK(err, "renameat2(/fs1/c, /fs1/b, RENAME_EXCHANGE)"))
+ goto out;
+ err = stat(TDIR "/fs1/b", &b);
+ if (!ASSERT_OK(err, "stat(" TDIR "/fs1/b)"))
+ goto out;
+ if (!ASSERT_EQ(c.st_ino, b.st_ino, "b should have c's inode"))
+ goto out;
+ err = access(TDIR "/fs1/c/1", F_OK);
+ if (!ASSERT_OK(err, "access(" TDIR "/fs1/c/1)"))
+ goto out;
+
+ /* Check that RENAME_NOREPLACE works. */
+ err = renameat2(0, TDIR "/fs1/b", 0, TDIR "/fs1/a", RENAME_NOREPLACE);
+ if (!ASSERT_ERR(err, "renameat2(RENAME_NOREPLACE)")) {
+ err = -EINVAL;
+ goto out;
+ }
+ err = access(TDIR "/fs1/b", F_OK);
+ if (!ASSERT_OK(err, "access(" TDIR "/fs1/b)"))
+ goto out;
+
out:
umount(TDIR "/fs1");
umount(TDIR "/fs2");
diff --git a/tools/testing/selftests/bpf/progs/for_each_array_map_elem.c b/tools/testing/selftests/bpf/progs/for_each_array_map_elem.c
index df918b2469da04..52f6995ff29c6c 100644
--- a/tools/testing/selftests/bpf/progs/for_each_array_map_elem.c
+++ b/tools/testing/selftests/bpf/progs/for_each_array_map_elem.c
@@ -23,6 +23,16 @@ struct callback_ctx {
int output;
};
+const volatile int bypass_unused = 1;
+
+static __u64
+unused_subprog(struct bpf_map *map, __u32 *key, __u64 *val,
+ struct callback_ctx *data)
+{
+ data->output = 0;
+ return 1;
+}
+
static __u64
check_array_elem(struct bpf_map *map, __u32 *key, __u64 *val,
struct callback_ctx *data)
@@ -54,6 +64,8 @@ int test_pkt_access(struct __sk_buff *skb)
data.output = 0;
bpf_for_each_map_elem(&arraymap, check_array_elem, &data, 0);
+ if (!bypass_unused)
+ bpf_for_each_map_elem(&arraymap, unused_subprog, &data, 0);
arraymap_output = data.output;
bpf_for_each_map_elem(&percpu_map, check_percpu_elem, (void *)0, 0);
diff --git a/tools/testing/selftests/bpf/test_xdp_redirect_multi.sh b/tools/testing/selftests/bpf/test_xdp_redirect_multi.sh
index 351955c2bdfd80..05f8727409997d 100755
--- a/tools/testing/selftests/bpf/test_xdp_redirect_multi.sh
+++ b/tools/testing/selftests/bpf/test_xdp_redirect_multi.sh
@@ -2,11 +2,11 @@
# SPDX-License-Identifier: GPL-2.0
#
# Test topology:
-# - - - - - - - - - - - - - - - - - - - - - - - - -
-# | veth1 veth2 veth3 | ... init net
+# - - - - - - - - - - - - - - - - - - -
+# | veth1 veth2 veth3 | ns0
# - -| - - - - - - | - - - - - - | - -
# --------- --------- ---------
-# | veth0 | | veth0 | | veth0 | ...
+# | veth0 | | veth0 | | veth0 |
# --------- --------- ---------
# ns1 ns2 ns3
#
@@ -31,6 +31,7 @@ IFACES=""
DRV_MODE="xdpgeneric xdpdrv xdpegress"
PASS=0
FAIL=0
+LOG_DIR=$(mktemp -d)
test_pass()
{
@@ -50,6 +51,7 @@ clean_up()
ip link del veth$i 2> /dev/null
ip netns del ns$i 2> /dev/null
done
+ ip netns del ns0 2> /dev/null
}
# Kselftest framework requirement - SKIP code is 4.
@@ -77,10 +79,12 @@ setup_ns()
mode="xdpdrv"
fi
+ ip netns add ns0
for i in $(seq $NUM); do
ip netns add ns$i
- ip link add veth$i type veth peer name veth0 netns ns$i
- ip link set veth$i up
+ ip -n ns$i link add veth0 index 2 type veth \
+ peer name veth$i netns ns0 index $((1 + $i))
+ ip -n ns0 link set veth$i up
ip -n ns$i link set veth0 up
ip -n ns$i addr add 192.0.2.$i/24 dev veth0
@@ -91,7 +95,7 @@ setup_ns()
xdp_dummy.o sec xdp &> /dev/null || \
{ test_fail "Unable to load dummy xdp" && exit 1; }
IFACES="$IFACES veth$i"
- veth_mac[$i]=$(ip link show veth$i | awk '/link\/ether/ {print $2}')
+ veth_mac[$i]=$(ip -n ns0 link show veth$i | awk '/link\/ether/ {print $2}')
done
}
@@ -100,17 +104,17 @@ do_egress_tests()
local mode=$1
# mac test
- ip netns exec ns2 tcpdump -e -i veth0 -nn -l -e &> mac_ns1-2_${mode}.log &
- ip netns exec ns3 tcpdump -e -i veth0 -nn -l -e &> mac_ns1-3_${mode}.log &
+ ip netns exec ns2 tcpdump -e -i veth0 -nn -l -e &> ${LOG_DIR}/mac_ns1-2_${mode}.log &
+ ip netns exec ns3 tcpdump -e -i veth0 -nn -l -e &> ${LOG_DIR}/mac_ns1-3_${mode}.log &
sleep 0.5
ip netns exec ns1 ping 192.0.2.254 -i 0.1 -c 4 &> /dev/null
sleep 0.5
- pkill -9 tcpdump
+ pkill tcpdump
# mac check
- grep -q "${veth_mac[2]} > ff:ff:ff:ff:ff:ff" mac_ns1-2_${mode}.log && \
+ grep -q "${veth_mac[2]} > ff:ff:ff:ff:ff:ff" ${LOG_DIR}/mac_ns1-2_${mode}.log && \
test_pass "$mode mac ns1-2" || test_fail "$mode mac ns1-2"
- grep -q "${veth_mac[3]} > ff:ff:ff:ff:ff:ff" mac_ns1-3_${mode}.log && \
+ grep -q "${veth_mac[3]} > ff:ff:ff:ff:ff:ff" ${LOG_DIR}/mac_ns1-3_${mode}.log && \
test_pass "$mode mac ns1-3" || test_fail "$mode mac ns1-3"
}
@@ -121,46 +125,46 @@ do_ping_tests()
# ping6 test: echo request should be redirect back to itself, not others
ip netns exec ns1 ip neigh add 2001:db8::2 dev veth0 lladdr 00:00:00:00:00:02
- ip netns exec ns1 tcpdump -i veth0 -nn -l -e &> ns1-1_${mode}.log &
- ip netns exec ns2 tcpdump -i veth0 -nn -l -e &> ns1-2_${mode}.log &
- ip netns exec ns3 tcpdump -i veth0 -nn -l -e &> ns1-3_${mode}.log &
+ ip netns exec ns1 tcpdump -i veth0 -nn -l -e &> ${LOG_DIR}/ns1-1_${mode}.log &
+ ip netns exec ns2 tcpdump -i veth0 -nn -l -e &> ${LOG_DIR}/ns1-2_${mode}.log &
+ ip netns exec ns3 tcpdump -i veth0 -nn -l -e &> ${LOG_DIR}/ns1-3_${mode}.log &
sleep 0.5
# ARP test
- ip netns exec ns1 ping 192.0.2.254 -i 0.1 -c 4 &> /dev/null
+ ip netns exec ns1 arping -q -c 2 -I veth0 192.0.2.254
# IPv4 test
ip netns exec ns1 ping 192.0.2.253 -i 0.1 -c 4 &> /dev/null
# IPv6 test
ip netns exec ns1 ping6 2001:db8::2 -i 0.1 -c 2 &> /dev/null
sleep 0.5
- pkill -9 tcpdump
+ pkill tcpdump
# All netns should receive the redirect arp requests
- [ $(grep -c "who-has 192.0.2.254" ns1-1_${mode}.log) -gt 4 ] && \
+ [ $(grep -cF "who-has 192.0.2.254" ${LOG_DIR}/ns1-1_${mode}.log) -eq 4 ] && \
test_pass "$mode arp(F_BROADCAST) ns1-1" || \
test_fail "$mode arp(F_BROADCAST) ns1-1"
- [ $(grep -c "who-has 192.0.2.254" ns1-2_${mode}.log) -le 4 ] && \
+ [ $(grep -cF "who-has 192.0.2.254" ${LOG_DIR}/ns1-2_${mode}.log) -eq 2 ] && \
test_pass "$mode arp(F_BROADCAST) ns1-2" || \
test_fail "$mode arp(F_BROADCAST) ns1-2"
- [ $(grep -c "who-has 192.0.2.254" ns1-3_${mode}.log) -le 4 ] && \
+ [ $(grep -cF "who-has 192.0.2.254" ${LOG_DIR}/ns1-3_${mode}.log) -eq 2 ] && \
test_pass "$mode arp(F_BROADCAST) ns1-3" || \
test_fail "$mode arp(F_BROADCAST) ns1-3"
# ns1 should not receive the redirect echo request, others should
- [ $(grep -c "ICMP echo request" ns1-1_${mode}.log) -eq 4 ] && \
+ [ $(grep -c "ICMP echo request" ${LOG_DIR}/ns1-1_${mode}.log) -eq 4 ] && \
test_pass "$mode IPv4 (F_BROADCAST|F_EXCLUDE_INGRESS) ns1-1" || \
test_fail "$mode IPv4 (F_BROADCAST|F_EXCLUDE_INGRESS) ns1-1"
- [ $(grep -c "ICMP echo request" ns1-2_${mode}.log) -eq 4 ] && \
+ [ $(grep -c "ICMP echo request" ${LOG_DIR}/ns1-2_${mode}.log) -eq 4 ] && \
test_pass "$mode IPv4 (F_BROADCAST|F_EXCLUDE_INGRESS) ns1-2" || \
test_fail "$mode IPv4 (F_BROADCAST|F_EXCLUDE_INGRESS) ns1-2"
- [ $(grep -c "ICMP echo request" ns1-3_${mode}.log) -eq 4 ] && \
+ [ $(grep -c "ICMP echo request" ${LOG_DIR}/ns1-3_${mode}.log) -eq 4 ] && \
test_pass "$mode IPv4 (F_BROADCAST|F_EXCLUDE_INGRESS) ns1-3" || \
test_fail "$mode IPv4 (F_BROADCAST|F_EXCLUDE_INGRESS) ns1-3"
# ns1 should receive the echo request, ns2 should not
- [ $(grep -c "ICMP6, echo request" ns1-1_${mode}.log) -eq 4 ] && \
+ [ $(grep -c "ICMP6, echo request" ${LOG_DIR}/ns1-1_${mode}.log) -eq 4 ] && \
test_pass "$mode IPv6 (no flags) ns1-1" || \
test_fail "$mode IPv6 (no flags) ns1-1"
- [ $(grep -c "ICMP6, echo request" ns1-2_${mode}.log) -eq 0 ] && \
+ [ $(grep -c "ICMP6, echo request" ${LOG_DIR}/ns1-2_${mode}.log) -eq 0 ] && \
test_pass "$mode IPv6 (no flags) ns1-2" || \
test_fail "$mode IPv6 (no flags) ns1-2"
}
@@ -176,9 +180,13 @@ do_tests()
xdpgeneric) drv_p="-S";;
esac
- ./xdp_redirect_multi $drv_p $IFACES &> xdp_redirect_${mode}.log &
+ ip netns exec ns0 ./xdp_redirect_multi $drv_p $IFACES &> ${LOG_DIR}/xdp_redirect_${mode}.log &
xdp_pid=$!
sleep 1
+ if ! ps -p $xdp_pid > /dev/null; then
+ test_fail "$mode xdp_redirect_multi start failed"
+ return 1
+ fi
if [ "$mode" = "xdpegress" ]; then
do_egress_tests $mode
@@ -189,16 +197,16 @@ do_tests()
kill $xdp_pid
}
-trap clean_up 0 2 3 6 9
+trap clean_up EXIT
check_env
-rm -f xdp_redirect_*.log ns*.log mac_ns*.log
for mode in ${DRV_MODE}; do
setup_ns $mode
do_tests $mode
clean_up
done
+rm -rf ${LOG_DIR}
echo "Summary: PASS $PASS, FAIL $FAIL"
[ $FAIL -eq 0 ] && exit 0 || exit 1
diff --git a/tools/testing/selftests/bpf/verifier/spill_fill.c b/tools/testing/selftests/bpf/verifier/spill_fill.c
index c9991c3f3bd274..7ab3de10876146 100644
--- a/tools/testing/selftests/bpf/verifier/spill_fill.c
+++ b/tools/testing/selftests/bpf/verifier/spill_fill.c
@@ -265,3 +265,20 @@
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
+{
+ "Spill a u32 scalar at fp-4 and then at fp-8",
+ .insns = {
+ /* r4 = 4321 */
+ BPF_MOV32_IMM(BPF_REG_4, 4321),
+ /* *(u32 *)(r10 -4) = r4 */
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -4),
+ /* *(u32 *)(r10 -8) = r4 */
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
+ /* r4 = *(u64 *)(r10 -8) */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
diff --git a/tools/testing/selftests/bpf/xdp_redirect_multi.c b/tools/testing/selftests/bpf/xdp_redirect_multi.c
index 3696a8f32c235f..f5ffba341c1743 100644
--- a/tools/testing/selftests/bpf/xdp_redirect_multi.c
+++ b/tools/testing/selftests/bpf/xdp_redirect_multi.c
@@ -129,7 +129,7 @@ int main(int argc, char **argv)
goto err_out;
}
- printf("Get interfaces");
+ printf("Get interfaces:");
for (i = 0; i < MAX_IFACE_NUM && argv[optind + i]; i++) {
ifaces[i] = if_nametoindex(argv[optind + i]);
if (!ifaces[i])
@@ -139,7 +139,7 @@ int main(int argc, char **argv)
goto err_out;
}
if (ifaces[i] > MAX_INDEX_NUM) {
- printf("Interface index to large\n");
+ printf(" interface index too large\n");
goto err_out;
}
printf(" %d", ifaces[i]);
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index c23e89dea0b661..c4e34717826aaf 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -73,7 +73,8 @@ TEST_GEN_PROGS_x86_64 += x86_64/tsc_msrs_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_msrs_test
TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test
TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test
+TEST_GEN_PROGS_x86_64 += x86_64/sev_migrate_tests
TEST_GEN_PROGS_x86_64 += access_tracking_perf_test
TEST_GEN_PROGS_x86_64 += demand_paging_test
TEST_GEN_PROGS_x86_64 += dirty_log_test
TEST_GEN_PROGS_x86_64 += dirty_log_perf_test
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index f6b3794f306b47..6a1a37f30494b3 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -82,6 +82,7 @@ struct vm_guest_mode_params {
};
extern const struct vm_guest_mode_params vm_guest_mode_params[];
+int open_path_or_exit(const char *path, int flags);
int open_kvm_dev_path_or_exit(void);
int kvm_check_cap(long cap);
int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap);
diff --git a/tools/testing/selftests/kvm/include/x86_64/svm_util.h b/tools/testing/selftests/kvm/include/x86_64/svm_util.h
index b7531c83b8ae0a..587fbe408b99a2 100644
--- a/tools/testing/selftests/kvm/include/x86_64/svm_util.h
+++ b/tools/testing/selftests/kvm/include/x86_64/svm_util.h
@@ -46,4 +46,6 @@ static inline bool cpu_has_svm(void)
return ecx & CPUID_SVM;
}
+int open_sev_dev_path_or_exit(void);
+
#endif /* SELFTEST_KVM_SVM_UTILS_H */
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 041004c0fda760..14bb4d5b6bb7df 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -31,6 +31,19 @@ static void *align(void *x, size_t size)
return (void *) (((size_t) x + mask) & ~mask);
}
+int open_path_or_exit(const char *path, int flags)
+{
+ int fd;
+
+ fd = open(path, flags);
+ if (fd < 0) {
+ print_skip("%s not available (errno: %d)", path, errno);
+ exit(KSFT_SKIP);
+ }
+
+ return fd;
+}
+
/*
* Open KVM_DEV_PATH if available, otherwise exit the entire program.
*
@@ -42,16 +55,7 @@ static void *align(void *x, size_t size)
*/
static int _open_kvm_dev_path_or_exit(int flags)
{
- int fd;
-
- fd = open(KVM_DEV_PATH, flags);
- if (fd < 0) {
- print_skip("%s not available, is KVM loaded? (errno: %d)",
- KVM_DEV_PATH, errno);
- exit(KSFT_SKIP);
- }
-
- return fd;
+ return open_path_or_exit(KVM_DEV_PATH, flags);
}
int open_kvm_dev_path_or_exit(void)
diff --git a/tools/testing/selftests/kvm/lib/x86_64/svm.c b/tools/testing/selftests/kvm/lib/x86_64/svm.c
index 161eba7cd1289b..0ebc03ce079ca7 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/svm.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/svm.c
@@ -13,6 +13,8 @@
#include "processor.h"
#include "svm_util.h"
+#define SEV_DEV_PATH "/dev/sev"
+
struct gpr64_regs guest_regs;
u64 rflags;
@@ -172,3 +174,14 @@ void nested_svm_check_supported(void)
exit(KSFT_SKIP);
}
}
+
+/*
+ * Open SEV_DEV_PATH if available, otherwise exit the entire program.
+ *
+ * Return:
+ * The opened file descriptor of /dev/sev.
+ */
+int open_sev_dev_path_or_exit(void)
+{
+ return open_path_or_exit(SEV_DEV_PATH, 0);
+}
diff --git a/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c b/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c
new file mode 100644
index 00000000000000..5ba325cd64bfd8
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kvm.h>
+#include <linux/psp-sev.h>
+#include <stdio.h>
+#include <sys/ioctl.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <pthread.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "svm_util.h"
+#include "kselftest.h"
+#include "../lib/kvm_util_internal.h"
+
+#define SEV_POLICY_ES 0b100
+
+#define NR_MIGRATE_TEST_VCPUS 4
+#define NR_MIGRATE_TEST_VMS 3
+#define NR_LOCK_TESTING_THREADS 3
+#define NR_LOCK_TESTING_ITERATIONS 10000
+
+static void sev_ioctl(int vm_fd, int cmd_id, void *data)
+{
+ struct kvm_sev_cmd cmd = {
+ .id = cmd_id,
+ .data = (uint64_t)data,
+ .sev_fd = open_sev_dev_path_or_exit(),
+ };
+ int ret;
+
+ ret = ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
+ TEST_ASSERT((ret == 0 || cmd.error == SEV_RET_SUCCESS),
+ "%d failed: return code: %d, errno: %d, fw error: %d",
+ cmd_id, ret, errno, cmd.error);
+}
+
+static struct kvm_vm *sev_vm_create(bool es)
+{
+ struct kvm_vm *vm;
+ struct kvm_sev_launch_start start = { 0 };
+ int i;
+
+ vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
+ sev_ioctl(vm->fd, es ? KVM_SEV_ES_INIT : KVM_SEV_INIT, NULL);
+ for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
+ vm_vcpu_add(vm, i);
+ if (es)
+ start.policy |= SEV_POLICY_ES;
+ sev_ioctl(vm->fd, KVM_SEV_LAUNCH_START, &start);
+ if (es)
+ sev_ioctl(vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);
+ return vm;
+}
+
+static struct kvm_vm *__vm_create(void)
+{
+ struct kvm_vm *vm;
+ int i;
+
+ vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
+ for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
+ vm_vcpu_add(vm, i);
+
+ return vm;
+}
+
+static int __sev_migrate_from(int dst_fd, int src_fd)
+{
+ struct kvm_enable_cap cap = {
+ .cap = KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM,
+ .args = { src_fd }
+ };
+
+ return ioctl(dst_fd, KVM_ENABLE_CAP, &cap);
+}
+
+
+static void sev_migrate_from(int dst_fd, int src_fd)
+{
+ int ret;
+
+ ret = __sev_migrate_from(dst_fd, src_fd);
+ TEST_ASSERT(!ret, "Migration failed, ret: %d, errno: %d\n", ret, errno);
+}
+
+static void test_sev_migrate_from(bool es)
+{
+ struct kvm_vm *src_vm;
+ struct kvm_vm *dst_vms[NR_MIGRATE_TEST_VMS];
+ int i;
+
+ src_vm = sev_vm_create(es);
+ for (i = 0; i < NR_MIGRATE_TEST_VMS; ++i)
+ dst_vms[i] = __vm_create();
+
+ /* Initial migration from the src to the first dst. */
+ sev_migrate_from(dst_vms[0]->fd, src_vm->fd);
+
+ for (i = 1; i < NR_MIGRATE_TEST_VMS; i++)
+ sev_migrate_from(dst_vms[i]->fd, dst_vms[i - 1]->fd);
+
+ /* Migrate the guest back to the original VM. */
+ sev_migrate_from(src_vm->fd, dst_vms[NR_MIGRATE_TEST_VMS - 1]->fd);
+
+ kvm_vm_free(src_vm);
+ for (i = 0; i < NR_MIGRATE_TEST_VMS; ++i)
+ kvm_vm_free(dst_vms[i]);
+}
+
+struct locking_thread_input {
+ struct kvm_vm *vm;
+ int source_fds[NR_LOCK_TESTING_THREADS];
+};
+
+static void *locking_test_thread(void *arg)
+{
+ int i, j;
+ struct locking_thread_input *input = (struct locking_thread_input *)arg;
+
+ for (i = 0; i < NR_LOCK_TESTING_ITERATIONS; ++i) {
+ j = i % NR_LOCK_TESTING_THREADS;
+ __sev_migrate_from(input->vm->fd, input->source_fds[j]);
+ }
+
+ return NULL;
+}
+
+static void test_sev_migrate_locking(void)
+{
+ struct locking_thread_input input[NR_LOCK_TESTING_THREADS];
+ pthread_t pt[NR_LOCK_TESTING_THREADS];
+ int i;
+
+ for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i) {
+ input[i].vm = sev_vm_create(/* es= */ false);
+ input[0].source_fds[i] = input[i].vm->fd;
+ }
+ for (i = 1; i < NR_LOCK_TESTING_THREADS; ++i)
+ memcpy(input[i].source_fds, input[0].source_fds,
+ sizeof(input[i].source_fds));
+
+ for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i)
+ pthread_create(&pt[i], NULL, locking_test_thread, &input[i]);
+
+ for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i)
+ pthread_join(pt[i], NULL);
+}
+
+static void test_sev_migrate_parameters(void)
+{
+ struct kvm_vm *sev_vm, *sev_es_vm, *vm_no_vcpu, *vm_no_sev,
+ *sev_es_vm_no_vmsa;
+ int ret;
+
+ sev_vm = sev_vm_create(/* es= */ false);
+ sev_es_vm = sev_vm_create(/* es= */ true);
+ vm_no_vcpu = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
+ vm_no_sev = __vm_create();
+ sev_es_vm_no_vmsa = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
+ sev_ioctl(sev_es_vm_no_vmsa->fd, KVM_SEV_ES_INIT, NULL);
+ vm_vcpu_add(sev_es_vm_no_vmsa, 1);
+
+
+ ret = __sev_migrate_from(sev_vm->fd, sev_es_vm->fd);
+ TEST_ASSERT(
+ ret == -1 && errno == EINVAL,
+ "Should not be able migrate to SEV enabled VM. ret: %d, errno: %d\n",
+ ret, errno);
+
+ ret = __sev_migrate_from(sev_es_vm->fd, sev_vm->fd);
+ TEST_ASSERT(
+ ret == -1 && errno == EINVAL,
+ "Should not be able migrate to SEV-ES enabled VM. ret: %d, errno: %d\n",
+ ret, errno);
+
+ ret = __sev_migrate_from(vm_no_vcpu->fd, sev_es_vm->fd);
+ TEST_ASSERT(
+ ret == -1 && errno == EINVAL,
+ "SEV-ES migrations require same number of vCPUS. ret: %d, errno: %d\n",
+ ret, errno);
+
+ ret = __sev_migrate_from(vm_no_vcpu->fd, sev_es_vm_no_vmsa->fd);
+ TEST_ASSERT(
+ ret == -1 && errno == EINVAL,
+ "SEV-ES migrations require UPDATE_VMSA. ret %d, errno: %d\n",
+ ret, errno);
+
+ ret = __sev_migrate_from(vm_no_vcpu->fd, vm_no_sev->fd);
+ TEST_ASSERT(ret == -1 && errno == EINVAL,
+ "Migrations require SEV enabled. ret %d, errno: %d\n", ret,
+ errno);
+}
+
+int main(int argc, char *argv[])
+{
+ test_sev_migrate_from(/* es= */ false);
+ test_sev_migrate_from(/* es= */ true);
+ test_sev_migrate_locking();
+ test_sev_migrate_parameters();
+ return 0;
+}
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index aee76d1bb9da1b..7615f29831ebd6 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -12,7 +12,7 @@ TEST_PROGS += udpgro_bench.sh udpgro.sh test_vxlan_under_vrf.sh reuseport_addr_a
TEST_PROGS += test_vxlan_fdb_changelink.sh so_txtime.sh ipv6_flowlabel.sh
TEST_PROGS += tcp_fastopen_backup_key.sh fcnal-test.sh l2tp.sh traceroute.sh
TEST_PROGS += fin_ack_lat.sh fib_nexthop_multiprefix.sh fib_nexthops.sh
-TEST_PROGS += altnames.sh icmp_redirect.sh ip6_gre_headroom.sh
+TEST_PROGS += altnames.sh icmp.sh icmp_redirect.sh ip6_gre_headroom.sh
TEST_PROGS += route_localnet.sh
TEST_PROGS += reuseaddr_ports_exhausted.sh
TEST_PROGS += txtimestamp.sh
@@ -30,7 +30,12 @@ TEST_PROGS += ioam6.sh
TEST_PROGS += gro.sh
TEST_PROGS += gre_gso.sh
TEST_PROGS += cmsg_so_mark.sh
-TEST_PROGS_EXTENDED := in_netns.sh
+TEST_PROGS += srv6_end_dt46_l3vpn_test.sh
+TEST_PROGS += srv6_end_dt4_l3vpn_test.sh
+TEST_PROGS += srv6_end_dt6_l3vpn_test.sh
+TEST_PROGS += vrf_strict_mode_test.sh
+TEST_PROGS_EXTENDED := in_netns.sh setup_loopback.sh setup_veth.sh
+TEST_PROGS_EXTENDED += toeplitz_client.sh toeplitz.sh
TEST_GEN_FILES = socket nettest
TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy reuseport_addr_any
TEST_GEN_FILES += tcp_mmap tcp_inq psock_snd txring_overwrite
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh
index f8cda822c1cec3..1b27f2b0f19606 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh
@@ -80,7 +80,7 @@ test_gretap()
test_ip6gretap()
{
- test_vlan_match gt6 'skip_hw vlan_id 555 vlan_ethtype ip' \
+ test_vlan_match gt6 'skip_hw vlan_id 555 vlan_ethtype ipv6' \
"mirror to ip6gretap"
}
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
index 472bd023e2a5fb..aff88f78e33915 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
@@ -74,7 +74,7 @@ test_span_gre_ttl()
mirror_install $swp1 ingress $tundev "matchall $tcflags"
tc filter add dev $h3 ingress pref 77 prot $prot \
- flower ip_ttl 50 action pass
+ flower skip_hw ip_ttl 50 action pass
mirror_test v$h1 192.0.2.1 192.0.2.2 $h3 77 0
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
index 880e3ab9d088df..c8a9b5bd841fe0 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
@@ -141,7 +141,7 @@ test_gretap()
test_ip6gretap()
{
- test_vlan_match gt6 'skip_hw vlan_id 555 vlan_ethtype ip' \
+ test_vlan_match gt6 'skip_hw vlan_id 555 vlan_ethtype ipv6' \
"mirror to ip6gretap"
}
@@ -218,6 +218,7 @@ test_ip6gretap_forbidden_egress()
test_span_gre_untagged_egress()
{
local tundev=$1; shift
+ local ul_proto=$1; shift
local what=$1; shift
RET=0
@@ -225,7 +226,7 @@ test_span_gre_untagged_egress()
mirror_install $swp1 ingress $tundev "matchall $tcflags"
quick_test_span_gre_dir $tundev ingress
- quick_test_span_vlan_dir $h3 555 ingress
+ quick_test_span_vlan_dir $h3 555 ingress "$ul_proto"
h3_addr_add_del del $h3.555
bridge vlan add dev $swp3 vid 555 pvid untagged
@@ -233,7 +234,7 @@ test_span_gre_untagged_egress()
sleep 5
quick_test_span_gre_dir $tundev ingress
- fail_test_span_vlan_dir $h3 555 ingress
+ fail_test_span_vlan_dir $h3 555 ingress "$ul_proto"
h3_addr_add_del del $h3
bridge vlan add dev $swp3 vid 555
@@ -241,7 +242,7 @@ test_span_gre_untagged_egress()
sleep 5
quick_test_span_gre_dir $tundev ingress
- quick_test_span_vlan_dir $h3 555 ingress
+ quick_test_span_vlan_dir $h3 555 ingress "$ul_proto"
mirror_uninstall $swp1 ingress
@@ -250,12 +251,12 @@ test_span_gre_untagged_egress()
test_gretap_untagged_egress()
{
- test_span_gre_untagged_egress gt4 "mirror to gretap"
+ test_span_gre_untagged_egress gt4 ip "mirror to gretap"
}
test_ip6gretap_untagged_egress()
{
- test_span_gre_untagged_egress gt6 "mirror to ip6gretap"
+ test_span_gre_untagged_egress gt6 ipv6 "mirror to ip6gretap"
}
test_span_gre_fdb_roaming()
diff --git a/tools/testing/selftests/net/forwarding/mirror_lib.sh b/tools/testing/selftests/net/forwarding/mirror_lib.sh
index 6406cd76a19d8f..3e8ebeff3019ff 100644
--- a/tools/testing/selftests/net/forwarding/mirror_lib.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_lib.sh
@@ -115,13 +115,14 @@ do_test_span_vlan_dir_ips()
local dev=$1; shift
local vid=$1; shift
local direction=$1; shift
+ local ul_proto=$1; shift
local ip1=$1; shift
local ip2=$1; shift
# Install the capture as skip_hw to avoid double-counting of packets.
# The traffic is meant for local box anyway, so will be trapped to
# kernel.
- vlan_capture_install $dev "skip_hw vlan_id $vid vlan_ethtype ip"
+ vlan_capture_install $dev "skip_hw vlan_id $vid vlan_ethtype $ul_proto"
mirror_test v$h1 $ip1 $ip2 $dev 100 $expect
mirror_test v$h2 $ip2 $ip1 $dev 100 $expect
vlan_capture_uninstall $dev
diff --git a/tools/testing/selftests/net/forwarding/mirror_vlan.sh b/tools/testing/selftests/net/forwarding/mirror_vlan.sh
index 9ab2ce77b332cb..0b44e148235e01 100755
--- a/tools/testing/selftests/net/forwarding/mirror_vlan.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_vlan.sh
@@ -85,9 +85,9 @@ test_tagged_vlan_dir()
RET=0
mirror_install $swp1 $direction $swp3.555 "matchall $tcflags"
- do_test_span_vlan_dir_ips 10 "$h3.555" 111 "$direction" \
+ do_test_span_vlan_dir_ips 10 "$h3.555" 111 "$direction" ip \
192.0.2.17 192.0.2.18
- do_test_span_vlan_dir_ips 0 "$h3.555" 555 "$direction" \
+ do_test_span_vlan_dir_ips 0 "$h3.555" 555 "$direction" ip \
192.0.2.17 192.0.2.18
mirror_uninstall $swp1 $direction
diff --git a/tools/testing/selftests/net/gre_gso.sh b/tools/testing/selftests/net/gre_gso.sh
index facbb0c8044396..fdeb44d621eb93 100755
--- a/tools/testing/selftests/net/gre_gso.sh
+++ b/tools/testing/selftests/net/gre_gso.sh
@@ -116,17 +116,18 @@ gre_gst_test_checks()
{
local name=$1
local addr=$2
+ local proto=$3
- $NS_EXEC nc -kl $port >/dev/null &
+ $NS_EXEC nc $proto -kl $port >/dev/null &
PID=$!
while ! $NS_EXEC ss -ltn | grep -q $port; do ((i++)); sleep 0.01; done
- cat $TMPFILE | timeout 1 nc $addr $port
+ cat $TMPFILE | timeout 1 nc $proto -N $addr $port
log_test $? 0 "$name - copy file w/ TSO"
ethtool -K veth0 tso off
- cat $TMPFILE | timeout 1 nc $addr $port
+ cat $TMPFILE | timeout 1 nc $proto -N $addr $port
log_test $? 0 "$name - copy file w/ GSO"
ethtool -K veth0 tso on
@@ -155,7 +156,7 @@ gre6_gso_test()
sleep 2
gre_gst_test_checks GREv6/v4 172.16.2.2
- gre_gst_test_checks GREv6/v6 2001:db8:1::2
+ gre_gst_test_checks GREv6/v6 2001:db8:1::2 -6
cleanup
}
diff --git a/tools/testing/selftests/net/reuseport_bpf_numa.c b/tools/testing/selftests/net/reuseport_bpf_numa.c
index c9f478b40996d3..b2eebf669b8cfe 100644
--- a/tools/testing/selftests/net/reuseport_bpf_numa.c
+++ b/tools/testing/selftests/net/reuseport_bpf_numa.c
@@ -211,12 +211,16 @@ static void test(int *rcv_fd, int len, int family, int proto)
/* Forward iterate */
for (node = 0; node < len; ++node) {
+ if (!numa_bitmask_isbitset(numa_nodes_ptr, node))
+ continue;
send_from_node(node, family, proto);
receive_on_node(rcv_fd, len, epfd, node, proto);
}
/* Reverse iterate */
for (node = len - 1; node >= 0; --node) {
+ if (!numa_bitmask_isbitset(numa_nodes_ptr, node))
+ continue;
send_from_node(node, family, proto);
receive_on_node(rcv_fd, len, epfd, node, proto);
}
diff --git a/tools/testing/selftests/net/test_vxlan_under_vrf.sh b/tools/testing/selftests/net/test_vxlan_under_vrf.sh
index 534c8b7699ab98..ea5a7a808f120a 100755
--- a/tools/testing/selftests/net/test_vxlan_under_vrf.sh
+++ b/tools/testing/selftests/net/test_vxlan_under_vrf.sh
@@ -101,6 +101,8 @@ setup-vm() {
ip -netns hv-$id link set veth-tap master br0
ip -netns hv-$id link set veth-tap up
+ ip link set veth-hv address 02:1d:8d:dd:0c:6$id
+
ip link set veth-hv netns vm-$id
ip -netns vm-$id addr add 10.0.0.$id/24 dev veth-hv
ip -netns vm-$id link set veth-hv up
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
index d3047e251fe938..e61fc4c32ba23a 100644
--- a/tools/testing/selftests/net/tls.c
+++ b/tools/testing/selftests/net/tls.c
@@ -654,7 +654,6 @@ TEST_F(tls, recvmsg_single_max)
TEST_F(tls, recvmsg_multiple)
{
unsigned int msg_iovlen = 1024;
- unsigned int len_compared = 0;
struct iovec vec[1024];
char *iov_base[1024];
unsigned int iov_len = 16;
@@ -675,8 +674,6 @@ TEST_F(tls, recvmsg_multiple)
hdr.msg_iovlen = msg_iovlen;
hdr.msg_iov = vec;
EXPECT_NE(recvmsg(self->cfd, &hdr, 0), -1);
- for (i = 0; i < msg_iovlen; i++)
- len_compared += iov_len;
for (i = 0; i < msg_iovlen; i++)
free(iov_base[i]);
diff --git a/tools/testing/selftests/net/udpgso_bench_rx.c b/tools/testing/selftests/net/udpgso_bench_rx.c
index 76a24052f4b476..6a193425c367fc 100644
--- a/tools/testing/selftests/net/udpgso_bench_rx.c
+++ b/tools/testing/selftests/net/udpgso_bench_rx.c
@@ -293,19 +293,17 @@ static void usage(const char *filepath)
static void parse_opts(int argc, char **argv)
{
+ const char *bind_addr = NULL;
int c;
- /* bind to any by default */
- setup_sockaddr(PF_INET6, "::", &cfg_bind_addr);
while ((c = getopt(argc, argv, "4b:C:Gl:n:p:rR:S:tv")) != -1) {
switch (c) {
case '4':
cfg_family = PF_INET;
cfg_alen = sizeof(struct sockaddr_in);
- setup_sockaddr(PF_INET, "0.0.0.0", &cfg_bind_addr);
break;
case 'b':
- setup_sockaddr(cfg_family, optarg, &cfg_bind_addr);
+ bind_addr = optarg;
break;
case 'C':
cfg_connect_timeout_ms = strtoul(optarg, NULL, 0);
@@ -341,6 +339,11 @@ static void parse_opts(int argc, char **argv)
}
}
+ if (!bind_addr)
+ bind_addr = cfg_family == PF_INET6 ? "::" : "0.0.0.0";
+
+ setup_sockaddr(cfg_family, bind_addr, &cfg_bind_addr);
+
if (optind != argc)
usage(argv[0]);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 3f6d450355f062..d3172450050184 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3747,7 +3747,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
struct kvm_fpu *fpu = NULL;
struct kvm_sregs *kvm_sregs = NULL;
- if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_bugged)
+ if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
return -EIO;
if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
@@ -3957,7 +3957,7 @@ static long kvm_vcpu_compat_ioctl(struct file *filp,
void __user *argp = compat_ptr(arg);
int r;
- if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_bugged)
+ if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
return -EIO;
switch (ioctl) {
@@ -4023,7 +4023,7 @@ static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
{
struct kvm_device *dev = filp->private_data;
- if (dev->kvm->mm != current->mm || dev->kvm->vm_bugged)
+ if (dev->kvm->mm != current->mm || dev->kvm->vm_dead)
return -EIO;
switch (ioctl) {
@@ -4345,7 +4345,7 @@ static long kvm_vm_ioctl(struct file *filp,
void __user *argp = (void __user *)arg;
int r;
- if (kvm->mm != current->mm || kvm->vm_bugged)
+ if (kvm->mm != current->mm || kvm->vm_dead)
return -EIO;
switch (ioctl) {
case KVM_CREATE_VCPU:
@@ -4556,7 +4556,7 @@ static long kvm_vm_compat_ioctl(struct file *filp,
struct kvm *kvm = filp->private_data;
int r;
- if (kvm->mm != current->mm || kvm->vm_bugged)
+ if (kvm->mm != current->mm || kvm->vm_dead)
return -EIO;
switch (ioctl) {
#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT