diff --git a/include/kernel-defaults.mk b/include/kernel-defaults.mk index 5a061a5178..af1d59c481 100644 --- a/include/kernel-defaults.mk +++ b/include/kernel-defaults.mk @@ -174,13 +174,14 @@ ifeq ($(CONFIG_TARGET_ROOTFS_INITRAMFS_SEPARATE),y) ifneq ($(qstrip $(CONFIG_EXTERNAL_CPIO)),) $(CP) $(CONFIG_EXTERNAL_CPIO) $(KERNEL_BUILD_DIR)/initrd.cpio else - ( cd $(TARGET_DIR); find . | $(STAGING_DIR_HOST)/bin/cpio -o -H newc -R 0:0 > $(KERNEL_BUILD_DIR)/initrd.cpio ) + ( cd $(TARGET_DIR); find . | LC_ALL=C sort | $(STAGING_DIR_HOST)/bin/cpio --reproducible -o -H newc -R 0:0 > $(KERNEL_BUILD_DIR)/initrd.cpio ) endif + $(if $(SOURCE_DATE_EPOCH),touch -hcd "@$(SOURCE_DATE_EPOCH)" $(KERNEL_BUILD_DIR)/initrd.cpio) $(if $(CONFIG_TARGET_INITRAMFS_COMPRESSION_BZIP2),bzip2 -9 -c < $(KERNEL_BUILD_DIR)/initrd.cpio > $(KERNEL_BUILD_DIR)/initrd.cpio.bzip2) - $(if $(CONFIG_TARGET_INITRAMFS_COMPRESSION_GZIP),gzip -f -S .gzip -9n $(KERNEL_BUILD_DIR)/initrd.cpio) + $(if $(CONFIG_TARGET_INITRAMFS_COMPRESSION_GZIP),gzip -n -f -S .gzip -9n $(KERNEL_BUILD_DIR)/initrd.cpio) $(if $(CONFIG_TARGET_INITRAMFS_COMPRESSION_LZMA),$(STAGING_DIR_HOST)/bin/lzma e -lc1 -lp2 -pb2 $(KERNEL_BUILD_DIR)/initrd.cpio $(KERNEL_BUILD_DIR)/initrd.cpio.lzma) # ? $(if $(CONFIG_TARGET_INITRAMFS_COMPRESSION_LZO),) - $(if $(CONFIG_TARGET_INITRAMFS_COMPRESSION_XZ),$(STAGING_DIR_HOST)/bin/xz -9 -fz --check=crc32 $(KERNEL_BUILD_DIR)/initrd.cpio) + $(if $(CONFIG_TARGET_INITRAMFS_COMPRESSION_XZ),$(STAGING_DIR_HOST)/bin/xz -T0 -9 -fz --check=crc32 $(KERNEL_BUILD_DIR)/initrd.cpio) # ? 
$(if $(CONFIG_TARGET_INITRAMFS_COMPRESSION_LZ4),) $(if $(CONFIG_TARGET_INITRAMFS_COMPRESSION_ZSTD),$(STAGING_DIR_HOST)/bin/zstd -T0 -f -o $(KERNEL_BUILD_DIR)/initrd.cpio.zstd $(KERNEL_BUILD_DIR)/initrd.cpio) endif diff --git a/package/libs/openssl/engine.mk b/include/openssl-engine.mk similarity index 93% rename from package/libs/openssl/engine.mk rename to include/openssl-engine.mk index 973a989904..d8baba482e 100644 --- a/package/libs/openssl/engine.mk +++ b/include/openssl-engine.mk @@ -1,3 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Copyright (C) 2022 Enéas Ulir de Queiroz + ENGINES_DIR=engines-1.1 define Package/openssl/engine/Default diff --git a/package/base-files/files/lib/upgrade/common.sh b/package/base-files/files/lib/upgrade/common.sh index 24ff77a8b3..5af061f6a4 100644 --- a/package/base-files/files/lib/upgrade/common.sh +++ b/package/base-files/files/lib/upgrade/common.sh @@ -155,9 +155,11 @@ export_bootdevice() { fi done ;; + PARTUUID=????????-????-????-????-??????????0?/PARTNROFF=1 | \ PARTUUID=????????-????-????-????-??????????02) uuid="${rootpart#PARTUUID=}" - uuid="${uuid%02}00" + uuid="${uuid%/PARTNROFF=1}" + uuid="${uuid%0?}00" for disk in $(find /dev -type b); do set -- $(dd if=$disk bs=1 skip=568 count=16 2>/dev/null | hexdump -v -e '8/1 "%02x "" "2/1 "%02x""-"6/1 "%02x"') if [ "$4$3$2$1-$6$5-$8$7-$9" = "$uuid" ]; then diff --git a/package/boot/uboot-envtools/files/oxnas b/package/boot/uboot-envtools/files/oxnas index bf872b697d..bd407c67d5 100644 --- a/package/boot/uboot-envtools/files/oxnas +++ b/package/boot/uboot-envtools/files/oxnas @@ -14,7 +14,7 @@ board=$(board_name) case "$board" in "cloudengines,pogoplug"*|\ "shuttle,kd20") - ubootenv_add_uci_config "/dev/mtd2" "0x0" "0x2000" "0x2000" "1" + ubootenv_add_uci_config "/dev/mtd2" "0x0" "0x2000" "0x20000" "1" ;; "mitrastar,stg-212") ubootenv_add_uci_config "/dev/mtd2" "0x0" "0x20000" "0x20000" "1" diff --git 
a/package/boot/uboot-mediatek/patches/280-image-fdt-save-name-of-FIT-configuration-in-chosen-node.patch b/package/boot/uboot-mediatek/patches/280-image-fdt-save-name-of-FIT-configuration-in-chosen-node.patch new file mode 100644 index 0000000000..76e272a2c9 --- /dev/null +++ b/package/boot/uboot-mediatek/patches/280-image-fdt-save-name-of-FIT-configuration-in-chosen-node.patch @@ -0,0 +1,60 @@ +From patchwork Mon Mar 21 23:22:23 2022 +Content-Type: text/plain; charset="utf-8" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +X-Patchwork-Submitter: Daniel Golle +X-Patchwork-Id: 1607954 +Return-Path: +X-Original-To: incoming@patchwork.ozlabs.org +Delivered-To: patchwork-incoming@bilbo.ozlabs.org +Date: Mon, 21 Mar 2022 23:22:23 +0000 +From: Daniel Golle +To: u-boot@lists.denx.de +Cc: Simon Glass , Alexandru Gagniuc , + Patrick Delaunay , + Heinrich Schuchardt +Subject: [PATCH] image-fdt: save name of FIT configuration in '/chosen' node +Message-ID: +MIME-Version: 1.0 +Content-Disposition: inline +X-BeenThere: u-boot@lists.denx.de +X-Mailman-Version: 2.1.39 +Precedence: list +List-Id: U-Boot discussion +List-Unsubscribe: , + +List-Archive: +List-Post: +List-Help: +List-Subscribe: , + +Errors-To: u-boot-bounces@lists.denx.de +Sender: "U-Boot" + +It can be useful for the OS (Linux) to know which configuration has +been chosen by U-Boot when launching a FIT image. +Store the name of the FIT configuration node used in a new string +attribute called 'bootconf' in the '/chosen' node in device tree. 
+ +Signed-off-by: Daniel Golle +--- + boot/image-fdt.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/boot/image-fdt.c b/boot/image-fdt.c +index 692a9ad3e4..4017bc94a6 100644 +--- a/boot/image-fdt.c ++++ b/boot/image-fdt.c +@@ -601,6 +601,12 @@ int image_setup_libfdt(bootm_headers_t *images, void *blob, + goto err; + } + ++ /* Store name of configuration node as bootconf in /chosen node */ ++ if (images->fit_uname_cfg) ++ fdt_find_and_setprop(blob, "/chosen", "bootconf", ++ images->fit_uname_cfg, ++ strlen(images->fit_uname_cfg) + 1, 1); ++ + /* Update ethernet nodes */ + fdt_fixup_ethernet(blob); + #if CONFIG_IS_ENABLED(CMD_PSTORE) diff --git a/package/boot/uboot-mvebu/patches/013-mmc-xenon_sdhci-remove-wait_dat0-SDHCI-OP.patch b/package/boot/uboot-mvebu/patches/013-mmc-xenon_sdhci-remove-wait_dat0-SDHCI-OP.patch new file mode 100644 index 0000000000..3277638784 --- /dev/null +++ b/package/boot/uboot-mvebu/patches/013-mmc-xenon_sdhci-remove-wait_dat0-SDHCI-OP.patch @@ -0,0 +1,64 @@ +From 0f3466f52fbacce67e147b9234e6323edff26a6d Mon Sep 17 00:00:00 2001 +From: Robert Marko +Date: Fri, 11 Mar 2022 19:14:07 +0100 +Subject: [PATCH] mmc: xenon_sdhci: remove wait_dat0 SDHCI OP +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Generic SDHCI driver received support for checking the busy status by +polling the DAT[0] level instead of waiting for the worst MMC switch time. + +Unfortunately, it appears that this does not work for Xenon controllers +despite being a part of the standard SDHCI registers and the Armada 3720 +datasheet itself telling that BIT(20) is useful for detecting the DAT[0] +busy signal. + +I have tried increasing the timeout value, but I have newer managed to +catch DAT_LEVEL bits change from 0 at all. + +This issue appears to hit most if not all SoC-s supported by Xenon driver, +at least A3720, A8040 and CN9130 have non working eMMC currently. 
+ +So, until a better solution is found drop the wait_dat0 OP for Xenon. +I was able to only test it on A3720, but it should work for others as well. + +Fixes: 40e6f52454fc ("drivers: mmc: Add wait_dat0 support for sdhci driver") +Signed-off-by: Robert Marko +Reviewed-by: Marek Behún +Reviewed-by: Jaehoon Chung +Reviewed-by: Stefan Roese +--- + drivers/mmc/xenon_sdhci.c | 7 ++++++- + 1 file changed, 6 insertions(+), 1 deletion(-) + +--- a/drivers/mmc/xenon_sdhci.c ++++ b/drivers/mmc/xenon_sdhci.c +@@ -439,6 +439,8 @@ static const struct sdhci_ops xenon_sdhc + .set_ios_post = xenon_sdhci_set_ios_post + }; + ++static struct dm_mmc_ops xenon_mmc_ops; ++ + static int xenon_sdhci_probe(struct udevice *dev) + { + struct xenon_sdhci_plat *plat = dev_get_plat(dev); +@@ -452,6 +454,9 @@ static int xenon_sdhci_probe(struct udev + host->mmc->dev = dev; + upriv->mmc = host->mmc; + ++ xenon_mmc_ops = sdhci_ops; ++ xenon_mmc_ops.wait_dat0 = NULL; ++ + /* Set quirks */ + host->quirks = SDHCI_QUIRK_WAIT_SEND_CMD | SDHCI_QUIRK_32BIT_DMA_ADDR; + +@@ -568,7 +573,7 @@ U_BOOT_DRIVER(xenon_sdhci_drv) = { + .id = UCLASS_MMC, + .of_match = xenon_sdhci_ids, + .of_to_plat = xenon_sdhci_of_to_plat, +- .ops = &sdhci_ops, ++ .ops = &xenon_mmc_ops, + .bind = xenon_sdhci_bind, + .probe = xenon_sdhci_probe, + .remove = xenon_sdhci_remove, diff --git a/package/firmware/cypress-firmware/Makefile b/package/firmware/cypress-firmware/Makefile index 4fef8522cd..c5f41e0cb4 100644 --- a/package/firmware/cypress-firmware/Makefile +++ b/package/firmware/cypress-firmware/Makefile @@ -208,42 +208,6 @@ endef $(eval $(call BuildPackage,cypress-firmware-43570-pcie)) -# Cypress 4359 PCIe Firmware -define Package/cypress-firmware-4359-pcie - $(Package/cypress-firmware-default) - TITLE:=CYW4359 FullMac PCIe firmware -endef - -define Package/cypress-firmware-4359-pcie/install - $(INSTALL_DIR) $(1)/lib/firmware/brcm - $(INSTALL_DATA) \ - $(PKG_BUILD_DIR)/firmware/cyfmac4359-pcie.bin \ - 
$(1)/lib/firmware/brcm/brcmfmac4359-pcie.bin - $(INSTALL_DATA) \ - $(PKG_BUILD_DIR)/firmware/cyfmac4359-pcie.clm_blob \ - $(1)/lib/firmware/brcm/brcmfmac4359-pcie.clm_blob -endef - -$(eval $(call BuildPackage,cypress-firmware-4359-pcie)) - -# Cypress 4359 SDIO Firmware -define Package/cypress-firmware-4359-sdio - $(Package/cypress-firmware-default) - TITLE:=CYW4359 FullMac SDIO firmware -endef - -define Package/cypress-firmware-4359-sdio/install - $(INSTALL_DIR) $(1)/lib/firmware/brcm - $(INSTALL_DATA) \ - $(PKG_BUILD_DIR)/firmware/cyfmac4359-sdio.bin \ - $(1)/lib/firmware/brcm/brcmfmac4359-sdio.bin - $(INSTALL_DATA) \ - $(PKG_BUILD_DIR)/firmware/cyfmac4359-sdio.clm_blob \ - $(1)/lib/firmware/brcm/brcmfmac4359-sdio.clm_blob -endef - -$(eval $(call BuildPackage,cypress-firmware-4359-sdio)) - # Cypress 4373 SDIO Firmware define Package/cypress-firmware-4373-sdio $(Package/cypress-firmware-default) @@ -297,21 +261,3 @@ define Package/cypress-firmware-54591-pcie/install endef $(eval $(call BuildPackage,cypress-firmware-54591-pcie)) - -# Cypress 89459 PCIe Firmware -define Package/cypress-firmware-89459-pcie - $(Package/cypress-firmware-default) - TITLE:=CYW89459 FullMac PCIe firmware -endef - -define Package/cypress-firmware-89459-pcie/install - $(INSTALL_DIR) $(1)/lib/firmware/brcm - $(INSTALL_DATA) \ - $(PKG_BUILD_DIR)/firmware/cyfmac89459-pcie.bin \ - $(1)/lib/firmware/brcm/brcmfmac89459-pcie.bin - $(INSTALL_DATA) \ - $(PKG_BUILD_DIR)/firmware/cyfmac89459-pcie.clm_blob \ - $(1)/lib/firmware/brcm/brcmfmac89459-pcie.clm_blob -endef - -$(eval $(call BuildPackage,cypress-firmware-89459-pcie)) diff --git a/package/firmware/ipq-wifi/Makefile b/package/firmware/ipq-wifi/Makefile index 9842725be2..b3b04c6abf 100644 --- a/package/firmware/ipq-wifi/Makefile +++ b/package/firmware/ipq-wifi/Makefile @@ -42,6 +42,7 @@ ALLWIFIBOARDS:= \ glinet_gl-ap1300 \ glinet_gl-b2200 \ glinet_gl-s1300 \ + google_wifi \ linksys_ea8300 \ linksys_mr8300-v0 \ luma_wrtq-329acn \ @@ -54,7 +55,8 @@ 
ALLWIFIBOARDS:= \ p2w_r619ac \ plasmacloud_pa1200 \ plasmacloud_pa2200 \ - qxwlan_e2600ac \ + qxwlan_e2600ac-c1 \ + qxwlan_e2600ac-c2 \ teltonika_rutx \ zte_mf286d @@ -134,6 +136,7 @@ $(eval $(call generate-ipq-wifi-package,ezviz_cs-w3-wd1200g-eup,EZVIZ CS-W3-WD12 $(eval $(call generate-ipq-wifi-package,glinet_gl-ap1300,GL.iNet GL-AP1300)) $(eval $(call generate-ipq-wifi-package,glinet_gl-b2200,GL.iNet GL-B2200)) $(eval $(call generate-ipq-wifi-package,glinet_gl-s1300,GL.iNet GL-S1300)) +$(eval $(call generate-ipq-wifi-package,google_wifi,Google WiFi)) $(eval $(call generate-ipq-wifi-package,linksys_ea8300,Linksys EA8300)) $(eval $(call generate-ipq-wifi-package,linksys_mr8300-v0,Linksys MR8300)) $(eval $(call generate-ipq-wifi-package,luma_wrtq-329acn,Luma WRTQ-329ACN)) @@ -146,7 +149,8 @@ $(eval $(call generate-ipq-wifi-package,nec_wg2600hp3,NEC Platforms WG2600HP3)) $(eval $(call generate-ipq-wifi-package,p2w_r619ac,P&W R619AC)) $(eval $(call generate-ipq-wifi-package,plasmacloud_pa1200,Plasma Cloud PA1200)) $(eval $(call generate-ipq-wifi-package,plasmacloud_pa2200,Plasma Cloud PA2200)) -$(eval $(call generate-ipq-wifi-package,qxwlan_e2600ac,Qxwlan E2600AC)) +$(eval $(call generate-ipq-wifi-package,qxwlan_e2600ac-c1,Qxwlan E2600AC C1)) +$(eval $(call generate-ipq-wifi-package,qxwlan_e2600ac-c2,Qxwlan E2600AC C2)) $(eval $(call generate-ipq-wifi-package,teltonika_rutx,Teltonika RUTX)) $(eval $(call generate-ipq-wifi-package,zte_mf286d,ZTE MF286D)) diff --git a/package/firmware/ipq-wifi/board-google_wifi.qca4019 b/package/firmware/ipq-wifi/board-google_wifi.qca4019 new file mode 100644 index 0000000000..1c10731f0d Binary files /dev/null and b/package/firmware/ipq-wifi/board-google_wifi.qca4019 differ diff --git a/package/firmware/ipq-wifi/board-qxwlan_e2600ac-c1.qca4019 b/package/firmware/ipq-wifi/board-qxwlan_e2600ac-c1.qca4019 new file mode 100644 index 0000000000..9eafff1aaa Binary files /dev/null and b/package/firmware/ipq-wifi/board-qxwlan_e2600ac-c1.qca4019 
differ diff --git a/package/firmware/ipq-wifi/board-qxwlan_e2600ac-c2.qca4019 b/package/firmware/ipq-wifi/board-qxwlan_e2600ac-c2.qca4019 new file mode 100644 index 0000000000..256f3e551a Binary files /dev/null and b/package/firmware/ipq-wifi/board-qxwlan_e2600ac-c2.qca4019 differ diff --git a/package/kernel/lantiq/ltq-vdsl-mei/Makefile b/package/kernel/lantiq/ltq-vdsl-mei/Makefile index 796918549a..b7060eb2b4 100644 --- a/package/kernel/lantiq/ltq-vdsl-mei/Makefile +++ b/package/kernel/lantiq/ltq-vdsl-mei/Makefile @@ -9,7 +9,7 @@ include $(INCLUDE_DIR)/kernel.mk PKG_NAME:=ltq-vdsl-vr9-mei PKG_VERSION:=1.5.17.6 -PKG_RELEASE:=4 +PKG_RELEASE:=6 PKG_BASE_NAME:=drv_mei_cpe PKG_SOURCE:=$(PKG_BASE_NAME)-$(PKG_VERSION).tar.gz @@ -29,7 +29,7 @@ define KernelPackage/ltq-vdsl-vr9-mei TITLE:=mei driver for vdsl SECTION:=sys SUBMENU:=Network Devices - DEPENDS:=@TARGET_lantiq_xrx200 +kmod-ltq-ifxos + DEPENDS:=@TARGET_lantiq_xrx200 +kmod-ltq-ifxos +kmod-ltq-vectoring FILES:=$(PKG_BUILD_DIR)/src/drv_mei_cpe.ko AUTOLOAD:=$(call AutoLoad,50,drv_mei_cpe) endef diff --git a/package/kernel/lantiq/ltq-vdsl-mei/patches/100-compat.patch b/package/kernel/lantiq/ltq-vdsl-mei/patches/100-compat.patch index 75e1500171..61ea826cb4 100644 --- a/package/kernel/lantiq/ltq-vdsl-mei/patches/100-compat.patch +++ b/package/kernel/lantiq/ltq-vdsl-mei/patches/100-compat.patch @@ -376,17 +376,6 @@ } IFX_int32_t MEI_PLL_ConfigInit(MEI_DEV_T *pMeiDev) ---- a/src/drv_mei_cpe_dsm.c -+++ b/src/drv_mei_cpe_dsm.c -@@ -144,7 +144,7 @@ IFX_void_t MEI_VRX_DSM_DataInit(MEI_DEV_ - memset((IFX_uint8_t *)&pMeiDev->firmwareFeatures, 0x00, sizeof(IOCTL_MEI_firmwareFeatures_t)); - pMeiDev->meiFwDlCount = 0; - -- pMeiDev->meiERBbuf.pCallBackFunc = mei_dsm_cb_func_hook; -+ pMeiDev->meiERBbuf.pCallBackFunc = NULL; - - PRN_DBG_USR_NL( MEI_DRV, MEI_DRV_PRN_LEVEL_NORMAL, - ("MEI_DRV: PP callback function addr = 0x%08X" MEI_DRV_CRLF, --- a/src/drv_mei_cpe_download_vrx.c +++ b/src/drv_mei_cpe_download_vrx.c @@ -3281,12 
+3281,14 @@ IFX_int32_t MEI_DEV_IoctlFirmwareDownloa diff --git a/package/kernel/lantiq/ltq-vdsl-mei/patches/200-interrupt-lock.patch b/package/kernel/lantiq/ltq-vdsl-mei/patches/200-interrupt-lock.patch new file mode 100644 index 0000000000..a677ccec55 --- /dev/null +++ b/package/kernel/lantiq/ltq-vdsl-mei/patches/200-interrupt-lock.patch @@ -0,0 +1,45 @@ +--- a/src/drv_mei_cpe_common.c ++++ b/src/drv_mei_cpe_common.c +@@ -104,6 +104,8 @@ IFX_uint32_t MEI_FsmStateSetMsgPreAction + MEI_DEVCFG_DATA_T MEI_DevCfgData; + #endif + ++static DEFINE_SPINLOCK(MEI_InterruptLock); ++ + /* ============================================================================ + Proc-FS and debug variable definitions + ========================================================================= */ +@@ -2134,6 +2136,9 @@ IFX_int32_t MEI_ProcessIntPerIrq(MEIX_CN + #if (MEI_SUPPORT_DEBUG_STREAMS == 1) + IFX_int_t extraDbgStreamLoop = 0; + #endif ++ unsigned long flags; ++ ++ spin_lock_irqsave(&MEI_InterruptLock, flags); + + /* get the actual chip device from the list and step through the VRX devices */ + while(pNextXCntrl) +@@ -2167,6 +2172,8 @@ IFX_int32_t MEI_ProcessIntPerIrq(MEIX_CN + } + #endif + ++ spin_unlock_irqrestore(&MEI_InterruptLock, flags); ++ + return meiIntCnt; + } + +@@ -2639,9 +2646,14 @@ IFX_int32_t MEI_MsgSendPreAction( + */ + IFX_void_t MEI_DisableDeviceInt(MEI_DEV_T *pMeiDev) + { ++ unsigned long flags; ++ spin_lock_irqsave(&MEI_InterruptLock, flags); ++ + MEI_MaskInterrupts( &pMeiDev->meiDrvCntrl, + ME_ARC2ME_INTERRUPT_MASK_ALL); + ++ spin_unlock_irqrestore(&MEI_InterruptLock, flags); ++ + return; + } + diff --git a/package/kernel/lantiq/ltq-vectoring/Makefile b/package/kernel/lantiq/ltq-vectoring/Makefile new file mode 100644 index 0000000000..b7b1b83a07 --- /dev/null +++ b/package/kernel/lantiq/ltq-vectoring/Makefile @@ -0,0 +1,61 @@ +include $(TOPDIR)/rules.mk + +PKG_NAME:=ltq-vectoring +PKG_RELEASE:=$(AUTORELEASE) +PKG_SOURCE_PROTO:=git 
+PKG_SOURCE_URL:=https://gitlab.com/prpl-foundation/intel/ppa_drv.git +PKG_SOURCE_DATE:=2019-05-20 +PKG_SOURCE_VERSION:=4fa7ac30fcc8ec4eddae9debba5f4230981f469f +PKG_MIRROR_HASH:=444eb823dd9ddd25453976bf7a3230955e4148b8bf92f35f165ecffee32c4555 +PKG_LICENSE:=GPL-2.0 BSD-2-Clause + +MAKE_PATH:=src/vectoring +PKG_EXTMOD_SUBDIRS:=$(MAKE_PATH) + +include $(INCLUDE_DIR)/kernel.mk +include $(INCLUDE_DIR)/package.mk + +define KernelPackage/ltq-vectoring + SECTION:=sys + CATEGORY:=Kernel modules + SUBMENU:=Network Devices + TITLE:=driver for sending vectoring error samples + DEPENDS:=@TARGET_lantiq_xrx200 + FILES:=$(PKG_BUILD_DIR)/$(MAKE_PATH)/ltq_vectoring.ko + AUTOLOAD:=$(call AutoLoad,49,ltq_vectoring) +endef + +define Package/ltq-vectoring/description + This driver is responsible for sending error reports to the vectoring + control entity, which is required for downstream vectoring to work. + + The error reports are generated by the DSL firmware, and passed to this + driver by the MEI driver. +endef + +define KernelPackage/ltq-vectoring-test + SECTION:=sys + CATEGORY:=Kernel modules + SUBMENU:=Network Devices + TITLE:=driver for testing the vectoring driver + DEPENDS:=@TARGET_lantiq_xrx200 +kmod-ltq-vectoring + FILES:=$(PKG_BUILD_DIR)/$(MAKE_PATH)/ltq_vectoring_test.ko +endef + +define Package/ltq-vectoring-test/description + This allows to send dummy data to the vectoring error block callback. + This is only needed for test and development purposes. 
+endef + +define Build/Configure +endef + +define Build/Compile + +$(MAKE) $(PKG_JOBS) -C "$(LINUX_DIR)" \ + $(KERNEL_MAKE_FLAGS) \ + M="$(PKG_BUILD_DIR)/$(MAKE_PATH)" \ + modules +endef + +$(eval $(call KernelPackage,ltq-vectoring)) +$(eval $(call KernelPackage,ltq-vectoring-test)) diff --git a/package/kernel/lantiq/ltq-vectoring/patches/001-fix-compile.patch b/package/kernel/lantiq/ltq-vectoring/patches/001-fix-compile.patch new file mode 100644 index 0000000000..c97ec4d94b --- /dev/null +++ b/package/kernel/lantiq/ltq-vectoring/patches/001-fix-compile.patch @@ -0,0 +1,95 @@ +--- a/src/vectoring/Makefile ++++ b/src/vectoring/Makefile +@@ -1,5 +1,5 @@ +-obj-$(CONFIG_PTM_VECTORING) += ifxmips_vectoring.o +-obj-y += ifxmips_vectoring_stub.o +-ifeq ($(CONFIG_DSL_MEI_CPE_DRV),) +-obj-$(CONFIG_PTM_VECTORING) += ifxmips_vectoring_test.o +-endif ++obj-m += ltq_vectoring.o ++ltq_vectoring-objs = ifxmips_vectoring.o ifxmips_vectoring_stub.o ++ ++obj-m += ltq_vectoring_test.o ++ltq_vectoring_test-objs = ifxmips_vectoring_test.o +--- a/src/vectoring/ifxmips_vectoring.c ++++ b/src/vectoring/ifxmips_vectoring.c +@@ -30,9 +30,11 @@ + /* + * Common Head File + */ ++#include + #include + #include + #include ++#include + + /* + * Chip Specific Head File +@@ -239,7 +241,7 @@ static int netdev_event_handler(struct n + && event != NETDEV_UNREGISTER ) + return NOTIFY_DONE; + +- netif = (struct net_device *)netdev; ++ netif = netdev_notifier_info_to_dev(netdev); + if ( strcmp(netif->name, "ptm0") != 0 ) + return NOTIFY_DONE; + +@@ -356,6 +358,7 @@ static int proc_write_dbg(struct file *f + return count; + } + ++#if LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0) + static struct file_operations g_proc_file_vectoring_dbg_seq_fops = { + .owner = THIS_MODULE, + .open = proc_read_dbg_seq_open, +@@ -364,6 +367,15 @@ static struct file_operations g_proc_fil + .llseek = seq_lseek, + .release = single_release, + }; ++#else ++static struct proc_ops g_proc_file_vectoring_dbg_seq_fops = { ++ .proc_open 
= proc_read_dbg_seq_open, ++ .proc_read = seq_read, ++ .proc_write = proc_write_dbg, ++ .proc_lseek = seq_lseek, ++ .proc_release = single_release, ++}; ++#endif + + static int proc_read_dbg_seq_open(struct inode *inode, struct file *file) + { +--- a/src/vectoring/ifxmips_vectoring_test.c ++++ b/src/vectoring/ifxmips_vectoring_test.c +@@ -1,6 +1,8 @@ ++#include + #include + #include + #include ++#include + + #include "ifxmips_vectoring_stub.h" + +@@ -82,6 +84,7 @@ static int proc_write_vectoring(struct f + return count; + } + ++#if LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0) + static struct file_operations g_proc_file_vectoring_seq_fops = { + .owner = THIS_MODULE, + .open = proc_read_vectoring_seq_open, +@@ -90,6 +93,15 @@ static struct file_operations g_proc_fil + .llseek = seq_lseek, + .release = single_release, + }; ++#else ++static struct proc_ops g_proc_file_vectoring_seq_fops = { ++ .proc_open = proc_read_vectoring_seq_open, ++ .proc_read = seq_read, ++ .proc_write = proc_write_vectoring, ++ .proc_lseek = seq_lseek, ++ .proc_release = single_release, ++}; ++#endif + + static int proc_read_vectoring_seq_open(struct inode *inode, struct file *file) + { diff --git a/package/kernel/lantiq/ltq-vectoring/patches/100-cleanup.patch b/package/kernel/lantiq/ltq-vectoring/patches/100-cleanup.patch new file mode 100644 index 0000000000..2b0067379d --- /dev/null +++ b/package/kernel/lantiq/ltq-vectoring/patches/100-cleanup.patch @@ -0,0 +1,73 @@ +--- a/src/vectoring/ifxmips_vectoring.c ++++ b/src/vectoring/ifxmips_vectoring.c +@@ -325,7 +325,7 @@ static int proc_write_dbg(struct file *f + else + printk(dbg_enable_mask_str[i] + 1); + } +- printk("] > /proc/vectoring\n"); ++ printk("] > /proc/driver/vectoring\n"); + } + + if ( f_enable ) +@@ -433,11 +433,10 @@ static int __init vectoring_init(void) + { + struct proc_dir_entry *res; + +- res = proc_create("vectoring", ++ res = proc_create("driver/vectoring", + S_IRUGO|S_IWUSR, + 0, + &g_proc_file_vectoring_dbg_seq_fops); +- 
printk("res = %p\n", res); + + register_netdev_event_handler(); + g_ptm_net_dev = dev_get_by_name(&init_net, "ptm0"); +@@ -460,7 +459,7 @@ static void __exit vectoring_exit(void) + + unregister_netdev_event_handler(); + +- remove_proc_entry("vectoring", NULL); ++ remove_proc_entry("driver/vectoring", NULL); + } + + module_init(vectoring_init); +--- a/src/vectoring/ifxmips_vectoring_test.c ++++ b/src/vectoring/ifxmips_vectoring_test.c +@@ -79,7 +79,7 @@ static int proc_write_vectoring(struct f + } + } + else +- printk("echo send > /proc/eth/vectoring\n"); ++ printk("echo send > /proc/driver/vectoring_test\n"); + + return count; + } +@@ -112,9 +112,7 @@ static __init void proc_file_create(void + { + struct proc_dir_entry *res; + +-// g_proc_dir = proc_mkdir("eth", NULL); +- +- res = proc_create("eth/vectoring", ++ res = proc_create("driver/vectoring_test", + S_IRUGO|S_IWUSR, + g_proc_dir, + &g_proc_file_vectoring_seq_fops); +@@ -122,10 +120,7 @@ static __init void proc_file_create(void + + static __exit void proc_file_delete(void) + { +- remove_proc_entry("vectoring", +- g_proc_dir); +- +- remove_proc_entry("eth", NULL); ++ remove_proc_entry("driver/vectoring_test", NULL); + } + + +@@ -151,3 +146,5 @@ static void __exit vectoring_test_exit(v + + module_init(vectoring_test_init); + module_exit(vectoring_test_exit); ++ ++MODULE_LICENSE("GPL"); diff --git a/package/kernel/lantiq/ltq-vectoring/patches/200-compat.patch b/package/kernel/lantiq/ltq-vectoring/patches/200-compat.patch new file mode 100644 index 0000000000..04c6880dde --- /dev/null +++ b/package/kernel/lantiq/ltq-vectoring/patches/200-compat.patch @@ -0,0 +1,120 @@ +--- a/src/vectoring/ifxmips_vectoring.c ++++ b/src/vectoring/ifxmips_vectoring.c +@@ -35,6 +35,8 @@ + #include + #include + #include ++#include ++#include + + /* + * Chip Specific Head File +@@ -88,6 +90,7 @@ static void dump_skb(struct sk_buff *skb + + static void ltq_vectoring_priority(uint32_t priority); + ++static void xmit_work_handler(struct 
work_struct *); + + /* + * #################################### +@@ -118,9 +121,11 @@ struct erb_head { + + static struct notifier_block g_netdev_event_handler_nb = {0}; + static struct net_device *g_ptm_net_dev = NULL; +-static uint32_t vector_prio = 0; ++static uint32_t vector_prio = TC_PRIO_CONTROL; + static int g_dbg_enable = 0; + ++DECLARE_WORK(xmit_work, xmit_work_handler); ++struct sk_buff_head xmit_queue; + + + /* +@@ -129,9 +134,16 @@ static int g_dbg_enable = 0; + * #################################### + */ + ++static void xmit_work_handler(__attribute__((unused)) struct work_struct *work) { ++ struct sk_buff *skb; ++ ++ while ((skb = skb_dequeue(&xmit_queue)) != NULL) { ++ dev_queue_xmit(skb); ++ } ++} ++ + static int mei_dsm_cb_func(unsigned int *p_error_vector) + { +- int rc, ret; + struct sk_buff *skb_list = NULL; + struct sk_buff *skb; + struct erb_head *erb; +@@ -179,7 +191,6 @@ static int mei_dsm_cb_func(unsigned int + } + } + +- rc = 0; + sent_size = 0; + segment_code = 0; + while ( (skb = skb_list) != NULL ) { +@@ -197,24 +208,23 @@ static int mei_dsm_cb_func(unsigned int + segment_code |= 0xC0; + ((struct erb_head *)skb->data)->segment_code = segment_code; + +- skb->cb[13] = 0x5A; /* magic number indicating forcing QId */ +- skb->cb[15] = 0x00; /* highest priority queue */ +- skb->priority = vector_prio; ++ skb_reset_mac_header(skb); ++ skb_set_network_header(skb, offsetof(struct erb_head, llc_header)); ++ skb->protocol = htons(ETH_P_802_2); ++ skb->priority = vector_prio; + skb->dev = g_ptm_net_dev; + + dump_skb(skb, ~0, "vectoring TX", 0, 0, 1, 0); + +- ret = g_ptm_net_dev->netdev_ops->ndo_start_xmit(skb, g_ptm_net_dev); +- if ( rc == 0 ) +- rc = ret; ++ skb_queue_tail(&xmit_queue, skb); ++ schedule_work(&xmit_work); + + segment_code++; + } + + *p_error_vector = 0; /* notify DSL firmware that ERB is sent */ + +- ASSERT(rc == 0, "dev_queue_xmit fail - %d", rc); +- return rc; ++ return 0; + } + static void ltq_vectoring_priority(uint32_t 
priority) + { +@@ -242,7 +252,7 @@ static int netdev_event_handler(struct n + return NOTIFY_DONE; + + netif = netdev_notifier_info_to_dev(netdev); +- if ( strcmp(netif->name, "ptm0") != 0 ) ++ if ( strcmp(netif->name, "dsl0") != 0 ) + return NOTIFY_DONE; + + g_ptm_net_dev = event == NETDEV_REGISTER ? netif : NULL; +@@ -438,8 +448,10 @@ static int __init vectoring_init(void) + 0, + &g_proc_file_vectoring_dbg_seq_fops); + ++ skb_queue_head_init(&xmit_queue); ++ + register_netdev_event_handler(); +- g_ptm_net_dev = dev_get_by_name(&init_net, "ptm0"); ++ g_ptm_net_dev = dev_get_by_name(&init_net, "dsl0"); + if ( g_ptm_net_dev != NULL ) + dev_put(g_ptm_net_dev); + +@@ -459,6 +471,8 @@ static void __exit vectoring_exit(void) + + unregister_netdev_event_handler(); + ++ flush_scheduled_work(); ++ + remove_proc_entry("driver/vectoring", NULL); + } + diff --git a/package/kernel/linux/modules/lib.mk b/package/kernel/linux/modules/lib.mk index 965612df3e..c52a8133be 100644 --- a/package/kernel/linux/modules/lib.mk +++ b/package/kernel/linux/modules/lib.mk @@ -109,9 +109,10 @@ define KernelPackage/lib-lzo HIDDEN:=1 FILES:= \ $(LINUX_DIR)/crypto/lzo.ko \ + $(LINUX_DIR)/crypto/lzo-rle.ko \ $(LINUX_DIR)/lib/lzo/lzo_compress.ko \ $(LINUX_DIR)/lib/lzo/lzo_decompress.ko - AUTOLOAD:=$(call AutoProbe,lzo lzo_compress lzo_decompress) + AUTOLOAD:=$(call AutoProbe,lzo lzo-rle lzo_compress lzo_decompress) endef define KernelPackage/lib-lzo/description diff --git a/package/kernel/linux/modules/other.mk b/package/kernel/linux/modules/other.mk index 5ccc860790..d343ff8d28 100644 --- a/package/kernel/linux/modules/other.mk +++ b/package/kernel/linux/modules/other.mk @@ -182,6 +182,32 @@ endef $(eval $(call KernelPackage,eeprom-at25)) +define KernelPackage/google-firmware + SUBMENU:=$(OTHER_MENU) + TITLE:=Google firmware drivers (Coreboot, VPD, Memconsole) + KCONFIG:= \ + CONFIG_GOOGLE_FIRMWARE=y \ + CONFIG_GOOGLE_COREBOOT_TABLE \ + CONFIG_GOOGLE_MEMCONSOLE \ + CONFIG_GOOGLE_MEMCONSOLE_COREBOOT 
\ + CONFIG_GOOGLE_VPD + FILES:= \ + $(LINUX_DIR)/drivers/firmware/google/coreboot_table.ko \ + $(LINUX_DIR)/drivers/firmware/google/memconsole.ko \ + $(LINUX_DIR)/drivers/firmware/google/memconsole-coreboot.ko \ + $(LINUX_DIR)/drivers/firmware/google/vpd-sysfs.ko + AUTOLOAD:=$(call AutoProbe,coreboot_table memconsole-coreboot vpd-sysfs) +endef + +define KernelPackage/google-firmware/description + Kernel modules for Google firmware drivers. Useful for examining firmware and + boot details on devices using a Google bootloader based on Coreboot. Provides + files like /sys/firmware/log and /sys/firmware/vpd. +endef + +$(eval $(call KernelPackage,google-firmware)) + + define KernelPackage/gpio-f7188x SUBMENU:=$(OTHER_MENU) TITLE:=Fintek F718xx/F818xx GPIO Support diff --git a/package/kernel/mac80211/patches/subsys/327-mac80211-allow-non-standard-VHT-MCS-10-11.patch b/package/kernel/mac80211/patches/subsys/327-mac80211-allow-non-standard-VHT-MCS-10-11.patch new file mode 100644 index 0000000000..7c68e75574 --- /dev/null +++ b/package/kernel/mac80211/patches/subsys/327-mac80211-allow-non-standard-VHT-MCS-10-11.patch @@ -0,0 +1,36 @@ +From: Ping-Ke Shih +Date: Mon, 3 Jan 2022 09:36:21 +0800 +Subject: [PATCH] mac80211: allow non-standard VHT MCS-10/11 + +Some AP can possibly try non-standard VHT rate and mac80211 warns and drops +packets, and leads low TCP throughput. + + Rate marked as a VHT rate but data is invalid: MCS: 10, NSS: 2 + WARNING: CPU: 1 PID: 7817 at net/mac80211/rx.c:4856 ieee80211_rx_list+0x223/0x2f0 [mac8021 + +Since commit c27aa56a72b8 ("cfg80211: add VHT rate entries for MCS-10 and MCS-11") +has added, mac80211 adds this support as well. 
+ +After this patch, throughput is good and iw can get the bitrate: + rx bitrate: 975.1 MBit/s VHT-MCS 10 80MHz short GI VHT-NSS 2 +or + rx bitrate: 1083.3 MBit/s VHT-MCS 11 80MHz short GI VHT-NSS 2 + +Buglink: https://bugzilla.suse.com/show_bug.cgi?id=1192891 +Reported-by: Goldwyn Rodrigues +Signed-off-by: Ping-Ke Shih +Link: https://lore.kernel.org/r/20220103013623.17052-1-pkshih@realtek.com +Signed-off-by: Johannes Berg +--- + +--- a/net/mac80211/rx.c ++++ b/net/mac80211/rx.c +@@ -4945,7 +4945,7 @@ void ieee80211_rx_list(struct ieee80211_ + goto drop; + break; + case RX_ENC_VHT: +- if (WARN_ONCE(status->rate_idx > 9 || ++ if (WARN_ONCE(status->rate_idx > 11 || + !status->nss || + status->nss > 8, + "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n", diff --git a/package/libs/gettext-full/Makefile b/package/libs/gettext-full/Makefile index 6a97fd5b91..f5f5e00542 100644 --- a/package/libs/gettext-full/Makefile +++ b/package/libs/gettext-full/Makefile @@ -88,6 +88,11 @@ define Build/InstallDev $(SED) '/read dummy/d' $(STAGING_DIR_HOSTPKG)/bin/gettextize endef +define Host/Install + $(call Host/Install/Default) + $(LN) msgfmt $(STAGING_DIR_HOSTPKG)/bin/gmsgfmt +endef + define Package/libintl-full/install $(INSTALL_DIR) $(1)/usr/lib $(CP) $(PKG_INSTALL_DIR)/usr/lib/libintl.so.* $(1)/usr/lib/ diff --git a/package/libs/libselinux/Makefile b/package/libs/libselinux/Makefile index 3969ef38d9..9520afc771 100644 --- a/package/libs/libselinux/Makefile +++ b/package/libs/libselinux/Makefile @@ -7,7 +7,7 @@ include $(TOPDIR)/rules.mk PKG_NAME:=libselinux PKG_VERSION:=3.3 -PKG_RELEASE:=1 +PKG_RELEASE:=2 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://github.com/SELinuxProject/selinux/releases/download/$(PKG_VERSION) @@ -18,6 +18,8 @@ PKG_LICENSE:=libselinux-1.0 PKG_LICENSE_FILES:=LICENSE PKG_MAINTAINER:=Thomas Petazzoni +HOST_BUILD_DEPENDS:=musl-fts/host pcre/host + include $(INCLUDE_DIR)/package.mk include $(INCLUDE_DIR)/host-build.mk @@ 
-107,7 +109,8 @@ HOST_LDFLAGS += -Wl,-rpath="$(STAGING_DIR_HOSTPKG)/lib" HOST_MAKE_FLAGS += \ PREFIX=$(STAGING_DIR_HOSTPKG) \ - SHLIBDIR=$(STAGING_DIR_HOSTPKG)/lib + SHLIBDIR=$(STAGING_DIR_HOSTPKG)/lib \ + FTS_LDLIBS=-lfts ifeq ($(CONFIG_USE_MUSL),y) MAKE_FLAGS += FTS_LDLIBS=-lfts diff --git a/package/libs/musl-fts/Makefile b/package/libs/musl-fts/Makefile index 494f700f8a..e913fe167f 100644 --- a/package/libs/musl-fts/Makefile +++ b/package/libs/musl-fts/Makefile @@ -30,6 +30,7 @@ PKG_BUILD_PARALLEL:=1 PKG_INSTALL:=1 include $(INCLUDE_DIR)/package.mk +include $(INCLUDE_DIR)/host-build.mk define Package/musl-fts SECTION:=libs @@ -58,3 +59,4 @@ define Package/musl-fts/install endef $(eval $(call BuildPackage,musl-fts)) +$(eval $(call HostBuild)) diff --git a/package/libs/openssl/Makefile b/package/libs/openssl/Makefile index 4f1dd5931c..a8ec5d015d 100644 --- a/package/libs/openssl/Makefile +++ b/package/libs/openssl/Makefile @@ -11,7 +11,7 @@ PKG_NAME:=openssl PKG_BASE:=1.1.1 PKG_BUGFIX:=n PKG_VERSION:=$(PKG_BASE)$(PKG_BUGFIX) -PKG_RELEASE:=1 +PKG_RELEASE:=$(AUTORELEASE) PKG_USE_MIPS16:=0 PKG_BUILD_PARALLEL:=1 @@ -66,7 +66,7 @@ PKG_CONFIG_DEPENDS:= \ CONFIG_OPENSSL_WITH_WHIRLPOOL include $(INCLUDE_DIR)/package.mk -include engine.mk +include $(INCLUDE_DIR)/openssl-engine.mk ifneq ($(CONFIG_CCACHE),) HOSTCC=$(HOSTCC_NOCACHE) diff --git a/package/libs/zlib/Makefile b/package/libs/zlib/Makefile index c7a8415c79..7321ec51c5 100644 --- a/package/libs/zlib/Makefile +++ b/package/libs/zlib/Makefile @@ -9,7 +9,7 @@ include $(TOPDIR)/rules.mk PKG_NAME:=zlib PKG_VERSION:=1.2.11 -PKG_RELEASE:=3 +PKG_RELEASE:=4 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz PKG_SOURCE_URL:=@SF/libpng http://www.zlib.net diff --git a/package/libs/zlib/patches/006-fix-compressor-crash-on-certain-inputs.patch b/package/libs/zlib/patches/006-fix-compressor-crash-on-certain-inputs.patch new file mode 100644 index 0000000000..9f37ba5c58 --- /dev/null +++ 
b/package/libs/zlib/patches/006-fix-compressor-crash-on-certain-inputs.patch @@ -0,0 +1,343 @@ +From 5c44459c3b28a9bd3283aaceab7c615f8020c531 Mon Sep 17 00:00:00 2001 +From: Mark Adler +Date: Tue, 17 Apr 2018 22:09:22 -0700 +Subject: [PATCH] Fix a bug that can crash deflate on some input when using + Z_FIXED. + +This bug was reported by Danilo Ramos of Eideticom, Inc. It has +lain in wait 13 years before being found! The bug was introduced +in zlib 1.2.2.2, with the addition of the Z_FIXED option. That +option forces the use of fixed Huffman codes. For rare inputs with +a large number of distant matches, the pending buffer into which +the compressed data is written can overwrite the distance symbol +table which it overlays. That results in corrupted output due to +invalid distances, and can result in out-of-bound accesses, +crashing the application. + +The fix here combines the distance buffer and literal/length +buffers into a single symbol buffer. Now three bytes of pending +buffer space are opened up for each literal or length/distance +pair consumed, instead of the previous two bytes. This assures +that the pending buffer cannot overwrite the symbol table, since +the maximum fixed code compressed length/distance is 31 bits, and +since there are four bytes of pending space for every three bytes +of symbol space. +--- + deflate.c | 74 ++++++++++++++++++++++++++++++++++++++++--------------- + deflate.h | 25 +++++++++---------- + trees.c | 50 +++++++++++-------------------------- + 3 files changed, 79 insertions(+), 70 deletions(-) + +diff --git a/deflate.c b/deflate.c +index 425babc00..19cba873a 100644 +--- a/deflate.c ++++ b/deflate.c +@@ -255,11 +255,6 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy, + int wrap = 1; + static const char my_version[] = ZLIB_VERSION; + +- ushf *overlay; +- /* We overlay pending_buf and d_buf+l_buf. This works since the average +- * output size for (length,distance) codes is <= 24 bits. 
+- */ +- + if (version == Z_NULL || version[0] != my_version[0] || + stream_size != sizeof(z_stream)) { + return Z_VERSION_ERROR; +@@ -329,9 +324,47 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy, + + s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */ + +- overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2); +- s->pending_buf = (uchf *) overlay; +- s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L); ++ /* We overlay pending_buf and sym_buf. This works since the average size ++ * for length/distance pairs over any compressed block is assured to be 31 ++ * bits or less. ++ * ++ * Analysis: The longest fixed codes are a length code of 8 bits plus 5 ++ * extra bits, for lengths 131 to 257. The longest fixed distance codes are ++ * 5 bits plus 13 extra bits, for distances 16385 to 32768. The longest ++ * possible fixed-codes length/distance pair is then 31 bits total. ++ * ++ * sym_buf starts one-fourth of the way into pending_buf. So there are ++ * three bytes in sym_buf for every four bytes in pending_buf. Each symbol ++ * in sym_buf is three bytes -- two for the distance and one for the ++ * literal/length. As each symbol is consumed, the pointer to the next ++ * sym_buf value to read moves forward three bytes. From that symbol, up to ++ * 31 bits are written to pending_buf. The closest the written pending_buf ++ * bits gets to the next sym_buf symbol to read is just before the last ++ * code is written. At that time, 31*(n-2) bits have been written, just ++ * after 24*(n-2) bits have been consumed from sym_buf. sym_buf starts at ++ * 8*n bits into pending_buf. (Note that the symbol buffer fills when n-1 ++ * symbols are written.) The closest the writing gets to what is unread is ++ * then n+14 bits. Here n is lit_bufsize, which is 16384 by default, and ++ * can range from 128 to 32768. 
++ * ++ * Therefore, at a minimum, there are 142 bits of space between what is ++ * written and what is read in the overlain buffers, so the symbols cannot ++ * be overwritten by the compressed data. That space is actually 139 bits, ++ * due to the three-bit fixed-code block header. ++ * ++ * That covers the case where either Z_FIXED is specified, forcing fixed ++ * codes, or when the use of fixed codes is chosen, because that choice ++ * results in a smaller compressed block than dynamic codes. That latter ++ * condition then assures that the above analysis also covers all dynamic ++ * blocks. A dynamic-code block will only be chosen to be emitted if it has ++ * fewer bits than a fixed-code block would for the same set of symbols. ++ * Therefore its average symbol length is assured to be less than 31. So ++ * the compressed data for a dynamic block also cannot overwrite the ++ * symbols from which it is being constructed. ++ */ ++ ++ s->pending_buf = (uchf *) ZALLOC(strm, s->lit_bufsize, 4); ++ s->pending_buf_size = (ulg)s->lit_bufsize * 4; + + if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL || + s->pending_buf == Z_NULL) { +@@ -340,8 +373,12 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy, + deflateEnd (strm); + return Z_MEM_ERROR; + } +- s->d_buf = overlay + s->lit_bufsize/sizeof(ush); +- s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize; ++ s->sym_buf = s->pending_buf + s->lit_bufsize; ++ s->sym_end = (s->lit_bufsize - 1) * 3; ++ /* We avoid equality with lit_bufsize*3 because of wraparound at 64K ++ * on 16 bit machines and because stored blocks are restricted to ++ * 64K-1 bytes. 
++ */ + + s->level = level; + s->strategy = strategy; +@@ -552,7 +589,7 @@ int ZEXPORT deflatePrime (strm, bits, value) + + if (deflateStateCheck(strm)) return Z_STREAM_ERROR; + s = strm->state; +- if ((Bytef *)(s->d_buf) < s->pending_out + ((Buf_size + 7) >> 3)) ++ if (s->sym_buf < s->pending_out + ((Buf_size + 7) >> 3)) + return Z_BUF_ERROR; + do { + put = Buf_size - s->bi_valid; +@@ -1113,7 +1150,6 @@ int ZEXPORT deflateCopy (dest, source) + #else + deflate_state *ds; + deflate_state *ss; +- ushf *overlay; + + + if (deflateStateCheck(source) || dest == Z_NULL) { +@@ -1133,8 +1169,7 @@ int ZEXPORT deflateCopy (dest, source) + ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte)); + ds->prev = (Posf *) ZALLOC(dest, ds->w_size, sizeof(Pos)); + ds->head = (Posf *) ZALLOC(dest, ds->hash_size, sizeof(Pos)); +- overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2); +- ds->pending_buf = (uchf *) overlay; ++ ds->pending_buf = (uchf *) ZALLOC(dest, ds->lit_bufsize, 4); + + if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL || + ds->pending_buf == Z_NULL) { +@@ -1148,8 +1183,7 @@ int ZEXPORT deflateCopy (dest, source) + zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size); + + ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf); +- ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush); +- ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize; ++ ds->sym_buf = ds->pending_buf + ds->lit_bufsize; + + ds->l_desc.dyn_tree = ds->dyn_ltree; + ds->d_desc.dyn_tree = ds->dyn_dtree; +@@ -1925,7 +1959,7 @@ local block_state deflate_fast(s, flush) + FLUSH_BLOCK(s, 1); + return finish_done; + } +- if (s->last_lit) ++ if (s->sym_next) + FLUSH_BLOCK(s, 0); + return block_done; + } +@@ -2056,7 +2090,7 @@ local block_state deflate_slow(s, flush) + FLUSH_BLOCK(s, 1); + return finish_done; + } +- if (s->last_lit) ++ if (s->sym_next) + FLUSH_BLOCK(s, 0); + return block_done; + } +@@ -2131,7 +2165,7 @@ local 
block_state deflate_rle(s, flush) + FLUSH_BLOCK(s, 1); + return finish_done; + } +- if (s->last_lit) ++ if (s->sym_next) + FLUSH_BLOCK(s, 0); + return block_done; + } +@@ -2170,7 +2204,7 @@ local block_state deflate_huff(s, flush) + FLUSH_BLOCK(s, 1); + return finish_done; + } +- if (s->last_lit) ++ if (s->sym_next) + FLUSH_BLOCK(s, 0); + return block_done; + } +diff --git a/deflate.h b/deflate.h +index 23ecdd312..d4cf1a98b 100644 +--- a/deflate.h ++++ b/deflate.h +@@ -217,7 +217,7 @@ typedef struct internal_state { + /* Depth of each subtree used as tie breaker for trees of equal frequency + */ + +- uchf *l_buf; /* buffer for literals or lengths */ ++ uchf *sym_buf; /* buffer for distances and literals/lengths */ + + uInt lit_bufsize; + /* Size of match buffer for literals/lengths. There are 4 reasons for +@@ -239,13 +239,8 @@ typedef struct internal_state { + * - I can't count above 4 + */ + +- uInt last_lit; /* running index in l_buf */ +- +- ushf *d_buf; +- /* Buffer for distances. To simplify the code, d_buf and l_buf have +- * the same number of elements. To use different lengths, an extra flag +- * array would be necessary. 
+- */ ++ uInt sym_next; /* running index in sym_buf */ ++ uInt sym_end; /* symbol table full when sym_next reaches this */ + + ulg opt_len; /* bit length of current block with optimal trees */ + ulg static_len; /* bit length of current block with static trees */ +@@ -325,20 +320,22 @@ void ZLIB_INTERNAL _tr_stored_block OF((deflate_state *s, charf *buf, + + # define _tr_tally_lit(s, c, flush) \ + { uch cc = (c); \ +- s->d_buf[s->last_lit] = 0; \ +- s->l_buf[s->last_lit++] = cc; \ ++ s->sym_buf[s->sym_next++] = 0; \ ++ s->sym_buf[s->sym_next++] = 0; \ ++ s->sym_buf[s->sym_next++] = cc; \ + s->dyn_ltree[cc].Freq++; \ +- flush = (s->last_lit == s->lit_bufsize-1); \ ++ flush = (s->sym_next == s->sym_end); \ + } + # define _tr_tally_dist(s, distance, length, flush) \ + { uch len = (uch)(length); \ + ush dist = (ush)(distance); \ +- s->d_buf[s->last_lit] = dist; \ +- s->l_buf[s->last_lit++] = len; \ ++ s->sym_buf[s->sym_next++] = dist; \ ++ s->sym_buf[s->sym_next++] = dist >> 8; \ ++ s->sym_buf[s->sym_next++] = len; \ + dist--; \ + s->dyn_ltree[_length_code[len]+LITERALS+1].Freq++; \ + s->dyn_dtree[d_code(dist)].Freq++; \ +- flush = (s->last_lit == s->lit_bufsize-1); \ ++ flush = (s->sym_next == s->sym_end); \ + } + #else + # define _tr_tally_lit(s, c, flush) flush = _tr_tally(s, 0, c) +diff --git a/trees.c b/trees.c +index 4f4a65011..decaeb7c3 100644 +--- a/trees.c ++++ b/trees.c +@@ -416,7 +416,7 @@ local void init_block(s) + + s->dyn_ltree[END_BLOCK].Freq = 1; + s->opt_len = s->static_len = 0L; +- s->last_lit = s->matches = 0; ++ s->sym_next = s->matches = 0; + } + + #define SMALLEST 1 +@@ -948,7 +948,7 @@ void ZLIB_INTERNAL _tr_flush_block(s, buf, stored_len, last) + + Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ", + opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len, +- s->last_lit)); ++ s->sym_next / 3)); + + if (static_lenb <= opt_lenb) opt_lenb = static_lenb; + +@@ -1017,8 +1017,9 @@ int ZLIB_INTERNAL _tr_tally (s, dist, lc) + 
unsigned dist; /* distance of matched string */ + unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */ + { +- s->d_buf[s->last_lit] = (ush)dist; +- s->l_buf[s->last_lit++] = (uch)lc; ++ s->sym_buf[s->sym_next++] = dist; ++ s->sym_buf[s->sym_next++] = dist >> 8; ++ s->sym_buf[s->sym_next++] = lc; + if (dist == 0) { + /* lc is the unmatched char */ + s->dyn_ltree[lc].Freq++; +@@ -1033,30 +1034,7 @@ int ZLIB_INTERNAL _tr_tally (s, dist, lc) + s->dyn_ltree[_length_code[lc]+LITERALS+1].Freq++; + s->dyn_dtree[d_code(dist)].Freq++; + } +- +-#ifdef TRUNCATE_BLOCK +- /* Try to guess if it is profitable to stop the current block here */ +- if ((s->last_lit & 0x1fff) == 0 && s->level > 2) { +- /* Compute an upper bound for the compressed length */ +- ulg out_length = (ulg)s->last_lit*8L; +- ulg in_length = (ulg)((long)s->strstart - s->block_start); +- int dcode; +- for (dcode = 0; dcode < D_CODES; dcode++) { +- out_length += (ulg)s->dyn_dtree[dcode].Freq * +- (5L+extra_dbits[dcode]); +- } +- out_length >>= 3; +- Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ", +- s->last_lit, in_length, out_length, +- 100L - out_length*100L/in_length)); +- if (s->matches < s->last_lit/2 && out_length < in_length/2) return 1; +- } +-#endif +- return (s->last_lit == s->lit_bufsize-1); +- /* We avoid equality with lit_bufsize because of wraparound at 64K +- * on 16 bit machines and because stored blocks are restricted to +- * 64K-1 bytes. 
+- */ ++ return (s->sym_next == s->sym_end); + } + + /* =========================================================================== +@@ -1069,13 +1047,14 @@ local void compress_block(s, ltree, dtree) + { + unsigned dist; /* distance of matched string */ + int lc; /* match length or unmatched char (if dist == 0) */ +- unsigned lx = 0; /* running index in l_buf */ ++ unsigned sx = 0; /* running index in sym_buf */ + unsigned code; /* the code to send */ + int extra; /* number of extra bits to send */ + +- if (s->last_lit != 0) do { +- dist = s->d_buf[lx]; +- lc = s->l_buf[lx++]; ++ if (s->sym_next != 0) do { ++ dist = s->sym_buf[sx++] & 0xff; ++ dist += (unsigned)(s->sym_buf[sx++] & 0xff) << 8; ++ lc = s->sym_buf[sx++]; + if (dist == 0) { + send_code(s, lc, ltree); /* send a literal byte */ + Tracecv(isgraph(lc), (stderr," '%c' ", lc)); +@@ -1100,11 +1079,10 @@ local void compress_block(s, ltree, dtree) + } + } /* literal or match pair ? */ + +- /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */ +- Assert((uInt)(s->pending) < s->lit_bufsize + 2*lx, +- "pendingBuf overflow"); ++ /* Check that the overlay between pending_buf and sym_buf is ok: */ ++ Assert(s->pending < s->lit_bufsize + sx, "pendingBuf overflow"); + +- } while (lx < s->last_lit); ++ } while (sx < s->sym_next); + + send_code(s, END_BLOCK, ltree); + } diff --git a/package/network/config/ltq-vdsl-app/files/dsl_control b/package/network/config/ltq-vdsl-app/files/dsl_control index 54b739e2cd..34642dbda5 100644 --- a/package/network/config/ltq-vdsl-app/files/dsl_control +++ b/package/network/config/ltq-vdsl-app/files/dsl_control @@ -95,10 +95,11 @@ tone_vdsl_b="0x1" # B43 tone_adsl_bv="0x81" # B43 + B43c tone_vdsl_bv="0x5" # B43 + V43 -# create ADSL autoboot script. Used for SNR margin tweak +# create DSL autoboot script. 
Used for SNR margin tweak and to set MAC address for vectoring error reports autoboot_script() { echo "[WaitForConfiguration]={ locs 0 $1 +dsmmcs $2 } [WaitForLinkActivate]={ @@ -166,6 +167,12 @@ lowlevel_cfg() { 0" > /tmp/lowlevel.cfg } +get_macaddr() { + local name + config_get name $1 name + [ "$name" = "dsl0" ] && config_get $2 $1 macaddr +} + service_triggers() { procd_add_reload_trigger network } @@ -183,6 +190,7 @@ start_service() { local mode local lowlevel local snr + local macaddr config_load network config_get tone dsl tone @@ -191,6 +199,7 @@ start_service() { config_get xfer_mode dsl xfer_mode config_get line_mode dsl line_mode config_get snr dsl ds_snr_offset + config_foreach get_macaddr device macaddr eval "xtse=\"\${xtse_xdsl_$annex}\"" @@ -289,11 +298,10 @@ start_service() { lowlevel="-l /tmp/lowlevel.cfg" } - [ -z "${snr}" ] || { - # for SNR offset setting - autoboot_script "$snr" - autoboot="-a /tmp/dsl.scr -A /tmp/dsl.scr" - } + [ -z "${snr}" ] && snr=0 + [ -z "${macaddr}" ] && macaddr="00:00:00:00:00:00" + autoboot_script "$snr" "$macaddr" + autoboot="-a /tmp/dsl.scr -A /tmp/dsl.scr" procd_open_instance procd_set_param command /sbin/vdsl_cpe_control \ diff --git a/package/network/config/qosify/Makefile b/package/network/config/qosify/Makefile index 7aac759bcb..d0cff54053 100644 --- a/package/network/config/qosify/Makefile +++ b/package/network/config/qosify/Makefile @@ -11,9 +11,9 @@ include $(INCLUDE_DIR)/kernel.mk PKG_NAME:=qosify PKG_SOURCE_URL=$(PROJECT_GIT)/project/qosify.git PKG_SOURCE_PROTO:=git -PKG_SOURCE_DATE:=2022-03-06 -PKG_SOURCE_VERSION:=f13b67c9a786567df240a8f3f608e2724ddaadba -PKG_MIRROR_HASH:=3d8629e711e46a6be79a46a6394165fd1761687f24b8ed954dc4f16f177cd90f +PKG_SOURCE_DATE:=2022-03-22 +PKG_SOURCE_VERSION:=57c7817f91c2ff2f247b2d7eb8554e861c4aec33 +PKG_MIRROR_HASH:=409f7db13a36334557de861a016a8d9f241070b2bbf6f738e992281b36f41cd4 PKG_RELEASE:=$(AUTORELEASE) PKG_LICENSE:=GPL-2.0 diff --git a/package/network/services/odhcpd/Makefile 
b/package/network/services/odhcpd/Makefile index ec7fbb3601..21c87eaad4 100644 --- a/package/network/services/odhcpd/Makefile +++ b/package/network/services/odhcpd/Makefile @@ -12,9 +12,9 @@ PKG_RELEASE:=$(AUTORELEASE) PKG_SOURCE_PROTO:=git PKG_SOURCE_URL=$(PROJECT_GIT)/project/odhcpd.git -PKG_SOURCE_DATE:=2021-08-11 -PKG_SOURCE_VERSION:=01b4e6046f10e21809c3f380f2d33bf3fe59698d -PKG_MIRROR_HASH:=419bbc1dd159c25852da4abdcb9ffee8d72d12b8b998ff14e343d5f2455d8d2a +PKG_SOURCE_DATE:=2022-03-22 +PKG_SOURCE_VERSION:=860ca900e41c5d0f98cc85e67b39977f6f2cb355 +PKG_MIRROR_HASH:=555712a1e25d197e52808a0d5e42bf0d48a8b61fe7c8aad1a02a7c09f0b8b8a3 PKG_MAINTAINER:=Hans Dedecker PKG_LICENSE:=GPL-2.0 diff --git a/package/network/utils/uqmi/files/lib/netifd/proto/qmi.sh b/package/network/utils/uqmi/files/lib/netifd/proto/qmi.sh index ad577ea317..c2c5fc1eca 100755 --- a/package/network/utils/uqmi/files/lib/netifd/proto/qmi.sh +++ b/package/network/utils/uqmi/files/lib/netifd/proto/qmi.sh @@ -105,10 +105,12 @@ proto_qmi_setup() { } } else - . 
/usr/share/libubox/jshn.sh - json_load "$(uqmi -s -d "$device" --get-pin-status)" 2>&1 | grep -q Failed && - json_load "$(uqmi -s -d "$device" --uim-get-sim-state)" + json_load "$(uqmi -s -d "$device" --get-pin-status)" json_get_var pin1_status pin1_status + if [ -z "$pin1_status" ]; then + json_load "$(uqmi -s -d "$device" --uim-get-sim-state)" + json_get_var pin1_status pin1_status + fi json_get_var pin1_verify_tries pin1_verify_tries case "$pin1_status" in @@ -152,6 +154,7 @@ proto_qmi_setup() { return 1 ;; esac + json_cleanup fi if [ -n "$plmn" ]; then @@ -247,7 +250,8 @@ proto_qmi_setup() { echo "Starting network $interface" - pdptype=$(echo "$pdptype" | awk '{print tolower($0)}') + pdptype="$(echo "$pdptype" | awk '{print tolower($0)}')" + [ "$pdptype" = "ip" -o "$pdptype" = "ipv6" -o "$pdptype" = "ipv4v6" ] || pdptype="ip" if [ "$pdptype" = "ip" ]; then diff --git a/package/system/gpio-cdev/nu801/Makefile b/package/system/gpio-cdev/nu801/Makefile new file mode 100644 index 0000000000..b3fae616cd --- /dev/null +++ b/package/system/gpio-cdev/nu801/Makefile @@ -0,0 +1,39 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +include $(TOPDIR)/rules.mk + +PKG_NAME:=nu801 +PKG_RELEASE:=1 + +PKG_SOURCE_PROTO:=git +PKG_SOURCE_URL:=https://github.com/chunkeey/nu801.git +PKG_SOURCE_VERSION:=d9942c0ceb949080b93366a9431028de3608e535 + +PKG_MAINTAINER:=Christian Lamparter +PKG_LICENSE:=GPL-3.0-or-later +PKG_LICENSE_FILES:=LICENSE + +include $(INCLUDE_DIR)/package.mk +include $(INCLUDE_DIR)/cmake.mk + +define Package/nu801 + SECTION:=utils + CATEGORY:=Utilities + SUBMENU:=Userspace GPIO Drivers + DEPENDS:=@TARGET_x86 +kmod-leds-uleds + KCONFIG:=CONFIG_GPIO_CDEV=y + TITLE:=NU801 LED Driver +endef + +define Package/nu801/description +This package contains a userspace driver to power the NUMEN Tech. NU801 LED Driver. 
+endef + +define Package/nu801/install + $(INSTALL_DIR) $(1)/usr/sbin + $(INSTALL_BIN) $(PKG_BUILD_DIR)/nu801 $(1)/usr/sbin/ + $(INSTALL_DIR) $(1)/etc/init.d + $(INSTALL_BIN) ./files/nu801.init $(1)/etc/init.d/nu801 +endef + +$(eval $(call BuildPackage,nu801)) diff --git a/package/system/gpio-cdev/nu801/files/nu801.init b/package/system/gpio-cdev/nu801/files/nu801.init new file mode 100755 index 0000000000..a29554ae30 --- /dev/null +++ b/package/system/gpio-cdev/nu801/files/nu801.init @@ -0,0 +1,14 @@ +#!/bin/sh /etc/rc.common +# SPDX-License-Identifier: GPL-2.0-or-later + +START=11 + +boot() { + . /lib/functions.sh + /usr/sbin/nu801 "$(board_name)" + + # Because this is a userspace driver, we need to trigger diag.sh after + # we start the driver, but before boot is complete so we blink. + . /etc/diag.sh + set_state preinit_regular +} diff --git a/scripts/mkits.sh b/scripts/mkits.sh index f6699384ee..7bf5681add 100755 --- a/scripts/mkits.sh +++ b/scripts/mkits.sh @@ -137,7 +137,7 @@ fi if [ -n "${ROOTFS}" ]; then dd if="${ROOTFS}" of="${ROOTFS}.pagesync" bs=4096 conv=sync ROOTFS_NODE=" - rootfs-$ROOTFSNUM { + rootfs${REFERENCE_CHAR}$ROOTFSNUM { description = \"${ARCH_UPPER} OpenWrt ${DEVICE} rootfs\"; ${COMPATIBLE_PROP} data = /incbin/(\"${ROOTFS}.pagesync\"); diff --git a/target/imagebuilder/Makefile b/target/imagebuilder/Makefile index 8607a2d709..3bbadc7204 100644 --- a/target/imagebuilder/Makefile +++ b/target/imagebuilder/Makefile @@ -78,7 +78,7 @@ ifneq ($(CONFIG_SIGNATURE_CHECK),) $(CP) -L $(STAGING_DIR_ROOT)/usr/sbin/opkg-key $(PKG_BUILD_DIR)/scripts/ endif - $(CP) $(TOPDIR)/target/linux $(PKG_BUILD_DIR)/target/ + $(CP) -L $(TOPDIR)/target/linux $(PKG_BUILD_DIR)/target/ if [ -d $(TOPDIR)/staging_dir/host/lib/grub ]; then \ $(CP) $(TOPDIR)/staging_dir/host/lib/grub/ $(PKG_BUILD_DIR)/staging_dir/host/lib; \ fi diff --git a/target/linux/apm821xx/patches-5.10/150-ata-sata_dwc_460ex-Fix-crash-due-to-OOB-write.patch 
b/target/linux/apm821xx/patches-5.10/150-ata-sata_dwc_460ex-Fix-crash-due-to-OOB-write.patch new file mode 100644 index 0000000000..c503be9c0d --- /dev/null +++ b/target/linux/apm821xx/patches-5.10/150-ata-sata_dwc_460ex-Fix-crash-due-to-OOB-write.patch @@ -0,0 +1,65 @@ +From ba068938e629eb1a8b423a54405233e685cedb78 Mon Sep 17 00:00:00 2001 +Message-Id: +From: Christian Lamparter +Date: Thu, 17 Mar 2022 21:29:28 +0100 +Subject: [PATCH v1 1/2] ata: sata_dwc_460ex: Fix crash due to OOB write +To: linux-ide@vger.kernel.org +Cc: Damien Le Moal , + Jens Axboe , + Tejun Heo , + Andy Shevchenko + +the driver uses libata's "tag" values from in various arrays. +Since the mentioned patch bumped the ATA_TAG_INTERNAL to 32, +the value of the SATA_DWC_QCMD_MAX needs to be bumped to 33. + +Otherwise ATA_TAG_INTERNAL cause a crash like this: + +| BUG: Kernel NULL pointer dereference at 0x00000000 +| Faulting instruction address: 0xc03ed4b8 +| Oops: Kernel access of bad area, sig: 11 [#1] +| BE PAGE_SIZE=4K PowerPC 44x Platform +| CPU: 0 PID: 362 Comm: scsi_eh_1 Not tainted 5.4.163 #0 +| NIP: c03ed4b8 LR: c03d27e8 CTR: c03ed36c +| REGS: cfa59950 TRAP: 0300 Not tainted (5.4.163) +| MSR: 00021000 CR: 42000222 XER: 00000000 +| DEAR: 00000000 ESR: 00000000 +| GPR00: c03d27e8 cfa59a08 cfa55fe0 00000000 0fa46bc0 [...] +| [..] +| NIP [c03ed4b8] sata_dwc_qc_issue+0x14c/0x254 +| LR [c03d27e8] ata_qc_issue+0x1c8/0x2dc +| Call Trace: +| [cfa59a08] [c003f4e0] __cancel_work_timer+0x124/0x194 (unreliable) +| [cfa59a78] [c03d27e8] ata_qc_issue+0x1c8/0x2dc +| [cfa59a98] [c03d2b3c] ata_exec_internal_sg+0x240/0x524 +| [cfa59b08] [c03d2e98] ata_exec_internal+0x78/0xe0 +| [cfa59b58] [c03d30fc] ata_read_log_page.part.38+0x1dc/0x204 +| [cfa59bc8] [c03d324c] ata_identify_page_supported+0x68/0x130 +| [...] 
+ +this is because sata_dwc_dma_xfer_complete() NULLs the +dma_pending's next neighbour "chan" (a *dma_chan struct) in +this '32' case right here (line ~735): +> hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE; + +Then the next time, a dma gets issued; dma_dwc_xfer_setup() passes +the NULL'd hsdevp->chan to the dmaengine_slave_config() which then +causes the crash. + +Reported-by: ticerex (OpenWrt Forum) +Fixes: 28361c403683c ("libata: add extra internal command") +Cc: stable@kernel.org # 4.18+ +Link: https://forum.openwrt.org/t/my-book-live-duo-reboot-loop/122464 +Signed-off-by: Christian Lamparter +--- +--- a/drivers/ata/sata_dwc_460ex.c ++++ b/drivers/ata/sata_dwc_460ex.c +@@ -145,7 +145,7 @@ struct sata_dwc_device { + #endif + }; + +-#define SATA_DWC_QCMD_MAX 32 ++#define SATA_DWC_QCMD_MAX 33 + + struct sata_dwc_device_port { + struct sata_dwc_device *hsdev; diff --git a/target/linux/ath79/dts/ar7241_ubnt_unifi-ap-outdoor-plus.dts b/target/linux/ath79/dts/ar7241_ubnt_unifi-ap-outdoor-plus.dts index 9ccbdbd450..b5d7af421d 100644 --- a/target/linux/ath79/dts/ar7241_ubnt_unifi-ap-outdoor-plus.dts +++ b/target/linux/ath79/dts/ar7241_ubnt_unifi-ap-outdoor-plus.dts @@ -7,6 +7,7 @@ model = "Ubiquiti UniFi AP Outdoor+"; aliases { + label-mac-device = &wifi; led-boot = &led_white; led-failsafe = &led_white; led-running = &led_blue; diff --git a/target/linux/ath79/dts/ar7241_ubnt_unifi.dts b/target/linux/ath79/dts/ar7241_ubnt_unifi.dts index e8d903b305..c43867a3eb 100644 --- a/target/linux/ath79/dts/ar7241_ubnt_unifi.dts +++ b/target/linux/ath79/dts/ar7241_ubnt_unifi.dts @@ -4,9 +4,10 @@ / { compatible = "ubnt,unifi", "qca,ar7241"; - model = "Ubiquiti UniFi"; + model = "Ubiquiti UniFi AP"; aliases { + label-mac-device = ð0; led-boot = &led_dome_green; led-failsafe = &led_dome_green; led-running = &led_dome_green; diff --git a/target/linux/ath79/dts/ar7241_ubnt_unifi.dtsi b/target/linux/ath79/dts/ar7241_ubnt_unifi.dtsi index 54f133ab19..ac8c0cc709 100644 --- 
a/target/linux/ath79/dts/ar7241_ubnt_unifi.dtsi +++ b/target/linux/ath79/dts/ar7241_ubnt_unifi.dtsi @@ -6,10 +6,6 @@ #include / { - aliases { - label-mac-device = &wifi; - }; - extosc: ref { compatible = "fixed-clock"; #clock-cells = <0>; diff --git a/target/linux/ath79/dts/ar9344_ocedo_raccoon.dts b/target/linux/ath79/dts/ar9344_ocedo_raccoon.dts index e5e2067846..0bbeb2b533 100644 --- a/target/linux/ath79/dts/ar9344_ocedo_raccoon.dts +++ b/target/linux/ath79/dts/ar9344_ocedo_raccoon.dts @@ -152,19 +152,30 @@ phy0: ethernet-phy@0 { reg = <0>; + eee-broken-100tx; + eee-broken-1000t; }; }; ð0 { status = "okay"; - pll-data = <0x06000000 0x00000101 0x00001313>; + pll-data = <0x02000000 0x00000101 0x00001313>; nvmem-cells = <&macaddr_art_0>; nvmem-cell-names = "mac-address"; phy-mode = "rgmii-id"; phy-handle = <&phy0>; + + gmac-config { + device = <&gmac>; + rgmii-gmac0 = <1>; + rxdv-delay = <3>; + rxd-delay = <3>; + txen-delay = <0>; + txd-delay = <0>; + }; }; &art { diff --git a/target/linux/ath79/image/generic-tp-link.mk b/target/linux/ath79/image/generic-tp-link.mk index 6536f90b37..37eda98435 100644 --- a/target/linux/ath79/image/generic-tp-link.mk +++ b/target/linux/ath79/image/generic-tp-link.mk @@ -712,6 +712,7 @@ define Device/tplink_tl-wr1043nd-v4 DEVICE_VARIANT := v4 DEVICE_PACKAGES := kmod-usb2 kmod-usb-ledtrig-usbport TPLINK_HWID := 0x10430004 + TPLINK_HWREV := 0x1 TPLINK_BOARD_ID := TLWR1043NDV4 SUPPORTED_DEVICES += tl-wr1043nd-v4 endef diff --git a/target/linux/ath79/image/generic-ubnt.mk b/target/linux/ath79/image/generic-ubnt.mk index 9470adf48b..0b613df62b 100644 --- a/target/linux/ath79/image/generic-ubnt.mk +++ b/target/linux/ath79/image/generic-ubnt.mk @@ -419,7 +419,7 @@ TARGET_DEVICES += ubnt_routerstation-pro define Device/ubnt_unifi $(Device/ubnt-bz) - DEVICE_MODEL := UniFi + DEVICE_MODEL := UniFi AP SUPPORTED_DEVICES += unifi endef TARGET_DEVICES += ubnt_unifi diff --git 
a/target/linux/generic/backport-5.10/738-v5.14-01-net-dsa-qca8k-fix-an-endian-bug-in-qca8k-get-ethtool.patch b/target/linux/generic/backport-5.10/738-v5.14-01-net-dsa-qca8k-fix-an-endian-bug-in-qca8k-get-ethtool.patch index 429d5d50f0..aed97d0549 100644 --- a/target/linux/generic/backport-5.10/738-v5.14-01-net-dsa-qca8k-fix-an-endian-bug-in-qca8k-get-ethtool.patch +++ b/target/linux/generic/backport-5.10/738-v5.14-01-net-dsa-qca8k-fix-an-endian-bug-in-qca8k-get-ethtool.patch @@ -1,4 +1,4 @@ -aFrom aa3d020b22cb844ab7bdbb9e5d861a64666e2b74 Mon Sep 17 00:00:00 2001 +From aa3d020b22cb844ab7bdbb9e5d861a64666e2b74 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 9 Jun 2021 12:52:12 +0300 Subject: [PATCH] net: dsa: qca8k: fix an endian bug in diff --git a/target/linux/generic/backport-5.10/774-v5.15-1-igc-remove-_I_PHY_ID-checking.patch b/target/linux/generic/backport-5.10/774-v5.15-1-igc-remove-_I_PHY_ID-checking.patch new file mode 100644 index 0000000000..d7df0685dd --- /dev/null +++ b/target/linux/generic/backport-5.10/774-v5.15-1-igc-remove-_I_PHY_ID-checking.patch @@ -0,0 +1,73 @@ +From 7c496de538eebd8212dc2a3c9a468386b264d0d4 Mon Sep 17 00:00:00 2001 +From: Sasha Neftin +Date: Wed, 7 Jul 2021 08:14:40 +0300 +Subject: igc: Remove _I_PHY_ID checking + +i225 devices have only one PHY vendor. There is no point checking +_I_PHY_ID during the link establishment and auto-negotiation process. +This patch comes to clean up these pointless checkings. 
+ +Signed-off-by: Sasha Neftin +Tested-by: Dvora Fuxbrumer +Signed-off-by: Tony Nguyen +--- + drivers/net/ethernet/intel/igc/igc_base.c | 10 +--------- + drivers/net/ethernet/intel/igc/igc_main.c | 3 +-- + drivers/net/ethernet/intel/igc/igc_phy.c | 6 ++---- + 3 files changed, 4 insertions(+), 15 deletions(-) + +(limited to 'drivers/net/ethernet/intel/igc') + +--- a/drivers/net/ethernet/intel/igc/igc_base.c ++++ b/drivers/net/ethernet/intel/igc/igc_base.c +@@ -187,15 +187,7 @@ static s32 igc_init_phy_params_base(stru + + igc_check_for_copper_link(hw); + +- /* Verify phy id and set remaining function pointers */ +- switch (phy->id) { +- case I225_I_PHY_ID: +- phy->type = igc_phy_i225; +- break; +- default: +- ret_val = -IGC_ERR_PHY; +- goto out; +- } ++ phy->type = igc_phy_i225; + + out: + return ret_val; +--- a/drivers/net/ethernet/intel/igc/igc_main.c ++++ b/drivers/net/ethernet/intel/igc/igc_main.c +@@ -4189,8 +4189,7 @@ bool igc_has_link(struct igc_adapter *ad + break; + } + +- if (hw->mac.type == igc_i225 && +- hw->phy.id == I225_I_PHY_ID) { ++ if (hw->mac.type == igc_i225) { + if (!netif_carrier_ok(adapter->netdev)) { + adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; + } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) { +--- a/drivers/net/ethernet/intel/igc/igc_phy.c ++++ b/drivers/net/ethernet/intel/igc/igc_phy.c +@@ -249,8 +249,7 @@ static s32 igc_phy_setup_autoneg(struct + return ret_val; + } + +- if ((phy->autoneg_mask & ADVERTISE_2500_FULL) && +- hw->phy.id == I225_I_PHY_ID) { ++ if (phy->autoneg_mask & ADVERTISE_2500_FULL) { + /* Read the MULTI GBT AN Control Register - reg 7.32 */ + ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK << + MMD_DEVADDR_SHIFT) | +@@ -390,8 +389,7 @@ static s32 igc_phy_setup_autoneg(struct + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + +- if ((phy->autoneg_mask & ADVERTISE_2500_FULL) && +- hw->phy.id == I225_I_PHY_ID) ++ if (phy->autoneg_mask & ADVERTISE_2500_FULL) + ret_val = 
phy->ops.write_reg(hw, + (STANDARD_AN_REG_MASK << + MMD_DEVADDR_SHIFT) | diff --git a/target/linux/generic/backport-5.10/774-v5.15-2-igc-remove-phy-type-checking.patch b/target/linux/generic/backport-5.10/774-v5.15-2-igc-remove-phy-type-checking.patch new file mode 100644 index 0000000000..ad4d1bb0dc --- /dev/null +++ b/target/linux/generic/backport-5.10/774-v5.15-2-igc-remove-phy-type-checking.patch @@ -0,0 +1,43 @@ +From 47bca7de6a4fb8dcb564c7ca14d885c91ed19e03 Mon Sep 17 00:00:00 2001 +From: Sasha Neftin +Date: Sat, 10 Jul 2021 20:57:50 +0300 +Subject: igc: Remove phy->type checking + +i225 devices have only one phy->type: copper. There is no point checking +phy->type during the igc_has_link method from the watchdog that +invoked every 2 seconds. +This patch comes to clean up these pointless checkings. + +Signed-off-by: Sasha Neftin +Tested-by: Dvora Fuxbrumer +Signed-off-by: Tony Nguyen +--- + drivers/net/ethernet/intel/igc/igc_main.c | 15 ++++----------- + 1 file changed, 4 insertions(+), 11 deletions(-) + +(limited to 'drivers/net/ethernet/intel/igc') + +--- a/drivers/net/ethernet/intel/igc/igc_main.c ++++ b/drivers/net/ethernet/intel/igc/igc_main.c +@@ -4177,17 +4177,10 @@ bool igc_has_link(struct igc_adapter *ad + * false until the igc_check_for_link establishes link + * for copper adapters ONLY + */ +- switch (hw->phy.media_type) { +- case igc_media_type_copper: +- if (!hw->mac.get_link_status) +- return true; +- hw->mac.ops.check_for_link(hw); +- link_active = !hw->mac.get_link_status; +- break; +- default: +- case igc_media_type_unknown: +- break; +- } ++ if (!hw->mac.get_link_status) ++ return true; ++ hw->mac.ops.check_for_link(hw); ++ link_active = !hw->mac.get_link_status; + + if (hw->mac.type == igc_i225) { + if (!netif_carrier_ok(adapter->netdev)) { diff --git a/target/linux/bcm27xx/patches-5.10/950-0542-net-usb-r8152-Provide-missing-documentation-for-some.patch 
b/target/linux/generic/backport-5.10/780-v5.11-net-usb-r8152-Provide-missing-documentation-for-some.patch similarity index 100% rename from target/linux/bcm27xx/patches-5.10/950-0542-net-usb-r8152-Provide-missing-documentation-for-some.patch rename to target/linux/generic/backport-5.10/780-v5.11-net-usb-r8152-Provide-missing-documentation-for-some.patch diff --git a/target/linux/bcm27xx/patches-5.10/950-0543-net-usb-r8152-Fix-a-couple-of-spelling-errors-in-fw_.patch b/target/linux/generic/backport-5.10/781-v5.11-net-usb-r8152-Fix-a-couple-of-spelling-errors-in-fw_.patch similarity index 100% rename from target/linux/bcm27xx/patches-5.10/950-0543-net-usb-r8152-Fix-a-couple-of-spelling-errors-in-fw_.patch rename to target/linux/generic/backport-5.10/781-v5.11-net-usb-r8152-Fix-a-couple-of-spelling-errors-in-fw_.patch diff --git a/target/linux/bcm27xx/patches-5.10/950-0544-net-usb-r8153_ecm-support-ECM-mode-for-RTL8153.patch b/target/linux/generic/backport-5.10/782-v5.11-net-usb-r8153_ecm-support-ECM-mode-for-RTL8153.patch similarity index 100% rename from target/linux/bcm27xx/patches-5.10/950-0544-net-usb-r8153_ecm-support-ECM-mode-for-RTL8153.patch rename to target/linux/generic/backport-5.10/782-v5.11-net-usb-r8153_ecm-support-ECM-mode-for-RTL8153.patch diff --git a/target/linux/bcm27xx/patches-5.10/950-0545-net-usb-r8152-use-new-tasklet-API.patch b/target/linux/generic/backport-5.10/783-v5.12-net-usb-r8152-use-new-tasklet-API.patch similarity index 100% rename from target/linux/bcm27xx/patches-5.10/950-0545-net-usb-r8152-use-new-tasklet-API.patch rename to target/linux/generic/backport-5.10/783-v5.12-net-usb-r8152-use-new-tasklet-API.patch diff --git a/target/linux/bcm27xx/patches-5.10/950-0546-r8152-replace-several-functions-about-phy-patch-requ.patch b/target/linux/generic/backport-5.10/784-v5.12-r8152-replace-several-functions-about-phy-patch-requ.patch similarity index 100% rename from 
target/linux/bcm27xx/patches-5.10/950-0546-r8152-replace-several-functions-about-phy-patch-requ.patch rename to target/linux/generic/backport-5.10/784-v5.12-r8152-replace-several-functions-about-phy-patch-requ.patch diff --git a/target/linux/bcm27xx/patches-5.10/950-0547-r8152-adjust-the-flow-of-power-cut-for-RTL8153B.patch b/target/linux/generic/backport-5.10/785-v5.12-r8152-adjust-the-flow-of-power-cut-for-RTL8153B.patch similarity index 100% rename from target/linux/bcm27xx/patches-5.10/950-0547-r8152-adjust-the-flow-of-power-cut-for-RTL8153B.patch rename to target/linux/generic/backport-5.10/785-v5.12-r8152-adjust-the-flow-of-power-cut-for-RTL8153B.patch diff --git a/target/linux/bcm27xx/patches-5.10/950-0548-r8152-enable-U1-U2-for-USB_SPEED_SUPER.patch b/target/linux/generic/backport-5.10/786-v5.12-r8152-enable-U1-U2-for-USB_SPEED_SUPER.patch similarity index 100% rename from target/linux/bcm27xx/patches-5.10/950-0548-r8152-enable-U1-U2-for-USB_SPEED_SUPER.patch rename to target/linux/generic/backport-5.10/786-v5.12-r8152-enable-U1-U2-for-USB_SPEED_SUPER.patch diff --git a/target/linux/bcm27xx/patches-5.10/950-0549-r8152-check-if-the-pointer-of-the-function-exists.patch b/target/linux/generic/backport-5.10/787-v5.12-r8152-check-if-the-pointer-of-the-function-exists.patch similarity index 100% rename from target/linux/bcm27xx/patches-5.10/950-0549-r8152-check-if-the-pointer-of-the-function-exists.patch rename to target/linux/generic/backport-5.10/787-v5.12-r8152-check-if-the-pointer-of-the-function-exists.patch diff --git a/target/linux/bcm27xx/patches-5.10/950-0550-r8152-replace-netif_err-with-dev_err.patch b/target/linux/generic/backport-5.10/788-v5.12-r8152-replace-netif_err-with-dev_err.patch similarity index 100% rename from target/linux/bcm27xx/patches-5.10/950-0550-r8152-replace-netif_err-with-dev_err.patch rename to target/linux/generic/backport-5.10/788-v5.12-r8152-replace-netif_err-with-dev_err.patch diff --git 
a/target/linux/bcm27xx/patches-5.10/950-0551-r8152-spilt-rtl_set_eee_plus-and-r8153b_green_en.patch b/target/linux/generic/backport-5.10/789-v5.12-r8152-spilt-rtl_set_eee_plus-and-r8153b_green_en.patch similarity index 100% rename from target/linux/bcm27xx/patches-5.10/950-0551-r8152-spilt-rtl_set_eee_plus-and-r8153b_green_en.patch rename to target/linux/generic/backport-5.10/789-v5.12-r8152-spilt-rtl_set_eee_plus-and-r8153b_green_en.patch diff --git a/target/linux/bcm27xx/patches-5.10/950-0552-r8152-set-inter-fram-gap-time-depending-on-speed.patch b/target/linux/generic/backport-5.10/790-v5.13-r8152-set-inter-fram-gap-time-depending-on-speed.patch similarity index 100% rename from target/linux/bcm27xx/patches-5.10/950-0552-r8152-set-inter-fram-gap-time-depending-on-speed.patch rename to target/linux/generic/backport-5.10/790-v5.13-r8152-set-inter-fram-gap-time-depending-on-speed.patch diff --git a/target/linux/bcm27xx/patches-5.10/950-0553-r8152-adjust-rtl8152_check_firmware-function.patch b/target/linux/generic/backport-5.10/791-v5.13-r8152-adjust-rtl8152_check_firmware-function.patch similarity index 100% rename from target/linux/bcm27xx/patches-5.10/950-0553-r8152-adjust-rtl8152_check_firmware-function.patch rename to target/linux/generic/backport-5.10/791-v5.13-r8152-adjust-rtl8152_check_firmware-function.patch diff --git a/target/linux/bcm27xx/patches-5.10/950-0554-r8152-add-help-function-to-change-mtu.patch b/target/linux/generic/backport-5.10/792-v5.13-r8152-add-help-function-to-change-mtu.patch similarity index 100% rename from target/linux/bcm27xx/patches-5.10/950-0554-r8152-add-help-function-to-change-mtu.patch rename to target/linux/generic/backport-5.10/792-v5.13-r8152-add-help-function-to-change-mtu.patch diff --git a/target/linux/bcm27xx/patches-5.10/950-0555-r8152-support-new-chips.patch b/target/linux/generic/backport-5.10/793-v5.13-r8152-support-new-chips.patch similarity index 100% rename from 
target/linux/bcm27xx/patches-5.10/950-0555-r8152-support-new-chips.patch rename to target/linux/generic/backport-5.10/793-v5.13-r8152-support-new-chips.patch diff --git a/target/linux/bcm27xx/patches-5.10/950-0556-r8152-support-PHY-firmware-for-RTL8156-series.patch b/target/linux/generic/backport-5.10/794-v5.13-r8152-support-PHY-firmware-for-RTL8156-series.patch similarity index 100% rename from target/linux/bcm27xx/patches-5.10/950-0556-r8152-support-PHY-firmware-for-RTL8156-series.patch rename to target/linux/generic/backport-5.10/794-v5.13-r8152-support-PHY-firmware-for-RTL8156-series.patch diff --git a/target/linux/bcm27xx/patches-5.10/950-0557-r8152-search-the-configuration-of-vendor-mode.patch b/target/linux/generic/backport-5.10/795-v5.13-r8152-search-the-configuration-of-vendor-mode.patch similarity index 100% rename from target/linux/bcm27xx/patches-5.10/950-0557-r8152-search-the-configuration-of-vendor-mode.patch rename to target/linux/generic/backport-5.10/795-v5.13-r8152-search-the-configuration-of-vendor-mode.patch diff --git a/target/linux/generic/config-5.10 b/target/linux/generic/config-5.10 index 256338ea08..5bdf687d54 100644 --- a/target/linux/generic/config-5.10 +++ b/target/linux/generic/config-5.10 @@ -419,6 +419,7 @@ CONFIG_ARM_GIC_MAX_NR=1 # CONFIG_ASUS_WIRELESS is not set # CONFIG_ASYMMETRIC_KEY_TYPE is not set # CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE is not set +# CONFIG_ASYMMETRIC_TPM_KEY_SUBTYPE is not set # CONFIG_ASYNC_RAID6_TEST is not set # CONFIG_ASYNC_TX_DMA is not set # CONFIG_AT76C50X_USB is not set @@ -2070,7 +2071,13 @@ CONFIG_GENERIC_VDSO_TIME_NS=y # CONFIG_GLOB_SELFTEST is not set # CONFIG_GNSS is not set # CONFIG_GOLDFISH is not set +# CONFIG_GOOGLE_COREBOOT_TABLE is not set # CONFIG_GOOGLE_FIRMWARE is not set +# CONFIG_GOOGLE_FRAMEBUFFER_COREBOOT is not set +# CONFIG_GOOGLE_MEMCONSOLE_COREBOOT is not set +# CONFIG_GOOGLE_MEMCONSOLE_X86_LEGACY is not set +# CONFIG_GOOGLE_SMI is not set +# CONFIG_GOOGLE_VPD is not set # 
CONFIG_GP2AP002 is not set # CONFIG_GP2AP020A00F is not set # CONFIG_GPD_POCKET_FAN is not set @@ -4621,7 +4628,7 @@ CONFIG_PROC_SYSCTL=y # CONFIG_PSTORE is not set # CONFIG_PSTORE_842_COMPRESS is not set # CONFIG_PSTORE_COMPRESS is not set -# CONFIG_PSTORE_COMPRESS_DEFAULT="deflate" +# CONFIG_PSTORE_COMPRESS_DEFAULT is not set # CONFIG_PSTORE_CONSOLE is not set # CONFIG_PSTORE_DEFLATE_COMPRESS is not set # CONFIG_PSTORE_DEFLATE_COMPRESS_DEFAULT is not set diff --git a/target/linux/generic/files/block/partitions/fit.c b/target/linux/generic/files/block/partitions/fit.c index fa73e64af8..75516af493 100644 --- a/target/linux/generic/files/block/partitions/fit.c +++ b/target/linux/generic/files/block/partitions/fit.c @@ -87,6 +87,12 @@ int parse_fit_partitions(struct parsed_partitions *state, u64 fit_start_sector, config_description_len, config_loadables_len; sector_t start_sect, nr_sects; size_t label_min; + struct device_node *np = NULL; + const char *bootconf; + const char *loadable; + const char *select_rootfs = NULL; + bool found; + int loadables_rem_len, loadable_len; if (fit_start_sector % (1<<(PAGE_SHIFT - SECTOR_SHIFT))) return -ERANGE; @@ -115,7 +121,6 @@ int parse_fit_partitions(struct parsed_partitions *state, u64 fit_start_sector, dsectors = (dsectors>sectors)?sectors:dsectors; dsize = dsectors << SECTOR_SHIFT; - size = fdt_totalsize(init_fit); /* silently skip non-external-data legacy FIT images */ @@ -135,6 +140,12 @@ int parse_fit_partitions(struct parsed_partitions *state, u64 fit_start_sector, if (!fit) return -ENOMEM; + np = of_find_node_by_path("/chosen"); + if (np) + bootconf = of_get_property(np, "bootconf", NULL); + else + bootconf = NULL; + config = fdt_path_offset(fit, FIT_CONFS_PATH); if (config < 0) { printk(KERN_ERR "FIT: Cannot find %s node: %d\n", FIT_CONFS_PATH, images); @@ -144,15 +155,15 @@ int parse_fit_partitions(struct parsed_partitions *state, u64 fit_start_sector, config_default = fdt_getprop(fit, config, FIT_DEFAULT_PROP, 
&config_default_len); - if (!config_default) { + if (!config_default && !bootconf) { printk(KERN_ERR "FIT: Cannot find default configuration\n"); ret = -ENOENT; goto ret_out; } - node = fdt_subnode_offset(fit, config, config_default); + node = fdt_subnode_offset(fit, config, bootconf?:config_default); if (node < 0) { - printk(KERN_ERR "FIT: Cannot find %s node: %d\n", config_default, node); + printk(KERN_ERR "FIT: Cannot find %s node: %d\n", bootconf?:config_default, node); ret = -ENOENT; goto ret_out; } @@ -160,9 +171,16 @@ int parse_fit_partitions(struct parsed_partitions *state, u64 fit_start_sector, config_description = fdt_getprop(fit, node, FIT_DESC_PROP, &config_description_len); config_loadables = fdt_getprop(fit, node, FIT_LOADABLE_PROP, &config_loadables_len); - printk(KERN_DEBUG "FIT: Default configuration: \"%s\"%s%s%s\n", config_default, + printk(KERN_DEBUG "FIT: %s configuration: \"%s\"%s%s%s\n", + bootconf?"Selected":"Default", bootconf?:config_default, config_description?" 
(":"", config_description?:"", config_description?")":""); + if (!config_loadables || !config_loadables_len) { + printk(KERN_ERR "FIT: No loadables configured in \"%s\"\n", bootconf?:config_default); + ret = -ENOENT; + goto ret_out; + } + images = fdt_path_offset(fit, FIT_IMAGES_PATH); if (images < 0) { printk(KERN_ERR "FIT: Cannot find %s node: %d\n", FIT_IMAGES_PATH, images); @@ -199,6 +217,22 @@ int parse_fit_partitions(struct parsed_partitions *state, u64 fit_start_sector, if (strcmp(image_type, FIT_FILESYSTEM_PROP)) continue; + /* check if sub-image is part of configured loadables */ + found = false; + loadable = config_loadables; + loadables_rem_len = config_loadables_len; + while (loadables_rem_len > 1) { + loadable_len = strnlen(loadable, loadables_rem_len - 1) + 1; + loadables_rem_len -= loadable_len; + if (!strncmp(image_name, loadable, loadable_len)) { + found = true; + break; + } + loadable += loadable_len; + } + if (!found) + continue; + if (image_pos & ((1 << PAGE_SHIFT)-1)) { printk(KERN_ERR "FIT: image %s start not aligned to page boundaries, skipping\n", image_name); continue; @@ -219,7 +253,8 @@ int parse_fit_partitions(struct parsed_partitions *state, u64 fit_start_sector, } put_partition(state, ++(*slot), fit_start_sector + start_sect, nr_sects); - state->parts[*slot].flags = 0; + state->parts[*slot].flags = ADDPART_FLAG_READONLY; + state->parts[*slot].has_info = true; info = &state->parts[*slot].info; label_min = min_t(int, sizeof(info->volname) - 1, image_name_len); @@ -229,14 +264,16 @@ int parse_fit_partitions(struct parsed_partitions *state, u64 fit_start_sector, snprintf(tmp, sizeof(tmp), "(%s)", info->volname); strlcat(state->pp_buf, tmp, PAGE_SIZE); - state->parts[*slot].has_info = true; - state->parts[*slot].flags |= ADDPART_FLAG_READONLY; - if (config_loadables && !strcmp(image_name, config_loadables)) { - printk(KERN_DEBUG "FIT: selecting configured loadable \"%s\" to be root filesystem\n", image_name); + /* Mark first loadable listed 
to be mounted as rootfs */ + if (!strcmp(image_name, config_loadables)) { + select_rootfs = image_name; state->parts[*slot].flags |= ADDPART_FLAG_ROOTDEV; } } + if (select_rootfs) + printk(KERN_DEBUG "FIT: selecting configured loadable \"%s\" to be root filesystem\n", select_rootfs); + if (add_remain && (imgmaxsect + MIN_FREE_SECT) < dsectors) { put_partition(state, ++(*slot), fit_start_sector + imgmaxsect, dsectors - imgmaxsect); state->parts[*slot].flags = 0; diff --git a/target/linux/generic/files/drivers/mtd/mtdsplit/mtdsplit_fit.c b/target/linux/generic/files/drivers/mtd/mtdsplit/mtdsplit_fit.c index 3230d859b0..d8fb74ea38 100644 --- a/target/linux/generic/files/drivers/mtd/mtdsplit/mtdsplit_fit.c +++ b/target/linux/generic/files/drivers/mtd/mtdsplit/mtdsplit_fit.c @@ -256,9 +256,11 @@ mtdsplit_fit_parse(struct mtd_info *mtd, * last external data refernced. */ if (fit_size > 0x1000) { + enum mtdsplit_part_type type; + /* Search for the rootfs partition after the FIT image */ ret = mtd_find_rootfs_from(mtd, fit_offset + fit_size, mtd->size, - &rootfs_offset, NULL); + &rootfs_offset, &type); if (ret) { pr_info("no rootfs found after FIT image in \"%s\"\n", mtd->name); @@ -275,7 +277,10 @@ mtdsplit_fit_parse(struct mtd_info *mtd, parts[0].offset = fit_offset; parts[0].size = mtd_rounddown_to_eb(fit_size, mtd) + mtd->erasesize; - parts[1].name = ROOTFS_PART_NAME; + if (type == MTDSPLIT_PART_TYPE_UBI) + parts[1].name = UBI_PART_NAME; + else + parts[1].name = ROOTFS_PART_NAME; parts[1].offset = rootfs_offset; parts[1].size = rootfs_size; diff --git a/target/linux/generic/files/drivers/mtd/nand/mtk_bmt.c b/target/linux/generic/files/drivers/mtd/nand/mtk_bmt.c index 7c06ee01f0..f1df4a11f1 100644 --- a/target/linux/generic/files/drivers/mtd/nand/mtk_bmt.c +++ b/target/linux/generic/files/drivers/mtd/nand/mtk_bmt.c @@ -1,7 +1,7 @@ /* * Copyright (c) 2017 MediaTek Inc. 
* Author: Xiangsheng Hou - * Copyright (c) 2020 Felix Fietkau + * Copyright (c) 2020-2022 Felix Fietkau * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -13,135 +13,16 @@ * GNU General Public License for more details. */ -#include -#include -#include -#include -#include -#include -#include #include -#include +#include +#include #include +#include "mtk_bmt.h" -#define MAIN_SIGNATURE_OFFSET 0 -#define OOB_SIGNATURE_OFFSET 1 -#define BBPOOL_RATIO 2 - -#define BBT_LOG(fmt, ...) pr_debug("[BBT][%s|%d] "fmt"\n", __func__, __LINE__, ##__VA_ARGS__) - -/* Maximum 8k blocks */ -#define BB_TABLE_MAX bmtd.table_size -#define BMT_TABLE_MAX (BB_TABLE_MAX * BBPOOL_RATIO / 100) -#define BMT_TBL_DEF_VAL 0x0 - -struct mtk_bmt_ops { - char *sig; - unsigned int sig_len; - int (*init)(struct device_node *np); - bool (*remap_block)(u16 block, u16 mapped_block, int copy_len); - void (*unmap_block)(u16 block); - u16 (*get_mapping_block)(int block); - int (*debug)(void *data, u64 val); -}; - -struct bbbt { - char signature[3]; - /* This version is used to distinguish the legacy and new algorithm */ -#define BBMT_VERSION 2 - unsigned char version; - /* Below 2 tables will be written in SLC */ - u16 bb_tbl[]; -}; - -struct bbmt { - u16 block; -#define NO_MAPPED 0 -#define NORMAL_MAPPED 1 -#define BMT_MAPPED 2 - u16 mapped; -}; - -static struct bmt_desc { - struct mtd_info *mtd; - - int (*_read_oob) (struct mtd_info *mtd, loff_t from, - struct mtd_oob_ops *ops); - int (*_write_oob) (struct mtd_info *mtd, loff_t to, - struct mtd_oob_ops *ops); - int (*_erase) (struct mtd_info *mtd, struct erase_info *instr); - int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs); - int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs); - - const struct mtk_bmt_ops *ops; - - struct bbbt *bbt; - - struct dentry *debugfs_dir; - - u32 table_size; - u32 pg_size; - u32 blk_size; - u16 pg_shift; - u16 blk_shift; - /* 
bbt logical address */ - u16 pool_lba; - /* bbt physical address */ - u16 pool_pba; - /* Maximum count of bad blocks that the vendor guaranteed */ - u16 bb_max; - /* Total blocks of the Nand Chip */ - u16 total_blks; - /* The block(n) BMT is located at (bmt_tbl[n]) */ - u16 bmt_blk_idx; - /* How many pages needs to store 'struct bbbt' */ - u32 bmt_pgs; - - const __be32 *remap_range; - int remap_range_len; - - /* to compensate for driver level remapping */ - u8 oob_offset; -} bmtd = {0}; - -static unsigned char *nand_bbt_buf; -static unsigned char *nand_data_buf; - -/* -------- Unit conversions -------- */ -static inline u32 blk_pg(u16 block) -{ - return (u32)(block << (bmtd.blk_shift - bmtd.pg_shift)); -} +struct bmt_desc bmtd = {}; /* -------- Nand operations wrapper -------- */ -static inline int -bbt_nand_read(u32 page, unsigned char *dat, int dat_len, - unsigned char *fdm, int fdm_len) -{ - struct mtd_oob_ops ops = { - .mode = MTD_OPS_PLACE_OOB, - .ooboffs = bmtd.oob_offset, - .oobbuf = fdm, - .ooblen = fdm_len, - .datbuf = dat, - .len = dat_len, - }; - - return bmtd._read_oob(bmtd.mtd, page << bmtd.pg_shift, &ops); -} - -static inline int bbt_nand_erase(u16 block) -{ - struct mtd_info *mtd = bmtd.mtd; - struct erase_info instr = { - .addr = (loff_t)block << bmtd.blk_shift, - .len = bmtd.blk_size, - }; - - return bmtd._erase(mtd, &instr); -} - -static inline int bbt_nand_copy(u16 dest_blk, u16 src_blk, loff_t max_offset) +int bbt_nand_copy(u16 dest_blk, u16 src_blk, loff_t max_offset) { int pages = bmtd.blk_size >> bmtd.pg_shift; loff_t src = (loff_t)src_blk << bmtd.blk_shift; @@ -155,13 +36,13 @@ static inline int bbt_nand_copy(u16 dest_blk, u16 src_blk, loff_t max_offset) .mode = MTD_OPS_PLACE_OOB, .oobbuf = oob, .ooblen = min_t(int, bmtd.mtd->oobsize / pages, sizeof(oob)), - .datbuf = nand_data_buf, + .datbuf = bmtd.data_buf, .len = bmtd.pg_size, }; struct mtd_oob_ops wr_ops = { .mode = MTD_OPS_PLACE_OOB, .oobbuf = oob, - .datbuf = nand_data_buf, + .datbuf = 
bmtd.data_buf, .len = bmtd.pg_size, }; @@ -187,289 +68,7 @@ static inline int bbt_nand_copy(u16 dest_blk, u16 src_blk, loff_t max_offset) } /* -------- Bad Blocks Management -------- */ -static inline struct bbmt *bmt_tbl(struct bbbt *bbbt) -{ - return (struct bbmt *)&bbbt->bb_tbl[bmtd.table_size]; -} - -static int -read_bmt(u16 block, unsigned char *dat, unsigned char *fdm, int fdm_len) -{ - u32 len = bmtd.bmt_pgs << bmtd.pg_shift; - - return bbt_nand_read(blk_pg(block), dat, len, fdm, fdm_len); -} - -static int write_bmt(u16 block, unsigned char *dat) -{ - struct mtd_oob_ops ops = { - .mode = MTD_OPS_PLACE_OOB, - .ooboffs = OOB_SIGNATURE_OFFSET + bmtd.oob_offset, - .oobbuf = bmtd.ops->sig, - .ooblen = bmtd.ops->sig_len, - .datbuf = dat, - .len = bmtd.bmt_pgs << bmtd.pg_shift, - }; - loff_t addr = (loff_t)block << bmtd.blk_shift; - - return bmtd._write_oob(bmtd.mtd, addr, &ops); -} - -static u16 find_valid_block(u16 block) -{ - u8 fdm[4]; - int ret; - int loop = 0; - -retry: - if (block >= bmtd.total_blks) - return 0; - - ret = bbt_nand_read(blk_pg(block), nand_data_buf, bmtd.pg_size, - fdm, sizeof(fdm)); - /* Read the 1st byte of FDM to judge whether it's a bad - * or not - */ - if (ret || fdm[0] != 0xff) { - pr_info("nand: found bad block 0x%x\n", block); - if (loop >= bmtd.bb_max) { - pr_info("nand: FATAL ERR: too many bad blocks!!\n"); - return 0; - } - - loop++; - block++; - goto retry; - } - - return block; -} - -/* Find out all bad blocks, and fill in the mapping table */ -static int scan_bad_blocks(struct bbbt *bbt) -{ - int i; - u16 block = 0; - - /* First time download, the block0 MUST NOT be a bad block, - * this is guaranteed by vendor - */ - bbt->bb_tbl[0] = 0; - - /* - * Construct the mapping table of Normal data area(non-PMT/BMTPOOL) - * G - Good block; B - Bad block - * --------------------------- - * physical |G|G|B|G|B|B|G|G|G|G|B|G|B| - * --------------------------- - * What bb_tbl[i] looks like: - * physical block(i): - * 0 1 2 3 4 5 6 7 8 9 a 
b c - * mapped block(bb_tbl[i]): - * 0 1 3 6 7 8 9 b ...... - * ATTENTION: - * If new bad block ocurred(n), search bmt_tbl to find - * a available block(x), and fill in the bb_tbl[n] = x; - */ - for (i = 1; i < bmtd.pool_lba; i++) { - bbt->bb_tbl[i] = find_valid_block(bbt->bb_tbl[i - 1] + 1); - BBT_LOG("bb_tbl[0x%x] = 0x%x", i, bbt->bb_tbl[i]); - if (bbt->bb_tbl[i] == 0) - return -1; - } - - /* Physical Block start Address of BMT pool */ - bmtd.pool_pba = bbt->bb_tbl[i - 1] + 1; - if (bmtd.pool_pba >= bmtd.total_blks - 2) { - pr_info("nand: FATAL ERR: Too many bad blocks!!\n"); - return -1; - } - - BBT_LOG("pool_pba=0x%x", bmtd.pool_pba); - i = 0; - block = bmtd.pool_pba; - /* - * The bmt table is used for runtime bad block mapping - * G - Good block; B - Bad block - * --------------------------- - * physical |G|G|B|G|B|B|G|G|G|G|B|G|B| - * --------------------------- - * block: 0 1 2 3 4 5 6 7 8 9 a b c - * What bmt_tbl[i] looks like in initial state: - * i: - * 0 1 2 3 4 5 6 7 - * bmt_tbl[i].block: - * 0 1 3 6 7 8 9 b - * bmt_tbl[i].mapped: - * N N N N N N N B - * N - Not mapped(Available) - * M - Mapped - * B - BMT - * ATTENTION: - * BMT always in the last valid block in pool - */ - while ((block = find_valid_block(block)) != 0) { - bmt_tbl(bbt)[i].block = block; - bmt_tbl(bbt)[i].mapped = NO_MAPPED; - BBT_LOG("bmt_tbl[%d].block = 0x%x", i, block); - block++; - i++; - } - - /* i - How many available blocks in pool, which is the length of bmt_tbl[] - * bmtd.bmt_blk_idx - bmt_tbl[bmtd.bmt_blk_idx].block => the BMT block - */ - bmtd.bmt_blk_idx = i - 1; - bmt_tbl(bbt)[bmtd.bmt_blk_idx].mapped = BMT_MAPPED; - - if (i < 1) { - pr_info("nand: FATAL ERR: no space to store BMT!!\n"); - return -1; - } - - pr_info("[BBT] %d available blocks in BMT pool\n", i); - - return 0; -} - -static bool is_valid_bmt(unsigned char *buf, unsigned char *fdm) -{ - struct bbbt *bbt = (struct bbbt *)buf; - u8 *sig = (u8*)bbt->signature + MAIN_SIGNATURE_OFFSET; - - - if 
(memcmp(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3) == 0 && - memcmp(fdm + OOB_SIGNATURE_OFFSET, "bmt", 3) == 0) { - if (bbt->version == BBMT_VERSION) - return true; - } - BBT_LOG("[BBT] BMT Version not match,upgrage preloader and uboot please! sig=%02x%02x%02x, fdm=%02x%02x%02x", - sig[0], sig[1], sig[2], - fdm[1], fdm[2], fdm[3]); - return false; -} - -static u16 get_bmt_index(struct bbmt *bmt) -{ - int i = 0; - - while (bmt[i].block != BMT_TBL_DEF_VAL) { - if (bmt[i].mapped == BMT_MAPPED) - return i; - i++; - } - return 0; -} - -static struct bbbt *scan_bmt(u16 block) -{ - u8 fdm[4]; - - if (block < bmtd.pool_lba) - return NULL; - - if (read_bmt(block, nand_bbt_buf, fdm, sizeof(fdm))) - return scan_bmt(block - 1); - - if (is_valid_bmt(nand_bbt_buf, fdm)) { - bmtd.bmt_blk_idx = get_bmt_index(bmt_tbl((struct bbbt *)nand_bbt_buf)); - if (bmtd.bmt_blk_idx == 0) { - pr_info("[BBT] FATAL ERR: bmt block index is wrong!\n"); - return NULL; - } - pr_info("[BBT] BMT.v2 is found at 0x%x\n", block); - return (struct bbbt *)nand_bbt_buf; - } else - return scan_bmt(block - 1); -} - -/* Write the Burner Bad Block Table to Nand Flash - * n - write BMT to bmt_tbl[n] - */ -static u16 upload_bmt(struct bbbt *bbt, int n) -{ - u16 block; - -retry: - if (n < 0 || bmt_tbl(bbt)[n].mapped == NORMAL_MAPPED) { - pr_info("nand: FATAL ERR: no space to store BMT!\n"); - return (u16)-1; - } - - block = bmt_tbl(bbt)[n].block; - BBT_LOG("n = 0x%x, block = 0x%x", n, block); - if (bbt_nand_erase(block)) { - bmt_tbl(bbt)[n].block = 0; - /* erase failed, try the previous block: bmt_tbl[n - 1].block */ - n--; - goto retry; - } - - /* The signature offset is fixed set to 0, - * oob signature offset is fixed set to 1 - */ - memcpy(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3); - bbt->version = BBMT_VERSION; - - if (write_bmt(block, (unsigned char *)bbt)) { - bmt_tbl(bbt)[n].block = 0; - - /* write failed, try the previous block in bmt_tbl[n - 1] */ - n--; - goto retry; - } - - /* Return the 
current index(n) of BMT pool (bmt_tbl[n]) */ - return n; -} - -static u16 find_valid_block_in_pool(struct bbbt *bbt) -{ - int i; - - if (bmtd.bmt_blk_idx == 0) - goto error; - - for (i = 0; i < bmtd.bmt_blk_idx; i++) { - if (bmt_tbl(bbt)[i].block != 0 && bmt_tbl(bbt)[i].mapped == NO_MAPPED) { - bmt_tbl(bbt)[i].mapped = NORMAL_MAPPED; - return bmt_tbl(bbt)[i].block; - } - } - -error: - pr_info("nand: FATAL ERR: BMT pool is run out!\n"); - return 0; -} - -/* We met a bad block, mark it as bad and map it to a valid block in pool, - * if it's a write failure, we need to write the data to mapped block - */ -static bool remap_block_v2(u16 block, u16 mapped_block, int copy_len) -{ - u16 mapped_blk; - struct bbbt *bbt; - - bbt = bmtd.bbt; - mapped_blk = find_valid_block_in_pool(bbt); - if (mapped_blk == 0) - return false; - - /* Map new bad block to available block in pool */ - bbt->bb_tbl[block] = mapped_blk; - - /* Erase new block */ - bbt_nand_erase(mapped_blk); - if (copy_len > 0) - bbt_nand_copy(mapped_blk, block, copy_len); - - bmtd.bmt_blk_idx = upload_bmt(bbt, bmtd.bmt_blk_idx); - - return true; -} - -static bool -mapping_block_in_range(int block, int *start, int *end) +bool mapping_block_in_range(int block, int *start, int *end) { const __be32 *cur = bmtd.remap_range; u32 addr = block << bmtd.blk_shift; @@ -493,18 +92,15 @@ mapping_block_in_range(int block, int *start, int *end) return false; } -static u16 -get_mapping_block_index_v2(int block) +static bool +mtk_bmt_remap_block(u32 block, u32 mapped_block, int copy_len) { int start, end; - if (block >= bmtd.pool_lba) - return block; - if (!mapping_block_in_range(block, &start, &end)) - return block; + return false; - return bmtd.bbt->bb_tbl[block]; + return bmtd.ops->remap_block(block, mapped_block, copy_len); } static int @@ -516,7 +112,6 @@ mtk_bmt_read(struct mtd_info *mtd, loff_t from, loff_t cur_from; int ret = 0; int max_bitflips = 0; - int start, end; ops->retlen = 0; ops->oobretlen = 0; @@ -526,9 +121,12 
@@ mtk_bmt_read(struct mtd_info *mtd, loff_t from, u32 offset = from & (bmtd.blk_size - 1); u32 block = from >> bmtd.blk_shift; - u32 cur_block; + int cur_block; cur_block = bmtd.ops->get_mapping_block(block); + if (cur_block < 0) + return -EIO; + cur_from = ((loff_t)cur_block << bmtd.blk_shift) + offset; cur_ops.oobretlen = 0; @@ -541,16 +139,15 @@ mtk_bmt_read(struct mtd_info *mtd, loff_t from, else max_bitflips = max_t(int, max_bitflips, cur_ret); if (cur_ret < 0 && !mtd_is_bitflip(cur_ret)) { - bmtd.ops->remap_block(block, cur_block, mtd->erasesize); - if (retry_count++ < 10) + if (mtk_bmt_remap_block(block, cur_block, mtd->erasesize) && + retry_count++ < 10) continue; goto out; } - if (cur_ret >= mtd->bitflip_threshold && - mapping_block_in_range(block, &start, &end)) - bmtd.ops->remap_block(block, cur_block, mtd->erasesize); + if (cur_ret >= mtd->bitflip_threshold) + mtk_bmt_remap_block(block, cur_block, mtd->erasesize); ops->retlen += cur_ops.retlen; ops->oobretlen += cur_ops.oobretlen; @@ -589,9 +186,12 @@ mtk_bmt_write(struct mtd_info *mtd, loff_t to, while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) { u32 offset = to & (bmtd.blk_size - 1); u32 block = to >> bmtd.blk_shift; - u32 cur_block; + int cur_block; cur_block = bmtd.ops->get_mapping_block(block); + if (cur_block < 0) + return -EIO; + cur_to = ((loff_t)cur_block << bmtd.blk_shift) + offset; cur_ops.oobretlen = 0; @@ -600,8 +200,8 @@ mtk_bmt_write(struct mtd_info *mtd, loff_t to, ops->len - ops->retlen); ret = bmtd._write_oob(mtd, cur_to, &cur_ops); if (ret < 0) { - bmtd.ops->remap_block(block, cur_block, offset); - if (retry_count++ < 10) + if (mtk_bmt_remap_block(block, cur_block, offset) && + retry_count++ < 10) continue; return ret; @@ -634,7 +234,8 @@ mtk_bmt_mtd_erase(struct mtd_info *mtd, struct erase_info *instr) int retry_count = 0; u64 start_addr, end_addr; int ret; - u16 orig_block, block; + u16 orig_block; + int block; start_addr = instr->addr & (~mtd->erasesize_mask); 
end_addr = instr->addr + instr->len; @@ -642,11 +243,13 @@ mtk_bmt_mtd_erase(struct mtd_info *mtd, struct erase_info *instr) while (start_addr < end_addr) { orig_block = start_addr >> bmtd.blk_shift; block = bmtd.ops->get_mapping_block(orig_block); + if (block < 0) + return -EIO; mapped_instr.addr = (loff_t)block << bmtd.blk_shift; ret = bmtd._erase(mtd, &mapped_instr); if (ret) { - bmtd.ops->remap_block(orig_block, block, 0); - if (retry_count++ < 10) + if (mtk_bmt_remap_block(orig_block, block, 0) && + retry_count++ < 10) continue; instr->fail_addr = start_addr; break; @@ -669,8 +272,8 @@ retry: block = bmtd.ops->get_mapping_block(orig_block); ret = bmtd._block_isbad(mtd, (loff_t)block << bmtd.blk_shift); if (ret) { - bmtd.ops->remap_block(orig_block, block, bmtd.blk_size); - if (retry_count++ < 10) + if (mtk_bmt_remap_block(orig_block, block, bmtd.blk_size) && + retry_count++ < 10) goto retry; } return ret; @@ -680,9 +283,13 @@ static int mtk_bmt_block_markbad(struct mtd_info *mtd, loff_t ofs) { u16 orig_block = ofs >> bmtd.blk_shift; - u16 block = bmtd.ops->get_mapping_block(orig_block); + int block; - bmtd.ops->remap_block(orig_block, block, bmtd.blk_size); + block = bmtd.ops->get_mapping_block(orig_block); + if (block < 0) + return -EIO; + + mtk_bmt_remap_block(orig_block, block, bmtd.blk_size); return bmtd._block_markbad(mtd, (loff_t)block << bmtd.blk_shift); } @@ -703,16 +310,32 @@ mtk_bmt_replace_ops(struct mtd_info *mtd) mtd->_block_markbad = mtk_bmt_block_markbad; } -static void -unmap_block_v2(u16 block) +static int mtk_bmt_debug_repair(void *data, u64 val) { - bmtd.bbt->bb_tbl[block] = block; - bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx); + int block = val >> bmtd.blk_shift; + int prev_block, new_block; + + prev_block = bmtd.ops->get_mapping_block(block); + if (prev_block < 0) + return -EIO; + + bmtd.ops->unmap_block(block); + new_block = bmtd.ops->get_mapping_block(block); + if (new_block < 0) + return -EIO; + + if (prev_block == 
new_block) + return 0; + + bbt_nand_erase(new_block); + bbt_nand_copy(new_block, prev_block, bmtd.blk_size); + + return 0; } static int mtk_bmt_debug_mark_good(void *data, u64 val) { - bmtd.ops->unmap_block(val >> bmtd.blk_shift); + bmtd.ops->unmap_block(val >> bmtd.blk_shift); return 0; } @@ -720,102 +343,13 @@ static int mtk_bmt_debug_mark_good(void *data, u64 val) static int mtk_bmt_debug_mark_bad(void *data, u64 val) { u32 block = val >> bmtd.blk_shift; - u16 cur_block = bmtd.ops->get_mapping_block(block); + int cur_block; - bmtd.ops->remap_block(block, cur_block, bmtd.blk_size); + cur_block = bmtd.ops->get_mapping_block(block); + if (cur_block < 0) + return -EIO; - return 0; -} - -static unsigned long * -mtk_bmt_get_mapping_mask(void) -{ - struct bbmt *bbmt = bmt_tbl(bmtd.bbt); - int main_blocks = bmtd.mtd->size >> bmtd.blk_shift; - unsigned long *used; - int i, k; - - used = kcalloc(sizeof(unsigned long), BIT_WORD(bmtd.bmt_blk_idx) + 1, GFP_KERNEL); - if (!used) - return NULL; - - for (i = 1; i < main_blocks; i++) { - if (bmtd.bbt->bb_tbl[i] == i) - continue; - - for (k = 0; k < bmtd.bmt_blk_idx; k++) { - if (bmtd.bbt->bb_tbl[i] != bbmt[k].block) - continue; - - set_bit(k, used); - break; - } - } - - return used; -} - -static int mtk_bmt_debug_v2(void *data, u64 val) -{ - struct bbmt *bbmt = bmt_tbl(bmtd.bbt); - struct mtd_info *mtd = bmtd.mtd; - unsigned long *used; - int main_blocks = mtd->size >> bmtd.blk_shift; - int n_remap = 0; - int i; - - used = mtk_bmt_get_mapping_mask(); - if (!used) - return -ENOMEM; - - switch (val) { - case 0: - for (i = 1; i < main_blocks; i++) { - if (bmtd.bbt->bb_tbl[i] == i) - continue; - - printk("remap [%x->%x]\n", i, bmtd.bbt->bb_tbl[i]); - n_remap++; - } - for (i = 0; i <= bmtd.bmt_blk_idx; i++) { - char c; - - switch (bbmt[i].mapped) { - case NO_MAPPED: - continue; - case NORMAL_MAPPED: - c = 'm'; - if (test_bit(i, used)) - c = 'M'; - break; - case BMT_MAPPED: - c = 'B'; - break; - default: - c = 'X'; - break; - } - 
printk("[%x:%c] = 0x%x\n", i, c, bbmt[i].block); - } - break; - case 100: - for (i = 0; i <= bmtd.bmt_blk_idx; i++) { - if (bbmt[i].mapped != NORMAL_MAPPED) - continue; - - if (test_bit(i, used)) - continue; - - n_remap++; - bbmt[i].mapped = NO_MAPPED; - printk("free block [%d:%x]\n", i, bbmt[i].block); - } - if (n_remap) - bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx); - break; - } - - kfree(used); + mtk_bmt_remap_block(block, cur_block, bmtd.blk_size); return 0; } @@ -826,6 +360,7 @@ static int mtk_bmt_debug(void *data, u64 val) } +DEFINE_DEBUGFS_ATTRIBUTE(fops_repair, NULL, mtk_bmt_debug_repair, "%llu\n"); DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_good, NULL, mtk_bmt_debug_mark_good, "%llu\n"); DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_bad, NULL, mtk_bmt_debug_mark_bad, "%llu\n"); DEFINE_DEBUGFS_ATTRIBUTE(fops_debug, NULL, mtk_bmt_debug, "%llu\n"); @@ -839,6 +374,7 @@ mtk_bmt_add_debugfs(void) if (!dir) return; + debugfs_create_file_unsafe("repair", S_IWUSR, dir, NULL, &fops_repair); debugfs_create_file_unsafe("mark_good", S_IWUSR, dir, NULL, &fops_mark_good); debugfs_create_file_unsafe("mark_bad", S_IWUSR, dir, NULL, &fops_mark_bad); debugfs_create_file_unsafe("debug", S_IWUSR, dir, NULL, &fops_debug); @@ -853,8 +389,8 @@ void mtk_bmt_detach(struct mtd_info *mtd) debugfs_remove_recursive(bmtd.debugfs_dir); bmtd.debugfs_dir = NULL; - kfree(nand_bbt_buf); - kfree(nand_data_buf); + kfree(bmtd.bbt_buf); + kfree(bmtd.data_buf); mtd->_read_oob = bmtd._read_oob; mtd->_write_oob = bmtd._write_oob; @@ -866,283 +402,9 @@ void mtk_bmt_detach(struct mtd_info *mtd) memset(&bmtd, 0, sizeof(bmtd)); } -static int mtk_bmt_init_v2(struct device_node *np) -{ - u32 bmt_pool_size, bmt_table_size; - u32 bufsz, block; - u16 pmt_block; - - if (of_property_read_u32(np, "mediatek,bmt-pool-size", - &bmt_pool_size) != 0) - bmt_pool_size = 80; - - if (of_property_read_u8(np, "mediatek,bmt-oob-offset", - &bmtd.oob_offset) != 0) - bmtd.oob_offset = 0; - - if (of_property_read_u32(np, 
"mediatek,bmt-table-size", - &bmt_table_size) != 0) - bmt_table_size = 0x2000U; - - bmtd.table_size = bmt_table_size; - - pmt_block = bmtd.total_blks - bmt_pool_size - 2; - - bmtd.mtd->size = pmt_block << bmtd.blk_shift; - - /* - * --------------------------------------- - * | PMT(2blks) | BMT POOL(totalblks * 2%) | - * --------------------------------------- - * ^ ^ - * | | - * pmt_block pmt_block + 2blocks(pool_lba) - * - * ATTETION!!!!!! - * The blocks ahead of the boundary block are stored in bb_tbl - * and blocks behind are stored in bmt_tbl - */ - - bmtd.pool_lba = (u16)(pmt_block + 2); - bmtd.bb_max = bmtd.total_blks * BBPOOL_RATIO / 100; - - bufsz = round_up(sizeof(struct bbbt) + - bmt_table_size * sizeof(struct bbmt), bmtd.pg_size); - bmtd.bmt_pgs = bufsz >> bmtd.pg_shift; - - nand_bbt_buf = kzalloc(bufsz, GFP_KERNEL); - if (!nand_bbt_buf) - return -ENOMEM; - - memset(nand_bbt_buf, 0xff, bufsz); - - /* Scanning start from the first page of the last block - * of whole flash - */ - bmtd.bbt = scan_bmt(bmtd.total_blks - 1); - if (!bmtd.bbt) { - /* BMT not found */ - if (bmtd.total_blks > BB_TABLE_MAX + BMT_TABLE_MAX) { - pr_info("nand: FATAL: Too many blocks, can not support!\n"); - return -1; - } - - bmtd.bbt = (struct bbbt *)nand_bbt_buf; - memset(bmt_tbl(bmtd.bbt), BMT_TBL_DEF_VAL, - bmtd.table_size * sizeof(struct bbmt)); - - if (scan_bad_blocks(bmtd.bbt)) - return -1; - - /* BMT always in the last valid block in pool */ - bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx); - block = bmt_tbl(bmtd.bbt)[bmtd.bmt_blk_idx].block; - pr_notice("[BBT] BMT.v2 is written into PBA:0x%x\n", block); - - if (bmtd.bmt_blk_idx == 0) - pr_info("nand: Warning: no available block in BMT pool!\n"); - else if (bmtd.bmt_blk_idx == (u16)-1) - return -1; - } - - return 0; -} - -static bool -bbt_block_is_bad(u16 block) -{ - u8 cur = nand_bbt_buf[block / 4]; - - return cur & (3 << ((block % 4) * 2)); -} - -static void -bbt_set_block_state(u16 block, bool bad) -{ - u8 mask 
= (3 << ((block % 4) * 2)); - - if (bad) - nand_bbt_buf[block / 4] |= mask; - else - nand_bbt_buf[block / 4] &= ~mask; - - bbt_nand_erase(bmtd.bmt_blk_idx); - write_bmt(bmtd.bmt_blk_idx, nand_bbt_buf); -} - -static u16 -get_mapping_block_index_bbt(int block) -{ - int start, end, ofs; - int bad_blocks = 0; - int i; - - if (!mapping_block_in_range(block, &start, &end)) - return block; - - start >>= bmtd.blk_shift; - end >>= bmtd.blk_shift; - /* skip bad blocks within the mapping range */ - ofs = block - start; - for (i = start; i < end; i++) { - if (bbt_block_is_bad(i)) - bad_blocks++; - else if (ofs) - ofs--; - else - break; - } - - if (i < end) - return i; - - /* when overflowing, remap remaining blocks to bad ones */ - for (i = end - 1; bad_blocks > 0; i--) { - if (!bbt_block_is_bad(i)) - continue; - - bad_blocks--; - if (bad_blocks <= ofs) - return i; - } - - return block; -} - -static bool remap_block_bbt(u16 block, u16 mapped_blk, int copy_len) -{ - int start, end; - u16 new_blk; - - if (!mapping_block_in_range(block, &start, &end)) - return false; - - bbt_set_block_state(mapped_blk, true); - - new_blk = get_mapping_block_index_bbt(block); - bbt_nand_erase(new_blk); - if (copy_len > 0) - bbt_nand_copy(new_blk, mapped_blk, copy_len); - - return false; -} - -static void -unmap_block_bbt(u16 block) -{ - bbt_set_block_state(block, false); -} - -static int -mtk_bmt_read_bbt(void) -{ - u8 oob_buf[8]; - int i; - - for (i = bmtd.total_blks - 1; i >= bmtd.total_blks - 5; i--) { - u32 page = i << (bmtd.blk_shift - bmtd.pg_shift); - - if (bbt_nand_read(page, nand_bbt_buf, bmtd.pg_size, - oob_buf, sizeof(oob_buf))) { - pr_info("read_bbt: could not read block %d\n", i); - continue; - } - - if (oob_buf[0] != 0xff) { - pr_info("read_bbt: bad block at %d\n", i); - continue; - } - - if (memcmp(&oob_buf[1], "mtknand", 7) != 0) { - pr_info("read_bbt: signature mismatch in block %d\n", i); - print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, oob_buf, 8, 1); - continue; - } - 
- pr_info("read_bbt: found bbt at block %d\n", i); - bmtd.bmt_blk_idx = i; - return 0; - } - - return -EIO; -} - - -static int -mtk_bmt_init_bbt(struct device_node *np) -{ - int buf_size = round_up(bmtd.total_blks >> 2, bmtd.blk_size); - int ret; - - nand_bbt_buf = kmalloc(buf_size, GFP_KERNEL); - if (!nand_bbt_buf) - return -ENOMEM; - - memset(nand_bbt_buf, 0xff, buf_size); - bmtd.mtd->size -= 4 * bmtd.mtd->erasesize; - - ret = mtk_bmt_read_bbt(); - if (ret) - return ret; - - bmtd.bmt_pgs = buf_size / bmtd.pg_size; - - return 0; -} - -static int mtk_bmt_debug_bbt(void *data, u64 val) -{ - char buf[5]; - int i, k; - - switch (val) { - case 0: - for (i = 0; i < bmtd.total_blks; i += 4) { - u8 cur = nand_bbt_buf[i / 4]; - - for (k = 0; k < 4; k++, cur >>= 2) - buf[k] = (cur & 3) ? 'B' : '.'; - - buf[4] = 0; - printk("[%06x] %s\n", i * bmtd.blk_size, buf); - } - break; - case 100: -#if 0 - for (i = bmtd.bmt_blk_idx; i < bmtd.total_blks - 1; i++) - bbt_nand_erase(bmtd.bmt_blk_idx); -#endif - - bmtd.bmt_blk_idx = bmtd.total_blks - 1; - bbt_nand_erase(bmtd.bmt_blk_idx); - write_bmt(bmtd.bmt_blk_idx, nand_bbt_buf); - break; - default: - break; - } - return 0; -} int mtk_bmt_attach(struct mtd_info *mtd) { - static const struct mtk_bmt_ops v2_ops = { - .sig = "bmt", - .sig_len = 3, - .init = mtk_bmt_init_v2, - .remap_block = remap_block_v2, - .unmap_block = unmap_block_v2, - .get_mapping_block = get_mapping_block_index_v2, - .debug = mtk_bmt_debug_v2, - }; - static const struct mtk_bmt_ops bbt_ops = { - .sig = "mtknand", - .sig_len = 7, - .init = mtk_bmt_init_bbt, - .remap_block = remap_block_bbt, - .unmap_block = unmap_block_bbt, - .get_mapping_block = get_mapping_block_index_bbt, - .debug = mtk_bmt_debug_bbt, - }; struct device_node *np; int ret = 0; @@ -1154,9 +416,11 @@ int mtk_bmt_attach(struct mtd_info *mtd) return 0; if (of_property_read_bool(np, "mediatek,bmt-v2")) - bmtd.ops = &v2_ops; + bmtd.ops = &mtk_bmt_v2_ops; + else if (of_property_read_bool(np, 
"mediatek,nmbm")) + bmtd.ops = &mtk_bmt_nmbm_ops; else if (of_property_read_bool(np, "mediatek,bbt")) - bmtd.ops = &bbt_ops; + bmtd.ops = &mtk_bmt_bbt_ops; else return 0; @@ -1173,14 +437,14 @@ int mtk_bmt_attach(struct mtd_info *mtd) bmtd.pg_shift = ffs(bmtd.pg_size) - 1; bmtd.total_blks = mtd->size >> bmtd.blk_shift; - nand_data_buf = kzalloc(bmtd.pg_size, GFP_KERNEL); - if (!nand_data_buf) { + bmtd.data_buf = kzalloc(bmtd.pg_size + bmtd.mtd->oobsize, GFP_KERNEL); + if (!bmtd.data_buf) { pr_info("nand: FATAL ERR: allocate buffer failed!\n"); ret = -1; goto error; } - memset(nand_data_buf, 0xff, bmtd.pg_size); + memset(bmtd.data_buf, 0xff, bmtd.pg_size + bmtd.mtd->oobsize); ret = bmtd.ops->init(np); if (ret) diff --git a/target/linux/generic/files/drivers/mtd/nand/mtk_bmt.h b/target/linux/generic/files/drivers/mtd/nand/mtk_bmt.h new file mode 100644 index 0000000000..dff1f28c81 --- /dev/null +++ b/target/linux/generic/files/drivers/mtd/nand/mtk_bmt.h @@ -0,0 +1,131 @@ +#ifndef __MTK_BMT_PRIV_H +#define __MTK_BMT_PRIV_H + +#include +#include +#include +#include +#include +#include + +#define MAIN_SIGNATURE_OFFSET 0 +#define OOB_SIGNATURE_OFFSET 1 + +#define BBT_LOG(fmt, ...) 
pr_debug("[BBT][%s|%d] "fmt"\n", __func__, __LINE__, ##__VA_ARGS__) + +struct mtk_bmt_ops { + char *sig; + unsigned int sig_len; + int (*init)(struct device_node *np); + bool (*remap_block)(u16 block, u16 mapped_block, int copy_len); + void (*unmap_block)(u16 block); + int (*get_mapping_block)(int block); + int (*debug)(void *data, u64 val); +}; + +struct bbbt; +struct nmbm_instance; + +struct bmt_desc { + struct mtd_info *mtd; + unsigned char *bbt_buf; + unsigned char *data_buf; + + int (*_read_oob) (struct mtd_info *mtd, loff_t from, + struct mtd_oob_ops *ops); + int (*_write_oob) (struct mtd_info *mtd, loff_t to, + struct mtd_oob_ops *ops); + int (*_erase) (struct mtd_info *mtd, struct erase_info *instr); + int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs); + int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs); + + const struct mtk_bmt_ops *ops; + + union { + struct bbbt *bbt; + struct nmbm_instance *ni; + }; + + struct dentry *debugfs_dir; + + u32 table_size; + u32 pg_size; + u32 blk_size; + u16 pg_shift; + u16 blk_shift; + /* bbt logical address */ + u16 pool_lba; + /* bbt physical address */ + u16 pool_pba; + /* Maximum count of bad blocks that the vendor guaranteed */ + u16 bb_max; + /* Total blocks of the Nand Chip */ + u16 total_blks; + /* The block(n) BMT is located at (bmt_tbl[n]) */ + u16 bmt_blk_idx; + /* How many pages needs to store 'struct bbbt' */ + u32 bmt_pgs; + + const __be32 *remap_range; + int remap_range_len; + + /* to compensate for driver level remapping */ + u8 oob_offset; +}; + +extern struct bmt_desc bmtd; +extern const struct mtk_bmt_ops mtk_bmt_v2_ops; +extern const struct mtk_bmt_ops mtk_bmt_bbt_ops; +extern const struct mtk_bmt_ops mtk_bmt_nmbm_ops; + +static inline u32 blk_pg(u16 block) +{ + return (u32)(block << (bmtd.blk_shift - bmtd.pg_shift)); +} + +static inline int +bbt_nand_read(u32 page, unsigned char *dat, int dat_len, + unsigned char *fdm, int fdm_len) +{ + struct mtd_oob_ops ops = { + .mode = MTD_OPS_PLACE_OOB, 
+ .ooboffs = bmtd.oob_offset, + .oobbuf = fdm, + .ooblen = fdm_len, + .datbuf = dat, + .len = dat_len, + }; + + return bmtd._read_oob(bmtd.mtd, page << bmtd.pg_shift, &ops); +} + +static inline int bbt_nand_erase(u16 block) +{ + struct mtd_info *mtd = bmtd.mtd; + struct erase_info instr = { + .addr = (loff_t)block << bmtd.blk_shift, + .len = bmtd.blk_size, + }; + + return bmtd._erase(mtd, &instr); +} + +static inline int write_bmt(u16 block, unsigned char *dat) +{ + struct mtd_oob_ops ops = { + .mode = MTD_OPS_PLACE_OOB, + .ooboffs = OOB_SIGNATURE_OFFSET + bmtd.oob_offset, + .oobbuf = bmtd.ops->sig, + .ooblen = bmtd.ops->sig_len, + .datbuf = dat, + .len = bmtd.bmt_pgs << bmtd.pg_shift, + }; + loff_t addr = (loff_t)block << bmtd.blk_shift; + + return bmtd._write_oob(bmtd.mtd, addr, &ops); +} + +int bbt_nand_copy(u16 dest_blk, u16 src_blk, loff_t max_offset); +bool mapping_block_in_range(int block, int *start, int *end); + +#endif diff --git a/target/linux/generic/files/drivers/mtd/nand/mtk_bmt_bbt.c b/target/linux/generic/files/drivers/mtd/nand/mtk_bmt_bbt.c new file mode 100644 index 0000000000..519e1ed70c --- /dev/null +++ b/target/linux/generic/files/drivers/mtd/nand/mtk_bmt_bbt.c @@ -0,0 +1,203 @@ +/* + * Copyright (c) 2017 MediaTek Inc. + * Author: Xiangsheng Hou + * Copyright (c) 2020-2022 Felix Fietkau + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include "mtk_bmt.h" + +static bool +bbt_block_is_bad(u16 block) +{ + u8 cur = bmtd.bbt_buf[block / 4]; + + return cur & (3 << ((block % 4) * 2)); +} + +static void +bbt_set_block_state(u16 block, bool bad) +{ + u8 mask = (3 << ((block % 4) * 2)); + + if (bad) + bmtd.bbt_buf[block / 4] |= mask; + else + bmtd.bbt_buf[block / 4] &= ~mask; + + bbt_nand_erase(bmtd.bmt_blk_idx); + write_bmt(bmtd.bmt_blk_idx, bmtd.bbt_buf); +} + +static int +get_mapping_block_index_bbt(int block) +{ + int start, end, ofs; + int bad_blocks = 0; + int i; + + if (!mapping_block_in_range(block, &start, &end)) + return block; + + start >>= bmtd.blk_shift; + end >>= bmtd.blk_shift; + /* skip bad blocks within the mapping range */ + ofs = block - start; + for (i = start; i < end; i++) { + if (bbt_block_is_bad(i)) + bad_blocks++; + else if (ofs) + ofs--; + else + break; + } + + if (i < end) + return i; + + /* when overflowing, remap remaining blocks to bad ones */ + for (i = end - 1; bad_blocks > 0; i--) { + if (!bbt_block_is_bad(i)) + continue; + + bad_blocks--; + if (bad_blocks <= ofs) + return i; + } + + return block; +} + +static bool remap_block_bbt(u16 block, u16 mapped_blk, int copy_len) +{ + int start, end; + u16 new_blk; + + if (!mapping_block_in_range(block, &start, &end)) + return false; + + bbt_set_block_state(mapped_blk, true); + + new_blk = get_mapping_block_index_bbt(block); + bbt_nand_erase(new_blk); + if (copy_len > 0) + bbt_nand_copy(new_blk, mapped_blk, copy_len); + + return true; +} + +static void +unmap_block_bbt(u16 block) +{ + bbt_set_block_state(block, false); +} + +static int +mtk_bmt_read_bbt(void) +{ + u8 oob_buf[8]; + int i; + + for (i = bmtd.total_blks - 1; i >= bmtd.total_blks - 5; i--) { + u32 page = i << (bmtd.blk_shift - bmtd.pg_shift); + + if (bbt_nand_read(page, bmtd.bbt_buf, bmtd.pg_size, + oob_buf, sizeof(oob_buf))) { + pr_info("read_bbt: could not read block %d\n", i); + continue; + } + + if (oob_buf[0] != 0xff) { + 
pr_info("read_bbt: bad block at %d\n", i); + continue; + } + + if (memcmp(&oob_buf[1], "mtknand", 7) != 0) { + pr_info("read_bbt: signature mismatch in block %d\n", i); + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, oob_buf, 8, 1); + continue; + } + + pr_info("read_bbt: found bbt at block %d\n", i); + bmtd.bmt_blk_idx = i; + return 0; + } + + return -EIO; +} + + +static int +mtk_bmt_init_bbt(struct device_node *np) +{ + int buf_size = round_up(bmtd.total_blks >> 2, bmtd.blk_size); + int ret; + + bmtd.bbt_buf = kmalloc(buf_size, GFP_KERNEL); + if (!bmtd.bbt_buf) + return -ENOMEM; + + memset(bmtd.bbt_buf, 0xff, buf_size); + bmtd.mtd->size -= 4 * bmtd.mtd->erasesize; + + ret = mtk_bmt_read_bbt(); + if (ret) + return ret; + + bmtd.bmt_pgs = buf_size / bmtd.pg_size; + + return 0; +} + +static int mtk_bmt_debug_bbt(void *data, u64 val) +{ + char buf[5]; + int i, k; + + switch (val) { + case 0: + for (i = 0; i < bmtd.total_blks; i += 4) { + u8 cur = bmtd.bbt_buf[i / 4]; + + for (k = 0; k < 4; k++, cur >>= 2) + buf[k] = (cur & 3) ? 
'B' : '.'; + + buf[4] = 0; + printk("[%06x] %s\n", i * bmtd.blk_size, buf); + } + break; + case 100: +#if 0 + for (i = bmtd.bmt_blk_idx; i < bmtd.total_blks - 1; i++) + bbt_nand_erase(bmtd.bmt_blk_idx); +#endif + + bmtd.bmt_blk_idx = bmtd.total_blks - 1; + bbt_nand_erase(bmtd.bmt_blk_idx); + write_bmt(bmtd.bmt_blk_idx, bmtd.bbt_buf); + break; + default: + break; + } + return 0; +} + +const struct mtk_bmt_ops mtk_bmt_bbt_ops = { + .sig = "mtknand", + .sig_len = 7, + .init = mtk_bmt_init_bbt, + .remap_block = remap_block_bbt, + .unmap_block = unmap_block_bbt, + .get_mapping_block = get_mapping_block_index_bbt, + .debug = mtk_bmt_debug_bbt, +}; diff --git a/target/linux/generic/files/drivers/mtd/nand/mtk_bmt_nmbm.c b/target/linux/generic/files/drivers/mtd/nand/mtk_bmt_nmbm.c new file mode 100644 index 0000000000..a896e49ec0 --- /dev/null +++ b/target/linux/generic/files/drivers/mtd/nand/mtk_bmt_nmbm.c @@ -0,0 +1,2348 @@ +#include +#include +#include "mtk_bmt.h" + +#define nlog_err(ni, ...) printk(KERN_ERR __VA_ARGS__) +#define nlog_info(ni, ...) printk(KERN_INFO __VA_ARGS__) +#define nlog_debug(ni, ...) printk(KERN_INFO __VA_ARGS__) +#define nlog_warn(ni, ...) 
printk(KERN_WARNING __VA_ARGS__) + +#define NMBM_MAGIC_SIGNATURE 0x304d4d4e /* NMM0 */ +#define NMBM_MAGIC_INFO_TABLE 0x314d4d4e /* NMM1 */ + +#define NMBM_VERSION_MAJOR_S 0 +#define NMBM_VERSION_MAJOR_M 0xffff +#define NMBM_VERSION_MINOR_S 16 +#define NMBM_VERSION_MINOR_M 0xffff +#define NMBM_VERSION_MAKE(major, minor) (((major) & NMBM_VERSION_MAJOR_M) | \ + (((minor) & NMBM_VERSION_MINOR_M) << \ + NMBM_VERSION_MINOR_S)) +#define NMBM_VERSION_MAJOR_GET(ver) (((ver) >> NMBM_VERSION_MAJOR_S) & \ + NMBM_VERSION_MAJOR_M) +#define NMBM_VERSION_MINOR_GET(ver) (((ver) >> NMBM_VERSION_MINOR_S) & \ + NMBM_VERSION_MINOR_M) + +#define NMBM_BITMAP_UNIT_SIZE (sizeof(u32)) +#define NMBM_BITMAP_BITS_PER_BLOCK 2 +#define NMBM_BITMAP_BITS_PER_UNIT (8 * sizeof(u32)) +#define NMBM_BITMAP_BLOCKS_PER_UNIT (NMBM_BITMAP_BITS_PER_UNIT / \ + NMBM_BITMAP_BITS_PER_BLOCK) + +#define NMBM_SPARE_BLOCK_MULTI 1 +#define NMBM_SPARE_BLOCK_DIV 2 +#define NMBM_SPARE_BLOCK_MIN 2 + +#define NMBM_MGMT_DIV 16 +#define NMBM_MGMT_BLOCKS_MIN 32 + +#define NMBM_TRY_COUNT 3 + +#define BLOCK_ST_BAD 0 +#define BLOCK_ST_NEED_REMAP 2 +#define BLOCK_ST_GOOD 3 +#define BLOCK_ST_MASK 3 + +#define NMBM_VER_MAJOR 1 +#define NMBM_VER_MINOR 0 +#define NMBM_VER NMBM_VERSION_MAKE(NMBM_VER_MAJOR, \ + NMBM_VER_MINOR) + +struct nmbm_header { + u32 magic; + u32 version; + u32 size; + u32 checksum; +}; + +struct nmbm_signature { + struct nmbm_header header; + uint64_t nand_size; + u32 block_size; + u32 page_size; + u32 spare_size; + u32 mgmt_start_pb; + u8 max_try_count; + u8 padding[3]; +}; + +struct nmbm_info_table_header { + struct nmbm_header header; + u32 write_count; + u32 state_table_off; + u32 mapping_table_off; + u32 padding; +}; + +struct nmbm_instance { + u32 rawpage_size; + u32 rawblock_size; + u32 rawchip_size; + + struct nmbm_signature signature; + + u8 *info_table_cache; + u32 info_table_size; + u32 info_table_spare_blocks; + struct nmbm_info_table_header info_table; + + u32 *block_state; + u32 
block_state_changed; + u32 state_table_size; + + int32_t *block_mapping; + u32 block_mapping_changed; + u32 mapping_table_size; + + u8 *page_cache; + + int protected; + + u32 block_count; + u32 data_block_count; + + u32 mgmt_start_ba; + u32 main_table_ba; + u32 backup_table_ba; + u32 mapping_blocks_ba; + u32 mapping_blocks_top_ba; + u32 signature_ba; + + u32 max_ratio; + u32 max_reserved_blocks; + bool empty_page_ecc_ok; + bool force_create; +}; + +static inline u32 nmbm_crc32(u32 crcval, const void *buf, size_t size) +{ + unsigned int chksz; + const unsigned char *p = buf; + + while (size) { + if (size > UINT_MAX) + chksz = UINT_MAX; + else + chksz = (uint)size; + + crcval = crc32_le(crcval, p, chksz); + size -= chksz; + p += chksz; + } + + return crcval; +} +/* + * nlog_table_creation - Print log of table creation event + * @ni: NMBM instance structure + * @main_table: whether the table is main info table + * @start_ba: start block address of the table + * @end_ba: block address after the end of the table + */ +static void nlog_table_creation(struct nmbm_instance *ni, bool main_table, + uint32_t start_ba, uint32_t end_ba) +{ + if (start_ba == end_ba - 1) + nlog_info(ni, "%s info table has been written to block %u\n", + main_table ? "Main" : "Backup", start_ba); + else + nlog_info(ni, "%s info table has been written to block %u-%u\n", + main_table ? "Main" : "Backup", start_ba, end_ba - 1); +} + +/* + * nlog_table_update - Print log of table update event + * @ni: NMBM instance structure + * @main_table: whether the table is main info table + * @start_ba: start block address of the table + * @end_ba: block address after the end of the table + */ +static void nlog_table_update(struct nmbm_instance *ni, bool main_table, + uint32_t start_ba, uint32_t end_ba) +{ + if (start_ba == end_ba - 1) + nlog_debug(ni, "%s info table has been updated in block %u\n", + main_table ? 
"Main" : "Backup", start_ba); + else + nlog_debug(ni, "%s info table has been updated in block %u-%u\n", + main_table ? "Main" : "Backup", start_ba, end_ba - 1); +} + +/* + * nlog_table_found - Print log of table found event + * @ni: NMBM instance structure + * @first_table: whether the table is first found info table + * @write_count: write count of the info table + * @start_ba: start block address of the table + * @end_ba: block address after the end of the table + */ +static void nlog_table_found(struct nmbm_instance *ni, bool first_table, + uint32_t write_count, uint32_t start_ba, + uint32_t end_ba) +{ + if (start_ba == end_ba - 1) + nlog_info(ni, "%s info table with writecount %u found in block %u\n", + first_table ? "First" : "Second", write_count, + start_ba); + else + nlog_info(ni, "%s info table with writecount %u found in block %u-%u\n", + first_table ? "First" : "Second", write_count, + start_ba, end_ba - 1); +} + +/*****************************************************************************/ +/* Address conversion functions */ +/*****************************************************************************/ + +/* + * ba2addr - Convert a block address to linear address + * @ni: NMBM instance structure + * @ba: Block address + */ +static uint64_t ba2addr(struct nmbm_instance *ni, uint32_t ba) +{ + return (uint64_t)ba << bmtd.blk_shift; +} +/* + * size2blk - Get minimum required blocks for storing specific size of data + * @ni: NMBM instance structure + * @size: size for storing + */ +static uint32_t size2blk(struct nmbm_instance *ni, uint64_t size) +{ + return (size + bmtd.blk_size - 1) >> bmtd.blk_shift; +} + +/*****************************************************************************/ +/* High level NAND chip APIs */ +/*****************************************************************************/ + +/* + * nmbm_read_phys_page - Read page with retry + * @ni: NMBM instance structure + * @addr: linear address where the data will be read from + * @data: 
the main data to be read + * @oob: the oob data to be read + * + * Read a page for at most NMBM_TRY_COUNT times. + * + * Return 0 for success, positive value for corrected bitflip count, + * -EBADMSG for ecc error, other negative values for other errors + */ +static int nmbm_read_phys_page(struct nmbm_instance *ni, uint64_t addr, + void *data, void *oob) +{ + int tries, ret; + + for (tries = 0; tries < NMBM_TRY_COUNT; tries++) { + struct mtd_oob_ops ops = { + .mode = MTD_OPS_PLACE_OOB, + .oobbuf = oob, + .datbuf = data, + }; + + if (data) + ops.len = bmtd.pg_size; + if (oob) + ops.ooblen = mtd_oobavail(bmtd.mtd, &ops); + + ret = bmtd._read_oob(bmtd.mtd, addr, &ops); + if (ret == -EUCLEAN) + return min_t(u32, bmtd.mtd->bitflip_threshold + 1, + bmtd.mtd->ecc_strength); + if (ret >= 0) + return 0; + } + + if (ret != -EBADMSG) + nlog_err(ni, "Page read failed at address 0x%08llx\n", addr); + + return ret; +} + +/* + * nmbm_write_phys_page - Write page with retry + * @ni: NMBM instance structure + * @addr: linear address where the data will be written to + * @data: the main data to be written + * @oob: the oob data to be written + * + * Write a page for at most NMBM_TRY_COUNT times. + */ +static bool nmbm_write_phys_page(struct nmbm_instance *ni, uint64_t addr, + const void *data, const void *oob) +{ + int tries, ret; + + for (tries = 0; tries < NMBM_TRY_COUNT; tries++) { + struct mtd_oob_ops ops = { + .mode = MTD_OPS_PLACE_OOB, + .oobbuf = (void *)oob, + .datbuf = (void *)data, + }; + + if (data) + ops.len = bmtd.pg_size; + if (oob) + ops.ooblen = mtd_oobavail(bmtd.mtd, &ops); + + ret = bmtd._write_oob(bmtd.mtd, addr, &ops); + if (!ret) + return true; + } + + nlog_err(ni, "Page write failed at address 0x%08llx\n", addr); + + return false; +} + +/* + * nmbm_erase_phys_block - Erase a block with retry + * @ni: NMBM instance structure + * @addr: Linear address + * + * Erase a block for at most NMBM_TRY_COUNT times. 
+ */ +static bool nmbm_erase_phys_block(struct nmbm_instance *ni, uint64_t addr) +{ + int tries, ret; + + for (tries = 0; tries < NMBM_TRY_COUNT; tries++) { + struct erase_info ei = { + .addr = addr, + .len = bmtd.mtd->erasesize, + }; + + ret = bmtd._erase(bmtd.mtd, &ei); + if (!ret) + return true; + } + + nlog_err(ni, "Block erasure failed at address 0x%08llx\n", addr); + + return false; +} + +/* + * nmbm_check_bad_phys_block - Check whether a block is marked bad in OOB + * @ni: NMBM instance structure + * @ba: block address + */ +static bool nmbm_check_bad_phys_block(struct nmbm_instance *ni, uint32_t ba) +{ + uint64_t addr = ba2addr(ni, ba); + + return bmtd._block_isbad(bmtd.mtd, addr); +} + +/* + * nmbm_mark_phys_bad_block - Mark a block bad + * @ni: NMBM instance structure + * @addr: Linear address + */ +static int nmbm_mark_phys_bad_block(struct nmbm_instance *ni, uint32_t ba) +{ + uint64_t addr = ba2addr(ni, ba); + + nlog_info(ni, "Block %u [0x%08llx] will be marked bad\n", ba, addr); + + return bmtd._block_markbad(bmtd.mtd, addr); +} + +/*****************************************************************************/ +/* NMBM related functions */ +/*****************************************************************************/ + +/* + * nmbm_check_header - Check whether a NMBM structure is valid + * @data: pointer to a NMBM structure with a NMBM header at beginning + * @size: Size of the buffer pointed by @header + * + * The size of the NMBM structure may be larger than NMBM header, + * e.g. block mapping table and block state table. + */ +static bool nmbm_check_header(const void *data, uint32_t size) +{ + const struct nmbm_header *header = data; + struct nmbm_header nhdr; + uint32_t new_checksum; + + /* + * Make sure expected structure size is equal or smaller than + * buffer size. 
+ */ + if (header->size > size) + return false; + + memcpy(&nhdr, data, sizeof(nhdr)); + + nhdr.checksum = 0; + new_checksum = nmbm_crc32(0, &nhdr, sizeof(nhdr)); + if (header->size > sizeof(nhdr)) + new_checksum = nmbm_crc32(new_checksum, + (const uint8_t *)data + sizeof(nhdr), + header->size - sizeof(nhdr)); + + if (header->checksum != new_checksum) + return false; + + return true; +} + +/* + * nmbm_update_checksum - Update checksum of a NMBM structure + * @header: pointer to a NMBM structure with a NMBM header at beginning + * + * The size of the NMBM structure must be specified by @header->size + */ +static void nmbm_update_checksum(struct nmbm_header *header) +{ + header->checksum = 0; + header->checksum = nmbm_crc32(0, header, header->size); +} + +/* + * nmbm_get_spare_block_count - Calculate number of blocks should be reserved + * @block_count: number of blocks of data + * + * Calculate number of blocks should be reserved for data + */ +static uint32_t nmbm_get_spare_block_count(uint32_t block_count) +{ + uint32_t val; + + val = (block_count + NMBM_SPARE_BLOCK_DIV / 2) / NMBM_SPARE_BLOCK_DIV; + val *= NMBM_SPARE_BLOCK_MULTI; + + if (val < NMBM_SPARE_BLOCK_MIN) + val = NMBM_SPARE_BLOCK_MIN; + + return val; +} + +/* + * nmbm_get_block_state_raw - Get state of a block from raw block state table + * @block_state: pointer to raw block state table (bitmap) + * @ba: block address + */ +static uint32_t nmbm_get_block_state_raw(u32 *block_state, + uint32_t ba) +{ + uint32_t unit, shift; + + unit = ba / NMBM_BITMAP_BLOCKS_PER_UNIT; + shift = (ba % NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_BITS_PER_BLOCK; + + return (block_state[unit] >> shift) & BLOCK_ST_MASK; +} + +/* + * nmbm_get_block_state - Get state of a block from block state table + * @ni: NMBM instance structure + * @ba: block address + */ +static uint32_t nmbm_get_block_state(struct nmbm_instance *ni, uint32_t ba) +{ + return nmbm_get_block_state_raw(ni->block_state, ba); +} + +/* + * nmbm_set_block_state - 
Set state of a block to block state table + * @ni: NMBM instance structure + * @ba: block address + * @state: block state + * + * Set state of a block. If the block state changed, ni->block_state_changed + * will be increased. + */ +static bool nmbm_set_block_state(struct nmbm_instance *ni, uint32_t ba, + uint32_t state) +{ + uint32_t unit, shift, orig; + u32 uv; + + unit = ba / NMBM_BITMAP_BLOCKS_PER_UNIT; + shift = (ba % NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_BITS_PER_BLOCK; + + orig = (ni->block_state[unit] >> shift) & BLOCK_ST_MASK; + state &= BLOCK_ST_MASK; + + uv = ni->block_state[unit] & (~(BLOCK_ST_MASK << shift)); + uv |= state << shift; + ni->block_state[unit] = uv; + + if (orig != state) { + ni->block_state_changed++; + return true; + } + + return false; +} + +/* + * nmbm_block_walk_asc - Skip specified number of good blocks, ascending addr. + * @ni: NMBM instance structure + * @ba: start physical block address + * @nba: return physical block address after walk + * @count: number of good blocks to be skipped + * @limit: highest block address allowed for walking + * + * Start from @ba, skipping any bad blocks, counting @count good blocks, and + * return the next good block address. + * + * If no enough good blocks counted while @limit reached, false will be returned. + * + * If @count == 0, nearest good block address will be returned. + * @limit is not counted in walking. 
+ */ +static bool nmbm_block_walk_asc(struct nmbm_instance *ni, uint32_t ba, + uint32_t *nba, uint32_t count, + uint32_t limit) +{ + int32_t nblock = count; + + if (limit >= ni->block_count) + limit = ni->block_count - 1; + + while (ba < limit) { + if (nmbm_get_block_state(ni, ba) == BLOCK_ST_GOOD) + nblock--; + + if (nblock < 0) { + *nba = ba; + return true; + } + + ba++; + } + + return false; +} + +/* + * nmbm_block_walk_desc - Skip specified number of good blocks, descending addr + * @ni: NMBM instance structure + * @ba: start physical block address + * @nba: return physical block address after walk + * @count: number of good blocks to be skipped + * @limit: lowest block address allowed for walking + * + * Start from @ba, skipping any bad blocks, counting @count good blocks, and + * return the next good block address. + * + * If no enough good blocks counted while @limit reached, false will be returned. + * + * If @count == 0, nearest good block address will be returned. + * @limit is not counted in walking. + */ +static bool nmbm_block_walk_desc(struct nmbm_instance *ni, uint32_t ba, + uint32_t *nba, uint32_t count, uint32_t limit) +{ + int32_t nblock = count; + + if (limit >= ni->block_count) + limit = ni->block_count - 1; + + while (ba > limit) { + if (nmbm_get_block_state(ni, ba) == BLOCK_ST_GOOD) + nblock--; + + if (nblock < 0) { + *nba = ba; + return true; + } + + ba--; + } + + return false; +} + +/* + * nmbm_block_walk - Skip specified number of good blocks from curr. block addr + * @ni: NMBM instance structure + * @ascending: whether to walk ascending + * @ba: start physical block address + * @nba: return physical block address after walk + * @count: number of good blocks to be skipped + * @limit: highest/lowest block address allowed for walking + * + * Start from @ba, skipping any bad blocks, counting @count good blocks, and + * return the next good block address. + * + * If no enough good blocks counted while @limit reached, false will be returned. 
+ * + * If @count == 0, nearest good block address will be returned. + * @limit can be set to negative if no limit required. + * @limit is not counted in walking. + */ +static bool nmbm_block_walk(struct nmbm_instance *ni, bool ascending, + uint32_t ba, uint32_t *nba, int32_t count, + int32_t limit) +{ + if (ascending) + return nmbm_block_walk_asc(ni, ba, nba, count, limit); + + return nmbm_block_walk_desc(ni, ba, nba, count, limit); +} + +/* + * nmbm_scan_badblocks - Scan and record all bad blocks + * @ni: NMBM instance structure + * + * Scan the entire lower NAND chip and record all bad blocks into the block state + * table. + */ +static void nmbm_scan_badblocks(struct nmbm_instance *ni) +{ + uint32_t ba; + + for (ba = 0; ba < ni->block_count; ba++) { + if (nmbm_check_bad_phys_block(ni, ba)) { + nmbm_set_block_state(ni, ba, BLOCK_ST_BAD); + nlog_info(ni, "Bad block %u [0x%08llx]\n", ba, + ba2addr(ni, ba)); + } + } +} + +/* + * nmbm_build_mapping_table - Build initial block mapping table + * @ni: NMBM instance structure + * + * The initial mapping table will be compatible with the strategy of + * factory production. + */ +static void nmbm_build_mapping_table(struct nmbm_instance *ni) +{ + uint32_t pb, lb; + + for (pb = 0, lb = 0; pb < ni->mgmt_start_ba; pb++) { + if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD) + continue; + + /* Always map to the next good block */ + ni->block_mapping[lb++] = pb; + } + + ni->data_block_count = lb; + + /* Unusable/Management blocks */ + for (pb = lb; pb < ni->block_count; pb++) + ni->block_mapping[pb] = -1; +} + +/* + * nmbm_erase_block_and_check - Erase a block and check its usability + * @ni: NMBM instance structure + * @ba: block address to be erased + * + * Erase a block and check its usability + * + * Return true if the block is usable, false if erasure fails or the block + * has too many bitflips. 
+ */ +static bool nmbm_erase_block_and_check(struct nmbm_instance *ni, uint32_t ba) +{ + uint64_t addr, off; + bool success; + int ret; + + success = nmbm_erase_phys_block(ni, ba2addr(ni, ba)); + if (!success) + return false; + + if (!ni->empty_page_ecc_ok) + return true; + + /* Check every page to make sure there aren't too many bitflips */ + + addr = ba2addr(ni, ba); + + for (off = 0; off < bmtd.blk_size; off += bmtd.pg_size) { + ret = nmbm_read_phys_page(ni, addr + off, ni->page_cache, NULL); + if (ret == -EBADMSG) { + /* + * empty_page_ecc_ok means the empty page is + * still protected by ECC. So reading pages with ECC + * enabled and -EBADMSG means there are too many + * bitflips that can't be recovered, and the block + * containing the page should be marked bad. + */ + nlog_err(ni, + "Too many bitflips in empty page at 0x%llx\n", + addr + off); + return false; + } + } + + return true; +} + +/* + * nmbm_erase_range - Erase a range of blocks + * @ni: NMBM instance structure + * @ba: block address where the erasure will start + * @limit: top block address allowed for erasure + * + * Erase blocks within the specific range. Newly-found bad blocks will be + * marked. + * + * @limit is not counted into the allowed erasure address. 
+ */ +static void nmbm_erase_range(struct nmbm_instance *ni, uint32_t ba, + uint32_t limit) +{ + bool success; + + while (ba < limit) { + if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD) + goto next_block; + + /* Insurance to detect unexpected bad block marked by user */ + if (nmbm_check_bad_phys_block(ni, ba)) { + nmbm_set_block_state(ni, ba, BLOCK_ST_BAD); + goto next_block; + } + + success = nmbm_erase_block_and_check(ni, ba); + if (success) + goto next_block; + + nmbm_mark_phys_bad_block(ni, ba); + nmbm_set_block_state(ni, ba, BLOCK_ST_BAD); + + next_block: + ba++; + } +} + +/* + * nmbm_write_repeated_data - Write critical data to a block with retry + * @ni: NMBM instance structure + * @ba: block address where the data will be written to + * @data: the data to be written + * @size: size of the data + * + * Write data to every page of the block. Success only if all pages within + * this block have been successfully written. + * + * Make sure data size is not bigger than one page. + * + * This function will write and verify every page for at most + * NMBM_TRY_COUNT times. + */ +static bool nmbm_write_repeated_data(struct nmbm_instance *ni, uint32_t ba, + const void *data, uint32_t size) +{ + uint64_t addr, off; + bool success; + int ret; + + if (size > bmtd.pg_size) + return false; + + addr = ba2addr(ni, ba); + + for (off = 0; off < bmtd.blk_size; off += bmtd.pg_size) { + /* Prepare page data. fill 0xff to unused region */ + memcpy(ni->page_cache, data, size); + memset(ni->page_cache + size, 0xff, ni->rawpage_size - size); + + success = nmbm_write_phys_page(ni, addr + off, ni->page_cache, NULL); + if (!success) + return false; + + /* Verify the data just written. 
ECC error indicates failure */ + ret = nmbm_read_phys_page(ni, addr + off, ni->page_cache, NULL); + if (ret < 0) + return false; + + if (memcmp(ni->page_cache, data, size)) + return false; + } + + return true; +} + +/* + * nmbm_write_signature - Write signature to NAND chip + * @ni: NMBM instance structure + * @limit: top block address allowed for writing + * @signature: the signature to be written + * @signature_ba: the actual block address where signature is written to + * + * Write signature within a specific range, from chip bottom to limit. + * At most one block will be written. + * + * @limit is not counted into the allowed write address. + */ +static bool nmbm_write_signature(struct nmbm_instance *ni, uint32_t limit, + const struct nmbm_signature *signature, + uint32_t *signature_ba) +{ + uint32_t ba = ni->block_count - 1; + bool success; + + while (ba > limit) { + if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD) + goto next_block; + + /* Insurance to detect unexpected bad block marked by user */ + if (nmbm_check_bad_phys_block(ni, ba)) { + nmbm_set_block_state(ni, ba, BLOCK_ST_BAD); + goto next_block; + } + + success = nmbm_erase_block_and_check(ni, ba); + if (!success) + goto skip_bad_block; + + success = nmbm_write_repeated_data(ni, ba, signature, + sizeof(*signature)); + if (success) { + *signature_ba = ba; + return true; + } + + skip_bad_block: + nmbm_mark_phys_bad_block(ni, ba); + nmbm_set_block_state(ni, ba, BLOCK_ST_BAD); + + next_block: + ba--; + }; + + return false; +} + +/* + * nmbn_read_data - Read data + * @ni: NMBM instance structure + * @addr: linear address where the data will be read from + * @data: the data to be read + * @size: the size of data + * + * Read data range. + * Every page will be tried for at most NMBM_TRY_COUNT times. 
+ * + * Return 0 for success, positive value for corrected bitflip count, + * -EBADMSG for ecc error, other negative values for other errors + */ +static int nmbn_read_data(struct nmbm_instance *ni, uint64_t addr, void *data, + uint32_t size) +{ + uint64_t off = addr; + uint8_t *ptr = data; + uint32_t sizeremain = size, chunksize, leading; + int ret; + + while (sizeremain) { + leading = off & (bmtd.pg_size - 1); + chunksize = bmtd.pg_size - leading; + if (chunksize > sizeremain) + chunksize = sizeremain; + + if (chunksize == bmtd.pg_size) { + ret = nmbm_read_phys_page(ni, off - leading, ptr, NULL); + if (ret < 0) + return ret; + } else { + ret = nmbm_read_phys_page(ni, off - leading, + ni->page_cache, NULL); + if (ret < 0) + return ret; + + memcpy(ptr, ni->page_cache + leading, chunksize); + } + + off += chunksize; + ptr += chunksize; + sizeremain -= chunksize; + } + + return 0; +} + +/* + * nmbn_write_verify_data - Write data with validation + * @ni: NMBM instance structure + * @addr: linear address where the data will be written to + * @data: the data to be written + * @size: the size of data + * + * Write data and verify. + * Every page will be tried for at most NMBM_TRY_COUNT times. + */ +static bool nmbn_write_verify_data(struct nmbm_instance *ni, uint64_t addr, + const void *data, uint32_t size) +{ + uint64_t off = addr; + const uint8_t *ptr = data; + uint32_t sizeremain = size, chunksize, leading; + bool success; + int ret; + + while (sizeremain) { + leading = off & (bmtd.pg_size - 1); + chunksize = bmtd.pg_size - leading; + if (chunksize > sizeremain) + chunksize = sizeremain; + + /* Prepare page data. fill 0xff to unused region */ + memset(ni->page_cache, 0xff, ni->rawpage_size); + memcpy(ni->page_cache + leading, ptr, chunksize); + + success = nmbm_write_phys_page(ni, off - leading, + ni->page_cache, NULL); + if (!success) + return false; + + /* Verify the data just written. 
ECC error indicates failure */ + ret = nmbm_read_phys_page(ni, off - leading, ni->page_cache, NULL); + if (ret < 0) + return false; + + if (memcmp(ni->page_cache + leading, ptr, chunksize)) + return false; + + off += chunksize; + ptr += chunksize; + sizeremain -= chunksize; + } + + return true; +} + +/* + * nmbm_write_mgmt_range - Write management data into NAND within a range + * @ni: NMBM instance structure + * @addr: preferred start block address for writing + * @limit: highest block address allowed for writing + * @data: the data to be written + * @size: the size of data + * @actual_start_ba: actual start block address of data + * @actual_end_ba: block address after the end of data + * + * @limit is not counted into the allowed write address. + */ +static bool nmbm_write_mgmt_range(struct nmbm_instance *ni, uint32_t ba, + uint32_t limit, const void *data, + uint32_t size, uint32_t *actual_start_ba, + uint32_t *actual_end_ba) +{ + const uint8_t *ptr = data; + uint32_t sizeremain = size, chunksize; + bool success; + + while (sizeremain && ba < limit) { + chunksize = sizeremain; + if (chunksize > bmtd.blk_size) + chunksize = bmtd.blk_size; + + if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD) + goto next_block; + + /* Insurance to detect unexpected bad block marked by user */ + if (nmbm_check_bad_phys_block(ni, ba)) { + nmbm_set_block_state(ni, ba, BLOCK_ST_BAD); + goto next_block; + } + + success = nmbm_erase_block_and_check(ni, ba); + if (!success) + goto skip_bad_block; + + success = nmbn_write_verify_data(ni, ba2addr(ni, ba), ptr, + chunksize); + if (!success) + goto skip_bad_block; + + if (sizeremain == size) + *actual_start_ba = ba; + + ptr += chunksize; + sizeremain -= chunksize; + + goto next_block; + + skip_bad_block: + nmbm_mark_phys_bad_block(ni, ba); + nmbm_set_block_state(ni, ba, BLOCK_ST_BAD); + + next_block: + ba++; + } + + if (sizeremain) + return false; + + *actual_end_ba = ba; + + return true; +} + +/* + * nmbm_generate_info_table_cache - 
Generate info table cache data + * @ni: NMBM instance structure + * + * Generate info table cache data to be written into flash. + */ +static bool nmbm_generate_info_table_cache(struct nmbm_instance *ni) +{ + bool changed = false; + + memset(ni->info_table_cache, 0xff, ni->info_table_size); + + memcpy(ni->info_table_cache + ni->info_table.state_table_off, + ni->block_state, ni->state_table_size); + + memcpy(ni->info_table_cache + ni->info_table.mapping_table_off, + ni->block_mapping, ni->mapping_table_size); + + ni->info_table.header.magic = NMBM_MAGIC_INFO_TABLE; + ni->info_table.header.version = NMBM_VER; + ni->info_table.header.size = ni->info_table_size; + + if (ni->block_state_changed || ni->block_mapping_changed) { + ni->info_table.write_count++; + changed = true; + } + + memcpy(ni->info_table_cache, &ni->info_table, sizeof(ni->info_table)); + + nmbm_update_checksum((struct nmbm_header *)ni->info_table_cache); + + return changed; +} + +/* + * nmbm_write_info_table - Write info table into NAND within a range + * @ni: NMBM instance structure + * @ba: preferred start block address for writing + * @limit: highest block address allowed for writing + * @actual_start_ba: actual start block address of info table + * @actual_end_ba: block address after the end of info table + * + * @limit is counted into the allowed write address. 
+ */ +static bool nmbm_write_info_table(struct nmbm_instance *ni, uint32_t ba, + uint32_t limit, uint32_t *actual_start_ba, + uint32_t *actual_end_ba) +{ + return nmbm_write_mgmt_range(ni, ba, limit, ni->info_table_cache, + ni->info_table_size, actual_start_ba, + actual_end_ba); +} + +/* + * nmbm_mark_tables_clean - Mark info table `clean' + * @ni: NMBM instance structure + */ +static void nmbm_mark_tables_clean(struct nmbm_instance *ni) +{ + ni->block_state_changed = 0; + ni->block_mapping_changed = 0; +} + +/* + * nmbm_try_reserve_blocks - Reserve blocks with compromise + * @ni: NMBM instance structure + * @ba: start physical block address + * @nba: return physical block address after reservation + * @count: number of good blocks to be skipped + * @min_count: minimum number of good blocks to be skipped + * @limit: highest/lowest block address allowed for walking + * + * Reserve specific blocks. If that fails, try to reserve as many as possible. + */ +static bool nmbm_try_reserve_blocks(struct nmbm_instance *ni, uint32_t ba, + uint32_t *nba, uint32_t count, + int32_t min_count, int32_t limit) +{ + int32_t nblocks = count; + bool success; + + while (nblocks >= min_count) { + success = nmbm_block_walk(ni, true, ba, nba, nblocks, limit); + if (success) + return true; + + nblocks--; + } + + return false; +} + +/* + * nmbm_rebuild_info_table - Build main & backup info table from scratch + * @ni: NMBM instance structure + * @allow_no_gap: allow no spare blocks between two tables + */ +static bool nmbm_rebuild_info_table(struct nmbm_instance *ni) +{ + uint32_t table_start_ba, table_end_ba, next_start_ba; + uint32_t main_table_end_ba; + bool success; + + /* Set initial value */ + ni->main_table_ba = 0; + ni->backup_table_ba = 0; + ni->mapping_blocks_ba = ni->mapping_blocks_top_ba; + + /* Write main table */ + success = nmbm_write_info_table(ni, ni->mgmt_start_ba, + ni->mapping_blocks_top_ba, + &table_start_ba, &table_end_ba); + if (!success) { + /* Failed to write main 
table, data will be lost */ + nlog_err(ni, "Unable to write at least one info table!\n"); + nlog_err(ni, "Please save your data before power off!\n"); + ni->protected = 1; + return false; + } + + /* Main info table is successfully written, record its offset */ + ni->main_table_ba = table_start_ba; + main_table_end_ba = table_end_ba; + + /* Adjust mapping_blocks_ba */ + ni->mapping_blocks_ba = table_end_ba; + + nmbm_mark_tables_clean(ni); + + nlog_table_creation(ni, true, table_start_ba, table_end_ba); + + /* Reserve spare blocks for main info table. */ + success = nmbm_try_reserve_blocks(ni, table_end_ba, + &next_start_ba, + ni->info_table_spare_blocks, 0, + ni->mapping_blocks_top_ba - + size2blk(ni, ni->info_table_size)); + if (!success) { + /* There is no spare block. */ + nlog_debug(ni, "No room for backup info table\n"); + return true; + } + + /* Write backup info table. */ + success = nmbm_write_info_table(ni, next_start_ba, + ni->mapping_blocks_top_ba, + &table_start_ba, &table_end_ba); + if (!success) { + /* There is no enough blocks for backup table. */ + nlog_debug(ni, "No room for backup info table\n"); + return true; + } + + /* Backup table is successfully written, record its offset */ + ni->backup_table_ba = table_start_ba; + + /* Adjust mapping_blocks_off */ + ni->mapping_blocks_ba = table_end_ba; + + /* Erase spare blocks of main table to clean possible interference data */ + nmbm_erase_range(ni, main_table_end_ba, ni->backup_table_ba); + + nlog_table_creation(ni, false, table_start_ba, table_end_ba); + + return true; +} + +/* + * nmbm_rescue_single_info_table - Rescue when there is only one info table + * @ni: NMBM instance structure + * + * This function is called when there is only one info table exists. 
+ * This function may fail if we can't write new info table + */ +static bool nmbm_rescue_single_info_table(struct nmbm_instance *ni) +{ + uint32_t table_start_ba, table_end_ba, write_ba; + bool success; + + /* Try to write new info table in front of existing table */ + success = nmbm_write_info_table(ni, ni->mgmt_start_ba, + ni->main_table_ba, + &table_start_ba, + &table_end_ba); + if (success) { + /* + * New table becomes the main table, existing table becomes + * the backup table. + */ + ni->backup_table_ba = ni->main_table_ba; + ni->main_table_ba = table_start_ba; + + nmbm_mark_tables_clean(ni); + + /* Erase spare blocks of main table to clean possible interference data */ + nmbm_erase_range(ni, table_end_ba, ni->backup_table_ba); + + nlog_table_creation(ni, true, table_start_ba, table_end_ba); + + return true; + } + + /* Try to reserve spare blocks for existing table */ + success = nmbm_try_reserve_blocks(ni, ni->mapping_blocks_ba, &write_ba, + ni->info_table_spare_blocks, 0, + ni->mapping_blocks_top_ba - + size2blk(ni, ni->info_table_size)); + if (!success) { + nlog_warn(ni, "Failed to rescue single info table\n"); + return false; + } + + /* Try to write new info table next to the existing table */ + while (write_ba >= ni->mapping_blocks_ba) { + success = nmbm_write_info_table(ni, write_ba, + ni->mapping_blocks_top_ba, + &table_start_ba, + &table_end_ba); + if (success) + break; + + write_ba--; + } + + if (success) { + /* Erase spare blocks of main table to clean possible interference data */ + nmbm_erase_range(ni, ni->mapping_blocks_ba, table_start_ba); + + /* New table becomes the backup table */ + ni->backup_table_ba = table_start_ba; + ni->mapping_blocks_ba = table_end_ba; + + nmbm_mark_tables_clean(ni); + + nlog_table_creation(ni, false, table_start_ba, table_end_ba); + + return true; + } + + nlog_warn(ni, "Failed to rescue single info table\n"); + return false; +} + +/* + * nmbm_update_single_info_table - Update specific one info table + * @ni: NMBM 
instance structure + */ +static bool nmbm_update_single_info_table(struct nmbm_instance *ni, + bool update_main_table) +{ + uint32_t write_start_ba, write_limit, table_start_ba, table_end_ba; + bool success; + + /* Determine the write range */ + if (update_main_table) { + write_start_ba = ni->main_table_ba; + write_limit = ni->backup_table_ba; + } else { + write_start_ba = ni->backup_table_ba; + write_limit = ni->mapping_blocks_top_ba; + } + + success = nmbm_write_info_table(ni, write_start_ba, write_limit, + &table_start_ba, &table_end_ba); + if (success) { + if (update_main_table) { + ni->main_table_ba = table_start_ba; + } else { + ni->backup_table_ba = table_start_ba; + ni->mapping_blocks_ba = table_end_ba; + } + + nmbm_mark_tables_clean(ni); + + nlog_table_update(ni, update_main_table, table_start_ba, + table_end_ba); + + return true; + } + + if (update_main_table) { + /* + * If failed to update main table, make backup table the new + * main table, and call nmbm_rescue_single_info_table() + */ + nlog_warn(ni, "Unable to update %s info table\n", + update_main_table ? "Main" : "Backup"); + + ni->main_table_ba = ni->backup_table_ba; + ni->backup_table_ba = 0; + return nmbm_rescue_single_info_table(ni); + } + + /* Only one table left */ + ni->mapping_blocks_ba = ni->backup_table_ba; + ni->backup_table_ba = 0; + + return false; +} + +/* + * nmbm_rescue_main_info_table - Rescue when failed to write main info table + * @ni: NMBM instance structure + * + * This function is called when main info table failed to be written, and + * backup info table exists. 
+ */ +static bool nmbm_rescue_main_info_table(struct nmbm_instance *ni) +{ + uint32_t tmp_table_start_ba, tmp_table_end_ba, main_table_start_ba; + uint32_t main_table_end_ba, write_ba; + uint32_t info_table_erasesize = size2blk(ni, ni->info_table_size); + bool success; + + /* Try to reserve spare blocks for existing backup info table */ + success = nmbm_try_reserve_blocks(ni, ni->mapping_blocks_ba, &write_ba, + ni->info_table_spare_blocks, 0, + ni->mapping_blocks_top_ba - + info_table_erasesize); + if (!success) { + /* There is no spare block. Backup info table becomes the main table. */ + nlog_err(ni, "No room for temporary info table\n"); + ni->main_table_ba = ni->backup_table_ba; + ni->backup_table_ba = 0; + return true; + } + + /* Try to write temporary info table into spare unmapped blocks */ + while (write_ba >= ni->mapping_blocks_ba) { + success = nmbm_write_info_table(ni, write_ba, + ni->mapping_blocks_top_ba, + &tmp_table_start_ba, + &tmp_table_end_ba); + if (success) + break; + + write_ba--; + } + + if (!success) { + /* Backup info table becomes the main table */ + nlog_err(ni, "Failed to update main info table\n"); + ni->main_table_ba = ni->backup_table_ba; + ni->backup_table_ba = 0; + return true; + } + + /* Adjust mapping_blocks_off */ + ni->mapping_blocks_ba = tmp_table_end_ba; + + /* + * Now write main info table at the beginning of management area. + * This operation will generally destroy the original backup info + * table. 
+ */ + success = nmbm_write_info_table(ni, ni->mgmt_start_ba, + tmp_table_start_ba, + &main_table_start_ba, + &main_table_end_ba); + if (!success) { + /* Temporary info table becomes the main table */ + ni->main_table_ba = tmp_table_start_ba; + ni->backup_table_ba = 0; + + nmbm_mark_tables_clean(ni); + + nlog_err(ni, "Failed to update main info table\n"); + + return true; + } + + /* Main info table has been successfully written, record its offset */ + ni->main_table_ba = main_table_start_ba; + + nmbm_mark_tables_clean(ni); + + nlog_table_creation(ni, true, main_table_start_ba, main_table_end_ba); + + /* + * Temporary info table becomes the new backup info table if it's + * not overwritten. + */ + if (main_table_end_ba <= tmp_table_start_ba) { + ni->backup_table_ba = tmp_table_start_ba; + + nlog_table_creation(ni, false, tmp_table_start_ba, + tmp_table_end_ba); + + return true; + } + + /* Adjust mapping_blocks_off */ + ni->mapping_blocks_ba = main_table_end_ba; + + /* Try to reserve spare blocks for new main info table */ + success = nmbm_try_reserve_blocks(ni, main_table_end_ba, &write_ba, + ni->info_table_spare_blocks, 0, + ni->mapping_blocks_top_ba - + info_table_erasesize); + if (!success) { + /* There is no spare block. Only main table exists. */ + nlog_err(ni, "No room for backup info table\n"); + ni->backup_table_ba = 0; + return true; + } + + /* Write new backup info table. 
*/ + while (write_ba >= main_table_end_ba) { + success = nmbm_write_info_table(ni, write_ba, + ni->mapping_blocks_top_ba, + &tmp_table_start_ba, + &tmp_table_end_ba); + if (success) + break; + + write_ba--; + } + + if (!success) { + nlog_err(ni, "No room for backup info table\n"); + ni->backup_table_ba = 0; + return true; + } + + /* Backup info table has been successfully written, record its offset */ + ni->backup_table_ba = tmp_table_start_ba; + + /* Adjust mapping_blocks_off */ + ni->mapping_blocks_ba = tmp_table_end_ba; + + /* Erase spare blocks of main table to clean possible interference data */ + nmbm_erase_range(ni, main_table_end_ba, ni->backup_table_ba); + + nlog_table_creation(ni, false, tmp_table_start_ba, tmp_table_end_ba); + + return true; +} + +/* + * nmbm_update_info_table_once - Update info table once + * @ni: NMBM instance structure + * @force: force update + * + * Update both main and backup info table. Return true if at least one info + * table has been successfully written. + * This function only tries to update the info table once regardless of the result. + */ +static bool nmbm_update_info_table_once(struct nmbm_instance *ni, bool force) +{ + uint32_t table_start_ba, table_end_ba; + uint32_t main_table_limit; + bool success; + + /* Do nothing if there is no change */ + if (!nmbm_generate_info_table_cache(ni) && !force) + return true; + + /* Check whether both tables exist */ + if (!ni->backup_table_ba) { + main_table_limit = ni->mapping_blocks_top_ba; + goto write_main_table; + } + + /* + * Write backup info table in its current range. + * Note that limit is set to mapping_blocks_top_off to provide as many + * spare blocks as possible for the backup table. If at last + * unmapped blocks are used by backup table, mapping_blocks_off will + * be adjusted. 
+ */ + success = nmbm_write_info_table(ni, ni->backup_table_ba, + ni->mapping_blocks_top_ba, + &table_start_ba, &table_end_ba); + if (!success) { + /* + * There is nothing to do if failed to write backup table. + * Write the main table now. + */ + nlog_err(ni, "No room for backup table\n"); + ni->mapping_blocks_ba = ni->backup_table_ba; + ni->backup_table_ba = 0; + main_table_limit = ni->mapping_blocks_top_ba; + goto write_main_table; + } + + /* Backup table is successfully written, record its offset */ + ni->backup_table_ba = table_start_ba; + + /* Adjust mapping_blocks_off */ + ni->mapping_blocks_ba = table_end_ba; + + nmbm_mark_tables_clean(ni); + + /* The normal limit of main table */ + main_table_limit = ni->backup_table_ba; + + nlog_table_update(ni, false, table_start_ba, table_end_ba); + +write_main_table: + if (!ni->main_table_ba) + goto rebuild_tables; + + /* Write main info table in its current range */ + success = nmbm_write_info_table(ni, ni->main_table_ba, + main_table_limit, &table_start_ba, + &table_end_ba); + if (!success) { + /* If failed to write main table, go rescue procedure */ + if (!ni->backup_table_ba) + goto rebuild_tables; + + return nmbm_rescue_main_info_table(ni); + } + + /* Main info table is successfully written, record its offset */ + ni->main_table_ba = table_start_ba; + + /* Adjust mapping_blocks_off */ + if (!ni->backup_table_ba) + ni->mapping_blocks_ba = table_end_ba; + + nmbm_mark_tables_clean(ni); + + nlog_table_update(ni, true, table_start_ba, table_end_ba); + + return true; + +rebuild_tables: + return nmbm_rebuild_info_table(ni); +} + +/* + * nmbm_update_info_table - Update info table + * @ni: NMBM instance structure + * + * Update both main and backup info table. Return true if at least one table + * has been successfully written. + * This function will try to update info table repeatedly until no new bad + * block found during updating. 
+ */ +static bool nmbm_update_info_table(struct nmbm_instance *ni) +{ + bool success; + + if (ni->protected) + return true; + + while (ni->block_state_changed || ni->block_mapping_changed) { + success = nmbm_update_info_table_once(ni, false); + if (!success) { + nlog_err(ni, "Failed to update info table\n"); + return false; + } + } + + return true; +} + +/* + * nmbm_map_block - Map a bad block to a unused spare block + * @ni: NMBM instance structure + * @lb: logic block addr to map + */ +static bool nmbm_map_block(struct nmbm_instance *ni, uint32_t lb) +{ + uint32_t pb; + bool success; + + if (ni->mapping_blocks_ba == ni->mapping_blocks_top_ba) { + nlog_warn(ni, "No spare unmapped blocks.\n"); + return false; + } + + success = nmbm_block_walk(ni, false, ni->mapping_blocks_top_ba, &pb, 0, + ni->mapping_blocks_ba); + if (!success) { + nlog_warn(ni, "No spare unmapped blocks.\n"); + nmbm_update_info_table(ni); + ni->mapping_blocks_top_ba = ni->mapping_blocks_ba; + return false; + } + + ni->block_mapping[lb] = pb; + ni->mapping_blocks_top_ba--; + ni->block_mapping_changed++; + + nlog_info(ni, "Logic block %u mapped to physical block %u\n", lb, pb); + + return true; +} + +/* + * nmbm_create_info_table - Create info table(s) + * @ni: NMBM instance structure + * + * This function assumes that the chip has no existing info table(s) + */ +static bool nmbm_create_info_table(struct nmbm_instance *ni) +{ + uint32_t lb; + bool success; + + /* Set initial mapping_blocks_top_off */ + success = nmbm_block_walk(ni, false, ni->signature_ba, + &ni->mapping_blocks_top_ba, 1, + ni->mgmt_start_ba); + if (!success) { + nlog_err(ni, "No room for spare blocks\n"); + return false; + } + + /* Generate info table cache */ + nmbm_generate_info_table_cache(ni); + + /* Write info table */ + success = nmbm_rebuild_info_table(ni); + if (!success) { + nlog_err(ni, "Failed to build info tables\n"); + return false; + } + + /* Remap bad block(s) at end of data area */ + for (lb = ni->data_block_count; 
lb < ni->mgmt_start_ba; lb++) { + success = nmbm_map_block(ni, lb); + if (!success) + break; + + ni->data_block_count++; + } + + /* If state table and/or mapping table changed, update info table. */ + success = nmbm_update_info_table(ni); + if (!success) + return false; + + return true; +} + +/* + * nmbm_create_new - Create NMBM on a new chip + * @ni: NMBM instance structure + */ +static bool nmbm_create_new(struct nmbm_instance *ni) +{ + bool success; + + /* Determine the boundary of management blocks */ + ni->mgmt_start_ba = ni->block_count * (NMBM_MGMT_DIV - ni->max_ratio) / NMBM_MGMT_DIV; + + if (ni->max_reserved_blocks && ni->block_count - ni->mgmt_start_ba > ni->max_reserved_blocks) + ni->mgmt_start_ba = ni->block_count - ni->max_reserved_blocks; + + nlog_info(ni, "NMBM management region starts at block %u [0x%08llx]\n", + ni->mgmt_start_ba, ba2addr(ni, ni->mgmt_start_ba)); + + /* Fill block state table & mapping table */ + nmbm_scan_badblocks(ni); + nmbm_build_mapping_table(ni); + + /* Write signature */ + ni->signature.header.magic = NMBM_MAGIC_SIGNATURE; + ni->signature.header.version = NMBM_VER; + ni->signature.header.size = sizeof(ni->signature); + ni->signature.nand_size = bmtd.total_blks << bmtd.blk_shift; + ni->signature.block_size = bmtd.blk_size; + ni->signature.page_size = bmtd.pg_size; + ni->signature.spare_size = bmtd.mtd->oobsize; + ni->signature.mgmt_start_pb = ni->mgmt_start_ba; + ni->signature.max_try_count = NMBM_TRY_COUNT; + nmbm_update_checksum(&ni->signature.header); + + success = nmbm_write_signature(ni, ni->mgmt_start_ba, + &ni->signature, &ni->signature_ba); + if (!success) { + nlog_err(ni, "Failed to write signature to a proper offset\n"); + return false; + } + + nlog_info(ni, "Signature has been written to block %u [0x%08llx]\n", + ni->signature_ba, ba2addr(ni, ni->signature_ba)); + + /* Write info table(s) */ + success = nmbm_create_info_table(ni); + if (success) { + nlog_info(ni, "NMBM has been successfully created\n"); + return 
true; + } + + return false; +} + +/* + * nmbm_check_info_table_header - Check if a info table header is valid + * @ni: NMBM instance structure + * @data: pointer to the info table header + */ +static bool nmbm_check_info_table_header(struct nmbm_instance *ni, void *data) +{ + struct nmbm_info_table_header *ifthdr = data; + + if (ifthdr->header.magic != NMBM_MAGIC_INFO_TABLE) + return false; + + if (ifthdr->header.size != ni->info_table_size) + return false; + + if (ifthdr->mapping_table_off - ifthdr->state_table_off < ni->state_table_size) + return false; + + if (ni->info_table_size - ifthdr->mapping_table_off < ni->mapping_table_size) + return false; + + return true; +} + +/* + * nmbm_check_info_table - Check if a whole info table is valid + * @ni: NMBM instance structure + * @start_ba: start block address of this table + * @end_ba: end block address of this table + * @data: pointer to the info table header + * @mapping_blocks_top_ba: return the block address of top remapped block + */ +static bool nmbm_check_info_table(struct nmbm_instance *ni, uint32_t start_ba, + uint32_t end_ba, void *data, + uint32_t *mapping_blocks_top_ba) +{ + struct nmbm_info_table_header *ifthdr = data; + int32_t *block_mapping = (int32_t *)((uintptr_t)data + ifthdr->mapping_table_off); + u32 *block_state = (u32 *)((uintptr_t)data + ifthdr->state_table_off); + uint32_t minimum_mapping_pb = ni->signature_ba; + uint32_t ba; + + for (ba = 0; ba < ni->data_block_count; ba++) { + if ((block_mapping[ba] >= ni->data_block_count && block_mapping[ba] < end_ba) || + block_mapping[ba] == ni->signature_ba) + return false; + + if (block_mapping[ba] >= end_ba && block_mapping[ba] < minimum_mapping_pb) + minimum_mapping_pb = block_mapping[ba]; + } + + for (ba = start_ba; ba < end_ba; ba++) { + if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD) + continue; + + if (nmbm_get_block_state_raw(block_state, ba) != BLOCK_ST_GOOD) + return false; + } + + *mapping_blocks_top_ba = minimum_mapping_pb - 1; + + 
return true; +} + +/* + * nmbm_try_load_info_table - Try to load info table from a address + * @ni: NMBM instance structure + * @ba: start block address of the info table + * @eba: return the block address after end of the table + * @write_count: return the write count of this table + * @mapping_blocks_top_ba: return the block address of top remapped block + * @table_loaded: used to record whether ni->info_table has valid data + */ +static bool nmbm_try_load_info_table(struct nmbm_instance *ni, uint32_t ba, + uint32_t *eba, uint32_t *write_count, + uint32_t *mapping_blocks_top_ba, + bool table_loaded) +{ + struct nmbm_info_table_header *ifthdr = (void *)ni->info_table_cache; + uint8_t *off = ni->info_table_cache; + uint32_t limit = ba + size2blk(ni, ni->info_table_size); + uint32_t start_ba = 0, chunksize, sizeremain = ni->info_table_size; + bool success, checkhdr = true; + int ret; + + while (sizeremain && ba < limit) { + if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD) + goto next_block; + + if (nmbm_check_bad_phys_block(ni, ba)) { + nmbm_set_block_state(ni, ba, BLOCK_ST_BAD); + goto next_block; + } + + chunksize = sizeremain; + if (chunksize > bmtd.blk_size) + chunksize = bmtd.blk_size; + + /* Assume block with ECC error has no info table data */ + ret = nmbn_read_data(ni, ba2addr(ni, ba), off, chunksize); + if (ret < 0) + goto skip_bad_block; + else if (ret > 0) + return false; + + if (checkhdr) { + success = nmbm_check_info_table_header(ni, off); + if (!success) + return false; + + start_ba = ba; + checkhdr = false; + } + + off += chunksize; + sizeremain -= chunksize; + + goto next_block; + + skip_bad_block: + /* Only mark bad in memory */ + nmbm_set_block_state(ni, ba, BLOCK_ST_BAD); + + next_block: + ba++; + } + + if (sizeremain) + return false; + + success = nmbm_check_header(ni->info_table_cache, ni->info_table_size); + if (!success) + return false; + + *eba = ba; + *write_count = ifthdr->write_count; + + success = nmbm_check_info_table(ni, start_ba, ba, 
ni->info_table_cache, + mapping_blocks_top_ba); + if (!success) + return false; + + if (!table_loaded || ifthdr->write_count > ni->info_table.write_count) { + memcpy(&ni->info_table, ifthdr, sizeof(ni->info_table)); + memcpy(ni->block_state, + (uint8_t *)ifthdr + ifthdr->state_table_off, + ni->state_table_size); + memcpy(ni->block_mapping, + (uint8_t *)ifthdr + ifthdr->mapping_table_off, + ni->mapping_table_size); + ni->info_table.write_count = ifthdr->write_count; + } + + return true; +} + +/* + * nmbm_search_info_table - Search info table from specific address + * @ni: NMBM instance structure + * @ba: start block address to search + * @limit: highest block address allowed for searching + * @table_start_ba: return the start block address of this table + * @table_end_ba: return the block address after end of this table + * @write_count: return the write count of this table + * @mapping_blocks_top_ba: return the block address of top remapped block + * @table_loaded: used to record whether ni->info_table has valid data + */ +static bool nmbm_search_info_table(struct nmbm_instance *ni, uint32_t ba, + uint32_t limit, uint32_t *table_start_ba, + uint32_t *table_end_ba, + uint32_t *write_count, + uint32_t *mapping_blocks_top_ba, + bool table_loaded) +{ + bool success; + + while (ba < limit - size2blk(ni, ni->info_table_size)) { + success = nmbm_try_load_info_table(ni, ba, table_end_ba, + write_count, + mapping_blocks_top_ba, + table_loaded); + if (success) { + *table_start_ba = ba; + return true; + } + + ba++; + } + + return false; +} + +/* + * nmbm_load_info_table - Load info table(s) from a chip + * @ni: NMBM instance structure + * @ba: start block address to search info table + * @limit: highest block address allowed for searching + */ +static bool nmbm_load_info_table(struct nmbm_instance *ni, uint32_t ba, + uint32_t limit) +{ + uint32_t main_table_end_ba, backup_table_end_ba, table_end_ba; + uint32_t main_mapping_blocks_top_ba, backup_mapping_blocks_top_ba; + 
uint32_t main_table_write_count, backup_table_write_count; + uint32_t i; + bool success; + + /* Set initial value */ + ni->main_table_ba = 0; + ni->backup_table_ba = 0; + ni->info_table.write_count = 0; + ni->mapping_blocks_top_ba = ni->signature_ba - 1; + ni->data_block_count = ni->signature.mgmt_start_pb; + + /* Find first info table */ + success = nmbm_search_info_table(ni, ba, limit, &ni->main_table_ba, + &main_table_end_ba, &main_table_write_count, + &main_mapping_blocks_top_ba, false); + if (!success) { + nlog_warn(ni, "No valid info table found\n"); + return false; + } + + table_end_ba = main_table_end_ba; + + nlog_table_found(ni, true, main_table_write_count, ni->main_table_ba, + main_table_end_ba); + + /* Find second info table */ + success = nmbm_search_info_table(ni, main_table_end_ba, limit, + &ni->backup_table_ba, &backup_table_end_ba, + &backup_table_write_count, &backup_mapping_blocks_top_ba, true); + if (!success) { + nlog_warn(ni, "Second info table not found\n"); + } else { + table_end_ba = backup_table_end_ba; + + nlog_table_found(ni, false, backup_table_write_count, + ni->backup_table_ba, backup_table_end_ba); + } + + /* Pick mapping_blocks_top_ba */ + if (!ni->backup_table_ba) { + ni->mapping_blocks_top_ba= main_mapping_blocks_top_ba; + } else { + if (main_table_write_count >= backup_table_write_count) + ni->mapping_blocks_top_ba = main_mapping_blocks_top_ba; + else + ni->mapping_blocks_top_ba = backup_mapping_blocks_top_ba; + } + + /* Set final mapping_blocks_ba */ + ni->mapping_blocks_ba = table_end_ba; + + /* Set final data_block_count */ + for (i = ni->signature.mgmt_start_pb; i > 0; i--) { + if (ni->block_mapping[i - 1] >= 0) { + ni->data_block_count = i; + break; + } + } + + /* Regenerate the info table cache from the final selected info table */ + nmbm_generate_info_table_cache(ni); + + /* + * If only one table exists, try to write another table. 
+ * If two tables have different write count, try to update info table + */ + if (!ni->backup_table_ba) { + success = nmbm_rescue_single_info_table(ni); + } else if (main_table_write_count != backup_table_write_count) { + /* Mark state & mapping tables changed */ + ni->block_state_changed = 1; + ni->block_mapping_changed = 1; + + success = nmbm_update_single_info_table(ni, + main_table_write_count < backup_table_write_count); + } else { + success = true; + } + + /* + * If there is no spare unmapped blocks, or still only one table + * exists, set the chip to read-only + */ + if (ni->mapping_blocks_ba == ni->mapping_blocks_top_ba) { + nlog_warn(ni, "No spare unmapped blocks. Device is now read-only\n"); + ni->protected = 1; + } else if (!success) { + nlog_warn(ni, "Only one info table found. Device is now read-only\n"); + ni->protected = 1; + } + + return true; +} + +/* + * nmbm_load_existing - Load NMBM from a new chip + * @ni: NMBM instance structure + */ +static bool nmbm_load_existing(struct nmbm_instance *ni) +{ + bool success; + + /* Calculate the boundary of management blocks */ + ni->mgmt_start_ba = ni->signature.mgmt_start_pb; + + nlog_debug(ni, "NMBM management region starts at block %u [0x%08llx]\n", + ni->mgmt_start_ba, ba2addr(ni, ni->mgmt_start_ba)); + + /* Look for info table(s) */ + success = nmbm_load_info_table(ni, ni->mgmt_start_ba, + ni->signature_ba); + if (success) { + nlog_info(ni, "NMBM has been successfully attached\n"); + return true; + } + + if (!ni->force_create) { + printk("not creating NMBM table\n"); + return false; + } + + /* Fill block state table & mapping table */ + nmbm_scan_badblocks(ni); + nmbm_build_mapping_table(ni); + + /* Write info table(s) */ + success = nmbm_create_info_table(ni); + if (success) { + nlog_info(ni, "NMBM has been successfully created\n"); + return true; + } + + return false; +} + +/* + * nmbm_find_signature - Find signature in the lower NAND chip + * @ni: NMBM instance structure + * @signature_ba: used for 
storing block address of the signature + * @signature_ba: return the actual block address of signature block + * + * Find a valid signature from a specific range in the lower NAND chip, + * from bottom (highest address) to top (lowest address) + * + * Return true if found. + */ +static bool nmbm_find_signature(struct nmbm_instance *ni, + struct nmbm_signature *signature, + uint32_t *signature_ba) +{ + struct nmbm_signature sig; + uint64_t off, addr; + uint32_t block_count, ba, limit; + bool success; + int ret; + + /* Calculate top and bottom block address */ + block_count = bmtd.total_blks; + ba = block_count; + limit = (block_count / NMBM_MGMT_DIV) * (NMBM_MGMT_DIV - ni->max_ratio); + if (ni->max_reserved_blocks && block_count - limit > ni->max_reserved_blocks) + limit = block_count - ni->max_reserved_blocks; + + while (ba >= limit) { + ba--; + addr = ba2addr(ni, ba); + + if (nmbm_check_bad_phys_block(ni, ba)) + continue; + + /* Check every page. + * As long as at leaset one page contains valid signature, + * the block is treated as a valid signature block. 
+ */ + for (off = 0; off < bmtd.blk_size; + off += bmtd.pg_size) { + ret = nmbn_read_data(ni, addr + off, &sig, + sizeof(sig)); + if (ret) + continue; + + /* Check for header size and checksum */ + success = nmbm_check_header(&sig, sizeof(sig)); + if (!success) + continue; + + /* Check for header magic */ + if (sig.header.magic == NMBM_MAGIC_SIGNATURE) { + /* Found it */ + memcpy(signature, &sig, sizeof(sig)); + *signature_ba = ba; + return true; + } + } + }; + + return false; +} + +/* + * nmbm_calc_structure_size - Calculate the instance structure size + * @nld: NMBM lower device structure + */ +static size_t nmbm_calc_structure_size(void) +{ + uint32_t state_table_size, mapping_table_size, info_table_size; + uint32_t block_count; + + block_count = bmtd.total_blks; + + /* Calculate info table size */ + state_table_size = ((block_count + NMBM_BITMAP_BLOCKS_PER_UNIT - 1) / + NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_UNIT_SIZE; + mapping_table_size = block_count * sizeof(int32_t); + + info_table_size = ALIGN(sizeof(struct nmbm_info_table_header), + bmtd.pg_size); + info_table_size += ALIGN(state_table_size, bmtd.pg_size); + info_table_size += ALIGN(mapping_table_size, bmtd.pg_size); + + return info_table_size + state_table_size + mapping_table_size + + sizeof(struct nmbm_instance); +} + +/* + * nmbm_init_structure - Initialize members of instance structure + * @ni: NMBM instance structure + */ +static void nmbm_init_structure(struct nmbm_instance *ni) +{ + uint32_t pages_per_block, blocks_per_chip; + uintptr_t ptr; + + pages_per_block = bmtd.blk_size / bmtd.pg_size; + blocks_per_chip = bmtd.total_blks; + + ni->rawpage_size = bmtd.pg_size + bmtd.mtd->oobsize; + ni->rawblock_size = pages_per_block * ni->rawpage_size; + ni->rawchip_size = blocks_per_chip * ni->rawblock_size; + + /* Calculate number of block this chip */ + ni->block_count = blocks_per_chip; + + /* Calculate info table size */ + ni->state_table_size = ((ni->block_count + NMBM_BITMAP_BLOCKS_PER_UNIT - 1) 
/ + NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_UNIT_SIZE; + ni->mapping_table_size = ni->block_count * sizeof(*ni->block_mapping); + + ni->info_table_size = ALIGN(sizeof(ni->info_table), + bmtd.pg_size); + ni->info_table.state_table_off = ni->info_table_size; + + ni->info_table_size += ALIGN(ni->state_table_size, + bmtd.pg_size); + ni->info_table.mapping_table_off = ni->info_table_size; + + ni->info_table_size += ALIGN(ni->mapping_table_size, + bmtd.pg_size); + + ni->info_table_spare_blocks = nmbm_get_spare_block_count( + size2blk(ni, ni->info_table_size)); + + /* Assign memory to members */ + ptr = (uintptr_t)ni + sizeof(*ni); + + ni->info_table_cache = (void *)ptr; + ptr += ni->info_table_size; + + ni->block_state = (void *)ptr; + ptr += ni->state_table_size; + + ni->block_mapping = (void *)ptr; + ptr += ni->mapping_table_size; + + ni->page_cache = bmtd.data_buf; + + /* Initialize block state table */ + ni->block_state_changed = 0; + memset(ni->block_state, 0xff, ni->state_table_size); + + /* Initialize block mapping table */ + ni->block_mapping_changed = 0; +} + +/* + * nmbm_attach - Attach to a lower device + * @ni: NMBM instance structure + */ +static int nmbm_attach(struct nmbm_instance *ni) +{ + bool success; + + if (!ni) + return -EINVAL; + + /* Initialize NMBM instance */ + nmbm_init_structure(ni); + + success = nmbm_find_signature(ni, &ni->signature, &ni->signature_ba); + if (!success) { + if (!ni->force_create) { + nlog_err(ni, "Signature not found\n"); + return -ENODEV; + } + + success = nmbm_create_new(ni); + if (!success) + return -ENODEV; + + return 0; + } + + nlog_info(ni, "Signature found at block %u [0x%08llx]\n", + ni->signature_ba, ba2addr(ni, ni->signature_ba)); + + if (ni->signature.header.version != NMBM_VER) { + nlog_err(ni, "NMBM version %u.%u is not supported\n", + NMBM_VERSION_MAJOR_GET(ni->signature.header.version), + NMBM_VERSION_MINOR_GET(ni->signature.header.version)); + return -EINVAL; + } + + if (ni->signature.nand_size != 
bmtd.total_blks << bmtd.blk_shift || + ni->signature.block_size != bmtd.blk_size || + ni->signature.page_size != bmtd.pg_size || + ni->signature.spare_size != bmtd.mtd->oobsize) { + nlog_err(ni, "NMBM configuration mismatch\n"); + return -EINVAL; + } + + success = nmbm_load_existing(ni); + if (!success) + return -ENODEV; + + return 0; +} + +static bool remap_block_nmbm(u16 block, u16 mapped_block, int copy_len) +{ + struct nmbm_instance *ni = bmtd.ni; + int new_block; + + if (block >= ni->data_block_count) + return false; + + nmbm_set_block_state(ni, mapped_block, BLOCK_ST_BAD); + if (!nmbm_map_block(ni, block)) + return false; + + new_block = ni->block_mapping[block]; + bbt_nand_erase(new_block); + if (copy_len > 0) + bbt_nand_copy(new_block, mapped_block, copy_len); + nmbm_update_info_table(ni); + + return true; +} + +static int get_mapping_block_index_nmbm(int block) +{ + struct nmbm_instance *ni = bmtd.ni; + + if (block >= ni->data_block_count) + return -1; + + return ni->block_mapping[block]; +} + +static int mtk_bmt_init_nmbm(struct device_node *np) +{ + struct nmbm_instance *ni; + int ret; + + ni = kzalloc(nmbm_calc_structure_size(), GFP_KERNEL); + if (!ni) + return -ENOMEM; + + bmtd.ni = ni; + + if (of_property_read_u32(np, "mediatek,bmt-max-ratio", &ni->max_ratio)) + ni->max_ratio = 1; + if (of_property_read_u32(np, "mediatek,bmt-max-reserved-blocks", + &ni->max_reserved_blocks)) + ni->max_reserved_blocks = 256; + if (of_property_read_bool(np, "mediatek,empty-page-ecc-protected")) + ni->empty_page_ecc_ok = true; + if (of_property_read_bool(np, "mediatek,bmt-force-create")) + ni->force_create = true; + + ret = nmbm_attach(ni); + if (ret) + goto out; + + bmtd.mtd->size = ni->data_block_count << bmtd.blk_shift; + + return 0; + +out: + kfree(ni); + bmtd.ni = NULL; + + return ret; +} + +static int mtk_bmt_debug_nmbm(void *data, u64 val) +{ + struct nmbm_instance *ni = bmtd.ni; + int i; + + switch (val) { + case 0: + for (i = 1; i < ni->data_block_count; i++) { 
+ if (ni->block_mapping[i] < ni->mapping_blocks_ba) + continue; + + printk("remap [%x->%x]\n", i, ni->block_mapping[i]); + } + } + + return 0; +} + +static void unmap_block_nmbm(u16 block) +{ + struct nmbm_instance *ni = bmtd.ni; + int start, offset; + int new_block; + + if (block >= ni->data_block_count) + return; + + start = block; + offset = 0; + while (ni->block_mapping[start] >= ni->mapping_blocks_ba) { + start--; + offset++; + if (start < 0) + return; + } + + if (!offset) + return; + + new_block = ni->block_mapping[start] + offset; + nmbm_set_block_state(ni, new_block, BLOCK_ST_GOOD); + ni->block_mapping[block] = new_block; + ni->block_mapping_changed++; + + new_block = ni->signature_ba - 1; + for (block = 0; block < ni->data_block_count; block++) { + int cur = ni->block_mapping[block]; + + if (cur < ni->mapping_blocks_ba) + continue; + + if (cur <= new_block) + new_block = cur - 1; + } + + ni->mapping_blocks_top_ba = new_block; + + nmbm_update_info_table(ni); +} + +const struct mtk_bmt_ops mtk_bmt_nmbm_ops = { + .init = mtk_bmt_init_nmbm, + .remap_block = remap_block_nmbm, + .unmap_block = unmap_block_nmbm, + .get_mapping_block = get_mapping_block_index_nmbm, + .debug = mtk_bmt_debug_nmbm, +}; diff --git a/target/linux/generic/files/drivers/mtd/nand/mtk_bmt_v2.c b/target/linux/generic/files/drivers/mtd/nand/mtk_bmt_v2.c new file mode 100644 index 0000000000..2770376e98 --- /dev/null +++ b/target/linux/generic/files/drivers/mtd/nand/mtk_bmt_v2.c @@ -0,0 +1,513 @@ +/* + * Copyright (c) 2017 MediaTek Inc. + * Author: Xiangsheng Hou + * Copyright (c) 2020-2022 Felix Fietkau + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +#include +#include +#include "mtk_bmt.h" + +struct bbbt { + char signature[3]; + /* This version is used to distinguish the legacy and new algorithm */ +#define BBMT_VERSION 2 + unsigned char version; + /* Below 2 tables will be written in SLC */ + u16 bb_tbl[]; +}; + +struct bbmt { + u16 block; +#define NO_MAPPED 0 +#define NORMAL_MAPPED 1 +#define BMT_MAPPED 2 + u16 mapped; +}; + +/* Maximum 8k blocks */ +#define BBPOOL_RATIO 2 +#define BB_TABLE_MAX bmtd.table_size +#define BMT_TABLE_MAX (BB_TABLE_MAX * BBPOOL_RATIO / 100) +#define BMT_TBL_DEF_VAL 0x0 + +static inline struct bbmt *bmt_tbl(struct bbbt *bbbt) +{ + return (struct bbmt *)&bbbt->bb_tbl[bmtd.table_size]; +} + +static u16 find_valid_block(u16 block) +{ + u8 fdm[4]; + int ret; + int loop = 0; + +retry: + if (block >= bmtd.total_blks) + return 0; + + ret = bbt_nand_read(blk_pg(block), bmtd.data_buf, bmtd.pg_size, + fdm, sizeof(fdm)); + /* Read the 1st byte of FDM to judge whether it's a bad + * or not + */ + if (ret || fdm[0] != 0xff) { + pr_info("nand: found bad block 0x%x\n", block); + if (loop >= bmtd.bb_max) { + pr_info("nand: FATAL ERR: too many bad blocks!!\n"); + return 0; + } + + loop++; + block++; + goto retry; + } + + return block; +} + +/* Find out all bad blocks, and fill in the mapping table */ +static int scan_bad_blocks(struct bbbt *bbt) +{ + int i; + u16 block = 0; + + /* First time download, the block0 MUST NOT be a bad block, + * this is guaranteed by vendor + */ + bbt->bb_tbl[0] = 0; + + /* + * Construct the mapping table of Normal data area(non-PMT/BMTPOOL) + * G - Good block; B - Bad block + * --------------------------- + * physical |G|G|B|G|B|B|G|G|G|G|B|G|B| + * --------------------------- + * What bb_tbl[i] looks like: + * physical block(i): + * 0 1 2 3 4 5 6 7 8 9 a b c + * mapped block(bb_tbl[i]): + * 0 1 3 6 7 8 9 b ...... 
+ * ATTENTION: + * If new bad block ocurred(n), search bmt_tbl to find + * a available block(x), and fill in the bb_tbl[n] = x; + */ + for (i = 1; i < bmtd.pool_lba; i++) { + bbt->bb_tbl[i] = find_valid_block(bbt->bb_tbl[i - 1] + 1); + BBT_LOG("bb_tbl[0x%x] = 0x%x", i, bbt->bb_tbl[i]); + if (bbt->bb_tbl[i] == 0) + return -1; + } + + /* Physical Block start Address of BMT pool */ + bmtd.pool_pba = bbt->bb_tbl[i - 1] + 1; + if (bmtd.pool_pba >= bmtd.total_blks - 2) { + pr_info("nand: FATAL ERR: Too many bad blocks!!\n"); + return -1; + } + + BBT_LOG("pool_pba=0x%x", bmtd.pool_pba); + i = 0; + block = bmtd.pool_pba; + /* + * The bmt table is used for runtime bad block mapping + * G - Good block; B - Bad block + * --------------------------- + * physical |G|G|B|G|B|B|G|G|G|G|B|G|B| + * --------------------------- + * block: 0 1 2 3 4 5 6 7 8 9 a b c + * What bmt_tbl[i] looks like in initial state: + * i: + * 0 1 2 3 4 5 6 7 + * bmt_tbl[i].block: + * 0 1 3 6 7 8 9 b + * bmt_tbl[i].mapped: + * N N N N N N N B + * N - Not mapped(Available) + * M - Mapped + * B - BMT + * ATTENTION: + * BMT always in the last valid block in pool + */ + while ((block = find_valid_block(block)) != 0) { + bmt_tbl(bbt)[i].block = block; + bmt_tbl(bbt)[i].mapped = NO_MAPPED; + BBT_LOG("bmt_tbl[%d].block = 0x%x", i, block); + block++; + i++; + } + + /* i - How many available blocks in pool, which is the length of bmt_tbl[] + * bmtd.bmt_blk_idx - bmt_tbl[bmtd.bmt_blk_idx].block => the BMT block + */ + bmtd.bmt_blk_idx = i - 1; + bmt_tbl(bbt)[bmtd.bmt_blk_idx].mapped = BMT_MAPPED; + + if (i < 1) { + pr_info("nand: FATAL ERR: no space to store BMT!!\n"); + return -1; + } + + pr_info("[BBT] %d available blocks in BMT pool\n", i); + + return 0; +} + +static bool is_valid_bmt(unsigned char *buf, unsigned char *fdm) +{ + struct bbbt *bbt = (struct bbbt *)buf; + u8 *sig = (u8*)bbt->signature + MAIN_SIGNATURE_OFFSET; + + + if (memcmp(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3) == 0 && + memcmp(fdm + 
OOB_SIGNATURE_OFFSET, "bmt", 3) == 0) { + if (bbt->version == BBMT_VERSION) + return true; + } + BBT_LOG("[BBT] BMT Version not match,upgrage preloader and uboot please! sig=%02x%02x%02x, fdm=%02x%02x%02x", + sig[0], sig[1], sig[2], + fdm[1], fdm[2], fdm[3]); + return false; +} + +static u16 get_bmt_index(struct bbmt *bmt) +{ + int i = 0; + + while (bmt[i].block != BMT_TBL_DEF_VAL) { + if (bmt[i].mapped == BMT_MAPPED) + return i; + i++; + } + return 0; +} + +static int +read_bmt(u16 block, unsigned char *dat, unsigned char *fdm, int fdm_len) +{ + u32 len = bmtd.bmt_pgs << bmtd.pg_shift; + + return bbt_nand_read(blk_pg(block), dat, len, fdm, fdm_len); +} + +static struct bbbt *scan_bmt(u16 block) +{ + u8 fdm[4]; + + if (block < bmtd.pool_lba) + return NULL; + + if (read_bmt(block, bmtd.bbt_buf, fdm, sizeof(fdm))) + return scan_bmt(block - 1); + + if (is_valid_bmt(bmtd.bbt_buf, fdm)) { + bmtd.bmt_blk_idx = get_bmt_index(bmt_tbl((struct bbbt *)bmtd.bbt_buf)); + if (bmtd.bmt_blk_idx == 0) { + pr_info("[BBT] FATAL ERR: bmt block index is wrong!\n"); + return NULL; + } + pr_info("[BBT] BMT.v2 is found at 0x%x\n", block); + return (struct bbbt *)bmtd.bbt_buf; + } else + return scan_bmt(block - 1); +} + +/* Write the Burner Bad Block Table to Nand Flash + * n - write BMT to bmt_tbl[n] + */ +static u16 upload_bmt(struct bbbt *bbt, int n) +{ + u16 block; + +retry: + if (n < 0 || bmt_tbl(bbt)[n].mapped == NORMAL_MAPPED) { + pr_info("nand: FATAL ERR: no space to store BMT!\n"); + return (u16)-1; + } + + block = bmt_tbl(bbt)[n].block; + BBT_LOG("n = 0x%x, block = 0x%x", n, block); + if (bbt_nand_erase(block)) { + bmt_tbl(bbt)[n].block = 0; + /* erase failed, try the previous block: bmt_tbl[n - 1].block */ + n--; + goto retry; + } + + /* The signature offset is fixed set to 0, + * oob signature offset is fixed set to 1 + */ + memcpy(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3); + bbt->version = BBMT_VERSION; + + if (write_bmt(block, (unsigned char *)bbt)) { + 
bmt_tbl(bbt)[n].block = 0; + + /* write failed, try the previous block in bmt_tbl[n - 1] */ + n--; + goto retry; + } + + /* Return the current index(n) of BMT pool (bmt_tbl[n]) */ + return n; +} + +static u16 find_valid_block_in_pool(struct bbbt *bbt) +{ + int i; + + if (bmtd.bmt_blk_idx == 0) + goto error; + + for (i = 0; i < bmtd.bmt_blk_idx; i++) { + if (bmt_tbl(bbt)[i].block != 0 && bmt_tbl(bbt)[i].mapped == NO_MAPPED) { + bmt_tbl(bbt)[i].mapped = NORMAL_MAPPED; + return bmt_tbl(bbt)[i].block; + } + } + +error: + pr_info("nand: FATAL ERR: BMT pool is run out!\n"); + return 0; +} + +/* We met a bad block, mark it as bad and map it to a valid block in pool, + * if it's a write failure, we need to write the data to mapped block + */ +static bool remap_block_v2(u16 block, u16 mapped_block, int copy_len) +{ + u16 new_block; + struct bbbt *bbt; + + bbt = bmtd.bbt; + new_block = find_valid_block_in_pool(bbt); + if (new_block == 0) + return false; + + /* Map new bad block to available block in pool */ + bbt->bb_tbl[block] = new_block; + + /* Erase new block */ + bbt_nand_erase(new_block); + if (copy_len > 0) + bbt_nand_copy(new_block, mapped_block, copy_len); + + bmtd.bmt_blk_idx = upload_bmt(bbt, bmtd.bmt_blk_idx); + + return true; +} + +static int get_mapping_block_index_v2(int block) +{ + int start, end; + + if (block >= bmtd.pool_lba) + return block; + + if (!mapping_block_in_range(block, &start, &end)) + return block; + + return bmtd.bbt->bb_tbl[block]; +} + +static void +unmap_block_v2(u16 block) +{ + bmtd.bbt->bb_tbl[block] = block; + bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx); +} + +static unsigned long * +mtk_bmt_get_mapping_mask(void) +{ + struct bbmt *bbmt = bmt_tbl(bmtd.bbt); + int main_blocks = bmtd.mtd->size >> bmtd.blk_shift; + unsigned long *used; + int i, k; + + used = kcalloc(sizeof(unsigned long), BIT_WORD(bmtd.bmt_blk_idx) + 1, GFP_KERNEL); + if (!used) + return NULL; + + for (i = 1; i < main_blocks; i++) { + if (bmtd.bbt->bb_tbl[i] 
== i) + continue; + + for (k = 0; k < bmtd.bmt_blk_idx; k++) { + if (bmtd.bbt->bb_tbl[i] != bbmt[k].block) + continue; + + set_bit(k, used); + break; + } + } + + return used; +} + +static int mtk_bmt_debug_v2(void *data, u64 val) +{ + struct bbmt *bbmt = bmt_tbl(bmtd.bbt); + struct mtd_info *mtd = bmtd.mtd; + unsigned long *used; + int main_blocks = mtd->size >> bmtd.blk_shift; + int n_remap = 0; + int i; + + used = mtk_bmt_get_mapping_mask(); + if (!used) + return -ENOMEM; + + switch (val) { + case 0: + for (i = 1; i < main_blocks; i++) { + if (bmtd.bbt->bb_tbl[i] == i) + continue; + + printk("remap [%x->%x]\n", i, bmtd.bbt->bb_tbl[i]); + n_remap++; + } + for (i = 0; i <= bmtd.bmt_blk_idx; i++) { + char c; + + switch (bbmt[i].mapped) { + case NO_MAPPED: + continue; + case NORMAL_MAPPED: + c = 'm'; + if (test_bit(i, used)) + c = 'M'; + break; + case BMT_MAPPED: + c = 'B'; + break; + default: + c = 'X'; + break; + } + printk("[%x:%c] = 0x%x\n", i, c, bbmt[i].block); + } + break; + case 100: + for (i = 0; i <= bmtd.bmt_blk_idx; i++) { + if (bbmt[i].mapped != NORMAL_MAPPED) + continue; + + if (test_bit(i, used)) + continue; + + n_remap++; + bbmt[i].mapped = NO_MAPPED; + printk("free block [%d:%x]\n", i, bbmt[i].block); + } + if (n_remap) + bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx); + break; + } + + kfree(used); + + return 0; +} + +static int mtk_bmt_init_v2(struct device_node *np) +{ + u32 bmt_pool_size, bmt_table_size; + u32 bufsz, block; + u16 pmt_block; + + if (of_property_read_u32(np, "mediatek,bmt-pool-size", + &bmt_pool_size) != 0) + bmt_pool_size = 80; + + if (of_property_read_u8(np, "mediatek,bmt-oob-offset", + &bmtd.oob_offset) != 0) + bmtd.oob_offset = 0; + + if (of_property_read_u32(np, "mediatek,bmt-table-size", + &bmt_table_size) != 0) + bmt_table_size = 0x2000U; + + bmtd.table_size = bmt_table_size; + + pmt_block = bmtd.total_blks - bmt_pool_size - 2; + + bmtd.mtd->size = pmt_block << bmtd.blk_shift; + + /* + * 
--------------------------------------- + * | PMT(2blks) | BMT POOL(totalblks * 2%) | + * --------------------------------------- + * ^ ^ + * | | + * pmt_block pmt_block + 2blocks(pool_lba) + * + * ATTETION!!!!!! + * The blocks ahead of the boundary block are stored in bb_tbl + * and blocks behind are stored in bmt_tbl + */ + + bmtd.pool_lba = (u16)(pmt_block + 2); + bmtd.bb_max = bmtd.total_blks * BBPOOL_RATIO / 100; + + bufsz = round_up(sizeof(struct bbbt) + + bmt_table_size * sizeof(struct bbmt), bmtd.pg_size); + bmtd.bmt_pgs = bufsz >> bmtd.pg_shift; + + bmtd.bbt_buf = kzalloc(bufsz, GFP_KERNEL); + if (!bmtd.bbt_buf) + return -ENOMEM; + + memset(bmtd.bbt_buf, 0xff, bufsz); + + /* Scanning start from the first page of the last block + * of whole flash + */ + bmtd.bbt = scan_bmt(bmtd.total_blks - 1); + if (!bmtd.bbt) { + /* BMT not found */ + if (bmtd.total_blks > BB_TABLE_MAX + BMT_TABLE_MAX) { + pr_info("nand: FATAL: Too many blocks, can not support!\n"); + return -1; + } + + bmtd.bbt = (struct bbbt *)bmtd.bbt_buf; + memset(bmt_tbl(bmtd.bbt), BMT_TBL_DEF_VAL, + bmtd.table_size * sizeof(struct bbmt)); + + if (scan_bad_blocks(bmtd.bbt)) + return -1; + + /* BMT always in the last valid block in pool */ + bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx); + block = bmt_tbl(bmtd.bbt)[bmtd.bmt_blk_idx].block; + pr_notice("[BBT] BMT.v2 is written into PBA:0x%x\n", block); + + if (bmtd.bmt_blk_idx == 0) + pr_info("nand: Warning: no available block in BMT pool!\n"); + else if (bmtd.bmt_blk_idx == (u16)-1) + return -1; + } + + return 0; +} + + +const struct mtk_bmt_ops mtk_bmt_v2_ops = { + .sig = "bmt", + .sig_len = 3, + .init = mtk_bmt_init_v2, + .remap_block = remap_block_v2, + .unmap_block = unmap_block_v2, + .get_mapping_block = get_mapping_block_index_v2, + .debug = mtk_bmt_debug_v2, +}; diff --git a/target/linux/generic/hack-5.10/410-block-fit-partition-parser.patch b/target/linux/generic/hack-5.10/410-block-fit-partition-parser.patch index 
bc48296d9c..7816356227 100644 --- a/target/linux/generic/hack-5.10/410-block-fit-partition-parser.patch +++ b/target/linux/generic/hack-5.10/410-block-fit-partition-parser.patch @@ -156,7 +156,7 @@ .name = "mtdblock", .major = MTD_BLOCK_MAJOR, +#ifdef CONFIG_FIT_PARTITION -+ .part_bits = 1, ++ .part_bits = 2, +#else .part_bits = 0, +#endif diff --git a/target/linux/generic/hack-5.10/430-mtk-bmt-support.patch b/target/linux/generic/hack-5.10/430-mtk-bmt-support.patch index 749dcb8356..62ddd66bf2 100644 --- a/target/linux/generic/hack-5.10/430-mtk-bmt-support.patch +++ b/target/linux/generic/hack-5.10/430-mtk-bmt-support.patch @@ -17,7 +17,7 @@ nandcore-objs := core.o bbt.o obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o -+obj-$(CONFIG_MTD_NAND_MTK_BMT) += mtk_bmt.o ++obj-$(CONFIG_MTD_NAND_MTK_BMT) += mtk_bmt.o mtk_bmt_v2.o mtk_bmt_bbt.o mtk_bmt_nmbm.o obj-y += onenand/ obj-y += raw/ diff --git a/target/linux/generic/pending-5.10/702-net-ethernet-mtk_eth_soc-enable-threaded-NAPI.patch b/target/linux/generic/pending-5.10/702-net-ethernet-mtk_eth_soc-enable-threaded-NAPI.patch new file mode 100644 index 0000000000..da9c040322 --- /dev/null +++ b/target/linux/generic/pending-5.10/702-net-ethernet-mtk_eth_soc-enable-threaded-NAPI.patch @@ -0,0 +1,41 @@ +From: Felix Fietkau +Date: Mon, 21 Mar 2022 20:39:59 +0100 +Subject: [PATCH] net: ethernet: mtk_eth_soc: enable threaded NAPI + +This can improve performance under load by ensuring that NAPI processing is +not pinned on CPU 0. 
+ +Signed-off-by: Felix Fietkau +--- + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -2171,8 +2171,8 @@ static irqreturn_t mtk_handle_irq_rx(int + + eth->rx_events++; + if (likely(napi_schedule_prep(ð->rx_napi))) { +- __napi_schedule(ð->rx_napi); + mtk_rx_irq_disable(eth, MTK_RX_DONE_INT); ++ __napi_schedule(ð->rx_napi); + } + + return IRQ_HANDLED; +@@ -2184,8 +2184,8 @@ static irqreturn_t mtk_handle_irq_tx(int + + eth->tx_events++; + if (likely(napi_schedule_prep(ð->tx_napi))) { +- __napi_schedule(ð->tx_napi); + mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); ++ __napi_schedule(ð->tx_napi); + } + + return IRQ_HANDLED; +@@ -3229,6 +3229,8 @@ static int mtk_probe(struct platform_dev + * for NAPI to work + */ + init_dummy_netdev(ð->dummy_dev); ++ eth->dummy_dev.threaded = 1; ++ strcpy(eth->dummy_dev.name, "mtk_eth"); + netif_napi_add(ð->dummy_dev, ð->tx_napi, mtk_napi_tx, + MTK_NAPI_WEIGHT); + netif_napi_add(ð->dummy_dev, ð->rx_napi, mtk_napi_rx, diff --git a/target/linux/ipq40xx/Makefile b/target/linux/ipq40xx/Makefile index fe0bdec5e6..c0d64ded9f 100644 --- a/target/linux/ipq40xx/Makefile +++ b/target/linux/ipq40xx/Makefile @@ -6,7 +6,7 @@ BOARDNAME:=Qualcomm Atheros IPQ40XX FEATURES:=squashfs fpu ramdisk nand CPU_TYPE:=cortex-a7 CPU_SUBTYPE:=neon-vfpv4 -SUBTARGETS:=generic mikrotik +SUBTARGETS:=generic chromium mikrotik KERNEL_PATCHVER:=5.10 KERNEL_TESTING_PATCHVER:=5.10 diff --git a/target/linux/ipq40xx/base-files/etc/board.d/02_network b/target/linux/ipq40xx/base-files/etc/board.d/02_network index bda2d8f0ef..f41b5ce7a8 100644 --- a/target/linux/ipq40xx/base-files/etc/board.d/02_network +++ b/target/linux/ipq40xx/base-files/etc/board.d/02_network @@ -46,6 +46,7 @@ ipq40xx_setup_interfaces() cilab,meshpoint-one|\ edgecore,ecw5211|\ edgecore,oap100|\ + google,wifi|\ openmesh,a42|\ openmesh,a62) ucidef_set_interfaces_lan_wan "eth1" "eth0" diff --git 
a/target/linux/ipq40xx/base-files/etc/uci-defaults/04_led_migration b/target/linux/ipq40xx/base-files/etc/uci-defaults/04_led_migration index c4f82b35dc..ef4d3d037c 100644 --- a/target/linux/ipq40xx/base-files/etc/uci-defaults/04_led_migration +++ b/target/linux/ipq40xx/base-files/etc/uci-defaults/04_led_migration @@ -10,6 +10,9 @@ engenius,ens620ext|\ zyxel,nbg6617) migrate_leds ":wlan2G=:wlan2g" ":wlan5G=:wlan5g" ;; +netgear,wac510) + migrate_leds ":wlan2g=:wlan-0" ":wlan5g=:wlan-1" ":act=:activity" + ;; esac remove_devicename_leds diff --git a/target/linux/ipq40xx/base-files/lib/upgrade/platform.sh b/target/linux/ipq40xx/base-files/lib/upgrade/platform.sh index d83e8f808a..aa2d60d564 100644 --- a/target/linux/ipq40xx/base-files/lib/upgrade/platform.sh +++ b/target/linux/ipq40xx/base-files/lib/upgrade/platform.sh @@ -154,6 +154,13 @@ platform_do_upgrade() { p2w,r619ac-128m) nand_do_upgrade "$1" ;; + google,wifi) + export_bootdevice + export_partdevice CI_ROOTDEV 0 + CI_KERNPART="kernel" + CI_ROOTPART="rootfs" + emmc_do_upgrade "$1" + ;; linksys,ea6350v3 |\ linksys,ea8300 |\ linksys,mr8300) @@ -202,7 +209,8 @@ platform_do_upgrade() { platform_copy_config() { case "$(board_name)" in - glinet,gl-b2200) + glinet,gl-b2200 |\ + google,wifi) emmc_copy_config ;; esac diff --git a/target/linux/ipq40xx/chromium/config-default b/target/linux/ipq40xx/chromium/config-default new file mode 100644 index 0000000000..83a42dfb66 --- /dev/null +++ b/target/linux/ipq40xx/chromium/config-default @@ -0,0 +1,10 @@ +CONFIG_BLK_DEV_SD=y +CONFIG_BLK_SCSI_REQUEST=y +CONFIG_SCSI=y +CONFIG_SG_POOL=y +CONFIG_USB_DWC3=y +CONFIG_USB_DWC3_HOST=y +CONFIG_USB_DWC3_QCOM=y +CONFIG_USB_STORAGE=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_XHCI_PLATFORM=y diff --git a/target/linux/ipq40xx/chromium/target.mk b/target/linux/ipq40xx/chromium/target.mk new file mode 100644 index 0000000000..3983a9281a --- /dev/null +++ b/target/linux/ipq40xx/chromium/target.mk @@ -0,0 +1,2 @@ +BOARDNAME:=Google Chromium +FEATURES 
+= emmc boot-part rootfs-part diff --git a/target/linux/ipq40xx/files/arch/arm/boot/dts/qcom-ipq4018-wac510.dts b/target/linux/ipq40xx/files/arch/arm/boot/dts/qcom-ipq4018-wac510.dts index 699e2e28b4..d7972cd53d 100644 --- a/target/linux/ipq40xx/files/arch/arm/boot/dts/qcom-ipq4018-wac510.dts +++ b/target/linux/ipq40xx/files/arch/arm/boot/dts/qcom-ipq4018-wac510.dts @@ -5,6 +5,7 @@ #include #include #include +#include / { model = "Netgear WAC510"; @@ -42,14 +43,6 @@ qcom,wifi_glb_cfg = ; }; - tcsr@194b000 { - status = "okay"; - - compatible = "qcom,tcsr"; - reg = <0x194b000 0x100>; - qcom,usb-hsphy-mode-select = ; - }; - ess_tcsr@1953000 { compatible = "qcom,tcsr"; reg = <0x1953000 0x1000>; @@ -62,14 +55,6 @@ qcom,wifi_noc_memtype_m0_m2 = ; }; - usb2: usb2@60f8800 { - status = "okay"; - }; - - usb3: usb3@8af8800 { - status = "okay"; - }; - crypto@8e3a000 { status = "okay"; }; @@ -119,43 +104,60 @@ leds { compatible = "gpio-leds"; - led_power_amber: power_amber { + led_power_amber: led-0 { label = "amber:power"; + color = ; + function = LED_FUNCTION_POWER; gpios = <&ssr 6 GPIO_ACTIVE_LOW>; panic-indicator; }; - led_power_green: power_green { + led_power_green: led-1 { label = "green:power"; + color = ; + function = LED_FUNCTION_POWER; gpios = <&ssr 5 GPIO_ACTIVE_LOW>; }; - wlan2g_blue { - label = "blue:wlan2g"; + led-2 { + /* 2.4GHz blue - activity */ + color = ; + function = LED_FUNCTION_WLAN; + function-enumerator = <0>; gpios = <&ssr 4 GPIO_ACTIVE_LOW>; linux,default-trigger = "phy0tpt"; }; - wlan2g_green { - label = "green:wlan2g"; + led-3 { + /* 2.4GHz green - link */ + color = ; + function = LED_FUNCTION_WLAN; + function-enumerator = <0>; gpios = <&ssr 3 GPIO_ACTIVE_LOW>; linux,default-trigger = "phy0radio"; }; - wlan5g_blue { - label = "blue:wlan5g"; + led-4 { + /* 5GHz blue - activity */ + color = ; + function = LED_FUNCTION_WLAN; + function-enumerator = <1>; gpios = <&ssr 2 GPIO_ACTIVE_LOW>; linux,default-trigger = "phy1tpt"; }; - wlan5g_green { - label = 
"green:wlan5g"; + led-5 { + /* 5GHz green - link */ + color = ; + function = LED_FUNCTION_WLAN; + function-enumerator = <1>; gpios = <&ssr 1 GPIO_ACTIVE_LOW>; linux,default-trigger = "phy1radio"; }; - act_green { - label = "green:act"; + led-6 { + color = ; + function = LED_FUNCTION_ACTIVITY; gpios = <&ssr 0 GPIO_ACTIVE_LOW>; }; }; @@ -378,15 +380,3 @@ mac-address-increment = <16>; qcom,ath10k-calibration-variant = "Netgear-WAC510"; }; - -&usb3_ss_phy { - status = "okay"; -}; - -&usb3_hs_phy { - status = "okay"; -}; - -&usb2_hs_phy { - status = "okay"; -}; diff --git a/target/linux/ipq40xx/files/arch/arm/boot/dts/qcom-ipq4019-e2600ac-c1.dts b/target/linux/ipq40xx/files/arch/arm/boot/dts/qcom-ipq4019-e2600ac-c1.dts index a50ee4a84e..f9205c28fa 100644 --- a/target/linux/ipq40xx/files/arch/arm/boot/dts/qcom-ipq4019-e2600ac-c1.dts +++ b/target/linux/ipq40xx/files/arch/arm/boot/dts/qcom-ipq4019-e2600ac-c1.dts @@ -89,3 +89,16 @@ }; }; +&wifi0 { + status = "okay"; + nvmem-cell-names = "pre-calibration"; + nvmem-cells = <&precal_art_1000>; + qcom,ath10k-calibration-variant = "Qxwlan-E2600AC-C1"; +}; + +&wifi1 { + status = "okay"; + nvmem-cell-names = "pre-calibration"; + nvmem-cells = <&precal_art_5000>; + qcom,ath10k-calibration-variant = "Qxwlan-E2600AC-C1"; +}; diff --git a/target/linux/ipq40xx/files/arch/arm/boot/dts/qcom-ipq4019-e2600ac-c2.dts b/target/linux/ipq40xx/files/arch/arm/boot/dts/qcom-ipq4019-e2600ac-c2.dts index ce54ce9f9b..1f88322a4f 100644 --- a/target/linux/ipq40xx/files/arch/arm/boot/dts/qcom-ipq4019-e2600ac-c2.dts +++ b/target/linux/ipq40xx/files/arch/arm/boot/dts/qcom-ipq4019-e2600ac-c2.dts @@ -124,3 +124,16 @@ }; }; +&wifi0 { + status = "okay"; + nvmem-cell-names = "pre-calibration"; + nvmem-cells = <&precal_art_1000>; + qcom,ath10k-calibration-variant = "Qxwlan-E2600AC-C2"; +}; + +&wifi1 { + status = "okay"; + nvmem-cell-names = "pre-calibration"; + nvmem-cells = <&precal_art_5000>; + qcom,ath10k-calibration-variant = "Qxwlan-E2600AC-C2"; +}; diff 
--git a/target/linux/ipq40xx/files/arch/arm/boot/dts/qcom-ipq4019-e2600ac.dtsi b/target/linux/ipq40xx/files/arch/arm/boot/dts/qcom-ipq4019-e2600ac.dtsi index 45681b19bb..5bfb758e50 100644 --- a/target/linux/ipq40xx/files/arch/arm/boot/dts/qcom-ipq4019-e2600ac.dtsi +++ b/target/linux/ipq40xx/files/arch/arm/boot/dts/qcom-ipq4019-e2600ac.dtsi @@ -254,17 +254,3 @@ &usb2_hs_phy { status = "okay"; }; - -&wifi0 { - status = "okay"; - nvmem-cell-names = "pre-calibration"; - nvmem-cells = <&precal_art_1000>; - qcom,ath10k-calibration-variant = "Qxwlan-E2600AC"; -}; - -&wifi1 { - status = "okay"; - nvmem-cell-names = "pre-calibration"; - nvmem-cells = <&precal_art_5000>; - qcom,ath10k-calibration-variant = "Qxwlan-E2600AC"; -}; diff --git a/target/linux/ipq40xx/files/arch/arm/boot/dts/qcom-ipq4019-wifi.dts b/target/linux/ipq40xx/files/arch/arm/boot/dts/qcom-ipq4019-wifi.dts new file mode 100644 index 0000000000..9448e5145e --- /dev/null +++ b/target/linux/ipq40xx/files/arch/arm/boot/dts/qcom-ipq4019-wifi.dts @@ -0,0 +1,413 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2016, 2018 The Linux Foundation. All rights reserved. + * Copyright (c) 2016 Google, Inc + */ + +#include "qcom-ipq4019.dtsi" +#include +#include +#include + +/ { + model = "Google WiFi (Gale)"; + compatible = "google,wifi", "google,gale-v2", "qcom,ipq4019"; + + chosen { + /* + * rootwait: in case we're booting from slow/async USB storage. 
+ */ + bootargs-append = " rootwait"; + stdout-path = &blsp1_uart1; + }; + + memory { + device_type = "memory"; + reg = <0x80000000 0x20000000>; /* 512MB */ + }; + + soc { + ess-switch@c000000 { + status = "okay"; + }; + + edma@c080000 { + status = "okay"; + }; + + ess-psgmii@98000 { + status = "okay"; + }; + }; +}; + +&tlmm { + fw_pinmux { + wp { + pins = "gpio53"; + output-low; + }; + recovery { + pins = "gpio57"; + bias-none; + }; + developer { + pins = "gpio41"; + bias-none; + }; + }; + + reset802_15_4 { + pins = "gpio60"; + }; + + led_reset { + pins = "gpio22"; + output-high; + }; + + sys_reset { + pins = "gpio19"; + output-high; + }; + + rx_active { + pins = "gpio43"; + bias-pull,down; + }; + + spi_0_pins: spi_0_pinmux { + pinmux { + function = "blsp_spi0"; + pins = "gpio13", "gpio14","gpio15"; + }; + pinmux_cs { + function = "gpio"; + pins = "gpio12"; + }; + pinconf { + pins = "gpio13", "gpio14","gpio15"; + drive-strength = <12>; + bias-disable; + }; + pinconf_cs { + pins = "gpio12"; + drive-strength = <2>; + bias-disable; + output-high; + }; + }; + + spi_1_pins: spi_1_pinmux { + pinmux { + function = "blsp_spi1"; + pins = "gpio44", "gpio46","gpio47"; + }; + pinmux_cs { + function = "gpio"; + pins = "gpio45"; + }; + pinconf { + pins = "gpio44", "gpio46","gpio47"; + drive-strength = <12>; + bias-disable; + }; + pinconf_cs { + pins = "gpio45"; + drive-strength = <2>; + bias-disable; + output-high; + }; + }; + + serial_0_pins: serial0_pinmux { + mux { + pins = "gpio16", "gpio17"; + function = "blsp_uart0"; + bias-disable; + }; + }; + + serial_1_pins: serial1_pinmux { + mux { + pins = "gpio8", "gpio9", "gpio10", "gpio11"; + function = "blsp_uart1"; + bias-disable; + }; + }; + + i2c_0_pins: i2c_0_pinmux { + mux { + pins = "gpio20", "gpio21"; + function = "blsp_i2c0"; + drive-open-drain; + }; + }; + + i2c_1_pins: i2c_1_pinmux { + mux { + pins = "gpio34", "gpio35"; + function = "blsp_i2c1"; + drive-open-drain; + }; + }; + + sd_0_pins: sd_0_pinmux { + sd0 { + pins = 
"gpio23", "gpio24", "gpio25", "gpio26", "gpio29", "gpio30", "gpio31", "gpio32"; + function = "sdio"; + drive-strength = <10>; + bias-pull-up; + pull-up-res = <0>; + }; + sdclk { + pins = "gpio27"; + function = "sdio"; + drive-strength = <2>; + bias-pull-up; + pull-up-res = <0>; + }; + sdcmd { + pins = "gpio28"; + function = "sdio"; + drive-strength = <10>; + bias-pull-up; + pull-up-res = <0>; + }; + }; + + mdio_pins: mdio_pinmux { + mux_1 { + pins = "gpio6"; + function = "mdio"; + bias-disable; + }; + mux_2 { + pins = "gpio7"; + function = "mdc"; + bias-disable; + }; + mux_3 { + pins = "gpio40"; + function = "gpio"; + bias-disable; + output-high; + }; + }; + + wifi1_1_pins: wifi2_pinmux { + mux { + pins = "gpio58"; + output-low; + }; + }; +}; + +&blsp_dma { + status = "okay"; +}; + +&blsp1_i2c3 { + pinctrl-0 = <&i2c_0_pins>; + pinctrl-names = "default"; + status = "okay"; + + tpm@20 { + compatible = "infineon,slb9645tt"; + reg = <0x20>; + powered-while-suspended; + }; +}; + +&blsp1_i2c4 { + pinctrl-0 = <&i2c_1_pins>; + pinctrl-names = "default"; + status = "okay"; + + led-controller@32 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "national,lp5523"; + reg = <0x32>; + clock-mode = /bits/ 8 <1>; + +#if 1 + led@0 { + reg = <0>; + chan-name = "LED0_Red"; + led-cur = /bits/ 8 <0x64>; + max-cur = /bits/ 8 <0x78>; + color = ; + }; + + led@1 { + reg = <1>; + chan-name = "LED0_Green"; + led-cur = /bits/ 8 <0x64>; + max-cur = /bits/ 8 <0x78>; + color = ; + }; + + led@2 { + reg = <2>; + chan-name = "LED0_Blue"; + led-cur = /bits/ 8 <0x64>; + max-cur = /bits/ 8 <0x78>; + color = ; + }; +#else + /* + * openwrt isn't ready to handle multi-intensity leds yet + * # echo 255 255 255 > /sys/class/leds/tricolor/multi_intensity + * # echo 255 > /sys/class/leds/tricolor/brightness + */ + multi-led@2 { + reg = <2>; + color = ; + #address-cells = <1>; + #size-cells = <0>; + + led@0 { + reg = <0>; + chan-name = "tricolor"; + led-cur = /bits/ 8 <0x64>; + max-cur = /bits/ 8 
<0x78>; + color = ; + }; + + led@1 { + reg = <1>; + chan-name = "tricolor"; + led-cur = /bits/ 8 <0x64>; + max-cur = /bits/ 8 <0x78>; + color = ; + }; + + led@2 { + reg = <2>; + chan-name = "tricolor"; + led-cur = /bits/ 8 <0x64>; + max-cur = /bits/ 8 <0x78>; + color = ; + }; + }; +#endif + }; +}; + +&blsp1_spi1 { + pinctrl-0 = <&spi_0_pins>; + pinctrl-names = "default"; + status = "okay"; + cs-gpios = <&tlmm 12 GPIO_ACTIVE_HIGH>; + + flash@0 { + compatible = "jedec,spi-nor"; + reg = <0>; + spi-max-frequency = <24000000>; + }; +}; + +&blsp1_spi2 { + pinctrl-0 = <&spi_1_pins>; + pinctrl-names = "default"; + status = "okay"; + cs-gpios = <&tlmm 45 GPIO_ACTIVE_HIGH>; + + /* + * This "spidev" was included in the manufacturer device tree. I + * suspect it's the (unused; and removed from later HW spins) Zigbee + * radio -- SiliconLabs EM3581 Zigbee? There's no driver or binding for + * this at the moment. + */ + spidev@0 { + compatible = "spidev"; + reg = <0>; + spi-max-frequency = <24000000>; + }; +}; + +&blsp1_uart1 { + pinctrl-0 = <&serial_0_pins>; + pinctrl-names = "default"; + status = "okay"; +}; + +&blsp1_uart2 { + pinctrl-0 = <&serial_1_pins>; + pinctrl-names = "default"; + status = "okay"; +}; + +&gmac0 { + qcom,phy_mdio_addr = <4>; + qcom,poll_required = <1>; + qcom,forced_speed = <1000>; + qcom,forced_duplex = <1>; + vlan_tag = <2 0x20>; +}; + +&gmac1 { + qcom,phy_mdio_addr = <3>; + qcom,forced_duplex = <1>; + vlan_tag = <1 0x10>; +}; + +&mdio { + status = "okay"; + pinctrl-0 = <&mdio_pins>; + pinctrl-names = "default"; +}; + +&prng { + status = "okay"; +}; + +&sdhci { + status = "okay"; + pinctrl-0 = <&sd_0_pins>; + pinctrl-names = "default"; + clock-frequency = <192000000>; + vqmmc-supply = <&vqmmc>; + non-removable; +}; + +&usb2 { + status = "okay"; +}; + +&usb2_hs_phy { + status = "okay"; +}; + +&usb3 { + status = "okay"; +}; + +&usb3_ss_phy { + status = "okay"; +}; + +&usb3_hs_phy { + status = "okay"; +}; + +&vqmmc { + status = "okay"; +}; + +&watchdog { 
+ status = "okay"; +}; + +&wifi0 { + status = "okay"; + qcom,ath10k-calibration-variant = "GO_GALE"; +}; + +&wifi1 { + status = "okay"; + pinctrl-0 = <&wifi1_1_pins>; + pinctrl-names = "default"; + qcom,ath10k-calibration-variant = "GO_GALE"; +}; diff --git a/target/linux/ipq40xx/image/chromium.mk b/target/linux/ipq40xx/image/chromium.mk new file mode 100644 index 0000000000..2c8457dcfb --- /dev/null +++ b/target/linux/ipq40xx/image/chromium.mk @@ -0,0 +1,36 @@ +define Build/cros-gpt + cp $@ $@.tmp 2>/dev/null || true + ptgen -o $@.tmp -g \ + -T cros_kernel -N kernel -p $(CONFIG_TARGET_KERNEL_PARTSIZE)m \ + -N rootfs -p $(CONFIG_TARGET_ROOTFS_PARTSIZE)m + cat $@.tmp >> $@ + rm $@.tmp +endef + +define Build/append-kernel-part + dd if=$(IMAGE_KERNEL) bs=$(CONFIG_TARGET_KERNEL_PARTSIZE)M conv=sync >> $@ +endef + +# NB: Chrome OS bootloaders replace the '%U' in command lines with the UUID of +# the kernel partition it chooses to boot from. This gives a flexible way to +# consistently build and sign kernels that always use the subsequent +# (PARTNROFF=1) partition as their rootfs. 
+define Build/cros-vboot + $(STAGING_DIR_HOST)/bin/cros-vbutil \ + -k $@ -c "root=PARTUUID=%U/PARTNROFF=1" -o $@.new + @mv $@.new $@ +endef + +define Device/google_wifi + DEVICE_VENDOR := Google + DEVICE_MODEL := WiFi (Gale) + SOC := qcom-ipq4019 + KERNEL_SUFFIX := -fit-zImage.itb.vboot + KERNEL = kernel-bin | fit none $$(DTS_DIR)/$$(DEVICE_DTS).dtb | cros-vboot + KERNEL_NAME := zImage + IMAGES += factory.bin + IMAGE/factory.bin := cros-gpt | append-kernel-part | append-rootfs + DEVICE_PACKAGES := ipq-wifi-google_wifi partx-utils mkf2fs e2fsprogs \ + kmod-fs-ext4 kmod-fs-f2fs kmod-google-firmware +endef +TARGET_DEVICES += google_wifi diff --git a/target/linux/ipq40xx/image/generic.mk b/target/linux/ipq40xx/image/generic.mk index 60a76bc150..33c6def373 100644 --- a/target/linux/ipq40xx/image/generic.mk +++ b/target/linux/ipq40xx/image/generic.mk @@ -269,6 +269,8 @@ define Device/avm_fritzbox-7530 $(call Device/FitImageLzma) DEVICE_VENDOR := AVM DEVICE_MODEL := FRITZ!Box 7530 + DEVICE_ALT0_VENDOR := AVM + DEVICE_ALT0_MODEL := FRITZ!Box 7520 SOC := qcom-ipq4019 DEVICE_PACKAGES := fritz-caldata fritz-tffs-nand endef @@ -928,7 +930,7 @@ define Device/qxwlan_e2600ac-c1 KERNEL_SIZE := 4096k IMAGE_SIZE := 31232k IMAGE/sysupgrade.bin := append-kernel | append-rootfs | pad-rootfs | append-metadata - DEVICE_PACKAGES := ipq-wifi-qxwlan_e2600ac + DEVICE_PACKAGES := ipq-wifi-qxwlan_e2600ac-c1 DEFAULT := n endef TARGET_DEVICES += qxwlan_e2600ac-c1 @@ -943,7 +945,7 @@ define Device/qxwlan_e2600ac-c2 KERNEL_INSTALL := 1 BLOCKSIZE := 128k PAGESIZE := 2048 - DEVICE_PACKAGES := ipq-wifi-qxwlan_e2600ac + DEVICE_PACKAGES := ipq-wifi-qxwlan_e2600ac-c2 endef TARGET_DEVICES += qxwlan_e2600ac-c2 @@ -972,6 +974,7 @@ define Device/tel_x1pro IMAGE_SIZE := 31232k IMAGE/sysupgrade.bin := append-kernel | append-rootfs | pad-rootfs | append-metadata DEVICE_PACKAGES := kmod-usb-net-qmi-wwan kmod-usb-serial-option uqmi + DEFAULT := n endef TARGET_DEVICES += tel_x1pro diff --git 
a/target/linux/ipq40xx/patches-5.10/420-firmware-qcom-scm-disable-SDI.patch b/target/linux/ipq40xx/patches-5.10/420-firmware-qcom-scm-disable-SDI.patch new file mode 100644 index 0000000000..eb474500b1 --- /dev/null +++ b/target/linux/ipq40xx/patches-5.10/420-firmware-qcom-scm-disable-SDI.patch @@ -0,0 +1,47 @@ +--- a/drivers/firmware/qcom_scm.c ++++ b/drivers/firmware/qcom_scm.c +@@ -404,6 +404,20 @@ static int __qcom_scm_set_dload_mode(str + return qcom_scm_call_atomic(__scm->dev, &desc, NULL); + } + ++static int __qcom_scm_disable_sdi(struct device *dev) ++{ ++ struct qcom_scm_desc desc = { ++ .svc = QCOM_SCM_SVC_BOOT, ++ .cmd = QCOM_SCM_BOOT_CONFIG_SDI, ++ .arginfo = QCOM_SCM_ARGS(2), ++ .args[0] = 1 /* 1: disable watchdog debug */, ++ .args[1] = 0 /* 0: disable SDI */, ++ .owner = ARM_SMCCC_OWNER_SIP, ++ }; ++ ++ return qcom_scm_call(__scm->dev, &desc, NULL); ++} ++ + static void qcom_scm_set_download_mode(bool enable) + { + bool avail; +@@ -1256,6 +1270,13 @@ static int qcom_scm_probe(struct platfor + if (download_mode) + qcom_scm_set_download_mode(true); + ++ /* ++ * Factory firmware leaves SDI (a debug interface), which prevents ++ * clean reboot. 
++ */ ++ if (of_machine_is_compatible("google,wifi")) ++ __qcom_scm_disable_sdi(__scm->dev); ++ + return 0; + } + +--- a/drivers/firmware/qcom_scm.h ++++ b/drivers/firmware/qcom_scm.h +@@ -77,6 +77,7 @@ extern int scm_legacy_call(struct device + #define QCOM_SCM_SVC_BOOT 0x01 + #define QCOM_SCM_BOOT_SET_ADDR 0x01 + #define QCOM_SCM_BOOT_TERMINATE_PC 0x02 ++#define QCOM_SCM_BOOT_CONFIG_SDI 0x09 + #define QCOM_SCM_BOOT_SET_DLOAD_MODE 0x10 + #define QCOM_SCM_BOOT_SET_REMOTE_STATE 0x0a + #define QCOM_SCM_FLUSH_FLAG_MASK 0x3 diff --git a/target/linux/ipq40xx/patches-5.10/421-firmware-qcom-scm-cold-boot-address.patch b/target/linux/ipq40xx/patches-5.10/421-firmware-qcom-scm-cold-boot-address.patch new file mode 100644 index 0000000000..accf3e9686 --- /dev/null +++ b/target/linux/ipq40xx/patches-5.10/421-firmware-qcom-scm-cold-boot-address.patch @@ -0,0 +1,121 @@ +--- a/drivers/firmware/qcom_scm-legacy.c ++++ b/drivers/firmware/qcom_scm-legacy.c +@@ -13,6 +13,9 @@ + #include + #include + ++#include ++#include ++ + #include "qcom_scm.h" + + static DEFINE_MUTEX(qcom_scm_lock); +@@ -117,6 +120,25 @@ static void __scm_legacy_do(const struct + } while (res->a0 == QCOM_SCM_INTERRUPTED); + } + ++static void qcom_scm_inv_range(unsigned long start, unsigned long end) ++{ ++ u32 cacheline_size, ctr; ++ ++ asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr)); ++ cacheline_size = 4 << ((ctr >> 16) & 0xf); ++ ++ start = round_down(start, cacheline_size); ++ end = round_up(end, cacheline_size); ++ outer_inv_range(start, end); ++ while (start < end) { ++ asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start) ++ : "memory"); ++ start += cacheline_size; ++ } ++ dsb(); ++ isb(); ++} ++ + /** + * qcom_scm_call() - Sends a command to the SCM and waits for the command to + * finish processing. 
+@@ -160,10 +182,16 @@ int scm_legacy_call(struct device *dev, + + rsp = scm_legacy_command_to_response(cmd); + +- cmd_phys = dma_map_single(dev, cmd, alloc_len, DMA_TO_DEVICE); +- if (dma_mapping_error(dev, cmd_phys)) { +- kfree(cmd); +- return -ENOMEM; ++ if (dev) { ++ cmd_phys = dma_map_single(dev, cmd, alloc_len, DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, cmd_phys)) { ++ kfree(cmd); ++ return -ENOMEM; ++ } ++ } else { ++ cmd_phys = virt_to_phys(cmd); ++ __cpuc_flush_dcache_area(cmd, alloc_len); ++ outer_flush_range(cmd_phys, cmd_phys + alloc_len); + } + + smc.args[0] = 1; +@@ -179,13 +207,26 @@ int scm_legacy_call(struct device *dev, + goto out; + + do { +- dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len, +- sizeof(*rsp), DMA_FROM_DEVICE); ++ if (dev) { ++ dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + ++ cmd_len, sizeof(*rsp), ++ DMA_FROM_DEVICE); ++ } else { ++ unsigned long start = (uintptr_t)cmd + sizeof(*cmd) + ++ cmd_len; ++ qcom_scm_inv_range(start, start + sizeof(*rsp)); ++ } + } while (!rsp->is_complete); + +- dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len + +- le32_to_cpu(rsp->buf_offset), +- resp_len, DMA_FROM_DEVICE); ++ if (dev) { ++ dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len + ++ le32_to_cpu(rsp->buf_offset), ++ resp_len, DMA_FROM_DEVICE); ++ } else { ++ unsigned long start = (uintptr_t)cmd + sizeof(*cmd) + cmd_len + ++ le32_to_cpu(rsp->buf_offset); ++ qcom_scm_inv_range(start, start + resp_len); ++ } + + if (res) { + res_buf = scm_legacy_get_response_buffer(rsp); +@@ -193,7 +234,8 @@ int scm_legacy_call(struct device *dev, + res->result[i] = le32_to_cpu(res_buf[i]); + } + out: +- dma_unmap_single(dev, cmd_phys, alloc_len, DMA_TO_DEVICE); ++ if (dev) ++ dma_unmap_single(dev, cmd_phys, alloc_len, DMA_TO_DEVICE); + kfree(cmd); + return ret; + } +--- a/drivers/firmware/qcom_scm.c ++++ b/drivers/firmware/qcom_scm.c +@@ -344,6 +344,17 @@ int qcom_scm_set_cold_boot_addr(void *en + 
desc.args[0] = flags; + desc.args[1] = virt_to_phys(entry); + ++ /* ++ * Factory firmware doesn't support the atomic variant. Non-atomic SCMs ++ * require ugly DMA invalidation support that was dropped upstream a ++ * while ago. For more info, see: ++ * ++ * [RFC] qcom_scm: IPQ4019 firmware does not support atomic API? ++ * https://lore.kernel.org/linux-arm-msm/20200913201608.GA3162100@bDebian/ ++ */ ++ if (of_machine_is_compatible("google,wifi")) ++ return qcom_scm_call(__scm ? __scm->dev : NULL, &desc, NULL); ++ + return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL); + } + EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr); diff --git a/target/linux/ipq40xx/patches-5.10/901-arm-boot-add-dts-files.patch b/target/linux/ipq40xx/patches-5.10/901-arm-boot-add-dts-files.patch index 38ac3058a3..a6a082a2c8 100644 --- a/target/linux/ipq40xx/patches-5.10/901-arm-boot-add-dts-files.patch +++ b/target/linux/ipq40xx/patches-5.10/901-arm-boot-add-dts-files.patch @@ -10,7 +10,7 @@ Signed-off-by: John Crispin --- a/arch/arm/boot/dts/Makefile +++ b/arch/arm/boot/dts/Makefile -@@ -903,11 +903,75 @@ dtb-$(CONFIG_ARCH_QCOM) += \ +@@ -903,11 +903,76 @@ dtb-$(CONFIG_ARCH_QCOM) += \ qcom-apq8074-dragonboard.dtb \ qcom-apq8084-ifc6540.dtb \ qcom-apq8084-mtp.dtb \ @@ -71,10 +71,11 @@ Signed-off-by: John Crispin + qcom-ipq4019-rtl30vw.dtb \ + qcom-ipq4019-srr60.dtb \ + qcom-ipq4019-srs60.dtb \ -+ qcom-ipq4019-x1pro.dtb \ + qcom-ipq4019-u4019-32m.dtb \ ++ qcom-ipq4019-wifi.dtb \ + qcom-ipq4019-wpj419.dtb \ + qcom-ipq4019-wtr-m2133hp.dtb \ ++ qcom-ipq4019-x1pro.dtb \ + qcom-ipq4028-wpj428.dtb \ + qcom-ipq4029-ap-303.dtb \ + qcom-ipq4029-ap-303h.dtb \ diff --git a/target/linux/lantiq/patches-5.10/0703-net-lantiq-enable-jumbo-frames-on-GSWIP.patch b/target/linux/lantiq/patches-5.10/0703-net-lantiq-enable-jumbo-frames-on-GSWIP.patch deleted file mode 100644 index 6593e43db7..0000000000 --- a/target/linux/lantiq/patches-5.10/0703-net-lantiq-enable-jumbo-frames-on-GSWIP.patch +++ 
/dev/null @@ -1,101 +0,0 @@ -From 24a43ae2ac0ea06c474b1c80dc75651294d49321 Mon Sep 17 00:00:00 2001 -From: Thomas Nixon -Date: Sat, 2 Oct 2021 00:48:05 +0100 -Subject: [PATCH 2/2] net: lantiq: enable jumbo frames on GSWIP - -This enables non-standard MTUs on a per-port basis, with the overall -frame size set based on the CPU port. - -When the MTU is not changed, this should have no effect. - -Long packets crash the switch with MTUs of greater than 2526, so the -maximum is limited for now. - -Signed-off-by: Thomas Nixon ---- - drivers/net/dsa/lantiq_gswip.c | 46 +++++++++++++++++++++++++++++++--- - 1 file changed, 42 insertions(+), 4 deletions(-) - ---- a/drivers/net/dsa/lantiq_gswip.c -+++ b/drivers/net/dsa/lantiq_gswip.c -@@ -238,6 +238,11 @@ - - #define XRX200_GPHY_FW_ALIGN (16 * 1024) - -+/* maximum packet size supported by the switch; in theory this should be 9600, -+ * but long packets currently cause lock-ups with an MTU of over 2526 -+ */ -+#define GSWIP_MAX_PACKET_LENGTH 2556 -+ - struct gswip_hw_info { - int max_ports; - int cpu_port; -@@ -856,10 +861,6 @@ static int gswip_setup(struct dsa_switch - gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS, - GSWIP_PCE_PCTRL_0p(cpu_port)); - -- gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN, -- GSWIP_MAC_CTRL_2p(cpu_port)); -- gswip_switch_w(priv, VLAN_ETH_FRAME_LEN + 8 + ETH_FCS_LEN, -- GSWIP_MAC_FLEN); - gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD, - GSWIP_BM_QUEUE_GCTRL); - -@@ -876,6 +877,8 @@ static int gswip_setup(struct dsa_switch - return err; - } - -+ ds->mtu_enforcement_ingress = true; -+ - gswip_port_enable(ds, cpu_port, NULL); - return 0; - } -@@ -1438,6 +1441,39 @@ static int gswip_port_fdb_dump(struct ds - return 0; - } - -+static int gswip_port_max_mtu(struct dsa_switch *ds, int port) -+{ -+ /* includes 8 bytes for special header */ -+ return GSWIP_MAX_PACKET_LENGTH - VLAN_ETH_HLEN - ETH_FCS_LEN; -+} -+ -+static int gswip_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu) 
-+{ -+ struct gswip_priv *priv = ds->priv; -+ int cpu_port = priv->hw_info->cpu_port; -+ -+ /* cpu port always has maximum mtu of user ports, so use it to set -+ * switch frame size, including 8 byte special header -+ */ -+ if (port == cpu_port) { -+ new_mtu += 8; -+ gswip_switch_w(priv, VLAN_ETH_HLEN + new_mtu + ETH_FCS_LEN, -+ GSWIP_MAC_FLEN); -+ } -+ -+ /* enable MLEN for ports with non-standard MTUs, including the special -+ * header on the CPU port added above -+ */ -+ if (new_mtu != ETH_DATA_LEN) -+ gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN, -+ GSWIP_MAC_CTRL_2p(port)); -+ else -+ gswip_switch_mask(priv, GSWIP_MAC_CTRL_2_MLEN, 0, -+ GSWIP_MAC_CTRL_2p(port)); -+ -+ return 0; -+} -+ - static void gswip_phylink_validate(struct dsa_switch *ds, int port, - unsigned long *supported, - struct phylink_link_state *state) -@@ -1781,6 +1817,8 @@ static const struct dsa_switch_ops gswip - .port_fdb_add = gswip_port_fdb_add, - .port_fdb_del = gswip_port_fdb_del, - .port_fdb_dump = gswip_port_fdb_dump, -+ .port_change_mtu = gswip_port_change_mtu, -+ .port_max_mtu = gswip_port_max_mtu, - .phylink_validate = gswip_phylink_validate, - .phylink_mac_config = gswip_phylink_mac_config, - .phylink_mac_link_down = gswip_phylink_mac_link_down, diff --git a/target/linux/lantiq/patches-5.10/0704-v5.16-net-lantiq_xrx200-increase-buffer-reservation.patch b/target/linux/lantiq/patches-5.10/0703-v5.16-net-lantiq_xrx200-increase-buffer-reservation.patch similarity index 100% rename from target/linux/lantiq/patches-5.10/0704-v5.16-net-lantiq_xrx200-increase-buffer-reservation.patch rename to target/linux/lantiq/patches-5.10/0703-v5.16-net-lantiq_xrx200-increase-buffer-reservation.patch diff --git a/target/linux/lantiq/patches-5.10/0705-v5.17-net-lantiq_xrx200-add-ingress-SG-DMA-support.patch b/target/linux/lantiq/patches-5.10/0704-v5.17-net-lantiq_xrx200-add-ingress-SG-DMA-support.patch similarity index 100% rename from 
target/linux/lantiq/patches-5.10/0705-v5.17-net-lantiq_xrx200-add-ingress-SG-DMA-support.patch rename to target/linux/lantiq/patches-5.10/0704-v5.17-net-lantiq_xrx200-add-ingress-SG-DMA-support.patch diff --git a/target/linux/lantiq/patches-5.10/0705-v5.13-net-dsa-lantiq-allow-to-use-all-GPHYs-on-xRX300-and-.patch b/target/linux/lantiq/patches-5.10/0705-v5.13-net-dsa-lantiq-allow-to-use-all-GPHYs-on-xRX300-and-.patch new file mode 100644 index 0000000000..c0911ef874 --- /dev/null +++ b/target/linux/lantiq/patches-5.10/0705-v5.13-net-dsa-lantiq-allow-to-use-all-GPHYs-on-xRX300-and-.patch @@ -0,0 +1,275 @@ +From a09d042b086202735c4ed64573cdd79933020001 Mon Sep 17 00:00:00 2001 +From: Aleksander Jan Bajkowski +Date: Mon, 22 Mar 2021 21:37:15 +0100 +Subject: [PATCH] net: dsa: lantiq: allow to use all GPHYs on xRX300 and xRX330 + +This patch allows to use all PHYs on GRX300 and GRX330. The ARX300 +has 3 and the GRX330 has 4 integrated PHYs connected to different +ports compared to VRX200. Each integrated PHY can work as single +Gigabit Ethernet PHY (GMII) or as double Fast Ethernet PHY (MII). + +Allowed port configurations: + +xRX200: +GMAC0: RGMII, MII, REVMII or RMII port +GMAC1: RGMII, MII, REVMII or RMII port +GMAC2: GPHY0 (GMII) +GMAC3: GPHY0 (MII) +GMAC4: GPHY1 (GMII) +GMAC5: GPHY1 (MII) or RGMII port + +xRX300: +GMAC0: RGMII port +GMAC1: GPHY2 (GMII) +GMAC2: GPHY0 (GMII) +GMAC3: GPHY0 (MII) +GMAC4: GPHY1 (GMII) +GMAC5: GPHY1 (MII) or RGMII port + +xRX330: +GMAC0: RGMII, GMII or RMII port +GMAC1: GPHY2 (GMII) +GMAC2: GPHY0 (GMII) +GMAC3: GPHY0 (MII) or GPHY3 (GMII) +GMAC4: GPHY1 (GMII) +GMAC5: GPHY1 (MII), RGMII or RMII port + +Tested on D-Link DWR966 (xRX330) with OpenWRT. + +Signed-off-by: Aleksander Jan Bajkowski +Acked-by: Hauke Mehrtens +Signed-off-by: David S. 
Miller +--- + drivers/net/dsa/lantiq_gswip.c | 142 ++++++++++++++++++++++++++------- + 1 file changed, 113 insertions(+), 29 deletions(-) + +--- a/drivers/net/dsa/lantiq_gswip.c ++++ b/drivers/net/dsa/lantiq_gswip.c +@@ -1,6 +1,6 @@ + // SPDX-License-Identifier: GPL-2.0 + /* +- * Lantiq / Intel GSWIP switch driver for VRX200 SoCs ++ * Lantiq / Intel GSWIP switch driver for VRX200, xRX300 and xRX330 SoCs + * + * Copyright (C) 2010 Lantiq Deutschland + * Copyright (C) 2012 John Crispin +@@ -104,6 +104,7 @@ + #define GSWIP_MII_CFG_MODE_RMIIP 0x2 + #define GSWIP_MII_CFG_MODE_RMIIM 0x3 + #define GSWIP_MII_CFG_MODE_RGMII 0x4 ++#define GSWIP_MII_CFG_MODE_GMII 0x9 + #define GSWIP_MII_CFG_MODE_MASK 0xf + #define GSWIP_MII_CFG_RATE_M2P5 0x00 + #define GSWIP_MII_CFG_RATE_M25 0x10 +@@ -241,6 +242,7 @@ + struct gswip_hw_info { + int max_ports; + int cpu_port; ++ const struct dsa_switch_ops *ops; + }; + + struct xway_gphy_match_data { +@@ -1438,12 +1440,42 @@ static int gswip_port_fdb_dump(struct ds + return 0; + } + +-static void gswip_phylink_validate(struct dsa_switch *ds, int port, +- unsigned long *supported, +- struct phylink_link_state *state) ++static void gswip_phylink_set_capab(unsigned long *supported, ++ struct phylink_link_state *state) + { + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + ++ /* Allow all the expected bits */ ++ phylink_set(mask, Autoneg); ++ phylink_set_port_modes(mask); ++ phylink_set(mask, Pause); ++ phylink_set(mask, Asym_Pause); ++ ++ /* With the exclusion of MII, Reverse MII and Reduced MII, we ++ * support Gigabit, including Half duplex ++ */ ++ if (state->interface != PHY_INTERFACE_MODE_MII && ++ state->interface != PHY_INTERFACE_MODE_REVMII && ++ state->interface != PHY_INTERFACE_MODE_RMII) { ++ phylink_set(mask, 1000baseT_Full); ++ phylink_set(mask, 1000baseT_Half); ++ } ++ ++ phylink_set(mask, 10baseT_Half); ++ phylink_set(mask, 10baseT_Full); ++ phylink_set(mask, 100baseT_Half); ++ phylink_set(mask, 100baseT_Full); ++ ++ 
bitmap_and(supported, supported, mask, ++ __ETHTOOL_LINK_MODE_MASK_NBITS); ++ bitmap_and(state->advertising, state->advertising, mask, ++ __ETHTOOL_LINK_MODE_MASK_NBITS); ++} ++ ++static void gswip_xrx200_phylink_validate(struct dsa_switch *ds, int port, ++ unsigned long *supported, ++ struct phylink_link_state *state) ++{ + switch (port) { + case 0: + case 1: +@@ -1470,38 +1502,54 @@ static void gswip_phylink_validate(struc + return; + } + +- /* Allow all the expected bits */ +- phylink_set(mask, Autoneg); +- phylink_set_port_modes(mask); +- phylink_set(mask, Pause); +- phylink_set(mask, Asym_Pause); ++ gswip_phylink_set_capab(supported, state); + +- /* With the exclusion of MII, Reverse MII and Reduced MII, we +- * support Gigabit, including Half duplex +- */ +- if (state->interface != PHY_INTERFACE_MODE_MII && +- state->interface != PHY_INTERFACE_MODE_REVMII && +- state->interface != PHY_INTERFACE_MODE_RMII) { +- phylink_set(mask, 1000baseT_Full); +- phylink_set(mask, 1000baseT_Half); ++ return; ++ ++unsupported: ++ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); ++ dev_err(ds->dev, "Unsupported interface '%s' for port %d\n", ++ phy_modes(state->interface), port); ++} ++ ++static void gswip_xrx300_phylink_validate(struct dsa_switch *ds, int port, ++ unsigned long *supported, ++ struct phylink_link_state *state) ++{ ++ switch (port) { ++ case 0: ++ if (!phy_interface_mode_is_rgmii(state->interface) && ++ state->interface != PHY_INTERFACE_MODE_GMII && ++ state->interface != PHY_INTERFACE_MODE_RMII) ++ goto unsupported; ++ break; ++ case 1: ++ case 2: ++ case 3: ++ case 4: ++ if (state->interface != PHY_INTERFACE_MODE_INTERNAL) ++ goto unsupported; ++ break; ++ case 5: ++ if (!phy_interface_mode_is_rgmii(state->interface) && ++ state->interface != PHY_INTERFACE_MODE_INTERNAL && ++ state->interface != PHY_INTERFACE_MODE_RMII) ++ goto unsupported; ++ break; ++ default: ++ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); ++ dev_err(ds->dev, "Unsupported 
port: %i\n", port); ++ return; + } + +- phylink_set(mask, 10baseT_Half); +- phylink_set(mask, 10baseT_Full); +- phylink_set(mask, 100baseT_Half); +- phylink_set(mask, 100baseT_Full); ++ gswip_phylink_set_capab(supported, state); + +- bitmap_and(supported, supported, mask, +- __ETHTOOL_LINK_MODE_MASK_NBITS); +- bitmap_and(state->advertising, state->advertising, mask, +- __ETHTOOL_LINK_MODE_MASK_NBITS); + return; + + unsupported: + bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + dev_err(ds->dev, "Unsupported interface '%s' for port %d\n", + phy_modes(state->interface), port); +- return; + } + + static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link) +@@ -1639,6 +1687,9 @@ static void gswip_phylink_mac_config(str + case PHY_INTERFACE_MODE_RGMII_TXID: + miicfg |= GSWIP_MII_CFG_MODE_RGMII; + break; ++ case PHY_INTERFACE_MODE_GMII: ++ miicfg |= GSWIP_MII_CFG_MODE_GMII; ++ break; + default: + dev_err(ds->dev, + "Unsupported interface: %d\n", state->interface); +@@ -1765,7 +1816,7 @@ static int gswip_get_sset_count(struct d + return ARRAY_SIZE(gswip_rmon_cnt); + } + +-static const struct dsa_switch_ops gswip_switch_ops = { ++static const struct dsa_switch_ops gswip_xrx200_switch_ops = { + .get_tag_protocol = gswip_get_tag_protocol, + .setup = gswip_setup, + .port_enable = gswip_port_enable, +@@ -1781,7 +1832,31 @@ static const struct dsa_switch_ops gswip + .port_fdb_add = gswip_port_fdb_add, + .port_fdb_del = gswip_port_fdb_del, + .port_fdb_dump = gswip_port_fdb_dump, +- .phylink_validate = gswip_phylink_validate, ++ .phylink_validate = gswip_xrx200_phylink_validate, ++ .phylink_mac_config = gswip_phylink_mac_config, ++ .phylink_mac_link_down = gswip_phylink_mac_link_down, ++ .phylink_mac_link_up = gswip_phylink_mac_link_up, ++ .get_strings = gswip_get_strings, ++ .get_ethtool_stats = gswip_get_ethtool_stats, ++ .get_sset_count = gswip_get_sset_count, ++}; ++ ++static const struct dsa_switch_ops gswip_xrx300_switch_ops = { ++ 
.get_tag_protocol = gswip_get_tag_protocol, ++ .setup = gswip_setup, ++ .port_enable = gswip_port_enable, ++ .port_disable = gswip_port_disable, ++ .port_bridge_join = gswip_port_bridge_join, ++ .port_bridge_leave = gswip_port_bridge_leave, ++ .port_fast_age = gswip_port_fast_age, ++ .port_vlan_filtering = gswip_port_vlan_filtering, ++ .port_vlan_add = gswip_port_vlan_add, ++ .port_vlan_del = gswip_port_vlan_del, ++ .port_stp_state_set = gswip_port_stp_state_set, ++ .port_fdb_add = gswip_port_fdb_add, ++ .port_fdb_del = gswip_port_fdb_del, ++ .port_fdb_dump = gswip_port_fdb_dump, ++ .phylink_validate = gswip_xrx300_phylink_validate, + .phylink_mac_config = gswip_phylink_mac_config, + .phylink_mac_link_down = gswip_phylink_mac_link_down, + .phylink_mac_link_up = gswip_phylink_mac_link_up, +@@ -2043,7 +2118,7 @@ static int gswip_probe(struct platform_d + priv->ds->dev = dev; + priv->ds->num_ports = priv->hw_info->max_ports; + priv->ds->priv = priv; +- priv->ds->ops = &gswip_switch_ops; ++ priv->ds->ops = priv->hw_info->ops; + priv->dev = dev; + version = gswip_switch_r(priv, GSWIP_VERSION); + +@@ -2127,10 +2202,19 @@ static int gswip_remove(struct platform_ + static const struct gswip_hw_info gswip_xrx200 = { + .max_ports = 7, + .cpu_port = 6, ++ .ops = &gswip_xrx200_switch_ops, ++}; ++ ++static const struct gswip_hw_info gswip_xrx300 = { ++ .max_ports = 7, ++ .cpu_port = 6, ++ .ops = &gswip_xrx300_switch_ops, + }; + + static const struct of_device_id gswip_of_match[] = { + { .compatible = "lantiq,xrx200-gswip", .data = &gswip_xrx200 }, ++ { .compatible = "lantiq,xrx300-gswip", .data = &gswip_xrx300 }, ++ { .compatible = "lantiq,xrx330-gswip", .data = &gswip_xrx300 }, + {}, + }; + MODULE_DEVICE_TABLE(of, gswip_of_match); diff --git a/target/linux/lantiq/patches-5.10/0706-v5.18-net-lantiq-enable-jumbo-frames-on-GSWIP.patch b/target/linux/lantiq/patches-5.10/0706-v5.18-net-lantiq-enable-jumbo-frames-on-GSWIP.patch new file mode 100644 index 0000000000..9d96883910 --- 
/dev/null +++ b/target/linux/lantiq/patches-5.10/0706-v5.18-net-lantiq-enable-jumbo-frames-on-GSWIP.patch @@ -0,0 +1,127 @@ +From c40bb4fedcd6b8b6a714da5dd466eb88ed2652d1 Mon Sep 17 00:00:00 2001 +From: Aleksander Jan Bajkowski +Date: Wed, 9 Mar 2022 00:04:57 +0100 +Subject: net: dsa: lantiq_gswip: enable jumbo frames on GSWIP + +This enables non-standard MTUs on a per-port basis, with the overall +frame size set based on the CPU port. + +When the MTU is not changed, this should have no effect. + +Long packets crash the switch with MTUs of greater than 2526, so the +maximum is limited for now. Medium packets are sometimes dropped (e.g. +TCP over 2477, UDP over 2516-2519, ICMP over 2526), Hence an MTU value +of 2400 seems safe. + +Signed-off-by: Thomas Nixon +Signed-off-by: Aleksander Jan Bajkowski +Link: https://lore.kernel.org/r/20220308230457.1599237-1-olek2@wp.pl +Signed-off-by: Jakub Kicinski +--- + drivers/net/dsa/lantiq_gswip.c | 53 ++++++++++++++++++++++++++++++++++++++---- + 1 file changed, 49 insertions(+), 4 deletions(-) + +--- a/drivers/net/dsa/lantiq_gswip.c ++++ b/drivers/net/dsa/lantiq_gswip.c +@@ -213,6 +213,7 @@ + #define GSWIP_MAC_CTRL_0_GMII_MII 0x0001 + #define GSWIP_MAC_CTRL_0_GMII_RGMII 0x0002 + #define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC)) ++#define GSWIP_MAC_CTRL_2_LCHKL BIT(2) /* Frame Length Check Long Enable */ + #define GSWIP_MAC_CTRL_2_MLEN BIT(3) /* Maximum Untagged Frame Lnegth */ + + /* Ethernet Switch Fetch DMA Port Control Register */ +@@ -239,6 +240,15 @@ + + #define XRX200_GPHY_FW_ALIGN (16 * 1024) + ++/* Maximum packet size supported by the switch. In theory this should be 10240, ++ * but long packets currently cause lock-ups with an MTU of over 2526. Medium ++ * packets are sometimes dropped (e.g. TCP over 2477, UDP over 2516-2519, ICMP ++ * over 2526), hence an MTU value of 2400 seems safe. This issue only affects ++ * packet reception. This is probably caused by the PPA engine, which is on the ++ * RX part of the device. 
Packet transmission works properly up to 10240. ++ */ ++#define GSWIP_MAX_PACKET_LENGTH 2400 ++ + struct gswip_hw_info { + int max_ports; + int cpu_port; +@@ -858,10 +868,6 @@ static int gswip_setup(struct dsa_switch + gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS, + GSWIP_PCE_PCTRL_0p(cpu_port)); + +- gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN, +- GSWIP_MAC_CTRL_2p(cpu_port)); +- gswip_switch_w(priv, VLAN_ETH_FRAME_LEN + 8 + ETH_FCS_LEN, +- GSWIP_MAC_FLEN); + gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD, + GSWIP_BM_QUEUE_GCTRL); + +@@ -878,6 +884,8 @@ static int gswip_setup(struct dsa_switch + return err; + } + ++ ds->mtu_enforcement_ingress = true; ++ + gswip_port_enable(ds, cpu_port, NULL); + return 0; + } +@@ -1472,6 +1480,39 @@ static void gswip_phylink_set_capab(unsi + __ETHTOOL_LINK_MODE_MASK_NBITS); + } + ++static int gswip_port_max_mtu(struct dsa_switch *ds, int port) ++{ ++ /* Includes 8 bytes for special header. */ ++ return GSWIP_MAX_PACKET_LENGTH - VLAN_ETH_HLEN - ETH_FCS_LEN; ++} ++ ++static int gswip_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu) ++{ ++ struct gswip_priv *priv = ds->priv; ++ int cpu_port = priv->hw_info->cpu_port; ++ ++ /* CPU port always has maximum mtu of user ports, so use it to set ++ * switch frame size, including 8 byte special header. ++ */ ++ if (port == cpu_port) { ++ new_mtu += 8; ++ gswip_switch_w(priv, VLAN_ETH_HLEN + new_mtu + ETH_FCS_LEN, ++ GSWIP_MAC_FLEN); ++ } ++ ++ /* Enable MLEN for ports with non-standard MTUs, including the special ++ * header on the CPU port added above. 
++ */ ++ if (new_mtu != ETH_DATA_LEN) ++ gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN, ++ GSWIP_MAC_CTRL_2p(port)); ++ else ++ gswip_switch_mask(priv, GSWIP_MAC_CTRL_2_MLEN, 0, ++ GSWIP_MAC_CTRL_2p(port)); ++ ++ return 0; ++} ++ + static void gswip_xrx200_phylink_validate(struct dsa_switch *ds, int port, + unsigned long *supported, + struct phylink_link_state *state) +@@ -1832,6 +1873,8 @@ static const struct dsa_switch_ops gswip + .port_fdb_add = gswip_port_fdb_add, + .port_fdb_del = gswip_port_fdb_del, + .port_fdb_dump = gswip_port_fdb_dump, ++ .port_change_mtu = gswip_port_change_mtu, ++ .port_max_mtu = gswip_port_max_mtu, + .phylink_validate = gswip_xrx200_phylink_validate, + .phylink_mac_config = gswip_phylink_mac_config, + .phylink_mac_link_down = gswip_phylink_mac_link_down, +@@ -1856,6 +1899,8 @@ static const struct dsa_switch_ops gswip + .port_fdb_add = gswip_port_fdb_add, + .port_fdb_del = gswip_port_fdb_del, + .port_fdb_dump = gswip_port_fdb_dump, ++ .port_change_mtu = gswip_port_change_mtu, ++ .port_max_mtu = gswip_port_max_mtu, + .phylink_validate = gswip_xrx300_phylink_validate, + .phylink_mac_config = gswip_phylink_mac_config, + .phylink_mac_link_down = gswip_phylink_mac_link_down, diff --git a/target/linux/mediatek/patches-5.10/510-net-mediatek-add-flow-offload-for-mt7623.patch b/target/linux/mediatek/patches-5.10/510-net-mediatek-add-flow-offload-for-mt7623.patch index e6a8ba9935..dd2b70aa10 100644 --- a/target/linux/mediatek/patches-5.10/510-net-mediatek-add-flow-offload-for-mt7623.patch +++ b/target/linux/mediatek/patches-5.10/510-net-mediatek-add-flow-offload-for-mt7623.patch @@ -14,7 +14,7 @@ Signed-off-by: Frank Wunderlich --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -3302,6 +3302,7 @@ static const struct mtk_soc_data mt7623_ +@@ -3304,6 +3304,7 @@ static const struct mtk_soc_data mt7623_ .hw_features = MTK_HW_FEATURES, .required_clks = MT7623_CLKS_BITMAP, .required_pctl = true, 
diff --git a/target/linux/mpc85xx/base-files/etc/board.d/01_leds b/target/linux/mpc85xx/base-files/etc/board.d/01_leds new file mode 100644 index 0000000000..391c909e4c --- /dev/null +++ b/target/linux/mpc85xx/base-files/etc/board.d/01_leds @@ -0,0 +1,19 @@ + +. /lib/functions/leds.sh +. /lib/functions/uci-defaults.sh + +board=$(board_name) +boardname="${board##*,}" + +board_config_update + +case $board in +extreme-networks,ws-ap3825i) + ucidef_set_led_netdev "lan1" "LAN1" "green:lan1" "eth1" + ucidef_set_led_netdev "lan2" "LAN2" "green:lan2" "eth0" + ;; +esac + +board_config_flush + +exit 0 diff --git a/target/linux/mpc85xx/files/arch/powerpc/boot/dts/ws-ap3825i.dts b/target/linux/mpc85xx/files/arch/powerpc/boot/dts/ws-ap3825i.dts index b4a90b31c7..43c044b4fc 100644 --- a/target/linux/mpc85xx/files/arch/powerpc/boot/dts/ws-ap3825i.dts +++ b/target/linux/mpc85xx/files/arch/powerpc/boot/dts/ws-ap3825i.dts @@ -26,72 +26,49 @@ device_type = "memory"; }; - led_spi { - /* - * This is currently non-functioning because the spi-gpio - * driver refuses to register when presented with this node. 
- */ - compatible = "spi-gpio"; - #address-cells = <1>; - #size-cells = <0>; - - sck-gpios = <&gpio0 14 GPIO_ACTIVE_HIGH>; - mosi-gpios = <&gpio0 13 GPIO_ACTIVE_HIGH>; - num-chipselects = <0>; - - spi_gpio: led_gpio@0 { - compatible = "fairchild,74hc595"; - reg = <0>; - gpio-controller; - #gpio-cells = <2>; - registers-number = <1>; - spi-max-frequency = <100000>; - }; - }; - leds { compatible = "gpio-leds"; wifi1 { gpios = <&spi_gpio 3 GPIO_ACTIVE_HIGH>; - label = "ws-ap3825i:green:radio1"; + label = "green:radio1"; linux,default-trigger = "phy0tpt"; }; wifi2 { gpios = <&spi_gpio 2 GPIO_ACTIVE_HIGH>; - label = "ws-ap3825i:green:radio2"; + label = "green:radio2"; linux,default-trigger = "phy1tpt"; }; led_power_green: power_green { gpios = <&spi_gpio 0 GPIO_ACTIVE_HIGH>; - label = "ws-ap3825i:green:power"; + label = "green:power"; }; led_power_red: power_red { gpios = <&spi_gpio 1 GPIO_ACTIVE_HIGH>; - label = "ws-ap3825i:red:power"; + label = "red:power"; }; - eth0_red { + lan1_red { gpios = <&spi_gpio 6 GPIO_ACTIVE_HIGH>; - label = "ws-ap3825i:red:eth0"; + label = "red:lan1"; }; - eth0_green { + lan1_green { gpios = <&spi_gpio 4 GPIO_ACTIVE_HIGH>; - label = "ws-ap3825i:green:eth0"; + label = "green:lan1"; }; - eth1_red { + lan2_red { gpios = <&spi_gpio 7 GPIO_ACTIVE_HIGH>; - label = "ws-ap3825i:red:eth1"; + label = "red:lan2"; }; - eth1_green { + lan2_green { gpios = <&spi_gpio 5 GPIO_ACTIVE_HIGH>; - label = "ws-ap3825i:green:eth1"; + label = "green:lan2"; }; }; @@ -246,4 +223,29 @@ }; }; +&soc { + led_spi { + /* + * This is currently non-functioning because the spi-gpio + * driver refuses to register when presented with this node. 
+ */ + compatible = "spi-gpio"; + #address-cells = <1>; + #size-cells = <0>; + + sck-gpios = <&gpio0 14 GPIO_ACTIVE_HIGH>; + mosi-gpios = <&gpio0 13 GPIO_ACTIVE_HIGH>; + num-chipselects = <0>; + + spi_gpio: led_gpio@0 { + compatible = "fairchild,74hc595"; + reg = <0>; + gpio-controller; + #gpio-cells = <2>; + registers-number = <1>; + spi-max-frequency = <100000>; + }; + }; +}; + /include/ "fsl/p1020si-post.dtsi" diff --git a/target/linux/mvebu/image/cortexa53.mk b/target/linux/mvebu/image/cortexa53.mk index ca3a53f992..bfaa597d9a 100644 --- a/target/linux/mvebu/image/cortexa53.mk +++ b/target/linux/mvebu/image/cortexa53.mk @@ -82,10 +82,10 @@ define Device/methode_udpu DEVICE_VENDOR := Methode DEVICE_MODEL := micro-DPU (uDPU) DEVICE_DTS := armada-3720-uDPU - KERNEL_LOADADDR := 0x00080000 + KERNEL_LOADADDR := 0x00800000 KERNEL_INITRAMFS := kernel-bin | gzip | fit gzip $$(KDIR)/image-$$(DEVICE_DTS).dtb KERNEL_INITRAMFS_SUFFIX := .itb - DEVICE_PACKAGES += f2fs-tools fdisk kmod-i2c-pxa + DEVICE_PACKAGES += f2fs-tools fdisk kmod-i2c-pxa kmod-hwmon-lm75 DEVICE_IMG_NAME = $$(DEVICE_IMG_PREFIX)-$$(2) IMAGES := firmware.tgz IMAGE/firmware.tgz := boot-scr | boot-img-ext4 | uDPU-firmware | append-metadata diff --git a/target/linux/qoriq/config-5.10 b/target/linux/qoriq/config-5.10 index 6a582bbcd5..e76ca379ff 100644 --- a/target/linux/qoriq/config-5.10 +++ b/target/linux/qoriq/config-5.10 @@ -140,7 +140,6 @@ CONFIG_FS_POSIX_ACL=y CONFIG_FTL=y CONFIG_FUNCTION_ERROR_INJECTION=y CONFIG_FW_LOADER_PAGED_BUF=y -CONFIG_GDB_SCRIPTS=y CONFIG_GENERIC_ALLOCATOR=y CONFIG_GENERIC_BUG=y CONFIG_GENERIC_CLOCKEVENTS=y diff --git a/target/linux/ramips/files/drivers/mtd/nand/raw/mt7621_nand.c b/target/linux/ramips/files/drivers/mtd/nand/raw/mt7621_nand.c new file mode 100644 index 0000000000..f3a81ee4e2 --- /dev/null +++ b/target/linux/ramips/files/drivers/mtd/nand/raw/mt7621_nand.c @@ -0,0 +1,1362 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * MediaTek MT7621 NAND Flash Controller driver + * 
+ * Copyright (C) 2020 MediaTek Inc. All Rights Reserved. + * + * Author: Weijie Gao + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* NFI core registers */ +#define NFI_CNFG 0x000 +#define CNFG_OP_MODE_S 12 +#define CNFG_OP_MODE_M GENMASK(14, 12) +#define CNFG_OP_CUSTOM 6 +#define CNFG_AUTO_FMT_EN BIT(9) +#define CNFG_HW_ECC_EN BIT(8) +#define CNFG_BYTE_RW BIT(6) +#define CNFG_READ_MODE BIT(1) + +#define NFI_PAGEFMT 0x004 +#define PAGEFMT_FDM_ECC_S 12 +#define PAGEFMT_FDM_ECC_M GENMASK(15, 12) +#define PAGEFMT_FDM_S 8 +#define PAGEFMT_FDM_M GENMASK(11, 8) +#define PAGEFMT_SPARE_S 4 +#define PAGEFMT_SPARE_M GENMASK(5, 4) +#define PAGEFMT_PAGE_S 0 +#define PAGEFMT_PAGE_M GENMASK(1, 0) + +#define NFI_CON 0x008 +#define CON_NFI_SEC_S 12 +#define CON_NFI_SEC_M GENMASK(15, 12) +#define CON_NFI_BWR BIT(9) +#define CON_NFI_BRD BIT(8) +#define CON_NFI_RST BIT(1) +#define CON_FIFO_FLUSH BIT(0) + +#define NFI_ACCCON 0x00c +#define ACCCON_POECS_S 28 +#define ACCCON_POECS_MAX 0x0f +#define ACCCON_POECS_DEF 3 +#define ACCCON_PRECS_S 22 +#define ACCCON_PRECS_MAX 0x3f +#define ACCCON_PRECS_DEF 3 +#define ACCCON_C2R_S 16 +#define ACCCON_C2R_MAX 0x3f +#define ACCCON_C2R_DEF 7 +#define ACCCON_W2R_S 12 +#define ACCCON_W2R_MAX 0x0f +#define ACCCON_W2R_DEF 7 +#define ACCCON_WH_S 8 +#define ACCCON_WH_MAX 0x0f +#define ACCCON_WH_DEF 15 +#define ACCCON_WST_S 4 +#define ACCCON_WST_MAX 0x0f +#define ACCCON_WST_DEF 15 +#define ACCCON_WST_MIN 3 +#define ACCCON_RLT_S 0 +#define ACCCON_RLT_MAX 0x0f +#define ACCCON_RLT_DEF 15 +#define ACCCON_RLT_MIN 3 + +#define NFI_CMD 0x020 + +#define NFI_ADDRNOB 0x030 +#define ADDR_ROW_NOB_S 4 +#define ADDR_ROW_NOB_M GENMASK(6, 4) +#define ADDR_COL_NOB_S 0 +#define ADDR_COL_NOB_M GENMASK(2, 0) + +#define NFI_COLADDR 0x034 +#define NFI_ROWADDR 0x038 + +#define NFI_STRDATA 0x040 +#define STR_DATA BIT(0) + +#define NFI_CNRNB 0x044 +#define CB2R_TIME_S 4 
+#define CB2R_TIME_M GENMASK(7, 4) +#define STR_CNRNB BIT(0) + +#define NFI_DATAW 0x050 +#define NFI_DATAR 0x054 + +#define NFI_PIO_DIRDY 0x058 +#define PIO_DIRDY BIT(0) + +#define NFI_STA 0x060 +#define STA_NFI_FSM_S 16 +#define STA_NFI_FSM_M GENMASK(19, 16) +#define STA_FSM_CUSTOM_DATA 14 +#define STA_BUSY BIT(8) +#define STA_ADDR BIT(1) +#define STA_CMD BIT(0) + +#define NFI_ADDRCNTR 0x070 +#define SEC_CNTR_S 12 +#define SEC_CNTR_M GENMASK(15, 12) +#define SEC_ADDR_S 0 +#define SEC_ADDR_M GENMASK(9, 0) + +#define NFI_CSEL 0x090 +#define CSEL_S 0 +#define CSEL_M GENMASK(1, 0) + +#define NFI_FDM0L 0x0a0 +#define NFI_FDML(n) (0x0a0 + ((n) << 3)) + +#define NFI_FDM0M 0x0a4 +#define NFI_FDMM(n) (0x0a4 + ((n) << 3)) + +#define NFI_MASTER_STA 0x210 +#define MAS_ADDR GENMASK(11, 9) +#define MAS_RD GENMASK(8, 6) +#define MAS_WR GENMASK(5, 3) +#define MAS_RDDLY GENMASK(2, 0) + +/* ECC engine registers */ +#define ECC_ENCCON 0x000 +#define ENC_EN BIT(0) + +#define ECC_ENCCNFG 0x004 +#define ENC_CNFG_MSG_S 16 +#define ENC_CNFG_MSG_M GENMASK(28, 16) +#define ENC_MODE_S 4 +#define ENC_MODE_M GENMASK(5, 4) +#define ENC_MODE_NFI 1 +#define ENC_TNUM_S 0 +#define ENC_TNUM_M GENMASK(2, 0) + +#define ECC_ENCIDLE 0x00c +#define ENC_IDLE BIT(0) + +#define ECC_DECCON 0x100 +#define DEC_EN BIT(0) + +#define ECC_DECCNFG 0x104 +#define DEC_EMPTY_EN BIT(31) +#define DEC_CS_S 16 +#define DEC_CS_M GENMASK(28, 16) +#define DEC_CON_S 12 +#define DEC_CON_M GENMASK(13, 12) +#define DEC_CON_EL 2 +#define DEC_MODE_S 4 +#define DEC_MODE_M GENMASK(5, 4) +#define DEC_MODE_NFI 1 +#define DEC_TNUM_S 0 +#define DEC_TNUM_M GENMASK(2, 0) + +#define ECC_DECIDLE 0x10c +#define DEC_IDLE BIT(1) + +#define ECC_DECENUM 0x114 +#define ERRNUM_S 2 +#define ERRNUM_M GENMASK(3, 0) + +#define ECC_DECDONE 0x118 +#define DEC_DONE7 BIT(7) +#define DEC_DONE6 BIT(6) +#define DEC_DONE5 BIT(5) +#define DEC_DONE4 BIT(4) +#define DEC_DONE3 BIT(3) +#define DEC_DONE2 BIT(2) +#define DEC_DONE1 BIT(1) +#define DEC_DONE0 BIT(0) + 
+#define ECC_DECEL(n) (0x11c + (n) * 4) +#define DEC_EL_ODD_S 16 +#define DEC_EL_EVEN_S 0 +#define DEC_EL_M 0x1fff +#define DEC_EL_BYTE_POS_S 3 +#define DEC_EL_BIT_POS_M GENMASK(2, 0) + +#define ECC_FDMADDR 0x13c + +/* ENCIDLE and DECIDLE */ +#define ECC_IDLE BIT(0) + +#define ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt) \ + ((tpoecs) << ACCCON_POECS_S | (tprecs) << ACCCON_PRECS_S | \ + (tc2r) << ACCCON_C2R_S | (tw2r) << ACCCON_W2R_S | \ + (twh) << ACCCON_WH_S | (twst) << ACCCON_WST_S | (trlt)) + +#define MASTER_STA_MASK (MAS_ADDR | MAS_RD | MAS_WR | \ + MAS_RDDLY) +#define NFI_RESET_TIMEOUT 1000000 +#define NFI_CORE_TIMEOUT 500000 +#define ECC_ENGINE_TIMEOUT 500000 + +#define ECC_SECTOR_SIZE 512 +#define ECC_PARITY_BITS 13 + +#define NFI_FDM_SIZE 8 + +#define MT7621_NFC_NAME "mt7621-nand" + +struct mt7621_nfc { + struct nand_controller controller; + struct nand_chip nand; + struct clk *nfi_clk; + struct device *dev; + + u32 nfi_base; + void __iomem *nfi_regs; + void __iomem *ecc_regs; + + u32 spare_per_sector; +}; + +static const u16 mt7621_nfi_page_size[] = { SZ_512, SZ_2K, SZ_4K }; +static const u8 mt7621_nfi_spare_size[] = { 16, 26, 27, 28 }; +static const u8 mt7621_ecc_strength[] = { 4, 6, 8, 10, 12 }; + +static inline u32 nfi_read32(struct mt7621_nfc *nfc, u32 reg) +{ + return readl(nfc->nfi_regs + reg); +} + +static inline void nfi_write32(struct mt7621_nfc *nfc, u32 reg, u32 val) +{ + writel(val, nfc->nfi_regs + reg); +} + +static inline u16 nfi_read16(struct mt7621_nfc *nfc, u32 reg) +{ + return readw(nfc->nfi_regs + reg); +} + +static inline void nfi_write16(struct mt7621_nfc *nfc, u32 reg, u16 val) +{ + writew(val, nfc->nfi_regs + reg); +} + +static inline void ecc_write16(struct mt7621_nfc *nfc, u32 reg, u16 val) +{ + writew(val, nfc->ecc_regs + reg); +} + +static inline u32 ecc_read32(struct mt7621_nfc *nfc, u32 reg) +{ + return readl(nfc->ecc_regs + reg); +} + +static inline void ecc_write32(struct mt7621_nfc *nfc, u32 reg, u32 val) +{ + 
return writel(val, nfc->ecc_regs + reg); +} + +static inline u8 *oob_fdm_ptr(struct nand_chip *nand, int sect) +{ + return nand->oob_poi + sect * NFI_FDM_SIZE; +} + +static inline u8 *oob_ecc_ptr(struct mt7621_nfc *nfc, int sect) +{ + struct nand_chip *nand = &nfc->nand; + + return nand->oob_poi + nand->ecc.steps * NFI_FDM_SIZE + + sect * (nfc->spare_per_sector - NFI_FDM_SIZE); +} + +static inline u8 *page_data_ptr(struct nand_chip *nand, const u8 *buf, + int sect) +{ + return (u8 *)buf + sect * nand->ecc.size; +} + +static int mt7621_ecc_wait_idle(struct mt7621_nfc *nfc, u32 reg) +{ + struct device *dev = nfc->dev; + u32 val; + int ret; + + ret = readw_poll_timeout_atomic(nfc->ecc_regs + reg, val, + val & ECC_IDLE, 10, + ECC_ENGINE_TIMEOUT); + if (ret) { + dev_warn(dev, "ECC engine timed out entering idle mode\n"); + return -EIO; + } + + return 0; +} + +static int mt7621_ecc_decoder_wait_done(struct mt7621_nfc *nfc, u32 sect) +{ + struct device *dev = nfc->dev; + u32 val; + int ret; + + ret = readw_poll_timeout_atomic(nfc->ecc_regs + ECC_DECDONE, val, + val & (1 << sect), 10, + ECC_ENGINE_TIMEOUT); + + if (ret) { + dev_warn(dev, "ECC decoder for sector %d timed out\n", + sect); + return -ETIMEDOUT; + } + + return 0; +} + +static void mt7621_ecc_encoder_op(struct mt7621_nfc *nfc, bool enable) +{ + mt7621_ecc_wait_idle(nfc, ECC_ENCIDLE); + ecc_write16(nfc, ECC_ENCCON, enable ? ENC_EN : 0); +} + +static void mt7621_ecc_decoder_op(struct mt7621_nfc *nfc, bool enable) +{ + mt7621_ecc_wait_idle(nfc, ECC_DECIDLE); + ecc_write16(nfc, ECC_DECCON, enable ? 
DEC_EN : 0); +} + +static int mt7621_ecc_correct_check(struct mt7621_nfc *nfc, u8 *sector_buf, + u8 *fdm_buf, u32 sect) +{ + struct nand_chip *nand = &nfc->nand; + u32 decnum, num_error_bits, fdm_end_bits; + u32 error_locations, error_bit_loc; + u32 error_byte_pos, error_bit_pos; + int bitflips = 0; + u32 i; + + decnum = ecc_read32(nfc, ECC_DECENUM); + num_error_bits = (decnum >> (sect << ERRNUM_S)) & ERRNUM_M; + fdm_end_bits = (nand->ecc.size + NFI_FDM_SIZE) << 3; + + if (!num_error_bits) + return 0; + + if (num_error_bits == ERRNUM_M) + return -1; + + for (i = 0; i < num_error_bits; i++) { + error_locations = ecc_read32(nfc, ECC_DECEL(i / 2)); + error_bit_loc = (error_locations >> ((i % 2) * DEC_EL_ODD_S)) & + DEC_EL_M; + error_byte_pos = error_bit_loc >> DEC_EL_BYTE_POS_S; + error_bit_pos = error_bit_loc & DEC_EL_BIT_POS_M; + + if (error_bit_loc < (nand->ecc.size << 3)) { + if (sector_buf) { + sector_buf[error_byte_pos] ^= + (1 << error_bit_pos); + } + } else if (error_bit_loc < fdm_end_bits) { + if (fdm_buf) { + fdm_buf[error_byte_pos - nand->ecc.size] ^= + (1 << error_bit_pos); + } + } + + bitflips++; + } + + return bitflips; +} + +static int mt7621_nfc_wait_write_completion(struct mt7621_nfc *nfc, + struct nand_chip *nand) +{ + struct device *dev = nfc->dev; + u16 val; + int ret; + + ret = readw_poll_timeout_atomic(nfc->nfi_regs + NFI_ADDRCNTR, val, + ((val & SEC_CNTR_M) >> SEC_CNTR_S) >= nand->ecc.steps, 10, + NFI_CORE_TIMEOUT); + + if (ret) { + dev_warn(dev, "NFI core write operation timed out\n"); + return -ETIMEDOUT; + } + + return ret; +} + +static void mt7621_nfc_hw_reset(struct mt7621_nfc *nfc) +{ + u32 val; + int ret; + + /* reset all registers and force the NFI master to terminate */ + nfi_write16(nfc, NFI_CON, CON_FIFO_FLUSH | CON_NFI_RST); + + /* wait for the master to finish the last transaction */ + ret = readw_poll_timeout(nfc->nfi_regs + NFI_MASTER_STA, val, + !(val & MASTER_STA_MASK), 50, + NFI_RESET_TIMEOUT); + if (ret) { + dev_warn(nfc->dev, 
"Failed to reset NFI master in %dms\n", + NFI_RESET_TIMEOUT); + } + + /* ensure any status register affected by the NFI master is reset */ + nfi_write16(nfc, NFI_CON, CON_FIFO_FLUSH | CON_NFI_RST); + nfi_write16(nfc, NFI_STRDATA, 0); +} + +static inline void mt7621_nfc_hw_init(struct mt7621_nfc *nfc) +{ + u32 acccon; + + /* + * CNRNB: nand ready/busy register + * ------------------------------- + * 7:4: timeout register for polling the NAND busy/ready signal + * 0 : poll the status of the busy/ready signal after [7:4]*16 cycles. + */ + nfi_write16(nfc, NFI_CNRNB, CB2R_TIME_M | STR_CNRNB); + + mt7621_nfc_hw_reset(nfc); + + /* Apply default access timing */ + acccon = ACCTIMING(ACCCON_POECS_DEF, ACCCON_PRECS_DEF, ACCCON_C2R_DEF, + ACCCON_W2R_DEF, ACCCON_WH_DEF, ACCCON_WST_DEF, + ACCCON_RLT_DEF); + + nfi_write32(nfc, NFI_ACCCON, acccon); +} + +static int mt7621_nfc_send_command(struct mt7621_nfc *nfc, u8 command) +{ + struct device *dev = nfc->dev; + u32 val; + int ret; + + nfi_write32(nfc, NFI_CMD, command); + + ret = readl_poll_timeout_atomic(nfc->nfi_regs + NFI_STA, val, + !(val & STA_CMD), 10, + NFI_CORE_TIMEOUT); + if (ret) { + dev_warn(dev, "NFI core timed out entering command mode\n"); + return -EIO; + } + + return 0; +} + +static int mt7621_nfc_send_address_byte(struct mt7621_nfc *nfc, int addr) +{ + struct device *dev = nfc->dev; + u32 val; + int ret; + + nfi_write32(nfc, NFI_COLADDR, addr); + nfi_write32(nfc, NFI_ROWADDR, 0); + nfi_write16(nfc, NFI_ADDRNOB, 1); + + ret = readl_poll_timeout_atomic(nfc->nfi_regs + NFI_STA, val, + !(val & STA_ADDR), 10, + NFI_CORE_TIMEOUT); + if (ret) { + dev_warn(dev, "NFI core timed out entering address mode\n"); + return -EIO; + } + + return 0; +} + +static int mt7621_nfc_send_address(struct mt7621_nfc *nfc, const u8 *addr, + unsigned int naddrs) +{ + int ret; + + while (naddrs) { + ret = mt7621_nfc_send_address_byte(nfc, *addr); + if (ret) + return ret; + + addr++; + naddrs--; + } + + return 0; +} + +static void 
mt7621_nfc_wait_pio_ready(struct mt7621_nfc *nfc) +{ + struct device *dev = nfc->dev; + int ret; + u16 val; + + ret = readw_poll_timeout_atomic(nfc->nfi_regs + NFI_PIO_DIRDY, val, + val & PIO_DIRDY, 10, + NFI_CORE_TIMEOUT); + if (ret < 0) + dev_err(dev, "NFI core PIO mode not ready\n"); +} + +static u32 mt7621_nfc_pio_read(struct mt7621_nfc *nfc, bool br) +{ + u32 reg; + + /* after each byte read, the NFI_STA reg is reset by the hardware */ + reg = (nfi_read32(nfc, NFI_STA) & STA_NFI_FSM_M) >> STA_NFI_FSM_S; + if (reg != STA_FSM_CUSTOM_DATA) { + reg = nfi_read16(nfc, NFI_CNFG); + reg |= CNFG_READ_MODE | CNFG_BYTE_RW; + if (!br) + reg &= ~CNFG_BYTE_RW; + nfi_write16(nfc, NFI_CNFG, reg); + + /* + * set to max sector to allow the HW to continue reading over + * unaligned accesses + */ + nfi_write16(nfc, NFI_CON, CON_NFI_SEC_M | CON_NFI_BRD); + + /* trigger to fetch data */ + nfi_write16(nfc, NFI_STRDATA, STR_DATA); + } + + mt7621_nfc_wait_pio_ready(nfc); + + return nfi_read32(nfc, NFI_DATAR); +} + +static void mt7621_nfc_read_data(struct mt7621_nfc *nfc, u8 *buf, u32 len) +{ + while (((uintptr_t)buf & 3) && len) { + *buf = mt7621_nfc_pio_read(nfc, true); + buf++; + len--; + } + + while (len >= 4) { + *(u32 *)buf = mt7621_nfc_pio_read(nfc, false); + buf += 4; + len -= 4; + } + + while (len) { + *buf = mt7621_nfc_pio_read(nfc, true); + buf++; + len--; + } +} + +static void mt7621_nfc_read_data_discard(struct mt7621_nfc *nfc, u32 len) +{ + while (len >= 4) { + mt7621_nfc_pio_read(nfc, false); + len -= 4; + } + + while (len) { + mt7621_nfc_pio_read(nfc, true); + len--; + } +} + +static void mt7621_nfc_pio_write(struct mt7621_nfc *nfc, u32 val, bool bw) +{ + u32 reg; + + reg = (nfi_read32(nfc, NFI_STA) & STA_NFI_FSM_M) >> STA_NFI_FSM_S; + if (reg != STA_FSM_CUSTOM_DATA) { + reg = nfi_read16(nfc, NFI_CNFG); + reg &= ~(CNFG_READ_MODE | CNFG_BYTE_RW); + if (bw) + reg |= CNFG_BYTE_RW; + nfi_write16(nfc, NFI_CNFG, reg); + + nfi_write16(nfc, NFI_CON, CON_NFI_SEC_M | 
CON_NFI_BWR); + nfi_write16(nfc, NFI_STRDATA, STR_DATA); + } + + mt7621_nfc_wait_pio_ready(nfc); + nfi_write32(nfc, NFI_DATAW, val); +} + +static void mt7621_nfc_write_data(struct mt7621_nfc *nfc, const u8 *buf, + u32 len) +{ + while (((uintptr_t)buf & 3) && len) { + mt7621_nfc_pio_write(nfc, *buf, true); + buf++; + len--; + } + + while (len >= 4) { + mt7621_nfc_pio_write(nfc, *(const u32 *)buf, false); + buf += 4; + len -= 4; + } + + while (len) { + mt7621_nfc_pio_write(nfc, *buf, true); + buf++; + len--; + } +} + +static void mt7621_nfc_write_data_empty(struct mt7621_nfc *nfc, u32 len) +{ + while (len >= 4) { + mt7621_nfc_pio_write(nfc, 0xffffffff, false); + len -= 4; + } + + while (len) { + mt7621_nfc_pio_write(nfc, 0xff, true); + len--; + } +} + +static int mt7621_nfc_dev_ready(struct mt7621_nfc *nfc, + unsigned int timeout_ms) +{ + u32 val; + + return readl_poll_timeout_atomic(nfc->nfi_regs + NFI_STA, val, + !(val & STA_BUSY), 10, + timeout_ms * 1000); +} + +static int mt7621_nfc_exec_instr(struct nand_chip *nand, + const struct nand_op_instr *instr) +{ + struct mt7621_nfc *nfc = nand_get_controller_data(nand); + + switch (instr->type) { + case NAND_OP_CMD_INSTR: + mt7621_nfc_hw_reset(nfc); + nfi_write16(nfc, NFI_CNFG, CNFG_OP_CUSTOM << CNFG_OP_MODE_S); + return mt7621_nfc_send_command(nfc, instr->ctx.cmd.opcode); + case NAND_OP_ADDR_INSTR: + return mt7621_nfc_send_address(nfc, instr->ctx.addr.addrs, + instr->ctx.addr.naddrs); + case NAND_OP_DATA_IN_INSTR: + mt7621_nfc_read_data(nfc, instr->ctx.data.buf.in, + instr->ctx.data.len); + return 0; + case NAND_OP_DATA_OUT_INSTR: + mt7621_nfc_write_data(nfc, instr->ctx.data.buf.out, + instr->ctx.data.len); + return 0; + case NAND_OP_WAITRDY_INSTR: + return mt7621_nfc_dev_ready(nfc, + instr->ctx.waitrdy.timeout_ms); + default: + WARN_ONCE(1, "unsupported NAND instruction type: %d\n", + instr->type); + + return -EINVAL; + } +} + +static int mt7621_nfc_exec_op(struct nand_chip *nand, + const struct nand_operation *op, 
bool check_only) +{ + struct mt7621_nfc *nfc = nand_get_controller_data(nand); + int i, ret; + + if (check_only) + return 0; + + /* Only CS0 available */ + nfi_write16(nfc, NFI_CSEL, 0); + + for (i = 0; i < op->ninstrs; i++) { + ret = mt7621_nfc_exec_instr(nand, &op->instrs[i]); + if (ret) + return ret; + } + + return 0; +} + +static int mt7621_nfc_setup_interface(struct nand_chip *nand, int csline, + const struct nand_interface_config *conf) +{ + struct mt7621_nfc *nfc = nand_get_controller_data(nand); + const struct nand_sdr_timings *timings; + u32 acccon, temp, rate, tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt; + + if (!nfc->nfi_clk) + return -ENOTSUPP; + + timings = nand_get_sdr_timings(conf); + if (IS_ERR(timings)) + return -ENOTSUPP; + + rate = clk_get_rate(nfc->nfi_clk); + + /* turn clock rate into KHZ */ + rate /= 1000; + + tpoecs = max(timings->tALH_min, timings->tCLH_min) / 1000; + tpoecs = DIV_ROUND_UP(tpoecs * rate, 1000000); + tpoecs = min_t(u32, tpoecs, ACCCON_POECS_MAX); + + tprecs = max(timings->tCLS_min, timings->tALS_min) / 1000; + tprecs = DIV_ROUND_UP(tprecs * rate, 1000000); + tprecs = min_t(u32, tprecs, ACCCON_PRECS_MAX); + + /* sdr interface has no tCR which means CE# low to RE# low */ + tc2r = 0; + + tw2r = timings->tWHR_min / 1000; + tw2r = DIV_ROUND_UP(tw2r * rate, 1000000); + tw2r = DIV_ROUND_UP(tw2r - 1, 2); + tw2r = min_t(u32, tw2r, ACCCON_W2R_MAX); + + twh = max(timings->tREH_min, timings->tWH_min) / 1000; + twh = DIV_ROUND_UP(twh * rate, 1000000) - 1; + twh = min_t(u32, twh, ACCCON_WH_MAX); + + /* Calculate real WE#/RE# hold time in nanosecond */ + temp = (twh + 1) * 1000000 / rate; + /* nanosecond to picosecond */ + temp *= 1000; + + /* + * WE# low level time should be expaned to meet WE# pulse time + * and WE# cycle time at the same time. 
+ */ + if (temp < timings->tWC_min) + twst = timings->tWC_min - temp; + else + twst = 0; + twst = max(timings->tWP_min, twst) / 1000; + twst = DIV_ROUND_UP(twst * rate, 1000000) - 1; + twst = min_t(u32, twst, ACCCON_WST_MAX); + + /* + * RE# low level time should be expaned to meet RE# pulse time + * and RE# cycle time at the same time. + */ + if (temp < timings->tRC_min) + trlt = timings->tRC_min - temp; + else + trlt = 0; + trlt = max(trlt, timings->tRP_min) / 1000; + trlt = DIV_ROUND_UP(trlt * rate, 1000000) - 1; + trlt = min_t(u32, trlt, ACCCON_RLT_MAX); + + if (csline == NAND_DATA_IFACE_CHECK_ONLY) { + if (twst < ACCCON_WST_MIN || trlt < ACCCON_RLT_MIN) + return -ENOTSUPP; + } + + acccon = ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt); + + dev_dbg(nfc->dev, "Using programmed access timing: %08x\n", acccon); + + nfi_write32(nfc, NFI_ACCCON, acccon); + + return 0; +} + +static int mt7621_nfc_calc_ecc_strength(struct mt7621_nfc *nfc, + u32 avail_ecc_bytes) +{ + struct nand_chip *nand = &nfc->nand; + struct mtd_info *mtd = nand_to_mtd(nand); + u32 strength; + int i; + + strength = avail_ecc_bytes * 8 / ECC_PARITY_BITS; + + /* Find the closest supported ecc strength */ + for (i = ARRAY_SIZE(mt7621_ecc_strength) - 1; i >= 0; i--) { + if (mt7621_ecc_strength[i] <= strength) + break; + } + + if (unlikely(i < 0)) { + dev_err(nfc->dev, "OOB size (%u) is not supported\n", + mtd->oobsize); + return -EINVAL; + } + + nand->ecc.strength = mt7621_ecc_strength[i]; + nand->ecc.bytes = + DIV_ROUND_UP(nand->ecc.strength * ECC_PARITY_BITS, 8); + + dev_info(nfc->dev, "ECC strength adjusted to %u bits\n", + nand->ecc.strength); + + return i; +} + +static int mt7621_nfc_set_spare_per_sector(struct mt7621_nfc *nfc) +{ + struct nand_chip *nand = &nfc->nand; + struct mtd_info *mtd = nand_to_mtd(nand); + u32 size; + int i; + + size = nand->ecc.bytes + NFI_FDM_SIZE; + + /* Find the closest supported spare size */ + for (i = 0; i < ARRAY_SIZE(mt7621_nfi_spare_size); i++) { + if 
(mt7621_nfi_spare_size[i] >= size) + break; + } + + if (unlikely(i >= ARRAY_SIZE(mt7621_nfi_spare_size))) { + dev_err(nfc->dev, "OOB size (%u) is not supported\n", + mtd->oobsize); + return -EINVAL; + } + + nfc->spare_per_sector = mt7621_nfi_spare_size[i]; + + return i; +} + +static int mt7621_nfc_ecc_init(struct mt7621_nfc *nfc) +{ + struct nand_chip *nand = &nfc->nand; + struct mtd_info *mtd = nand_to_mtd(nand); + u32 spare_per_sector, encode_block_size, decode_block_size; + u32 ecc_enccfg, ecc_deccfg; + int ecc_cap; + + /* Only hardware ECC mode is supported */ + if (nand->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST) { + dev_err(nfc->dev, "Only hardware ECC mode is supported\n"); + return -EINVAL; + } + + nand->ecc.size = ECC_SECTOR_SIZE; + nand->ecc.steps = mtd->writesize / nand->ecc.size; + + spare_per_sector = mtd->oobsize / nand->ecc.steps; + + ecc_cap = mt7621_nfc_calc_ecc_strength(nfc, + spare_per_sector - NFI_FDM_SIZE); + if (ecc_cap < 0) + return ecc_cap; + + /* Sector + FDM */ + encode_block_size = (nand->ecc.size + NFI_FDM_SIZE) * 8; + ecc_enccfg = ecc_cap | (ENC_MODE_NFI << ENC_MODE_S) | + (encode_block_size << ENC_CNFG_MSG_S); + + /* Sector + FDM + ECC parity bits */ + decode_block_size = ((nand->ecc.size + NFI_FDM_SIZE) * 8) + + nand->ecc.strength * ECC_PARITY_BITS; + ecc_deccfg = ecc_cap | (DEC_MODE_NFI << DEC_MODE_S) | + (decode_block_size << DEC_CS_S) | + (DEC_CON_EL << DEC_CON_S) | DEC_EMPTY_EN; + + ecc_write32(nfc, ECC_FDMADDR, nfc->nfi_base + NFI_FDML(0)); + + mt7621_ecc_encoder_op(nfc, false); + ecc_write32(nfc, ECC_ENCCNFG, ecc_enccfg); + + mt7621_ecc_decoder_op(nfc, false); + ecc_write32(nfc, ECC_DECCNFG, ecc_deccfg); + + return 0; +} + +static int mt7621_nfc_set_page_format(struct mt7621_nfc *nfc) +{ + struct nand_chip *nand = &nfc->nand; + struct mtd_info *mtd = nand_to_mtd(nand); + int i, spare_size; + u32 pagefmt; + + spare_size = mt7621_nfc_set_spare_per_sector(nfc); + if (spare_size < 0) + return spare_size; + + for (i = 0; i < 
ARRAY_SIZE(mt7621_nfi_page_size); i++) { + if (mt7621_nfi_page_size[i] == mtd->writesize) + break; + } + + if (unlikely(i >= ARRAY_SIZE(mt7621_nfi_page_size))) { + dev_err(nfc->dev, "Page size (%u) is not supported\n", + mtd->writesize); + return -EINVAL; + } + + pagefmt = i | (spare_size << PAGEFMT_SPARE_S) | + (NFI_FDM_SIZE << PAGEFMT_FDM_S) | + (NFI_FDM_SIZE << PAGEFMT_FDM_ECC_S); + + nfi_write16(nfc, NFI_PAGEFMT, pagefmt); + + return 0; +} + +static int mt7621_nfc_attach_chip(struct nand_chip *nand) +{ + struct mt7621_nfc *nfc = nand_get_controller_data(nand); + int ret; + + if (nand->options & NAND_BUSWIDTH_16) { + dev_err(nfc->dev, "16-bit buswidth is not supported"); + return -EINVAL; + } + + ret = mt7621_nfc_ecc_init(nfc); + if (ret) + return ret; + + return mt7621_nfc_set_page_format(nfc); +} + +static const struct nand_controller_ops mt7621_nfc_controller_ops = { + .attach_chip = mt7621_nfc_attach_chip, + .exec_op = mt7621_nfc_exec_op, + .setup_interface = mt7621_nfc_setup_interface, +}; + +static int mt7621_nfc_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oob_region) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + + if (section >= nand->ecc.steps) + return -ERANGE; + + oob_region->length = NFI_FDM_SIZE - 1; + oob_region->offset = section * NFI_FDM_SIZE + 1; + + return 0; +} + +static int mt7621_nfc_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oob_region) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + + if (section) + return -ERANGE; + + oob_region->offset = NFI_FDM_SIZE * nand->ecc.steps; + oob_region->length = mtd->oobsize - oob_region->offset; + + return 0; +} + +static const struct mtd_ooblayout_ops mt7621_nfc_ooblayout_ops = { + .free = mt7621_nfc_ooblayout_free, + .ecc = mt7621_nfc_ooblayout_ecc, +}; + +static void mt7621_nfc_write_fdm(struct mt7621_nfc *nfc) +{ + struct nand_chip *nand = &nfc->nand; + u32 vall, valm; + u8 *oobptr; + int i, j; + + for (i = 0; i < nand->ecc.steps; i++) { 
+ vall = 0; + valm = 0; + oobptr = oob_fdm_ptr(nand, i); + + for (j = 0; j < 4; j++) + vall |= (u32)oobptr[j] << (j * 8); + + for (j = 0; j < 4; j++) + valm |= (u32)oobptr[j + 4] << (j * 8); + + nfi_write32(nfc, NFI_FDML(i), vall); + nfi_write32(nfc, NFI_FDMM(i), valm); + } +} + +static void mt7621_nfc_read_sector_fdm(struct mt7621_nfc *nfc, u32 sect) +{ + struct nand_chip *nand = &nfc->nand; + u32 vall, valm; + u8 *oobptr; + int i; + + vall = nfi_read32(nfc, NFI_FDML(sect)); + valm = nfi_read32(nfc, NFI_FDMM(sect)); + oobptr = oob_fdm_ptr(nand, sect); + + for (i = 0; i < 4; i++) + oobptr[i] = (vall >> (i * 8)) & 0xff; + + for (i = 0; i < 4; i++) + oobptr[i + 4] = (valm >> (i * 8)) & 0xff; +} + +static int mt7621_nfc_read_page_hwecc(struct nand_chip *nand, uint8_t *buf, + int oob_required, int page) +{ + struct mt7621_nfc *nfc = nand_get_controller_data(nand); + struct mtd_info *mtd = nand_to_mtd(nand); + int bitflips = 0; + int rc, i; + + nand_read_page_op(nand, page, 0, NULL, 0); + + nfi_write16(nfc, NFI_CNFG, (CNFG_OP_CUSTOM << CNFG_OP_MODE_S) | + CNFG_READ_MODE | CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN); + + mt7621_ecc_decoder_op(nfc, true); + + nfi_write16(nfc, NFI_CON, + CON_NFI_BRD | (nand->ecc.steps << CON_NFI_SEC_S)); + + for (i = 0; i < nand->ecc.steps; i++) { + if (buf) + mt7621_nfc_read_data(nfc, page_data_ptr(nand, buf, i), + nand->ecc.size); + else + mt7621_nfc_read_data_discard(nfc, nand->ecc.size); + + rc = mt7621_ecc_decoder_wait_done(nfc, i); + + mt7621_nfc_read_sector_fdm(nfc, i); + + if (rc < 0) { + bitflips = -EIO; + continue; + } + + rc = mt7621_ecc_correct_check(nfc, + buf ? 
page_data_ptr(nand, buf, i) : NULL, + oob_fdm_ptr(nand, i), i); + + if (rc < 0) { + dev_dbg(nfc->dev, + "Uncorrectable ECC error at page %d.%d\n", + page, i); + bitflips = -EBADMSG; + mtd->ecc_stats.failed++; + } else if (bitflips >= 0) { + bitflips += rc; + mtd->ecc_stats.corrected += rc; + } + } + + mt7621_ecc_decoder_op(nfc, false); + + nfi_write16(nfc, NFI_CON, 0); + + return bitflips; +} + +static int mt7621_nfc_read_page_raw(struct nand_chip *nand, uint8_t *buf, + int oob_required, int page) +{ + struct mt7621_nfc *nfc = nand_get_controller_data(nand); + int i; + + nand_read_page_op(nand, page, 0, NULL, 0); + + nfi_write16(nfc, NFI_CNFG, (CNFG_OP_CUSTOM << CNFG_OP_MODE_S) | + CNFG_READ_MODE); + + nfi_write16(nfc, NFI_CON, + CON_NFI_BRD | (nand->ecc.steps << CON_NFI_SEC_S)); + + for (i = 0; i < nand->ecc.steps; i++) { + /* Read data */ + if (buf) + mt7621_nfc_read_data(nfc, page_data_ptr(nand, buf, i), + nand->ecc.size); + else + mt7621_nfc_read_data_discard(nfc, nand->ecc.size); + + /* Read FDM */ + mt7621_nfc_read_data(nfc, oob_fdm_ptr(nand, i), NFI_FDM_SIZE); + + /* Read ECC parity data */ + mt7621_nfc_read_data(nfc, oob_ecc_ptr(nfc, i), + nfc->spare_per_sector - NFI_FDM_SIZE); + } + + nfi_write16(nfc, NFI_CON, 0); + + return 0; +} + +static int mt7621_nfc_read_oob_hwecc(struct nand_chip *nand, int page) +{ + return mt7621_nfc_read_page_hwecc(nand, NULL, 1, page); +} + +static int mt7621_nfc_read_oob_raw(struct nand_chip *nand, int page) +{ + return mt7621_nfc_read_page_raw(nand, NULL, 1, page); +} + +static int mt7621_nfc_check_empty_page(struct nand_chip *nand, const u8 *buf) +{ + struct mtd_info *mtd = nand_to_mtd(nand); + uint32_t i, j; + u8 *oobptr; + + if (buf) { + for (i = 0; i < mtd->writesize; i++) + if (buf[i] != 0xff) + return 0; + } + + for (i = 0; i < nand->ecc.steps; i++) { + oobptr = oob_fdm_ptr(nand, i); + for (j = 0; j < NFI_FDM_SIZE; j++) + if (oobptr[j] != 0xff) + return 0; + } + + return 1; +} + +static int 
mt7621_nfc_write_page_hwecc(struct nand_chip *nand, + const uint8_t *buf, int oob_required, + int page) +{ + struct mt7621_nfc *nfc = nand_get_controller_data(nand); + struct mtd_info *mtd = nand_to_mtd(nand); + + if (mt7621_nfc_check_empty_page(nand, buf)) { + /* + * MT7621 ECC engine always generates parity code for input + * pages, even for empty pages. Doing so will write back ECC + * parity code to the oob region, which means such pages will + * no longer be empty pages. + * + * To avoid this, stop write operation if current page is an + * empty page. + */ + return 0; + } + + nand_prog_page_begin_op(nand, page, 0, NULL, 0); + + nfi_write16(nfc, NFI_CNFG, (CNFG_OP_CUSTOM << CNFG_OP_MODE_S) | + CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN); + + mt7621_ecc_encoder_op(nfc, true); + + mt7621_nfc_write_fdm(nfc); + + nfi_write16(nfc, NFI_CON, + CON_NFI_BWR | (nand->ecc.steps << CON_NFI_SEC_S)); + + if (buf) + mt7621_nfc_write_data(nfc, buf, mtd->writesize); + else + mt7621_nfc_write_data_empty(nfc, mtd->writesize); + + mt7621_nfc_wait_write_completion(nfc, nand); + + mt7621_ecc_encoder_op(nfc, false); + + nfi_write16(nfc, NFI_CON, 0); + + return nand_prog_page_end_op(nand); +} + +static int mt7621_nfc_write_page_raw(struct nand_chip *nand, + const uint8_t *buf, int oob_required, + int page) +{ + struct mt7621_nfc *nfc = nand_get_controller_data(nand); + int i; + + nand_prog_page_begin_op(nand, page, 0, NULL, 0); + + nfi_write16(nfc, NFI_CNFG, (CNFG_OP_CUSTOM << CNFG_OP_MODE_S)); + + nfi_write16(nfc, NFI_CON, + CON_NFI_BWR | (nand->ecc.steps << CON_NFI_SEC_S)); + + for (i = 0; i < nand->ecc.steps; i++) { + /* Write data */ + if (buf) + mt7621_nfc_write_data(nfc, page_data_ptr(nand, buf, i), + nand->ecc.size); + else + mt7621_nfc_write_data_empty(nfc, nand->ecc.size); + + /* Write FDM */ + mt7621_nfc_write_data(nfc, oob_fdm_ptr(nand, i), + NFI_FDM_SIZE); + + /* Write dummy ECC parity data */ + mt7621_nfc_write_data_empty(nfc, nfc->spare_per_sector - + NFI_FDM_SIZE); + } + + 
mt7621_nfc_wait_write_completion(nfc, nand); + + nfi_write16(nfc, NFI_CON, 0); + + return nand_prog_page_end_op(nand); +} + +static int mt7621_nfc_write_oob_hwecc(struct nand_chip *nand, int page) +{ + return mt7621_nfc_write_page_hwecc(nand, NULL, 1, page); +} + +static int mt7621_nfc_write_oob_raw(struct nand_chip *nand, int page) +{ + return mt7621_nfc_write_page_raw(nand, NULL, 1, page); +} + +static int mt7621_nfc_init_chip(struct mt7621_nfc *nfc) +{ + struct nand_chip *nand = &nfc->nand; + struct mtd_info *mtd; + int ret; + + nand->controller = &nfc->controller; + nand_set_controller_data(nand, (void *)nfc); + nand_set_flash_node(nand, nfc->dev->of_node); + + nand->options |= NAND_USES_DMA | NAND_NO_SUBPAGE_WRITE | NAND_SKIP_BBTSCAN; + if (!nfc->nfi_clk) + nand->options |= NAND_KEEP_TIMINGS; + + nand->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST; + nand->ecc.read_page = mt7621_nfc_read_page_hwecc; + nand->ecc.read_page_raw = mt7621_nfc_read_page_raw; + nand->ecc.write_page = mt7621_nfc_write_page_hwecc; + nand->ecc.write_page_raw = mt7621_nfc_write_page_raw; + nand->ecc.read_oob = mt7621_nfc_read_oob_hwecc; + nand->ecc.read_oob_raw = mt7621_nfc_read_oob_raw; + nand->ecc.write_oob = mt7621_nfc_write_oob_hwecc; + nand->ecc.write_oob_raw = mt7621_nfc_write_oob_raw; + + mtd = nand_to_mtd(nand); + mtd->owner = THIS_MODULE; + mtd->dev.parent = nfc->dev; + mtd->name = MT7621_NFC_NAME; + mtd_set_ooblayout(mtd, &mt7621_nfc_ooblayout_ops); + + mt7621_nfc_hw_init(nfc); + + ret = nand_scan(nand, 1); + if (ret) + return ret; + + mtk_bmt_attach(mtd); + + ret = mtd_device_register(mtd, NULL, 0); + if (ret) { + dev_err(nfc->dev, "Failed to register MTD: %d\n", ret); + mtk_bmt_detach(mtd); + nand_cleanup(nand); + return ret; + } + + return 0; +} + +static int mt7621_nfc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mt7621_nfc *nfc; + struct resource *res; + int ret; + + nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL); + if (!nfc) + 
return -ENOMEM; + + nand_controller_init(&nfc->controller); + nfc->controller.ops = &mt7621_nfc_controller_ops; + nfc->dev = dev; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nfi"); + nfc->nfi_base = res->start; + nfc->nfi_regs = devm_ioremap_resource(dev, res); + if (IS_ERR(nfc->nfi_regs)) { + ret = PTR_ERR(nfc->nfi_regs); + return ret; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ecc"); + nfc->ecc_regs = devm_ioremap_resource(dev, res); + if (IS_ERR(nfc->ecc_regs)) { + ret = PTR_ERR(nfc->ecc_regs); + return ret; + } + + nfc->nfi_clk = devm_clk_get(dev, "nfi_clk"); + if (IS_ERR(nfc->nfi_clk)) { + dev_warn(dev, "nfi clk not provided\n"); + nfc->nfi_clk = NULL; + } else { + ret = clk_prepare_enable(nfc->nfi_clk); + if (ret) { + dev_err(dev, "Failed to enable nfi core clock\n"); + return ret; + } + } + + platform_set_drvdata(pdev, nfc); + + ret = mt7621_nfc_init_chip(nfc); + if (ret) { + dev_err(dev, "Failed to initialize nand chip\n"); + goto clk_disable; + } + + return 0; + +clk_disable: + clk_disable_unprepare(nfc->nfi_clk); + + return ret; +} + +static int mt7621_nfc_remove(struct platform_device *pdev) +{ + struct mt7621_nfc *nfc = platform_get_drvdata(pdev); + struct nand_chip *nand = &nfc->nand; + struct mtd_info *mtd = nand_to_mtd(nand); + + mtk_bmt_detach(mtd); + mtd_device_unregister(mtd); + nand_cleanup(nand); + clk_disable_unprepare(nfc->nfi_clk); + + return 0; +} + +static const struct of_device_id mt7621_nfc_id_table[] = { + { .compatible = "mediatek,mt7621-nfc" }, + { }, +}; +MODULE_DEVICE_TABLE(of, mt7621_nfc_id_table); + +static struct platform_driver mt7621_nfc_driver = { + .probe = mt7621_nfc_probe, + .remove = mt7621_nfc_remove, + .driver = { + .name = MT7621_NFC_NAME, + .owner = THIS_MODULE, + .of_match_table = mt7621_nfc_id_table, + }, +}; +module_platform_driver(mt7621_nfc_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Weijie Gao "); +MODULE_DESCRIPTION("MediaTek MT7621 NAND Flash Controller driver"); diff --git 
a/target/linux/ramips/mt7620/base-files/etc/hotplug.d/ieee80211/10_fix_wifi_mac b/target/linux/ramips/mt7620/base-files/etc/hotplug.d/ieee80211/10_fix_wifi_mac new file mode 100644 index 0000000000..aa0ad99158 --- /dev/null +++ b/target/linux/ramips/mt7620/base-files/etc/hotplug.d/ieee80211/10_fix_wifi_mac @@ -0,0 +1,26 @@ +[ "$ACTION" == "add" ] || exit 0 + +PHYNBR=${DEVPATH##*/phy} + +[ -n "$PHYNBR" ] || exit 0 + +. /lib/functions.sh +. /lib/functions/system.sh + +board=$(board_name) + +case "$board" in + hiwifi,hc5661) + label_mac=$(mtd_get_mac_ascii bdinfo "Vfac_mac ") + [ "$PHYNBR" = "0" ] && [ -n "$label_mac" ] && \ + echo -n "$label_mac" > /sys${DEVPATH}/macaddress + ;; + hiwifi,hc5761|\ + hiwifi,hc5861) + label_mac=$(mtd_get_mac_ascii bdinfo "Vfac_mac ") + [ "$PHYNBR" = "1" ] && [ -n "$label_mac" ] && \ + echo -n "$label_mac" > /sys${DEVPATH}/macaddress + [ "$PHYNBR" = "0" ] && [ -n "$label_mac" ] && \ + macaddr_unsetbit "$label_mac" 6 > /sys${DEVPATH}/macaddress + ;; +esac diff --git a/target/linux/ramips/mt7621/base-files/etc/hotplug.d/ieee80211/10_fix_wifi_mac b/target/linux/ramips/mt7621/base-files/etc/hotplug.d/ieee80211/10_fix_wifi_mac index d018c1de1b..2a47f926be 100644 --- a/target/linux/ramips/mt7621/base-files/etc/hotplug.d/ieee80211/10_fix_wifi_mac +++ b/target/linux/ramips/mt7621/base-files/etc/hotplug.d/ieee80211/10_fix_wifi_mac @@ -32,6 +32,13 @@ case "$board" in [ "$PHYNBR" = "1" ] && \ macaddr_add "$(mtd_get_mac_binary factory 0x4)" 1 > /sys${DEVPATH}/macaddress ;; + hiwifi,hc5962) + label_mac=$(mtd_get_mac_ascii bdinfo "Vfac_mac ") + [ "$PHYNBR" = "0" ] && [ -n "$label_mac" ] && \ + echo -n "$label_mac" > /sys${DEVPATH}/macaddress + [ "$PHYNBR" = "1" ] && [ -n "$label_mac" ] && \ + macaddr_unsetbit "$label_mac" 6 > /sys${DEVPATH}/macaddress + ;; iptime,a3002mesh|\ iptime,a3004t) hw_mac_addr="$(mtd_get_mac_binary factory 0x4)" diff --git a/target/linux/ramips/mt76x8/base-files/etc/hotplug.d/ieee80211/10_fix_wifi_mac 
b/target/linux/ramips/mt76x8/base-files/etc/hotplug.d/ieee80211/10_fix_wifi_mac new file mode 100644 index 0000000000..b8f8faee9c --- /dev/null +++ b/target/linux/ramips/mt76x8/base-files/etc/hotplug.d/ieee80211/10_fix_wifi_mac @@ -0,0 +1,22 @@ +[ "$ACTION" == "add" ] || exit 0 + +PHYNBR=${DEVPATH##*/phy} + +[ -n "$PHYNBR" ] || exit 0 + +. /lib/functions.sh +. /lib/functions/system.sh + +board=$(board_name) + +case "$board" in + hiwifi,hc5661a|\ + hiwifi,hc5761a|\ + hiwifi,hc5861b) + label_mac=$(mtd_get_mac_ascii bdinfo "Vfac_mac ") + [ "$PHYNBR" = "0" ] && [ -n "$label_mac" ] && \ + echo -n "$label_mac" > /sys${DEVPATH}/macaddress + [ "$PHYNBR" = "1" ] && [ -n "$label_mac" ] && \ + macaddr_unsetbit "$label_mac" 6 > /sys${DEVPATH}/macaddress + ;; +esac diff --git a/target/linux/ramips/patches-5.10/410-mtd-rawnand-add-driver-support-for-MT7621-nand-flash.patch b/target/linux/ramips/patches-5.10/410-mtd-rawnand-add-driver-support-for-MT7621-nand-flash.patch index 3314013420..d0686a7051 100644 --- a/target/linux/ramips/patches-5.10/410-mtd-rawnand-add-driver-support-for-MT7621-nand-flash.patch +++ b/target/linux/ramips/patches-5.10/410-mtd-rawnand-add-driver-support-for-MT7621-nand-flash.patch @@ -45,1359 +45,3 @@ Signed-off-by: Weijie Gao obj-$(CONFIG_MTD_NAND_MTK) += mtk_ecc.o mtk_nand.o obj-$(CONFIG_MTD_NAND_MXIC) += mxic_nand.o obj-$(CONFIG_MTD_NAND_TEGRA) += tegra_nand.o ---- /dev/null -+++ b/drivers/mtd/nand/raw/mt7621_nand.c -@@ -0,0 +1,1353 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * MediaTek MT7621 NAND Flash Controller driver -+ * -+ * Copyright (C) 2020 MediaTek Inc. All Rights Reserved. 
-+ * -+ * Author: Weijie Gao -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* NFI core registers */ -+#define NFI_CNFG 0x000 -+#define CNFG_OP_MODE_S 12 -+#define CNFG_OP_MODE_M GENMASK(14, 12) -+#define CNFG_OP_CUSTOM 6 -+#define CNFG_AUTO_FMT_EN BIT(9) -+#define CNFG_HW_ECC_EN BIT(8) -+#define CNFG_BYTE_RW BIT(6) -+#define CNFG_READ_MODE BIT(1) -+ -+#define NFI_PAGEFMT 0x004 -+#define PAGEFMT_FDM_ECC_S 12 -+#define PAGEFMT_FDM_ECC_M GENMASK(15, 12) -+#define PAGEFMT_FDM_S 8 -+#define PAGEFMT_FDM_M GENMASK(11, 8) -+#define PAGEFMT_SPARE_S 4 -+#define PAGEFMT_SPARE_M GENMASK(5, 4) -+#define PAGEFMT_PAGE_S 0 -+#define PAGEFMT_PAGE_M GENMASK(1, 0) -+ -+#define NFI_CON 0x008 -+#define CON_NFI_SEC_S 12 -+#define CON_NFI_SEC_M GENMASK(15, 12) -+#define CON_NFI_BWR BIT(9) -+#define CON_NFI_BRD BIT(8) -+#define CON_NFI_RST BIT(1) -+#define CON_FIFO_FLUSH BIT(0) -+ -+#define NFI_ACCCON 0x00c -+#define ACCCON_POECS_S 28 -+#define ACCCON_POECS_MAX 0x0f -+#define ACCCON_POECS_DEF 3 -+#define ACCCON_PRECS_S 22 -+#define ACCCON_PRECS_MAX 0x3f -+#define ACCCON_PRECS_DEF 3 -+#define ACCCON_C2R_S 16 -+#define ACCCON_C2R_MAX 0x3f -+#define ACCCON_C2R_DEF 7 -+#define ACCCON_W2R_S 12 -+#define ACCCON_W2R_MAX 0x0f -+#define ACCCON_W2R_DEF 7 -+#define ACCCON_WH_S 8 -+#define ACCCON_WH_MAX 0x0f -+#define ACCCON_WH_DEF 15 -+#define ACCCON_WST_S 4 -+#define ACCCON_WST_MAX 0x0f -+#define ACCCON_WST_DEF 15 -+#define ACCCON_WST_MIN 3 -+#define ACCCON_RLT_S 0 -+#define ACCCON_RLT_MAX 0x0f -+#define ACCCON_RLT_DEF 15 -+#define ACCCON_RLT_MIN 3 -+ -+#define NFI_CMD 0x020 -+ -+#define NFI_ADDRNOB 0x030 -+#define ADDR_ROW_NOB_S 4 -+#define ADDR_ROW_NOB_M GENMASK(6, 4) -+#define ADDR_COL_NOB_S 0 -+#define ADDR_COL_NOB_M GENMASK(2, 0) -+ -+#define NFI_COLADDR 0x034 -+#define NFI_ROWADDR 0x038 -+ -+#define NFI_STRDATA 0x040 -+#define STR_DATA BIT(0) -+ -+#define NFI_CNRNB 0x044 -+#define 
CB2R_TIME_S 4 -+#define CB2R_TIME_M GENMASK(7, 4) -+#define STR_CNRNB BIT(0) -+ -+#define NFI_DATAW 0x050 -+#define NFI_DATAR 0x054 -+ -+#define NFI_PIO_DIRDY 0x058 -+#define PIO_DIRDY BIT(0) -+ -+#define NFI_STA 0x060 -+#define STA_NFI_FSM_S 16 -+#define STA_NFI_FSM_M GENMASK(19, 16) -+#define STA_FSM_CUSTOM_DATA 14 -+#define STA_BUSY BIT(8) -+#define STA_ADDR BIT(1) -+#define STA_CMD BIT(0) -+ -+#define NFI_ADDRCNTR 0x070 -+#define SEC_CNTR_S 12 -+#define SEC_CNTR_M GENMASK(15, 12) -+#define SEC_ADDR_S 0 -+#define SEC_ADDR_M GENMASK(9, 0) -+ -+#define NFI_CSEL 0x090 -+#define CSEL_S 0 -+#define CSEL_M GENMASK(1, 0) -+ -+#define NFI_FDM0L 0x0a0 -+#define NFI_FDML(n) (0x0a0 + ((n) << 3)) -+ -+#define NFI_FDM0M 0x0a4 -+#define NFI_FDMM(n) (0x0a4 + ((n) << 3)) -+ -+#define NFI_MASTER_STA 0x210 -+#define MAS_ADDR GENMASK(11, 9) -+#define MAS_RD GENMASK(8, 6) -+#define MAS_WR GENMASK(5, 3) -+#define MAS_RDDLY GENMASK(2, 0) -+ -+/* ECC engine registers */ -+#define ECC_ENCCON 0x000 -+#define ENC_EN BIT(0) -+ -+#define ECC_ENCCNFG 0x004 -+#define ENC_CNFG_MSG_S 16 -+#define ENC_CNFG_MSG_M GENMASK(28, 16) -+#define ENC_MODE_S 4 -+#define ENC_MODE_M GENMASK(5, 4) -+#define ENC_MODE_NFI 1 -+#define ENC_TNUM_S 0 -+#define ENC_TNUM_M GENMASK(2, 0) -+ -+#define ECC_ENCIDLE 0x00c -+#define ENC_IDLE BIT(0) -+ -+#define ECC_DECCON 0x100 -+#define DEC_EN BIT(0) -+ -+#define ECC_DECCNFG 0x104 -+#define DEC_EMPTY_EN BIT(31) -+#define DEC_CS_S 16 -+#define DEC_CS_M GENMASK(28, 16) -+#define DEC_CON_S 12 -+#define DEC_CON_M GENMASK(13, 12) -+#define DEC_CON_EL 2 -+#define DEC_MODE_S 4 -+#define DEC_MODE_M GENMASK(5, 4) -+#define DEC_MODE_NFI 1 -+#define DEC_TNUM_S 0 -+#define DEC_TNUM_M GENMASK(2, 0) -+ -+#define ECC_DECIDLE 0x10c -+#define DEC_IDLE BIT(1) -+ -+#define ECC_DECENUM 0x114 -+#define ERRNUM_S 2 -+#define ERRNUM_M GENMASK(3, 0) -+ -+#define ECC_DECDONE 0x118 -+#define DEC_DONE7 BIT(7) -+#define DEC_DONE6 BIT(6) -+#define DEC_DONE5 BIT(5) -+#define DEC_DONE4 BIT(4) 
-+#define DEC_DONE3 BIT(3) -+#define DEC_DONE2 BIT(2) -+#define DEC_DONE1 BIT(1) -+#define DEC_DONE0 BIT(0) -+ -+#define ECC_DECEL(n) (0x11c + (n) * 4) -+#define DEC_EL_ODD_S 16 -+#define DEC_EL_EVEN_S 0 -+#define DEC_EL_M 0x1fff -+#define DEC_EL_BYTE_POS_S 3 -+#define DEC_EL_BIT_POS_M GENMASK(2, 0) -+ -+#define ECC_FDMADDR 0x13c -+ -+/* ENCIDLE and DECIDLE */ -+#define ECC_IDLE BIT(0) -+ -+#define ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt) \ -+ ((tpoecs) << ACCCON_POECS_S | (tprecs) << ACCCON_PRECS_S | \ -+ (tc2r) << ACCCON_C2R_S | (tw2r) << ACCCON_W2R_S | \ -+ (twh) << ACCCON_WH_S | (twst) << ACCCON_WST_S | (trlt)) -+ -+#define MASTER_STA_MASK (MAS_ADDR | MAS_RD | MAS_WR | \ -+ MAS_RDDLY) -+#define NFI_RESET_TIMEOUT 1000000 -+#define NFI_CORE_TIMEOUT 500000 -+#define ECC_ENGINE_TIMEOUT 500000 -+ -+#define ECC_SECTOR_SIZE 512 -+#define ECC_PARITY_BITS 13 -+ -+#define NFI_FDM_SIZE 8 -+ -+#define MT7621_NFC_NAME "mt7621-nand" -+ -+struct mt7621_nfc { -+ struct nand_controller controller; -+ struct nand_chip nand; -+ struct clk *nfi_clk; -+ struct device *dev; -+ -+ void __iomem *nfi_regs; -+ void __iomem *ecc_regs; -+ -+ u32 spare_per_sector; -+}; -+ -+static const u16 mt7621_nfi_page_size[] = { SZ_512, SZ_2K, SZ_4K }; -+static const u8 mt7621_nfi_spare_size[] = { 16, 26, 27, 28 }; -+static const u8 mt7621_ecc_strength[] = { 4, 6, 8, 10, 12 }; -+ -+static inline u32 nfi_read32(struct mt7621_nfc *nfc, u32 reg) -+{ -+ return readl(nfc->nfi_regs + reg); -+} -+ -+static inline void nfi_write32(struct mt7621_nfc *nfc, u32 reg, u32 val) -+{ -+ writel(val, nfc->nfi_regs + reg); -+} -+ -+static inline u16 nfi_read16(struct mt7621_nfc *nfc, u32 reg) -+{ -+ return readw(nfc->nfi_regs + reg); -+} -+ -+static inline void nfi_write16(struct mt7621_nfc *nfc, u32 reg, u16 val) -+{ -+ writew(val, nfc->nfi_regs + reg); -+} -+ -+static inline void ecc_write16(struct mt7621_nfc *nfc, u32 reg, u16 val) -+{ -+ writew(val, nfc->ecc_regs + reg); -+} -+ -+static inline u32 
ecc_read32(struct mt7621_nfc *nfc, u32 reg) -+{ -+ return readl(nfc->ecc_regs + reg); -+} -+ -+static inline void ecc_write32(struct mt7621_nfc *nfc, u32 reg, u32 val) -+{ -+ return writel(val, nfc->ecc_regs + reg); -+} -+ -+static inline u8 *oob_fdm_ptr(struct nand_chip *nand, int sect) -+{ -+ return nand->oob_poi + sect * NFI_FDM_SIZE; -+} -+ -+static inline u8 *oob_ecc_ptr(struct mt7621_nfc *nfc, int sect) -+{ -+ struct nand_chip *nand = &nfc->nand; -+ -+ return nand->oob_poi + nand->ecc.steps * NFI_FDM_SIZE + -+ sect * (nfc->spare_per_sector - NFI_FDM_SIZE); -+} -+ -+static inline u8 *page_data_ptr(struct nand_chip *nand, const u8 *buf, -+ int sect) -+{ -+ return (u8 *)buf + sect * nand->ecc.size; -+} -+ -+static int mt7621_ecc_wait_idle(struct mt7621_nfc *nfc, u32 reg) -+{ -+ struct device *dev = nfc->dev; -+ u32 val; -+ int ret; -+ -+ ret = readw_poll_timeout_atomic(nfc->ecc_regs + reg, val, -+ val & ECC_IDLE, 10, -+ ECC_ENGINE_TIMEOUT); -+ if (ret) { -+ dev_warn(dev, "ECC engine timed out entering idle mode\n"); -+ return -EIO; -+ } -+ -+ return 0; -+} -+ -+static int mt7621_ecc_decoder_wait_done(struct mt7621_nfc *nfc, u32 sect) -+{ -+ struct device *dev = nfc->dev; -+ u32 val; -+ int ret; -+ -+ ret = readw_poll_timeout_atomic(nfc->ecc_regs + ECC_DECDONE, val, -+ val & (1 << sect), 10, -+ ECC_ENGINE_TIMEOUT); -+ -+ if (ret) { -+ dev_warn(dev, "ECC decoder for sector %d timed out\n", -+ sect); -+ return -ETIMEDOUT; -+ } -+ -+ return 0; -+} -+ -+static void mt7621_ecc_encoder_op(struct mt7621_nfc *nfc, bool enable) -+{ -+ mt7621_ecc_wait_idle(nfc, ECC_ENCIDLE); -+ ecc_write16(nfc, ECC_ENCCON, enable ? ENC_EN : 0); -+} -+ -+static void mt7621_ecc_decoder_op(struct mt7621_nfc *nfc, bool enable) -+{ -+ mt7621_ecc_wait_idle(nfc, ECC_DECIDLE); -+ ecc_write16(nfc, ECC_DECCON, enable ? 
DEC_EN : 0); -+} -+ -+static int mt7621_ecc_correct_check(struct mt7621_nfc *nfc, u8 *sector_buf, -+ u8 *fdm_buf, u32 sect) -+{ -+ struct nand_chip *nand = &nfc->nand; -+ u32 decnum, num_error_bits, fdm_end_bits; -+ u32 error_locations, error_bit_loc; -+ u32 error_byte_pos, error_bit_pos; -+ int bitflips = 0; -+ u32 i; -+ -+ decnum = ecc_read32(nfc, ECC_DECENUM); -+ num_error_bits = (decnum >> (sect << ERRNUM_S)) & ERRNUM_M; -+ fdm_end_bits = (nand->ecc.size + NFI_FDM_SIZE) << 3; -+ -+ if (!num_error_bits) -+ return 0; -+ -+ if (num_error_bits == ERRNUM_M) -+ return -1; -+ -+ for (i = 0; i < num_error_bits; i++) { -+ error_locations = ecc_read32(nfc, ECC_DECEL(i / 2)); -+ error_bit_loc = (error_locations >> ((i % 2) * DEC_EL_ODD_S)) & -+ DEC_EL_M; -+ error_byte_pos = error_bit_loc >> DEC_EL_BYTE_POS_S; -+ error_bit_pos = error_bit_loc & DEC_EL_BIT_POS_M; -+ -+ if (error_bit_loc < (nand->ecc.size << 3)) { -+ if (sector_buf) { -+ sector_buf[error_byte_pos] ^= -+ (1 << error_bit_pos); -+ } -+ } else if (error_bit_loc < fdm_end_bits) { -+ if (fdm_buf) { -+ fdm_buf[error_byte_pos - nand->ecc.size] ^= -+ (1 << error_bit_pos); -+ } -+ } -+ -+ bitflips++; -+ } -+ -+ return bitflips; -+} -+ -+static int mt7621_nfc_wait_write_completion(struct mt7621_nfc *nfc, -+ struct nand_chip *nand) -+{ -+ struct device *dev = nfc->dev; -+ u16 val; -+ int ret; -+ -+ ret = readw_poll_timeout_atomic(nfc->nfi_regs + NFI_ADDRCNTR, val, -+ ((val & SEC_CNTR_M) >> SEC_CNTR_S) >= nand->ecc.steps, 10, -+ NFI_CORE_TIMEOUT); -+ -+ if (ret) { -+ dev_warn(dev, "NFI core write operation timed out\n"); -+ return -ETIMEDOUT; -+ } -+ -+ return ret; -+} -+ -+static void mt7621_nfc_hw_reset(struct mt7621_nfc *nfc) -+{ -+ u32 val; -+ int ret; -+ -+ /* reset all registers and force the NFI master to terminate */ -+ nfi_write16(nfc, NFI_CON, CON_FIFO_FLUSH | CON_NFI_RST); -+ -+ /* wait for the master to finish the last transaction */ -+ ret = readw_poll_timeout(nfc->nfi_regs + NFI_MASTER_STA, val, -+ !(val & 
MASTER_STA_MASK), 50, -+ NFI_RESET_TIMEOUT); -+ if (ret) { -+ dev_warn(nfc->dev, "Failed to reset NFI master in %dms\n", -+ NFI_RESET_TIMEOUT); -+ } -+ -+ /* ensure any status register affected by the NFI master is reset */ -+ nfi_write16(nfc, NFI_CON, CON_FIFO_FLUSH | CON_NFI_RST); -+ nfi_write16(nfc, NFI_STRDATA, 0); -+} -+ -+static inline void mt7621_nfc_hw_init(struct mt7621_nfc *nfc) -+{ -+ u32 acccon; -+ -+ /* -+ * CNRNB: nand ready/busy register -+ * ------------------------------- -+ * 7:4: timeout register for polling the NAND busy/ready signal -+ * 0 : poll the status of the busy/ready signal after [7:4]*16 cycles. -+ */ -+ nfi_write16(nfc, NFI_CNRNB, CB2R_TIME_M | STR_CNRNB); -+ -+ mt7621_nfc_hw_reset(nfc); -+ -+ /* Apply default access timing */ -+ acccon = ACCTIMING(ACCCON_POECS_DEF, ACCCON_PRECS_DEF, ACCCON_C2R_DEF, -+ ACCCON_W2R_DEF, ACCCON_WH_DEF, ACCCON_WST_DEF, -+ ACCCON_RLT_DEF); -+ -+ nfi_write32(nfc, NFI_ACCCON, acccon); -+} -+ -+static int mt7621_nfc_send_command(struct mt7621_nfc *nfc, u8 command) -+{ -+ struct device *dev = nfc->dev; -+ u32 val; -+ int ret; -+ -+ nfi_write32(nfc, NFI_CMD, command); -+ -+ ret = readl_poll_timeout_atomic(nfc->nfi_regs + NFI_STA, val, -+ !(val & STA_CMD), 10, -+ NFI_CORE_TIMEOUT); -+ if (ret) { -+ dev_warn(dev, "NFI core timed out entering command mode\n"); -+ return -EIO; -+ } -+ -+ return 0; -+} -+ -+static int mt7621_nfc_send_address_byte(struct mt7621_nfc *nfc, int addr) -+{ -+ struct device *dev = nfc->dev; -+ u32 val; -+ int ret; -+ -+ nfi_write32(nfc, NFI_COLADDR, addr); -+ nfi_write32(nfc, NFI_ROWADDR, 0); -+ nfi_write16(nfc, NFI_ADDRNOB, 1); -+ -+ ret = readl_poll_timeout_atomic(nfc->nfi_regs + NFI_STA, val, -+ !(val & STA_ADDR), 10, -+ NFI_CORE_TIMEOUT); -+ if (ret) { -+ dev_warn(dev, "NFI core timed out entering address mode\n"); -+ return -EIO; -+ } -+ -+ return 0; -+} -+ -+static int mt7621_nfc_send_address(struct mt7621_nfc *nfc, const u8 *addr, -+ unsigned int naddrs) -+{ -+ int ret; -+ -+ while 
(naddrs) { -+ ret = mt7621_nfc_send_address_byte(nfc, *addr); -+ if (ret) -+ return ret; -+ -+ addr++; -+ naddrs--; -+ } -+ -+ return 0; -+} -+ -+static void mt7621_nfc_wait_pio_ready(struct mt7621_nfc *nfc) -+{ -+ struct device *dev = nfc->dev; -+ int ret; -+ u16 val; -+ -+ ret = readw_poll_timeout_atomic(nfc->nfi_regs + NFI_PIO_DIRDY, val, -+ val & PIO_DIRDY, 10, -+ NFI_CORE_TIMEOUT); -+ if (ret < 0) -+ dev_err(dev, "NFI core PIO mode not ready\n"); -+} -+ -+static u32 mt7621_nfc_pio_read(struct mt7621_nfc *nfc, bool br) -+{ -+ u32 reg; -+ -+ /* after each byte read, the NFI_STA reg is reset by the hardware */ -+ reg = (nfi_read32(nfc, NFI_STA) & STA_NFI_FSM_M) >> STA_NFI_FSM_S; -+ if (reg != STA_FSM_CUSTOM_DATA) { -+ reg = nfi_read16(nfc, NFI_CNFG); -+ reg |= CNFG_READ_MODE | CNFG_BYTE_RW; -+ if (!br) -+ reg &= ~CNFG_BYTE_RW; -+ nfi_write16(nfc, NFI_CNFG, reg); -+ -+ /* -+ * set to max sector to allow the HW to continue reading over -+ * unaligned accesses -+ */ -+ nfi_write16(nfc, NFI_CON, CON_NFI_SEC_M | CON_NFI_BRD); -+ -+ /* trigger to fetch data */ -+ nfi_write16(nfc, NFI_STRDATA, STR_DATA); -+ } -+ -+ mt7621_nfc_wait_pio_ready(nfc); -+ -+ return nfi_read32(nfc, NFI_DATAR); -+} -+ -+static void mt7621_nfc_read_data(struct mt7621_nfc *nfc, u8 *buf, u32 len) -+{ -+ while (((uintptr_t)buf & 3) && len) { -+ *buf = mt7621_nfc_pio_read(nfc, true); -+ buf++; -+ len--; -+ } -+ -+ while (len >= 4) { -+ *(u32 *)buf = mt7621_nfc_pio_read(nfc, false); -+ buf += 4; -+ len -= 4; -+ } -+ -+ while (len) { -+ *buf = mt7621_nfc_pio_read(nfc, true); -+ buf++; -+ len--; -+ } -+} -+ -+static void mt7621_nfc_read_data_discard(struct mt7621_nfc *nfc, u32 len) -+{ -+ while (len >= 4) { -+ mt7621_nfc_pio_read(nfc, false); -+ len -= 4; -+ } -+ -+ while (len) { -+ mt7621_nfc_pio_read(nfc, true); -+ len--; -+ } -+} -+ -+static void mt7621_nfc_pio_write(struct mt7621_nfc *nfc, u32 val, bool bw) -+{ -+ u32 reg; -+ -+ reg = (nfi_read32(nfc, NFI_STA) & STA_NFI_FSM_M) >> STA_NFI_FSM_S; -+ 
if (reg != STA_FSM_CUSTOM_DATA) { -+ reg = nfi_read16(nfc, NFI_CNFG); -+ reg &= ~(CNFG_READ_MODE | CNFG_BYTE_RW); -+ if (bw) -+ reg |= CNFG_BYTE_RW; -+ nfi_write16(nfc, NFI_CNFG, reg); -+ -+ nfi_write16(nfc, NFI_CON, CON_NFI_SEC_M | CON_NFI_BWR); -+ nfi_write16(nfc, NFI_STRDATA, STR_DATA); -+ } -+ -+ mt7621_nfc_wait_pio_ready(nfc); -+ nfi_write32(nfc, NFI_DATAW, val); -+} -+ -+static void mt7621_nfc_write_data(struct mt7621_nfc *nfc, const u8 *buf, -+ u32 len) -+{ -+ while (((uintptr_t)buf & 3) && len) { -+ mt7621_nfc_pio_write(nfc, *buf, true); -+ buf++; -+ len--; -+ } -+ -+ while (len >= 4) { -+ mt7621_nfc_pio_write(nfc, *(const u32 *)buf, false); -+ buf += 4; -+ len -= 4; -+ } -+ -+ while (len) { -+ mt7621_nfc_pio_write(nfc, *buf, true); -+ buf++; -+ len--; -+ } -+} -+ -+static void mt7621_nfc_write_data_empty(struct mt7621_nfc *nfc, u32 len) -+{ -+ while (len >= 4) { -+ mt7621_nfc_pio_write(nfc, 0xffffffff, false); -+ len -= 4; -+ } -+ -+ while (len) { -+ mt7621_nfc_pio_write(nfc, 0xff, true); -+ len--; -+ } -+} -+ -+static int mt7621_nfc_dev_ready(struct mt7621_nfc *nfc, -+ unsigned int timeout_ms) -+{ -+ u32 val; -+ -+ return readl_poll_timeout_atomic(nfc->nfi_regs + NFI_STA, val, -+ !(val & STA_BUSY), 10, -+ timeout_ms * 1000); -+} -+ -+static int mt7621_nfc_exec_instr(struct nand_chip *nand, -+ const struct nand_op_instr *instr) -+{ -+ struct mt7621_nfc *nfc = nand_get_controller_data(nand); -+ -+ switch (instr->type) { -+ case NAND_OP_CMD_INSTR: -+ mt7621_nfc_hw_reset(nfc); -+ nfi_write16(nfc, NFI_CNFG, CNFG_OP_CUSTOM << CNFG_OP_MODE_S); -+ return mt7621_nfc_send_command(nfc, instr->ctx.cmd.opcode); -+ case NAND_OP_ADDR_INSTR: -+ return mt7621_nfc_send_address(nfc, instr->ctx.addr.addrs, -+ instr->ctx.addr.naddrs); -+ case NAND_OP_DATA_IN_INSTR: -+ mt7621_nfc_read_data(nfc, instr->ctx.data.buf.in, -+ instr->ctx.data.len); -+ return 0; -+ case NAND_OP_DATA_OUT_INSTR: -+ mt7621_nfc_write_data(nfc, instr->ctx.data.buf.out, -+ instr->ctx.data.len); -+ return 
0; -+ case NAND_OP_WAITRDY_INSTR: -+ return mt7621_nfc_dev_ready(nfc, -+ instr->ctx.waitrdy.timeout_ms); -+ default: -+ WARN_ONCE(1, "unsupported NAND instruction type: %d\n", -+ instr->type); -+ -+ return -EINVAL; -+ } -+} -+ -+static int mt7621_nfc_exec_op(struct nand_chip *nand, -+ const struct nand_operation *op, bool check_only) -+{ -+ struct mt7621_nfc *nfc = nand_get_controller_data(nand); -+ int i, ret; -+ -+ if (check_only) -+ return 0; -+ -+ /* Only CS0 available */ -+ nfi_write16(nfc, NFI_CSEL, 0); -+ -+ for (i = 0; i < op->ninstrs; i++) { -+ ret = mt7621_nfc_exec_instr(nand, &op->instrs[i]); -+ if (ret) -+ return ret; -+ } -+ -+ return 0; -+} -+ -+static int mt7621_nfc_setup_interface(struct nand_chip *nand, int csline, -+ const struct nand_interface_config *conf) -+{ -+ struct mt7621_nfc *nfc = nand_get_controller_data(nand); -+ const struct nand_sdr_timings *timings; -+ u32 acccon, temp, rate, tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt; -+ -+ if (!nfc->nfi_clk) -+ return -ENOTSUPP; -+ -+ timings = nand_get_sdr_timings(conf); -+ if (IS_ERR(timings)) -+ return -ENOTSUPP; -+ -+ rate = clk_get_rate(nfc->nfi_clk); -+ -+ /* turn clock rate into KHZ */ -+ rate /= 1000; -+ -+ tpoecs = max(timings->tALH_min, timings->tCLH_min) / 1000; -+ tpoecs = DIV_ROUND_UP(tpoecs * rate, 1000000); -+ tpoecs = min_t(u32, tpoecs, ACCCON_POECS_MAX); -+ -+ tprecs = max(timings->tCLS_min, timings->tALS_min) / 1000; -+ tprecs = DIV_ROUND_UP(tprecs * rate, 1000000); -+ tprecs = min_t(u32, tprecs, ACCCON_PRECS_MAX); -+ -+ /* sdr interface has no tCR which means CE# low to RE# low */ -+ tc2r = 0; -+ -+ tw2r = timings->tWHR_min / 1000; -+ tw2r = DIV_ROUND_UP(tw2r * rate, 1000000); -+ tw2r = DIV_ROUND_UP(tw2r - 1, 2); -+ tw2r = min_t(u32, tw2r, ACCCON_W2R_MAX); -+ -+ twh = max(timings->tREH_min, timings->tWH_min) / 1000; -+ twh = DIV_ROUND_UP(twh * rate, 1000000) - 1; -+ twh = min_t(u32, twh, ACCCON_WH_MAX); -+ -+ /* Calculate real WE#/RE# hold time in nanosecond */ -+ temp = (twh + 
1) * 1000000 / rate; -+ /* nanosecond to picosecond */ -+ temp *= 1000; -+ -+ /* -+ * WE# low level time should be expaned to meet WE# pulse time -+ * and WE# cycle time at the same time. -+ */ -+ if (temp < timings->tWC_min) -+ twst = timings->tWC_min - temp; -+ else -+ twst = 0; -+ twst = max(timings->tWP_min, twst) / 1000; -+ twst = DIV_ROUND_UP(twst * rate, 1000000) - 1; -+ twst = min_t(u32, twst, ACCCON_WST_MAX); -+ -+ /* -+ * RE# low level time should be expaned to meet RE# pulse time -+ * and RE# cycle time at the same time. -+ */ -+ if (temp < timings->tRC_min) -+ trlt = timings->tRC_min - temp; -+ else -+ trlt = 0; -+ trlt = max(trlt, timings->tRP_min) / 1000; -+ trlt = DIV_ROUND_UP(trlt * rate, 1000000) - 1; -+ trlt = min_t(u32, trlt, ACCCON_RLT_MAX); -+ -+ if (csline == NAND_DATA_IFACE_CHECK_ONLY) { -+ if (twst < ACCCON_WST_MIN || trlt < ACCCON_RLT_MIN) -+ return -ENOTSUPP; -+ } -+ -+ acccon = ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt); -+ -+ dev_info(nfc->dev, "Using programmed access timing: %08x\n", acccon); -+ -+ nfi_write32(nfc, NFI_ACCCON, acccon); -+ -+ return 0; -+} -+ -+static int mt7621_nfc_calc_ecc_strength(struct mt7621_nfc *nfc, -+ u32 avail_ecc_bytes) -+{ -+ struct nand_chip *nand = &nfc->nand; -+ struct mtd_info *mtd = nand_to_mtd(nand); -+ u32 strength; -+ int i; -+ -+ strength = avail_ecc_bytes * 8 / ECC_PARITY_BITS; -+ -+ /* Find the closest supported ecc strength */ -+ for (i = ARRAY_SIZE(mt7621_ecc_strength) - 1; i >= 0; i--) { -+ if (mt7621_ecc_strength[i] <= strength) -+ break; -+ } -+ -+ if (unlikely(i < 0)) { -+ dev_err(nfc->dev, "OOB size (%u) is not supported\n", -+ mtd->oobsize); -+ return -EINVAL; -+ } -+ -+ nand->ecc.strength = mt7621_ecc_strength[i]; -+ nand->ecc.bytes = -+ DIV_ROUND_UP(nand->ecc.strength * ECC_PARITY_BITS, 8); -+ -+ dev_info(nfc->dev, "ECC strength adjusted to %u bits\n", -+ nand->ecc.strength); -+ -+ return i; -+} -+ -+static int mt7621_nfc_set_spare_per_sector(struct mt7621_nfc *nfc) -+{ -+ 
struct nand_chip *nand = &nfc->nand; -+ struct mtd_info *mtd = nand_to_mtd(nand); -+ u32 size; -+ int i; -+ -+ size = nand->ecc.bytes + NFI_FDM_SIZE; -+ -+ /* Find the closest supported spare size */ -+ for (i = 0; i < ARRAY_SIZE(mt7621_nfi_spare_size); i++) { -+ if (mt7621_nfi_spare_size[i] >= size) -+ break; -+ } -+ -+ if (unlikely(i >= ARRAY_SIZE(mt7621_nfi_spare_size))) { -+ dev_err(nfc->dev, "OOB size (%u) is not supported\n", -+ mtd->oobsize); -+ return -EINVAL; -+ } -+ -+ nfc->spare_per_sector = mt7621_nfi_spare_size[i]; -+ -+ return i; -+} -+ -+static int mt7621_nfc_ecc_init(struct mt7621_nfc *nfc) -+{ -+ struct nand_chip *nand = &nfc->nand; -+ struct mtd_info *mtd = nand_to_mtd(nand); -+ u32 spare_per_sector, encode_block_size, decode_block_size; -+ u32 ecc_enccfg, ecc_deccfg; -+ int ecc_cap; -+ -+ /* Only hardware ECC mode is supported */ -+ if (nand->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST) { -+ dev_err(nfc->dev, "Only hardware ECC mode is supported\n"); -+ return -EINVAL; -+ } -+ -+ nand->ecc.size = ECC_SECTOR_SIZE; -+ nand->ecc.steps = mtd->writesize / nand->ecc.size; -+ -+ spare_per_sector = mtd->oobsize / nand->ecc.steps; -+ -+ ecc_cap = mt7621_nfc_calc_ecc_strength(nfc, -+ spare_per_sector - NFI_FDM_SIZE); -+ if (ecc_cap < 0) -+ return ecc_cap; -+ -+ /* Sector + FDM */ -+ encode_block_size = (nand->ecc.size + NFI_FDM_SIZE) * 8; -+ ecc_enccfg = ecc_cap | (ENC_MODE_NFI << ENC_MODE_S) | -+ (encode_block_size << ENC_CNFG_MSG_S); -+ -+ /* Sector + FDM + ECC parity bits */ -+ decode_block_size = ((nand->ecc.size + NFI_FDM_SIZE) * 8) + -+ nand->ecc.strength * ECC_PARITY_BITS; -+ ecc_deccfg = ecc_cap | (DEC_MODE_NFI << DEC_MODE_S) | -+ (decode_block_size << DEC_CS_S) | -+ (DEC_CON_EL << DEC_CON_S) | DEC_EMPTY_EN; -+ -+ mt7621_ecc_encoder_op(nfc, false); -+ ecc_write32(nfc, ECC_ENCCNFG, ecc_enccfg); -+ -+ mt7621_ecc_decoder_op(nfc, false); -+ ecc_write32(nfc, ECC_DECCNFG, ecc_deccfg); -+ -+ return 0; -+} -+ -+static int 
mt7621_nfc_set_page_format(struct mt7621_nfc *nfc) -+{ -+ struct nand_chip *nand = &nfc->nand; -+ struct mtd_info *mtd = nand_to_mtd(nand); -+ int i, spare_size; -+ u32 pagefmt; -+ -+ spare_size = mt7621_nfc_set_spare_per_sector(nfc); -+ if (spare_size < 0) -+ return spare_size; -+ -+ for (i = 0; i < ARRAY_SIZE(mt7621_nfi_page_size); i++) { -+ if (mt7621_nfi_page_size[i] == mtd->writesize) -+ break; -+ } -+ -+ if (unlikely(i >= ARRAY_SIZE(mt7621_nfi_page_size))) { -+ dev_err(nfc->dev, "Page size (%u) is not supported\n", -+ mtd->writesize); -+ return -EINVAL; -+ } -+ -+ pagefmt = i | (spare_size << PAGEFMT_SPARE_S) | -+ (NFI_FDM_SIZE << PAGEFMT_FDM_S) | -+ (NFI_FDM_SIZE << PAGEFMT_FDM_ECC_S); -+ -+ nfi_write16(nfc, NFI_PAGEFMT, pagefmt); -+ -+ return 0; -+} -+ -+static int mt7621_nfc_attach_chip(struct nand_chip *nand) -+{ -+ struct mt7621_nfc *nfc = nand_get_controller_data(nand); -+ int ret; -+ -+ if (nand->options & NAND_BUSWIDTH_16) { -+ dev_err(nfc->dev, "16-bit buswidth is not supported"); -+ return -EINVAL; -+ } -+ -+ ret = mt7621_nfc_ecc_init(nfc); -+ if (ret) -+ return ret; -+ -+ return mt7621_nfc_set_page_format(nfc); -+} -+ -+static const struct nand_controller_ops mt7621_nfc_controller_ops = { -+ .attach_chip = mt7621_nfc_attach_chip, -+ .exec_op = mt7621_nfc_exec_op, -+ .setup_interface = mt7621_nfc_setup_interface, -+}; -+ -+static int mt7621_nfc_ooblayout_free(struct mtd_info *mtd, int section, -+ struct mtd_oob_region *oob_region) -+{ -+ struct nand_chip *nand = mtd_to_nand(mtd); -+ -+ if (section >= nand->ecc.steps) -+ return -ERANGE; -+ -+ oob_region->length = NFI_FDM_SIZE - 1; -+ oob_region->offset = section * NFI_FDM_SIZE + 1; -+ -+ return 0; -+} -+ -+static int mt7621_nfc_ooblayout_ecc(struct mtd_info *mtd, int section, -+ struct mtd_oob_region *oob_region) -+{ -+ struct nand_chip *nand = mtd_to_nand(mtd); -+ -+ if (section) -+ return -ERANGE; -+ -+ oob_region->offset = NFI_FDM_SIZE * nand->ecc.steps; -+ oob_region->length = mtd->oobsize - 
oob_region->offset; -+ -+ return 0; -+} -+ -+static const struct mtd_ooblayout_ops mt7621_nfc_ooblayout_ops = { -+ .free = mt7621_nfc_ooblayout_free, -+ .ecc = mt7621_nfc_ooblayout_ecc, -+}; -+ -+static void mt7621_nfc_write_fdm(struct mt7621_nfc *nfc) -+{ -+ struct nand_chip *nand = &nfc->nand; -+ u32 vall, valm; -+ u8 *oobptr; -+ int i, j; -+ -+ for (i = 0; i < nand->ecc.steps; i++) { -+ vall = 0; -+ valm = 0; -+ oobptr = oob_fdm_ptr(nand, i); -+ -+ for (j = 0; j < 4; j++) -+ vall |= (u32)oobptr[j] << (j * 8); -+ -+ for (j = 0; j < 4; j++) -+ valm |= (u32)oobptr[j + 4] << (j * 8); -+ -+ nfi_write32(nfc, NFI_FDML(i), vall); -+ nfi_write32(nfc, NFI_FDMM(i), valm); -+ } -+} -+ -+static void mt7621_nfc_read_sector_fdm(struct mt7621_nfc *nfc, u32 sect) -+{ -+ struct nand_chip *nand = &nfc->nand; -+ u32 vall, valm; -+ u8 *oobptr; -+ int i; -+ -+ vall = nfi_read32(nfc, NFI_FDML(sect)); -+ valm = nfi_read32(nfc, NFI_FDMM(sect)); -+ oobptr = oob_fdm_ptr(nand, sect); -+ -+ for (i = 0; i < 4; i++) -+ oobptr[i] = (vall >> (i * 8)) & 0xff; -+ -+ for (i = 0; i < 4; i++) -+ oobptr[i + 4] = (valm >> (i * 8)) & 0xff; -+} -+ -+static int mt7621_nfc_read_page_hwecc(struct nand_chip *nand, uint8_t *buf, -+ int oob_required, int page) -+{ -+ struct mt7621_nfc *nfc = nand_get_controller_data(nand); -+ struct mtd_info *mtd = nand_to_mtd(nand); -+ int bitflips = 0; -+ int rc, i; -+ -+ nand_read_page_op(nand, page, 0, NULL, 0); -+ -+ nfi_write16(nfc, NFI_CNFG, (CNFG_OP_CUSTOM << CNFG_OP_MODE_S) | -+ CNFG_READ_MODE | CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN); -+ -+ mt7621_ecc_decoder_op(nfc, true); -+ -+ nfi_write16(nfc, NFI_CON, -+ CON_NFI_BRD | (nand->ecc.steps << CON_NFI_SEC_S)); -+ -+ for (i = 0; i < nand->ecc.steps; i++) { -+ if (buf) -+ mt7621_nfc_read_data(nfc, page_data_ptr(nand, buf, i), -+ nand->ecc.size); -+ else -+ mt7621_nfc_read_data_discard(nfc, nand->ecc.size); -+ -+ rc = mt7621_ecc_decoder_wait_done(nfc, i); -+ -+ mt7621_nfc_read_sector_fdm(nfc, i); -+ -+ if (rc < 0) { -+ 
bitflips = -EIO; -+ continue; -+ } -+ -+ rc = mt7621_ecc_correct_check(nfc, -+ buf ? page_data_ptr(nand, buf, i) : NULL, -+ oob_fdm_ptr(nand, i), i); -+ -+ if (rc < 0) { -+ dev_warn(nfc->dev, -+ "Uncorrectable ECC error at page %d.%d\n", -+ page, i); -+ bitflips = -EBADMSG; -+ mtd->ecc_stats.failed++; -+ } else if (bitflips >= 0) { -+ bitflips += rc; -+ mtd->ecc_stats.corrected += rc; -+ } -+ } -+ -+ mt7621_ecc_decoder_op(nfc, false); -+ -+ nfi_write16(nfc, NFI_CON, 0); -+ -+ return bitflips; -+} -+ -+static int mt7621_nfc_read_page_raw(struct nand_chip *nand, uint8_t *buf, -+ int oob_required, int page) -+{ -+ struct mt7621_nfc *nfc = nand_get_controller_data(nand); -+ int i; -+ -+ nand_read_page_op(nand, page, 0, NULL, 0); -+ -+ nfi_write16(nfc, NFI_CNFG, (CNFG_OP_CUSTOM << CNFG_OP_MODE_S) | -+ CNFG_READ_MODE); -+ -+ nfi_write16(nfc, NFI_CON, -+ CON_NFI_BRD | (nand->ecc.steps << CON_NFI_SEC_S)); -+ -+ for (i = 0; i < nand->ecc.steps; i++) { -+ /* Read data */ -+ if (buf) -+ mt7621_nfc_read_data(nfc, page_data_ptr(nand, buf, i), -+ nand->ecc.size); -+ else -+ mt7621_nfc_read_data_discard(nfc, nand->ecc.size); -+ -+ /* Read FDM */ -+ mt7621_nfc_read_data(nfc, oob_fdm_ptr(nand, i), NFI_FDM_SIZE); -+ -+ /* Read ECC parity data */ -+ mt7621_nfc_read_data(nfc, oob_ecc_ptr(nfc, i), -+ nfc->spare_per_sector - NFI_FDM_SIZE); -+ } -+ -+ nfi_write16(nfc, NFI_CON, 0); -+ -+ return 0; -+} -+ -+static int mt7621_nfc_read_oob_hwecc(struct nand_chip *nand, int page) -+{ -+ return mt7621_nfc_read_page_hwecc(nand, NULL, 1, page); -+} -+ -+static int mt7621_nfc_read_oob_raw(struct nand_chip *nand, int page) -+{ -+ return mt7621_nfc_read_page_raw(nand, NULL, 1, page); -+} -+ -+static int mt7621_nfc_check_empty_page(struct nand_chip *nand, const u8 *buf) -+{ -+ struct mtd_info *mtd = nand_to_mtd(nand); -+ uint32_t i, j; -+ u8 *oobptr; -+ -+ if (buf) { -+ for (i = 0; i < mtd->writesize; i++) -+ if (buf[i] != 0xff) -+ return 0; -+ } -+ -+ for (i = 0; i < nand->ecc.steps; i++) { -+ 
oobptr = oob_fdm_ptr(nand, i); -+ for (j = 0; j < NFI_FDM_SIZE; j++) -+ if (oobptr[j] != 0xff) -+ return 0; -+ } -+ -+ return 1; -+} -+ -+static int mt7621_nfc_write_page_hwecc(struct nand_chip *nand, -+ const uint8_t *buf, int oob_required, -+ int page) -+{ -+ struct mt7621_nfc *nfc = nand_get_controller_data(nand); -+ struct mtd_info *mtd = nand_to_mtd(nand); -+ -+ if (mt7621_nfc_check_empty_page(nand, buf)) { -+ /* -+ * MT7621 ECC engine always generates parity code for input -+ * pages, even for empty pages. Doing so will write back ECC -+ * parity code to the oob region, which means such pages will -+ * no longer be empty pages. -+ * -+ * To avoid this, stop write operation if current page is an -+ * empty page. -+ */ -+ return 0; -+ } -+ -+ nand_prog_page_begin_op(nand, page, 0, NULL, 0); -+ -+ nfi_write16(nfc, NFI_CNFG, (CNFG_OP_CUSTOM << CNFG_OP_MODE_S) | -+ CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN); -+ -+ mt7621_ecc_encoder_op(nfc, true); -+ -+ mt7621_nfc_write_fdm(nfc); -+ -+ nfi_write16(nfc, NFI_CON, -+ CON_NFI_BWR | (nand->ecc.steps << CON_NFI_SEC_S)); -+ -+ if (buf) -+ mt7621_nfc_write_data(nfc, buf, mtd->writesize); -+ else -+ mt7621_nfc_write_data_empty(nfc, mtd->writesize); -+ -+ mt7621_nfc_wait_write_completion(nfc, nand); -+ -+ mt7621_ecc_encoder_op(nfc, false); -+ -+ nfi_write16(nfc, NFI_CON, 0); -+ -+ return nand_prog_page_end_op(nand); -+} -+ -+static int mt7621_nfc_write_page_raw(struct nand_chip *nand, -+ const uint8_t *buf, int oob_required, -+ int page) -+{ -+ struct mt7621_nfc *nfc = nand_get_controller_data(nand); -+ int i; -+ -+ nand_prog_page_begin_op(nand, page, 0, NULL, 0); -+ -+ nfi_write16(nfc, NFI_CNFG, (CNFG_OP_CUSTOM << CNFG_OP_MODE_S)); -+ -+ nfi_write16(nfc, NFI_CON, -+ CON_NFI_BWR | (nand->ecc.steps << CON_NFI_SEC_S)); -+ -+ for (i = 0; i < nand->ecc.steps; i++) { -+ /* Write data */ -+ if (buf) -+ mt7621_nfc_write_data(nfc, page_data_ptr(nand, buf, i), -+ nand->ecc.size); -+ else -+ mt7621_nfc_write_data_empty(nfc, nand->ecc.size); 
-+ -+ /* Write FDM */ -+ mt7621_nfc_write_data(nfc, oob_fdm_ptr(nand, i), -+ NFI_FDM_SIZE); -+ -+ /* Write dummy ECC parity data */ -+ mt7621_nfc_write_data_empty(nfc, nfc->spare_per_sector - -+ NFI_FDM_SIZE); -+ } -+ -+ mt7621_nfc_wait_write_completion(nfc, nand); -+ -+ nfi_write16(nfc, NFI_CON, 0); -+ -+ return nand_prog_page_end_op(nand); -+} -+ -+static int mt7621_nfc_write_oob_hwecc(struct nand_chip *nand, int page) -+{ -+ return mt7621_nfc_write_page_hwecc(nand, NULL, 1, page); -+} -+ -+static int mt7621_nfc_write_oob_raw(struct nand_chip *nand, int page) -+{ -+ return mt7621_nfc_write_page_raw(nand, NULL, 1, page); -+} -+ -+static int mt7621_nfc_init_chip(struct mt7621_nfc *nfc) -+{ -+ struct nand_chip *nand = &nfc->nand; -+ struct mtd_info *mtd; -+ int ret; -+ -+ nand->controller = &nfc->controller; -+ nand_set_controller_data(nand, (void *)nfc); -+ nand_set_flash_node(nand, nfc->dev->of_node); -+ -+ nand->options |= NAND_USES_DMA | NAND_NO_SUBPAGE_WRITE; -+ if (!nfc->nfi_clk) -+ nand->options |= NAND_KEEP_TIMINGS; -+ -+ nand->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST; -+ nand->ecc.read_page = mt7621_nfc_read_page_hwecc; -+ nand->ecc.read_page_raw = mt7621_nfc_read_page_raw; -+ nand->ecc.write_page = mt7621_nfc_write_page_hwecc; -+ nand->ecc.write_page_raw = mt7621_nfc_write_page_raw; -+ nand->ecc.read_oob = mt7621_nfc_read_oob_hwecc; -+ nand->ecc.read_oob_raw = mt7621_nfc_read_oob_raw; -+ nand->ecc.write_oob = mt7621_nfc_write_oob_hwecc; -+ nand->ecc.write_oob_raw = mt7621_nfc_write_oob_raw; -+ -+ mtd = nand_to_mtd(nand); -+ mtd->owner = THIS_MODULE; -+ mtd->dev.parent = nfc->dev; -+ mtd->name = MT7621_NFC_NAME; -+ mtd_set_ooblayout(mtd, &mt7621_nfc_ooblayout_ops); -+ -+ mt7621_nfc_hw_init(nfc); -+ -+ ret = nand_scan(nand, 1); -+ if (ret) -+ return ret; -+ -+ ret = mtd_device_register(mtd, NULL, 0); -+ if (ret) { -+ dev_err(nfc->dev, "Failed to register MTD: %d\n", ret); -+ nand_cleanup(nand); -+ return ret; -+ } -+ -+ return 0; -+} -+ -+static int 
mt7621_nfc_probe(struct platform_device *pdev) -+{ -+ struct device *dev = &pdev->dev; -+ struct mt7621_nfc *nfc; -+ struct resource *res; -+ int ret; -+ -+ nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL); -+ if (!nfc) -+ return -ENOMEM; -+ -+ nand_controller_init(&nfc->controller); -+ nfc->controller.ops = &mt7621_nfc_controller_ops; -+ nfc->dev = dev; -+ -+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nfi"); -+ nfc->nfi_regs = devm_ioremap_resource(dev, res); -+ if (IS_ERR(nfc->nfi_regs)) { -+ ret = PTR_ERR(nfc->nfi_regs); -+ return ret; -+ } -+ -+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ecc"); -+ nfc->ecc_regs = devm_ioremap_resource(dev, res); -+ if (IS_ERR(nfc->ecc_regs)) { -+ ret = PTR_ERR(nfc->ecc_regs); -+ return ret; -+ } -+ -+ nfc->nfi_clk = devm_clk_get(dev, "nfi_clk"); -+ if (IS_ERR(nfc->nfi_clk)) { -+ dev_warn(dev, "nfi clk not provided\n"); -+ nfc->nfi_clk = NULL; -+ } else { -+ ret = clk_prepare_enable(nfc->nfi_clk); -+ if (ret) { -+ dev_err(dev, "Failed to enable nfi core clock\n"); -+ return ret; -+ } -+ } -+ -+ platform_set_drvdata(pdev, nfc); -+ -+ ret = mt7621_nfc_init_chip(nfc); -+ if (ret) { -+ dev_err(dev, "Failed to initialize nand chip\n"); -+ goto clk_disable; -+ } -+ -+ return 0; -+ -+clk_disable: -+ clk_disable_unprepare(nfc->nfi_clk); -+ -+ return ret; -+} -+ -+static int mt7621_nfc_remove(struct platform_device *pdev) -+{ -+ struct mt7621_nfc *nfc = platform_get_drvdata(pdev); -+ struct nand_chip *nand = &nfc->nand; -+ struct mtd_info *mtd = nand_to_mtd(nand); -+ -+ mtd_device_unregister(mtd); -+ nand_cleanup(nand); -+ clk_disable_unprepare(nfc->nfi_clk); -+ -+ return 0; -+} -+ -+static const struct of_device_id mt7621_nfc_id_table[] = { -+ { .compatible = "mediatek,mt7621-nfc" }, -+ { }, -+}; -+MODULE_DEVICE_TABLE(of, match); -+ -+static struct platform_driver mt7621_nfc_driver = { -+ .probe = mt7621_nfc_probe, -+ .remove = mt7621_nfc_remove, -+ .driver = { -+ .name = MT7621_NFC_NAME, -+ .owner = 
THIS_MODULE, -+ .of_match_table = mt7621_nfc_id_table, -+ }, -+}; -+module_platform_driver(mt7621_nfc_driver); -+ -+MODULE_LICENSE("GPL"); -+MODULE_AUTHOR("Weijie Gao "); -+MODULE_DESCRIPTION("MediaTek MT7621 NAND Flash Controller driver"); diff --git a/target/linux/sunxi/cortexa7/config-5.10 b/target/linux/sunxi/cortexa7/config-5.10 index c87c5fc0c7..d931782d5f 100644 --- a/target/linux/sunxi/cortexa7/config-5.10 +++ b/target/linux/sunxi/cortexa7/config-5.10 @@ -5,6 +5,7 @@ CONFIG_GRO_CELLS=y # CONFIG_MACH_SUN4I is not set # CONFIG_MACH_SUN5I is not set CONFIG_MDIO_BUS_MUX=y +CONFIG_MICREL_PHY=y CONFIG_MUSB_PIO_ONLY=y CONFIG_NET_DEVLINK=y CONFIG_NET_DSA=y diff --git a/target/linux/x86/base-files/etc/board.d/01_leds b/target/linux/x86/base-files/etc/board.d/01_leds index 42974d8290..74ad2d59fe 100644 --- a/target/linux/x86/base-files/etc/board.d/01_leds +++ b/target/linux/x86/base-files/etc/board.d/01_leds @@ -9,7 +9,7 @@ board_config_update case "$(board_name)" in cisco-mx100-hw) ucidef_set_led_usbport "usb" "USB" "mx100:green:usb" "1-1-port2" - ucidef_set_led_default "diag" "DIAG" "mx100:green:ha" "1" + ucidef_set_led_default "diag" "DIAG" "mx100:green:tricolor" "1" ;; pc-engines-apu1|pc-engines-apu2|pc-engines-apu3) ucidef_set_led_netdev "wan" "WAN" "apu:green:3" "eth0" diff --git a/target/linux/x86/base-files/etc/board.d/02_network b/target/linux/x86/base-files/etc/board.d/02_network index 97f002ba08..e4451461db 100644 --- a/target/linux/x86/base-files/etc/board.d/02_network +++ b/target/linux/x86/base-files/etc/board.d/02_network @@ -24,7 +24,8 @@ sophos-sg-105wr2|sophos-xg-105wr2| \ sophos-sg-115r1|sophos-xg-115r1| \ sophos-sg-115wr1|sophos-xg-115wr1| \ sophos-sg-115r2|sophos-xg-115r2| \ -sophos-sg-115wr2|sophos-xg-115wr2) +sophos-sg-115wr2|sophos-xg-115wr2| \ +sophos-xg-85*|sophos-xg-86*) ucidef_set_interfaces_lan_wan "eth0 eth2 eth3" "eth1" ;; sophos-sg-125r1|sophos-xg-125r1| \ diff --git a/target/linux/x86/base-files/lib/preinit/01_sysinfo 
b/target/linux/x86/base-files/lib/preinit/01_sysinfo index 97e8e003f3..0714a9a39f 100644 --- a/target/linux/x86/base-files/lib/preinit/01_sysinfo +++ b/target/linux/x86/base-files/lib/preinit/01_sysinfo @@ -38,7 +38,7 @@ do_sysinfo_x86() { local product_version product_version="$(cat /sys/devices/virtual/dmi/id/product_version 2>/dev/null)" case "$product_version" in - 105*|115*|125*|135*) + 105*|115*|125*|135*|85*|86*) product="${product}-${product_version}" break ;; diff --git a/target/linux/x86/modules.mk b/target/linux/x86/modules.mk index 4005ee97ef..0071ebda41 100644 --- a/target/linux/x86/modules.mk +++ b/target/linux/x86/modules.mk @@ -88,7 +88,7 @@ define KernelPackage/meraki-mx100 SUBMENU:=$(OTHER_MENU) TITLE:=Cisco Meraki MX100 Platform Driver DEPENDS:=@TARGET_x86 @!LINUX_5_4 +kmod-tg3 +kmod-gpio-button-hotplug +kmod-leds-gpio \ - +kmod-usb-ledtrig-usbport +kmod-itco-wdt + +kmod-usb-ledtrig-usbport +nu801 +kmod-itco-wdt KCONFIG:=CONFIG_MERAKI_MX100 FILES:=$(LINUX_DIR)/drivers/platform/x86/meraki-mx100.ko AUTOLOAD:=$(call AutoLoad,60,meraki-mx100,1) diff --git a/tools/zlib/Makefile b/tools/zlib/Makefile index 279851f758..c83cc6d55b 100644 --- a/tools/zlib/Makefile +++ b/tools/zlib/Makefile @@ -9,7 +9,7 @@ include $(TOPDIR)/rules.mk PKG_NAME:=zlib PKG_VERSION:=1.2.11 -PKG_RELEASE:=1 +PKG_RELEASE:=2 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz PKG_SOURCE_URL:=@SF/libpng http://www.zlib.net diff --git a/tools/zlib/patches/006-fix-compressor-crash-on-certain-inputs.patch b/tools/zlib/patches/006-fix-compressor-crash-on-certain-inputs.patch new file mode 100644 index 0000000000..9f37ba5c58 --- /dev/null +++ b/tools/zlib/patches/006-fix-compressor-crash-on-certain-inputs.patch @@ -0,0 +1,343 @@ +From 5c44459c3b28a9bd3283aaceab7c615f8020c531 Mon Sep 17 00:00:00 2001 +From: Mark Adler +Date: Tue, 17 Apr 2018 22:09:22 -0700 +Subject: [PATCH] Fix a bug that can crash deflate on some input when using + Z_FIXED. 
+ +This bug was reported by Danilo Ramos of Eideticom, Inc. It has +lain in wait 13 years before being found! The bug was introduced +in zlib 1.2.2.2, with the addition of the Z_FIXED option. That +option forces the use of fixed Huffman codes. For rare inputs with +a large number of distant matches, the pending buffer into which +the compressed data is written can overwrite the distance symbol +table which it overlays. That results in corrupted output due to +invalid distances, and can result in out-of-bound accesses, +crashing the application. + +The fix here combines the distance buffer and literal/length +buffers into a single symbol buffer. Now three bytes of pending +buffer space are opened up for each literal or length/distance +pair consumed, instead of the previous two bytes. This assures +that the pending buffer cannot overwrite the symbol table, since +the maximum fixed code compressed length/distance is 31 bits, and +since there are four bytes of pending space for every three bytes +of symbol space. +--- + deflate.c | 74 ++++++++++++++++++++++++++++++++++++++++--------------- + deflate.h | 25 +++++++++---------- + trees.c | 50 +++++++++++-------------------------- + 3 files changed, 79 insertions(+), 70 deletions(-) + +diff --git a/deflate.c b/deflate.c +index 425babc00..19cba873a 100644 +--- a/deflate.c ++++ b/deflate.c +@@ -255,11 +255,6 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy, + int wrap = 1; + static const char my_version[] = ZLIB_VERSION; + +- ushf *overlay; +- /* We overlay pending_buf and d_buf+l_buf. This works since the average +- * output size for (length,distance) codes is <= 24 bits. 
+- */ +- + if (version == Z_NULL || version[0] != my_version[0] || + stream_size != sizeof(z_stream)) { + return Z_VERSION_ERROR; +@@ -329,9 +324,47 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy, + + s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */ + +- overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2); +- s->pending_buf = (uchf *) overlay; +- s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L); ++ /* We overlay pending_buf and sym_buf. This works since the average size ++ * for length/distance pairs over any compressed block is assured to be 31 ++ * bits or less. ++ * ++ * Analysis: The longest fixed codes are a length code of 8 bits plus 5 ++ * extra bits, for lengths 131 to 257. The longest fixed distance codes are ++ * 5 bits plus 13 extra bits, for distances 16385 to 32768. The longest ++ * possible fixed-codes length/distance pair is then 31 bits total. ++ * ++ * sym_buf starts one-fourth of the way into pending_buf. So there are ++ * three bytes in sym_buf for every four bytes in pending_buf. Each symbol ++ * in sym_buf is three bytes -- two for the distance and one for the ++ * literal/length. As each symbol is consumed, the pointer to the next ++ * sym_buf value to read moves forward three bytes. From that symbol, up to ++ * 31 bits are written to pending_buf. The closest the written pending_buf ++ * bits gets to the next sym_buf symbol to read is just before the last ++ * code is written. At that time, 31*(n-2) bits have been written, just ++ * after 24*(n-2) bits have been consumed from sym_buf. sym_buf starts at ++ * 8*n bits into pending_buf. (Note that the symbol buffer fills when n-1 ++ * symbols are written.) The closest the writing gets to what is unread is ++ * then n+14 bits. Here n is lit_bufsize, which is 16384 by default, and ++ * can range from 128 to 32768. 
++ * ++ * Therefore, at a minimum, there are 142 bits of space between what is ++ * written and what is read in the overlain buffers, so the symbols cannot ++ * be overwritten by the compressed data. That space is actually 139 bits, ++ * due to the three-bit fixed-code block header. ++ * ++ * That covers the case where either Z_FIXED is specified, forcing fixed ++ * codes, or when the use of fixed codes is chosen, because that choice ++ * results in a smaller compressed block than dynamic codes. That latter ++ * condition then assures that the above analysis also covers all dynamic ++ * blocks. A dynamic-code block will only be chosen to be emitted if it has ++ * fewer bits than a fixed-code block would for the same set of symbols. ++ * Therefore its average symbol length is assured to be less than 31. So ++ * the compressed data for a dynamic block also cannot overwrite the ++ * symbols from which it is being constructed. ++ */ ++ ++ s->pending_buf = (uchf *) ZALLOC(strm, s->lit_bufsize, 4); ++ s->pending_buf_size = (ulg)s->lit_bufsize * 4; + + if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL || + s->pending_buf == Z_NULL) { +@@ -340,8 +373,12 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy, + deflateEnd (strm); + return Z_MEM_ERROR; + } +- s->d_buf = overlay + s->lit_bufsize/sizeof(ush); +- s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize; ++ s->sym_buf = s->pending_buf + s->lit_bufsize; ++ s->sym_end = (s->lit_bufsize - 1) * 3; ++ /* We avoid equality with lit_bufsize*3 because of wraparound at 64K ++ * on 16 bit machines and because stored blocks are restricted to ++ * 64K-1 bytes. 
++ */ + + s->level = level; + s->strategy = strategy; +@@ -552,7 +589,7 @@ int ZEXPORT deflatePrime (strm, bits, value) + + if (deflateStateCheck(strm)) return Z_STREAM_ERROR; + s = strm->state; +- if ((Bytef *)(s->d_buf) < s->pending_out + ((Buf_size + 7) >> 3)) ++ if (s->sym_buf < s->pending_out + ((Buf_size + 7) >> 3)) + return Z_BUF_ERROR; + do { + put = Buf_size - s->bi_valid; +@@ -1113,7 +1150,6 @@ int ZEXPORT deflateCopy (dest, source) + #else + deflate_state *ds; + deflate_state *ss; +- ushf *overlay; + + + if (deflateStateCheck(source) || dest == Z_NULL) { +@@ -1133,8 +1169,7 @@ int ZEXPORT deflateCopy (dest, source) + ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte)); + ds->prev = (Posf *) ZALLOC(dest, ds->w_size, sizeof(Pos)); + ds->head = (Posf *) ZALLOC(dest, ds->hash_size, sizeof(Pos)); +- overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2); +- ds->pending_buf = (uchf *) overlay; ++ ds->pending_buf = (uchf *) ZALLOC(dest, ds->lit_bufsize, 4); + + if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL || + ds->pending_buf == Z_NULL) { +@@ -1148,8 +1183,7 @@ int ZEXPORT deflateCopy (dest, source) + zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size); + + ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf); +- ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush); +- ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize; ++ ds->sym_buf = ds->pending_buf + ds->lit_bufsize; + + ds->l_desc.dyn_tree = ds->dyn_ltree; + ds->d_desc.dyn_tree = ds->dyn_dtree; +@@ -1925,7 +1959,7 @@ local block_state deflate_fast(s, flush) + FLUSH_BLOCK(s, 1); + return finish_done; + } +- if (s->last_lit) ++ if (s->sym_next) + FLUSH_BLOCK(s, 0); + return block_done; + } +@@ -2056,7 +2090,7 @@ local block_state deflate_slow(s, flush) + FLUSH_BLOCK(s, 1); + return finish_done; + } +- if (s->last_lit) ++ if (s->sym_next) + FLUSH_BLOCK(s, 0); + return block_done; + } +@@ -2131,7 +2165,7 @@ local 
block_state deflate_rle(s, flush) + FLUSH_BLOCK(s, 1); + return finish_done; + } +- if (s->last_lit) ++ if (s->sym_next) + FLUSH_BLOCK(s, 0); + return block_done; + } +@@ -2170,7 +2204,7 @@ local block_state deflate_huff(s, flush) + FLUSH_BLOCK(s, 1); + return finish_done; + } +- if (s->last_lit) ++ if (s->sym_next) + FLUSH_BLOCK(s, 0); + return block_done; + } +diff --git a/deflate.h b/deflate.h +index 23ecdd312..d4cf1a98b 100644 +--- a/deflate.h ++++ b/deflate.h +@@ -217,7 +217,7 @@ typedef struct internal_state { + /* Depth of each subtree used as tie breaker for trees of equal frequency + */ + +- uchf *l_buf; /* buffer for literals or lengths */ ++ uchf *sym_buf; /* buffer for distances and literals/lengths */ + + uInt lit_bufsize; + /* Size of match buffer for literals/lengths. There are 4 reasons for +@@ -239,13 +239,8 @@ typedef struct internal_state { + * - I can't count above 4 + */ + +- uInt last_lit; /* running index in l_buf */ +- +- ushf *d_buf; +- /* Buffer for distances. To simplify the code, d_buf and l_buf have +- * the same number of elements. To use different lengths, an extra flag +- * array would be necessary. 
+- */ ++ uInt sym_next; /* running index in sym_buf */ ++ uInt sym_end; /* symbol table full when sym_next reaches this */ + + ulg opt_len; /* bit length of current block with optimal trees */ + ulg static_len; /* bit length of current block with static trees */ +@@ -325,20 +320,22 @@ void ZLIB_INTERNAL _tr_stored_block OF((deflate_state *s, charf *buf, + + # define _tr_tally_lit(s, c, flush) \ + { uch cc = (c); \ +- s->d_buf[s->last_lit] = 0; \ +- s->l_buf[s->last_lit++] = cc; \ ++ s->sym_buf[s->sym_next++] = 0; \ ++ s->sym_buf[s->sym_next++] = 0; \ ++ s->sym_buf[s->sym_next++] = cc; \ + s->dyn_ltree[cc].Freq++; \ +- flush = (s->last_lit == s->lit_bufsize-1); \ ++ flush = (s->sym_next == s->sym_end); \ + } + # define _tr_tally_dist(s, distance, length, flush) \ + { uch len = (uch)(length); \ + ush dist = (ush)(distance); \ +- s->d_buf[s->last_lit] = dist; \ +- s->l_buf[s->last_lit++] = len; \ ++ s->sym_buf[s->sym_next++] = dist; \ ++ s->sym_buf[s->sym_next++] = dist >> 8; \ ++ s->sym_buf[s->sym_next++] = len; \ + dist--; \ + s->dyn_ltree[_length_code[len]+LITERALS+1].Freq++; \ + s->dyn_dtree[d_code(dist)].Freq++; \ +- flush = (s->last_lit == s->lit_bufsize-1); \ ++ flush = (s->sym_next == s->sym_end); \ + } + #else + # define _tr_tally_lit(s, c, flush) flush = _tr_tally(s, 0, c) +diff --git a/trees.c b/trees.c +index 4f4a65011..decaeb7c3 100644 +--- a/trees.c ++++ b/trees.c +@@ -416,7 +416,7 @@ local void init_block(s) + + s->dyn_ltree[END_BLOCK].Freq = 1; + s->opt_len = s->static_len = 0L; +- s->last_lit = s->matches = 0; ++ s->sym_next = s->matches = 0; + } + + #define SMALLEST 1 +@@ -948,7 +948,7 @@ void ZLIB_INTERNAL _tr_flush_block(s, buf, stored_len, last) + + Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ", + opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len, +- s->last_lit)); ++ s->sym_next / 3)); + + if (static_lenb <= opt_lenb) opt_lenb = static_lenb; + +@@ -1017,8 +1017,9 @@ int ZLIB_INTERNAL _tr_tally (s, dist, lc) + 
unsigned dist; /* distance of matched string */ + unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */ + { +- s->d_buf[s->last_lit] = (ush)dist; +- s->l_buf[s->last_lit++] = (uch)lc; ++ s->sym_buf[s->sym_next++] = dist; ++ s->sym_buf[s->sym_next++] = dist >> 8; ++ s->sym_buf[s->sym_next++] = lc; + if (dist == 0) { + /* lc is the unmatched char */ + s->dyn_ltree[lc].Freq++; +@@ -1033,30 +1034,7 @@ int ZLIB_INTERNAL _tr_tally (s, dist, lc) + s->dyn_ltree[_length_code[lc]+LITERALS+1].Freq++; + s->dyn_dtree[d_code(dist)].Freq++; + } +- +-#ifdef TRUNCATE_BLOCK +- /* Try to guess if it is profitable to stop the current block here */ +- if ((s->last_lit & 0x1fff) == 0 && s->level > 2) { +- /* Compute an upper bound for the compressed length */ +- ulg out_length = (ulg)s->last_lit*8L; +- ulg in_length = (ulg)((long)s->strstart - s->block_start); +- int dcode; +- for (dcode = 0; dcode < D_CODES; dcode++) { +- out_length += (ulg)s->dyn_dtree[dcode].Freq * +- (5L+extra_dbits[dcode]); +- } +- out_length >>= 3; +- Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ", +- s->last_lit, in_length, out_length, +- 100L - out_length*100L/in_length)); +- if (s->matches < s->last_lit/2 && out_length < in_length/2) return 1; +- } +-#endif +- return (s->last_lit == s->lit_bufsize-1); +- /* We avoid equality with lit_bufsize because of wraparound at 64K +- * on 16 bit machines and because stored blocks are restricted to +- * 64K-1 bytes. 
+- */ ++ return (s->sym_next == s->sym_end); + } + + /* =========================================================================== +@@ -1069,13 +1047,14 @@ local void compress_block(s, ltree, dtree) + { + unsigned dist; /* distance of matched string */ + int lc; /* match length or unmatched char (if dist == 0) */ +- unsigned lx = 0; /* running index in l_buf */ ++ unsigned sx = 0; /* running index in sym_buf */ + unsigned code; /* the code to send */ + int extra; /* number of extra bits to send */ + +- if (s->last_lit != 0) do { +- dist = s->d_buf[lx]; +- lc = s->l_buf[lx++]; ++ if (s->sym_next != 0) do { ++ dist = s->sym_buf[sx++] & 0xff; ++ dist += (unsigned)(s->sym_buf[sx++] & 0xff) << 8; ++ lc = s->sym_buf[sx++]; + if (dist == 0) { + send_code(s, lc, ltree); /* send a literal byte */ + Tracecv(isgraph(lc), (stderr," '%c' ", lc)); +@@ -1100,11 +1079,10 @@ local void compress_block(s, ltree, dtree) + } + } /* literal or match pair ? */ + +- /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */ +- Assert((uInt)(s->pending) < s->lit_bufsize + 2*lx, +- "pendingBuf overflow"); ++ /* Check that the overlay between pending_buf and sym_buf is ok: */ ++ Assert(s->pending < s->lit_bufsize + sx, "pendingBuf overflow"); + +- } while (lx < s->last_lit); ++ } while (sx < s->sym_next); + + send_code(s, END_BLOCK, ltree); + }