Android-x86
Fork
Donation

  • R/O
  • HTTP
  • SSH
  • HTTPS

kernel: Commit

kernel


Commit MetaInfo

Revision: 19d4991aa6c9fc7543d0f5a626f935d83d276176 (tree)
Time: 2021-02-10 17:57:00
Author: Greg Kroah-Hartman <gregkh@goog...>
Committer: Greg Kroah-Hartman

Log Message

This is the 4.19.175 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmAjmCsACgkQONu9yGCS
aT6zjw//T/sN/vtlHmZZUD/iUcbYHUJiM82izt8yu6SsgcjiUkjwXKH373zX0EsN
O7iQIWXMWxXCWc73esvJ1L8uOLmYto3fJq4bb+emsd8R6M/NQuQqSX7fvvlnpLo/
wkTkctk1l+EyVjk67M2izCK35Zz2rVvLHGYXYMj+bJZxxCrnhG06pXcV+0udX53j
Nl0ejBoYLhKZW3wyfkLbORQToFATM1IxSaBqUESQj1Wkie989c4ARcJ1mUuOOW6T
XMhdxazbV5I4kQzcBWmhVbGX5hv28FIeeugu2aW4bxX4up8zcq+S0Z1I5c0Q0MWL
1AI+4HDS+9/USXuQc4kqgqF893Y25hXwk3tck7boRu0OznLC3j9RTXMrDVl08ZOv
3Y9fkyEoRwwbsik5+8Sf3H0OamNaL6qcWYbJczlz50BltaVH2k960itCGlnfnJjT
9FZQMmWzx3JnZbDHD6iCYuVCnPdZn2AUK5ZwF5kjME7CcJVDWGH23uFpTw5MDXMO
1arKj615PPTKzZ+gODM4Q0H2+ek3uclKpNqDNlR4sZgRxcr+bimw7tke3Pv4L+4p
iU5W9o9TBXqoeNwKCO37eoRHySpU/mJ8OslHTrf/YzXH/q9uHNnVo9sM9/SEyqr1
94OrfbKTE8InON0/jgu482i7SHODdNUoD4wDXyZDQ1jb5J/+H3k=
=uSwA
-----END PGP SIGNATURE-----

Merge 4.19.175 into android-4.19-stable

Changes in 4.19.175
USB: serial: cp210x: add pid/vid for WSDA-200-USB
USB: serial: cp210x: add new VID/PID for supporting Teraoka AD2000
USB: serial: option: Adding support for Cinterion MV31
elfcore: fix building with clang
Input: i8042 - unbreak Pegatron C15B
rxrpc: Fix deadlock around release of dst cached on udp tunnel
arm64: dts: ls1046a: fix dcfg address range
net: lapb: Copy the skb before sending a packet
net: mvpp2: TCAM entry enable should be written after SRAM data
memblock: do not start bottom-up allocations with kernel_end
USB: gadget: legacy: fix an error code in eth_bind()
USB: usblp: don't call usb_set_interface if there's a single alt
usb: renesas_usbhs: Clear pipe running flag in usbhs_pkt_pop()
usb: dwc2: Fix endpoint direction check in ep_from_windex
usb: dwc3: fix clock issue during resume in OTG mode
ovl: fix dentry leak in ovl_get_redirect
mac80211: fix station rate table updates on assoc
kretprobe: Avoid re-registration of the same kretprobe earlier
genirq/msi: Activate Multi-MSI early when MSI_FLAG_ACTIVATE_EARLY is set
xhci: fix bounce buffer usage for non-sg list case
cifs: report error instead of invalid when revalidating a dentry fails
smb3: Fix out-of-bounds bug in SMB2_negotiate()
mmc: core: Limit retries when analyse of SDIO tuples fails
nvme-pci: avoid the deepest sleep state on Kingston A2000 SSDs
KVM: SVM: Treat SVM as unsupported when running as an SEV guest
ARM: footbridge: fix dc21285 PCI configuration accessors
mm: hugetlbfs: fix cannot migrate the fallocated HugeTLB page
mm: hugetlb: fix a race between freeing and dissolving the page
mm: hugetlb: fix a race between isolating and freeing page
mm: hugetlb: remove VM_BUG_ON_PAGE from page_huge_active
mm: thp: fix MADV_REMOVE deadlock on shmem THP
x86/build: Disable CET instrumentation in the kernel
x86/apic: Add extra serialization for non-serializing MSRs
Input: xpad - sync supported devices with fork on GitHub
iommu/vt-d: Do not use flush-queue when caching-mode is on
md: Set prev_flush_start and flush_bio in an atomic way
net: ip_tunnel: fix mtu calculation
net: dsa: mv88e6xxx: override existent unicast portvec in port_fdb_add
Linux 4.19.175

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Iac6d5c4ad079946ef1c032791e8e583b9264917b

Change Summary

Incremental Difference

--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
11 # SPDX-License-Identifier: GPL-2.0
22 VERSION = 4
33 PATCHLEVEL = 19
4-SUBLEVEL = 174
4+SUBLEVEL = 175
55 EXTRAVERSION =
66 NAME = "People's Front"
77
@@ -952,12 +952,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init)
952952 # change __FILE__ to the relative path from the srctree
953953 KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
954954
955-# ensure -fcf-protection is disabled when using retpoline as it is
956-# incompatible with -mindirect-branch=thunk-extern
957-ifdef CONFIG_RETPOLINE
958-KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
959-endif
960-
961955 # use the deterministic mode of AR if available
962956 KBUILD_ARFLAGS := $(call ar-option,D)
963957
--- a/arch/arm/mach-footbridge/dc21285.c
+++ b/arch/arm/mach-footbridge/dc21285.c
@@ -69,15 +69,15 @@ dc21285_read_config(struct pci_bus *bus, unsigned int devfn, int where,
6969 if (addr)
7070 switch (size) {
7171 case 1:
72- asm("ldrb %0, [%1, %2]"
72+ asm volatile("ldrb %0, [%1, %2]"
7373 : "=r" (v) : "r" (addr), "r" (where) : "cc");
7474 break;
7575 case 2:
76- asm("ldrh %0, [%1, %2]"
76+ asm volatile("ldrh %0, [%1, %2]"
7777 : "=r" (v) : "r" (addr), "r" (where) : "cc");
7878 break;
7979 case 4:
80- asm("ldr %0, [%1, %2]"
80+ asm volatile("ldr %0, [%1, %2]"
8181 : "=r" (v) : "r" (addr), "r" (where) : "cc");
8282 break;
8383 }
@@ -103,17 +103,17 @@ dc21285_write_config(struct pci_bus *bus, unsigned int devfn, int where,
103103 if (addr)
104104 switch (size) {
105105 case 1:
106- asm("strb %0, [%1, %2]"
106+ asm volatile("strb %0, [%1, %2]"
107107 : : "r" (value), "r" (addr), "r" (where)
108108 : "cc");
109109 break;
110110 case 2:
111- asm("strh %0, [%1, %2]"
111+ asm volatile("strh %0, [%1, %2]"
112112 : : "r" (value), "r" (addr), "r" (where)
113113 : "cc");
114114 break;
115115 case 4:
116- asm("str %0, [%1, %2]"
116+ asm volatile("str %0, [%1, %2]"
117117 : : "r" (value), "r" (addr), "r" (where)
118118 : "cc");
119119 break;
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
@@ -303,7 +303,7 @@
303303
304304 dcfg: dcfg@1ee0000 {
305305 compatible = "fsl,ls1046a-dcfg", "syscon";
306- reg = <0x0 0x1ee0000 0x0 0x10000>;
306+ reg = <0x0 0x1ee0000 0x0 0x1000>;
307307 big-endian;
308308 };
309309
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -132,6 +132,9 @@ else
132132 KBUILD_CFLAGS += -mno-red-zone
133133 KBUILD_CFLAGS += -mcmodel=kernel
134134
135+ # Intel CET isn't enabled in the kernel
136+ KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
137+
135138 # -funit-at-a-time shrinks the kernel .text considerably
136139 # unfortunately it makes reading oopses harder.
137140 KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time)
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -190,16 +190,6 @@ static inline void lapic_assign_legacy_vector(unsigned int i, bool r) { }
190190 #endif /* !CONFIG_X86_LOCAL_APIC */
191191
192192 #ifdef CONFIG_X86_X2APIC
193-/*
194- * Make previous memory operations globally visible before
195- * sending the IPI through x2apic wrmsr. We need a serializing instruction or
196- * mfence for this.
197- */
198-static inline void x2apic_wrmsr_fence(void)
199-{
200- asm volatile("mfence" : : : "memory");
201-}
202-
203193 static inline void native_apic_msr_write(u32 reg, u32 v)
204194 {
205195 if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR ||
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -85,4 +85,22 @@ do { \
8585
8686 #include <asm-generic/barrier.h>
8787
88+/*
89+ * Make previous memory operations globally visible before
90+ * a WRMSR.
91+ *
92+ * MFENCE makes writes visible, but only affects load/store
93+ * instructions. WRMSR is unfortunately not a load/store
94+ * instruction and is unaffected by MFENCE. The LFENCE ensures
95+ * that the WRMSR is not reordered.
96+ *
97+ * Most WRMSRs are full serializing instructions themselves and
98+ * do not require this barrier. This is only required for the
99+ * IA32_TSC_DEADLINE and X2APIC MSRs.
100+ */
101+static inline void weak_wrmsr_fence(void)
102+{
103+ asm volatile("mfence; lfence" : : : "memory");
104+}
105+
88106 #endif /* _ASM_X86_BARRIER_H */
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -41,6 +41,7 @@
4141 #include <asm/x86_init.h>
4242 #include <asm/pgalloc.h>
4343 #include <linux/atomic.h>
44+#include <asm/barrier.h>
4445 #include <asm/mpspec.h>
4546 #include <asm/i8259.h>
4647 #include <asm/proto.h>
@@ -465,6 +466,9 @@ static int lapic_next_deadline(unsigned long delta,
465466 {
466467 u64 tsc;
467468
469+ /* This MSR is special and need a special fence: */
470+ weak_wrmsr_fence();
471+
468472 tsc = rdtsc();
469473 wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
470474 return 0;
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -31,7 +31,8 @@ static void x2apic_send_IPI(int cpu, int vector)
3131 {
3232 u32 dest = per_cpu(x86_cpu_to_logical_apicid, cpu);
3333
34- x2apic_wrmsr_fence();
34+ /* x2apic MSRs are special and need a special fence: */
35+ weak_wrmsr_fence();
3536 __x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL);
3637 }
3738
@@ -43,7 +44,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
4344 unsigned long flags;
4445 u32 dest;
4546
46- x2apic_wrmsr_fence();
47+ /* x2apic MSRs are special and need a special fence: */
48+ weak_wrmsr_fence();
4749 local_irq_save(flags);
4850
4951 tmpmsk = this_cpu_cpumask_var_ptr(ipi_mask);
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -48,7 +48,8 @@ static void x2apic_send_IPI(int cpu, int vector)
4848 {
4949 u32 dest = per_cpu(x86_cpu_to_apicid, cpu);
5050
51- x2apic_wrmsr_fence();
51+ /* x2apic MSRs are special and need a special fence: */
52+ weak_wrmsr_fence();
5253 __x2apic_send_IPI_dest(dest, vector, APIC_DEST_PHYSICAL);
5354 }
5455
@@ -59,7 +60,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
5960 unsigned long this_cpu;
6061 unsigned long flags;
6162
62- x2apic_wrmsr_fence();
63+ /* x2apic MSRs are special and need a special fence: */
64+ weak_wrmsr_fence();
6365
6466 local_irq_save(flags);
6567
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -892,6 +892,11 @@ static int has_svm(void)
892892 return 0;
893893 }
894894
895+ if (sev_active()) {
896+ pr_info("KVM is unsupported when running as an SEV guest\n");
897+ return 0;
898+ }
899+
895900 return 1;
896901 }
897902
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -229,9 +229,17 @@ static const struct xpad_device {
229229 { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
230230 { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
231231 { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE },
232- { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
232+ { 0x0e6f, 0x02a0, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
233+ { 0x0e6f, 0x02a1, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
234+ { 0x0e6f, 0x02a2, "PDP Wired Controller for Xbox One - Crimson Red", 0, XTYPE_XBOXONE },
233235 { 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
234236 { 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE },
237+ { 0x0e6f, 0x02a7, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
238+ { 0x0e6f, 0x02a8, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
239+ { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
240+ { 0x0e6f, 0x02ad, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
241+ { 0x0e6f, 0x02b3, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
242+ { 0x0e6f, 0x02b8, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
235243 { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
236244 { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE },
237245 { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
@@ -310,6 +318,9 @@ static const struct xpad_device {
310318 { 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 },
311319 { 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 },
312320 { 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 },
321+ { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE },
322+ { 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 },
323+ { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
313324 { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
314325 { 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 },
315326 { 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
@@ -443,8 +454,12 @@ static const struct usb_device_id xpad_table[] = {
443454 XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */
444455 XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
445456 XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */
457+ XPAD_XBOX360_VENDOR(0x20d6), /* PowerA Controllers */
458+ XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA Controllers */
446459 XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */
447460 XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */
461+ XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Duke X-Box One pad */
462+ XPAD_XBOX360_VENDOR(0x2f24), /* GameSir Controllers */
448463 { }
449464 };
450465
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -223,6 +223,8 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
223223 DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
224224 DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
225225 },
226+ },
227+ {
226228 .matches = {
227229 DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"),
228230 DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"),
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3364,6 +3364,12 @@ static int __init init_dmars(void)
33643364
33653365 if (!ecap_pass_through(iommu->ecap))
33663366 hw_pass_through = 0;
3367+
3368+ if (!intel_iommu_strict && cap_caching_mode(iommu->cap)) {
3369+ pr_info("Disable batched IOTLB flush due to virtualization");
3370+ intel_iommu_strict = 1;
3371+ }
3372+
33673373 #ifdef CONFIG_INTEL_IOMMU_SVM
33683374 if (pasid_enabled(iommu))
33693375 intel_svm_init(iommu);
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -474,8 +474,10 @@ static void md_submit_flush_data(struct work_struct *ws)
474474 * could wait for this and below md_handle_request could wait for those
475475 * bios because of suspend check
476476 */
477+ spin_lock_irq(&mddev->lock);
477478 mddev->last_flush = mddev->start_flush;
478479 mddev->flush_bio = NULL;
480+ spin_unlock_irq(&mddev->lock);
479481 wake_up(&mddev->sb_wait);
480482
481483 if (bio->bi_iter.bi_size == 0) {
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -24,6 +24,8 @@
2424 #include "sdio_cis.h"
2525 #include "sdio_ops.h"
2626
27+#define SDIO_READ_CIS_TIMEOUT_MS (10 * 1000) /* 10s */
28+
2729 static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func,
2830 const unsigned char *buf, unsigned size)
2931 {
@@ -270,6 +272,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
270272
271273 do {
272274 unsigned char tpl_code, tpl_link;
275+ unsigned long timeout = jiffies +
276+ msecs_to_jiffies(SDIO_READ_CIS_TIMEOUT_MS);
273277
274278 ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_code);
275279 if (ret)
@@ -322,6 +326,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
322326 prev = &this->next;
323327
324328 if (ret == -ENOENT) {
329+ if (time_after(jiffies, timeout))
330+ break;
325331 /* warn about unknown tuples */
326332 pr_warn_ratelimited("%s: queuing unknown"
327333 " CIS tuple 0x%02x (%u bytes)\n",
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1658,7 +1658,11 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
16581658 if (!entry.portvec)
16591659 entry.state = MV88E6XXX_G1_ATU_DATA_STATE_UNUSED;
16601660 } else {
1661- entry.portvec |= BIT(port);
1661+ if (state == MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC)
1662+ entry.portvec = BIT(port);
1663+ else
1664+ entry.portvec |= BIT(port);
1665+
16621666 entry.state = state;
16631667 }
16641668
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
@@ -29,16 +29,16 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
2929 /* Clear entry invalidation bit */
3030 pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
3131
32- /* Write tcam index - indirect access */
33- mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
34- for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
35- mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);
36-
3732 /* Write sram index - indirect access */
3833 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
3934 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
4035 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]);
4136
37+ /* Write tcam index - indirect access */
38+ mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
39+ for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
40+ mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);
41+
4242 return 0;
4343 }
4444
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2733,6 +2733,8 @@ static const struct pci_device_id nvme_id_table[] = {
27332733 { PCI_DEVICE(0x1d1d, 0x2601), /* CNEX Granby */
27342734 .driver_data = NVME_QUIRK_LIGHTNVM, },
27352735 { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
2736+ { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */
2737+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
27362738 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
27372739 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
27382740 { 0, }
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -1327,14 +1327,17 @@ static int usblp_set_protocol(struct usblp *usblp, int protocol)
13271327 if (protocol < USBLP_FIRST_PROTOCOL || protocol > USBLP_LAST_PROTOCOL)
13281328 return -EINVAL;
13291329
1330- alts = usblp->protocol[protocol].alt_setting;
1331- if (alts < 0)
1332- return -EINVAL;
1333- r = usb_set_interface(usblp->dev, usblp->ifnum, alts);
1334- if (r < 0) {
1335- printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n",
1336- alts, usblp->ifnum);
1337- return r;
1330+ /* Don't unnecessarily set the interface if there's a single alt. */
1331+ if (usblp->intf->num_altsetting > 1) {
1332+ alts = usblp->protocol[protocol].alt_setting;
1333+ if (alts < 0)
1334+ return -EINVAL;
1335+ r = usb_set_interface(usblp->dev, usblp->ifnum, alts);
1336+ if (r < 0) {
1337+ printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n",
1338+ alts, usblp->ifnum);
1339+ return r;
1340+ }
13381341 }
13391342
13401343 usblp->bidir = (usblp->protocol[protocol].epread != NULL);
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -1453,7 +1453,6 @@ static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep,
14531453 static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
14541454 u32 windex)
14551455 {
1456- struct dwc2_hsotg_ep *ep;
14571456 int dir = (windex & USB_DIR_IN) ? 1 : 0;
14581457 int idx = windex & 0x7F;
14591458
@@ -1463,12 +1462,7 @@ static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
14631462 if (idx > hsotg->num_of_eps)
14641463 return NULL;
14651464
1466- ep = index_to_ep(hsotg, idx, dir);
1467-
1468- if (idx && ep->dir_in != dir)
1469- return NULL;
1470-
1471- return ep;
1465+ return index_to_ep(hsotg, idx, dir);
14721466 }
14731467
14741468 /**
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1700,7 +1700,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
17001700 if (PMSG_IS_AUTO(msg))
17011701 break;
17021702
1703- ret = dwc3_core_init(dwc);
1703+ ret = dwc3_core_init_for_resume(dwc);
17041704 if (ret)
17051705 return ret;
17061706
--- a/drivers/usb/gadget/legacy/ether.c
+++ b/drivers/usb/gadget/legacy/ether.c
@@ -403,8 +403,10 @@ static int eth_bind(struct usb_composite_dev *cdev)
403403 struct usb_descriptor_header *usb_desc;
404404
405405 usb_desc = usb_otg_descriptor_alloc(gadget);
406- if (!usb_desc)
406+ if (!usb_desc) {
407+ status = -ENOMEM;
407408 goto fail1;
409+ }
408410 usb_otg_descriptor_init(gadget, usb_desc);
409411 otg_desc[0] = usb_desc;
410412 otg_desc[1] = NULL;
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -670,11 +670,16 @@ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
670670 dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
671671 DMA_FROM_DEVICE);
672672 /* for in tranfers we need to copy the data from bounce to sg */
673- len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
674- seg->bounce_len, seg->bounce_offs);
675- if (len != seg->bounce_len)
676- xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
677- len, seg->bounce_len);
673+ if (urb->num_sgs) {
674+ len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
675+ seg->bounce_len, seg->bounce_offs);
676+ if (len != seg->bounce_len)
677+ xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
678+ len, seg->bounce_len);
679+ } else {
680+ memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf,
681+ seg->bounce_len);
682+ }
678683 seg->bounce_len = 0;
679684 seg->bounce_offs = 0;
680685 }
@@ -3180,12 +3185,16 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
31803185
31813186 /* create a max max_pkt sized bounce buffer pointed to by last trb */
31823187 if (usb_urb_dir_out(urb)) {
3183- len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
3184- seg->bounce_buf, new_buff_len, enqd_len);
3185- if (len != new_buff_len)
3186- xhci_warn(xhci,
3187- "WARN Wrong bounce buffer write length: %zu != %d\n",
3188- len, new_buff_len);
3188+ if (urb->num_sgs) {
3189+ len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
3190+ seg->bounce_buf, new_buff_len, enqd_len);
3191+ if (len != new_buff_len)
3192+ xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n",
3193+ len, new_buff_len);
3194+ } else {
3195+ memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len);
3196+ }
3197+
31893198 seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
31903199 max_pkt, DMA_TO_DEVICE);
31913200 } else {
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -126,6 +126,7 @@ struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
126126 }
127127
128128 usbhs_pipe_clear_without_sequence(pipe, 0, 0);
129+ usbhs_pipe_running(pipe, 0);
129130
130131 __usbhsf_pkt_del(pkt);
131132 }
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -61,6 +61,7 @@ static const struct usb_device_id id_table[] = {
6161 { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
6262 { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
6363 { USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */
64+ { USB_DEVICE(0x0988, 0x0578) }, /* Teraoka AD2000 */
6465 { USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */
6566 { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
6667 { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
@@ -201,6 +202,7 @@ static const struct usb_device_id id_table[] = {
201202 { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
202203 { USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */
203204 { USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */
205+ { USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */
204206 { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
205207 { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
206208 { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -425,6 +425,8 @@ static void option_instat_callback(struct urb *urb);
425425 #define CINTERION_PRODUCT_AHXX_2RMNET 0x0084
426426 #define CINTERION_PRODUCT_AHXX_AUDIO 0x0085
427427 #define CINTERION_PRODUCT_CLS8 0x00b0
428+#define CINTERION_PRODUCT_MV31_MBIM 0x00b3
429+#define CINTERION_PRODUCT_MV31_RMNET 0x00b7
428430
429431 /* Olivetti products */
430432 #define OLIVETTI_VENDOR_ID 0x0b3c
@@ -1914,6 +1916,10 @@ static const struct usb_device_id option_ids[] = {
19141916 { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
19151917 { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
19161918 { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
1919+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_MBIM, 0xff),
1920+ .driver_info = RSVD(3)},
1921+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff),
1922+ .driver_info = RSVD(0)},
19171923 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
19181924 .driver_info = RSVD(4) },
19191925 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
--- a/fs/afs/main.c
+++ b/fs/afs/main.c
@@ -190,7 +190,7 @@ static int __init afs_init(void)
190190 goto error_cache;
191191 #endif
192192
193- ret = register_pernet_subsys(&afs_net_ops);
193+ ret = register_pernet_device(&afs_net_ops);
194194 if (ret < 0)
195195 goto error_net;
196196
@@ -210,7 +210,7 @@ static int __init afs_init(void)
210210 error_proc:
211211 afs_fs_exit();
212212 error_fs:
213- unregister_pernet_subsys(&afs_net_ops);
213+ unregister_pernet_device(&afs_net_ops);
214214 error_net:
215215 #ifdef CONFIG_AFS_FSCACHE
216216 fscache_unregister_netfs(&afs_cache_netfs);
@@ -241,7 +241,7 @@ static void __exit afs_exit(void)
241241
242242 proc_remove(afs_proc_symlink);
243243 afs_fs_exit();
244- unregister_pernet_subsys(&afs_net_ops);
244+ unregister_pernet_device(&afs_net_ops);
245245 #ifdef CONFIG_AFS_FSCACHE
246246 fscache_unregister_netfs(&afs_cache_netfs);
247247 #endif
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -840,6 +840,7 @@ static int
840840 cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
841841 {
842842 struct inode *inode;
843+ int rc;
843844
844845 if (flags & LOOKUP_RCU)
845846 return -ECHILD;
@@ -849,8 +850,25 @@ cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
849850 if ((flags & LOOKUP_REVAL) && !CIFS_CACHE_READ(CIFS_I(inode)))
850851 CIFS_I(inode)->time = 0; /* force reval */
851852
852- if (cifs_revalidate_dentry(direntry))
853- return 0;
853+ rc = cifs_revalidate_dentry(direntry);
854+ if (rc) {
855+ cifs_dbg(FYI, "cifs_revalidate_dentry failed with rc=%d", rc);
856+ switch (rc) {
857+ case -ENOENT:
858+ case -ESTALE:
859+ /*
860+ * Those errors mean the dentry is invalid
861+ * (file was deleted or recreated)
862+ */
863+ return 0;
864+ default:
865+ /*
866+ * Otherwise some unexpected error happened
867+ * report it as-is to VFS layer
868+ */
869+ return rc;
870+ }
871+ }
854872 else {
855873 /*
856874 * If the inode wasn't known to be a dfs entry when
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -222,7 +222,7 @@ struct smb2_negotiate_req {
222222 __le32 NegotiateContextOffset; /* SMB3.1.1 only. MBZ earlier */
223223 __le16 NegotiateContextCount; /* SMB3.1.1 only. MBZ earlier */
224224 __le16 Reserved2;
225- __le16 Dialects[1]; /* One dialect (vers=) at a time for now */
225+ __le16 Dialects[4]; /* BB expand this if autonegotiate > 4 dialects */
226226 } __packed;
227227
228228 /* Dialects */
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -654,9 +654,10 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
654654
655655 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
656656
657+ set_page_huge_active(page);
657658 /*
658659 * unlock_page because locked by add_to_page_cache()
659- * page_put due to reference from alloc_huge_page()
660+ * put_page() due to reference from alloc_huge_page()
660661 */
661662 unlock_page(page);
662663 put_page(page);
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -949,8 +949,8 @@ static char *ovl_get_redirect(struct dentry *dentry, bool abs_redirect)
949949
950950 buflen -= thislen;
951951 memcpy(&buf[buflen], name, thislen);
952- tmp = dget_dlock(d->d_parent);
953952 spin_unlock(&d->d_lock);
953+ tmp = dget_parent(d);
954954
955955 dput(d);
956956 d = tmp;
--- a/include/linux/elfcore.h
+++ b/include/linux/elfcore.h
@@ -58,6 +58,7 @@ static inline int elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregse
5858 }
5959 #endif
6060
61+#if defined(CONFIG_UM) || defined(CONFIG_IA64)
6162 /*
6263 * These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out
6364 * extra segments containing the gate DSO contents. Dumping its
@@ -72,5 +73,26 @@ elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset);
7273 extern int
7374 elf_core_write_extra_data(struct coredump_params *cprm);
7475 extern size_t elf_core_extra_data_size(void);
76+#else
77+static inline Elf_Half elf_core_extra_phdrs(void)
78+{
79+ return 0;
80+}
81+
82+static inline int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
83+{
84+ return 1;
85+}
86+
87+static inline int elf_core_write_extra_data(struct coredump_params *cprm)
88+{
89+ return 1;
90+}
91+
92+static inline size_t elf_core_extra_data_size(void)
93+{
94+ return 0;
95+}
96+#endif
7597
7698 #endif /* _LINUX_ELFCORE_H */
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -541,6 +541,9 @@ static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr
541541 set_huge_pte_at(mm, addr, ptep, pte);
542542 }
543543 #endif
544+
545+void set_page_huge_active(struct page *page);
546+
544547 #else /* CONFIG_HUGETLB_PAGE */
545548 struct hstate {};
546549 #define alloc_huge_page(v, a, r) NULL
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -118,6 +118,12 @@ struct msi_desc {
118118 list_for_each_entry((desc), dev_to_msi_list((dev)), list)
119119 #define for_each_msi_entry_safe(desc, tmp, dev) \
120120 list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list)
121+#define for_each_msi_vector(desc, __irq, dev) \
122+ for_each_msi_entry((desc), (dev)) \
123+ if ((desc)->irq) \
124+ for (__irq = (desc)->irq; \
125+ __irq < ((desc)->irq + (desc)->nvec_used); \
126+ __irq++)
121127
122128 #ifdef CONFIG_PCI_MSI
123129 #define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev)
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -98,7 +98,6 @@ obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
9898 obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o
9999 obj-$(CONFIG_TRACEPOINTS) += tracepoint.o
100100 obj-$(CONFIG_LATENCYTOP) += latencytop.o
101-obj-$(CONFIG_ELFCORE) += elfcore.o
102101 obj-$(CONFIG_FUNCTION_TRACER) += trace/
103102 obj-$(CONFIG_TRACING) += trace/
104103 obj-$(CONFIG_TRACE_CLOCK) += trace/
--- a/kernel/elfcore.c
+++ /dev/null
@@ -1,26 +0,0 @@
1-// SPDX-License-Identifier: GPL-2.0
2-#include <linux/elf.h>
3-#include <linux/fs.h>
4-#include <linux/mm.h>
5-#include <linux/binfmts.h>
6-#include <linux/elfcore.h>
7-
8-Elf_Half __weak elf_core_extra_phdrs(void)
9-{
10- return 0;
11-}
12-
13-int __weak elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
14-{
15- return 1;
16-}
17-
18-int __weak elf_core_write_extra_data(struct coredump_params *cprm)
19-{
20- return 1;
21-}
22-
23-size_t __weak elf_core_extra_data_size(void)
24-{
25- return 0;
26-}
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -437,22 +437,22 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
437437
438438 can_reserve = msi_check_reservation_mode(domain, info, dev);
439439
440- for_each_msi_entry(desc, dev) {
441- virq = desc->irq;
442- if (desc->nvec_used == 1)
443- dev_dbg(dev, "irq %d for MSI\n", virq);
444- else
440+ /*
441+ * This flag is set by the PCI layer as we need to activate
442+ * the MSI entries before the PCI layer enables MSI in the
443+ * card. Otherwise the card latches a random msi message.
444+ */
445+ if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY))
446+ goto skip_activate;
447+
448+ for_each_msi_vector(desc, i, dev) {
449+ if (desc->irq == i) {
450+ virq = desc->irq;
445451 dev_dbg(dev, "irq [%d-%d] for MSI\n",
446452 virq, virq + desc->nvec_used - 1);
447- /*
448- * This flag is set by the PCI layer as we need to activate
449- * the MSI entries before the PCI layer enables MSI in the
450- * card. Otherwise the card latches a random msi message.
451- */
452- if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY))
453- continue;
453+ }
454454
455- irq_data = irq_domain_get_irq_data(domain, desc->irq);
455+ irq_data = irq_domain_get_irq_data(domain, i);
456456 if (!can_reserve) {
457457 irqd_clr_can_reserve(irq_data);
458458 if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK)
@@ -463,28 +463,24 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
463463 goto cleanup;
464464 }
465465
466+skip_activate:
466467 /*
467468 * If these interrupts use reservation mode, clear the activated bit
468469 * so request_irq() will assign the final vector.
469470 */
470471 if (can_reserve) {
471- for_each_msi_entry(desc, dev) {
472- irq_data = irq_domain_get_irq_data(domain, desc->irq);
472+ for_each_msi_vector(desc, i, dev) {
473+ irq_data = irq_domain_get_irq_data(domain, i);
473474 irqd_clr_activated(irq_data);
474475 }
475476 }
476477 return 0;
477478
478479 cleanup:
479- for_each_msi_entry(desc, dev) {
480- struct irq_data *irqd;
481-
482- if (desc->irq == virq)
483- break;
484-
485- irqd = irq_domain_get_irq_data(domain, desc->irq);
486- if (irqd_is_activated(irqd))
487- irq_domain_deactivate_irq(irqd);
480+ for_each_msi_vector(desc, i, dev) {
481+ irq_data = irq_domain_get_irq_data(domain, i);
482+ if (irqd_is_activated(irq_data))
483+ irq_domain_deactivate_irq(irq_data);
488484 }
489485 msi_domain_free_irqs(domain, dev);
490486 return ret;
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1945,6 +1945,10 @@ int register_kretprobe(struct kretprobe *rp)
19451945 if (!kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset))
19461946 return -EINVAL;
19471947
1948+ /* If only rp->kp.addr is specified, check reregistering kprobes */
1949+ if (rp->kp.addr && check_kprobe_rereg(&rp->kp))
1950+ return -EINVAL;
1951+
19481952 if (kretprobe_blacklist_size) {
19491953 addr = kprobe_addr(&rp->kp);
19501954 if (IS_ERR(addr))
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2278,7 +2278,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
22782278 spinlock_t *ptl;
22792279 struct mm_struct *mm = vma->vm_mm;
22802280 unsigned long haddr = address & HPAGE_PMD_MASK;
2281- bool was_locked = false;
2281+ bool do_unlock_page = false;
22822282 pmd_t _pmd;
22832283
22842284 mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
@@ -2291,7 +2291,6 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
22912291 VM_BUG_ON(freeze && !page);
22922292 if (page) {
22932293 VM_WARN_ON_ONCE(!PageLocked(page));
2294- was_locked = true;
22952294 if (page != pmd_page(*pmd))
22962295 goto out;
22972296 }
@@ -2300,19 +2299,29 @@ repeat:
23002299 if (pmd_trans_huge(*pmd)) {
23012300 if (!page) {
23022301 page = pmd_page(*pmd);
2303- if (unlikely(!trylock_page(page))) {
2304- get_page(page);
2305- _pmd = *pmd;
2306- spin_unlock(ptl);
2307- lock_page(page);
2308- spin_lock(ptl);
2309- if (unlikely(!pmd_same(*pmd, _pmd))) {
2310- unlock_page(page);
2302+ /*
2303+ * An anonymous page must be locked, to ensure that a
2304+ * concurrent reuse_swap_page() sees stable mapcount;
2305+ * but reuse_swap_page() is not used on shmem or file,
2306+ * and page lock must not be taken when zap_pmd_range()
2307+ * calls __split_huge_pmd() while i_mmap_lock is held.
2308+ */
2309+ if (PageAnon(page)) {
2310+ if (unlikely(!trylock_page(page))) {
2311+ get_page(page);
2312+ _pmd = *pmd;
2313+ spin_unlock(ptl);
2314+ lock_page(page);
2315+ spin_lock(ptl);
2316+ if (unlikely(!pmd_same(*pmd, _pmd))) {
2317+ unlock_page(page);
2318+ put_page(page);
2319+ page = NULL;
2320+ goto repeat;
2321+ }
23112322 put_page(page);
2312- page = NULL;
2313- goto repeat;
23142323 }
2315- put_page(page);
2324+ do_unlock_page = true;
23162325 }
23172326 }
23182327 if (PageMlocked(page))
@@ -2322,7 +2331,7 @@ repeat:
23222331 __split_huge_pmd_locked(vma, pmd, haddr, freeze);
23232332 out:
23242333 spin_unlock(ptl);
2325- if (!was_locked && page)
2334+ if (do_unlock_page)
23262335 unlock_page(page);
23272336 /*
23282337 * No need to double call mmu_notifier->invalidate_range() callback.
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -68,6 +68,21 @@ DEFINE_SPINLOCK(hugetlb_lock);
6868 static int num_fault_mutexes;
6969 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
7070
71+static inline bool PageHugeFreed(struct page *head)
72+{
73+ return page_private(head + 4) == -1UL;
74+}
75+
76+static inline void SetPageHugeFreed(struct page *head)
77+{
78+ set_page_private(head + 4, -1UL);
79+}
80+
81+static inline void ClearPageHugeFreed(struct page *head)
82+{
83+ set_page_private(head + 4, 0);
84+}
85+
7186 /* Forward declaration */
7287 static int hugetlb_acct_memory(struct hstate *h, long delta);
7388
@@ -858,6 +873,7 @@ static void enqueue_huge_page(struct hstate *h, struct page *page)
858873 list_move(&page->lru, &h->hugepage_freelists[nid]);
859874 h->free_huge_pages++;
860875 h->free_huge_pages_node[nid]++;
876+ SetPageHugeFreed(page);
861877 }
862878
863879 static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
@@ -875,6 +891,7 @@ static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
875891 return NULL;
876892 list_move(&page->lru, &h->hugepage_activelist);
877893 set_page_refcounted(page);
894+ ClearPageHugeFreed(page);
878895 h->free_huge_pages--;
879896 h->free_huge_pages_node[nid]--;
880897 return page;
@@ -1196,12 +1213,11 @@ struct hstate *size_to_hstate(unsigned long size)
11961213 */
11971214 bool page_huge_active(struct page *page)
11981215 {
1199- VM_BUG_ON_PAGE(!PageHuge(page), page);
1200- return PageHead(page) && PagePrivate(&page[1]);
1216+ return PageHeadHuge(page) && PagePrivate(&page[1]);
12011217 }
12021218
12031219 /* never called for tail page */
1204-static void set_page_huge_active(struct page *page)
1220+void set_page_huge_active(struct page *page)
12051221 {
12061222 VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
12071223 SetPagePrivate(&page[1]);
@@ -1305,6 +1321,7 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
13051321 set_hugetlb_cgroup(page, NULL);
13061322 h->nr_huge_pages++;
13071323 h->nr_huge_pages_node[nid]++;
1324+ ClearPageHugeFreed(page);
13081325 spin_unlock(&hugetlb_lock);
13091326 }
13101327
@@ -1500,6 +1517,7 @@ int dissolve_free_huge_page(struct page *page)
15001517 {
15011518 int rc = -EBUSY;
15021519
1520+retry:
15031521 /* Not to disrupt normal path by vainly holding hugetlb_lock */
15041522 if (!PageHuge(page))
15051523 return 0;
@@ -1516,6 +1534,26 @@ int dissolve_free_huge_page(struct page *page)
15161534 int nid = page_to_nid(head);
15171535 if (h->free_huge_pages - h->resv_huge_pages == 0)
15181536 goto out;
1537+
1538+ /*
1539+ * We should make sure that the page is already on the free list
1540+ * when it is dissolved.
1541+ */
1542+ if (unlikely(!PageHugeFreed(head))) {
1543+ spin_unlock(&hugetlb_lock);
1544+ cond_resched();
1545+
1546+ /*
1547+ * Theoretically, we should return -EBUSY when we
1548+ * encounter this race. In fact, we have a chance
1549+ * to successfully dissolve the page if we do a
1550+ * retry. Because the race window is quite small.
1551+ * If we seize this opportunity, it is an optimization
1552+ * for increasing the success rate of dissolving page.
1553+ */
1554+ goto retry;
1555+ }
1556+
15191557 /*
15201558 * Move PageHWPoison flag from head page to the raw error page,
15211559 * which makes any subpages rather than the error page reusable.
@@ -4940,9 +4978,9 @@ bool isolate_huge_page(struct page *page, struct list_head *list)
49404978 {
49414979 bool ret = true;
49424980
4943- VM_BUG_ON_PAGE(!PageHead(page), page);
49444981 spin_lock(&hugetlb_lock);
4945- if (!page_huge_active(page) || !get_page_unless_zero(page)) {
4982+ if (!PageHeadHuge(page) || !page_huge_active(page) ||
4983+ !get_page_unless_zero(page)) {
49464984 ret = false;
49474985 goto unlock;
49484986 }
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -234,14 +234,6 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
234234 *
235235 * Find @size free area aligned to @align in the specified range and node.
236236 *
237- * When allocation direction is bottom-up, the @start should be greater
238- * than the end of the kernel image. Otherwise, it will be trimmed. The
239- * reason is that we want the bottom-up allocation just near the kernel
240- * image so it is highly likely that the allocated memory and the kernel
241- * will reside in the same node.
242- *
243- * If bottom-up allocation failed, will try to allocate memory top-down.
244- *
245237 * Return:
246238 * Found address on success, 0 on failure.
247239 */
@@ -250,8 +242,6 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
250242 phys_addr_t end, int nid,
251243 enum memblock_flags flags)
252244 {
253- phys_addr_t kernel_end, ret;
254-
255245 /* pump up @end */
256246 if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
257247 end == MEMBLOCK_ALLOC_KASAN)
@@ -260,40 +250,13 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
260250 /* avoid allocating the first page */
261251 start = max_t(phys_addr_t, start, PAGE_SIZE);
262252 end = max(start, end);
263- kernel_end = __pa_symbol(_end);
264-
265- /*
266- * try bottom-up allocation only when bottom-up mode
267- * is set and @end is above the kernel image.
268- */
269- if (memblock_bottom_up() && end > kernel_end) {
270- phys_addr_t bottom_up_start;
271-
272- /* make sure we will allocate above the kernel */
273- bottom_up_start = max(start, kernel_end);
274253
275- /* ok, try bottom-up allocation first */
276- ret = __memblock_find_range_bottom_up(bottom_up_start, end,
277- size, align, nid, flags);
278- if (ret)
279- return ret;
280-
281- /*
282- * we always limit bottom-up allocation above the kernel,
283- * but top-down allocation doesn't have the limit, so
284- * retrying top-down allocation may succeed when bottom-up
285- * allocation failed.
286- *
287- * bottom-up allocation is expected to be fail very rarely,
288- * so we use WARN_ONCE() here to see the stack trace if
289- * fail happens.
290- */
291- WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE),
292- "memblock: bottom-up allocation failed, memory hotremove may be affected\n");
293- }
294-
295- return __memblock_find_range_top_down(start, end, size, align, nid,
296- flags);
254+ if (memblock_bottom_up())
255+ return __memblock_find_range_bottom_up(start, end, size, align,
256+ nid, flags);
257+ else
258+ return __memblock_find_range_top_down(start, end, size, align,
259+ nid, flags);
297260 }
298261
299262 /**
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -330,7 +330,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
330330 }
331331
332332 dev->needed_headroom = t_hlen + hlen;
333- mtu -= (dev->hard_header_len + t_hlen);
333+ mtu -= t_hlen;
334334
335335 if (mtu < IPV4_MIN_MTU)
336336 mtu = IPV4_MIN_MTU;
@@ -360,7 +360,7 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net,
360360 nt = netdev_priv(dev);
361361 t_hlen = nt->hlen + sizeof(struct iphdr);
362362 dev->min_mtu = ETH_MIN_MTU;
363- dev->max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen;
363+ dev->max_mtu = IP_MAX_MTU - t_hlen;
364364 ip_tunnel_add(itn, nt);
365365 return nt;
366366
@@ -502,12 +502,11 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
502502 const struct iphdr *inner_iph)
503503 {
504504 struct ip_tunnel *tunnel = netdev_priv(dev);
505- int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len;
505+ int pkt_size = skb->len - tunnel->hlen;
506506 int mtu;
507507
508508 if (df)
509- mtu = dst_mtu(&rt->dst) - dev->hard_header_len
510- - sizeof(struct iphdr) - tunnel->hlen;
509+ mtu = dst_mtu(&rt->dst) - (sizeof(struct iphdr) + tunnel->hlen);
511510 else
512511 mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
513512
@@ -935,7 +934,7 @@ int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
935934 {
936935 struct ip_tunnel *tunnel = netdev_priv(dev);
937936 int t_hlen = tunnel->hlen + sizeof(struct iphdr);
938- int max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen;
937+ int max_mtu = IP_MAX_MTU - t_hlen;
939938
940939 if (new_mtu < ETH_MIN_MTU)
941940 return -EINVAL;
@@ -1112,10 +1111,9 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
11121111
11131112 mtu = ip_tunnel_bind_dev(dev);
11141113 if (tb[IFLA_MTU]) {
1115- unsigned int max = IP_MAX_MTU - dev->hard_header_len - nt->hlen;
1114+ unsigned int max = IP_MAX_MTU - (nt->hlen + sizeof(struct iphdr));
11161115
1117- mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU,
1118- (unsigned int)(max - sizeof(struct iphdr)));
1116+ mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU, max);
11191117 }
11201118
11211119 err = dev_set_mtu(dev, mtu);
--- a/net/lapb/lapb_out.c
+++ b/net/lapb/lapb_out.c
@@ -87,7 +87,8 @@ void lapb_kick(struct lapb_cb *lapb)
8787 skb = skb_dequeue(&lapb->write_queue);
8888
8989 do {
90- if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
90+ skbn = skb_copy(skb, GFP_ATOMIC);
91+ if (!skbn) {
9192 skb_queue_head(&lapb->write_queue, skb);
9293 break;
9394 }
--- a/net/mac80211/driver-ops.c
+++ b/net/mac80211/driver-ops.c
@@ -128,8 +128,11 @@ int drv_sta_state(struct ieee80211_local *local,
128128 } else if (old_state == IEEE80211_STA_AUTH &&
129129 new_state == IEEE80211_STA_ASSOC) {
130130 ret = drv_sta_add(local, sdata, &sta->sta);
131- if (ret == 0)
131+ if (ret == 0) {
132132 sta->uploaded = true;
133+ if (rcu_access_pointer(sta->sta.rates))
134+ drv_sta_rate_tbl_update(local, sdata, &sta->sta);
135+ }
133136 } else if (old_state == IEEE80211_STA_ASSOC &&
134137 new_state == IEEE80211_STA_AUTH) {
135138 drv_sta_remove(local, sdata, &sta->sta);
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -941,7 +941,8 @@ int rate_control_set_rates(struct ieee80211_hw *hw,
941941 if (old)
942942 kfree_rcu(old, rcu_head);
943943
944- drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta);
944+ if (sta->uploaded)
945+ drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta);
945946
946947 ieee80211_sta_set_expected_throughput(pubsta, sta_get_expected_throughput(sta));
947948
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -1010,7 +1010,7 @@ static int __init af_rxrpc_init(void)
10101010 goto error_security;
10111011 }
10121012
1013- ret = register_pernet_subsys(&rxrpc_net_ops);
1013+ ret = register_pernet_device(&rxrpc_net_ops);
10141014 if (ret)
10151015 goto error_pernet;
10161016
@@ -1055,7 +1055,7 @@ error_key_type:
10551055 error_sock:
10561056 proto_unregister(&rxrpc_proto);
10571057 error_proto:
1058- unregister_pernet_subsys(&rxrpc_net_ops);
1058+ unregister_pernet_device(&rxrpc_net_ops);
10591059 error_pernet:
10601060 rxrpc_exit_security();
10611061 error_security:
@@ -1077,7 +1077,7 @@ static void __exit af_rxrpc_exit(void)
10771077 unregister_key_type(&key_type_rxrpc);
10781078 sock_unregister(PF_RXRPC);
10791079 proto_unregister(&rxrpc_proto);
1080- unregister_pernet_subsys(&rxrpc_net_ops);
1080+ unregister_pernet_device(&rxrpc_net_ops);
10811081 ASSERTCMP(atomic_read(&rxrpc_n_tx_skbs), ==, 0);
10821082 ASSERTCMP(atomic_read(&rxrpc_n_rx_skbs), ==, 0);
10831083
Show on old repository browser