kernel

Revision | 19d4991aa6c9fc7543d0f5a626f935d83d276176 (tree)
---|---
Time | 2021-02-10 17:57:00
Author | Greg Kroah-Hartman <gregkh@goog...>
Committer | Greg Kroah-Hartman
Merge 4.19.175 into android-4.19-stable
Changes in 4.19.175
USB: serial: cp210x: add pid/vid for WSDA-200-USB
USB: serial: cp210x: add new VID/PID for supporting Teraoka AD2000
USB: serial: option: Adding support for Cinterion MV31
elfcore: fix building with clang
Input: i8042 - unbreak Pegatron C15B
rxrpc: Fix deadlock around release of dst cached on udp tunnel
arm64: dts: ls1046a: fix dcfg address range
net: lapb: Copy the skb before sending a packet
net: mvpp2: TCAM entry enable should be written after SRAM data
memblock: do not start bottom-up allocations with kernel_end
USB: gadget: legacy: fix an error code in eth_bind()
USB: usblp: don't call usb_set_interface if there's a single alt
usb: renesas_usbhs: Clear pipe running flag in usbhs_pkt_pop()
usb: dwc2: Fix endpoint direction check in ep_from_windex
usb: dwc3: fix clock issue during resume in OTG mode
ovl: fix dentry leak in ovl_get_redirect
mac80211: fix station rate table updates on assoc
kretprobe: Avoid re-registration of the same kretprobe earlier
genirq/msi: Activate Multi-MSI early when MSI_FLAG_ACTIVATE_EARLY is set
xhci: fix bounce buffer usage for non-sg list case
cifs: report error instead of invalid when revalidating a dentry fails
smb3: Fix out-of-bounds bug in SMB2_negotiate()
mmc: core: Limit retries when analyse of SDIO tuples fails
nvme-pci: avoid the deepest sleep state on Kingston A2000 SSDs
KVM: SVM: Treat SVM as unsupported when running as an SEV guest
ARM: footbridge: fix dc21285 PCI configuration accessors
mm: hugetlbfs: fix cannot migrate the fallocated HugeTLB page
mm: hugetlb: fix a race between freeing and dissolving the page
mm: hugetlb: fix a race between isolating and freeing page
mm: hugetlb: remove VM_BUG_ON_PAGE from page_huge_active
mm: thp: fix MADV_REMOVE deadlock on shmem THP
x86/build: Disable CET instrumentation in the kernel
x86/apic: Add extra serialization for non-serializing MSRs
Input: xpad - sync supported devices with fork on GitHub
iommu/vt-d: Do not use flush-queue when caching-mode is on
md: Set prev_flush_start and flush_bio in an atomic way
net: ip_tunnel: fix mtu calculation
net: dsa: mv88e6xxx: override existent unicast portvec in port_fdb_add
Linux 4.19.175
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Iac6d5c4ad079946ef1c032791e8e583b9264917b
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 174
+SUBLEVEL = 175
 EXTRAVERSION =
 NAME = "People's Front"

@@ -952,12 +952,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init)
 # change __FILE__ to the relative path from the srctree
 KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)

-# ensure -fcf-protection is disabled when using retpoline as it is
-# incompatible with -mindirect-branch=thunk-extern
-ifdef CONFIG_RETPOLINE
-KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
-endif
-
 # use the deterministic mode of AR if available
 KBUILD_ARFLAGS := $(call ar-option,D)

@@ -69,15 +69,15 @@ dc21285_read_config(struct pci_bus *bus, unsigned int devfn, int where,
     if (addr)
         switch (size) {
         case 1:
-            asm("ldrb %0, [%1, %2]"
+            asm volatile("ldrb %0, [%1, %2]"
                 : "=r" (v) : "r" (addr), "r" (where) : "cc");
             break;
         case 2:
-            asm("ldrh %0, [%1, %2]"
+            asm volatile("ldrh %0, [%1, %2]"
                 : "=r" (v) : "r" (addr), "r" (where) : "cc");
             break;
         case 4:
-            asm("ldr %0, [%1, %2]"
+            asm volatile("ldr %0, [%1, %2]"
                 : "=r" (v) : "r" (addr), "r" (where) : "cc");
             break;
         }
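Editor's note on the accessor hunk above: `asm volatile` is what stops the optimizer from deleting or hoisting an access whose result it believes is unused. A minimal userspace sketch of the failure mode (x86-64 GCC/Clang assumed; `read_probe` and the instruction are illustrative, not the footbridge code):

```c
#include <stdio.h>

static int read_probe(volatile int *addr)
{
    int v;
    /* Without "volatile", a plain asm whose output goes unused may be
     * elided at -O2, so the bus access (and any fault it should raise)
     * would silently never happen. */
    asm volatile("movl (%1), %0" : "=r" (v) : "r" (addr));
    return v;
}

int main(void)
{
    int word = 0x2a;
    printf("read back: %d\n", read_probe(&word));
    return 0;
}
```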
@@ -103,17 +103,17 @@ dc21285_write_config(struct pci_bus *bus, unsigned int devfn, int where,
     if (addr)
         switch (size) {
         case 1:
-            asm("strb %0, [%1, %2]"
+            asm volatile("strb %0, [%1, %2]"
                 : : "r" (value), "r" (addr), "r" (where)
                 : "cc");
             break;
         case 2:
-            asm("strh %0, [%1, %2]"
+            asm volatile("strh %0, [%1, %2]"
                 : : "r" (value), "r" (addr), "r" (where)
                 : "cc");
             break;
         case 4:
-            asm("str %0, [%1, %2]"
+            asm volatile("str %0, [%1, %2]"
                 : : "r" (value), "r" (addr), "r" (where)
                 : "cc");
             break;
@@ -303,7 +303,7 @@
 
         dcfg: dcfg@1ee0000 {
             compatible = "fsl,ls1046a-dcfg", "syscon";
-            reg = <0x0 0x1ee0000 0x0 0x10000>;
+            reg = <0x0 0x1ee0000 0x0 0x1000>;
             big-endian;
         };
 
@@ -132,6 +132,9 @@ else
         KBUILD_CFLAGS += -mno-red-zone
         KBUILD_CFLAGS += -mcmodel=kernel
 
+        # Intel CET isn't enabled in the kernel
+        KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
+
         # -funit-at-a-time shrinks the kernel .text considerably
         # unfortunately it makes reading oopses harder.
         KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time)
@@ -190,16 +190,6 @@ static inline void lapic_assign_legacy_vector(unsigned int i, bool r) { }
 #endif /* !CONFIG_X86_LOCAL_APIC */
 
 #ifdef CONFIG_X86_X2APIC
-/*
- * Make previous memory operations globally visible before
- * sending the IPI through x2apic wrmsr. We need a serializing instruction or
- * mfence for this.
- */
-static inline void x2apic_wrmsr_fence(void)
-{
-    asm volatile("mfence" : : : "memory");
-}
-
 static inline void native_apic_msr_write(u32 reg, u32 v)
 {
     if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR ||
@@ -85,4 +85,22 @@ do { \
 
 #include <asm-generic/barrier.h>
 
+/*
+ * Make previous memory operations globally visible before
+ * a WRMSR.
+ *
+ * MFENCE makes writes visible, but only affects load/store
+ * instructions. WRMSR is unfortunately not a load/store
+ * instruction and is unaffected by MFENCE. The LFENCE ensures
+ * that the WRMSR is not reordered.
+ *
+ * Most WRMSRs are full serializing instructions themselves and
+ * do not require this barrier. This is only required for the
+ * IA32_TSC_DEADLINE and X2APIC MSRs.
+ */
+static inline void weak_wrmsr_fence(void)
+{
+    asm volatile("mfence; lfence" : : : "memory");
+}
+
 #endif /* _ASM_X86_BARRIER_H */
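Editor's note: the new barrier can be exercised in userspace too. A sketch of the `weak_wrmsr_fence()` idea (x86-64 assumed; the store/flag pairing below stands in for the kernel's "make stores visible, then fire the WRMSR/IPI" sequence and is not kernel API):

```c
#include <stdio.h>

static volatile int payload;
static volatile int flag;

static inline void weak_wrmsr_fence(void)
{
    asm volatile("mfence; lfence" : : : "memory");
}

int main(void)
{
    payload = 42;           /* must be globally visible first */
    weak_wrmsr_fence();     /* drain stores, then fence execution */
    flag = 1;               /* stands in for the WRMSR/IPI trigger */
    printf("payload=%d flag=%d\n", payload, flag);
    return 0;
}
```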
@@ -41,6 +41,7 @@
 #include <asm/x86_init.h>
 #include <asm/pgalloc.h>
 #include <linux/atomic.h>
+#include <asm/barrier.h>
 #include <asm/mpspec.h>
 #include <asm/i8259.h>
 #include <asm/proto.h>
@@ -465,6 +466,9 @@ static int lapic_next_deadline(unsigned long delta,
 {
     u64 tsc;
 
+    /* This MSR is special and need a special fence: */
+    weak_wrmsr_fence();
+
     tsc = rdtsc();
     wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
     return 0;
@@ -31,7 +31,8 @@ static void x2apic_send_IPI(int cpu, int vector)
 {
     u32 dest = per_cpu(x86_cpu_to_logical_apicid, cpu);
 
-    x2apic_wrmsr_fence();
+    /* x2apic MSRs are special and need a special fence: */
+    weak_wrmsr_fence();
     __x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL);
 }
 
@@ -43,7 +44,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
     unsigned long flags;
     u32 dest;
 
-    x2apic_wrmsr_fence();
+    /* x2apic MSRs are special and need a special fence: */
+    weak_wrmsr_fence();
     local_irq_save(flags);
 
     tmpmsk = this_cpu_cpumask_var_ptr(ipi_mask);
@@ -48,7 +48,8 @@ static void x2apic_send_IPI(int cpu, int vector)
 {
     u32 dest = per_cpu(x86_cpu_to_apicid, cpu);
 
-    x2apic_wrmsr_fence();
+    /* x2apic MSRs are special and need a special fence: */
+    weak_wrmsr_fence();
     __x2apic_send_IPI_dest(dest, vector, APIC_DEST_PHYSICAL);
 }
 
@@ -59,7 +60,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
     unsigned long this_cpu;
     unsigned long flags;
 
-    x2apic_wrmsr_fence();
+    /* x2apic MSRs are special and need a special fence: */
+    weak_wrmsr_fence();
 
     local_irq_save(flags);
 
@@ -892,6 +892,11 @@ static int has_svm(void)
         return 0;
     }
 
+    if (sev_active()) {
+        pr_info("KVM is unsupported when running as an SEV guest\n");
+        return 0;
+    }
+
     return 1;
 }
 
@@ -229,9 +229,17 @@ static const struct xpad_device {
     { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
     { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
     { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE },
-    { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
+    { 0x0e6f, 0x02a0, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+    { 0x0e6f, 0x02a1, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+    { 0x0e6f, 0x02a2, "PDP Wired Controller for Xbox One - Crimson Red", 0, XTYPE_XBOXONE },
     { 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
     { 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE },
+    { 0x0e6f, 0x02a7, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+    { 0x0e6f, 0x02a8, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+    { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
+    { 0x0e6f, 0x02ad, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
+    { 0x0e6f, 0x02b3, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
+    { 0x0e6f, 0x02b8, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
     { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
     { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE },
     { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
@@ -310,6 +318,9 @@ static const struct xpad_device {
     { 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 },
     { 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 },
     { 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 },
+    { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE },
+    { 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 },
+    { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
     { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
     { 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 },
     { 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
@@ -443,8 +454,12 @@ static const struct usb_device_id xpad_table[] = {
     XPAD_XBOX360_VENDOR(0x162e),    /* Joytech X-Box 360 controllers */
     XPAD_XBOX360_VENDOR(0x1689),    /* Razer Onza */
     XPAD_XBOX360_VENDOR(0x1bad),    /* Harminix Rock Band Guitar and Drums */
+    XPAD_XBOX360_VENDOR(0x20d6),    /* PowerA Controllers */
+    XPAD_XBOXONE_VENDOR(0x20d6),    /* PowerA Controllers */
     XPAD_XBOX360_VENDOR(0x24c6),    /* PowerA Controllers */
     XPAD_XBOXONE_VENDOR(0x24c6),    /* PowerA Controllers */
+    XPAD_XBOXONE_VENDOR(0x2e24),    /* Hyperkin Duke X-Box One pad */
+    XPAD_XBOX360_VENDOR(0x2f24),    /* GameSir Controllers */
     { }
 };
 
@@ -223,6 +223,8 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
             DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
             DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
         },
+    },
+    {
         .matches = {
             DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"),
             DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"),
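Editor's note: the i8042 fix above is purely structural, a missing `}, {` had fused two DMI match sets into one entry that could never match. A toy sketch of the sentinel-terminated match-table pattern involved (names are illustrative, not the dmi API):

```c
#include <stdio.h>
#include <string.h>

struct toy_match {
    const char *vendor;
    const char *product;
};

/* Each entry must be closed before the next opens, and the table ends
 * with an empty sentinel that terminates the scan. */
static const struct toy_match quirk_table[] = {
    { "PEGATRON CORPORATION", "C15B" },
    { "ByteSpeed LLC", "ByteSpeed Laptop C15B" },
    { 0 }
};

static int table_matches(const char *vendor, const char *product)
{
    const struct toy_match *m;

    for (m = quirk_table; m->vendor; m++)
        if (!strcmp(m->vendor, vendor) && !strcmp(m->product, product))
            return 1;
    return 0;
}

int main(void)
{
    printf("%d\n", table_matches("PEGATRON CORPORATION", "C15B"));
    return 0;
}
```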
@@ -3364,6 +3364,12 @@ static int __init init_dmars(void)
 
         if (!ecap_pass_through(iommu->ecap))
             hw_pass_through = 0;
+
+        if (!intel_iommu_strict && cap_caching_mode(iommu->cap)) {
+            pr_info("Disable batched IOTLB flush due to virtualization");
+            intel_iommu_strict = 1;
+        }
+
 #ifdef CONFIG_INTEL_IOMMU_SVM
         if (pasid_enabled(iommu))
             intel_svm_init(iommu);
@@ -474,8 +474,10 @@ static void md_submit_flush_data(struct work_struct *ws)
      * could wait for this and below md_handle_request could wait for those
      * bios because of suspend check
      */
+    spin_lock_irq(&mddev->lock);
     mddev->last_flush = mddev->start_flush;
     mddev->flush_bio = NULL;
+    spin_unlock_irq(&mddev->lock);
     wake_up(&mddev->sb_wait);
 
     if (bio->bi_iter.bi_size == 0) {
@@ -24,6 +24,8 @@
 #include "sdio_cis.h"
 #include "sdio_ops.h"
 
+#define SDIO_READ_CIS_TIMEOUT_MS  (10 * 1000) /* 10s */
+
 static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func,
              const unsigned char *buf, unsigned size)
 {
@@ -270,6 +272,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
 
     do {
         unsigned char tpl_code, tpl_link;
+        unsigned long timeout = jiffies +
+            msecs_to_jiffies(SDIO_READ_CIS_TIMEOUT_MS);
 
         ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_code);
         if (ret)
@@ -322,6 +326,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
             prev = &this->next;
 
             if (ret == -ENOENT) {
+                if (time_after(jiffies, timeout))
+                    break;
                 /* warn about unknown tuples */
                 pr_warn_ratelimited("%s: queuing unknown"
                        " CIS tuple 0x%02x (%u bytes)\n",
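Editor's note: the bound added above relies on the kernel's wraparound-safe `time_after()`. A self-contained sketch of why the deadline test uses signed subtraction rather than a direct compare (runnable userspace C; the counter values are illustrative):

```c
#include <stdio.h>

/* jiffies-style counters wrap, so "a is after b" must be computed as
 * a signed difference, exactly like the kernel macro. */
#define time_after(a, b)  ((long)((b) - (a)) < 0)

int main(void)
{
    unsigned long jiffies = (unsigned long)-5;  /* about to wrap */
    unsigned long timeout = jiffies + 10;       /* wraps past zero */
    int i;

    for (i = 0; i < 20; i++, jiffies++)
        if (time_after(jiffies, timeout)) {
            printf("timed out at iteration %d\n", i);
            break;
        }
    return 0;
}
```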
@@ -1658,7 +1658,11 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
         if (!entry.portvec)
             entry.state = MV88E6XXX_G1_ATU_DATA_STATE_UNUSED;
     } else {
-        entry.portvec |= BIT(port);
+        if (state == MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC)
+            entry.portvec = BIT(port);
+        else
+            entry.portvec |= BIT(port);
+
         entry.state = state;
     }
 
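Editor's note: the fix above is about overwrite-versus-OR semantics on the port vector. A tiny illustration (pure C; the MV88E6XXX register layout is not modeled):

```c
#include <stdio.h>

#define BIT(n) (1UL << (n))

int main(void)
{
    unsigned long portvec = BIT(1); /* stale entry: MAC seen on port 1 */

    portvec |= BIT(3);  /* old code: ports 1 AND 3 - keeps forwarding
                         * to a port the MAC moved away from */
    printf("or-ed in : %#lx\n", portvec);

    portvec = BIT(3);   /* fixed code for static unicast: port 3 only */
    printf("replaced : %#lx\n", portvec);
    return 0;
}
```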
@@ -29,16 +29,16 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
     /* Clear entry invalidation bit */
     pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
 
-    /* Write tcam index - indirect access */
-    mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
-    for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
-        mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);
-
     /* Write sram index - indirect access */
     mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
     for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
         mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]);
 
+    /* Write tcam index - indirect access */
+    mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
+    for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
+        mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);
+
     return 0;
 }
 
@@ -2733,6 +2733,8 @@ static const struct pci_device_id nvme_id_table[] = {
     { PCI_DEVICE(0x1d1d, 0x2601),   /* CNEX Granby */
         .driver_data = NVME_QUIRK_LIGHTNVM, },
     { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
+    { PCI_DEVICE(0x2646, 0x2263),   /* KINGSTON A2000 NVMe SSD */
+        .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
     { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
     { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
     { 0, }
@@ -1327,14 +1327,17 @@ static int usblp_set_protocol(struct usblp *usblp, int protocol)
     if (protocol < USBLP_FIRST_PROTOCOL || protocol > USBLP_LAST_PROTOCOL)
         return -EINVAL;
 
-    alts = usblp->protocol[protocol].alt_setting;
-    if (alts < 0)
-        return -EINVAL;
-    r = usb_set_interface(usblp->dev, usblp->ifnum, alts);
-    if (r < 0) {
-        printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n",
-            alts, usblp->ifnum);
-        return r;
+    /* Don't unnecessarily set the interface if there's a single alt. */
+    if (usblp->intf->num_altsetting > 1) {
+        alts = usblp->protocol[protocol].alt_setting;
+        if (alts < 0)
+            return -EINVAL;
+        r = usb_set_interface(usblp->dev, usblp->ifnum, alts);
+        if (r < 0) {
+            printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n",
+                alts, usblp->ifnum);
+            return r;
+        }
     }
 
     usblp->bidir = (usblp->protocol[protocol].epread != NULL);
@@ -1453,7 +1453,6 @@ static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep,
 static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
                                             u32 windex)
 {
-    struct dwc2_hsotg_ep *ep;
     int dir = (windex & USB_DIR_IN) ? 1 : 0;
     int idx = windex & 0x7F;
 
@@ -1463,12 +1462,7 @@ static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
     if (idx > hsotg->num_of_eps)
         return NULL;
 
-    ep = index_to_ep(hsotg, idx, dir);
-
-    if (idx && ep->dir_in != dir)
-        return NULL;
-
-    return ep;
+    return index_to_ep(hsotg, idx, dir);
 }
 
 /**
@@ -1700,7 +1700,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
         if (PMSG_IS_AUTO(msg))
             break;
 
-        ret = dwc3_core_init(dwc);
+        ret = dwc3_core_init_for_resume(dwc);
         if (ret)
             return ret;
 
@@ -403,8 +403,10 @@ static int eth_bind(struct usb_composite_dev *cdev)
         struct usb_descriptor_header *usb_desc;
 
         usb_desc = usb_otg_descriptor_alloc(gadget);
-        if (!usb_desc)
+        if (!usb_desc) {
+            status = -ENOMEM;
             goto fail1;
+        }
         usb_otg_descriptor_init(gadget, usb_desc);
         otg_desc[0] = usb_desc;
         otg_desc[1] = NULL;
@@ -670,11 +670,16 @@ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
     dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
              DMA_FROM_DEVICE);
     /* for in tranfers we need to copy the data from bounce to sg */
-    len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
-                 seg->bounce_len, seg->bounce_offs);
-    if (len != seg->bounce_len)
-        xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
-                len, seg->bounce_len);
+    if (urb->num_sgs) {
+        len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
+                       seg->bounce_len, seg->bounce_offs);
+        if (len != seg->bounce_len)
+            xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
+                  len, seg->bounce_len);
+    } else {
+        memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf,
+               seg->bounce_len);
+    }
     seg->bounce_len = 0;
     seg->bounce_offs = 0;
 }
@@ -3180,12 +3185,16 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
 
     /* create a max max_pkt sized bounce buffer pointed to by last trb */
     if (usb_urb_dir_out(urb)) {
-        len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
-                   seg->bounce_buf, new_buff_len, enqd_len);
-        if (len != new_buff_len)
-            xhci_warn(xhci,
-                "WARN Wrong bounce buffer write length: %zu != %d\n",
-                len, new_buff_len);
+        if (urb->num_sgs) {
+            len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
+                         seg->bounce_buf, new_buff_len, enqd_len);
+            if (len != new_buff_len)
+                xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n",
+                      len, new_buff_len);
+        } else {
+            memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len);
+        }
+
        seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
                          max_pkt, DMA_TO_DEVICE);
     } else {
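Editor's note: both xhci hunks above add the same fallback, since the sg copy helpers move zero bytes when `urb->num_sgs` is 0. A schematic of the decision (userspace sketch; `fake_urb` is a stand-in, not the real `struct urb`):

```c
#include <stdio.h>
#include <string.h>

struct fake_urb {
    int num_sgs;                    /* 0 means contiguous buffer */
    unsigned char *transfer_buffer;
};

static void copy_to_bounce(struct fake_urb *urb, unsigned char *bounce,
                           size_t off, size_t len)
{
    if (urb->num_sgs) {
        /* real driver: sg_pcopy_to_buffer(urb->sg, ...) */
    } else {
        /* the fix: contiguous URBs must use plain memcpy() */
        memcpy(bounce, urb->transfer_buffer + off, len);
    }
}

int main(void)
{
    unsigned char data[8] = "abcdefg", bounce[4];
    struct fake_urb urb = { .num_sgs = 0, .transfer_buffer = data };

    copy_to_bounce(&urb, bounce, 2, 3);
    printf("%.3s\n", bounce);   /* prints "cde" */
    return 0;
}
```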
@@ -126,6 +126,7 @@ struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
         }
 
         usbhs_pipe_clear_without_sequence(pipe, 0, 0);
+        usbhs_pipe_running(pipe, 0);
 
         __usbhsf_pkt_del(pkt);
     }
@@ -61,6 +61,7 @@ static const struct usb_device_id id_table[] = {
     { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
     { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
     { USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */
+    { USB_DEVICE(0x0988, 0x0578) }, /* Teraoka AD2000 */
     { USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */
     { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
     { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
@@ -201,6 +202,7 @@ static const struct usb_device_id id_table[] = {
     { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
     { USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */
     { USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */
+    { USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */
     { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
     { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
     { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
@@ -425,6 +425,8 @@ static void option_instat_callback(struct urb *urb);
 #define CINTERION_PRODUCT_AHXX_2RMNET       0x0084
 #define CINTERION_PRODUCT_AHXX_AUDIO        0x0085
 #define CINTERION_PRODUCT_CLS8              0x00b0
+#define CINTERION_PRODUCT_MV31_MBIM         0x00b3
+#define CINTERION_PRODUCT_MV31_RMNET        0x00b7
 
 /* Olivetti products */
 #define OLIVETTI_VENDOR_ID                  0x0b3c
@@ -1914,6 +1916,10 @@ static const struct usb_device_id option_ids[] = {
     { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
     { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
     { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
+    { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_MBIM, 0xff),
+      .driver_info = RSVD(3)},
+    { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff),
+      .driver_info = RSVD(0)},
     { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
         .driver_info = RSVD(4) },
     { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
@@ -190,7 +190,7 @@ static int __init afs_init(void)
         goto error_cache;
 #endif
 
-    ret = register_pernet_subsys(&afs_net_ops);
+    ret = register_pernet_device(&afs_net_ops);
     if (ret < 0)
         goto error_net;
 
@@ -210,7 +210,7 @@ static int __init afs_init(void)
 error_proc:
     afs_fs_exit();
 error_fs:
-    unregister_pernet_subsys(&afs_net_ops);
+    unregister_pernet_device(&afs_net_ops);
 error_net:
 #ifdef CONFIG_AFS_FSCACHE
     fscache_unregister_netfs(&afs_cache_netfs);
@@ -241,7 +241,7 @@ static void __exit afs_exit(void)
 
     proc_remove(afs_proc_symlink);
     afs_fs_exit();
-    unregister_pernet_subsys(&afs_net_ops);
+    unregister_pernet_device(&afs_net_ops);
 #ifdef CONFIG_AFS_FSCACHE
     fscache_unregister_netfs(&afs_cache_netfs);
 #endif
@@ -840,6 +840,7 @@ static int
 cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
 {
     struct inode *inode;
+    int rc;
 
     if (flags & LOOKUP_RCU)
         return -ECHILD;
@@ -849,8 +850,25 @@ cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
         if ((flags & LOOKUP_REVAL) && !CIFS_CACHE_READ(CIFS_I(inode)))
             CIFS_I(inode)->time = 0; /* force reval */
 
-        if (cifs_revalidate_dentry(direntry))
-            return 0;
+        rc = cifs_revalidate_dentry(direntry);
+        if (rc) {
+            cifs_dbg(FYI, "cifs_revalidate_dentry failed with rc=%d", rc);
+            switch (rc) {
+            case -ENOENT:
+            case -ESTALE:
+                /*
+                 * Those errors mean the dentry is invalid
+                 * (file was deleted or recreated)
+                 */
+                return 0;
+            default:
+                /*
+                 * Otherwise some unexpected error happened
+                 * report it as-is to VFS layer
+                 */
+                return rc;
+            }
+        }
         else {
             /*
              * If the inode wasn't known to be a dfs entry when
@@ -222,7 +222,7 @@ struct smb2_negotiate_req {
     __le32 NegotiateContextOffset; /* SMB3.1.1 only. MBZ earlier */
     __le16 NegotiateContextCount;  /* SMB3.1.1 only. MBZ earlier */
     __le16 Reserved2;
-    __le16 Dialects[1]; /* One dialect (vers=) at a time for now */
+    __le16 Dialects[4]; /* BB expand this if autonegotiate > 4 dialects */
 } __packed;
 
 /* Dialects */
@@ -654,9 +654,10 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
 
         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 
+        set_page_huge_active(page);
         /*
         * unlock_page because locked by add_to_page_cache()
-        * page_put due to reference from alloc_huge_page()
+        * put_page() due to reference from alloc_huge_page()
         */
         unlock_page(page);
         put_page(page);
@@ -949,8 +949,8 @@ static char *ovl_get_redirect(struct dentry *dentry, bool abs_redirect)
 
         buflen -= thislen;
         memcpy(&buf[buflen], name, thislen);
-        tmp = dget_dlock(d->d_parent);
         spin_unlock(&d->d_lock);
+        tmp = dget_parent(d);
 
         dput(d);
         d = tmp;
@@ -58,6 +58,7 @@ static inline int elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregse
 }
 #endif
 
+#if defined(CONFIG_UM) || defined(CONFIG_IA64)
 /*
  * These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out
  * extra segments containing the gate DSO contents.  Dumping its
@@ -72,5 +73,26 @@ elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset);
 extern int
 elf_core_write_extra_data(struct coredump_params *cprm);
 extern size_t elf_core_extra_data_size(void);
+#else
+static inline Elf_Half elf_core_extra_phdrs(void)
+{
+    return 0;
+}
+
+static inline int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
+{
+    return 1;
+}
+
+static inline int elf_core_write_extra_data(struct coredump_params *cprm)
+{
+    return 1;
+}
+
+static inline size_t elf_core_extra_data_size(void)
+{
+    return 0;
+}
+#endif
 
 #endif /* _LINUX_ELFCORE_H */
@@ -541,6 +541,9 @@ static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr
     set_huge_pte_at(mm, addr, ptep, pte);
 }
 #endif
+
+void set_page_huge_active(struct page *page);
+
 #else /* CONFIG_HUGETLB_PAGE */
 struct hstate {};
 #define alloc_huge_page(v, a, r) NULL
@@ -118,6 +118,12 @@ struct msi_desc {
     list_for_each_entry((desc), dev_to_msi_list((dev)), list)
 #define for_each_msi_entry_safe(desc, tmp, dev) \
     list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list)
+#define for_each_msi_vector(desc, __irq, dev)               \
+    for_each_msi_entry((desc), (dev))                       \
+        if ((desc)->irq)                                    \
+            for (__irq = (desc)->irq;                       \
+                 __irq < ((desc)->irq + (desc)->nvec_used); \
+                 __irq++)
 
 #ifdef CONFIG_PCI_MSI
 #define first_pci_msi_entry(pdev)   first_msi_entry(&(pdev)->dev)
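Editor's note: the new `for_each_msi_vector()` nests a per-vector loop inside the per-descriptor walk, skipping descriptors with no irq. A userspace model of the same iterator shape (the descriptor array and outer loop are toy stand-ins for the kernel's msi_desc list):

```c
#include <stdio.h>

struct toy_desc {
    unsigned int irq;       /* 0: not allocated */
    unsigned int nvec_used;
};

#define for_each_desc(d, v, n) \
    for ((d) = (v); (d) < (v) + (n); (d)++)

/* same shape as for_each_msi_vector(): entry walk, guard, vector walk */
#define for_each_vector(d, __irq, v, n)                     \
    for_each_desc((d), (v), (n))                            \
        if ((d)->irq)                                       \
            for (__irq = (d)->irq;                          \
                 __irq < (d)->irq + (d)->nvec_used;         \
                 __irq++)

int main(void)
{
    struct toy_desc descs[] = { { 32, 2 }, { 0, 0 }, { 40, 1 } };
    struct toy_desc *d;
    unsigned int irq;

    for_each_vector(d, irq, descs, 3)
        printf("activate irq %u\n", irq);   /* 32, 33, 40 */
    return 0;
}
```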
@@ -98,7 +98,6 @@ obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
 obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o
 obj-$(CONFIG_TRACEPOINTS) += tracepoint.o
 obj-$(CONFIG_LATENCYTOP) += latencytop.o
-obj-$(CONFIG_ELFCORE) += elfcore.o
 obj-$(CONFIG_FUNCTION_TRACER) += trace/
 obj-$(CONFIG_TRACING) += trace/
 obj-$(CONFIG_TRACE_CLOCK) += trace/
@@ -1,26 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/elf.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/binfmts.h>
-#include <linux/elfcore.h>
-
-Elf_Half __weak elf_core_extra_phdrs(void)
-{
-    return 0;
-}
-
-int __weak elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
-{
-    return 1;
-}
-
-int __weak elf_core_write_extra_data(struct coredump_params *cprm)
-{
-    return 1;
-}
-
-size_t __weak elf_core_extra_data_size(void)
-{
-    return 0;
-}
@@ -437,22 +437,22 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
 
     can_reserve = msi_check_reservation_mode(domain, info, dev);
 
-    for_each_msi_entry(desc, dev) {
-        virq = desc->irq;
-        if (desc->nvec_used == 1)
-            dev_dbg(dev, "irq %d for MSI\n", virq);
-        else
+    /*
+     * This flag is set by the PCI layer as we need to activate
+     * the MSI entries before the PCI layer enables MSI in the
+     * card. Otherwise the card latches a random msi message.
+     */
+    if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY))
+        goto skip_activate;
+
+    for_each_msi_vector(desc, i, dev) {
+        if (desc->irq == i) {
+            virq = desc->irq;
             dev_dbg(dev, "irq [%d-%d] for MSI\n",
                 virq, virq + desc->nvec_used - 1);
-        /*
-         * This flag is set by the PCI layer as we need to activate
-         * the MSI entries before the PCI layer enables MSI in the
-         * card. Otherwise the card latches a random msi message.
-         */
-        if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY))
-            continue;
+        }
 
-        irq_data = irq_domain_get_irq_data(domain, desc->irq);
+        irq_data = irq_domain_get_irq_data(domain, i);
         if (!can_reserve) {
             irqd_clr_can_reserve(irq_data);
             if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK)
@@ -463,28 +463,24 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
             goto cleanup;
     }
 
+skip_activate:
     /*
     * If these interrupts use reservation mode, clear the activated bit
     * so request_irq() will assign the final vector.
     */
     if (can_reserve) {
-        for_each_msi_entry(desc, dev) {
-            irq_data = irq_domain_get_irq_data(domain, desc->irq);
+        for_each_msi_vector(desc, i, dev) {
+            irq_data = irq_domain_get_irq_data(domain, i);
             irqd_clr_activated(irq_data);
         }
     }
     return 0;
 
 cleanup:
-    for_each_msi_entry(desc, dev) {
-        struct irq_data *irqd;
-
-        if (desc->irq == virq)
-            break;
-
-        irqd = irq_domain_get_irq_data(domain, desc->irq);
-        if (irqd_is_activated(irqd))
-            irq_domain_deactivate_irq(irqd);
+    for_each_msi_vector(desc, i, dev) {
+        irq_data = irq_domain_get_irq_data(domain, i);
+        if (irqd_is_activated(irq_data))
+            irq_domain_deactivate_irq(irq_data);
     }
     msi_domain_free_irqs(domain, dev);
     return ret;
@@ -1945,6 +1945,10 @@ int register_kretprobe(struct kretprobe *rp)
     if (!kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset))
         return -EINVAL;
 
+    /* If only rp->kp.addr is specified, check reregistering kprobes */
+    if (rp->kp.addr && check_kprobe_rereg(&rp->kp))
+        return -EINVAL;
+
     if (kretprobe_blacklist_size) {
         addr = kprobe_addr(&rp->kp);
         if (IS_ERR(addr))
@@ -2278,7 +2278,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
     spinlock_t *ptl;
     struct mm_struct *mm = vma->vm_mm;
     unsigned long haddr = address & HPAGE_PMD_MASK;
-    bool was_locked = false;
+    bool do_unlock_page = false;
    pmd_t _pmd;
 
     mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
@@ -2291,7 +2291,6 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
     VM_BUG_ON(freeze && !page);
     if (page) {
         VM_WARN_ON_ONCE(!PageLocked(page));
-        was_locked = true;
         if (page != pmd_page(*pmd))
             goto out;
     }
@@ -2300,19 +2299,29 @@ repeat:
     if (pmd_trans_huge(*pmd)) {
         if (!page) {
             page = pmd_page(*pmd);
-            if (unlikely(!trylock_page(page))) {
-                get_page(page);
-                _pmd = *pmd;
-                spin_unlock(ptl);
-                lock_page(page);
-                spin_lock(ptl);
-                if (unlikely(!pmd_same(*pmd, _pmd))) {
-                    unlock_page(page);
+            /*
+             * An anonymous page must be locked, to ensure that a
+             * concurrent reuse_swap_page() sees stable mapcount;
+             * but reuse_swap_page() is not used on shmem or file,
+             * and page lock must not be taken when zap_pmd_range()
+             * calls __split_huge_pmd() while i_mmap_lock is held.
+             */
+            if (PageAnon(page)) {
+                if (unlikely(!trylock_page(page))) {
+                    get_page(page);
+                    _pmd = *pmd;
+                    spin_unlock(ptl);
+                    lock_page(page);
+                    spin_lock(ptl);
+                    if (unlikely(!pmd_same(*pmd, _pmd))) {
+                        unlock_page(page);
+                        put_page(page);
+                        page = NULL;
+                        goto repeat;
+                    }
                     put_page(page);
-                page = NULL;
-                goto repeat;
                 }
-            put_page(page);
+                do_unlock_page = true;
             }
         }
     }
     if (PageMlocked(page))
@@ -2322,7 +2331,7 @@ repeat:
         __split_huge_pmd_locked(vma, pmd, haddr, freeze);
 out:
     spin_unlock(ptl);
-    if (!was_locked && page)
+    if (do_unlock_page)
         unlock_page(page);
     /*
     * No need to double call mmu_notifier->invalidate_range() callback.
@@ -68,6 +68,21 @@ DEFINE_SPINLOCK(hugetlb_lock);
 static int num_fault_mutexes;
 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
 
+static inline bool PageHugeFreed(struct page *head)
+{
+    return page_private(head + 4) == -1UL;
+}
+
+static inline void SetPageHugeFreed(struct page *head)
+{
+    set_page_private(head + 4, -1UL);
+}
+
+static inline void ClearPageHugeFreed(struct page *head)
+{
+    set_page_private(head + 4, 0);
+}
+
 /* Forward declaration */
 static int hugetlb_acct_memory(struct hstate *h, long delta);
 
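Editor's note: the helpers above stash a "this hugepage is on the free list" flag in an otherwise-unused field of a subpage, so the freeing/dissolving race can be detected. A minimal model of the trick (the page array and `private` slot are stand-ins for `struct page` internals, not the real layout):

```c
#include <stdio.h>

struct toy_page { unsigned long private; };

/* flag lives in subpage 4's private word, mirroring the hunk above */
static int  PageHugeFreed(struct toy_page *head)      { return head[4].private == (unsigned long)-1; }
static void SetPageHugeFreed(struct toy_page *head)   { head[4].private = (unsigned long)-1; }
static void ClearPageHugeFreed(struct toy_page *head) { head[4].private = 0; }

int main(void)
{
    struct toy_page huge[8] = {{ 0 }};  /* head page + tail pages */

    SetPageHugeFreed(huge);             /* enqueue_huge_page() path */
    printf("freed? %d\n", PageHugeFreed(huge));
    ClearPageHugeFreed(huge);           /* dequeue/prep path */
    printf("freed? %d\n", PageHugeFreed(huge));
    return 0;
}
```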
@@ -858,6 +873,7 @@ static void enqueue_huge_page(struct hstate *h, struct page *page)
     list_move(&page->lru, &h->hugepage_freelists[nid]);
     h->free_huge_pages++;
     h->free_huge_pages_node[nid]++;
+    SetPageHugeFreed(page);
 }
 
 static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
@@ -875,6 +891,7 @@ static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
         return NULL;
     list_move(&page->lru, &h->hugepage_activelist);
     set_page_refcounted(page);
+    ClearPageHugeFreed(page);
     h->free_huge_pages--;
     h->free_huge_pages_node[nid]--;
     return page;
@@ -1196,12 +1213,11 @@ struct hstate *size_to_hstate(unsigned long size)
  */
 bool page_huge_active(struct page *page)
 {
-    VM_BUG_ON_PAGE(!PageHuge(page), page);
-    return PageHead(page) && PagePrivate(&page[1]);
+    return PageHeadHuge(page) && PagePrivate(&page[1]);
 }
 
 /* never called for tail page */
-static void set_page_huge_active(struct page *page)
+void set_page_huge_active(struct page *page)
 {
     VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
     SetPagePrivate(&page[1]);
@@ -1305,6 +1321,7 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
     set_hugetlb_cgroup(page, NULL);
     h->nr_huge_pages++;
     h->nr_huge_pages_node[nid]++;
+    ClearPageHugeFreed(page);
     spin_unlock(&hugetlb_lock);
 }
 
@@ -1500,6 +1517,7 @@ int dissolve_free_huge_page(struct page *page)
 {
     int rc = -EBUSY;
 
+retry:
     /* Not to disrupt normal path by vainly holding hugetlb_lock */
     if (!PageHuge(page))
         return 0;
@@ -1516,6 +1534,26 @@ int dissolve_free_huge_page(struct page *page)
         int nid = page_to_nid(head);
         if (h->free_huge_pages - h->resv_huge_pages == 0)
             goto out;
+
+        /*
+         * We should make sure that the page is already on the free list
+         * when it is dissolved.
+         */
+        if (unlikely(!PageHugeFreed(head))) {
+            spin_unlock(&hugetlb_lock);
+            cond_resched();
+
+            /*
+             * Theoretically, we should return -EBUSY when we
+             * encounter this race. In fact, we have a chance
+             * to successfully dissolve the page if we do a
+             * retry. Because the race window is quite small.
+             * If we seize this opportunity, it is an optimization
+             * for increasing the success rate of dissolving page.
+             */
+            goto retry;
+        }
+
         /*
         * Move PageHWPoison flag from head page to the raw error page,
         * which makes any subpages rather than the error page reusable.
@@ -4940,9 +4978,9 @@ bool isolate_huge_page(struct page *page, struct list_head *list)
 {
     bool ret = true;
 
-    VM_BUG_ON_PAGE(!PageHead(page), page);
     spin_lock(&hugetlb_lock);
-    if (!page_huge_active(page) || !get_page_unless_zero(page)) {
+    if (!PageHeadHuge(page) || !page_huge_active(page) ||
+        !get_page_unless_zero(page)) {
         ret = false;
         goto unlock;
     }
@@ -234,14 +234,6 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
  *
  * Find @size free area aligned to @align in the specified range and node.
 *
- * When allocation direction is bottom-up, the @start should be greater
- * than the end of the kernel image. Otherwise, it will be trimmed. The
- * reason is that we want the bottom-up allocation just near the kernel
- * image so it is highly likely that the allocated memory and the kernel
- * will reside in the same node.
- *
- * If bottom-up allocation failed, will try to allocate memory top-down.
- *
  * Return:
  * Found address on success, 0 on failure.
  */
@@ -250,8 +242,6 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
                     phys_addr_t end, int nid,
                     enum memblock_flags flags)
 {
-    phys_addr_t kernel_end, ret;
-
     /* pump up @end */
     if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
         end == MEMBLOCK_ALLOC_KASAN)
@@ -260,40 +250,13 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
     /* avoid allocating the first page */
     start = max_t(phys_addr_t, start, PAGE_SIZE);
     end = max(start, end);
-    kernel_end = __pa_symbol(_end);
-
-    /*
-     * try bottom-up allocation only when bottom-up mode
-     * is set and @end is above the kernel image.
-     */
-    if (memblock_bottom_up() && end > kernel_end) {
-        phys_addr_t bottom_up_start;
-
-        /* make sure we will allocate above the kernel */
-        bottom_up_start = max(start, kernel_end);
 
-        /* ok, try bottom-up allocation first */
-        ret = __memblock_find_range_bottom_up(bottom_up_start, end,
-                              size, align, nid, flags);
-        if (ret)
-            return ret;
-
-        /*
-         * we always limit bottom-up allocation above the kernel,
-         * but top-down allocation doesn't have the limit, so
-         * retrying top-down allocation may succeed when bottom-up
-         * allocation failed.
-         *
-         * bottom-up allocation is expected to be fail very rarely,
-         * so we use WARN_ONCE() here to see the stack trace if
-         * fail happens.
-         */
-        WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE),
-              "memblock: bottom-up allocation failed, memory hotremove may be affected\n");
-    }
-
-    return __memblock_find_range_top_down(start, end, size, align, nid,
-                          flags);
+    if (memblock_bottom_up())
+        return __memblock_find_range_bottom_up(start, end, size, align,
+                               nid, flags);
+    else
+        return __memblock_find_range_top_down(start, end, size, align,
+                              nid, flags);
 }
 
 /**
@@ -330,7 +330,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
     }
 
     dev->needed_headroom = t_hlen + hlen;
-    mtu -= (dev->hard_header_len + t_hlen);
+    mtu -= t_hlen;
 
     if (mtu < IPV4_MIN_MTU)
         mtu = IPV4_MIN_MTU;
@@ -360,7 +360,7 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net,
     nt = netdev_priv(dev);
     t_hlen = nt->hlen + sizeof(struct iphdr);
     dev->min_mtu = ETH_MIN_MTU;
-    dev->max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen;
+    dev->max_mtu = IP_MAX_MTU - t_hlen;
     ip_tunnel_add(itn, nt);
     return nt;
 
@@ -502,12 +502,11 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
                 const struct iphdr *inner_iph)
 {
     struct ip_tunnel *tunnel = netdev_priv(dev);
-    int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len;
+    int pkt_size = skb->len - tunnel->hlen;
     int mtu;
 
     if (df)
-        mtu = dst_mtu(&rt->dst) - dev->hard_header_len
-                    - sizeof(struct iphdr) - tunnel->hlen;
+        mtu = dst_mtu(&rt->dst) - (sizeof(struct iphdr) + tunnel->hlen);
     else
         mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
 
@@ -935,7 +934,7 @@ int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
 {
     struct ip_tunnel *tunnel = netdev_priv(dev);
     int t_hlen = tunnel->hlen + sizeof(struct iphdr);
-    int max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen;
+    int max_mtu = IP_MAX_MTU - t_hlen;
 
     if (new_mtu < ETH_MIN_MTU)
         return -EINVAL;
@@ -1112,10 +1111,9 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
 
     mtu = ip_tunnel_bind_dev(dev);
     if (tb[IFLA_MTU]) {
-        unsigned int max = IP_MAX_MTU - dev->hard_header_len - nt->hlen;
+        unsigned int max = IP_MAX_MTU - (nt->hlen + sizeof(struct iphdr));
 
-        mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU,
-                (unsigned int)(max - sizeof(struct iphdr)));
+        mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU, max);
     }
 
     err = dev_set_mtu(dev, mtu);
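Editor's note: all five ip_tunnel hunks converge on the same arithmetic, only the tunnel header cost (`t_hlen` = inner encapsulation plus the IP header) is subtracted, and `dev->hard_header_len` no longer appears. A worked instance of the math (the 4-byte encapsulation is illustrative, e.g. a base GRE header):

```c
#include <stdio.h>

#define IP_MAX_MTU  0xFFFFU
#define IPHDR_LEN   20U

int main(void)
{
    unsigned int tunnel_hlen = 4;                   /* e.g. GRE base header */
    unsigned int t_hlen = tunnel_hlen + IPHDR_LEN;  /* total encap cost */

    printf("max_mtu = %u\n", IP_MAX_MTU - t_hlen);  /* 65511 */
    return 0;
}
```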
@@ -87,7 +87,8 @@ void lapb_kick(struct lapb_cb *lapb)
         skb = skb_dequeue(&lapb->write_queue);
 
         do {
-            if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
+            skbn = skb_copy(skb, GFP_ATOMIC);
+            if (!skbn) {
                 skb_queue_head(&lapb->write_queue, skb);
                 break;
             }
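Editor's note: the lapb change swaps `skb_clone()` for `skb_copy()` because a clone shares its data buffer, so a lower layer writing into the transmitted "copy" also corrupts the frame kept on the write queue for retransmission. A toy refcounted-buffer model of the difference (not the real sk_buff API):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf { char *data; };

static struct buf *buf_clone(struct buf *b) /* shares data, like skb_clone */
{
    struct buf *n = malloc(sizeof(*n));
    n->data = b->data;
    return n;
}

static struct buf *buf_copy(struct buf *b)  /* private data, like skb_copy */
{
    struct buf *n = malloc(sizeof(*n));
    n->data = strdup(b->data);
    return n;
}

int main(void)
{
    struct buf orig = { strdup("queued frame") };
    struct buf *tx = buf_copy(&orig);   /* buf_clone() would alias */

    tx->data[0] = 'X';                  /* the driver scribbles here */
    printf("original still: %s\n", orig.data);
    free(tx->data); free(tx); free(orig.data);
    return 0;
}
```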
@@ -128,8 +128,11 @@ int drv_sta_state(struct ieee80211_local *local,
     } else if (old_state == IEEE80211_STA_AUTH &&
                new_state == IEEE80211_STA_ASSOC) {
         ret = drv_sta_add(local, sdata, &sta->sta);
-        if (ret == 0)
+        if (ret == 0) {
             sta->uploaded = true;
+            if (rcu_access_pointer(sta->sta.rates))
+                drv_sta_rate_tbl_update(local, sdata, &sta->sta);
+        }
     } else if (old_state == IEEE80211_STA_ASSOC &&
                new_state == IEEE80211_STA_AUTH) {
         drv_sta_remove(local, sdata, &sta->sta);
@@ -941,7 +941,8 @@ int rate_control_set_rates(struct ieee80211_hw *hw,
     if (old)
         kfree_rcu(old, rcu_head);
 
-    drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta);
+    if (sta->uploaded)
+        drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta);
 
     ieee80211_sta_set_expected_throughput(pubsta, sta_get_expected_throughput(sta));
 
@@ -1010,7 +1010,7 @@ static int __init af_rxrpc_init(void)
         goto error_security;
     }
 
-    ret = register_pernet_subsys(&rxrpc_net_ops);
+    ret = register_pernet_device(&rxrpc_net_ops);
     if (ret)
         goto error_pernet;
 
@@ -1055,7 +1055,7 @@ error_key_type:
 error_sock:
     proto_unregister(&rxrpc_proto);
 error_proto:
-    unregister_pernet_subsys(&rxrpc_net_ops);
+    unregister_pernet_device(&rxrpc_net_ops);
 error_pernet:
     rxrpc_exit_security();
 error_security:
@@ -1077,7 +1077,7 @@ static void __exit af_rxrpc_exit(void)
     unregister_key_type(&key_type_rxrpc);
     sock_unregister(PF_RXRPC);
     proto_unregister(&rxrpc_proto);
-    unregister_pernet_subsys(&rxrpc_net_ops);
+    unregister_pernet_device(&rxrpc_net_ops);
     ASSERTCMP(atomic_read(&rxrpc_n_tx_skbs), ==, 0);
     ASSERTCMP(atomic_read(&rxrpc_n_rx_skbs), ==, 0);
 