
Commit 9c864bb
Rebuild rocky10_0 with kernel-6.12.0-55.27.1.el10_0
Rebuild_History BUILDABLE
Rebuilding Kernel from rpm changelog with Fuzz Limit: 87.50%
Number of commits in upstream range v6.12~1..kernel-mainline: 52012
Number of commits in rpm: 33
Number of commits matched with upstream: 27 (81.82%)
Number of commits in upstream but not in rpm: 51990
Number of commits NOT found in upstream: 6 (18.18%)

Rebuilding Kernel on Branch rocky10_0_rebuild_kernel-6.12.0-55.27.1.el10_0 for kernel-6.12.0-55.27.1.el10_0
Clean Cherry Picks: 17 (62.96%)
Empty Cherry Picks: 5 (18.52%)
_______________________________

Full Details Located here: ciq/ciq_backports/kernel-6.12.0-55.27.1.el10_0/rebuild.details.txt

Includes:
* git commit header above
* Empty Commits with upstream SHA
* RPM ChangeLog Entries that could not be matched

Individual Empty Commit failures contained in the same containing directory.
The git message for empty commits will have the path for the failed commit.
File names are the first 8 characters of the upstream SHA.
1 parent 0541075 commit 9c864bb
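
For reference, the percentages above follow directly from the commit counts: 27 of 33 rpm commits matched upstream (27/33 ≈ 81.82%) and 6 did not (6/33 ≈ 18.18%), while the cherry-pick rates are computed against the 27 matched commits (17/27 ≈ 62.96% clean, 5/27 ≈ 18.52% empty).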

File tree

17 files changed (+185, -42 lines)

Makefile.rhelver

Lines changed: 1 addition & 1 deletion

@@ -12,7 +12,7 @@ RHEL_MINOR = 0
 #
 # Use this spot to avoid future merge conflicts.
 # Do not trim this comment.
-RHEL_RELEASE = 55.25.1
+RHEL_RELEASE = 55.27.1
 
 #
 # RHEL_REBASE_NUM
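
This one-line bump is what produces the kernel-6.12.0-55.27.1.el10_0 NVR in the commit subject: RHEL_RELEASE supplies the 55.27.1 component, with the 6.12.0 base version and the el10_0 dist tag coming from the rest of the spec machinery.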
ciq/ciq_backports/kernel-6.12.0-55.27.1.el10_0/rebuild.details.txt

Lines changed: 34 additions & 0 deletions

@@ -0,0 +1,34 @@
+Rebuild_History BUILDABLE
+Rebuilding Kernel from rpm changelog with Fuzz Limit: 87.50%
+Number of commits in upstream range v6.12~1..kernel-mainline: 52012
+Number of commits in rpm: 33
+Number of commits matched with upstream: 27 (81.82%)
+Number of commits in upstream but not in rpm: 51990
+Number of commits NOT found in upstream: 6 (18.18%)
+
+Rebuilding Kernel on Branch rocky10_0_rebuild_kernel-6.12.0-55.27.1.el10_0 for kernel-6.12.0-55.27.1.el10_0
+Clean Cherry Picks: 17 (62.96%)
+Empty Cherry Picks: 5 (18.52%)
+_______________________________
+
+__EMPTY COMMITS__________________________
+8b926f237743f020518162c62b93cb7107a2b5eb PCI/pwrctrl: Cancel outstanding rescan work when unregistering
+ee40c9920ac286c5bfe7c811e66ff899266d2582 mm: fix copy_vma() error handling for hugetlb mappings
+918850c13608c7b138512c2ecbfd3436b7a51797 tools/testing/vma: add missing function stub
+081056dc00a27bccb55ccc3c6f230a3d5fd3f7e0 mm/hugetlb: unshare page tables during VMA split, not before
+1013af4f585fccc4d3e5c5824d174de2257f7d6d mm/hugetlb: fix huge_pmd_unshare() vs GUP-fast race
+
+__CHANGES NOT IN UPSTREAM________________
+Porting to Rocky Linux 10, debranding and Rocky Linux branding'
+Add partial riscv64 support for build root'
+Provide basic VisionFive 2 support'
+Revert "net/sched: Always pass notifications when child class becomes empty"
+net/sched: Always pass notifications when child class becomes empty
+redhat: update BUILD_TARGET to use rhel-10.0-z-test-pesign
+Revert "sch_htb: make htb_qlen_notify() idempotent" (Jan Stancek) [RHEL-108141]
+Revert "sch_drr: make drr_qlen_notify() idempotent" (Jan Stancek) [RHEL-108141]
+Revert "sch_qfq: make qfq_qlen_notify() idempotent" (Jan Stancek) [RHEL-108141]
+Revert "codel: remove sch->q.qlen check before qdisc_tree_reduce_backlog()" (Jan Stancek) [RHEL-108141]
+Revert "sch_htb: make htb_deactivate() idempotent" (Jan Stancek) [RHEL-108141]
+Revert "net/sched: Always pass notifications when child class becomes empty" (Jan Stancek) [RHEL-108141]
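
Note how the two lists line up with the statistics: the five SHAs under __EMPTY COMMITS__ are the "Empty Cherry Picks: 5" counted above and are realized by the drivers/pci and mm/ hunks below, while the net/sched/sch_codel.c hunk implements the RHEL-108141 codel revert listed under __CHANGES NOT IN UPSTREAM__.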

drivers/pci/pwrctl/core.c

Lines changed: 2 additions & 0 deletions

@@ -110,6 +110,8 @@ EXPORT_SYMBOL_GPL(pci_pwrctl_device_set_ready);
  */
 void pci_pwrctl_device_unset_ready(struct pci_pwrctl *pwrctl)
 {
+	cancel_work_sync(&pwrctl->work);
+
 	/*
 	 * We don't have to delete the link here. Typically, this function
 	 * is only called when the power control device is being detached. If
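
The fix is the standard rule for deferred work: anything the device can queue must be cancelled before the embedding object is torn down. A minimal sketch of the pattern, with a hypothetical demo_pwrctl type standing in for the real driver structure:

#include <linux/workqueue.h>

struct demo_pwrctl {
	struct work_struct work;	/* queued rescan work */
};

static void demo_unset_ready(struct demo_pwrctl *ctl)
{
	/*
	 * cancel_work_sync() removes a pending instance of ctl->work and
	 * waits for a running one to finish, so the handler can never see
	 * a device that is mid-teardown.
	 */
	cancel_work_sync(&ctl->work);

	/* ...remaining teardown runs with no rescan work in flight. */
}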

include/linux/hugetlb.h

Lines changed: 8 additions & 0 deletions

@@ -272,6 +272,8 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
 bool is_hugetlb_entry_migration(pte_t pte);
 bool is_hugetlb_entry_hwpoisoned(pte_t pte);
 void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
+void fixup_hugetlb_reservations(struct vm_area_struct *vma);
+void hugetlb_split(struct vm_area_struct *vma, unsigned long addr);
 
 #else /* !CONFIG_HUGETLB_PAGE */
 
@@ -465,6 +467,12 @@ static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
 
 static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }
 
+static inline void fixup_hugetlb_reservations(struct vm_area_struct *vma)
+{
+}
+
+static inline void hugetlb_split(struct vm_area_struct *vma, unsigned long addr) {}
+
 #endif /* !CONFIG_HUGETLB_PAGE */
 
 #ifndef pgd_write
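
The empty !CONFIG_HUGETLB_PAGE stubs follow the usual kernel pattern of keeping generic callers #ifdef-free; a minimal sketch (the caller name demo_generic_path is hypothetical):

/* Assumes <linux/hugetlb.h> as patched above. */
static void demo_generic_path(struct vm_area_struct *vma, unsigned long addr)
{
	/* With CONFIG_HUGETLB_PAGE unset these compile to empty inlines. */
	fixup_hugetlb_reservations(vma);
	hugetlb_split(vma, addr);
}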

mm/hugetlb.c

Lines changed: 66 additions & 17 deletions

@@ -87,7 +87,7 @@ static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
 static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
 static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
 static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
-		unsigned long start, unsigned long end);
+		unsigned long start, unsigned long end, bool take_locks);
 static struct resv_map *vma_resv_map(struct vm_area_struct *vma);
 
 static void hugetlb_free_folio(struct folio *folio)
@@ -1218,7 +1218,7 @@ void hugetlb_dup_vma_private(struct vm_area_struct *vma)
 /*
  * Reset and decrement one ref on hugepage private reservation.
  * Called with mm->mmap_lock writer semaphore held.
- * This function should be only used by move_vma() and operate on
+ * This function should be only used by mremap and operate on
  * same sized vma. It should never come here with last ref on the
  * reservation.
  */
@@ -5093,26 +5093,40 @@ static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
 {
 	if (addr & ~(huge_page_mask(hstate_vma(vma))))
 		return -EINVAL;
+	return 0;
+}
 
+void hugetlb_split(struct vm_area_struct *vma, unsigned long addr)
+{
 	/*
 	 * PMD sharing is only possible for PUD_SIZE-aligned address ranges
 	 * in HugeTLB VMAs. If we will lose PUD_SIZE alignment due to this
 	 * split, unshare PMDs in the PUD_SIZE interval surrounding addr now.
+	 * This function is called in the middle of a VMA split operation, with
+	 * MM, VMA and rmap all write-locked to prevent concurrent page table
+	 * walks (except hardware and gup_fast()).
 	 */
+	vma_assert_write_locked(vma);
+	i_mmap_assert_write_locked(vma->vm_file->f_mapping);
+
 	if (addr & ~PUD_MASK) {
-		/*
-		 * hugetlb_vm_op_split is called right before we attempt to
-		 * split the VMA. We will need to unshare PMDs in the old and
-		 * new VMAs, so let's unshare before we split.
-		 */
 		unsigned long floor = addr & PUD_MASK;
 		unsigned long ceil = floor + PUD_SIZE;
 
-		if (floor >= vma->vm_start && ceil <= vma->vm_end)
-			hugetlb_unshare_pmds(vma, floor, ceil);
+		if (floor >= vma->vm_start && ceil <= vma->vm_end) {
+			/*
+			 * Locking:
+			 * Use take_locks=false here.
+			 * The file rmap lock is already held.
+			 * The hugetlb VMA lock can't be taken when we already
+			 * hold the file rmap lock, and we don't need it because
+			 * its purpose is to synchronize against concurrent page
+			 * table walks, which are not possible thanks to the
+			 * locks held by our caller.
+			 */
+			hugetlb_unshare_pmds(vma, floor, ceil, /* take_locks = */ false);
+		}
 	}
-
-	return 0;
 }
 
 static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
@@ -7265,6 +7279,13 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
 		return 0;
 
 	pud_clear(pud);
+	/*
+	 * Once our caller drops the rmap lock, some other process might be
+	 * using this page table as a normal, non-hugetlb page table.
+	 * Wait for pending gup_fast() in other threads to finish before letting
+	 * that happen.
+	 */
+	tlb_remove_table_sync_one();
 	put_page(virt_to_page(ptep));
 	mm_dec_nr_pmds(mm);
 	return 1;
@@ -7497,9 +7518,16 @@ void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int re
 	}
 }
 
+/*
+ * If @take_locks is false, the caller must ensure that no concurrent page table
+ * access can happen (except for gup_fast() and hardware page walks).
+ * If @take_locks is true, we take the hugetlb VMA lock (to lock out things like
+ * concurrent page fault handling) and the file rmap lock.
+ */
 static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
 				unsigned long start,
-				unsigned long end)
+				unsigned long end,
+				bool take_locks)
 {
 	struct hstate *h = hstate_vma(vma);
 	unsigned long sz = huge_page_size(h);
@@ -7523,8 +7551,12 @@ static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
 				start, end);
 	mmu_notifier_invalidate_range_start(&range);
-	hugetlb_vma_lock_write(vma);
-	i_mmap_lock_write(vma->vm_file->f_mapping);
+	if (take_locks) {
+		hugetlb_vma_lock_write(vma);
+		i_mmap_lock_write(vma->vm_file->f_mapping);
+	} else {
+		i_mmap_assert_write_locked(vma->vm_file->f_mapping);
+	}
 	for (address = start; address < end; address += PUD_SIZE) {
 		ptep = hugetlb_walk(vma, address, sz);
 		if (!ptep)
@@ -7534,8 +7566,10 @@ static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
 		spin_unlock(ptl);
 	}
 	flush_hugetlb_tlb_range(vma, start, end);
-	i_mmap_unlock_write(vma->vm_file->f_mapping);
-	hugetlb_vma_unlock_write(vma);
+	if (take_locks) {
+		i_mmap_unlock_write(vma->vm_file->f_mapping);
+		hugetlb_vma_unlock_write(vma);
+	}
 	/*
 	 * No need to call mmu_notifier_arch_invalidate_secondary_tlbs(), see
 	 * Documentation/mm/mmu_notifier.rst.
@@ -7550,7 +7584,22 @@ static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
 void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
 {
 	hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
-			ALIGN_DOWN(vma->vm_end, PUD_SIZE));
+			ALIGN_DOWN(vma->vm_end, PUD_SIZE),
+			/* take_locks = */ true);
+}
+
+/*
+ * For hugetlb, mremap() is an odd edge case - while the VMA copying is
+ * performed, we permit both the old and new VMAs to reference the same
+ * reservation.
+ *
+ * We fix this up after the operation succeeds, or if a newly allocated VMA
+ * is closed as a result of a failure to allocate memory.
+ */
+void fixup_hugetlb_reservations(struct vm_area_struct *vma)
+{
+	if (is_vm_hugetlb_page(vma))
+		clear_vma_resv_huge_pages(vma);
+}
 
 #ifdef CONFIG_CMA
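
The net effect is that hugetlb_unshare_pmds() now has two locking modes; a condensed view of the two call sites from the hunks above (paraphrased, not complete code):

/* Caller holds no extra locks: hugetlb_unshare_all_pmds() asks the
 * helper to take the hugetlb VMA lock and file rmap lock itself.
 */
hugetlb_unshare_pmds(vma, start, end, /* take_locks = */ true);

/* Caller is hugetlb_split() inside __split_vma(): mmap lock, VMA lock
 * and file rmap lock are already write-held, so the helper only
 * asserts the rmap lock. gup_fast() remains the one lockless walker,
 * which is why huge_pmd_unshare() adds tlb_remove_table_sync_one()
 * before the unshared page table page can be reused.
 */
hugetlb_unshare_pmds(vma, floor, ceil, /* take_locks = */ false);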

mm/mremap.c

Lines changed: 1 addition & 3 deletions

@@ -743,9 +743,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 		mremap_userfaultfd_prep(new_vma, uf);
 	}
 
-	if (is_vm_hugetlb_page(vma)) {
-		clear_vma_resv_huge_pages(vma);
-	}
+	fixup_hugetlb_reservations(vma);
 
 	/* Conceal VM_ACCOUNT so old reservation is not undone */
 	if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) {
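
Folding the open-coded check into fixup_hugetlb_reservations() also lets the copy_vma() failure path in mm/vma.c (next hunk) perform the same cleanup, which is the substance of upstream ee40c9920ac2 ("mm: fix copy_vma() error handling for hugetlb mappings") from the empty-commits list.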

mm/vma.c

Lines changed: 8 additions & 0 deletions

@@ -416,7 +416,14 @@ static int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	init_vma_prep(&vp, vma);
 	vp.insert = new;
 	vma_prepare(&vp);
+
+	/*
+	 * Get rid of huge pages and shared page tables straddling the split
+	 * boundary.
+	 */
 	vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);
+	if (is_vm_hugetlb_page(vma))
+		hugetlb_split(vma, addr);
 
 	if (new_below) {
 		vma->vm_start = addr;
@@ -1681,6 +1688,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 	return new_vma;
 
 out_vma_link:
+	fixup_hugetlb_reservations(new_vma);
 	vma_close(new_vma);
 
 	if (new_vma->vm_file)
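
The placement is the point of upstream 081056dc00a2: hugetlb_split() runs after vma_prepare() has write-locked the VMA and file rmap, and before the VMA bounds are rewritten, which is exactly the window its assertions require. Condensed from the hunk (not a complete listing):

/* __split_vma(), reduced to the ordering the new hunk relies on: */
vma_prepare(&vp);                 /* VMA and file rmap now write-locked */
vma_adjust_trans_huge(vma, vma->vm_start, addr, 0); /* split THPs */
if (is_vm_hugetlb_page(vma))
	hugetlb_split(vma, addr); /* unshare PMDs straddling addr */
/* ...only afterwards are vm_start/vm_end rewritten for the two halves. */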

mm/vma_internal.h

Lines changed: 1 addition & 0 deletions

@@ -17,6 +17,7 @@
 #include <linux/file.h>
 #include <linux/fs.h>
 #include <linux/huge_mm.h>
+#include <linux/hugetlb.h>
 #include <linux/hugetlb_inline.h>
 #include <linux/kernel.h>
 #include <linux/khugepaged.h>

net/sched/sch_codel.c

Lines changed: 4 additions & 1 deletion

@@ -65,7 +65,10 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
 				&q->stats, qdisc_pkt_len, codel_get_enqueue_time,
 				drop_func, dequeue_func);
 
-	if (q->stats.drop_count) {
+	/* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
+	 * or HTB crashes. Defer it for next round.
+	 */
+	if (q->stats.drop_count && sch->q.qlen) {
 		qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
 		q->stats.drop_count = 0;
 		q->stats.drop_len = 0;
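
Why the guard matters: qdisc_tree_reduce_backlog() propagates the drop accounting up the qdisc hierarchy and notifies the parent when a child class drains. With the "make *_qlen_notify() idempotent" commits reverted on this branch (the RHEL-108141 entries above), delivering that notification for a qdisc whose queue is already empty can notify HTB about an already-deactivated class and crash. Deferring the accounting until sch->q.qlen is non-zero, as the restored check does, avoids ever reporting on an empty queue; drop_count and drop_len simply persist until a later dequeue round can report them safely.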
