Skip to content

Commit

Permalink
sync mm-stable with mm-hotfixes-stable to pick up depended-upon upstream changes
Browse files Browse the repository at this point in the history
  • Loading branch information
akpm00 committed Apr 18, 2023
2 parents e492cd6 + ef83274 commit f8f238f
Show file tree
Hide file tree
Showing 11 changed files with 247 additions and 102 deletions.
20 changes: 20 additions & 0 deletions fs/nilfs2/segment.c
Original file line number Diff line number Diff line change
Expand Up @@ -430,6 +430,23 @@ static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
return 0;
}

/**
* nilfs_segctor_zeropad_segsum - zero pad the rest of the segment summary area
* @sci: segment constructor object
*
* nilfs_segctor_zeropad_segsum() zero-fills unallocated space at the end of
* the current segment summary block.
*/
static void nilfs_segctor_zeropad_segsum(struct nilfs_sc_info *sci)
{
	struct nilfs_segsum_pointer *ssp;
	size_t pad;

	/*
	 * While block information entries are being added, sc_binfo_ptr
	 * tracks the write position; otherwise the finfo pointer does.
	 */
	ssp = sci->sc_blk_cnt > 0 ? &sci->sc_binfo_ptr : &sci->sc_finfo_ptr;

	/* Nothing to do if the summary block is already fully used. */
	if (ssp->offset >= ssp->bh->b_size)
		return;

	/* Zero-fill the unused tail of the segment summary block. */
	pad = ssp->bh->b_size - ssp->offset;
	memset(ssp->bh->b_data + ssp->offset, 0, pad);
}

static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
{
sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
Expand All @@ -438,6 +455,7 @@ static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
* The current segment is filled up
* (internal code)
*/
nilfs_segctor_zeropad_segsum(sci);
sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
return nilfs_segctor_reset_segment_buffer(sci);
}
Expand Down Expand Up @@ -542,6 +560,7 @@ static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
goto retry;
}
if (unlikely(required)) {
nilfs_segctor_zeropad_segsum(sci);
err = nilfs_segbuf_extend_segsum(segbuf);
if (unlikely(err))
goto failed;
Expand Down Expand Up @@ -1533,6 +1552,7 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
sci->sc_stage = prev_stage;
}
nilfs_segctor_zeropad_segsum(sci);
nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
return 0;

Expand Down
39 changes: 21 additions & 18 deletions include/linux/kmsan.h
Original file line number Diff line number Diff line change
Expand Up @@ -134,11 +134,12 @@ void kmsan_kfree_large(const void *ptr);
* @page_shift: page_shift passed to vmap_range_noflush().
*
* KMSAN maps shadow and origin pages of @pages into contiguous ranges in
* vmalloc metadata address range.
* vmalloc metadata address range. Returns 0 on success, callers must check
* for non-zero return value.
*/
void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
pgprot_t prot, struct page **pages,
unsigned int page_shift);
int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
pgprot_t prot, struct page **pages,
unsigned int page_shift);

/**
* kmsan_vunmap_kernel_range_noflush() - Notify KMSAN about a vunmap.
Expand All @@ -159,11 +160,12 @@ void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end);
* @page_shift: page_shift argument passed to vmap_range_noflush().
*
* KMSAN creates new metadata pages for the physical pages mapped into the
* virtual memory.
* virtual memory. Returns 0 on success, callers must check for non-zero return
* value.
*/
void kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
phys_addr_t phys_addr, pgprot_t prot,
unsigned int page_shift);
int kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
phys_addr_t phys_addr, pgprot_t prot,
unsigned int page_shift);

/**
* kmsan_iounmap_page_range() - Notify KMSAN about a iounmap_page_range() call.
Expand Down Expand Up @@ -281,25 +283,26 @@ static inline void kmsan_kfree_large(const void *ptr)
{
}

static inline void kmsan_vmap_pages_range_noflush(unsigned long start,
unsigned long end,
pgprot_t prot,
struct page **pages,
unsigned int page_shift)
/*
 * No-op stub of kmsan_vmap_pages_range_noflush(); always reports success.
 * NOTE(review): presumably the !CONFIG_KMSAN variant — the surrounding
 * #ifdef is not visible in this excerpt; confirm against the full header.
 */
static inline int kmsan_vmap_pages_range_noflush(unsigned long start,
unsigned long end,
pgprot_t prot,
struct page **pages,
unsigned int page_shift)
{
return 0;
}

/*
 * No-op stub of kmsan_vunmap_range_noflush(); nothing to unmap when the
 * KMSAN hooks are compiled out (assumed from context — confirm #ifdef).
 */
static inline void kmsan_vunmap_range_noflush(unsigned long start,
unsigned long end)
{
}

static inline void kmsan_ioremap_page_range(unsigned long start,
unsigned long end,
phys_addr_t phys_addr,
pgprot_t prot,
unsigned int page_shift)
/*
 * No-op stub of kmsan_ioremap_page_range(); always reports success so
 * callers that now check the return value keep working unchanged.
 * NOTE(review): presumably the !CONFIG_KMSAN variant — confirm #ifdef.
 */
static inline int kmsan_ioremap_page_range(unsigned long start,
unsigned long end,
phys_addr_t phys_addr, pgprot_t prot,
unsigned int page_shift)
{
return 0;
}

static inline void kmsan_iounmap_page_range(unsigned long start,
Expand Down
1 change: 1 addition & 0 deletions kernel/fork.c
Original file line number Diff line number Diff line change
Expand Up @@ -1308,6 +1308,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
fail_pcpu:
while (i > 0)
percpu_counter_destroy(&mm->rss_stat[--i]);
destroy_context(mm);
fail_nocontext:
mm_free_pgd(mm);
fail_nopgd:
Expand Down
69 changes: 40 additions & 29 deletions kernel/sys.c
Original file line number Diff line number Diff line change
Expand Up @@ -664,6 +664,7 @@ long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
struct cred *new;
int retval;
kuid_t kruid, keuid, ksuid;
bool ruid_new, euid_new, suid_new;

kruid = make_kuid(ns, ruid);
keuid = make_kuid(ns, euid);
Expand All @@ -678,25 +679,29 @@ long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
if ((suid != (uid_t) -1) && !uid_valid(ksuid))
return -EINVAL;

old = current_cred();

/* check for no-op */
if ((ruid == (uid_t) -1 || uid_eq(kruid, old->uid)) &&
(euid == (uid_t) -1 || (uid_eq(keuid, old->euid) &&
uid_eq(keuid, old->fsuid))) &&
(suid == (uid_t) -1 || uid_eq(ksuid, old->suid)))
return 0;

ruid_new = ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) &&
!uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid);
euid_new = euid != (uid_t) -1 && !uid_eq(keuid, old->uid) &&
!uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid);
suid_new = suid != (uid_t) -1 && !uid_eq(ksuid, old->uid) &&
!uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid);
if ((ruid_new || euid_new || suid_new) &&
!ns_capable_setid(old->user_ns, CAP_SETUID))
return -EPERM;

new = prepare_creds();
if (!new)
return -ENOMEM;

old = current_cred();

retval = -EPERM;
if (!ns_capable_setid(old->user_ns, CAP_SETUID)) {
if (ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) &&
!uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
goto error;
if (euid != (uid_t) -1 && !uid_eq(keuid, old->uid) &&
!uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
goto error;
if (suid != (uid_t) -1 && !uid_eq(ksuid, old->uid) &&
!uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
goto error;
}

if (ruid != (uid_t) -1) {
new->uid = kruid;
if (!uid_eq(kruid, old->uid)) {
Expand Down Expand Up @@ -761,6 +766,7 @@ long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
struct cred *new;
int retval;
kgid_t krgid, kegid, ksgid;
bool rgid_new, egid_new, sgid_new;

krgid = make_kgid(ns, rgid);
kegid = make_kgid(ns, egid);
Expand All @@ -773,23 +779,28 @@ long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
return -EINVAL;

old = current_cred();

/* check for no-op */
if ((rgid == (gid_t) -1 || gid_eq(krgid, old->gid)) &&
(egid == (gid_t) -1 || (gid_eq(kegid, old->egid) &&
gid_eq(kegid, old->fsgid))) &&
(sgid == (gid_t) -1 || gid_eq(ksgid, old->sgid)))
return 0;

rgid_new = rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) &&
!gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid);
egid_new = egid != (gid_t) -1 && !gid_eq(kegid, old->gid) &&
!gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid);
sgid_new = sgid != (gid_t) -1 && !gid_eq(ksgid, old->gid) &&
!gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid);
if ((rgid_new || egid_new || sgid_new) &&
!ns_capable_setid(old->user_ns, CAP_SETGID))
return -EPERM;

new = prepare_creds();
if (!new)
return -ENOMEM;
old = current_cred();

retval = -EPERM;
if (!ns_capable_setid(old->user_ns, CAP_SETGID)) {
if (rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) &&
!gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
goto error;
if (egid != (gid_t) -1 && !gid_eq(kegid, old->gid) &&
!gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
goto error;
if (sgid != (gid_t) -1 && !gid_eq(ksgid, old->gid) &&
!gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
goto error;
}

if (rgid != (gid_t) -1)
new->gid = krgid;
Expand Down
47 changes: 24 additions & 23 deletions lib/maple_tree.c
Original file line number Diff line number Diff line change
Expand Up @@ -4965,7 +4965,8 @@ static inline void *mas_prev_entry(struct ma_state *mas, unsigned long min)
* Return: True if found in a leaf, false otherwise.
*
*/
static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
static bool mas_rev_awalk(struct ma_state *mas, unsigned long size,
unsigned long *gap_min, unsigned long *gap_max)
{
enum maple_type type = mte_node_type(mas->node);
struct maple_node *node = mas_mn(mas);
Expand Down Expand Up @@ -5030,8 +5031,8 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)

if (unlikely(ma_is_leaf(type))) {
mas->offset = offset;
mas->min = min;
mas->max = min + gap - 1;
*gap_min = min;
*gap_max = min + gap - 1;
return true;
}

Expand All @@ -5055,24 +5056,26 @@ static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
{
enum maple_type type = mte_node_type(mas->node);
unsigned long pivot, min, gap = 0;
unsigned char offset;
unsigned long *gaps;
unsigned long *pivots = ma_pivots(mas_mn(mas), type);
void __rcu **slots = ma_slots(mas_mn(mas), type);
unsigned char offset, data_end;
unsigned long *gaps, *pivots;
void __rcu **slots;
struct maple_node *node;
bool found = false;

if (ma_is_dense(type)) {
mas->offset = (unsigned char)(mas->index - mas->min);
return true;
}

gaps = ma_gaps(mte_to_node(mas->node), type);
node = mas_mn(mas);
pivots = ma_pivots(node, type);
slots = ma_slots(node, type);
gaps = ma_gaps(node, type);
offset = mas->offset;
min = mas_safe_min(mas, pivots, offset);
for (; offset < mt_slots[type]; offset++) {
pivot = mas_safe_pivot(mas, pivots, offset, type);
if (offset && !pivot)
break;
data_end = ma_data_end(node, type, pivots, mas->max);
for (; offset <= data_end; offset++) {
pivot = mas_logical_pivot(mas, pivots, offset, type);

/* Not within lower bounds */
if (mas->index > pivot)
Expand Down Expand Up @@ -5307,6 +5310,9 @@ int mas_empty_area(struct ma_state *mas, unsigned long min,
unsigned long *pivots;
enum maple_type mt;

if (min >= max)
return -EINVAL;

if (mas_is_start(mas))
mas_start(mas);
else if (mas->offset >= 2)
Expand Down Expand Up @@ -5361,6 +5367,9 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
{
struct maple_enode *last = mas->node;

if (min >= max)
return -EINVAL;

if (mas_is_start(mas)) {
mas_start(mas);
mas->offset = mas_data_end(mas);
Expand All @@ -5380,7 +5389,7 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
mas->index = min;
mas->last = max;

while (!mas_rev_awalk(mas, size)) {
while (!mas_rev_awalk(mas, size, &min, &max)) {
if (last == mas->node) {
if (!mas_rewind_node(mas))
return -EBUSY;
Expand All @@ -5395,17 +5404,9 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
if (unlikely(mas->offset == MAPLE_NODE_SLOTS))
return -EBUSY;

/*
* mas_rev_awalk() has set mas->min and mas->max to the gap values. If
* the maximum is outside the window we are searching, then use the last
* location in the search.
* mas->max and mas->min is the range of the gap.
* mas->index and mas->last are currently set to the search range.
*/

/* Trim the upper limit to the max. */
if (mas->max <= mas->last)
mas->last = mas->max;
if (max <= mas->last)
mas->last = max;

mas->index = mas->last - size + 1;
return 0;
Expand Down
Loading

0 comments on commit f8f238f

Please sign in to comment.