Fixes and cleanups for JFS filesystem

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEIodevzQLVs53l6BhNqiEXrVAjGQFAmiLd+AACgkQNqiEXrVA
 jGTZjxAAjJGErKMS4XwC10Cpyy7en97xQF8qHnGWr0nIss7dvdN0zw8oUBCZFuso
 uhtWddAcEwuWK+VcNj1TdEf3tSyxbjGkMA9Gh7amv7eI7YVNrFz7OZBSWMsd2DPP
 ZJzPfExDIGEygWP5KDPd3qrCIA+hMwwJmpCY0sO7zZbrH8aftOUJQe5Gi/a+43f4
 n+vTE1d/0h0PK/Vvy1Ceiql7V0HdJ7SWOfBfTTTQivdzdk8UFqgDoC27WJ1DpfQR
 XTTEIoqizSeSV5YXDqwF6d7QQ3gROvzQlk0J7r3CUJ4gYHF0hDEFmHStYnkz8ZKB
 Z+KMMEnd2Mt/NUFBI6+tYnMzAVj5fq4XLTfe7GqM7ZL+z2JRH+kAT7SGQJ0fabPA
 FlNIA7fpLd1c0uuZghgS1rdY2uLYJEY2gFumcn9T0fbMsE10hSbfrkbldXCvkMpN
 z1pLHj0yvG1yXUOXjgvdkpJEGpdp63Dq7qNq5yAbXCURzH08nAQQ2Fx9vX86WNta
 8vkBrx/6X2uIE7Cd4WOSj3IYb4LY9BSBc9fScVd7D5kkwN0qAHTJNSdMUO0vpvjx
 +wwYMhfIVAreacrmSVsCAh+Zaf7zg/VwS+5GZ+trBx5/KNeLVI5f/QbOXcDhHWm3
 Y/2oQ8TAZgcPoyIROmZ/bQD4T+v48IESNrE54yt5WUazAeIxqYo=
 =jTbL
 -----END PGP SIGNATURE-----

Merge tag 'jfs-6.17' of github.com:kleikamp/linux-shaggy

Pull jfs updates from Dave Kleikamp:
 "Fixes and cleanups for JFS filesystem"

* tag 'jfs-6.17' of github.com:kleikamp/linux-shaggy:
  jfs: fix metapage reference count leak in dbAllocCtl
  jfs: stop using write_cache_pages
  jfs: truncate good inode pages when hard link is 0
  jfs: jfs_xtree: replace XT_GETPAGE macro with xt_getpage()
  jfs: Regular file corruption check
  jfs: upper bound check of tree index in dbAllocAG
Commit 440e6d7e14 by Linus Torvalds, 2025-07-31 10:27:11 -07:00
5 changed files with 96 additions and 69 deletions

fs/jfs/file.c

@@ -44,6 +44,9 @@ static int jfs_open(struct inode *inode, struct file *file)
{
int rc;
if (S_ISREG(inode->i_mode) && inode->i_size < 0)
return -EIO;
if ((rc = dquot_file_open(inode, file)))
return rc;

fs/jfs/inode.c

@@ -145,9 +145,9 @@ void jfs_evict_inode(struct inode *inode)
if (!inode->i_nlink && !is_bad_inode(inode)) {
dquot_initialize(inode);
truncate_inode_pages_final(&inode->i_data);
if (JFS_IP(inode)->fileset == FILESYSTEM_I) {
struct inode *ipimap = JFS_SBI(inode->i_sb)->ipimap;
truncate_inode_pages_final(&inode->i_data);
if (test_cflag(COMMIT_Freewmap, inode))
jfs_free_zero_link(inode);

fs/jfs/jfs_dmap.c

@@ -1389,6 +1389,12 @@ dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb, s64 * results)
(1 << (L2LPERCTL - (bmp->db_agheight << 1))) / bmp->db_agwidth;
ti = bmp->db_agstart + bmp->db_agwidth * (agno & (agperlev - 1));
if (ti < 0 || ti >= le32_to_cpu(dcp->nleafs)) {
jfs_error(bmp->db_ipbmap->i_sb, "Corrupt dmapctl page\n");
release_metapage(mp);
return -EIO;
}
/* dmap control page trees fan-out by 4 and a single allocation
* group may be described by 1 or 2 subtrees within the ag level
* dmap control page, depending upon the ag size. examine the ag's
@@ -1809,8 +1815,10 @@ dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno, s64 * results)
return -EIO;
dp = (struct dmap *) mp->data;
if (dp->tree.budmin < 0)
if (dp->tree.budmin < 0) {
release_metapage(mp);
return -EIO;
}
/* try to allocate the blocks.
*/

fs/jfs/jfs_metapage.c

@@ -421,7 +421,7 @@ static void metapage_write_end_io(struct bio *bio)
}
static int metapage_write_folio(struct folio *folio,
struct writeback_control *wbc, void *unused)
struct writeback_control *wbc)
{
struct bio *bio = NULL;
int block_offset; /* block offset of mp within page */
@@ -550,10 +550,12 @@ static int metapage_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
struct blk_plug plug;
struct folio *folio = NULL;
int err;
blk_start_plug(&plug);
err = write_cache_pages(mapping, wbc, metapage_write_folio, NULL);
while ((folio = writeback_iter(mapping, wbc, folio, &err)))
err = metapage_write_folio(folio, wbc);
blk_finish_plug(&plug);
return err;
@@ -813,7 +815,7 @@ static int metapage_write_one(struct folio *folio)
if (folio_clear_dirty_for_io(folio)) {
folio_get(folio);
ret = metapage_write_folio(folio, &wbc, NULL);
ret = metapage_write_folio(folio, &wbc);
if (ret == 0)
folio_wait_writeback(folio);
folio_put(folio);
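
For readers not yet familiar with the iterator used above: write_cache_pages() with a per-page callback has been superseded by the folio-based writeback_iter() loop, which metapage_writepages() now open-codes. Below is a minimal sketch of the general ->writepages shape, assuming a hypothetical fs_write_folio() helper; it is not JFS code and not part of this commit:

#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

/* Hypothetical per-filesystem "write one folio" helper.  A real one would
 * map the folio's blocks and submit a bio; this stub only follows the
 * locking/writeback protocol expected of the callee. */
static int fs_write_folio(struct folio *folio, struct writeback_control *wbc)
{
	folio_start_writeback(folio);
	folio_unlock(folio);
	folio_end_writeback(folio);
	return 0;
}

/* Sketch of a ->writepages implementation built on writeback_iter(),
 * mirroring the shape of the metapage_writepages() hunk above. */
static int fs_writepages(struct address_space *mapping,
			 struct writeback_control *wbc)
{
	struct blk_plug plug;
	struct folio *folio = NULL;
	int err = 0;

	blk_start_plug(&plug);
	/* writeback_iter() returns each dirty folio in the writeback range
	 * (locked and ready to be written) and NULL when done; the previous
	 * folio and the running error are threaded through its last two
	 * arguments. */
	while ((folio = writeback_iter(mapping, wbc, folio, &err)))
		err = fs_write_folio(folio, wbc);
	blk_finish_plug(&plug);

	return err;
}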

fs/jfs/jfs_xtree.c

@@ -49,26 +49,6 @@
#define XT_PAGE(IP, MP) BT_PAGE(IP, MP, xtpage_t, i_xtroot)
/* get page buffer for specified block address */
/* ToDo: Replace this ugly macro with a function */
#define XT_GETPAGE(IP, BN, MP, SIZE, P, RC) \
do { \
BT_GETPAGE(IP, BN, MP, xtpage_t, SIZE, P, RC, i_xtroot); \
if (!(RC)) { \
if ((le16_to_cpu((P)->header.nextindex) < XTENTRYSTART) || \
(le16_to_cpu((P)->header.nextindex) > \
le16_to_cpu((P)->header.maxentry)) || \
(le16_to_cpu((P)->header.maxentry) > \
(((BN) == 0) ? XTROOTMAXSLOT : PSIZE >> L2XTSLOTSIZE))) { \
jfs_error((IP)->i_sb, \
"XT_GETPAGE: xtree page corrupt\n"); \
BT_PUTPAGE(MP); \
MP = NULL; \
RC = -EIO; \
} \
} \
} while (0)
/* for consistency */
#define XT_PUTPAGE(MP) BT_PUTPAGE(MP)
@@ -114,6 +94,42 @@ static int xtSplitPage(tid_t tid, struct inode *ip, struct xtsplit * split,
static int xtSplitRoot(tid_t tid, struct inode *ip,
struct xtsplit * split, struct metapage ** rmpp);
/*
* xt_getpage()
*
* function: get the page buffer for a specified block address.
*
* parameters:
* ip - pointer to the inode
* bn - block number (s64) of the xtree page to be retrieved;
* mp - pointer to a metapage pointer where the page buffer is returned;
*
* returns:
* A pointer to the xtree page (xtpage_t) on success, or an ERR_PTR() encoding the error (e.g. -EIO) on failure.
*/
static inline xtpage_t *xt_getpage(struct inode *ip, s64 bn, struct metapage **mp)
{
xtpage_t *p;
int rc;
BT_GETPAGE(ip, bn, *mp, xtpage_t, PSIZE, p, rc, i_xtroot);
if (rc)
return ERR_PTR(rc);
if ((le16_to_cpu(p->header.nextindex) < XTENTRYSTART) ||
(le16_to_cpu(p->header.nextindex) >
le16_to_cpu(p->header.maxentry)) ||
(le16_to_cpu(p->header.maxentry) >
((bn == 0) ? XTROOTMAXSLOT : PSIZE >> L2XTSLOTSIZE))) {
jfs_error(ip->i_sb, "xt_getpage: xtree page corrupt\n");
BT_PUTPAGE(*mp);
*mp = NULL;
return ERR_PTR(-EIO);
}
return p;
}
/*
* xtLookup()
*
@@ -216,7 +232,6 @@ static int xtSearch(struct inode *ip, s64 xoff, s64 *nextp,
int *cmpp, struct btstack * btstack, int flag)
{
struct jfs_inode_info *jfs_ip = JFS_IP(ip);
int rc = 0;
int cmp = 1; /* init for empty page */
s64 bn; /* block number */
struct metapage *mp; /* page buffer */
@@ -252,9 +267,9 @@ static int xtSearch(struct inode *ip, s64 xoff, s64 *nextp,
*/
for (bn = 0;;) {
/* get/pin the page to search */
XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc)
return rc;
p = xt_getpage(ip, bn, &mp);
if (IS_ERR(p))
return PTR_ERR(p);
/* try sequential access heuristics with the previous
* access entry in target leaf page:
@@ -807,10 +822,10 @@ xtSplitUp(tid_t tid,
* insert router entry in parent for new right child page <rp>
*/
/* get/pin the parent page <sp> */
XT_GETPAGE(ip, parent->bn, smp, PSIZE, sp, rc);
if (rc) {
sp = xt_getpage(ip, parent->bn, &smp);
if (IS_ERR(sp)) {
XT_PUTPAGE(rcmp);
return rc;
return PTR_ERR(sp);
}
/*
@@ -1062,10 +1077,10 @@ xtSplitPage(tid_t tid, struct inode *ip,
* update previous pointer of old next/right page of <sp>
*/
if (nextbn != 0) {
XT_GETPAGE(ip, nextbn, mp, PSIZE, p, rc);
if (rc) {
p = xt_getpage(ip, nextbn, &mp);
if (IS_ERR(p)) {
XT_PUTPAGE(rmp);
goto clean_up;
return PTR_ERR(p);
}
BT_MARK_DIRTY(mp, ip);
@@ -1417,9 +1432,9 @@ int xtExtend(tid_t tid, /* transaction id */
return rc;
/* get back old page */
XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc)
return rc;
p = xt_getpage(ip, bn, &mp);
if (IS_ERR(p))
return PTR_ERR(p);
/*
* if leaf root has been split, original root has been
* copied to new child page, i.e., original entry now
@@ -1433,9 +1448,9 @@ int xtExtend(tid_t tid, /* transaction id */
XT_PUTPAGE(mp);
/* get new child page */
XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc)
return rc;
p = xt_getpage(ip, bn, &mp);
if (IS_ERR(p))
return PTR_ERR(p);
BT_MARK_DIRTY(mp, ip);
if (!test_cflag(COMMIT_Nolink, ip)) {
@@ -1711,9 +1726,9 @@ int xtUpdate(tid_t tid, struct inode *ip, xad_t * nxad)
return rc;
/* get back old page */
XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc)
return rc;
p = xt_getpage(ip, bn, &mp);
if (IS_ERR(p))
return PTR_ERR(p);
/*
* if leaf root has been split, original root has been
* copied to new child page, i.e., original entry now
@@ -1727,9 +1742,9 @@ int xtUpdate(tid_t tid, struct inode *ip, xad_t * nxad)
XT_PUTPAGE(mp);
/* get new child page */
XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc)
return rc;
p = xt_getpage(ip, bn, &mp);
if (IS_ERR(p))
return PTR_ERR(p);
BT_MARK_DIRTY(mp, ip);
if (!test_cflag(COMMIT_Nolink, ip)) {
@@ -1788,9 +1803,9 @@ int xtUpdate(tid_t tid, struct inode *ip, xad_t * nxad)
XT_PUTPAGE(mp);
/* get new right page */
XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc)
return rc;
p = xt_getpage(ip, bn, &mp);
if (IS_ERR(p))
return PTR_ERR(p);
BT_MARK_DIRTY(mp, ip);
if (!test_cflag(COMMIT_Nolink, ip)) {
@@ -1864,9 +1879,9 @@ printf("xtUpdate.updateLeft.split p:0x%p\n", p);
return rc;
/* get back old page */
XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc)
return rc;
p = xt_getpage(ip, bn, &mp);
if (IS_ERR(p))
return PTR_ERR(p);
/*
* if leaf root has been split, original root has been
@@ -1881,9 +1896,9 @@ printf("xtUpdate.updateLeft.split p:0x%p\n", p);
XT_PUTPAGE(mp);
/* get new child page */
XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc)
return rc;
p = xt_getpage(ip, bn, &mp);
if (IS_ERR(p))
return PTR_ERR(p);
BT_MARK_DIRTY(mp, ip);
if (!test_cflag(COMMIT_Nolink, ip)) {
@@ -2187,7 +2202,6 @@ void xtInitRoot(tid_t tid, struct inode *ip)
*/
s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
{
int rc = 0;
s64 teof;
struct metapage *mp;
xtpage_t *p;
@@ -2268,9 +2282,9 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
* first access of each page:
*/
getPage:
XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc)
return rc;
p = xt_getpage(ip, bn, &mp);
if (IS_ERR(p))
return PTR_ERR(p);
/* process entries backward from last index */
index = le16_to_cpu(p->header.nextindex) - 1;
@@ -2506,9 +2520,9 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
/* get back the parent page */
bn = parent->bn;
XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc)
return rc;
p = xt_getpage(ip, bn, &mp);
if (IS_ERR(p))
return PTR_ERR(p);
index = parent->index;
@@ -2791,9 +2805,9 @@ s64 xtTruncate_pmap(tid_t tid, struct inode *ip, s64 committed_size)
* first access of each page:
*/
getPage:
XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc)
return rc;
p = xt_getpage(ip, bn, &mp);
if (IS_ERR(p))
return PTR_ERR(p);
/* process entries backward from last index */
index = le16_to_cpu(p->header.nextindex) - 1;
@@ -2836,9 +2850,9 @@ s64 xtTruncate_pmap(tid_t tid, struct inode *ip, s64 committed_size)
/* get back the parent page */
bn = parent->bn;
XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc)
return rc;
p = xt_getpage(ip, bn, &mp);
if (IS_ERR(p))
return PTR_ERR(p);
index = parent->index;
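
All of the xt_getpage() conversions above follow the same call-site shape. As a compact reference, here is a schematic (hypothetical) caller of the new helper, assuming the fs/jfs/jfs_xtree.c context for xtpage_t, struct metapage and XT_PUTPAGE(); it is a sketch, not an additional hunk from the commit:

/* Hypothetical walker: fetch one xtree page, inspect it, release it. */
static int example_walk_page(struct inode *ip, s64 bn)
{
	struct metapage *mp;
	xtpage_t *p;

	p = xt_getpage(ip, bn, &mp);
	if (IS_ERR(p))
		return PTR_ERR(p);	/* nothing is left pinned on the error path */

	/* ... examine p->header.nextindex and the xad entries ... */

	XT_PUTPAGE(mp);
	return 0;
}

Compared with the old XT_GETPAGE() macro, failure is now reported through the returned ERR_PTR() rather than a separate rc variable, which is why hunks such as the ones in xtSearch() and xtTruncate() also drop a now-unused local "int rc".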