/* fs/reiserfs/stree.c */
/*
* Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
*/
/*
* Written by Anatoly P. Pinchuk pap@namesys.botik.ru
* Programm System Institute
* Pereslavl-Zalessky Russia
*/
#include <linux/time.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include "reiserfs.h"
#include <linux/buffer_head.h>
#include <linux/quotaops.h>
/* Does the buffer contain a disk block which is in the tree. */
inline int B_IS_IN_TREE(const struct buffer_head *bh)
{
	RFALSE(B_LEVEL(bh) > MAX_HEIGHT,
	       "PAP-1010: block (%b) has too big level (%z)", bh, bh);
	/* a buffer is in the tree iff its level field is not FREE_LEVEL */
	return (B_LEVEL(bh) != FREE_LEVEL);
}
/* to get item head in le form */
/*
 * Copy an on-disk (little-endian) item head.  Exactly IH_SIZE bytes are
 * copied; both pointers must reference at least that much storage.
 */
inline void copy_item_head(struct item_head *to,
			   const struct item_head *from)
{
	memcpy(to, from, IH_SIZE);
}
/*
 * k1 is pointer to on-disk structure which is stored in little-endian
 * form. k2 is pointer to cpu variable. For key of items of the same
 * object this returns 0.
 * Returns: -1 if key1 < key2
 *           0 if key1 == key2
 *           1 if key1 > key2
 */
inline int comp_short_keys(const struct reiserfs_key *le_key,
			   const struct cpu_key *cpu_key)
{
	/* decode the two "short" fields from the on-disk key */
	__u32 disk_dir = le32_to_cpu(le_key->k_dir_id);
	__u32 disk_obj = le32_to_cpu(le_key->k_objectid);

	/* compare directory ids first, object ids second */
	if (disk_dir != cpu_key->on_disk_key.k_dir_id)
		return (disk_dir < cpu_key->on_disk_key.k_dir_id) ? -1 : 1;
	if (disk_obj != cpu_key->on_disk_key.k_objectid)
		return (disk_obj < cpu_key->on_disk_key.k_objectid) ? -1 : 1;
	return 0;
}
/*
 * k1 is pointer to on-disk structure which is stored in little-endian
 * form. k2 is pointer to cpu variable.
 * Compare keys using all 4 key fields.
 * Returns: -1 if key1 < key2
 *           0 if key1 == key2
 *           1 if key1 > key2
 */
static inline int comp_keys(const struct reiserfs_key *le_key,
const struct cpu_key *cpu_key)
{
int retval;
retval = comp_short_keys(le_key, cpu_key);
if (retval)
return retval;
if (le_key_k_offset(le_key_version(le_key), le_key) <
cpu_key_k_offset(cpu_key))
return -1;
if (le_key_k_offset(le_key_version(le_key), le_key) >
cpu_key_k_offset(cpu_key))
return 1;
if (cpu_key->key_length == 3)
return 0;
/* this part is needed only when tail conversion is in progress */
if (le_key_k_type(le_key_version(le_key), le_key) <
cpu_key_k_type(cpu_key))
return -1;
if (le_key_k_type(le_key_version(le_key), le_key) >
cpu_key_k_type(cpu_key))
return 1;
return 0;
}
inline int comp_short_le_keys(const struct reiserfs_key *key1,
const struct reiserfs_key *key2)
{
__u32 *k1_u32, *k2_u32;
int key_length = REISERFS_SHORT_KEY_LEN;
k1_u32 = (__u32 *) key1;
k2_u32 = (__u32 *) key2;
for (; key_length--; ++k1_u32, ++k2_u32) {
if (le32_to_cpu(*k1_u32) < le32_to_cpu(*k2_u32))
return -1;
if (le32_to_cpu(*k1_u32) > le32_to_cpu(*k2_u32))
return 1;
}
return 0;
}
/* Convert an on-disk (little-endian) key into its cpu representation. */
inline void le_key2cpu_key(struct cpu_key *to, const struct reiserfs_key *from)
{
	int version;

	to->on_disk_key.k_dir_id = le32_to_cpu(from->k_dir_id);
	to->on_disk_key.k_objectid = le32_to_cpu(from->k_objectid);

	/* find out version of the key */
	version = le_key_version(from);
	to->version = version;
	to->on_disk_key.k_offset = le_key_k_offset(version, from);
	to->on_disk_key.k_type = le_key_k_type(version, from);
}
/*
 * this does not say which one is bigger, it only returns nonzero if the
 * keys are not equal, 0 otherwise
 */
inline int comp_le_keys(const struct reiserfs_key *k1,
			const struct reiserfs_key *k2)
{
	/* byte-wise compare of the full on-disk keys; 0 means identical */
	return memcmp(k1, k2, sizeof(struct reiserfs_key));
}
/**************************************************************************
* Binary search toolkit function *
* Search for an item in the array by the item key *
* Returns: 1 if found, 0 if not found; *
* *pos = number of the searched element if found, else the *
* number of the first element that is larger than key. *
**************************************************************************/
/*
 * For those not familiar with binary search: lbound is the leftmost item
 * that it could be, rbound the rightmost item that it could be. We examine
 * the item halfway between lbound and rbound, and that tells us either
 * that we can increase lbound, or decrease rbound, or that we have found it,
 * or if rbound < lbound that there are no possible items, and we have not
 * found it. With each examination we cut the number of possible items it
 * could be by one more than half rounded down, or we find it.
 */
static inline int bin_search(const void *key, /* Key to search for. */
const void *base, /* First item in the array. */
int num, /* Number of items in the array. */
2024-09-09 08:57:42 +00:00
/*
* Item size in the array. searched. Lest the
* reader be confused, note that this is crafted
* as a general function, and when it is applied
* specifically to the array of item headers in a
* node, width is actually the item header size
* not the item size.
*/
int width,
2024-09-09 08:52:07 +00:00
int *pos /* Number of the searched for element. */
)
{
int rbound, lbound, j;
for (j = ((rbound = num - 1) + (lbound = 0)) / 2;
lbound <= rbound; j = (rbound + lbound) / 2)
switch (comp_keys
((struct reiserfs_key *)((char *)base + j * width),
(struct cpu_key *)key)) {
case -1:
lbound = j + 1;
continue;
case 1:
rbound = j - 1;
continue;
case 0:
*pos = j;
return ITEM_FOUND; /* Key found in the array. */
}
2024-09-09 08:57:42 +00:00
/*
* bin_search did not find given key, it returns position of key,
* that is minimal and greater than the given one.
*/
2024-09-09 08:52:07 +00:00
*pos = lbound;
return ITEM_NOT_FOUND;
}
/* Minimal possible key. It is never in the tree. */
const struct reiserfs_key MIN_KEY = { 0, 0, {{0, 0},} };

/* Maximal possible key. It is never in the tree. */
static const struct reiserfs_key MAX_KEY = {
	cpu_to_le32(0xffffffff),
	cpu_to_le32(0xffffffff),
	{{cpu_to_le32(0xffffffff),
	  cpu_to_le32(0xffffffff)},}
};
/*
 * Get delimiting key of the buffer by looking for it in the buffers in the
 * path, starting from the bottom of the path, and going upwards. We must
 * check the path's validity at each step. If the key is not in the path,
 * there is no delimiting key in the tree (buffer is first or last buffer
 * in tree), and in this case we return a special key, either MIN_KEY or
 * MAX_KEY.
 */
static inline const struct reiserfs_key *get_lkey(const struct treepath *chk_path,
						  const struct super_block *sb)
{
	int offset = chk_path->path_length;
	struct buffer_head *parent;
	int position;

	RFALSE(offset < FIRST_PATH_ELEMENT_OFFSET,
	       "PAP-5010: invalid offset in the path");

	/* While not higher in path than first element. */
	while (offset-- > FIRST_PATH_ELEMENT_OFFSET) {
		RFALSE(!buffer_uptodate
		       (PATH_OFFSET_PBUFFER(chk_path, offset)),
		       "PAP-5020: parent is not uptodate");

		/* Parent at the path is not in the tree now. */
		parent = PATH_OFFSET_PBUFFER(chk_path, offset);
		if (!B_IS_IN_TREE(parent))
			return &MAX_KEY;

		/* Check whether position in the parent is correct. */
		position = PATH_OFFSET_POSITION(chk_path, offset);
		if (position > B_NR_ITEMS(parent))
			return &MAX_KEY;

		/* Check whether parent at the path really points to the child. */
		if (B_N_CHILD_NUM(parent, position) !=
		    PATH_OFFSET_PBUFFER(chk_path, offset + 1)->b_blocknr)
			return &MAX_KEY;

		/*
		 * Return delimiting key if position in the parent
		 * is not equal to zero.
		 */
		if (position)
			return internal_key(parent, position - 1);
	}

	/* Return MIN_KEY if we are in the root of the buffer tree. */
	if (PATH_OFFSET_PBUFFER(chk_path, FIRST_PATH_ELEMENT_OFFSET)->
	    b_blocknr == SB_ROOT_BLOCK(sb))
		return &MIN_KEY;
	return &MAX_KEY;
}
/* Get delimiting key of the buffer at the path and its right neighbor. */
inline const struct reiserfs_key *get_rkey(const struct treepath *chk_path,
const struct super_block *sb)
{
int position, path_offset = chk_path->path_length;
struct buffer_head *parent;
RFALSE(path_offset < FIRST_PATH_ELEMENT_OFFSET,
"PAP-5030: invalid offset in the path");
while (path_offset-- > FIRST_PATH_ELEMENT_OFFSET) {
RFALSE(!buffer_uptodate
(PATH_OFFSET_PBUFFER(chk_path, path_offset)),
"PAP-5040: parent is not uptodate");
/* Parent at the path is not in the tree now. */
if (!B_IS_IN_TREE
(parent =
PATH_OFFSET_PBUFFER(chk_path, path_offset)))
return &MIN_KEY;
/* Check whether position in the parent is correct. */
if ((position =
PATH_OFFSET_POSITION(chk_path,
path_offset)) >
B_NR_ITEMS(parent))
return &MIN_KEY;
2024-09-09 08:57:42 +00:00
/*
* Check whether parent at the path really points
* to the child.
*/
2024-09-09 08:52:07 +00:00
if (B_N_CHILD_NUM(parent, position) !=
PATH_OFFSET_PBUFFER(chk_path,
path_offset + 1)->b_blocknr)
return &MIN_KEY;
2024-09-09 08:57:42 +00:00
/*
* Return delimiting key if position in the parent
* is not the last one.
*/
2024-09-09 08:52:07 +00:00
if (position != B_NR_ITEMS(parent))
2024-09-09 08:57:42 +00:00
return internal_key(parent, position);
2024-09-09 08:52:07 +00:00
}
2024-09-09 08:57:42 +00:00
2024-09-09 08:52:07 +00:00
/* Return MAX_KEY if we are in the root of the buffer tree. */
if (PATH_OFFSET_PBUFFER(chk_path, FIRST_PATH_ELEMENT_OFFSET)->
b_blocknr == SB_ROOT_BLOCK(sb))
return &MAX_KEY;
return &MIN_KEY;
}
/*
 * Check whether a key is contained in the tree rooted from a buffer at a path.
 * This works by looking at the left and right delimiting keys for the buffer
 * in the last path_element in the path. These delimiting keys are stored
 * at least one level above that buffer in the tree. If the buffer is the
 * first or last node in the tree order then one of the delimiting keys may
 * be absent, and in this case get_lkey and get_rkey return a special key
 * which is MIN_KEY or MAX_KEY.
 */
static inline int key_in_buffer(
/* Path which should be checked. */
struct treepath *chk_path,
/* Key which should be checked. */
const struct cpu_key *key,
2024-09-09 08:52:07 +00:00
struct super_block *sb
)
{
RFALSE(!key || chk_path->path_length < FIRST_PATH_ELEMENT_OFFSET
|| chk_path->path_length > MAX_HEIGHT,
"PAP-5050: pointer to the key(%p) is NULL or invalid path length(%d)",
key, chk_path->path_length);
RFALSE(!PATH_PLAST_BUFFER(chk_path)->b_bdev,
"PAP-5060: device must not be NODEV");
if (comp_keys(get_lkey(chk_path, sb), key) == 1)
/* left delimiting key is bigger, that the key we look for */
return 0;
/* if ( comp_keys(key, get_rkey(chk_path, sb)) != -1 ) */
if (comp_keys(get_rkey(chk_path, sb), key) != 1)
/* key must be less than right delimitiing key */
return 0;
return 1;
}
/*
 * Debug check used after operations that must fully release a path:
 * the path is expected to have been pathrelse()'d already.
 */
int reiserfs_check_path(struct treepath *p)
{
	RFALSE(p->path_length != ILLEGAL_PATH_ELEMENT_OFFSET,
	       "path not properly relsed");
	return 0;
}
/*
 * Drop the reference to each buffer in a path and restore
 * dirty bits clean when preparing the buffer for the log.
 * This version should only be called from fix_nodes()
 */
void pathrelse_and_restore(struct super_block *sb,
			   struct treepath *search_path)
{
	int offset = search_path->path_length;

	RFALSE(offset < ILLEGAL_PATH_ELEMENT_OFFSET,
	       "clm-4000: invalid path offset");

	/* walk the path top-down, un-preparing and releasing each buffer */
	for (; offset > ILLEGAL_PATH_ELEMENT_OFFSET; offset--) {
		struct buffer_head *bh =
		    PATH_OFFSET_PBUFFER(search_path, offset);

		reiserfs_restore_prepared_buffer(sb, bh);
		brelse(bh);
	}
	search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET;
}
/* Drop the reference to each buffer in a path */
void pathrelse(struct treepath *search_path)
{
	int offset = search_path->path_length;

	RFALSE(offset < ILLEGAL_PATH_ELEMENT_OFFSET,
	       "PAP-5090: invalid path offset");

	/* release the reference taken on every buffer in the path */
	for (; offset > ILLEGAL_PATH_ELEMENT_OFFSET; offset--)
		brelse(PATH_OFFSET_PBUFFER(search_path, offset));

	search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET;
}
/* returns 1 if buf looks like a leaf node, 0 otherwise */
static int is_leaf(char *buf, int blocksize, struct buffer_head *bh)
{
	struct block_head *blkh;
	struct item_head *ih;
	int used_space;
	int prev_location;
	int i;
	int nr;

	blkh = (struct block_head *)buf;
	if (blkh_level(blkh) != DISK_LEAF_NODE_LEVEL) {
		reiserfs_warning(NULL, "reiserfs-5080",
				 "this should be caught earlier");
		return 0;
	}

	nr = blkh_nr_item(blkh);
	if (nr < 1 || nr > ((blocksize - BLKH_SIZE) / (IH_SIZE + MIN_ITEM_LEN))) {
		/* item number is too big or too small */
		reiserfs_warning(NULL, "reiserfs-5081",
				 "nr_item seems wrong: %z", bh);
		return 0;
	}

	ih = (struct item_head *)(buf + BLKH_SIZE) + nr - 1;
	used_space = BLKH_SIZE + IH_SIZE * nr + (blocksize - ih_location(ih));

	/* free space does not match to calculated amount of use space */
	if (used_space != blocksize - blkh_free_space(blkh)) {
		reiserfs_warning(NULL, "reiserfs-5082",
				 "free space seems wrong: %z", bh);
		return 0;
	}

	/*
	 * FIXME: it is_leaf will hit performance too much - we may have
	 * return 1 here
	 */

	/* check tables of item heads */
	ih = (struct item_head *)(buf + BLKH_SIZE);
	prev_location = blocksize;
	for (i = 0; i < nr; i++, ih++) {
		if (le_ih_k_type(ih) == TYPE_ANY) {
			reiserfs_warning(NULL, "reiserfs-5083",
					 "wrong item type for item %h",
					 ih);
			return 0;
		}
		if (ih_location(ih) >= blocksize
		    || ih_location(ih) < IH_SIZE * nr) {
			reiserfs_warning(NULL, "reiserfs-5084",
					 "item location seems wrong: %h",
					 ih);
			return 0;
		}
		if (ih_item_len(ih) < 1
		    || ih_item_len(ih) > MAX_ITEM_LEN(blocksize)) {
			reiserfs_warning(NULL, "reiserfs-5085",
					 "item length seems wrong: %h",
					 ih);
			return 0;
		}
		/* item bodies must be packed back-to-back from the block end */
		if (prev_location - ih_location(ih) != ih_item_len(ih)) {
			reiserfs_warning(NULL, "reiserfs-5086",
					 "item location seems wrong "
					 "(second one): %h", ih);
			return 0;
		}
		prev_location = ih_location(ih);
	}

	/* one may imagine many more checks */
	return 1;
}
/* returns 1 if buf looks like an internal node, 0 otherwise */
static int is_internal(char *buf, int blocksize, struct buffer_head *bh)
{
struct block_head *blkh;
int nr;
int used_space;
blkh = (struct block_head *)buf;
nr = blkh_level(blkh);
if (nr <= DISK_LEAF_NODE_LEVEL || nr > MAX_HEIGHT) {
/* this level is not possible for internal nodes */
reiserfs_warning(NULL, "reiserfs-5087",
"this should be caught earlier");
return 0;
}
nr = blkh_nr_item(blkh);
2024-09-09 08:57:42 +00:00
/* for internal which is not root we might check min number of keys */
2024-09-09 08:52:07 +00:00
if (nr > (blocksize - BLKH_SIZE - DC_SIZE) / (KEY_SIZE + DC_SIZE)) {
reiserfs_warning(NULL, "reiserfs-5088",
"number of key seems wrong: %z", bh);
return 0;
}
used_space = BLKH_SIZE + KEY_SIZE * nr + DC_SIZE * (nr + 1);
if (used_space != blocksize - blkh_free_space(blkh)) {
reiserfs_warning(NULL, "reiserfs-5089",
"free space seems wrong: %z", bh);
return 0;
}
2024-09-09 08:57:42 +00:00
/* one may imagine many more checks */
2024-09-09 08:52:07 +00:00
return 1;
}
/*
 * make sure that bh contains formatted node of reiserfs tree of
 * 'level'-th level
 */
static int is_tree_node(struct buffer_head *bh, int level)
{
	if (B_LEVEL(bh) != level) {
		reiserfs_warning(NULL, "reiserfs-5090", "node level %d does "
				 "not match to the expected one %d",
				 B_LEVEL(bh), level);
		return 0;
	}

	/* dispatch to the format check matching the node's level */
	return (level == DISK_LEAF_NODE_LEVEL)
	    ? is_leaf(bh->b_data, bh->b_size, bh)
	    : is_internal(bh->b_data, bh->b_size, bh);
}
#define SEARCH_BY_KEY_READA 16
/*
 * The function is NOT SCHEDULE-SAFE!
 * It might unlock the write lock if we needed to wait for a block
 * to be read. Note that in this case it won't recover the lock to avoid
 * high contention resulting from too much lock requests, especially
 * the caller (search_by_key) will perform other schedule-unsafe
 * operations just after calling this function.
 *
 * @return depth of lock to be restored after read completes
 */
static int search_by_key_reada(struct super_block *s,
			       struct buffer_head **bh,
			       b_blocknr_t *b, int num)
{
	int i, j;
	int depth = -1;

	for (i = 0; i < num; i++)
		bh[i] = sb_getblk(s, b[i]);

	/*
	 * We are going to read some blocks on which we
	 * have a reference. It's safe, though we might be
	 * reading blocks concurrently changed if we release
	 * the lock. But it's still fine because we check later
	 * if the tree changed
	 */
	for (j = 0; j < i; j++) {
		/*
		 * note, this needs attention if we are getting rid of the BKL
		 * you have to make sure the prepared bit isn't set on this
		 * buffer
		 */
		if (!buffer_uptodate(bh[j])) {
			/* drop the tree lock once, before the first read */
			if (depth == -1)
				depth = reiserfs_write_unlock_nested(s);
			ll_rw_block(READA, 1, bh + j);
		}
		brelse(bh[j]);
	}
	return depth;
}
/*
 * This function fills up the path from the root to the leaf as it
 * descends the tree looking for the key. It uses reiserfs_bread to
 * try to find buffers in the cache given their block number. If it
 * does not find them in the cache it reads them from disk. For each
 * node search_by_key finds using reiserfs_bread it then uses
 * bin_search to look through that node. bin_search will find the
 * position of the block_number of the next node if it is looking
 * through an internal node. If it is looking through a leaf node
 * bin_search will find the position of the item which has key either
 * equal to given key, or which is the maximal key less than the given
 * key. search_by_key returns a path that must be checked for the
 * correctness of the top of the path but need not be checked for the
 * correctness of the bottom of the path
 */
/*
 * search_by_key - search for key (and item) in stree
 * @sb: superblock
 * @key: pointer to key to search for
 * @search_path: Allocated and initialized struct treepath; Returned filled
 *		 on success.
 * @stop_level: How far down the tree to search, Use DISK_LEAF_NODE_LEVEL to
 *		 stop at leaf level.
 *
 * The function is NOT SCHEDULE-SAFE!
 */
int search_by_key(struct super_block *sb, const struct cpu_key *key,
struct treepath *search_path, int stop_level)
2024-09-09 08:52:07 +00:00
{
b_blocknr_t block_number;
int expected_level;
struct buffer_head *bh;
struct path_element *last_element;
int node_level, retval;
int right_neighbor_of_leaf_node;
int fs_gen;
struct buffer_head *reada_bh[SEARCH_BY_KEY_READA];
b_blocknr_t reada_blocks[SEARCH_BY_KEY_READA];
int reada_count = 0;
#ifdef CONFIG_REISERFS_CHECK
int repeat_counter = 0;
#endif
PROC_INFO_INC(sb, search_by_key);
2024-09-09 08:57:42 +00:00
/*
* As we add each node to a path we increase its count. This means
* that we must be careful to release all nodes in a path before we
* either discard the path struct or re-use the path struct, as we
* do here.
*/
2024-09-09 08:52:07 +00:00
pathrelse(search_path);
right_neighbor_of_leaf_node = 0;
2024-09-09 08:57:42 +00:00
/*
* With each iteration of this loop we search through the items in the
* current node, and calculate the next current node(next path element)
* for the next iteration of this loop..
*/
2024-09-09 08:52:07 +00:00
block_number = SB_ROOT_BLOCK(sb);
expected_level = -1;
while (1) {
#ifdef CONFIG_REISERFS_CHECK
if (!(++repeat_counter % 50000))
reiserfs_warning(sb, "PAP-5100",
"%s: there were %d iterations of "
"while loop looking for key %K",
current->comm, repeat_counter,
key);
#endif
/* prep path to have another element added to it. */
last_element =
PATH_OFFSET_PELEMENT(search_path,
++search_path->path_length);
fs_gen = get_generation(sb);
2024-09-09 08:57:42 +00:00
/*
* Read the next tree node, and set the last element
* in the path to have a pointer to it.
*/
2024-09-09 08:52:07 +00:00
if ((bh = last_element->pe_buffer =
sb_getblk(sb, block_number))) {
/*
2024-09-09 08:57:42 +00:00
* We'll need to drop the lock if we encounter any
* buffers that need to be read. If all of them are
* already up to date, we don't need to drop the lock.
2024-09-09 08:52:07 +00:00
*/
2024-09-09 08:57:42 +00:00
int depth = -1;
if (!buffer_uptodate(bh) && reada_count > 1)
depth = search_by_key_reada(sb, reada_bh,
reada_blocks, reada_count);
if (!buffer_uptodate(bh) && depth == -1)
depth = reiserfs_write_unlock_nested(sb);
2024-09-09 08:52:07 +00:00
ll_rw_block(READ, 1, &bh);
wait_on_buffer(bh);
2024-09-09 08:57:42 +00:00
if (depth != -1)
reiserfs_write_lock_nested(sb, depth);
2024-09-09 08:52:07 +00:00
if (!buffer_uptodate(bh))
goto io_error;
} else {
2024-09-09 08:57:42 +00:00
io_error:
2024-09-09 08:52:07 +00:00
search_path->path_length--;
pathrelse(search_path);
return IO_ERROR;
}
reada_count = 0;
if (expected_level == -1)
expected_level = SB_TREE_HEIGHT(sb);
expected_level--;
2024-09-09 08:57:42 +00:00
/*
* It is possible that schedule occurred. We must check
* whether the key to search is still in the tree rooted
* from the current buffer. If not then repeat search
* from the root.
*/
2024-09-09 08:52:07 +00:00
if (fs_changed(fs_gen, sb) &&
(!B_IS_IN_TREE(bh) ||
B_LEVEL(bh) != expected_level ||
!key_in_buffer(search_path, key, sb))) {
PROC_INFO_INC(sb, search_by_key_fs_changed);
PROC_INFO_INC(sb, search_by_key_restarted);
PROC_INFO_INC(sb,
sbk_restarted[expected_level - 1]);
pathrelse(search_path);
2024-09-09 08:57:42 +00:00
/*
* Get the root block number so that we can
* repeat the search starting from the root.
*/
2024-09-09 08:52:07 +00:00
block_number = SB_ROOT_BLOCK(sb);
expected_level = -1;
right_neighbor_of_leaf_node = 0;
/* repeat search from the root */
continue;
}
2024-09-09 08:57:42 +00:00
/*
* only check that the key is in the buffer if key is not
* equal to the MAX_KEY. Latter case is only possible in
* "finish_unfinished()" processing during mount.
*/
2024-09-09 08:52:07 +00:00
RFALSE(comp_keys(&MAX_KEY, key) &&
!key_in_buffer(search_path, key, sb),
"PAP-5130: key is not in the buffer");
#ifdef CONFIG_REISERFS_CHECK
if (REISERFS_SB(sb)->cur_tb) {
print_cur_tb("5140");
reiserfs_panic(sb, "PAP-5140",
"schedule occurred in do_balance!");
}
#endif
2024-09-09 08:57:42 +00:00
/*
* make sure, that the node contents look like a node of
* certain level
*/
2024-09-09 08:52:07 +00:00
if (!is_tree_node(bh, expected_level)) {
reiserfs_error(sb, "vs-5150",
"invalid format found in block %ld. "
"Fsck?", bh->b_blocknr);
pathrelse(search_path);
return IO_ERROR;
}
/* ok, we have acquired next formatted node in the tree */
node_level = B_LEVEL(bh);
PROC_INFO_BH_STAT(sb, bh, node_level - 1);
RFALSE(node_level < stop_level,
"vs-5152: tree level (%d) is less than stop level (%d)",
node_level, stop_level);
2024-09-09 08:57:42 +00:00
retval = bin_search(key, item_head(bh, 0),
2024-09-09 08:52:07 +00:00
B_NR_ITEMS(bh),
(node_level ==
DISK_LEAF_NODE_LEVEL) ? IH_SIZE :
KEY_SIZE,
2024-09-09 08:57:42 +00:00
&last_element->pe_position);
2024-09-09 08:52:07 +00:00
if (node_level == stop_level) {
return retval;
}
/* we are not in the stop level */
2024-09-09 08:57:42 +00:00
/*
* item has been found, so we choose the pointer which
* is to the right of the found one
*/
2024-09-09 08:52:07 +00:00
if (retval == ITEM_FOUND)
last_element->pe_position++;
2024-09-09 08:57:42 +00:00
/*
* if item was not found we choose the position which is to
* the left of the found item. This requires no code,
* bin_search did it already.
*/
2024-09-09 08:52:07 +00:00
2024-09-09 08:57:42 +00:00
/*
* So we have chosen a position in the current node which is
* an internal node. Now we calculate child block number by
* position in the node.
*/
2024-09-09 08:52:07 +00:00
block_number =
B_N_CHILD_NUM(bh, last_element->pe_position);
2024-09-09 08:57:42 +00:00
/*
* if we are going to read leaf nodes, try for read
* ahead as well
*/
2024-09-09 08:52:07 +00:00
if ((search_path->reada & PATH_READA) &&
node_level == DISK_LEAF_NODE_LEVEL + 1) {
int pos = last_element->pe_position;
int limit = B_NR_ITEMS(bh);
struct reiserfs_key *le_key;
if (search_path->reada & PATH_READA_BACK)
limit = 0;
while (reada_count < SEARCH_BY_KEY_READA) {
if (pos == limit)
break;
reada_blocks[reada_count++] =
B_N_CHILD_NUM(bh, pos);
if (search_path->reada & PATH_READA_BACK)
pos--;
else
pos++;
/*
* check to make sure we're in the same object
*/
2024-09-09 08:57:42 +00:00
le_key = internal_key(bh, pos);
2024-09-09 08:52:07 +00:00
if (le32_to_cpu(le_key->k_objectid) !=
key->on_disk_key.k_objectid) {
break;
}
}
}
}
}
/*
 * Form the path to an item and position in this item which contains
 * file byte defined by key. If there is no such item
 * corresponding to the key, we point the path to the item with
 * maximal key less than key, and *pos_in_item is set to one
 * past the last entry/byte in the item. If searching for entry in a
 * directory item, and it is not found, *pos_in_item is set to one
 * entry more than the entry with maximal key which is less than the
 * sought key.
 *
 * Note that if there is no entry in this same node which is one more,
 * then we point to an imaginary entry. for direct items, the
 * position is in units of bytes, for indirect items the position is
 * in units of blocknr entries, for directory items the position is in
 * units of directory entries.
 */
/* The function is NOT SCHEDULE-SAFE! */
int search_for_position_by_key(struct super_block *sb,
/* Key to search (cpu variable) */
const struct cpu_key *p_cpu_key,
/* Filled up by this function. */
struct treepath *search_path)
2024-09-09 08:52:07 +00:00
{
struct item_head *p_le_ih; /* pointer to on-disk structure */
int blk_size;
loff_t item_offset, offset;
struct reiserfs_dir_entry de;
int retval;
/* If searching for directory entry. */
if (is_direntry_cpu_key(p_cpu_key))
return search_by_entry_key(sb, p_cpu_key, search_path,
&de);
/* If not searching for directory entry. */
/* If item is found. */
retval = search_item(sb, p_cpu_key, search_path);
if (retval == IO_ERROR)
return retval;
if (retval == ITEM_FOUND) {
RFALSE(!ih_item_len
2024-09-09 08:57:42 +00:00
(item_head
2024-09-09 08:52:07 +00:00
(PATH_PLAST_BUFFER(search_path),
PATH_LAST_POSITION(search_path))),
"PAP-5165: item length equals zero");
pos_in_item(search_path) = 0;
return POSITION_FOUND;
}
RFALSE(!PATH_LAST_POSITION(search_path),
"PAP-5170: position equals zero");
/* Item is not found. Set path to the previous item. */
p_le_ih =
2024-09-09 08:57:42 +00:00
item_head(PATH_PLAST_BUFFER(search_path),
2024-09-09 08:52:07 +00:00
--PATH_LAST_POSITION(search_path));
blk_size = sb->s_blocksize;
2024-09-09 08:57:42 +00:00
if (comp_short_keys(&p_le_ih->ih_key, p_cpu_key))
2024-09-09 08:52:07 +00:00
return FILE_NOT_FOUND;
2024-09-09 08:57:42 +00:00
/* FIXME: quite ugly this far */
2024-09-09 08:52:07 +00:00
item_offset = le_ih_k_offset(p_le_ih);
offset = cpu_key_k_offset(p_cpu_key);
/* Needed byte is contained in the item pointed to by the path. */
if (item_offset <= offset &&
item_offset + op_bytes_number(p_le_ih, blk_size) > offset) {
pos_in_item(search_path) = offset - item_offset;
if (is_indirect_le_ih(p_le_ih)) {
pos_in_item(search_path) /= blk_size;
}
return POSITION_FOUND;
}
2024-09-09 08:57:42 +00:00
/*
* Needed byte is not contained in the item pointed to by the
* path. Set pos_in_item out of the item.
*/
2024-09-09 08:52:07 +00:00
if (is_indirect_le_ih(p_le_ih))
pos_in_item(search_path) =
ih_item_len(p_le_ih) / UNFM_P_SIZE;
else
pos_in_item(search_path) = ih_item_len(p_le_ih);
return POSITION_NOT_FOUND;
}
/* Compare given item and item pointed to by the path. */
int comp_items(const struct item_head *stored_ih, const struct treepath *path)
{
struct buffer_head *bh = PATH_PLAST_BUFFER(path);
struct item_head *ih;
/* Last buffer at the path is not in the tree. */
if (!B_IS_IN_TREE(bh))
return 1;
/* Last path position is invalid. */
if (PATH_LAST_POSITION(path) >= B_NR_ITEMS(bh))
return 1;
/* we need only to know, whether it is the same item */
2024-09-09 08:57:42 +00:00
ih = tp_item_head(path);
2024-09-09 08:52:07 +00:00
return memcmp(stored_ih, ih, IH_SIZE);
}
/* unformatted nodes are not logged anymore, ever. This is safe now */

#define held_by_others(bh) (atomic_read(&(bh)->b_count) > 1)

/* block can not be forgotten as it is in I/O or held by someone */
#define block_in_use(bh) (buffer_locked(bh) || (held_by_others(bh)))
/* prepare for delete or cut of direct item */
static inline int prepare_for_direct_item(struct treepath *path,
					  struct item_head *le_ih,
					  struct inode *inode,
					  loff_t new_file_length, int *cut_size)
{
	loff_t round_len;

	if (new_file_length == max_reiserfs_offset(inode)) {
		/* item has to be deleted */
		*cut_size = -(IH_SIZE + ih_item_len(le_ih));
		return M_DELETE;
	}

	/* new file gets truncated */
	if (get_inode_item_key_version(inode) == KEY_FORMAT_3_6) {
		round_len = ROUND_UP(new_file_length);
		/* this was new_file_length < le_ih ... */
		if (round_len < le_ih_k_offset(le_ih)) {
			*cut_size = -(IH_SIZE + ih_item_len(le_ih));
			return M_DELETE;	/* Delete this item. */
		}

		/* Calculate first position and size for cutting from item. */
		pos_in_item(path) = round_len - (le_ih_k_offset(le_ih) - 1);
		*cut_size = -(ih_item_len(le_ih) - pos_in_item(path));

		return M_CUT;	/* Cut from this item. */
	}

	/* old file: items may have any length */
	if (new_file_length < le_ih_k_offset(le_ih)) {
		*cut_size = -(IH_SIZE + ih_item_len(le_ih));
		return M_DELETE;	/* Delete this item. */
	}

	/* Calculate first position and size for cutting from item. */
	pos_in_item(path) = new_file_length + 1 - le_ih_k_offset(le_ih);
	*cut_size = -(ih_item_len(le_ih) - pos_in_item(path));
	return M_CUT;		/* Cut from this item. */
}
/* prepare for delete or cut of a directory item */
static inline int prepare_for_direntry_item(struct treepath *path,
					    struct item_head *le_ih,
					    struct inode *inode,
					    loff_t new_file_length,
					    int *cut_size)
{
	if (le_ih_k_offset(le_ih) == DOT_OFFSET &&
	    new_file_length == max_reiserfs_offset(inode)) {
		RFALSE(ih_entry_count(le_ih) != 2,
		       "PAP-5220: incorrect empty directory item (%h)", le_ih);
		*cut_size = -(IH_SIZE + ih_item_len(le_ih));
		/* Delete the directory item containing "." and ".." entry. */
		return M_DELETE;
	}

	if (ih_entry_count(le_ih) == 1) {
		/*
		 * Delete the directory item such as there is one record only
		 * in this item
		 */
		*cut_size = -(IH_SIZE + ih_item_len(le_ih));
		return M_DELETE;
	}

	/* Cut one record from the directory item. */
	*cut_size =
	    -(DEH_SIZE +
	      entry_length(get_last_bh(path), le_ih, pos_in_item(path)));
	return M_CUT;
}
#define JOURNAL_FOR_FREE_BLOCK_AND_UPDATE_SD (2 * JOURNAL_PER_BALANCE_CNT + 1)
/*
 * If the path points to a directory or direct item, calculate mode
 * and the size cut, for balance.
 * If the path points to an indirect item, remove some number of its
 * unformatted nodes.
 * In case of file truncate calculate whether this item must be
 * deleted/truncated or last unformatted node of this item will be
 * converted to a direct item.
 * This function returns a determination of what balance mode the
 * calling function should employ.
 */
static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th,
struct inode *inode,
struct treepath *path,
const struct cpu_key *item_key,
/*
* Number of unformatted nodes
* which were removed from end
* of the file.
*/
int *removed,
int *cut_size,
/* MAX_KEY_OFFSET in case of delete. */
unsigned long long new_file_length
2024-09-09 08:52:07 +00:00
)
{
struct super_block *sb = inode->i_sb;
2024-09-09 08:57:42 +00:00
struct item_head *p_le_ih = tp_item_head(path);
2024-09-09 08:52:07 +00:00
struct buffer_head *bh = PATH_PLAST_BUFFER(path);
BUG_ON(!th->t_trans_id);
/* Stat_data item. */
if (is_statdata_le_ih(p_le_ih)) {
RFALSE(new_file_length != max_reiserfs_offset(inode),
"PAP-5210: mode must be M_DELETE");
*cut_size = -(IH_SIZE + ih_item_len(p_le_ih));
return M_DELETE;
}
/* Directory item. */
if (is_direntry_le_ih(p_le_ih))
return prepare_for_direntry_item(path, p_le_ih, inode,
new_file_length,
cut_size);
/* Direct item. */
if (is_direct_le_ih(p_le_ih))
return prepare_for_direct_item(path, p_le_ih, inode,
new_file_length, cut_size);
/* Case of an indirect item. */
{
int blk_size = sb->s_blocksize;
struct item_head s_ih;
int need_re_search;
int delete = 0;
int result = M_CUT;
int pos = 0;
if ( new_file_length == max_reiserfs_offset (inode) ) {
2024-09-09 08:57:42 +00:00
/*
* prepare_for_delete_or_cut() is called by
* reiserfs_delete_item()
*/
2024-09-09 08:52:07 +00:00
new_file_length = 0;
delete = 1;
}
do {
need_re_search = 0;
*cut_size = 0;
bh = PATH_PLAST_BUFFER(path);
2024-09-09 08:57:42 +00:00
copy_item_head(&s_ih, tp_item_head(path));
2024-09-09 08:52:07 +00:00
pos = I_UNFM_NUM(&s_ih);
while (le_ih_k_offset (&s_ih) + (pos - 1) * blk_size > new_file_length) {
__le32 *unfm;
__u32 block;
2024-09-09 08:57:42 +00:00
/*
* Each unformatted block deletion may involve
* one additional bitmap block into the transaction,
* thereby the initial journal space reservation
* might not be enough.
*/
2024-09-09 08:52:07 +00:00
if (!delete && (*cut_size) != 0 &&
reiserfs_transaction_free_space(th) < JOURNAL_FOR_FREE_BLOCK_AND_UPDATE_SD)
break;
2024-09-09 08:57:42 +00:00
unfm = (__le32 *)ih_item_body(bh, &s_ih) + pos - 1;
2024-09-09 08:52:07 +00:00
block = get_block_num(unfm, 0);
if (block != 0) {
reiserfs_prepare_for_journal(sb, bh, 1);
put_block_num(unfm, 0, 0);
2024-09-09 08:57:42 +00:00
journal_mark_dirty(th, bh);
2024-09-09 08:52:07 +00:00
reiserfs_free_block(th, inode, block, 1);
}
2024-09-09 08:57:42 +00:00
reiserfs_cond_resched(sb);
2024-09-09 08:52:07 +00:00
if (item_moved (&s_ih, path)) {
need_re_search = 1;
break;
}
pos --;
(*removed)++;
(*cut_size) -= UNFM_P_SIZE;
if (pos == 0) {
(*cut_size) -= IH_SIZE;
result = M_DELETE;
break;
}
}
2024-09-09 08:57:42 +00:00
/*
* a trick. If the buffer has been logged, this will
* do nothing. If we've broken the loop without logging
* it, it will restore the buffer
*/
2024-09-09 08:52:07 +00:00
reiserfs_restore_prepared_buffer(sb, bh);
} while (need_re_search &&
search_for_position_by_key(sb, item_key, path) == POSITION_FOUND);
pos_in_item(path) = pos * UNFM_P_SIZE;
if (*cut_size == 0) {
2024-09-09 08:57:42 +00:00
/*
* Nothing was cut. maybe convert last unformatted node to the
* direct item?
*/
2024-09-09 08:52:07 +00:00
result = M_CONVERT;
}
return result;
}
}
/*
 * Compute how many bytes the pending balance (delete or cut) will
 * remove from the item at tb->tb_path.
 */
static int calc_deleted_bytes_number(struct tree_balance *tb, char mode)
{
	struct item_head *p_le_ih = tp_item_head(tb->tb_path);
	int bytes;

	if (is_statdata_le_ih(p_le_ih))
		return 0;

	bytes = (mode == M_DELETE) ? ih_item_len(p_le_ih)
				   : -tb->insert_size[0];

	if (is_direntry_le_ih(p_le_ih)) {
		/*
		 * return EMPTY_DIR_SIZE; We delete emty directories only.
		 * we can't use EMPTY_DIR_SIZE, as old format dirs have a
		 * different empty size.  ick.  FIXME, is this right?
		 */
		return bytes;
	}

	if (is_indirect_le_ih(p_le_ih))
		/* Each pointer in the item stands for one block of data. */
		bytes = (bytes / UNFM_P_SIZE) *
			(PATH_PLAST_BUFFER(tb->tb_path)->b_size);

	return bytes;
}
/*
 * Prepare a tree_balance structure for a subsequent fix_nodes() /
 * do_balance() pair.  @size is the initial insert_size[0] value.
 */
static void init_tb_struct(struct reiserfs_transaction_handle *th,
			   struct tree_balance *tb,
			   struct super_block *sb,
			   struct treepath *path, int size)
{
	BUG_ON(!th->t_trans_id);

	memset(tb, 0, sizeof(*tb));
	tb->transaction_handle = th;
	tb->tb_sb = sb;
	tb->tb_path = path;

	/* Reset the illegal path element so stale state cannot leak in. */
	PATH_OFFSET_PBUFFER(path, ILLEGAL_PATH_ELEMENT_OFFSET) = NULL;
	PATH_OFFSET_POSITION(path, ILLEGAL_PATH_ELEMENT_OFFSET) = 0;

	tb->insert_size[0] = size;
}
/*
 * Zero the padding area of an item: clear bytes [length, total_length)
 * of @item.  Replaces the original hand-written backwards byte loop
 * with the equivalent memset call.
 */
void padd_item(char *item, int total_length, int length)
{
	memset(item + length, 0, total_length - length);
}
#ifdef REISERQUOTA_DEBUG
/*
 * Map an on-disk key to a single debug character:
 * 'd' direntry, 'D' direct, 'i' indirect, 's' stat data, 'u' unknown.
 */
char key2type(struct reiserfs_key *ih)
{
	return is_direntry_le_key(2, ih) ? 'd' :
	       is_direct_le_key(2, ih)   ? 'D' :
	       is_indirect_le_key(2, ih) ? 'i' :
	       is_statdata_le_key(2, ih) ? 's' : 'u';
}
/*
 * Map an item head to a single debug character:
 * 'd' direntry, 'D' direct, 'i' indirect, 's' stat data, 'u' unknown.
 */
char head2type(struct item_head *ih)
{
	return is_direntry_le_ih(ih) ? 'd' :
	       is_direct_le_ih(ih)   ? 'D' :
	       is_indirect_le_ih(ih) ? 'i' :
	       is_statdata_le_ih(ih) ? 's' : 'u';
}
#endif
/*
 * Delete object item.
 * th - active transaction handle
 * path - path to the deleted item
 * item_key - key to search for the deleted item
 * inode - used for updating i_blocks and quotas
 * un_bh - NULL or unformatted node pointer
 *
 * Returns the deleted item's body length on success, 0 on failure.
 */
int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
			 struct treepath *path, const struct cpu_key *item_key,
			 struct inode *inode, struct buffer_head *un_bh)
{
	struct super_block *sb = inode->i_sb;
	struct tree_balance s_del_balance;
	struct item_head s_ih;
	struct item_head *q_ih;
	int quota_cut_bytes;
	int ret_value, del_size, removed;
	int depth;

#ifdef CONFIG_REISERFS_CHECK
	char mode;
	int iter = 0;
#endif

	BUG_ON(!th->t_trans_id);

	init_tb_struct(th, &s_del_balance, sb, path,
		       0 /*size is unknown */ );

	/* Retry until fix_nodes succeeds without a concurrent tree change. */
	while (1) {
		removed = 0;

#ifdef CONFIG_REISERFS_CHECK
		iter++;
		mode =
#endif
		    prepare_for_delete_or_cut(th, inode, path,
					      item_key, &removed,
					      &del_size,
					      max_reiserfs_offset(inode));

		RFALSE(mode != M_DELETE, "PAP-5320: mode must be M_DELETE");

		copy_item_head(&s_ih, tp_item_head(path));
		s_del_balance.insert_size[0] = del_size;

		ret_value = fix_nodes(M_DELETE, &s_del_balance, NULL, NULL);
		if (ret_value != REPEAT_SEARCH)
			break;

		PROC_INFO_INC(sb, delete_item_restarted);

		/* file system changed, repeat search */
		ret_value =
		    search_for_position_by_key(sb, item_key, path);
		if (ret_value == IO_ERROR)
			break;
		if (ret_value == FILE_NOT_FOUND) {
			reiserfs_warning(sb, "vs-5340",
					 "no items of the file %K found",
					 item_key);
			break;
		}
	}			/* while (1) */

	if (ret_value != CARRY_ON) {
		unfix_nodes(&s_del_balance);
		return 0;
	}

	/* reiserfs_delete_item returns item length when success */
	ret_value = calc_deleted_bytes_number(&s_del_balance, M_DELETE);
	q_ih = tp_item_head(path);
	quota_cut_bytes = ih_item_len(q_ih);

	/*
	 * hack so the quota code doesn't have to guess if the file has a
	 * tail.  On tail insert, we allocate quota for 1 unformatted node.
	 * We test the offset because the tail might have been
	 * split into multiple items, and we only want to decrement for
	 * the unfm node once
	 */
	if (!S_ISLNK(inode->i_mode) && is_direct_le_ih(q_ih)) {
		if ((le_ih_k_offset(q_ih) & (sb->s_blocksize - 1)) == 1) {
			quota_cut_bytes = sb->s_blocksize + UNFM_P_SIZE;
		} else {
			quota_cut_bytes = 0;
		}
	}

	if (un_bh) {
		int off;
		char *data;

		/*
		 * We are in direct2indirect conversion, so move tail contents
		 * to the unformatted node
		 */
		/*
		 * note, we do the copy before preparing the buffer because we
		 * don't care about the contents of the unformatted node yet.
		 * the only thing we really care about is the direct item's
		 * data is in the unformatted node.
		 *
		 * Otherwise, we would have to call
		 * reiserfs_prepare_for_journal on the unformatted node,
		 * which might schedule, meaning we'd have to loop all the
		 * way back up to the start of the while loop.
		 *
		 * The unformatted node must be dirtied later on.  We can't be
		 * sure here if the entire tail has been deleted yet.
		 *
		 * un_bh is from the page cache (all unformatted nodes are
		 * from the page cache) and might be a highmem page.  So, we
		 * can't use un_bh->b_data.
		 * -clm
		 */

		data = kmap_atomic(un_bh->b_page);
		off = ((le_ih_k_offset(&s_ih) - 1) & (PAGE_CACHE_SIZE - 1));
		memcpy(data + off,
		       ih_item_body(PATH_PLAST_BUFFER(path), &s_ih),
		       ret_value);
		kunmap_atomic(data);
	}

	/* Perform balancing after all resources have been collected at once. */
	do_balance(&s_del_balance, NULL, NULL, M_DELETE);

#ifdef REISERQUOTA_DEBUG
	reiserfs_debug(sb, REISERFS_DEBUG_CODE,
		       "reiserquota delete_item(): freeing %u, id=%u type=%c",
		       quota_cut_bytes, inode->i_uid, head2type(&s_ih));
#endif
	/* Quota calls may block: drop the write lock around them. */
	depth = reiserfs_write_unlock_nested(inode->i_sb);
	dquot_free_space_nodirty(inode, quota_cut_bytes);
	reiserfs_write_lock_nested(inode->i_sb, depth);

	/* Return deleted body length */
	return ret_value;
}
/*
 * Summary Of Mechanisms For Handling Collisions Between Processes:
 *
 * deletion of the body of the object is performed by iput(), with the
 * result that if multiple processes are operating on a file, the
 * deletion of the body of the file is deferred until the last process
 * that has an open inode performs its iput().
 *
 * writes and truncates are protected from collisions by use of
 * semaphores.
 *
 * creates, linking, and mknod are protected from collisions with other
 * processes by making the reiserfs_add_entry() the last step in the
 * creation, and then rolling back all changes if there was a collision.
 * - Hans
 */

/* this deletes item which never gets split */
void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
				struct inode *inode, struct reiserfs_key *key)
{
	struct super_block *sb = th->t_super;
	struct tree_balance tb;
	INITIALIZE_PATH(path);
	int item_len = 0;
	int tb_init = 0;
	struct cpu_key cpu_key;
	int retval;
	int quota_cut_bytes = 0;

	BUG_ON(!th->t_trans_id);

	le_key2cpu_key(&cpu_key, key);

	/* Retry search + fix_nodes until the balance can proceed. */
	while (1) {
		retval = search_item(th->t_super, &cpu_key, &path);
		if (retval == IO_ERROR) {
			reiserfs_error(th->t_super, "vs-5350",
				       "i/o failure occurred trying "
				       "to delete %K", &cpu_key);
			break;
		}
		if (retval != ITEM_FOUND) {
			pathrelse(&path);
			/*
			 * No need for a warning, if there is just no free
			 * space to insert '..' item into the
			 * newly-created subdir
			 */
			if (!
			    ((unsigned long long)
			     GET_HASH_VALUE(le_key_k_offset
					    (le_key_version(key), key)) == 0
			     && (unsigned long long)
			     GET_GENERATION_NUMBER(le_key_k_offset
						   (le_key_version(key),
						    key)) == 1))
				reiserfs_warning(th->t_super, "vs-5355",
						 "%k not found", key);
			break;
		}
		if (!tb_init) {
			/* First pass only: set up the tree_balance once. */
			tb_init = 1;
			item_len = ih_item_len(tp_item_head(&path));
			init_tb_struct(th, &tb, th->t_super, &path,
				       -(IH_SIZE + item_len));
		}
		quota_cut_bytes = ih_item_len(tp_item_head(&path));

		retval = fix_nodes(M_DELETE, &tb, NULL, NULL);
		if (retval == REPEAT_SEARCH) {
			PROC_INFO_INC(th->t_super, delete_solid_item_restarted);
			continue;
		}

		if (retval == CARRY_ON) {
			do_balance(&tb, NULL, NULL, M_DELETE);
			/*
			 * Should we count quota for item? (we don't
			 * count quotas for save-links)
			 */
			if (inode) {
				int depth;
#ifdef REISERQUOTA_DEBUG
				reiserfs_debug(th->t_super, REISERFS_DEBUG_CODE,
					       "reiserquota delete_solid_item(): freeing %u id=%u type=%c",
					       quota_cut_bytes, inode->i_uid,
					       key2type(key));
#endif
				depth = reiserfs_write_unlock_nested(sb);
				dquot_free_space_nodirty(inode,
							 quota_cut_bytes);
				reiserfs_write_lock_nested(sb, depth);
			}
			break;
		}

		/* IO_ERROR, NO_DISK_SPACE, etc */
		reiserfs_warning(th->t_super, "vs-5360",
				 "could not delete %K due to fix_nodes failure",
				 &cpu_key);
		unfix_nodes(&tb);
		break;
	}

	reiserfs_check_path(&path);
}
/*
 * Remove an object's body and its stat data item.  The inode size is
 * forced to zero before the truncate so the whole body is released.
 */
int reiserfs_delete_object(struct reiserfs_transaction_handle *th,
			   struct inode *inode)
{
	int err;

	inode->i_size = 0;
	BUG_ON(!th->t_trans_id);

	/* for a directory this deletes the item containing "." and ".." */
	err = reiserfs_do_truncate(th, inode, NULL,
				   0 /*no timestamp updates */ );
	if (err)
		return err;

#if defined( USE_INODE_GENERATION_COUNTER )
	if (!old_format_only(th->t_super)) {
		__le32 *inode_generation =
		    &REISERFS_SB(th->t_super)->s_rs->s_inode_generation;

		le32_add_cpu(inode_generation, 1);
	}
/* USE_INODE_GENERATION_COUNTER */
#endif

	/* Finally drop the stat data item itself. */
	reiserfs_delete_solid_item(th, inode, INODE_PKEY(inode));
	return err;
}
/*
 * Unmap every buffer of @page that covers byte offset @pos or beyond
 * (within the page).  Buffers before @pos are left alone.
 */
static void unmap_buffers(struct page *page, loff_t pos)
{
	struct buffer_head *head, *bh, *next;
	unsigned long tail_index, cur_index;

	if (!page || !page_has_buffers(page))
		return;

	tail_index = pos & (PAGE_CACHE_SIZE - 1);
	cur_index = 0;
	head = page_buffers(page);
	bh = head;
	do {
		next = bh->b_this_page;

		/*
		 * we want to unmap the buffers that contain the tail, and
		 * all the buffers after it (since the tail must be at the
		 * end of the file).  We don't want to unmap file data
		 * before the tail, since it might be dirty and waiting to
		 * reach disk
		 */
		cur_index += bh->b_size;
		if (cur_index > tail_index)
			reiserfs_unmap_buffer(bh);

		bh = next;
	} while (bh != head);
}
/*
 * Decide whether the tail should be converted back to a direct item
 * (indirect2direct) as part of a cut.  When the conversion is skipped,
 * *mode is set to M_SKIP_BALANCING and the number of tail bytes in the
 * last block is returned; otherwise the result of indirect2direct()
 * is returned.
 */
static int maybe_indirect_to_direct(struct reiserfs_transaction_handle *th,
				    struct inode *inode,
				    struct page *page,
				    struct treepath *path,
				    const struct cpu_key *item_key,
				    loff_t new_file_size, char *mode)
{
	struct super_block *sb = inode->i_sb;
	int block_size = sb->s_blocksize;
	int cut_bytes;

	BUG_ON(!th->t_trans_id);
	BUG_ON(new_file_size != inode->i_size);

	/*
	 * the page being sent in could be NULL if there was an i/o error
	 * reading in the last block.  The user will hit problems trying to
	 * read the file, but for now we just skip the indirect2direct
	 */
	if (atomic_read(&inode->i_count) > 1 ||
	    !tail_has_to_be_packed(inode) ||
	    !page || (REISERFS_I(inode)->i_flags & i_nopack_mask)) {
		/* leave tail in an unformatted node */
		*mode = M_SKIP_BALANCING;
		cut_bytes =
		    block_size - (new_file_size & (block_size - 1));
		pathrelse(path);
		return cut_bytes;
	}

	/* Perform the conversion to a direct_item. */
	return indirect2direct(th, inode, page, path, item_key,
			       new_file_size, mode);
}
/*
 * we did indirect_to_direct conversion.  And we have inserted the direct
 * item successfully, but there was no disk space to cut the unfm
 * pointer being converted.  Therefore we have to delete the inserted
 * direct item(s)
 */
static void indirect_to_direct_roll_back(struct reiserfs_transaction_handle *th,
					 struct inode *inode, struct treepath *path)
{
	struct cpu_key tail_key;
	int tail_len;
	int removed;

	BUG_ON(!th->t_trans_id);

	make_cpu_key(&tail_key, inode, inode->i_size + 1, TYPE_DIRECT, 4);
	tail_key.key_length = 4;

	/* Bytes of tail sitting in the last (partial) block. */
	tail_len =
	    (cpu_key_k_offset(&tail_key) & (inode->i_sb->s_blocksize - 1)) - 1;
	while (tail_len) {
		/* look for the last byte of the tail */
		if (search_for_position_by_key(inode->i_sb, &tail_key, path) ==
		    POSITION_NOT_FOUND)
			reiserfs_panic(inode->i_sb, "vs-5615",
				       "found invalid item");
		RFALSE(path->pos_in_item !=
		       ih_item_len(tp_item_head(path)) - 1,
		       "vs-5616: appended bytes found");
		PATH_LAST_POSITION(path)--;

		removed =
		    reiserfs_delete_item(th, path, &tail_key, inode,
					 NULL /*unbh not needed */ );
		RFALSE(removed <= 0
		       || removed > tail_len,
		       "vs-5617: there was tail %d bytes, removed item length %d bytes",
		       tail_len, removed);
		/* Step the key back by what we just removed and repeat. */
		tail_len -= removed;
		set_cpu_key_k_offset(&tail_key,
				     cpu_key_k_offset(&tail_key) - removed);
	}
	reiserfs_warning(inode->i_sb, "reiserfs-5091", "indirect_to_direct "
			 "conversion has been rolled back due to "
			 "lack of disk space");
	mark_inode_dirty(inode);
}
/*
 * (Truncate or cut entry) or delete object item.  Returns < 0 on
 * failure, otherwise the number of bytes removed from the item.
 */
int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
			   struct treepath *path,
			   struct cpu_key *item_key,
			   struct inode *inode,
			   struct page *page, loff_t new_file_size)
{
	struct super_block *sb = inode->i_sb;
	/*
	 * Every function which is going to call do_balance must first
	 * create a tree_balance structure.  Then it must fill up this
	 * structure by using the init_tb_struct and fix_nodes functions.
	 * After that we can make tree balancing.
	 */
	struct tree_balance s_cut_balance;
	struct item_head *p_le_ih;
	int cut_size = 0;	/* Amount to be cut. */
	int ret_value = CARRY_ON;
	int removed = 0;	/* Number of the removed unformatted nodes. */
	int is_inode_locked = 0;
	char mode;		/* Mode of the balance. */
	int retval2 = -1;
	int quota_cut_bytes;
	loff_t tail_pos = 0;
	int depth;

	BUG_ON(!th->t_trans_id);

	init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
		       cut_size);

	/*
	 * Repeat this loop until we either cut the item without needing
	 * to balance, or we fix_nodes without schedule occurring
	 */
	while (1) {
		/*
		 * Determine the balance mode, position of the first byte to
		 * be cut, and size to be cut.  In case of the indirect item
		 * free unformatted nodes which are pointed to by the cut
		 * pointers.
		 */

		mode =
		    prepare_for_delete_or_cut(th, inode, path,
					      item_key, &removed,
					      &cut_size, new_file_size);
		if (mode == M_CONVERT) {
			/*
			 * convert last unformatted node to direct item or
			 * leave tail in the unformatted node
			 */
			RFALSE(ret_value != CARRY_ON,
			       "PAP-5570: can not convert twice");

			ret_value =
			    maybe_indirect_to_direct(th, inode, page,
						     path, item_key,
						     new_file_size, &mode);
			if (mode == M_SKIP_BALANCING)
				/* tail has been left in the unformatted node */
				return ret_value;

			is_inode_locked = 1;

			/*
			 * removing of last unformatted node will
			 * change value we have to return to truncate.
			 * Save it
			 */
			retval2 = ret_value;

			/*
			 * So, we have performed the first part of the
			 * conversion:
			 * inserting the new direct item.  Now we are
			 * removing the last unformatted node pointer.
			 * Set key to search for it.
			 */
			set_cpu_key_k_type(item_key, TYPE_INDIRECT);
			item_key->key_length = 4;
			new_file_size -=
			    (new_file_size & (sb->s_blocksize - 1));
			tail_pos = new_file_size;
			set_cpu_key_k_offset(item_key, new_file_size + 1);
			if (search_for_position_by_key
			    (sb, item_key,
			     path) == POSITION_NOT_FOUND) {
				print_block(PATH_PLAST_BUFFER(path), 3,
					    PATH_LAST_POSITION(path) - 1,
					    PATH_LAST_POSITION(path) + 1);
				reiserfs_panic(sb, "PAP-5580", "item to "
					       "convert does not exist (%K)",
					       item_key);
			}
			continue;
		}
		if (cut_size == 0) {
			pathrelse(path);
			return 0;
		}

		s_cut_balance.insert_size[0] = cut_size;

		ret_value = fix_nodes(mode, &s_cut_balance, NULL, NULL);
		if (ret_value != REPEAT_SEARCH)
			break;

		PROC_INFO_INC(sb, cut_from_item_restarted);

		/* fix_nodes scheduled; re-find the item and try again. */
		ret_value =
		    search_for_position_by_key(sb, item_key, path);
		if (ret_value == POSITION_FOUND)
			continue;

		reiserfs_warning(sb, "PAP-5610", "item %K not found",
				 item_key);
		unfix_nodes(&s_cut_balance);
		return (ret_value == IO_ERROR) ? -EIO : -ENOENT;
	}			/* while */

	/* check fix_nodes results (IO_ERROR or NO_DISK_SPACE) */
	if (ret_value != CARRY_ON) {
		if (is_inode_locked) {
			/*
			 * FIXME: this seems to be not needed: we are always
			 * able to cut item
			 */
			indirect_to_direct_roll_back(th, inode, path);
		}
		if (ret_value == NO_DISK_SPACE)
			reiserfs_warning(sb, "reiserfs-5092",
					 "NO_DISK_SPACE");
		unfix_nodes(&s_cut_balance);
		return -EIO;
	}

	/* go ahead and perform balancing */

	RFALSE(mode == M_PASTE || mode == M_INSERT, "invalid mode");

	/* Calculate number of bytes that need to be cut from the item. */
	quota_cut_bytes =
	    (mode ==
	     M_DELETE) ? ih_item_len(tp_item_head(path)) : -s_cut_balance.
	    insert_size[0];
	if (retval2 == -1)
		ret_value = calc_deleted_bytes_number(&s_cut_balance, mode);
	else
		ret_value = retval2;

	/*
	 * For direct items, we only change the quota when deleting the last
	 * item.
	 */
	p_le_ih = tp_item_head(s_cut_balance.tb_path);
	if (!S_ISLNK(inode->i_mode) && is_direct_le_ih(p_le_ih)) {
		if (mode == M_DELETE &&
		    (le_ih_k_offset(p_le_ih) & (sb->s_blocksize - 1)) ==
		    1) {
			/* FIXME: this is to keep 3.5 happy */
			REISERFS_I(inode)->i_first_direct_byte = U32_MAX;
			quota_cut_bytes = sb->s_blocksize + UNFM_P_SIZE;
		} else {
			quota_cut_bytes = 0;
		}
	}
#ifdef CONFIG_REISERFS_CHECK
	if (is_inode_locked) {
		struct item_head *le_ih =
		    tp_item_head(s_cut_balance.tb_path);

		/*
		 * we are going to complete indirect2direct conversion.  Make
		 * sure, that we exactly remove last unformatted node pointer
		 * of the item
		 */
		if (!is_indirect_le_ih(le_ih))
			reiserfs_panic(sb, "vs-5652",
				       "item must be indirect %h", le_ih);

		if (mode == M_DELETE && ih_item_len(le_ih) != UNFM_P_SIZE)
			reiserfs_panic(sb, "vs-5653", "completing "
				       "indirect2direct conversion indirect "
				       "item %h being deleted must be of "
				       "4 byte long", le_ih);

		if (mode == M_CUT
		    && s_cut_balance.insert_size[0] != -UNFM_P_SIZE) {
			reiserfs_panic(sb, "vs-5654", "can not complete "
				       "indirect2direct conversion of %h "
				       "(CUT, insert_size==%d)",
				       le_ih, s_cut_balance.insert_size[0]);
		}
		/*
		 * it would be useful to make sure, that right neighboring
		 * item is direct item of this file
		 */
	}
#endif

	do_balance(&s_cut_balance, NULL, NULL, mode);
	if (is_inode_locked) {
		/*
		 * we've done an indirect->direct conversion.  when the
		 * data block was freed, it was removed from the list of
		 * blocks that must be flushed before the transaction
		 * commits, make sure to unmap and invalidate it
		 */
		unmap_buffers(page, tail_pos);
		REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;
	}
#ifdef REISERQUOTA_DEBUG
	reiserfs_debug(inode->i_sb, REISERFS_DEBUG_CODE,
		       "reiserquota cut_from_item(): freeing %u id=%u type=%c",
		       quota_cut_bytes, inode->i_uid, '?');
#endif
	/* Quota calls may block: drop the write lock around them. */
	depth = reiserfs_write_unlock_nested(sb);
	dquot_free_space_nodirty(inode, quota_cut_bytes);
	reiserfs_write_lock_nested(sb, depth);
	return ret_value;
}
/*
 * Remove the body of an (empty) directory: retarget the inode key at
 * the directory item, delete it, then restore the key to the stat data.
 */
static void truncate_directory(struct reiserfs_transaction_handle *th,
			       struct inode *inode)
{
	struct reiserfs_key *key = INODE_PKEY(inode);

	BUG_ON(!th->t_trans_id);
	if (inode->i_nlink)
		reiserfs_error(inode->i_sb, "vs-5655", "link count != 0");

	/* point the key at the directory body and delete it */
	set_le_key_k_offset(KEY_FORMAT_3_5, key, DOT_OFFSET);
	set_le_key_k_type(KEY_FORMAT_3_5, key, TYPE_DIRENTRY);
	reiserfs_delete_solid_item(th, inode, key);
	reiserfs_update_sd(th, inode);

	/* point the key back at the stat data */
	set_le_key_k_offset(KEY_FORMAT_3_5, key, SD_OFFSET);
	set_le_key_k_type(KEY_FORMAT_3_5, key, TYPE_STAT_DATA);
}
/*
 * Truncate file to the new size.  Note, this must be called with a
 * transaction already started.  Returns 0 or a negative errno.
 */
int reiserfs_do_truncate(struct reiserfs_transaction_handle *th,
			 struct inode *inode,	/* ->i_size contains new size */
			 struct page *page,	/* up to date for last block */
			 /*
			  * when it is called by file_release to convert
			  * the tail - no timestamps should be updated
			  */
			 int update_timestamps
    )
{
	INITIALIZE_PATH(s_search_path);	/* Path to the current object item. */
	struct item_head *p_le_ih;	/* Pointer to an item header. */

	/* Key to search for a previous file item. */
	struct cpu_key s_item_key;
	loff_t file_size,	/* Old file size. */
	    new_file_size;	/* New file size. */
	int deleted;		/* Number of deleted or truncated bytes. */
	int retval;
	int err = 0;

	BUG_ON(!th->t_trans_id);

	/* Only regular files, directories and symlinks have a body. */
	if (!
	    (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)
	     || S_ISLNK(inode->i_mode)))
		return 0;

	/* deletion of directory - no need to update timestamps */
	if (S_ISDIR(inode->i_mode)) {
		truncate_directory(th, inode);
		return 0;
	}

	/* Get new file size. */
	new_file_size = inode->i_size;

	/* FIXME: note, that key type is unimportant here */
	make_cpu_key(&s_item_key, inode, max_reiserfs_offset(inode),
		     TYPE_DIRECT, 3);

	retval =
	    search_for_position_by_key(inode->i_sb, &s_item_key,
				       &s_search_path);
	if (retval == IO_ERROR) {
		reiserfs_error(inode->i_sb, "vs-5657",
			       "i/o failure occurred trying to truncate %K",
			       &s_item_key);
		err = -EIO;
		goto out;
	}
	if (retval == POSITION_FOUND || retval == FILE_NOT_FOUND) {
		reiserfs_error(inode->i_sb, "PAP-5660",
			       "wrong result %d of search for %K", retval,
			       &s_item_key);

		err = -EIO;
		goto out;
	}

	s_search_path.pos_in_item--;

	/* Get real file size (total length of all file items) */
	p_le_ih = tp_item_head(&s_search_path);
	if (is_statdata_le_ih(p_le_ih))
		file_size = 0;
	else {
		loff_t offset = le_ih_k_offset(p_le_ih);
		int bytes =
		    op_bytes_number(p_le_ih, inode->i_sb->s_blocksize);

		/*
		 * this may mismatch with real file size: if last direct item
		 * had no padding zeros and last unformatted node had no free
		 * space, this file would have this file size
		 */
		file_size = offset + bytes - 1;
	}
	/*
	 * are we doing a full truncate or delete, if so
	 * kick in the reada code
	 */
	if (new_file_size == 0)
		s_search_path.reada = PATH_READA | PATH_READA_BACK;

	if (file_size == 0 || file_size < new_file_size) {
		goto update_and_out;
	}

	/* Update key to search for the last file item. */
	set_cpu_key_k_offset(&s_item_key, file_size);

	do {
		/* Cut or delete file item. */
		deleted =
		    reiserfs_cut_from_item(th, &s_search_path, &s_item_key,
					   inode, page, new_file_size);
		if (deleted < 0) {
			reiserfs_warning(inode->i_sb, "vs-5665",
					 "reiserfs_cut_from_item failed");
			reiserfs_check_path(&s_search_path);
			return 0;
		}

		RFALSE(deleted > file_size,
		       "PAP-5670: reiserfs_cut_from_item: too many bytes deleted: deleted %d, file_size %lu, item_key %K",
		       deleted, file_size, &s_item_key);

		/* Change key to search the last file item. */
		file_size -= deleted;

		set_cpu_key_k_offset(&s_item_key, file_size);

		/*
		 * While there are bytes to truncate and previous
		 * file item is presented in the tree.
		 */

		/*
		 * This loop could take a really long time, and could log
		 * many more blocks than a transaction can hold.  So, we do
		 * a polite journal end here, and if the transaction needs
		 * ending, we make sure the file is consistent before ending
		 * the current trans and starting a new one
		 */
		if (journal_transaction_should_end(th, 0) ||
		    reiserfs_transaction_free_space(th) <= JOURNAL_FOR_FREE_BLOCK_AND_UPDATE_SD) {
			pathrelse(&s_search_path);

			if (update_timestamps) {
				inode->i_mtime = CURRENT_TIME_SEC;
				inode->i_ctime = CURRENT_TIME_SEC;
			}
			reiserfs_update_sd(th, inode);

			err = journal_end(th);
			if (err)
				goto out;
			err = journal_begin(th, inode->i_sb,
					    JOURNAL_FOR_FREE_BLOCK_AND_UPDATE_SD + JOURNAL_PER_BALANCE_CNT * 4) ;
			if (err)
				goto out;
			reiserfs_update_inode_transaction(inode);
		}
	} while (file_size > ROUND_UP(new_file_size) &&
		 search_for_position_by_key(inode->i_sb, &s_item_key,
					    &s_search_path) == POSITION_FOUND);

	RFALSE(file_size > ROUND_UP(new_file_size),
	       "PAP-5680: truncate did not finish: new_file_size %lld, current %lld, oid %d",
	       new_file_size, file_size, s_item_key.on_disk_key.k_objectid);

update_and_out:
	if (update_timestamps) {
		/* this is truncate, not file closing */
		inode->i_mtime = CURRENT_TIME_SEC;
		inode->i_ctime = CURRENT_TIME_SEC;
	}
	reiserfs_update_sd(th, inode);

out:
	pathrelse(&s_search_path);
	return err;
}
#ifdef CONFIG_REISERFS_CHECK
/* this makes sure, that we __append__, not overwrite or add holes */
static void check_research_for_paste(struct treepath *path,
				     const struct cpu_key *key)
{
	struct item_head *found_ih = tp_item_head(path);

	/* Direct item: key must land exactly one byte past the item end. */
	if (is_direct_le_ih(found_ih)) {
		if (le_ih_k_offset(found_ih) +
		    op_bytes_number(found_ih,
				    get_last_bh(path)->b_size) !=
		    cpu_key_k_offset(key)
		    || op_bytes_number(found_ih,
				       get_last_bh(path)->b_size) !=
		    pos_in_item(path))
			reiserfs_panic(NULL, "PAP-5720", "found direct item "
				       "%h or position (%d) does not match "
				       "to key %K", found_ih,
				       pos_in_item(path), key);
	}
	/*
	 * Indirect item: key must land just past the mapped data, the
	 * paste position must be at the last pointer, and there must be
	 * no free space left in the last unformatted node.
	 */
	if (is_indirect_le_ih(found_ih)) {
		if (le_ih_k_offset(found_ih) +
		    op_bytes_number(found_ih,
				    get_last_bh(path)->b_size) !=
		    cpu_key_k_offset(key)
		    || I_UNFM_NUM(found_ih) != pos_in_item(path)
		    || get_ih_free_space(found_ih) != 0)
			reiserfs_panic(NULL, "PAP-5730", "found indirect "
				       "item (%h) or position (%d) does not "
				       "match to key (%K)",
				       found_ih, pos_in_item(path), key);
	}
}
#endif /* config reiserfs check */
/*
 * Paste bytes to the existing item.
 * Returns 0 on success, a negative errno on failure (quota charge is
 * rolled back on the error path).
 */
int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th,
			     /* Path to the pasted item. */
			     struct treepath *search_path,
			     /* Key to search for the needed item. */
			     const struct cpu_key *key,
			     /* Inode item belongs to */
			     struct inode *inode,
			     /* Pointer to the bytes to paste. */
			     const char *body,
			     /* Size of pasted bytes. */
			     int pasted_size)
{
	struct super_block *sb = inode->i_sb;
	struct tree_balance s_paste_balance;
	int retval;
	int fs_gen;
	int depth;

	BUG_ON(!th->t_trans_id);

	fs_gen = get_generation(inode->i_sb);

#ifdef REISERQUOTA_DEBUG
	reiserfs_debug(inode->i_sb, REISERFS_DEBUG_CODE,
		       "reiserquota paste_into_item(): allocating %u id=%u type=%c",
		       pasted_size, inode->i_uid,
		       key2type(&key->on_disk_key));
#endif

	/* Quota allocation may block: drop the write lock around it. */
	depth = reiserfs_write_unlock_nested(sb);
	retval = dquot_alloc_space_nodirty(inode, pasted_size);
	reiserfs_write_lock_nested(sb, depth);
	if (retval) {
		pathrelse(search_path);
		return retval;
	}
	init_tb_struct(th, &s_paste_balance, th->t_super, search_path,
		       pasted_size);
#ifdef DISPLACE_NEW_PACKING_LOCALITIES
	s_paste_balance.key = key->on_disk_key;
#endif

	/* DQUOT_* can schedule, must check before the fix_nodes */
	if (fs_changed(fs_gen, inode->i_sb)) {
		goto search_again;
	}

	while ((retval =
		fix_nodes(M_PASTE, &s_paste_balance, NULL,
			  body)) == REPEAT_SEARCH) {
search_again:
		/* file system changed while we were in the fix_nodes */
		PROC_INFO_INC(th->t_super, paste_into_item_restarted);
		retval =
		    search_for_position_by_key(th->t_super, key,
					       search_path);
		if (retval == IO_ERROR) {
			retval = -EIO;
			goto error_out;
		}
		if (retval == POSITION_FOUND) {
			reiserfs_warning(inode->i_sb, "PAP-5710",
					 "entry or pasted byte (%K) exists",
					 key);
			retval = -EEXIST;
			goto error_out;
		}
#ifdef CONFIG_REISERFS_CHECK
		check_research_for_paste(search_path, key);
#endif
	}

	/*
	 * Perform balancing after all resources are collected by fix_nodes,
	 * and accessing them will not risk triggering schedule.
	 */
	if (retval == CARRY_ON) {
		do_balance(&s_paste_balance, NULL /*ih */ , body, M_PASTE);
		return 0;
	}
	retval = (retval == NO_DISK_SPACE) ? -ENOSPC : -EIO;
error_out:
	/* this also releases the path */
	unfix_nodes(&s_paste_balance);
#ifdef REISERQUOTA_DEBUG
	reiserfs_debug(inode->i_sb, REISERFS_DEBUG_CODE,
		       "reiserquota paste_into_item(): freeing %u id=%u type=%c",
		       pasted_size, inode->i_uid,
		       key2type(&key->on_disk_key));
#endif
	/* Undo the quota charge taken at the top of the function. */
	depth = reiserfs_write_unlock_nested(sb);
	dquot_free_space_nodirty(inode, pasted_size);
	reiserfs_write_lock_nested(sb, depth);
	return retval;
}
/*
 * Insert new item into the buffer at the path.
 * th   - active transaction handle
 * path - path to the inserted item
 * ih   - pointer to the item header to insert
 * body - pointer to the bytes to insert
 */
int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
struct treepath *path, const struct cpu_key *key,
struct item_head *ih, struct inode *inode,
const char *body)
{
struct tree_balance s_ins_balance;
int retval;
int fs_gen = 0;
int quota_bytes = 0;
BUG_ON(!th->t_trans_id);
if (inode) { /* Do we count quotas for item? */
2024-09-09 08:57:42 +00:00
int depth;
2024-09-09 08:52:07 +00:00
fs_gen = get_generation(inode->i_sb);
quota_bytes = ih_item_len(ih);
2024-09-09 08:57:42 +00:00
/*
* hack so the quota code doesn't have to guess
* if the file has a tail, links are always tails,
* so there's no guessing needed
2024-09-09 08:52:07 +00:00
*/
if (!S_ISLNK(inode->i_mode) && is_direct_le_ih(ih))
quota_bytes = inode->i_sb->s_blocksize + UNFM_P_SIZE;
#ifdef REISERQUOTA_DEBUG
reiserfs_debug(inode->i_sb, REISERFS_DEBUG_CODE,
"reiserquota insert_item(): allocating %u id=%u type=%c",
quota_bytes, inode->i_uid, head2type(ih));
#endif
2024-09-09 08:57:42 +00:00
/*
* We can't dirty inode here. It would be immediately
* written but appropriate stat item isn't inserted yet...
*/
depth = reiserfs_write_unlock_nested(inode->i_sb);
2024-09-09 08:52:07 +00:00
retval = dquot_alloc_space_nodirty(inode, quota_bytes);
2024-09-09 08:57:42 +00:00
reiserfs_write_lock_nested(inode->i_sb, depth);
2024-09-09 08:52:07 +00:00
if (retval) {
pathrelse(path);
return retval;
}
}
init_tb_struct(th, &s_ins_balance, th->t_super, path,
IH_SIZE + ih_item_len(ih));
#ifdef DISPLACE_NEW_PACKING_LOCALITIES
s_ins_balance.key = key->on_disk_key;
#endif
2024-09-09 08:57:42 +00:00
/*
* DQUOT_* can schedule, must check to be sure calling
* fix_nodes is safe
*/
2024-09-09 08:52:07 +00:00
if (inode && fs_changed(fs_gen, inode->i_sb)) {
goto search_again;
}
while ((retval =
fix_nodes(M_INSERT, &s_ins_balance, ih,
body)) == REPEAT_SEARCH) {
2024-09-09 08:57:42 +00:00
search_again:
2024-09-09 08:52:07 +00:00
/* file system changed while we were in the fix_nodes */
PROC_INFO_INC(th->t_super, insert_item_restarted);
retval = search_item(th->t_super, key, path);
if (retval == IO_ERROR) {
retval = -EIO;
goto error_out;
}
if (retval == ITEM_FOUND) {
reiserfs_warning(th->t_super, "PAP-5760",
"key %K already exists in the tree",
key);
retval = -EEXIST;
goto error_out;
}
}
/* make balancing after all resources will be collected at a time */
if (retval == CARRY_ON) {
do_balance(&s_ins_balance, ih, body, M_INSERT);
return 0;
}
retval = (retval == NO_DISK_SPACE) ? -ENOSPC : -EIO;
2024-09-09 08:57:42 +00:00
error_out:
2024-09-09 08:52:07 +00:00
/* also releases the path */
unfix_nodes(&s_ins_balance);
#ifdef REISERQUOTA_DEBUG
reiserfs_debug(th->t_super, REISERFS_DEBUG_CODE,
"reiserquota insert_item(): freeing %u id=%u type=%c",
quota_bytes, inode->i_uid, head2type(ih));
#endif
2024-09-09 08:57:42 +00:00
if (inode) {
int depth = reiserfs_write_unlock_nested(inode->i_sb);
2024-09-09 08:52:07 +00:00
dquot_free_space_nodirty(inode, quota_bytes);
2024-09-09 08:57:42 +00:00
reiserfs_write_lock_nested(inode->i_sb, depth);
}
2024-09-09 08:52:07 +00:00
return retval;
}