Diffstat (limited to 'drivers/block/zram/zram_drv.c')
-rw-r--r--  drivers/block/zram/zram_drv.c  |  28
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 9fa15bb9d118..81a557c33a1f 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -106,7 +106,7 @@ static void zram_set_obj_size(struct zram_meta *meta,
meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}
-static inline int is_partial_io(struct bio_vec *bvec)
+static inline bool is_partial_io(struct bio_vec *bvec)
{
return bvec->bv_len != PAGE_SIZE;
}
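This hunk, and the valid_io_request() and page_zero_filled() hunks below, change predicate helpers to return bool rather than int, matching how their results are actually used (a yes/no answer, not an error code). A minimal standalone sketch of the pattern; PAGE_SIZE and the reduced struct bio_vec are stand-ins for the kernel definitions:

#include <stdbool.h>

#define PAGE_SIZE 4096UL        /* stand-in for the kernel's PAGE_SIZE */

struct bio_vec {                /* reduced stand-in for the kernel struct */
	unsigned int bv_len;
};

/* A request is "partial" when the segment covers less than a full page. */
static inline bool is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}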
@@ -114,25 +114,25 @@ static inline int is_partial_io(struct bio_vec *bvec)
/*
* Check if request is within bounds and aligned on zram logical blocks.
*/
-static inline int valid_io_request(struct zram *zram,
+static inline bool valid_io_request(struct zram *zram,
sector_t start, unsigned int size)
{
u64 end, bound;
/* unaligned request */
if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
- return 0;
+ return false;
if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
- return 0;
+ return false;
end = start + (size >> SECTOR_SHIFT);
bound = zram->disksize >> SECTOR_SHIFT;
/* out of range */
if (unlikely(start >= bound || end > bound || start > end))
- return 0;
+ return false;
/* I/O request is valid */
- return 1;
+ return true;
}
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
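valid_io_request() keeps the same checks and only changes its return type: it rejects requests whose start sector or byte length is not aligned to zram's logical block size, and requests that run past the end of the device. A standalone sketch of that logic; the constants follow the zram_drv.h definitions (512-byte sectors, 4 KiB logical blocks), and passing disksize as a plain argument instead of reading it from struct zram is a simplification:

#include <stdbool.h>
#include <stdint.h>

#define SECTOR_SHIFT                  9
#define ZRAM_LOGICAL_BLOCK_SHIFT      12
#define ZRAM_LOGICAL_BLOCK_SIZE       (1 << ZRAM_LOGICAL_BLOCK_SHIFT)
#define ZRAM_SECTOR_PER_LOGICAL_BLOCK \
	(1 << (ZRAM_LOGICAL_BLOCK_SHIFT - SECTOR_SHIFT))

/* start is in 512-byte sectors, size and disksize are in bytes */
static bool valid_io_request(uint64_t disksize, uint64_t start, unsigned int size)
{
	uint64_t end, bound;

	/* start must sit on a logical-block boundary */
	if (start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1))
		return false;
	/* size must be a whole number of logical blocks */
	if (size & (ZRAM_LOGICAL_BLOCK_SIZE - 1))
		return false;

	end = start + (size >> SECTOR_SHIFT);
	bound = disksize >> SECTOR_SHIFT;

	/* the request must end within the device */
	if (start >= bound || end > bound || start > end)
		return false;

	return true;
}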
@@ -157,7 +157,7 @@ static inline void update_used_max(struct zram *zram,
} while (old_max != cur_max);
}
-static int page_zero_filled(void *ptr)
+static bool page_zero_filled(void *ptr)
{
unsigned int pos;
unsigned long *page;
@@ -166,10 +166,10 @@ static int page_zero_filled(void *ptr)
for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
if (page[pos])
- return 0;
+ return false;
}
- return 1;
+ return true;
}
static void handle_zero_page(struct bio_vec *bvec)
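page_zero_filled() scans a page one word at a time and reports whether every word is zero; zram uses the result to record zero-filled pages without storing compressed data for them. A standalone sketch of the same scan, again with PAGE_SIZE as a stand-in:

#include <stdbool.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL        /* stand-in for the kernel's PAGE_SIZE */

/* Return true only if every unsigned long in the page is zero. */
static bool page_zero_filled(const void *ptr)
{
	const unsigned long *page = ptr;
	size_t pos;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return false;
	}

	return true;
}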
@@ -365,6 +365,9 @@ static ssize_t comp_algorithm_store(struct device *dev,
struct zram *zram = dev_to_zram(dev);
size_t sz;
+ if (!zcomp_available_algorithm(buf))
+ return -EINVAL;
+
down_write(&zram->init_lock);
if (init_done(zram)) {
up_write(&zram->init_lock);
@@ -378,9 +381,6 @@ static ssize_t comp_algorithm_store(struct device *dev,
if (sz > 0 && zram->compressor[sz - 1] == '\n')
zram->compressor[sz - 1] = 0x00;
- if (!zcomp_available_algorithm(zram->compressor))
- len = -EINVAL;
-
up_write(&zram->init_lock);
return len;
}
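Taken together, the two comp_algorithm_store hunks move the availability check ahead of the lock: an unsupported algorithm name now fails with -EINVAL before down_write(&zram->init_lock) is taken and before zram->compressor is overwritten, instead of being detected only after the copy. A minimal userspace sketch of that ordering; the whitelist, mutex and algorithm names below are illustrative stand-ins, not zram code:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <string.h>

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static char current_algo[64] = "lzo";

/* Stand-in for zcomp_available_algorithm(): accept a fixed whitelist. */
static bool algo_available(const char *name)
{
	return strcmp(name, "lzo") == 0 || strcmp(name, "lz4") == 0;
}

static int algo_store(const char *buf)
{
	/* Validate first, so invalid input never takes the lock
	 * or clobbers the currently configured name. */
	if (!algo_available(buf))
		return -EINVAL;

	pthread_mutex_lock(&init_lock);
	strncpy(current_algo, buf, sizeof(current_algo) - 1);
	current_algo[sizeof(current_algo) - 1] = '\0';
	pthread_mutex_unlock(&init_lock);

	return 0;
}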
@@ -726,14 +726,14 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
}
alloced_pages = zs_get_total_pages(meta->mem_pool);
+ update_used_max(zram, alloced_pages);
+
if (zram->limit_pages && alloced_pages > zram->limit_pages) {
zs_free(meta->mem_pool, handle);
ret = -ENOMEM;
goto out;
}
- update_used_max(zram, alloced_pages);
-
cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);
if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
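The final hunk reorders zram_bvec_write() so that update_used_max() runs as soon as the new zsmalloc footprint is known, before the mem_limit check; the peak-usage statistic can then account for the allocation even when the write is immediately rejected for exceeding zram->limit_pages and the handle is freed. What is visible of update_used_max() in the @@ -157 hunk above is the tail of a retry loop that maintains a running maximum; a standalone C11 sketch of that compare-and-swap pattern, with a plain atomic counter standing in for zram->stats.max_used_pages:

#include <stdatomic.h>

static atomic_ulong max_used_pages;   /* stand-in for zram->stats.max_used_pages */

/* Raise the recorded maximum to 'pages' if it is larger; retry the
 * compare-and-swap until we either win or observe a value >= pages. */
static void update_used_max(unsigned long pages)
{
	unsigned long cur_max = atomic_load(&max_used_pages);

	do {
		if (pages <= cur_max)
			return;
	} while (!atomic_compare_exchange_weak(&max_used_pages, &cur_max, pages));
}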