void block_init(void)
{
- breq_kcache = kmem_cache_create("block_reqs", sizeof(struct block_request),
- __alignof__(struct block_request), 0, 0, 0);
- bh_kcache = kmem_cache_create("buffer_heads", sizeof(struct buffer_head),
- __alignof__(struct buffer_head), 0, 0, 0);
+ breq_kcache = kmem_cache_create("block_reqs",
+ sizeof(struct block_request),
+ __alignof__(struct block_request), 0,
+ NULL, 0, 0, NULL);
+ bh_kcache = kmem_cache_create("buffer_heads",
+ sizeof(struct buffer_head),
+ __alignof__(struct buffer_head), 0,
+ NULL, 0, 0, NULL);
- #ifdef __CONFIG_EXT2FS__
+ #ifdef CONFIG_EXT2FS
/* Now probe for and init the block device for the ext2 ram disk */
extern uint8_t _binary_mnt_ext2fs_img_size[];
extern uint8_t _binary_mnt_ext2fs_img_start[];
memset(ram_bd, 0, sizeof(struct block_device));
ram_bd->b_id = 31337;
ram_bd->b_sector_sz = 512;
- ram_bd->b_nr_sector = (unsigned int)_binary_mnt_ext2fs_img_size / 512;
+ ram_bd->b_nr_sector = (unsigned long)_binary_mnt_ext2fs_img_size / 512;
kref_init(&ram_bd->b_kref, fake_release, 1);
pm_init(&ram_bd->b_pm, &block_pm_op, ram_bd);
ram_bd->b_data = _binary_mnt_ext2fs_img_start;
- strncpy(ram_bd->b_name, "RAMDISK", BDEV_INLINE_NAME);
- ram_bd->b_name[BDEV_INLINE_NAME - 1] = '\0';
+ strlcpy(ram_bd->b_name, "RAMDISK", BDEV_INLINE_NAME);
/* Connect it to the file system */
- struct file *ram_bf = make_device("/dev/ramdisk", S_IRUSR | S_IWUSR,
+ struct file *ram_bf = make_device("/dev_vfs/ramdisk", S_IRUSR | S_IWUSR,
__S_IFBLK, &block_f_op);
/* make sure the inode tracks the right pm (not its internal one) */
ram_bf->f_dentry->d_inode->i_mapping = &ram_bd->b_pm;
ram_bf->f_dentry->d_inode->i_bdev = ram_bd; /* this holds the bd kref */
kref_put(&ram_bf->f_kref);
- #endif /* __CONFIG_EXT2FS__ */
+ #endif /* CONFIG_EXT2FS */
}
/* Generic helper, returns a kref'd reference out of principle. */
void free_bhs(struct page *page)
{
struct buffer_head *bh, *next;
- assert(page->pg_flags & PG_BUFFER);
+ assert(atomic_read(&page->pg_flags) & PG_BUFFER);
bh = (struct buffer_head*)page->pg_private;
while (bh) {
next = bh->bh_next;
int8_t irq_state = 0;
if (!sem_up_irqsave(&breq->sem, &irq_state)) {
/* This shouldn't happen anymore. Let brho know if it does. */
- warn("[kernel] no one waiting on breq %08p", breq);
+ warn("[kernel] no one waiting on breq %p", breq);
}
}
{
int8_t irq_state = 0;
/* Since printk takes a while, this may make you lose the race */
- printd("Sleeping on breq %08p\n", breq);
+ printd("Sleeping on breq %p\n", breq);
assert(irq_is_enabled());
sem_down_irqsave(&breq->sem, &irq_state);
}
* readpage, we read them in when a specific block is there */
int block_readpage(struct page_map *pm, struct page *page)
{
- page->pg_flags |= PG_UPTODATE;
+ atomic_or(&page->pg_flags, PG_UPTODATE);
return 0;
}
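The pg_flags updates in this change follow the atomic convention for page flags: bits are set with atomic_or() and tested with atomic_read() & MASK, as in free_bhs() above. A minimal sketch of the idiom (the helper name is illustrative only, not part of this change):

	/* Illustrative helper, not in the patch: pg_flags is an atomic bitmask,
	 * so bits are set with atomic_or() and tested via atomic_read(). */
	static bool page_is_uptodate(struct page *page)
	{
		return (atomic_read(&page->pg_flags) & PG_UPTODATE) != 0;
	}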
if (!blk_num)
warn("Asking for the 0th block of a bdev...");
/* Make sure there's a page in the page cache. Should always be one. */
	error = pm_load_page(pm, blk_num / blk_per_pg, &page);
if (error)
panic("Failed to load page! (%d)", error);
my_buf = page2kva(page) + blk_offset;
- assert(page->pg_flags & PG_BUFFER); /* Should be part of a page map */
+ atomic_or(&page->pg_flags, PG_BUFFER);
retry:
bh = (struct buffer_head*)page->pg_private;
prev = 0;
struct page *page = bh->bh_page;
/* TODO: race on flag modification */
bh->bh_flags |= BH_DIRTY;
- page->pg_flags |= PG_DIRTY;
+ atomic_or(&page->pg_flags, PG_DIRTY);
}
/* Decrefs the buffer from bdev_get_buffer(). Call this when you no longer
* reclaiming will be in page sized chunks from the page cache. */
void bdev_put_buffer(struct buffer_head *bh)
{
- page_decref(bh->bh_page);
+ pm_put_page(bh->bh_page);
}
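For context, bdev_put_buffer() pairs with bdev_get_buffer(). A minimal read-modify-write sketch, assuming a bdev_get_buffer(bdev, blk_num, nr_sectors) signature, a bh_buffer field, and the name bdev_dirty_buffer() for the dirty helper shown above (all assumptions, not confirmed by this change):

	/* Illustrative only: write one block through the buffer interface, then
	 * drop the page cache reference taken by bdev_get_buffer(). */
	static void write_one_block(struct block_device *bdev, unsigned long blk_num,
	                            const void *src)
	{
		struct buffer_head *bh = bdev_get_buffer(bdev, blk_num, 1);

		memcpy(bh->bh_buffer, src, bdev->b_sector_sz);
		bdev_dirty_buffer(bh);	/* sets BH_DIRTY and the page's PG_DIRTY */
		bdev_put_buffer(bh);	/* drops the ref from bdev_get_buffer() */
	}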
/* Block device page map ops: */