Commit dd892625 authored by Linus Torvalds

Merge tag 'staging-5.4-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging

Pull IIO fixes and staging driver from Greg KH:
 "Here is a mix of a number of IIO driver fixes for 5.4-rc7, and a whole
  new staging driver.

  The IIO fixes resolve some reported issues, all are tiny.

  The staging driver addition is the vboxsf filesystem, which is the
  VirtualBox guest shared folder code. Hans has been trying to get
  filesystem reviewers to review the code for many months now, and
  Christoph finally said to just merge it in staging now as it is
  stand-alone and the filesystem people can review it easier over time
  that way.

  I know it's late for this big of an addition, but it is stand-alone.

  The code has been in linux-next for a while, long enough to pick up a
  few tiny fixes for it already so people are looking at it.

  All of these have been in linux-next with no reported issues"

* tag 'staging-5.4-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging:
  staging: Fix error return code in vboxsf_fill_super()
  staging: vboxsf: fix dereference of pointer dentry before it is null checked
  staging: vboxsf: Remove unused including <linux/version.h>
  staging: Add VirtualBox guest shared folder (vboxsf) support
  iio: adc: stm32-adc: fix stopping dma
  iio: imu: inv_mpu6050: fix no data on MPU6050
  iio: srf04: fix wrong limitation in distance measuring
  iio: imu: adis16480: make sure provided frequency is positive
......@@ -17339,6 +17339,12 @@ F: include/linux/vbox_utils.h
F: include/uapi/linux/vbox*.h
F: drivers/virt/vboxguest/
VIRTUAL BOX SHARED FOLDER VFS DRIVER:
M: Hans de Goede <hdegoede@redhat.com>
L: linux-fsdevel@vger.kernel.org
S: Maintained
F: drivers/staging/vboxsf/*
VIRTUAL SERIO DEVICE DRIVER
M: Stephen Chandler Paul <thatslyude@gmail.com>
S: Maintained
......
......@@ -1399,7 +1399,7 @@ static int stm32_adc_dma_start(struct iio_dev *indio_dev)
cookie = dmaengine_submit(desc);
ret = dma_submit_error(cookie);
if (ret) {
-	dmaengine_terminate_all(adc->dma_chan);
+	dmaengine_terminate_sync(adc->dma_chan);
return ret;
}
......@@ -1477,7 +1477,7 @@ static void __stm32_adc_buffer_predisable(struct iio_dev *indio_dev)
stm32_adc_conv_irq_disable(adc);
if (adc->dma_chan)
-	dmaengine_terminate_all(adc->dma_chan);
+	dmaengine_terminate_sync(adc->dma_chan);
if (stm32_adc_set_trig(indio_dev, NULL))
dev_err(&indio_dev->dev, "Can't clear trigger\n");
......
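The two hunks above swap dmaengine_terminate_all() for dmaengine_terminate_sync(). The difference is that the _sync variant not only aborts the outstanding descriptors but also waits for any in-flight transfer-complete callback to finish, so memory and IIO state touched by those callbacks cannot be used after teardown has started. A minimal sketch of the pattern in generic client code (not the stm32-adc driver itself; the helper name is made up):

#include <linux/dmaengine.h>

/* Illustrative teardown helper, not taken from the driver. */
static void example_stop_rx_dma(struct dma_chan *chan)
{
	/*
	 * Aborts all submitted descriptors and, unlike
	 * dmaengine_terminate_all(), also waits until any running
	 * completion callback has returned.  Must be called from a
	 * context that may sleep.
	 */
	dmaengine_terminate_sync(chan);

	/* Only now is it safe to free DMA buffers / stop the peripheral. */
}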
......@@ -317,8 +317,11 @@ static int adis16480_set_freq(struct iio_dev *indio_dev, int val, int val2)
struct adis16480 *st = iio_priv(indio_dev);
unsigned int t, reg;
+ if (val < 0 || val2 < 0)
+ return -EINVAL;
t = val * 1000 + val2 / 1000;
- if (t <= 0)
+ if (t == 0)
return -EINVAL;
/*
......
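Context for the check above: t is declared unsigned int, so a negative val wraps around to a huge positive value and the old "if (t <= 0)" test could never reject it. Hence the explicit val/val2 < 0 test and the change to "== 0". A quick standalone demonstration in plain userspace C (input values chosen arbitrarily):

#include <stdio.h>

int main(void)
{
	int val = -1, val2 = 0;			/* arbitrary negative input */
	unsigned int t = val * 1000 + val2 / 1000;

	/* prints: t = 4294966296, (t <= 0) = 0 -- the old check lets it through */
	printf("t = %u, (t <= 0) = %d\n", t, t <= 0);
	return 0;
}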
......@@ -114,54 +114,63 @@ static const struct inv_mpu6050_hw hw_info[] = {
.name = "MPU6050",
.reg = &reg_set_6050,
.config = &chip_config_6050,
.fifo_size = 1024,
},
{
.whoami = INV_MPU6500_WHOAMI_VALUE,
.name = "MPU6500",
.reg = &reg_set_6500,
.config = &chip_config_6050,
.fifo_size = 512,
},
{
.whoami = INV_MPU6515_WHOAMI_VALUE,
.name = "MPU6515",
.reg = &reg_set_6500,
.config = &chip_config_6050,
.fifo_size = 512,
},
{
.whoami = INV_MPU6000_WHOAMI_VALUE,
.name = "MPU6000",
.reg = &reg_set_6050,
.config = &chip_config_6050,
.fifo_size = 1024,
},
{
.whoami = INV_MPU9150_WHOAMI_VALUE,
.name = "MPU9150",
.reg = &reg_set_6050,
.config = &chip_config_6050,
.fifo_size = 1024,
},
{
.whoami = INV_MPU9250_WHOAMI_VALUE,
.name = "MPU9250",
.reg = &reg_set_6500,
.config = &chip_config_6050,
.fifo_size = 512,
},
{
.whoami = INV_MPU9255_WHOAMI_VALUE,
.name = "MPU9255",
.reg = &reg_set_6500,
.config = &chip_config_6050,
.fifo_size = 512,
},
{
.whoami = INV_ICM20608_WHOAMI_VALUE,
.name = "ICM20608",
.reg = &reg_set_6500,
.config = &chip_config_6050,
.fifo_size = 512,
},
{
.whoami = INV_ICM20602_WHOAMI_VALUE,
.name = "ICM20602",
.reg = &reg_set_icm20602,
.config = &chip_config_6050,
.fifo_size = 1008,
},
};
......
......@@ -100,12 +100,14 @@ struct inv_mpu6050_chip_config {
* @name: name of the chip.
* @reg: register map of the chip.
* @config: configuration of the chip.
* @fifo_size: size of the FIFO in bytes.
*/
struct inv_mpu6050_hw {
u8 whoami;
u8 *name;
const struct inv_mpu6050_reg_map *reg;
const struct inv_mpu6050_chip_config *config;
size_t fifo_size;
};
/*
......
......@@ -180,9 +180,6 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
"failed to ack interrupt\n");
goto flush_fifo;
}
- /* handle fifo overflow by reseting fifo */
- if (int_status & INV_MPU6050_BIT_FIFO_OVERFLOW_INT)
- goto flush_fifo;
if (!(int_status & INV_MPU6050_BIT_RAW_DATA_RDY_INT)) {
dev_warn(regmap_get_device(st->map),
"spurious interrupt with status 0x%x\n", int_status);
......@@ -211,6 +208,18 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
if (result)
goto end_session;
fifo_count = get_unaligned_be16(&data[0]);
+ /*
+  * Handle fifo overflow by resetting fifo.
+  * Reset if there is only 3 data set free remaining to mitigate
+  * possible delay between reading fifo count and fifo data.
+  */
+ nb = 3 * bytes_per_datum;
+ if (fifo_count >= st->hw->fifo_size - nb) {
+ dev_warn(regmap_get_device(st->map), "fifo overflow reset\n");
+ goto flush_fifo;
+ }
/* compute and process all complete datum */
nb = fifo_count / bytes_per_datum;
inv_mpu6050_update_period(st, pf->timestamp, nb);
......
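To make the new guard concrete: with the MPU6500's 512-byte FIFO and an assumed 12-byte sample (3-axis gyro plus 3-axis accel at 16 bits each; the real bytes_per_datum depends on which scan channels are enabled), the FIFO is flushed once the count reaches 512 - 3*12 = 476 bytes, leaving three samples of headroom for data that may arrive between reading the count register and reading the data. A standalone sketch of that arithmetic, not driver code:

#include <stdio.h>
#include <stddef.h>

/* Mirrors the added overflow guard; the 12-byte sample size is an assumption. */
static int fifo_nearly_full(unsigned int fifo_count, size_t fifo_size,
			    size_t bytes_per_datum)
{
	return fifo_count >= fifo_size - 3 * bytes_per_datum;
}

int main(void)
{
	/* 512-byte FIFO (MPU6500 and friends), 12 bytes per assumed sample */
	printf("475 bytes -> %d (keep reading)\n", fifo_nearly_full(475, 512, 12));
	printf("476 bytes -> %d (reset FIFO)\n", fifo_nearly_full(476, 512, 12));
	return 0;
}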
......@@ -110,7 +110,7 @@ static int srf04_read(struct srf04_data *data)
udelay(data->cfg->trigger_pulse_us);
gpiod_set_value(data->gpiod_trig, 0);
- /* it cannot take more than 20 ms */
+ /* it should not take more than 20 ms until echo is rising */
ret = wait_for_completion_killable_timeout(&data->rising, HZ/50);
if (ret < 0) {
mutex_unlock(&data->lock);
......@@ -120,7 +120,8 @@ static int srf04_read(struct srf04_data *data)
return -ETIMEDOUT;
}
- ret = wait_for_completion_killable_timeout(&data->falling, HZ/50);
+ /* it cannot take more than 50 ms until echo is falling */
+ ret = wait_for_completion_killable_timeout(&data->falling, HZ/20);
if (ret < 0) {
mutex_unlock(&data->lock);
return ret;
......@@ -135,19 +136,19 @@ static int srf04_read(struct srf04_data *data)
dt_ns = ktime_to_ns(ktime_dt);
/*
- * measuring more than 3 meters is beyond the capabilities of
- * the sensor
+ * measuring more than 6,45 meters is beyond the capabilities of
+ * the supported sensors
* ==> filter out invalid results for not measuring echos of
* another us sensor
*
* formula:
- *         distance       3 m
- * time =  ---------- = --------- = 9404389 ns
- *          speed       319 m/s
+ *         distance     6,45 * 2 m
+ * time =  ---------- = ------------ = 40438871 ns
+ *          speed         319 m/s
*
* using a minimum speed at -20 °C of 319 m/s
*/
- if (dt_ns > 9404389)
+ if (dt_ns > 40438871)
return -EIO;
time_ns = dt_ns;
......@@ -159,20 +160,20 @@ static int srf04_read(struct srf04_data *data)
* with Temp in °C
* and speed in m/s
*
- * use 343 m/s as ultrasonic speed at 20 °C here in absence of the
+ * use 343,5 m/s as ultrasonic speed at 20 °C here in absence of the
* temperature
*
* therefore:
- *             time     343
- * distance = ------ * -----
- *             10^6      2
+ *             time     343,5     time * 106
+ * distance = ------ * ------- = ------------
+ *             10^6       2         617176
* with time in ns
* and distance in mm (one way)
*
- * because we limit to 3 meters the multiplication with 343 just
+ * because we limit to 6,45 meters the multiplication with 106 just
* fits into 32 bit
*/
- distance_mm = time_ns * 343 / 2000000;
+ distance_mm = time_ns * 106 / 617176;
return distance_mm;
}
......
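The new constants can be re-derived from the comments in the hunks above: the worst-case echo time is the round trip for 6.45 m at the -20 °C minimum speed of sound (319 m/s), which is about 40438871 ns, and 343.5 m/s divided by 2, expressed in mm per ns, is approximated by the integer ratio 106/617176, whose product with the maximum time still fits in 32 bits. A small standalone check of that arithmetic (not driver code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* round trip for 6.45 m at 319 m/s, in ns */
	double t_max = 2.0 * 6.45 / 319.0 * 1e9;

	/* driver conversion evaluated at the limit: time_ns * 106 / 617176 */
	uint32_t time_ns = 40438871;
	uint64_t prod = (uint64_t)time_ns * 106;	/* 4286520326, still < 2^32 */

	printf("t_max    = %.0f ns (driver limit 40438871)\n", t_max);
	printf("distance = %llu mm one way (~6.9 m at 20 degC)\n",
	       (unsigned long long)(prod / 617176));
	return 0;
}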
......@@ -125,4 +125,6 @@ source "drivers/staging/exfat/Kconfig"
source "drivers/staging/qlge/Kconfig"
source "drivers/staging/vboxsf/Kconfig"
endif # STAGING
......@@ -53,3 +53,4 @@ obj-$(CONFIG_UWB) += uwb/
obj-$(CONFIG_USB_WUSB) += wusbcore/
obj-$(CONFIG_EXFAT_FS) += exfat/
obj-$(CONFIG_QLGE) += qlge/
obj-$(CONFIG_VBOXSF_FS) += vboxsf/
config VBOXSF_FS
tristate "VirtualBox guest shared folder (vboxsf) support"
depends on X86 && VBOXGUEST
select NLS
help
VirtualBox hosts can share folders with guests, this driver
implements the Linux-guest side of this allowing folders exported
by the host to be mounted under Linux.
If you want to use shared folders in VirtualBox guests, answer Y or M.
# SPDX-License-Identifier: MIT
obj-$(CONFIG_VBOXSF_FS) += vboxsf.o
vboxsf-y := dir.o file.o utils.o vboxsf_wrappers.o super.o
TODO:
- Find a file-system developer to review this and give their Reviewed-By
- Address any items coming up during review
- Move to fs/vboxfs
Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org>
and Hans de Goede <hdegoede@redhat.com>
// SPDX-License-Identifier: MIT
/*
* VirtualBox Guest Shared Folders support: Directory inode and file operations
*
* Copyright (C) 2006-2018 Oracle Corporation
*/
#include <linux/namei.h>
#include <linux/vbox_utils.h>
#include "vfsmod.h"
static int vboxsf_dir_open(struct inode *inode, struct file *file)
{
struct vboxsf_sbi *sbi = VBOXSF_SBI(inode->i_sb);
struct shfl_createparms params = {};
struct vboxsf_dir_info *sf_d;
int err;
sf_d = vboxsf_dir_info_alloc();
if (!sf_d)
return -ENOMEM;
params.handle = SHFL_HANDLE_NIL;
params.create_flags = SHFL_CF_DIRECTORY | SHFL_CF_ACT_OPEN_IF_EXISTS |
SHFL_CF_ACT_FAIL_IF_NEW | SHFL_CF_ACCESS_READ;
err = vboxsf_create_at_dentry(file_dentry(file), &params);
if (err)
goto err_free_dir_info;
if (params.result != SHFL_FILE_EXISTS) {
err = -ENOENT;
goto err_close;
}
err = vboxsf_dir_read_all(sbi, sf_d, params.handle);
if (err)
goto err_close;
vboxsf_close(sbi->root, params.handle);
file->private_data = sf_d;
return 0;
err_close:
vboxsf_close(sbi->root, params.handle);
err_free_dir_info:
vboxsf_dir_info_free(sf_d);
return err;
}
static int vboxsf_dir_release(struct inode *inode, struct file *file)
{
if (file->private_data)
vboxsf_dir_info_free(file->private_data);
return 0;
}
static unsigned int vboxsf_get_d_type(u32 mode)
{
unsigned int d_type;
switch (mode & SHFL_TYPE_MASK) {
case SHFL_TYPE_FIFO:
d_type = DT_FIFO;
break;
case SHFL_TYPE_DEV_CHAR:
d_type = DT_CHR;
break;
case SHFL_TYPE_DIRECTORY:
d_type = DT_DIR;
break;
case SHFL_TYPE_DEV_BLOCK:
d_type = DT_BLK;
break;
case SHFL_TYPE_FILE:
d_type = DT_REG;
break;
case SHFL_TYPE_SYMLINK:
d_type = DT_LNK;
break;
case SHFL_TYPE_SOCKET:
d_type = DT_SOCK;
break;
case SHFL_TYPE_WHITEOUT:
d_type = DT_WHT;
break;
default:
d_type = DT_UNKNOWN;
break;
}
return d_type;
}
static bool vboxsf_dir_emit(struct file *dir, struct dir_context *ctx)
{
struct vboxsf_sbi *sbi = VBOXSF_SBI(file_inode(dir)->i_sb);
struct vboxsf_dir_info *sf_d = dir->private_data;
struct shfl_dirinfo *info;
struct vboxsf_dir_buf *b;
unsigned int d_type;
loff_t i, cur = 0;
ino_t fake_ino;
size_t size;
int err;
list_for_each_entry(b, &sf_d->info_list, head) {
try_next_entry:
if (ctx->pos >= cur + b->entries) {
cur += b->entries;
continue;
}
/*
* Note the vboxsf_dir_info objects we are iterating over here
* are variable sized, so the info pointer may end up being
* unaligned. This is how we get the data from the host.
* Since vboxsf is only supported on x86 machines this is not
* a problem.
*/
for (i = 0, info = b->buf; i < ctx->pos - cur; i++) {
size = offsetof(struct shfl_dirinfo, name.string) +
info->name.size;
info = (struct shfl_dirinfo *)((uintptr_t)info + size);
}
/* Info now points to the right entry, emit it. */
d_type = vboxsf_get_d_type(info->info.attr.mode);
/*
* On 32 bit systems pos is 64 signed, while ino is 32 bit
* unsigned so fake_ino may overflow, check for this.
*/
if ((ino_t)(ctx->pos + 1) != (u64)(ctx->pos + 1)) {
vbg_err("vboxsf: fake ino overflow, truncating dir\n");
return false;
}
fake_ino = ctx->pos + 1;
if (sbi->nls) {
char d_name[NAME_MAX];
err = vboxsf_nlscpy(sbi, d_name, NAME_MAX,
info->name.string.utf8,
info->name.length);
if (err) {
/* skip erroneous entry and proceed */
ctx->pos += 1;
goto try_next_entry;
}
return dir_emit(ctx, d_name, strlen(d_name),
fake_ino, d_type);
}
return dir_emit(ctx, info->name.string.utf8, info->name.length,
fake_ino, d_type);
}
return false;
}
static int vboxsf_dir_iterate(struct file *dir, struct dir_context *ctx)
{
bool keep_iterating;
for (keep_iterating = true; keep_iterating; ctx->pos += 1)
keep_iterating = vboxsf_dir_emit(dir, ctx);
return 0;
}
const struct file_operations vboxsf_dir_fops = {
.open = vboxsf_dir_open,
.iterate = vboxsf_dir_iterate,
.release = vboxsf_dir_release,
.read = generic_read_dir,
.llseek = generic_file_llseek,
};
/*
* This is called during name resolution/lookup to check if the @dentry in
* the cache is still valid. The job is handled by vboxsf_inode_revalidate.
*/
static int vboxsf_dentry_revalidate(struct dentry *dentry, unsigned int flags)
{
if (flags & LOOKUP_RCU)
return -ECHILD;
if (d_really_is_positive(dentry))
return vboxsf_inode_revalidate(dentry) == 0;
else
return vboxsf_stat_dentry(dentry, NULL) == -ENOENT;
}
const struct dentry_operations vboxsf_dentry_ops = {
.d_revalidate = vboxsf_dentry_revalidate
};
/* iops */
static struct dentry *vboxsf_dir_lookup(struct inode *parent,
struct dentry *dentry,
unsigned int flags)
{
struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb);
struct shfl_fsobjinfo fsinfo;
struct inode *inode;
int err;
dentry->d_time = jiffies;
err = vboxsf_stat_dentry(dentry, &fsinfo);
if (err) {
inode = (err == -ENOENT) ? NULL : ERR_PTR(err);
} else {
inode = vboxsf_new_inode(parent->i_sb);
if (!IS_ERR(inode))
vboxsf_init_inode(sbi, inode, &fsinfo);
}
return d_splice_alias(inode, dentry);
}
static int vboxsf_dir_instantiate(struct inode *parent, struct dentry *dentry,
struct shfl_fsobjinfo *info)
{
struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb);
struct vboxsf_inode *sf_i;
struct inode *inode;
inode = vboxsf_new_inode(parent->i_sb);
if (IS_ERR(inode))
return PTR_ERR(inode);
sf_i = VBOXSF_I(inode);
/* The host may have given us different attr than requested */
sf_i->force_restat = 1;
vboxsf_init_inode(sbi, inode, info);
d_instantiate(dentry, inode);
return 0;
}
static int vboxsf_dir_create(struct inode *parent, struct dentry *dentry,
umode_t mode, int is_dir)
{
struct vboxsf_inode *sf_parent_i = VBOXSF_I(parent);
struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb);
struct shfl_createparms params = {};
int err;
params.handle = SHFL_HANDLE_NIL;
params.create_flags = SHFL_CF_ACT_CREATE_IF_NEW |
SHFL_CF_ACT_FAIL_IF_EXISTS |
SHFL_CF_ACCESS_READWRITE |
(is_dir ? SHFL_CF_DIRECTORY : 0);
params.info.attr.mode = (mode & 0777) |
(is_dir ? SHFL_TYPE_DIRECTORY : SHFL_TYPE_FILE);
params.info.attr.additional = SHFLFSOBJATTRADD_NOTHING;
err = vboxsf_create_at_dentry(dentry, &params);
if (err)
return err;
if (params.result != SHFL_FILE_CREATED)
return -EPERM;
vboxsf_close(sbi->root, params.handle);
err = vboxsf_dir_instantiate(parent, dentry, &params.info);
if (err)
return err;
/* parent directory access/change time changed */
sf_parent_i->force_restat = 1;
return 0;
}
static int vboxsf_dir_mkfile(struct inode *parent, struct dentry *dentry,
umode_t mode, bool excl)
{
return vboxsf_dir_create(parent, dentry, mode, 0);
}
static int vboxsf_dir_mkdir(struct inode *parent, struct dentry *dentry,
umode_t mode)
{
return vboxsf_dir_create(parent, dentry, mode, 1);
}
static int vboxsf_dir_unlink(struct inode *parent, struct dentry *dentry)
{
struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb);
struct vboxsf_inode *sf_parent_i = VBOXSF_I(parent);
struct inode *inode = d_inode(dentry);
struct shfl_string *path;
u32 flags;
int err;
if (S_ISDIR(inode->i_mode))
flags = SHFL_REMOVE_DIR;
else
flags = SHFL_REMOVE_FILE;
if (S_ISLNK(inode->i_mode))
flags |= SHFL_REMOVE_SYMLINK;
path = vboxsf_path_from_dentry(sbi, dentry);
if (IS_ERR(path))
return PTR_ERR(path);
err = vboxsf_remove(sbi->root, path, flags);
__putname(path);
if (err)
return err;
/* parent directory access/change time changed */
sf_parent_i->force_restat = 1;
return 0;
}
static int vboxsf_dir_rename(struct inode *old_parent,
struct dentry *old_dentry,
struct inode *new_parent,
struct dentry *new_dentry,
unsigned int flags)
{
struct vboxsf_sbi *sbi = VBOXSF_SBI(old_parent->i_sb);
struct vboxsf_inode *sf_old_parent_i = VBOXSF_I(old_parent);
struct vboxsf_inode *sf_new_parent_i = VBOXSF_I(new_parent);
u32 shfl_flags = SHFL_RENAME_FILE | SHFL_RENAME_REPLACE_IF_EXISTS;
struct shfl_string *old_path, *new_path;
int err;
if (flags)
return -EINVAL;
old_path = vboxsf_path_from_dentry(sbi, old_dentry);
if (IS_ERR(old_path))
return PTR_ERR(old_path);
new_path = vboxsf_path_from_dentry(sbi, new_dentry);
if (IS_ERR(new_path)) {
err = PTR_ERR(new_path);
goto err_put_old_path;
}
if (d_inode(old_dentry)->i_mode & S_IFDIR)
shfl_flags = 0;
err = vboxsf_rename(sbi->root, old_path, new_path, shfl_flags);
if (err == 0) {
/* parent directories access/change time changed */
sf_new_parent_i->force_restat = 1;
sf_old_parent_i->force_restat = 1;
}
__putname(new_path);
err_put_old_path:
__putname(old_path);
return err;
}
static int vboxsf_dir_symlink(struct inode *parent, struct dentry *dentry,
const char *symname)
{
struct vboxsf_inode *sf_parent_i = VBOXSF_I(parent);
struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb);
int symname_size = strlen(symname) + 1;
struct shfl_string *path, *ssymname;
struct shfl_fsobjinfo info;
int err;
path = vboxsf_path_from_dentry(sbi, dentry);
if (IS_ERR(path))
return PTR_ERR(path);
ssymname = kmalloc(SHFLSTRING_HEADER_SIZE + symname_size, GFP_KERNEL);
if (!ssymname) {
__putname(path);
return -ENOMEM;
}
ssymname->length = symname_size - 1;
ssymname->size = symname_size;
memcpy(ssymname->string.utf8, symname, symname_size);
err = vboxsf_symlink(sbi->root, path, ssymname, &info);
kfree(ssymname);
__putname(path);
if (err) {
/* -EROFS means symlinks are not supported -> -EPERM */
return (err == -EROFS) ? -EPERM : err;
}
err = vboxsf_dir_instantiate(parent, dentry, &info);
if (err)
return err;
/* parent directory access/change time changed */
sf_parent_i->force_restat = 1;
return 0;
}
const struct inode_operations vboxsf_dir_iops = {
.lookup = vboxsf_dir_lookup,
.create = vboxsf_dir_mkfile,
.mkdir = vboxsf_dir_mkdir,
.rmdir = vboxsf_dir_unlink,
.unlink = vboxsf_dir_unlink,
.rename = vboxsf_dir_rename,
.symlink = vboxsf_dir_symlink,
.getattr = vboxsf_getattr,
.setattr = vboxsf_setattr,
};
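For reference, the readdir path above is what an ordinary userspace directory listing exercises: dir_emit() hands each name to the VFS together with the fake inode number and the DT_* value derived in vboxsf_get_d_type(). A trivial consumer in plain C, where /mnt/share is a hypothetical vboxsf mount point:

#include <dirent.h>
#include <stdio.h>

int main(void)
{
	DIR *d = opendir("/mnt/share");		/* hypothetical vboxsf mount */
	struct dirent *e;

	if (!d) {
		perror("opendir");
		return 1;
	}
	/* d_type comes from vboxsf_get_d_type(): DT_REG, DT_DIR, DT_LNK, ... */
	while ((e = readdir(d)) != NULL)
		printf("%-24s d_type=%u\n", e->d_name, (unsigned int)e->d_type);
	closedir(d);
	return 0;
}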
// SPDX-License-Identifier: MIT
/*
* VirtualBox Guest Shared Folders support: Regular file inode and file ops.
*
* Copyright (C) 2006-2018 Oracle Corporation
*/
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/sizes.h>
#include "vfsmod.h"
struct vboxsf_handle {
u64 handle;
u32 root;
u32 access_flags;
struct kref refcount;
struct list_head head;
};
static int vboxsf_file_open(struct inode *inode, struct file *file)
{
struct vboxsf_inode *sf_i = VBOXSF_I(inode);
struct shfl_createparms params = {};
struct vboxsf_handle *sf_handle;
u32 access_flags = 0;
int err;
sf_handle = kmalloc(sizeof(*sf_handle), GFP_KERNEL);
if (!sf_handle)
return -ENOMEM;
/*
* We check the value of params.handle afterwards to find out if
* the call succeeded or failed, as the API does not seem to cleanly
* distinguish error and informational messages.
*
* Furthermore, we must set params.handle to SHFL_HANDLE_NIL to
* make the shared folders host service use our mode parameter.
*/
params.handle = SHFL_HANDLE_NIL;
if (file->f_flags & O_CREAT) {
params.create_flags |= SHFL_CF_ACT_CREATE_IF_NEW;
/*
* We ignore O_EXCL, as the Linux kernel seems to call create
* beforehand itself, so O_EXCL should always fail.
*/
if (file->f_flags & O_TRUNC)
params.create_flags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
else
params.create_flags |= SHFL_CF_ACT_OPEN_IF_EXISTS;
} else {
params.create_flags |= SHFL_CF_ACT_FAIL_IF_NEW;
if (file->f_flags & O_TRUNC)
params.create_flags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
}
switch (file->f_flags & O_ACCMODE) {
case O_RDONLY:
access_flags |= SHFL_CF_ACCESS_READ;
break;
case O_WRONLY:
access_flags |= SHFL_CF_ACCESS_WRITE;
break;
case O_RDWR:
access_flags |= SHFL_CF_ACCESS_READWRITE;
break;
default:
WARN_ON(1);
}
if (file->f_flags & O_APPEND)
access_flags |= SHFL_CF_ACCESS_APPEND;
params.create_flags |= access_flags;
params.info.attr.mode = inode->i_mode;
err = vboxsf_create_at_dentry(file_dentry(file), &params);
if (err == 0 && params.handle == SHFL_HANDLE_NIL)
err = (params.result == SHFL_FILE_EXISTS) ? -EEXIST : -ENOENT;
if (err) {
kfree(sf_handle);
return err;
}
/* the host may have given us different attr than requested */
sf_i->force_restat = 1;
/* init our handle struct and add it to the inode's handles list */
sf_handle->handle = params.handle;
sf_handle->root = VBOXSF_SBI(inode->i_sb)->root;
sf_handle->access_flags = access_flags;
kref_init(&sf_handle->refcount);
mutex_lock(&sf_i->handle_list_mutex);
list_add(&sf_handle->head, &sf_i->handle_list);
mutex_unlock(&sf_i->handle_list_mutex);
file->private_data = sf_handle;
return 0;
}
static void vboxsf_handle_release(struct kref *refcount)
{
struct vboxsf_handle *sf_handle =
container_of(refcount, struct vboxsf_handle, refcount);
vboxsf_close(sf_handle->root, sf_handle->handle);
kfree(sf_handle);
}
static int vboxsf_file_release(struct inode *inode, struct file *file)
{
struct vboxsf_inode *sf_i = VBOXSF_I(inode);
struct vboxsf_handle *sf_handle = file->private_data;
/*
* When a file is closed on our (the guest) side, we want any subsequent
* accesses done on the host side to see all changes done from our side.
*/
filemap_write_and_wait(inode->i_mapping);
mutex_lock(&sf_i->handle_list_mutex);
list_del(&sf_handle->head);
mutex_unlock(&sf_i->handle_list_mutex);
kref_put(&sf_handle->refcount, vboxsf_handle_release);
return 0;
}
/*
* Write back dirty pages now, because there may not be any suitable
* open files later
*/
static void vboxsf_vma_close(struct vm_area_struct *vma)
{
filemap_write_and_wait(vma->vm_file->f_mapping);
}
static const struct vm_operations_struct vboxsf_file_vm_ops = {
.close = vboxsf_vma_close,
.fault = filemap_fault,
.map_pages = filemap_map_pages,
};
static int vboxsf_file_mmap(struct file *file, struct vm_area_struct *vma)
{
int err;
err = generic_file_mmap(file, vma);
if (!err)
vma->vm_ops = &vboxsf_file_vm_ops;
return err;
}
/*
* Note that since we are accessing files on the host's filesystem, files
* may always be changed underneath us by the host!
*
* The vboxsf API between the guest and the host does not offer any functions
* to deal with this. There is no inode-generation to check for changes, no
* events / callback on changes and no way to lock files.
*
* To avoid returning stale data when a file gets *opened* on our (the guest)
* side, we do a "stat" on the host side, then compare the mtime with the
* last known mtime and invalidate the page-cache if they differ.
* This is done from vboxsf_inode_revalidate().
*
* When reads are done through the read_iter fop, it is possible to do
* further cache revalidation then; there are 3 options to deal with this:
*
* 1) Rely solely on the revalidation done at open time
* 2) Do another "stat" and compare mtime again. Unfortunately the vboxsf
* host API does not allow stat on handles, so we would need to use
* file->f_path.dentry and the stat will then fail if the file was unlinked
* or renamed (and there is nothing like NFS' silly-rename). So we get:
* 2a) "stat" and compare mtime, on stat failure invalidate the cache
* 2b) "stat" and compare mtime, on stat failure do nothing
* 3) Simply always call invalidate_inode_pages2_range on the range of the read
*
* Currently we are keeping things KISS and using option 1. This allows
* directly using generic_file_read_iter without wrapping it.
*
* This means that only data written on the host side before open() on
* the guest side is guaranteed to be seen by the guest. If necessary
* we may provide other read-cache strategies in the future and make this
* configurable through a mount option.
*/
const struct file_operations vboxsf_reg_fops = {
.llseek = generic_file_llseek,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
.mmap = vboxsf_file_mmap,
.open = vboxsf_file_open,
.release = vboxsf_file_release,
.fsync = noop_fsync,
.splice_read = generic_file_splice_read,
};
const struct inode_operations vboxsf_reg_iops = {
.getattr = vboxsf_getattr,
.setattr = vboxsf_setattr
};
static int vboxsf_readpage(struct file *file, struct page *page)
{
struct vboxsf_handle *sf_handle = file->private_data;
loff_t off = page_offset(page);
u32 nread = PAGE_SIZE;
u8 *buf;
int err;
buf = kmap(page);
err = vboxsf_read(sf_handle->root, sf_handle->handle, off, &nread, buf);
if (err == 0) {
memset(&buf[nread], 0, PAGE_SIZE - nread);
flush_dcache_page(page);
SetPageUptodate(page);
} else {
SetPageError(page);
}
kunmap(page);
unlock_page(page);
return err;
}
static struct vboxsf_handle *vboxsf_get_write_handle(struct vboxsf_inode *sf_i)
{
struct vboxsf_handle *h, *sf_handle = NULL;
mutex_lock(&sf_i->handle_list_mutex);
list_for_each_entry(h, &sf_i->handle_list, head) {
if (h->access_flags == SHFL_CF_ACCESS_WRITE ||
h->access_flags == SHFL_CF_ACCESS_READWRITE) {
kref_get(&h->refcount);
sf_handle = h;
break;
}
}
mutex_unlock(&sf_i->handle_list_mutex);
return sf_handle;
}
static int vboxsf_writepage(struct page *page, struct writeback_control *wbc)
{
struct inode *inode = page->mapping->host;
struct vboxsf_inode *sf_i = VBOXSF_I(inode);
struct vboxsf_handle *sf_handle;
loff_t off = page_offset(page);
loff_t size = i_size_read(inode);
u32 nwrite = PAGE_SIZE;
u8 *buf;
int err;
if (off + PAGE_SIZE > size)
nwrite = size & ~PAGE_MASK;
sf_handle = vboxsf_get_write_handle(sf_i);
if (!sf_handle)
return -EBADF;
buf = kmap(page);
err = vboxsf_write(sf_handle->root, sf_handle->handle,
off, &nwrite, buf);
kunmap(page);
kref_put(&sf_handle->refcount, vboxsf_handle_release);
if (err == 0) {
ClearPageError(page);
/* mtime changed */
sf_i->force_restat = 1;
} else {
ClearPageUptodate(page);
}
unlock_page(page);
return err;
}
static int vboxsf_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned int len, unsigned int copied,
struct page *page, void *fsdata)
{
struct inode *inode = mapping->host;
struct vboxsf_handle *sf_handle = file->private_data;
unsigned int from = pos & ~PAGE_MASK;
u32 nwritten = len;
u8 *buf;
int err;
buf = kmap(page);
err = vboxsf_write(sf_handle->root, sf_handle->handle,
pos, &nwritten, buf + from);
kunmap(page);
if (err) {
nwritten = 0;
goto out;
}
/* mtime changed */
VBOXSF_I(inode)->force_restat = 1;
if (!PageUptodate(page) && nwritten == PAGE_SIZE)
SetPageUptodate(page);
pos += nwritten;
if (pos > inode->i_size)
i_size_write(inode, pos);
out:
unlock_page(page);
put_page(page);
return nwritten;
}
const struct address_space_operations vboxsf_reg_aops = {
.readpage = vboxsf_readpage,
.writepage = vboxsf_writepage,
.set_page_dirty = __set_page_dirty_nobuffers,
.write_begin = simple_write_begin,
.write_end = vboxsf_write_end,
};
static const char *vboxsf_get_link(struct dentry *dentry, struct inode *inode,
struct delayed_call *done)
{
struct vboxsf_sbi *sbi = VBOXSF_SBI(inode->i_sb);
struct shfl_string *path;
char *link;
int err;
if (!dentry)
return ERR_PTR(-ECHILD);
path = vboxsf_path_from_dentry(sbi, dentry);
if (IS_ERR(path))
return (char *)path;
link = kzalloc(PATH_MAX, GFP_KERNEL);
if (!link) {
__putname(path);
return ERR_PTR(-ENOMEM);
}
err = vboxsf_readlink(sbi->root, path, PATH_MAX, link);
__putname(path);
if (err) {
kfree(link);
return ERR_PTR(err);
}
set_delayed_call(done, kfree_link, link);
return link;
}
const struct inode_operations vboxsf_lnk_iops = {
.get_link = vboxsf_get_link
};
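The long comment above vboxsf_reg_fops lists the possible read-cache strategies and explains why option 1 (revalidate only at open time) was chosen. Purely as an illustration of what "option 2a" from that comment might look like, a read_iter wrapper could revalidate on every read and drop the page cache when the stat fails. This is a hypothetical sketch, not part of the driver, and error handling is deliberately minimal:

/*
 * Hypothetical sketch of "option 2a" from the comment above; vboxsf
 * currently relies on revalidation at open time instead (option 1).
 */
static ssize_t vboxsf_file_read_iter_revalidating(struct kiocb *iocb,
						  struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;

	/*
	 * vboxsf_inode_revalidate() stats through the dentry and already
	 * invalidates the page cache when the mtime changed; if the stat
	 * itself fails (file unlinked/renamed on the host), drop the cache
	 * explicitly before falling back to the generic read path.
	 */
	if (vboxsf_inode_revalidate(file_dentry(file)))
		invalidate_inode_pages2(file_inode(file)->i_mapping);

	return generic_file_read_iter(iocb, to);
}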
(This diff has been collapsed.)
// SPDX-License-Identifier: MIT
/*
* VirtualBox Guest Shared Folders support: Virtual File System.
*
* Module initialization/finalization
* File system registration/deregistration
* Superblock reading
* Few utility functions
*
* Copyright (C) 2006-2018 Oracle Corporation
*/
#include <linux/idr.h>
#include <linux/fs_parser.h>
#include <linux/magic.h>
#include <linux/module.h>
#include <linux/nls.h>
#include <linux/statfs.h>
#include <linux/vbox_utils.h>
#include "vfsmod.h"
#define VBOXSF_SUPER_MAGIC 0x786f4256 /* 'VBox' little endian */
#define VBSF_MOUNT_SIGNATURE_BYTE_0 ('\000')
#define VBSF_MOUNT_SIGNATURE_BYTE_1 ('\377')
#define VBSF_MOUNT_SIGNATURE_BYTE_2 ('\376')
#define VBSF_MOUNT_SIGNATURE_BYTE_3 ('\375')
static int follow_symlinks;
module_param(follow_symlinks, int, 0444);
MODULE_PARM_DESC(follow_symlinks,
"Let host resolve symlinks rather than showing them");
static DEFINE_IDA(vboxsf_bdi_ida);
static DEFINE_MUTEX(vboxsf_setup_mutex);
static bool vboxsf_setup_done;
static struct super_operations vboxsf_super_ops; /* forward declaration */
static struct kmem_cache *vboxsf_inode_cachep;
static char * const vboxsf_default_nls = CONFIG_NLS_DEFAULT;
enum { opt_nls, opt_uid, opt_gid, opt_ttl, opt_dmode, opt_fmode,
opt_dmask, opt_fmask };
static const struct fs_parameter_spec vboxsf_param_specs[] = {
fsparam_string ("nls", opt_nls),
fsparam_u32 ("uid", opt_uid),
fsparam_u32 ("gid", opt_gid),
fsparam_u32 ("ttl", opt_ttl),
fsparam_u32oct ("dmode", opt_dmode),
fsparam_u32oct ("fmode", opt_fmode),
fsparam_u32oct ("dmask", opt_dmask),
fsparam_u32oct ("fmask", opt_fmask),
{}
};
static const struct fs_parameter_description vboxsf_fs_parameters = {
.name = "vboxsf",
.specs = vboxsf_param_specs,
};
static int vboxsf_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
struct vboxsf_fs_context *ctx = fc->fs_private;
struct fs_parse_result result;
kuid_t uid;
kgid_t gid;
int opt;
opt = fs_parse(fc, &vboxsf_fs_parameters, param, &result);
if (opt < 0)
return opt;
switch (opt) {
case opt_nls:
if (fc->purpose != FS_CONTEXT_FOR_MOUNT) {
vbg_err("vboxsf: Cannot reconfigure nls option\n");
return -EINVAL;
}
ctx->nls_name = param->string;
param->string = NULL;
break;
case opt_uid:
uid = make_kuid(current_user_ns(), result.uint_32);
if (!uid_valid(uid))
return -EINVAL;
ctx->o.uid = uid;
break;
case opt_gid:
gid = make_kgid(current_user_ns(), result.uint_32);
if (!gid_valid(gid))
return -EINVAL;
ctx->o.gid = gid;
break;
case opt_ttl:
ctx->o.ttl = msecs_to_jiffies(result.uint_32);
break;
case opt_dmode:
if (result.uint_32 & ~0777)
return -EINVAL;
ctx->o.dmode = result.uint_32;
ctx->o.dmode_set = true;
break;
case opt_fmode:
if (result.uint_32 & ~0777)
return -EINVAL;
ctx->o.fmode = result.uint_32;
ctx->o.fmode_set = true;
break;
case opt_dmask:
if (result.uint_32 & ~07777)
return -EINVAL;
ctx->o.dmask = result.uint_32;
break;
case opt_fmask:
if (result.uint_32 & ~07777)
return -EINVAL;
ctx->o.fmask = result.uint_32;
break;
default:
return -EINVAL;
}
return 0;
}
static int vboxsf_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct vboxsf_fs_context *ctx = fc->fs_private;
struct shfl_string *folder_name, root_path;
struct vboxsf_sbi *sbi;
struct dentry *droot;
struct inode *iroot;
char *nls_name;
size_t size;
int err;
if (!fc->source)
return -EINVAL;
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi)
return -ENOMEM;
sbi->o = ctx->o;
idr_init(&sbi->ino_idr);
spin_lock_init(&sbi->ino_idr_lock);
sbi->next_generation = 1;
sbi->bdi_id = -1;
/* Load nls if not utf8 */
nls_name = ctx->nls_name ? ctx->nls_name : vboxsf_default_nls;
if (strcmp(nls_name, "utf8") != 0) {
if (nls_name == vboxsf_default_nls)
sbi->nls = load_nls_default();
else
sbi->nls = load_nls(nls_name);
if (!sbi->nls) {
vbg_err("vboxsf: Count not load '%s' nls\n", nls_name);
err = -EINVAL;
goto fail_free;
}
}
sbi->bdi_id = ida_simple_get(&vboxsf_bdi_ida, 0, 0, GFP_KERNEL);
if (sbi->bdi_id < 0) {
err = sbi->bdi_id;
goto fail_free;
}
err = super_setup_bdi_name(sb, "vboxsf-%s.%d", fc->source, sbi->bdi_id);
if (err)
goto fail_free;
/* Turn source into a shfl_string and map the folder */
size = strlen(fc->source) + 1;
folder_name = kmalloc(SHFLSTRING_HEADER_SIZE + size, GFP_KERNEL);
if (!folder_name) {
err = -ENOMEM;
goto fail_free;
}
folder_name->size = size;
folder_name->length = size - 1;
strlcpy(folder_name->string.utf8, fc->source, size);
err = vboxsf_map_folder(folder_name, &sbi->root);
kfree(folder_name);
if (err) {
vbg_err("vboxsf: Host rejected mount of '%s' with error %d\n",
fc->source, err);
goto fail_free;
}
root_path.length = 1;
root_path.size = 2;
root_path.string.utf8[0] = '/';
root_path.string.utf8[1] = 0;
err = vboxsf_stat(sbi, &root_path, &sbi->root_info);
if (err)
goto fail_unmap;
sb->s_magic = VBOXSF_SUPER_MAGIC;
sb->s_blocksize = 1024;
sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_op = &vboxsf_super_ops;
sb->s_d_op = &vboxsf_dentry_ops;
iroot = iget_locked(sb, 0);
if (!iroot) {
err = -ENOMEM;
goto fail_unmap;
}
vboxsf_init_inode(sbi, iroot, &sbi->root_info);
unlock_new_inode(iroot);
droot = d_make_root(iroot);
if (!droot) {
err = -ENOMEM;
goto fail_unmap;
}
sb->s_root = droot;
sb->s_fs_info = sbi;
return 0;
fail_unmap:
vboxsf_unmap_folder(sbi->root);
fail_free:
if (sbi->bdi_id >= 0)
ida_simple_remove(&vboxsf_bdi_ida, sbi->bdi_id);
if (sbi->nls)
unload_nls(sbi->nls);
idr_destroy(&sbi->ino_idr);
kfree(sbi);
return err;
}
static void vboxsf_inode_init_once(void *data)
{
struct vboxsf_inode *sf_i = data;
mutex_init(&sf_i->handle_list_mutex);
inode_init_once(&sf_i->vfs_inode);
}
static struct inode *vboxsf_alloc_inode(struct super_block *sb)
{
struct vboxsf_inode *sf_i;
sf_i = kmem_cache_alloc(vboxsf_inode_cachep, GFP_NOFS);
if (!sf_i)
return NULL;
sf_i->force_restat = 0;
INIT_LIST_HEAD(&sf_i->handle_list);
return &sf_i->vfs_inode;
}
static void vboxsf_free_inode(struct inode *inode)
{
struct vboxsf_sbi *sbi = VBOXSF_SBI(inode->i_sb);
unsigned long flags;
spin_lock_irqsave(&sbi->ino_idr_lock, flags);
idr_remove(&sbi->ino_idr, inode->i_ino);
spin_unlock_irqrestore(&sbi->ino_idr_lock, flags);
kmem_cache_free(vboxsf_inode_cachep, VBOXSF_I(inode));
}
static void vboxsf_put_super(struct super_block *sb)
{
struct vboxsf_sbi *sbi = VBOXSF_SBI(sb);
vboxsf_unmap_folder(sbi->root);
if (sbi->bdi_id >= 0)
ida_simple_remove(&vboxsf_bdi_ida, sbi->bdi_id);
if (sbi->nls)
unload_nls(sbi->nls);
/*
* vboxsf_free_inode uses the idr, make sure all delayed rcu free
* inodes are flushed.
*/
rcu_barrier();
idr_destroy(&sbi->ino_idr);
kfree(sbi);
}
static int vboxsf_statfs(struct dentry *dentry, struct kstatfs *stat)
{
struct super_block *sb = dentry->d_sb;
struct shfl_volinfo shfl_volinfo;
struct vboxsf_sbi *sbi;
u32 buf_len;
int err;
sbi = VBOXSF_SBI(sb);
buf_len = sizeof(shfl_volinfo);
err = vboxsf_fsinfo(sbi->root, 0, SHFL_INFO_GET | SHFL_INFO_VOLUME,
&buf_len, &shfl_volinfo);
if (err)
return err;
stat->f_type = VBOXSF_SUPER_MAGIC;
stat->f_bsize = shfl_volinfo.bytes_per_allocation_unit;
do_div(shfl_volinfo.total_allocation_bytes,
shfl_volinfo.bytes_per_allocation_unit);
stat->f_blocks = shfl_volinfo.total_allocation_bytes;
do_div(shfl_volinfo.available_allocation_bytes,
shfl_volinfo.bytes_per_allocation_unit);
stat->f_bfree = shfl_volinfo.available_allocation_bytes;
stat->f_bavail = shfl_volinfo.available_allocation_bytes;
stat->f_files = 1000;
/*
* Don't return 0 here since the guest may then think that it is not
* possible to create any more files.
*/
stat->f_ffree = 1000000;
stat->f_fsid.val[0] = 0;
stat->f_fsid.val[1] = 0;
stat->f_namelen = 255;
return 0;
}
static struct super_operations vboxsf_super_ops = {
.alloc_inode = vboxsf_alloc_inode,
.free_inode = vboxsf_free_inode,
.put_super = vboxsf_put_super,
.statfs = vboxsf_statfs,
};
static int vboxsf_setup(void)
{
int err;
mutex_lock(&vboxsf_setup_mutex);
if (vboxsf_setup_done)
goto success;
vboxsf_inode_cachep =
kmem_cache_create("vboxsf_inode_cache",
sizeof(struct vboxsf_inode), 0,
(SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD |
SLAB_ACCOUNT),
vboxsf_inode_init_once);
if (!vboxsf_inode_cachep) {
err = -ENOMEM;
goto fail_nomem;
}
err = vboxsf_connect();
if (err) {
vbg_err("vboxsf: err %d connecting to guest PCI-device\n", err);
vbg_err("vboxsf: make sure you are inside a VirtualBox VM\n");
vbg_err("vboxsf: and check dmesg for vboxguest errors\n");
goto fail_free_cache;
}
err = vboxsf_set_utf8();
if (err) {
vbg_err("vboxsf_setutf8 error %d\n", err);
goto fail_disconnect;
}
if (!follow_symlinks) {
err = vboxsf_set_symlinks();
if (err)
vbg_warn("vboxsf: Unable to show symlinks: %d\n", err);
}
vboxsf_setup_done = true;
success:
mutex_unlock(&vboxsf_setup_mutex);
return 0;
fail_disconnect:
vboxsf_disconnect();
fail_free_cache:
kmem_cache_destroy(vboxsf_inode_cachep);
fail_nomem:
mutex_unlock(&vboxsf_setup_mutex);
return err;
}
static int vboxsf_parse_monolithic(struct fs_context *fc, void *data)
{
char *options = data;
if (options && options[0] == VBSF_MOUNT_SIGNATURE_BYTE_0 &&
options[1] == VBSF_MOUNT_SIGNATURE_BYTE_1 &&
options[2] == VBSF_MOUNT_SIGNATURE_BYTE_2 &&
options[3] == VBSF_MOUNT_SIGNATURE_BYTE_3) {
vbg_err("vboxsf: Old binary mount data not supported, remove obsolete mount.vboxsf and/or update your VBoxService.\n");
return -EINVAL;
}
return generic_parse_monolithic(fc, data);
}
static int vboxsf_get_tree(struct fs_context *fc)
{
int err;
err = vboxsf_setup();
if (err)
return err;
return vfs_get_super(fc, vfs_get_independent_super, vboxsf_fill_super);
}
static int vboxsf_reconfigure(struct fs_context *fc)
{
struct vboxsf_sbi *sbi = VBOXSF_SBI(fc->root->d_sb);
struct vboxsf_fs_context *ctx = fc->fs_private;
struct inode *iroot;
iroot = ilookup(fc->root->d_sb, 0);
if (!iroot)
return -ENOENT;
/* Apply changed options to the root inode */
sbi->o = ctx->o;
vboxsf_init_inode(sbi, iroot, &sbi->root_info);
return 0;
}
static void vboxsf_free_fc(struct fs_context *fc)
{
struct vboxsf_fs_context *ctx = fc->fs_private;
kfree(ctx->nls_name);
kfree(ctx);
}
static const struct fs_context_operations vboxsf_context_ops = {
.free = vboxsf_free_fc,
.parse_param = vboxsf_parse_param,
.parse_monolithic = vboxsf_parse_monolithic,
.get_tree = vboxsf_get_tree,
.reconfigure = vboxsf_reconfigure,
};
static int vboxsf_init_fs_context(struct fs_context *fc)
{
struct vboxsf_fs_context *ctx;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
current_uid_gid(&ctx->o.uid, &ctx->o.gid);
fc->fs_private = ctx;
fc->ops = &vboxsf_context_ops;
return 0;
}
static struct file_system_type vboxsf_fs_type = {
.owner = THIS_MODULE,
.name = "vboxsf",
.init_fs_context = vboxsf_init_fs_context,
.parameters = &vboxsf_fs_parameters,
.kill_sb = kill_anon_super
};
/* Module initialization/finalization handlers */
static int __init vboxsf_init(void)
{
return register_filesystem(&vboxsf_fs_type);
}
static void __exit vboxsf_fini(void)
{
unregister_filesystem(&vboxsf_fs_type);
mutex_lock(&vboxsf_setup_mutex);
if (vboxsf_setup_done) {
vboxsf_disconnect();
/*
* Make sure all delayed rcu free inodes are flushed
* before we destroy the cache.
*/
rcu_barrier();
kmem_cache_destroy(vboxsf_inode_cachep);
}
mutex_unlock(&vboxsf_setup_mutex);
}
module_init(vboxsf_init);
module_exit(vboxsf_fini);
MODULE_DESCRIPTION("Oracle VM VirtualBox Module for Host File System Access");
MODULE_AUTHOR("Oracle Corporation");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_FS("vboxsf");
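The fs_context code above defines the mount parameters this filesystem understands (nls, uid, gid, ttl, dmode, fmode, dmask, fmask), with the share name passed as the mount source. By way of illustration, a guest could mount a share from C roughly as below; the share name and mount point are made up, and normally mount(8) or the guest additions do this for you:

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/*
	 * "myshare" is the folder name configured on the VirtualBox host
	 * side; the option string uses parameters handled by
	 * vboxsf_parse_param() above.  Requires CAP_SYS_ADMIN.
	 */
	if (mount("myshare", "/mnt/share", "vboxsf", 0,
		  "uid=1000,gid=1000,dmode=0770,fmode=0660") != 0) {
		perror("mount");
		return 1;
	}
	return 0;
}

The shell equivalent would be something like: mount -t vboxsf -o uid=1000,gid=1000 myshare /mnt/share.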
// SPDX-License-Identifier: MIT
/*
* VirtualBox Guest Shared Folders support: Utility functions.
* Mainly conversion from/to VirtualBox/Linux data structures.
*
* Copyright (C) 2006-2018 Oracle Corporation
*/
#include <linux/namei.h>
#include <linux/nls.h>
#include <linux/sizes.h>
#include <linux/vfs.h>
#include "vfsmod.h"
struct inode *vboxsf_new_inode(struct super_block *sb)
{
struct vboxsf_sbi *sbi = VBOXSF_SBI(sb);
struct inode *inode;
unsigned long flags;
int cursor, ret;
u32 gen;
inode = new_inode(sb);
if (!inode)
return ERR_PTR(-ENOMEM);
idr_preload(GFP_KERNEL);
spin_lock_irqsave(&sbi->ino_idr_lock, flags);
cursor = idr_get_cursor(&sbi->ino_idr);
ret = idr_alloc_cyclic(&sbi->ino_idr, inode, 1, 0, GFP_ATOMIC);
if (ret >= 0 && ret < cursor)
sbi->next_generation++;
gen = sbi->next_generation;
spin_unlock_irqrestore(&sbi->ino_idr_lock, flags);
idr_preload_end();
if (ret < 0) {
iput(inode);
return ERR_PTR(ret);
}
inode->i_ino = ret;
inode->i_generation = gen;
return inode;
}
/* set [inode] attributes based on [info], uid/gid based on [sbi] */
void vboxsf_init_inode(struct vboxsf_sbi *sbi, struct inode *inode,
const struct shfl_fsobjinfo *info)
{
const struct shfl_fsobjattr *attr;
s64 allocated;
int mode;
attr = &info->attr;
#define mode_set(r) ((attr->mode & (SHFL_UNIX_##r)) ? (S_##r) : 0)
mode = mode_set(IRUSR);
mode |= mode_set(IWUSR);
mode |= mode_set(IXUSR);
mode |= mode_set(IRGRP);
mode |= mode_set(IWGRP);
mode |= mode_set(IXGRP);
mode |= mode_set(IROTH);
mode |= mode_set(IWOTH);
mode |= mode_set(IXOTH);
#undef mode_set
/* We use the host-side values for these */
inode->i_flags |= S_NOATIME | S_NOCMTIME;
inode->i_mapping->a_ops = &vboxsf_reg_aops;
if (SHFL_IS_DIRECTORY(attr->mode)) {
inode->i_mode = sbi->o.dmode_set ? sbi->o.dmode : mode;
inode->i_mode &= ~sbi->o.dmask;
inode->i_mode |= S_IFDIR;
inode->i_op = &vboxsf_dir_iops;
inode->i_fop = &vboxsf_dir_fops;
/*
* XXX: this probably should be set to the number of entries
* in the directory plus two (. ..)
*/
set_nlink(inode, 1);
} else if (SHFL_IS_SYMLINK(attr->mode)) {
inode->i_mode = sbi->o.fmode_set ? sbi->o.fmode : mode;
inode->i_mode &= ~sbi->o.fmask;
inode->i_mode |= S_IFLNK;
inode->i_op = &vboxsf_lnk_iops;
set_nlink(inode, 1);
} else {
inode->i_mode = sbi->o.fmode_set ? sbi->o.fmode : mode;
inode->i_mode &= ~sbi->o.fmask;
inode->i_mode |= S_IFREG;
inode->i_op = &vboxsf_reg_iops;
inode->i_fop = &vboxsf_reg_fops;
set_nlink(inode, 1);
}
inode->i_uid = sbi->o.uid;
inode->i_gid = sbi->o.gid;
inode->i_size = info->size;
inode->i_blkbits = 12;
/* i_blocks always in units of 512 bytes! */
allocated = info->allocated + 511;
do_div(allocated, 512);
inode->i_blocks = allocated;
inode->i_atime = ns_to_timespec64(
info->access_time.ns_relative_to_unix_epoch);
inode->i_ctime = ns_to_timespec64(
info->change_time.ns_relative_to_unix_epoch);
inode->i_mtime = ns_to_timespec64(
info->modification_time.ns_relative_to_unix_epoch);
}
int vboxsf_create_at_dentry(struct dentry *dentry,
struct shfl_createparms *params)
{
struct vboxsf_sbi *sbi = VBOXSF_SBI(dentry->d_sb);
struct shfl_string *path;
int err;
path = vboxsf_path_from_dentry(sbi, dentry);
if (IS_ERR(path))
return PTR_ERR(path);
err = vboxsf_create(sbi->root, path, params);
__putname(path);
return err;
}
int vboxsf_stat(struct vboxsf_sbi *sbi, struct shfl_string *path,
struct shfl_fsobjinfo *info)
{
struct shfl_createparms params = {};
int err;
params.handle = SHFL_HANDLE_NIL;
params.create_flags = SHFL_CF_LOOKUP | SHFL_CF_ACT_FAIL_IF_NEW;
err = vboxsf_create(sbi->root, path, &params);
if (err)
return err;
if (params.result != SHFL_FILE_EXISTS)
return -ENOENT;
if (info)
*info = params.info;
return 0;
}
int vboxsf_stat_dentry(struct dentry *dentry, struct shfl_fsobjinfo *info)
{
struct vboxsf_sbi *sbi = VBOXSF_SBI(dentry->d_sb);
struct shfl_string *path;
int err;
path = vboxsf_path_from_dentry(sbi, dentry);
if (IS_ERR(path))
return PTR_ERR(path);
err = vboxsf_stat(sbi, path, info);
__putname(path);
return err;
}
int vboxsf_inode_revalidate(struct dentry *dentry)
{
struct vboxsf_sbi *sbi;
struct vboxsf_inode *sf_i;
struct shfl_fsobjinfo info;
struct timespec64 prev_mtime;
struct inode *inode;
int err;
if (!dentry || !d_really_is_positive(dentry))
return -EINVAL;
inode = d_inode(dentry);
prev_mtime = inode->i_mtime;
sf_i = VBOXSF_I(inode);
sbi = VBOXSF_SBI(dentry->d_sb);
if (!sf_i->force_restat) {
if (time_before(jiffies, dentry->d_time + sbi->o.ttl))
return 0;
}
err = vboxsf_stat_dentry(dentry, &info);
if (err)
return err;
dentry->d_time = jiffies;
sf_i->force_restat = 0;
vboxsf_init_inode(sbi, inode, &info);
/*
* If the file was changed on the host side we need to invalidate the
* page-cache for it. Note this also gets triggered by our own writes,
* this is unavoidable.
*/
if (timespec64_compare(&inode->i_mtime, &prev_mtime) > 0)
invalidate_inode_pages2(inode->i_mapping);
return 0;
}
int vboxsf_getattr(const struct path *path, struct kstat *kstat,
u32 request_mask, unsigned int flags)
{
int err;
struct dentry *dentry = path->dentry;
struct inode *inode = d_inode(dentry);
struct vboxsf_inode *sf_i = VBOXSF_I(inode);
switch (flags & AT_STATX_SYNC_TYPE) {
case AT_STATX_DONT_SYNC:
err = 0;
break;
case AT_STATX_FORCE_SYNC:
sf_i->force_restat = 1;
/* fall-through */
default:
err = vboxsf_inode_revalidate(dentry);
}
if (err)
return err;
generic_fillattr(d_inode(dentry), kstat);
return 0;
}
int vboxsf_setattr(struct dentry *dentry, struct iattr *iattr)
{
struct vboxsf_inode *sf_i = VBOXSF_I(d_inode(dentry));
struct vboxsf_sbi *sbi = VBOXSF_SBI(dentry->d_sb);
struct shfl_createparms params = {};
struct shfl_fsobjinfo info = {};
u32 buf_len;
int err;
params.handle = SHFL_HANDLE_NIL;
params.create_flags = SHFL_CF_ACT_OPEN_IF_EXISTS |
SHFL_CF_ACT_FAIL_IF_NEW |
SHFL_CF_ACCESS_ATTR_WRITE;
/* this is at least required for Posix hosts */
if (iattr->ia_valid & ATTR_SIZE)
params.create_flags |= SHFL_CF_ACCESS_WRITE;
err = vboxsf_create_at_dentry(dentry, &params);
if (err || params.result != SHFL_FILE_EXISTS)
return err ? err : -ENOENT;
#define mode_set(r) ((iattr->ia_mode & (S_##r)) ? SHFL_UNIX_##r : 0)
/*
* Setting the file size and setting the other attributes has to
* be handled separately.
*/
if (iattr->ia_valid & (ATTR_MODE | ATTR_ATIME | ATTR_MTIME)) {
if (iattr->ia_valid & ATTR_MODE) {
info.attr.mode = mode_set(IRUSR);
info.attr.mode |= mode_set(IWUSR);
info.attr.mode |= mode_set(IXUSR);
info.attr.mode |= mode_set(IRGRP);
info.attr.mode |= mode_set(IWGRP);
info.attr.mode |= mode_set(IXGRP);
info.attr.mode |= mode_set(IROTH);
info.attr.mode |= mode_set(IWOTH);
info.attr.mode |= mode_set(IXOTH);
if (iattr->ia_mode & S_IFDIR)
info.attr.mode |= SHFL_TYPE_DIRECTORY;
else
info.attr.mode |= SHFL_TYPE_FILE;
}
if (iattr->ia_valid & ATTR_ATIME)
info.access_time.ns_relative_to_unix_epoch =
timespec64_to_ns(&iattr->ia_atime);
if (iattr->ia_valid & ATTR_MTIME)
info.modification_time.ns_relative_to_unix_epoch =
timespec64_to_ns(&iattr->ia_mtime);
/*
* Ignore ctime (inode change time) as it can't be set
* from userland anyway.
*/
buf_len = sizeof(info);
err = vboxsf_fsinfo(sbi->root, params.handle,
SHFL_INFO_SET | SHFL_INFO_FILE, &buf_len,
&info);
if (err) {
vboxsf_close(sbi->root, params.handle);
return err;
}
/* the host may have given us different attr than requested */
sf_i->force_restat = 1;
}
#undef mode_set
if (iattr->ia_valid & ATTR_SIZE) {
memset(&info, 0, sizeof(info));
info.size = iattr->ia_size;
buf_len = sizeof(info);
err = vboxsf_fsinfo(sbi->root, params.handle,
SHFL_INFO_SET | SHFL_INFO_SIZE, &buf_len,
&info);
if (err) {
vboxsf_close(sbi->root, params.handle);
return err;
}
/* the host may have given us different attr than requested */
sf_i->force_restat = 1;
}
vboxsf_close(sbi->root, params.handle);
/* Update the inode with what the host has actually given us. */
if (sf_i->force_restat)
vboxsf_inode_revalidate(dentry);
return 0;
}
/*
* [dentry] contains string encoded in coding system that corresponds
* to [sbi]->nls, we must convert it to UTF8 here.
* Returns a shfl_string allocated through __getname (must be freed using
* __putname), or an ERR_PTR on error.
*/
struct shfl_string *vboxsf_path_from_dentry(struct vboxsf_sbi *sbi,
struct dentry *dentry)
{
struct shfl_string *shfl_path;
int path_len, out_len, nb;
char *buf, *path;
wchar_t uni;
u8 *out;
buf = __getname();
if (!buf)
return ERR_PTR(-ENOMEM);
path = dentry_path_raw(dentry, buf, PATH_MAX);
if (IS_ERR(path)) {
__putname(buf);
return (struct shfl_string *)path;
}
path_len = strlen(path);
if (sbi->nls) {
shfl_path = __getname();
if (!shfl_path) {
__putname(buf);
return ERR_PTR(-ENOMEM);
}
out = shfl_path->string.utf8;
out_len = PATH_MAX - SHFLSTRING_HEADER_SIZE - 1;
while (path_len) {
nb = sbi->nls->char2uni(path, path_len, &uni);
if (nb < 0) {
__putname(shfl_path);
__putname(buf);
return ERR_PTR(-EINVAL);
}
path += nb;
path_len -= nb;
nb = utf32_to_utf8(uni, out, out_len);
if (nb < 0) {
__putname(shfl_path);
__putname(buf);
return ERR_PTR(-ENAMETOOLONG);
}
out += nb;
out_len -= nb;
}
*out = 0;
shfl_path->length = out - shfl_path->string.utf8;
shfl_path->size = shfl_path->length + 1;
__putname(buf);
} else {
if ((SHFLSTRING_HEADER_SIZE + path_len + 1) > PATH_MAX) {
__putname(buf);
return ERR_PTR(-ENAMETOOLONG);
}
/*
* dentry_path stores the name at the end of buf, but the
* shfl_string string we return must be properly aligned.
*/
shfl_path = (struct shfl_string *)buf;
memmove(shfl_path->string.utf8, path, path_len);
shfl_path->string.utf8[path_len] = 0;
shfl_path->length = path_len;
shfl_path->size = path_len + 1;
}
return shfl_path;
}
int vboxsf_nlscpy(struct vboxsf_sbi *sbi, char *name, size_t name_bound_len,
const unsigned char *utf8_name, size_t utf8_len)
{
const char *in;
char *out;
size_t out_len;
size_t out_bound_len;
size_t in_bound_len;
in = utf8_name;
in_bound_len = utf8_len;
out = name;
out_len = 0;
/* Reserve space for terminating 0 */
out_bound_len = name_bound_len - 1;
while (in_bound_len) {
int nb;
unicode_t uni;
nb = utf8_to_utf32(in, in_bound_len, &uni);
if (nb < 0)
return -EINVAL;
in += nb;
in_bound_len -= nb;
nb = sbi->nls->uni2char(uni, out, out_bound_len);
if (nb < 0)
return nb;
out += nb;
out_bound_len -= nb;
out_len += nb;
}
*out = 0;
return 0;
}
static struct vboxsf_dir_buf *vboxsf_dir_buf_alloc(struct list_head *list)
{
struct vboxsf_dir_buf *b;
b = kmalloc(sizeof(*b), GFP_KERNEL);
if (!b)
return NULL;
b->buf = kmalloc(DIR_BUFFER_SIZE, GFP_KERNEL);
if (!b->buf) {
kfree(b);
return NULL;
}
b->entries = 0;
b->used = 0;
b->free = DIR_BUFFER_SIZE;
list_add(&b->head, list);
return b;
}
static void vboxsf_dir_buf_free(struct vboxsf_dir_buf *b)
{
list_del(&b->head);
kfree(b->buf);
kfree(b);
}
struct vboxsf_dir_info *vboxsf_dir_info_alloc(void)
{
struct vboxsf_dir_info *p;
p = kmalloc(sizeof(*p), GFP_KERNEL);
if (!p)
return NULL;
INIT_LIST_HEAD(&p->info_list);
return p;
}
void vboxsf_dir_info_free(struct vboxsf_dir_info *p)
{
struct list_head *list, *pos, *tmp;
list = &p->info_list;
list_for_each_safe(pos, tmp, list) {
struct vboxsf_dir_buf *b;
b = list_entry(pos, struct vboxsf_dir_buf, head);
vboxsf_dir_buf_free(b);
}
kfree(p);
}
int vboxsf_dir_read_all(struct vboxsf_sbi *sbi, struct vboxsf_dir_info *sf_d,
u64 handle)
{
struct vboxsf_dir_buf *b;
u32 entries, size;
int err = 0;
void *buf;
/* vboxsf_dirinfo returns 1 on end of dir */
while (err == 0) {
b = vboxsf_dir_buf_alloc(&sf_d->info_list);
if (!b) {
err = -ENOMEM;
break;
}
buf = b->buf;
size = b->free;
err = vboxsf_dirinfo(sbi->root, handle, NULL, 0, 0,
&size, buf, &entries);
if (err < 0)
break;
b->entries += entries;
b->free -= size;
b->used += size;
}
if (b && b->used == 0)
vboxsf_dir_buf_free(b);
/* -EILSEQ means the host could not translate a filename, ignore */
if (err > 0 || err == -EILSEQ)
err = 0;
return err;
}
// SPDX-License-Identifier: MIT
/*
* Wrapper functions for the shfl host calls.
*
* Copyright (C) 2006-2018 Oracle Corporation
*/
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vbox_err.h>
#include <linux/vbox_utils.h>
#include "vfsmod.h"
#define SHFL_REQUEST \
(VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV_OTHER | \
VMMDEV_REQUESTOR_CON_DONT_KNOW | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN)
static u32 vboxsf_client_id;
int vboxsf_connect(void)
{
struct vbg_dev *gdev;
struct vmmdev_hgcm_service_location loc;
int err, vbox_status;
loc.type = VMMDEV_HGCM_LOC_LOCALHOST_EXISTING;
strcpy(loc.u.localhost.service_name, "VBoxSharedFolders");
gdev = vbg_get_gdev();
if (IS_ERR(gdev))
return -ENODEV; /* No guest-device */
err = vbg_hgcm_connect(gdev, SHFL_REQUEST, &loc,
&vboxsf_client_id, &vbox_status);
vbg_put_gdev(gdev);
return err ? err : vbg_status_code_to_errno(vbox_status);
}
void vboxsf_disconnect(void)
{
struct vbg_dev *gdev;
int vbox_status;
gdev = vbg_get_gdev();
if (IS_ERR(gdev))
return; /* guest-device is gone, already disconnected */
vbg_hgcm_disconnect(gdev, SHFL_REQUEST, vboxsf_client_id, &vbox_status);
vbg_put_gdev(gdev);
}
static int vboxsf_call(u32 function, void *parms, u32 parm_count, int *status)
{
struct vbg_dev *gdev;
int err, vbox_status;
gdev = vbg_get_gdev();
if (IS_ERR(gdev))
return -ESHUTDOWN; /* guest-dev removed underneath us */
err = vbg_hgcm_call(gdev, SHFL_REQUEST, vboxsf_client_id, function,
U32_MAX, parms, parm_count, &vbox_status);
vbg_put_gdev(gdev);
if (err < 0)
return err;
if (status)
*status = vbox_status;
return vbg_status_code_to_errno(vbox_status);
}
int vboxsf_map_folder(struct shfl_string *folder_name, u32 *root)
{
struct shfl_map_folder parms;
int err, status;
parms.path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL;
parms.path.u.pointer.size = shfl_string_buf_size(folder_name);
parms.path.u.pointer.u.linear_addr = (uintptr_t)folder_name;
parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
parms.root.u.value32 = 0;
parms.delimiter.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
parms.delimiter.u.value32 = '/';
parms.case_sensitive.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
parms.case_sensitive.u.value32 = 1;
err = vboxsf_call(SHFL_FN_MAP_FOLDER, &parms, SHFL_CPARMS_MAP_FOLDER,
&status);
if (err == -ENOSYS && status == VERR_NOT_IMPLEMENTED)
vbg_err("%s: Error host is too old\n", __func__);
*root = parms.root.u.value32;
return err;
}
int vboxsf_unmap_folder(u32 root)
{
struct shfl_unmap_folder parms;
parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
parms.root.u.value32 = root;
return vboxsf_call(SHFL_FN_UNMAP_FOLDER, &parms,
SHFL_CPARMS_UNMAP_FOLDER, NULL);
}
/**
* vboxsf_create - Create a new file or folder
* @root: Root of the shared folder in which to create the file
* @parsed_path: The path of the file or folder relative to the shared folder
* @create_parms: Parameters for file/folder creation.
*
* Create a new file or folder or open an existing one in a shared folder.
* Note this function always returns 0 / success unless an exceptional condition
* occurs - out of memory, invalid arguments, etc. If the file or folder could
* not be opened or created, create_parms->handle will be set to
* SHFL_HANDLE_NIL on return. In this case the value in create_parms->result
* provides information as to why (e.g. SHFL_FILE_EXISTS), create_parms->result
* is also set on success as additional information.
*
* Returns:
* 0 or negative errno value.
*/
int vboxsf_create(u32 root, struct shfl_string *parsed_path,
struct shfl_createparms *create_parms)
{
struct shfl_create parms;
parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
parms.root.u.value32 = root;
parms.path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL;
parms.path.u.pointer.size = shfl_string_buf_size(parsed_path);
parms.path.u.pointer.u.linear_addr = (uintptr_t)parsed_path;
parms.parms.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL;
parms.parms.u.pointer.size = sizeof(struct shfl_createparms);
parms.parms.u.pointer.u.linear_addr = (uintptr_t)create_parms;
return vboxsf_call(SHFL_FN_CREATE, &parms, SHFL_CPARMS_CREATE, NULL);
}
int vboxsf_close(u32 root, u64 handle)
{
struct shfl_close parms;
parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
parms.root.u.value32 = root;
parms.handle.type = VMMDEV_HGCM_PARM_TYPE_64BIT;
parms.handle.u.value64 = handle;
return vboxsf_call(SHFL_FN_CLOSE, &parms, SHFL_CPARMS_CLOSE, NULL);
}
int vboxsf_remove(u32 root, struct shfl_string *parsed_path, u32 flags)
{
struct shfl_remove parms;
parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
parms.root.u.value32 = root;
parms.path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
parms.path.u.pointer.size = shfl_string_buf_size(parsed_path);
parms.path.u.pointer.u.linear_addr = (uintptr_t)parsed_path;
parms.flags.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
parms.flags.u.value32 = flags;
return vboxsf_call(SHFL_FN_REMOVE, &parms, SHFL_CPARMS_REMOVE, NULL);
}
int vboxsf_rename(u32 root, struct shfl_string *src_path,
struct shfl_string *dest_path, u32 flags)
{
struct shfl_rename parms;
parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
parms.root.u.value32 = root;
parms.src.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
parms.src.u.pointer.size = shfl_string_buf_size(src_path);
parms.src.u.pointer.u.linear_addr = (uintptr_t)src_path;
parms.dest.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
parms.dest.u.pointer.size = shfl_string_buf_size(dest_path);
parms.dest.u.pointer.u.linear_addr = (uintptr_t)dest_path;
parms.flags.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
parms.flags.u.value32 = flags;
return vboxsf_call(SHFL_FN_RENAME, &parms, SHFL_CPARMS_RENAME, NULL);
}
int vboxsf_read(u32 root, u64 handle, u64 offset, u32 *buf_len, u8 *buf)
{
struct shfl_read parms;
int err;
parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
parms.root.u.value32 = root;
parms.handle.type = VMMDEV_HGCM_PARM_TYPE_64BIT;
parms.handle.u.value64 = handle;
parms.offset.type = VMMDEV_HGCM_PARM_TYPE_64BIT;
parms.offset.u.value64 = offset;
parms.cb.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
parms.cb.u.value32 = *buf_len;
parms.buffer.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT;
parms.buffer.u.pointer.size = *buf_len;
parms.buffer.u.pointer.u.linear_addr = (uintptr_t)buf;
err = vboxsf_call(SHFL_FN_READ, &parms, SHFL_CPARMS_READ, NULL);
*buf_len = parms.cb.u.value32;
return err;
}
int vboxsf_write(u32 root, u64 handle, u64 offset, u32 *buf_len, u8 *buf)
{
struct shfl_write parms;
int err;
parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
parms.root.u.value32 = root;
parms.handle.type = VMMDEV_HGCM_PARM_TYPE_64BIT;
parms.handle.u.value64 = handle;
parms.offset.type = VMMDEV_HGCM_PARM_TYPE_64BIT;
parms.offset.u.value64 = offset;
parms.cb.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
parms.cb.u.value32 = *buf_len;
parms.buffer.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
parms.buffer.u.pointer.size = *buf_len;
parms.buffer.u.pointer.u.linear_addr = (uintptr_t)buf;
err = vboxsf_call(SHFL_FN_WRITE, &parms, SHFL_CPARMS_WRITE, NULL);
*buf_len = parms.cb.u.value32;
return err;
}
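
/*
 * Illustration only: for both vboxsf_read() and vboxsf_write() the *buf_len
 * argument is in/out - it carries the requested byte count into the host
 * call (the cb parameter above) and is overwritten with the count the host
 * actually transferred, which may be shorter. The helper below is a
 * hypothetical sketch of reading at a given offset.
 */
static int __maybe_unused vboxsf_example_read(u32 root, u64 handle,
					      u64 offset, u8 *buf, u32 want)
{
	u32 nread = want;	/* in: number of bytes requested */
	int err;

	err = vboxsf_read(root, handle, offset, &nread, buf);
	if (err)
		return err;

	/* out: nread now holds the number of bytes actually read */
	return nread;
}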

/* Returns 0 on success, 1 on end-of-dir, negative errno otherwise */
int vboxsf_dirinfo(u32 root, u64 handle,
struct shfl_string *parsed_path, u32 flags, u32 index,
u32 *buf_len, struct shfl_dirinfo *buf, u32 *file_count)
{
struct shfl_list parms;
int err, status;
parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
parms.root.u.value32 = root;
parms.handle.type = VMMDEV_HGCM_PARM_TYPE_64BIT;
parms.handle.u.value64 = handle;
parms.flags.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
parms.flags.u.value32 = flags;
parms.cb.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
parms.cb.u.value32 = *buf_len;
if (parsed_path) {
parms.path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
parms.path.u.pointer.size = shfl_string_buf_size(parsed_path);
parms.path.u.pointer.u.linear_addr = (uintptr_t)parsed_path;
} else {
parms.path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_IN;
parms.path.u.pointer.size = 0;
parms.path.u.pointer.u.linear_addr = 0;
}
parms.buffer.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT;
parms.buffer.u.pointer.size = *buf_len;
parms.buffer.u.pointer.u.linear_addr = (uintptr_t)buf;
parms.resume_point.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
parms.resume_point.u.value32 = index;
parms.file_count.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
parms.file_count.u.value32 = 0; /* out parameter only */
err = vboxsf_call(SHFL_FN_LIST, &parms, SHFL_CPARMS_LIST, &status);
if (err == -ENODATA && status == VERR_NO_MORE_FILES)
err = 1;
*buf_len = parms.cb.u.value32;
*file_count = parms.file_count.u.value32;
return err;
}
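
/*
 * Illustration only: per the comment above, a caller is expected to invoke
 * vboxsf_dirinfo() repeatedly on the same open handle until it returns 1
 * (end-of-dir). The helper below is a hypothetical sketch that merely counts
 * the entries the host reports; a real caller would also walk the
 * shfl_dirinfo records packed into buf, which is assumed to be at least
 * DIR_BUFFER_SIZE bytes (DIR_BUFFER_SIZE comes from the driver's vfsmod.h).
 */
static int __maybe_unused vboxsf_example_count_dir(u32 root, u64 handle,
						   struct shfl_dirinfo *buf)
{
	u32 buf_len, count, total = 0;
	int err;

	for (;;) {
		buf_len = DIR_BUFFER_SIZE;
		err = vboxsf_dirinfo(root, handle, NULL, 0, 0,
				     &buf_len, buf, &count);
		if (err < 0)
			return err;	/* negative errno from the host call */

		total += count;		/* entries packed into buf this round */

		if (err == 1)
			break;		/* host reported VERR_NO_MORE_FILES */
	}

	return total;
}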

int vboxsf_fsinfo(u32 root, u64 handle, u32 flags,
u32 *buf_len, void *buf)
{
struct shfl_information parms;
int err;
parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
parms.root.u.value32 = root;
parms.handle.type = VMMDEV_HGCM_PARM_TYPE_64BIT;
parms.handle.u.value64 = handle;
parms.flags.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
parms.flags.u.value32 = flags;
parms.cb.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
parms.cb.u.value32 = *buf_len;
parms.info.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL;
parms.info.u.pointer.size = *buf_len;
parms.info.u.pointer.u.linear_addr = (uintptr_t)buf;
err = vboxsf_call(SHFL_FN_INFORMATION, &parms, SHFL_CPARMS_INFORMATION,
NULL);
*buf_len = parms.cb.u.value32;
return err;
}
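
/*
 * Illustration only: this single wrapper serves both "get" and "set"
 * requests, selected via the flags argument. The flag and struct names in
 * the sketch below are assumed to come from shfl_hostintf.h:
 *
 *	struct shfl_volinfo volinfo;
 *	u32 buf_len = sizeof(volinfo);
 *	int err = vboxsf_fsinfo(root, 0, SHFL_INFO_GET | SHFL_INFO_VOLUME,
 *				&buf_len, &volinfo);
 *
 * while SHFL_INFO_SET | SHFL_INFO_FILE would instead push updated file
 * attributes for an open handle.
 */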

int vboxsf_readlink(u32 root, struct shfl_string *parsed_path,
u32 buf_len, u8 *buf)
{
struct shfl_readLink parms;
parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
parms.root.u.value32 = root;
parms.path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
parms.path.u.pointer.size = shfl_string_buf_size(parsed_path);
parms.path.u.pointer.u.linear_addr = (uintptr_t)parsed_path;
parms.buffer.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT;
parms.buffer.u.pointer.size = buf_len;
parms.buffer.u.pointer.u.linear_addr = (uintptr_t)buf;
return vboxsf_call(SHFL_FN_READLINK, &parms, SHFL_CPARMS_READLINK,
NULL);
}

int vboxsf_symlink(u32 root, struct shfl_string *new_path,
struct shfl_string *old_path, struct shfl_fsobjinfo *buf)
{
struct shfl_symlink parms;
parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
parms.root.u.value32 = root;
parms.new_path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
parms.new_path.u.pointer.size = shfl_string_buf_size(new_path);
parms.new_path.u.pointer.u.linear_addr = (uintptr_t)new_path;
parms.old_path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
parms.old_path.u.pointer.size = shfl_string_buf_size(old_path);
parms.old_path.u.pointer.u.linear_addr = (uintptr_t)old_path;
parms.info.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT;
parms.info.u.pointer.size = sizeof(struct shfl_fsobjinfo);
parms.info.u.pointer.u.linear_addr = (uintptr_t)buf;
return vboxsf_call(SHFL_FN_SYMLINK, &parms, SHFL_CPARMS_SYMLINK, NULL);
}

int vboxsf_set_utf8(void)
{
return vboxsf_call(SHFL_FN_SET_UTF8, NULL, 0, NULL);
}

int vboxsf_set_symlinks(void)
{
return vboxsf_call(SHFL_FN_SET_SYMLINKS, NULL, 0, NULL);
}

/* SPDX-License-Identifier: MIT */
/*
* VirtualBox Guest Shared Folders support: module header.
*
* Copyright (C) 2006-2018 Oracle Corporation
*/

#ifndef VFSMOD_H
#define VFSMOD_H

#include <linux/backing-dev.h>
#include <linux/idr.h>
#include "shfl_hostintf.h"

#define DIR_BUFFER_SIZE SZ_16K

/* The cast is to prevent assignment of void * to pointers of arbitrary type */
#define VBOXSF_SBI(sb) ((struct vboxsf_sbi *)(sb)->s_fs_info)
#define VBOXSF_I(i) container_of(i, struct vboxsf_inode, vfs_inode)
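
/*
 * Illustration only: typical use of the two accessor macros above from
 * inode/file operations (the super_block's s_fs_info holds the vboxsf_sbi,
 * as the VBOXSF_SBI() cast implies):
 *
 *	struct vboxsf_sbi *sbi = VBOXSF_SBI(inode->i_sb);
 *	struct vboxsf_inode *sf_i = VBOXSF_I(inode);
 */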

struct vboxsf_options {
unsigned long ttl;
kuid_t uid;
kgid_t gid;
bool dmode_set;
bool fmode_set;
umode_t dmode;
umode_t fmode;
umode_t dmask;
umode_t fmask;
};

struct vboxsf_fs_context {
struct vboxsf_options o;
char *nls_name;
};

/* per-shared folder information */
struct vboxsf_sbi {
struct vboxsf_options o;
struct shfl_fsobjinfo root_info;
struct idr ino_idr;
spinlock_t ino_idr_lock; /* This protects ino_idr */
struct nls_table *nls;
u32 next_generation;
u32 root;
int bdi_id;
};

/* per-inode information */
struct vboxsf_inode {
/* some information was changed, update data on next revalidate */
int force_restat;
/* list of open handles for this inode + lock protecting it */
struct list_head handle_list;
/* This mutex protects handle_list accesses */
struct mutex handle_list_mutex;
/* The VFS inode struct */
struct inode vfs_inode;
};

struct vboxsf_dir_info {
struct list_head info_list;
};

struct vboxsf_dir_buf {
size_t entries;
size_t free;
size_t used;
void *buf;
struct list_head head;
};
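
/*
 * Illustration only: the field names above suggest the following invariant,
 * which is an inference rather than something stated in this header - 'buf'
 * is a DIR_BUFFER_SIZE allocation, 'used' + 'free' track how much of it is
 * filled with packed shfl_dirinfo records, and 'entries' counts those
 * records; each buffer is linked into vboxsf_dir_info.info_list via 'head'.
 * A hypothetical setup of a fresh buffer could look like:
 *
 *	b->buf     = kmalloc(DIR_BUFFER_SIZE, GFP_KERNEL);
 *	b->entries = 0;
 *	b->free    = DIR_BUFFER_SIZE;
 *	b->used    = 0;
 *	list_add(&b->head, &sf_d->info_list);
 */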

/* globals */
extern const struct inode_operations vboxsf_dir_iops;
extern const struct inode_operations vboxsf_lnk_iops;
extern const struct inode_operations vboxsf_reg_iops;
extern const struct file_operations vboxsf_dir_fops;
extern const struct file_operations vboxsf_reg_fops;
extern const struct address_space_operations vboxsf_reg_aops;
extern const struct dentry_operations vboxsf_dentry_ops;

/* from utils.c */
struct inode *vboxsf_new_inode(struct super_block *sb);
void vboxsf_init_inode(struct vboxsf_sbi *sbi, struct inode *inode,
const struct shfl_fsobjinfo *info);
int vboxsf_create_at_dentry(struct dentry *dentry,
struct shfl_createparms *params);
int vboxsf_stat(struct vboxsf_sbi *sbi, struct shfl_string *path,
struct shfl_fsobjinfo *info);
int vboxsf_stat_dentry(struct dentry *dentry, struct shfl_fsobjinfo *info);
int vboxsf_inode_revalidate(struct dentry *dentry);
int vboxsf_getattr(const struct path *path, struct kstat *kstat,
u32 request_mask, unsigned int query_flags);
int vboxsf_setattr(struct dentry *dentry, struct iattr *iattr);
struct shfl_string *vboxsf_path_from_dentry(struct vboxsf_sbi *sbi,
struct dentry *dentry);
int vboxsf_nlscpy(struct vboxsf_sbi *sbi, char *name, size_t name_bound_len,
const unsigned char *utf8_name, size_t utf8_len);
struct vboxsf_dir_info *vboxsf_dir_info_alloc(void);
void vboxsf_dir_info_free(struct vboxsf_dir_info *p);
int vboxsf_dir_read_all(struct vboxsf_sbi *sbi, struct vboxsf_dir_info *sf_d,
u64 handle);

/* from vboxsf_wrappers.c */
int vboxsf_connect(void);
void vboxsf_disconnect(void);
int vboxsf_create(u32 root, struct shfl_string *parsed_path,
struct shfl_createparms *create_parms);
int vboxsf_close(u32 root, u64 handle);
int vboxsf_remove(u32 root, struct shfl_string *parsed_path, u32 flags);
int vboxsf_rename(u32 root, struct shfl_string *src_path,
struct shfl_string *dest_path, u32 flags);
int vboxsf_read(u32 root, u64 handle, u64 offset, u32 *buf_len, u8 *buf);
int vboxsf_write(u32 root, u64 handle, u64 offset, u32 *buf_len, u8 *buf);
int vboxsf_dirinfo(u32 root, u64 handle,
struct shfl_string *parsed_path, u32 flags, u32 index,
u32 *buf_len, struct shfl_dirinfo *buf, u32 *file_count);
int vboxsf_fsinfo(u32 root, u64 handle, u32 flags,
u32 *buf_len, void *buf);
int vboxsf_map_folder(struct shfl_string *folder_name, u32 *root);
int vboxsf_unmap_folder(u32 root);
int vboxsf_readlink(u32 root, struct shfl_string *parsed_path,
u32 buf_len, u8 *buf);
int vboxsf_symlink(u32 root, struct shfl_string *new_path,
struct shfl_string *old_path, struct shfl_fsobjinfo *buf);
int vboxsf_set_utf8(void);
int vboxsf_set_symlinks(void);

#endif