author    | Milan Pandurov <milanpa@amazon.de>  | 2019-12-13 14:07:21 +0100
committer | Paolo Bonzini <pbonzini@redhat.com> | 2020-01-21 14:45:33 +0100
commit    | 09cbcef6c60e77af11c3f27e62ea3f291a5d436c (patch)
tree      | f72b179f599614c167e1fddad5f532faad072376 /virt
parent    | e30a7d623dccdb3f880fbcad980b0cb589a1da45 (diff)
kvm: Refactor handling of VM debugfs files
We can store a reference to kvm_stats_debugfs_item instead of copying
its values into kvm_stat_data.
This allows us to remove duplicated code and the use of a temporary
kvm_stat_data inside vm_stat_get() et al.
Signed-off-by: Milan Pandurov <milanpa@amazon.de>
Reviewed-by: Alexander Graf <graf@amazon.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
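
The refactoring relies on two definitions in include/linux/kvm_host.h that this 'virt'-only diff does not show: kvm_stat_data now carries a pointer to the kvm_stats_debugfs_item instead of copies of its offset and mode, and the old inline "p->mode ? p->mode : 0644" default moves into a KVM_DBGFS_GET_MODE() helper. Below is a minimal sketch of that header-side shape; field order and exact types are assumptions, not taken from the patch:

```c
/* Sketch only: the real definitions live in include/linux/kvm_host.h,
 * which the 'virt'-limited diffstat does not cover.  Field order and
 * exact types are assumptions. */
enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
	const char *name;
	int offset;               /* offset into struct kvm or struct kvm_vcpu */
	enum kvm_stat_kind kind;
	int mode;                 /* 0 means "use the default mode" */
};

struct kvm_stat_data {
	struct kvm *kvm;
	struct kvm_stats_debugfs_item *dbgfs_item;  /* was: copied offset + mode */
};

/* Fall back to 0644 when an item does not specify a mode, as the removed
 * "p->mode ? p->mode : 0644" expressions did inline. */
#define KVM_DBGFS_GET_MODE(dbgfs_item) \
	((dbgfs_item)->mode ? (dbgfs_item)->mode : 0644)
```

With this in place, kvm_debugfs_open() decides whether to wire up the clear callback by testing KVM_DBGFS_GET_MODE(stat_data->dbgfs_item) & 0222, which is the same check the old code expressed as stat_data->mode & S_IWUGO (S_IWUGO is octal 0222).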
Diffstat (limited to 'virt')
-rw-r--r-- | virt/kvm/kvm_main.c | 142
1 file changed, 71 insertions(+), 71 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 1a6d5ebd5c42..483c683408a2 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -113,7 +113,7 @@ struct dentry *kvm_debugfs_dir;
 EXPORT_SYMBOL_GPL(kvm_debugfs_dir);

 static int kvm_debugfs_num_entries;
-static const struct file_operations *stat_fops_per_vm[];
+static const struct file_operations stat_fops_per_vm;

 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
                            unsigned long arg);
@@ -650,11 +650,11 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
                         return -ENOMEM;

                 stat_data->kvm = kvm;
-                stat_data->offset = p->offset;
-                stat_data->mode = p->mode ? p->mode : 0644;
+                stat_data->dbgfs_item = p;
                 kvm->debugfs_stat_data[p - debugfs_entries] = stat_data;
-                debugfs_create_file(p->name, stat_data->mode, kvm->debugfs_dentry,
-                                    stat_data, stat_fops_per_vm[p->kind]);
+                debugfs_create_file(p->name, KVM_DBGFS_GET_MODE(p),
+                                    kvm->debugfs_dentry, stat_data,
+                                    &stat_fops_per_vm);
         }
         return 0;
 }
@@ -4010,8 +4010,9 @@ static int kvm_debugfs_open(struct inode *inode, struct file *file,
                 return -ENOENT;

         if (simple_attr_open(inode, file, get,
-                             stat_data->mode & S_IWUGO ? set : NULL,
-                             fmt)) {
+                             KVM_DBGFS_GET_MODE(stat_data->dbgfs_item) & 0222
+                             ? set : NULL,
+                             fmt)) {
                 kvm_put_kvm(stat_data->kvm);
                 return -ENOMEM;
         }
@@ -4030,105 +4031,111 @@ static int kvm_debugfs_release(struct inode *inode, struct file *file)
         return 0;
 }

-static int vm_stat_get_per_vm(void *data, u64 *val)
+static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val)
 {
-        struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
+        *val = *(ulong *)((void *)kvm + offset);

-        *val = *(ulong *)((void *)stat_data->kvm + stat_data->offset);
+        return 0;
+}
+
+static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset)
+{
+        *(ulong *)((void *)kvm + offset) = 0;

         return 0;
 }

-static int vm_stat_clear_per_vm(void *data, u64 val)
+static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val)
 {
-        struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
+        int i;
+        struct kvm_vcpu *vcpu;

-        if (val)
-                return -EINVAL;
+        *val = 0;

-        *(ulong *)((void *)stat_data->kvm + stat_data->offset) = 0;
+        kvm_for_each_vcpu(i, vcpu, kvm)
+                *val += *(u64 *)((void *)vcpu + offset);

         return 0;
 }

-static int vm_stat_get_per_vm_open(struct inode *inode, struct file *file)
+static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset)
 {
-        __simple_attr_check_format("%llu\n", 0ull);
-        return kvm_debugfs_open(inode, file, vm_stat_get_per_vm,
-                                vm_stat_clear_per_vm, "%llu\n");
-}
+        int i;
+        struct kvm_vcpu *vcpu;

-static const struct file_operations vm_stat_get_per_vm_fops = {
-        .owner   = THIS_MODULE,
-        .open    = vm_stat_get_per_vm_open,
-        .release = kvm_debugfs_release,
-        .read    = simple_attr_read,
-        .write   = simple_attr_write,
-        .llseek  = no_llseek,
-};
+        kvm_for_each_vcpu(i, vcpu, kvm)
+                *(u64 *)((void *)vcpu + offset) = 0;
+
+        return 0;
+}

-static int vcpu_stat_get_per_vm(void *data, u64 *val)
+static int kvm_stat_data_get(void *data, u64 *val)
 {
-        int i;
+        int r = -EFAULT;
         struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
-        struct kvm_vcpu *vcpu;
-
-        *val = 0;

-        kvm_for_each_vcpu(i, vcpu, stat_data->kvm)
-                *val += *(u64 *)((void *)vcpu + stat_data->offset);
+        switch (stat_data->dbgfs_item->kind) {
+        case KVM_STAT_VM:
+                r = kvm_get_stat_per_vm(stat_data->kvm,
+                                        stat_data->dbgfs_item->offset, val);
+                break;
+        case KVM_STAT_VCPU:
+                r = kvm_get_stat_per_vcpu(stat_data->kvm,
+                                          stat_data->dbgfs_item->offset, val);
+                break;
+        }

-        return 0;
+        return r;
 }

-static int vcpu_stat_clear_per_vm(void *data, u64 val)
+static int kvm_stat_data_clear(void *data, u64 val)
 {
-        int i;
+        int r = -EFAULT;
         struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
-        struct kvm_vcpu *vcpu;

         if (val)
                 return -EINVAL;

-        kvm_for_each_vcpu(i, vcpu, stat_data->kvm)
-                *(u64 *)((void *)vcpu + stat_data->offset) = 0;
+        switch (stat_data->dbgfs_item->kind) {
+        case KVM_STAT_VM:
+                r = kvm_clear_stat_per_vm(stat_data->kvm,
+                                          stat_data->dbgfs_item->offset);
+                break;
+        case KVM_STAT_VCPU:
+                r = kvm_clear_stat_per_vcpu(stat_data->kvm,
+                                            stat_data->dbgfs_item->offset);
+                break;
+        }

-        return 0;
+        return r;
 }

-static int vcpu_stat_get_per_vm_open(struct inode *inode, struct file *file)
+static int kvm_stat_data_open(struct inode *inode, struct file *file)
 {
         __simple_attr_check_format("%llu\n", 0ull);
-        return kvm_debugfs_open(inode, file, vcpu_stat_get_per_vm,
-                                vcpu_stat_clear_per_vm, "%llu\n");
+        return kvm_debugfs_open(inode, file, kvm_stat_data_get,
+                                kvm_stat_data_clear, "%llu\n");
 }

-static const struct file_operations vcpu_stat_get_per_vm_fops = {
-        .owner   = THIS_MODULE,
-        .open    = vcpu_stat_get_per_vm_open,
+static const struct file_operations stat_fops_per_vm = {
+        .owner = THIS_MODULE,
+        .open = kvm_stat_data_open,
         .release = kvm_debugfs_release,
-        .read    = simple_attr_read,
-        .write   = simple_attr_write,
-        .llseek  = no_llseek,
-};
-
-static const struct file_operations *stat_fops_per_vm[] = {
-        [KVM_STAT_VCPU] = &vcpu_stat_get_per_vm_fops,
-        [KVM_STAT_VM] = &vm_stat_get_per_vm_fops,
+        .read = simple_attr_read,
+        .write = simple_attr_write,
+        .llseek = no_llseek,
 };

 static int vm_stat_get(void *_offset, u64 *val)
 {
         unsigned offset = (long)_offset;
         struct kvm *kvm;
-        struct kvm_stat_data stat_tmp = {.offset = offset};
         u64 tmp_val;

         *val = 0;
         mutex_lock(&kvm_lock);
         list_for_each_entry(kvm, &vm_list, vm_list) {
-                stat_tmp.kvm = kvm;
-                vm_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
+                kvm_get_stat_per_vm(kvm, offset, &tmp_val);
                 *val += tmp_val;
         }
         mutex_unlock(&kvm_lock);
@@ -4139,15 +4146,13 @@ static int vm_stat_clear(void *_offset, u64 val)
 {
         unsigned offset = (long)_offset;
         struct kvm *kvm;
-        struct kvm_stat_data stat_tmp = {.offset = offset};

         if (val)
                 return -EINVAL;

         mutex_lock(&kvm_lock);
         list_for_each_entry(kvm, &vm_list, vm_list) {
-                stat_tmp.kvm = kvm;
-                vm_stat_clear_per_vm((void *)&stat_tmp, 0);
+                kvm_clear_stat_per_vm(kvm, offset);
         }
         mutex_unlock(&kvm_lock);

@@ -4160,14 +4165,12 @@ static int vcpu_stat_get(void *_offset, u64 *val)
 {
         unsigned offset = (long)_offset;
         struct kvm *kvm;
-        struct kvm_stat_data stat_tmp = {.offset = offset};
         u64 tmp_val;

         *val = 0;
         mutex_lock(&kvm_lock);
         list_for_each_entry(kvm, &vm_list, vm_list) {
-                stat_tmp.kvm = kvm;
-                vcpu_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
+                kvm_get_stat_per_vcpu(kvm, offset, &tmp_val);
                 *val += tmp_val;
         }
         mutex_unlock(&kvm_lock);
@@ -4178,15 +4181,13 @@ static int vcpu_stat_clear(void *_offset, u64 val)
 {
         unsigned offset = (long)_offset;
         struct kvm *kvm;
-        struct kvm_stat_data stat_tmp = {.offset = offset};

         if (val)
                 return -EINVAL;

         mutex_lock(&kvm_lock);
         list_for_each_entry(kvm, &vm_list, vm_list) {
-                stat_tmp.kvm = kvm;
-                vcpu_stat_clear_per_vm((void *)&stat_tmp, 0);
+                kvm_clear_stat_per_vcpu(kvm, offset);
         }
         mutex_unlock(&kvm_lock);

@@ -4259,9 +4260,8 @@ static void kvm_init_debug(void)

         kvm_debugfs_num_entries = 0;
         for (p = debugfs_entries; p->name; ++p, kvm_debugfs_num_entries++) {
-                int mode = p->mode ? p->mode : 0644;
-                debugfs_create_file(p->name, mode, kvm_debugfs_dir,
-                                    (void *)(long)p->offset,
+                debugfs_create_file(p->name, KVM_DBGFS_GET_MODE(p),
+                                    kvm_debugfs_dir, (void *)(long)p->offset,
                                     stat_fops[p->kind]);
         }
 }
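
For readers unfamiliar with the offset arithmetic above (*val = *(ulong *)((void *)kvm + offset)): each debugfs_entries[] element stores a statistic as a byte offset into struct kvm or struct kvm_vcpu. The table itself is defined per architecture and is not part of this patch; the following sketch is illustrative only, with stat names borrowed from x86:

```c
/* Illustrative sketch, not part of this commit: debugfs_entries[] is
 * defined per architecture (e.g. arch/x86/kvm/x86.c) and records each
 * statistic as an offset into the owning structure. */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "remote_tlb_flush", offsetof(struct kvm, stat.remote_tlb_flush),
	  KVM_STAT_VM },
	{ "halt_exits", offsetof(struct kvm_vcpu, stat.halt_exits),
	  KVM_STAT_VCPU },
	{ NULL }   /* the creation loops stop at the first entry with no name */
};

/* A KVM_STAT_VM entry is then read by adding its offset to the struct kvm
 * pointer, exactly as kvm_get_stat_per_vm() does above:
 *     *val = *(ulong *)((void *)kvm + offset);
 */
```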