Diffstat (limited to 'include')
277 files changed, 6834 insertions, 2853 deletions
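One API change in this patch is worth a quick illustration before the raw diff: the acpi_bus.h hunk drops the fixed-size acpi_hardware_id/acpi_unique_id arrays, keeps every _HID/_CID string on a new struct acpi_hardware_id list hung off pnp.ids, and turns acpi_device_hid() from a macro into a real function. A minimal sketch of how a consumer could walk that list follows; it is not part of the patch, and the function name, the pr_info logging and the exact includes are illustrative assumptions.

/* Illustrative sketch only -- not taken from this patch. */
#include <linux/kernel.h>
#include <linux/list.h>
#include <acpi/acpi_bus.h>

static void example_dump_acpi_ids(struct acpi_device *adev)
{
	struct acpi_hardware_id *hwid;

	/* acpi_device_hid() is now a function returning char * */
	pr_info("ACPI device %s, primary id %s\n",
		acpi_device_bid(adev), acpi_device_hid(adev));

	/* All IDs (_HID plus any _CIDs) now live on the pnp.ids list */
	list_for_each_entry(hwid, &adev->pnp.ids, list)
		pr_info("  id: %s\n", hwid->id);
}

The old code would instead have read the fixed pnp.hardware_id array through the removed acpi_device_hid() macro, as shown in the acpi_bus.h hunk below.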
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 1b3b36068ca5..3cd9ccdcbd8f 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -30,8 +30,6 @@ #include <acpi/acpi.h> -#define PREFIX "ACPI: " - /* TBD: Make dynamic */ #define ACPI_MAX_HANDLES 10 struct acpi_handle_list { @@ -72,7 +70,6 @@ enum acpi_bus_device_type { ACPI_BUS_TYPE_POWER, ACPI_BUS_TYPE_PROCESSOR, ACPI_BUS_TYPE_THERMAL, - ACPI_BUS_TYPE_SYSTEM, ACPI_BUS_TYPE_POWER_BUTTON, ACPI_BUS_TYPE_SLEEP_BUTTON, ACPI_BUS_DEVICE_TYPE_COUNT @@ -89,7 +86,6 @@ struct acpi_device; typedef int (*acpi_op_add) (struct acpi_device * device); typedef int (*acpi_op_remove) (struct acpi_device * device, int type); typedef int (*acpi_op_start) (struct acpi_device * device); -typedef int (*acpi_op_stop) (struct acpi_device * device, int type); typedef int (*acpi_op_suspend) (struct acpi_device * device, pm_message_t state); typedef int (*acpi_op_resume) (struct acpi_device * device); @@ -106,7 +102,6 @@ struct acpi_device_ops { acpi_op_add add; acpi_op_remove remove; acpi_op_start start; - acpi_op_stop stop; acpi_op_suspend suspend; acpi_op_resume resume; acpi_op_bind bind; @@ -146,10 +141,7 @@ struct acpi_device_status { struct acpi_device_flags { u32 dynamic_status:1; - u32 hardware_id:1; - u32 compatible_ids:1; u32 bus_address:1; - u32 unique_id:1; u32 removable:1; u32 ejectable:1; u32 lockable:1; @@ -158,7 +150,7 @@ struct acpi_device_flags { u32 performance_manageable:1; u32 wake_capable:1; /* Wakeup(_PRW) supported? */ u32 force_power_state:1; - u32 reserved:19; + u32 reserved:22; }; /* File System */ @@ -173,25 +165,26 @@ struct acpi_device_dir { typedef char acpi_bus_id[8]; typedef unsigned long acpi_bus_address; -typedef char acpi_hardware_id[15]; -typedef char acpi_unique_id[9]; typedef char acpi_device_name[40]; typedef char acpi_device_class[20]; +struct acpi_hardware_id { + struct list_head list; + char *id; +}; + struct acpi_device_pnp { acpi_bus_id bus_id; /* Object name */ acpi_bus_address bus_address; /* _ADR */ - acpi_hardware_id hardware_id; /* _HID */ - struct acpi_compatible_id_list *cid_list; /* _CIDs */ - acpi_unique_id unique_id; /* _UID */ + char *unique_id; /* _UID */ + struct list_head ids; /* _HID and _CIDs */ acpi_device_name device_name; /* Driver-determined */ acpi_device_class device_class; /* " */ }; #define acpi_device_bid(d) ((d)->pnp.bus_id) #define acpi_device_adr(d) ((d)->pnp.bus_address) -#define acpi_device_hid(d) ((d)->pnp.hardware_id) -#define acpi_device_uid(d) ((d)->pnp.unique_id) +char *acpi_device_hid(struct acpi_device *device); #define acpi_device_name(d) ((d)->pnp.device_name) #define acpi_device_class(d) ((d)->pnp.device_class) @@ -268,7 +261,8 @@ struct acpi_device_wakeup { /* Device */ struct acpi_device { - acpi_handle handle; + int device_type; + acpi_handle handle; /* no handle for fixed hardware */ struct acpi_device *parent; struct list_head children; struct list_head node; @@ -314,7 +308,7 @@ struct acpi_bus_event { extern struct kobject *acpi_kobj; extern int acpi_bus_generate_netlink_event(const char*, const char*, u8, int); -void acpi_bus_private_data_handler(acpi_handle, u32, void *); +void acpi_bus_private_data_handler(acpi_handle, void *); int acpi_bus_get_private_data(acpi_handle, void **); extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32); extern int register_acpi_notifier(struct notifier_block *); @@ -327,7 +321,9 @@ extern void unregister_acpi_bus_notifier(struct notifier_block *nb); */ int acpi_bus_get_device(acpi_handle handle, 
struct acpi_device **device); -void acpi_bus_data_handler(acpi_handle handle, u32 function, void *context); +void acpi_bus_data_handler(acpi_handle handle, void *context); +acpi_status acpi_bus_get_status_handle(acpi_handle handle, + unsigned long long *sta); int acpi_bus_get_status(struct acpi_device *device); int acpi_bus_get_power(acpi_handle handle, int *state); int acpi_bus_set_power(acpi_handle handle, int state); diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h index ab0b85cf21f3..eb0e7189075f 100644 --- a/include/acpi/acpiosxf.h +++ b/include/acpi/acpiosxf.h @@ -245,6 +245,9 @@ acpi_status acpi_osi_invalidate(char* interface); acpi_status acpi_os_validate_address(u8 space_id, acpi_physical_address address, acpi_size length, char *name); +acpi_status +acpi_os_invalidate_address(u8 space_id, acpi_physical_address address, + acpi_size length); u64 acpi_os_get_timer(void); diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index 82ec6a3c0500..e723b0fd8e41 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -47,7 +47,7 @@ /* Current ACPICA subsystem version in YYYYMMDD format */ -#define ACPI_CA_VERSION 0x20090521 +#define ACPI_CA_VERSION 0x20090903 #include "actypes.h" #include "actbl.h" @@ -64,6 +64,7 @@ extern u8 acpi_gbl_enable_interpreter_slack; extern u8 acpi_gbl_all_methods_serialized; extern u8 acpi_gbl_create_osi_method; extern u8 acpi_gbl_leave_wake_gpes_disabled; +extern u8 acpi_gbl_use_default_register_widths; extern acpi_name acpi_gbl_trace_method_name; extern u32 acpi_gbl_trace_flags; @@ -199,7 +200,8 @@ acpi_evaluate_object_typed(acpi_handle object, acpi_object_type return_type); acpi_status -acpi_get_object_info(acpi_handle handle, struct acpi_buffer *return_buffer); +acpi_get_object_info(acpi_handle handle, + struct acpi_device_info **return_buffer); acpi_status acpi_install_method(u8 *buffer); @@ -359,9 +361,9 @@ acpi_status acpi_set_firmware_waking_vector(u32 physical_address); acpi_status acpi_set_firmware_waking_vector64(u64 physical_address); #endif -acpi_status acpi_read(u32 *value, struct acpi_generic_address *reg); +acpi_status acpi_read(u64 *value, struct acpi_generic_address *reg); -acpi_status acpi_write(u32 value, struct acpi_generic_address *reg); +acpi_status acpi_write(u64 value, struct acpi_generic_address *reg); acpi_status acpi_get_sleep_type_data(u8 sleep_state, u8 * slp_typ_a, u8 * slp_typ_b); diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h index 222733d01f36..1b6587952604 100644 --- a/include/acpi/actbl.h +++ b/include/acpi/actbl.h @@ -44,9 +44,23 @@ #ifndef __ACTBL_H__ #define __ACTBL_H__ +/******************************************************************************* + * + * Fundamental ACPI tables + * + * This file contains definitions for the ACPI tables that are directly consumed + * by ACPICA. All other tables are consumed by the OS-dependent ACPI-related + * device drivers and other OS support code. + * + * The RSDP and FACS do not use the common ACPI table header. All other ACPI + * tables use the header. + * + ******************************************************************************/ + /* - * Values for description table header signatures. Useful because they make - * it more difficult to inadvertently type in the wrong signature. + * Values for description table header signatures for tables defined in this + * file. Useful because they make it more difficult to inadvertently type in + * the wrong signature. 
*/ #define ACPI_SIG_DSDT "DSDT" /* Differentiated System Description Table */ #define ACPI_SIG_FADT "FACP" /* Fixed ACPI Description Table */ @@ -65,11 +79,6 @@ #pragma pack(1) /* - * These are the ACPI tables that are directly consumed by the subsystem. - * - * The RSDP and FACS do not use the common ACPI table header. All other ACPI - * tables use the header. - * * Note about bitfields: The u8 type is used for bitfields in ACPI tables. * This is the only type that is even remotely portable. Anything else is not * portable, so do not use any other bitfield types. @@ -77,9 +86,8 @@ /******************************************************************************* * - * ACPI Table Header. This common header is used by all tables except the - * RSDP and FACS. The define is used for direct inclusion of header into - * other ACPI tables + * Master ACPI Table Header. This common header is used by all ACPI tables + * except the RSDP and FACS. * ******************************************************************************/ @@ -95,13 +103,16 @@ struct acpi_table_header { u32 asl_compiler_revision; /* ASL compiler version */ }; -/* +/******************************************************************************* + * * GAS - Generic Address Structure (ACPI 2.0+) * * Note: Since this structure is used in the ACPI tables, it is byte aligned. - * If misalignment is not supported, access to the Address field must be - * performed with care. - */ + * If misaliged access is not supported by the hardware, accesses to the + * 64-bit Address field must be performed with care. + * + ******************************************************************************/ + struct acpi_generic_address { u8 space_id; /* Address space where struct or register exists */ u8 bit_width; /* Size in bits of given register */ @@ -113,6 +124,7 @@ struct acpi_generic_address { /******************************************************************************* * * RSDP - Root System Description Pointer (Signature is "RSD PTR ") + * Version 2 * ******************************************************************************/ @@ -133,6 +145,7 @@ struct acpi_table_rsdp { /******************************************************************************* * * RSDT/XSDT - Root System Description Tables + * Version 1 (both) * ******************************************************************************/ @@ -161,21 +174,29 @@ struct acpi_table_facs { u32 flags; u64 xfirmware_waking_vector; /* 64-bit version of the Firmware Waking Vector (ACPI 2.0+) */ u8 version; /* Version of this table (ACPI 2.0+) */ - u8 reserved[31]; /* Reserved, must be zero */ + u8 reserved[3]; /* Reserved, must be zero */ + u32 ospm_flags; /* Flags to be set by OSPM (ACPI 4.0) */ + u8 reserved1[24]; /* Reserved, must be zero */ }; -/* Flag macros */ +/* Masks for global_lock flag field above */ -#define ACPI_FACS_S4_BIOS_PRESENT (1) /* 00: S4BIOS support is present */ +#define ACPI_GLOCK_PENDING (1) /* 00: Pending global lock ownership */ +#define ACPI_GLOCK_OWNED (1<<1) /* 01: Global lock is owned */ -/* Global lock flags */ +/* Masks for Flags field above */ -#define ACPI_GLOCK_PENDING 0x01 /* 00: Pending global lock ownership */ -#define ACPI_GLOCK_OWNED 0x02 /* 01: Global lock is owned */ +#define ACPI_FACS_S4_BIOS_PRESENT (1) /* 00: S4BIOS support is present */ +#define ACPI_FACS_64BIT_WAKE (1<<1) /* 01: 64-bit wake vector supported (ACPI 4.0) */ + +/* Masks for ospm_flags field above */ + +#define ACPI_FACS_64BIT_ENVIRONMENT (1) /* 00: 64-bit wake environment is 
required (ACPI 4.0) */ /******************************************************************************* * * FADT - Fixed ACPI Description Table (Signature "FACP") + * Version 4 * ******************************************************************************/ @@ -236,7 +257,7 @@ struct acpi_table_fadt { struct acpi_generic_address xgpe1_block; /* 64-bit Extended General Purpose Event 1 Reg Blk address */ }; -/* FADT Boot Architecture Flags (boot_flags) */ +/* Masks for FADT Boot Architecture Flags (boot_flags) */ #define ACPI_FADT_LEGACY_DEVICES (1) /* 00: [V2] System has LPC or ISA bus devices */ #define ACPI_FADT_8042 (1<<1) /* 01: [V3] System has an 8042 controller on port 60/64 */ @@ -246,7 +267,7 @@ struct acpi_table_fadt { #define FADT2_REVISION_ID 3 -/* FADT flags */ +/* Masks for FADT flags */ #define ACPI_FADT_WBINVD (1) /* 00: [V1] The wbinvd instruction works properly */ #define ACPI_FADT_WBINVD_FLUSH (1<<1) /* 01: [V1] wbinvd flushes but does not invalidate caches */ @@ -269,7 +290,7 @@ struct acpi_table_fadt { #define ACPI_FADT_APIC_CLUSTER (1<<18) /* 18: [V4] All local APICs must use cluster model (ACPI 3.0) */ #define ACPI_FADT_APIC_PHYSICAL (1<<19) /* 19: [V4] All local x_aPICs must use physical dest mode (ACPI 3.0) */ -/* FADT Prefered Power Management Profiles */ +/* Values for preferred_profile (Prefered Power Management Profiles) */ enum acpi_prefered_pm_profiles { PM_UNSPECIFIED = 0, @@ -287,14 +308,16 @@ enum acpi_prefered_pm_profiles { #define ACPI_FADT_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_fadt, f) +/* + * Internal table-related structures + */ union acpi_name_union { u32 integer; char ascii[4]; }; -/* - * Internal ACPI Table Descriptor. One per ACPI table - */ +/* Internal ACPI Table Descriptor. One per ACPI table. */ + struct acpi_table_desc { acpi_physical_address address; struct acpi_table_header *pointer; @@ -304,7 +327,7 @@ struct acpi_table_desc { u8 flags; }; -/* Flags for above */ +/* Masks for Flags field above */ #define ACPI_TABLE_ORIGIN_UNKNOWN (0) #define ACPI_TABLE_ORIGIN_MAPPED (1) @@ -318,5 +341,6 @@ struct acpi_table_desc { */ #include <acpi/actbl1.h> +#include <acpi/actbl2.h> #endif /* __ACTBL_H__ */ diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h index 59ade0752473..0b9b430b092b 100644 --- a/include/acpi/actbl1.h +++ b/include/acpi/actbl1.h @@ -46,41 +46,31 @@ /******************************************************************************* * - * Additional ACPI Tables + * Additional ACPI Tables (1) * * These tables are not consumed directly by the ACPICA subsystem, but are * included here to support device drivers and the AML disassembler. * + * The tables in this file are fully defined within the ACPI specification. + * ******************************************************************************/ /* - * Values for description table header signatures. Useful because they make - * it more difficult to inadvertently type in the wrong signature. + * Values for description table header signatures for tables defined in this + * file. Useful because they make it more difficult to inadvertently type in + * the wrong signature. */ -#define ACPI_SIG_ASF "ASF!" 
/* Alert Standard Format table */ #define ACPI_SIG_BERT "BERT" /* Boot Error Record Table */ -#define ACPI_SIG_BOOT "BOOT" /* Simple Boot Flag Table */ #define ACPI_SIG_CPEP "CPEP" /* Corrected Platform Error Polling table */ -#define ACPI_SIG_DBGP "DBGP" /* Debug Port table */ -#define ACPI_SIG_DMAR "DMAR" /* DMA Remapping table */ #define ACPI_SIG_ECDT "ECDT" /* Embedded Controller Boot Resources Table */ #define ACPI_SIG_EINJ "EINJ" /* Error Injection table */ #define ACPI_SIG_ERST "ERST" /* Error Record Serialization Table */ #define ACPI_SIG_HEST "HEST" /* Hardware Error Source Table */ -#define ACPI_SIG_HPET "HPET" /* High Precision Event Timer table */ -#define ACPI_SIG_IBFT "IBFT" /* i_sCSI Boot Firmware Table */ #define ACPI_SIG_MADT "APIC" /* Multiple APIC Description Table */ -#define ACPI_SIG_MCFG "MCFG" /* PCI Memory Mapped Configuration table */ +#define ACPI_SIG_MSCT "MSCT" /* Maximum System Characteristics Table */ #define ACPI_SIG_SBST "SBST" /* Smart Battery Specification Table */ -#define ACPI_SIG_SLIC "SLIC" /* Software Licensing Description Table */ #define ACPI_SIG_SLIT "SLIT" /* System Locality Distance Information Table */ -#define ACPI_SIG_SPCR "SPCR" /* Serial Port Console Redirection table */ -#define ACPI_SIG_SPMI "SPMI" /* Server Platform Management Interface table */ #define ACPI_SIG_SRAT "SRAT" /* System Resource Affinity Table */ -#define ACPI_SIG_TCPA "TCPA" /* Trusted Computing Platform Alliance table */ -#define ACPI_SIG_UEFI "UEFI" /* Uefi Boot Optimization Table */ -#define ACPI_SIG_WDAT "WDAT" /* Watchdog Action Table */ -#define ACPI_SIG_WDRT "WDRT" /* Watchdog Resource Table */ /* * All tables must be byte-packed to match the ACPI specification, since @@ -94,14 +84,20 @@ * portable, so do not use any other bitfield types. */ -/* Common Subtable header (used in MADT, SRAT, etc.) */ +/******************************************************************************* + * + * Common subtable headers + * + ******************************************************************************/ + +/* Generic subtable header (used in MADT, SRAT, etc.) 
*/ struct acpi_subtable_header { u8 type; u8 length; }; -/* Common Subtable header for WHEA tables (EINJ, ERST, WDAT) */ +/* Subtable header for WHEA tables (EINJ, ERST, WDAT) */ struct acpi_whea_header { u8 action; @@ -115,116 +111,8 @@ struct acpi_whea_header { /******************************************************************************* * - * ASF - Alert Standard Format table (Signature "ASF!") - * - * Conforms to the Alert Standard Format Specification V2.0, 23 April 2003 - * - ******************************************************************************/ - -struct acpi_table_asf { - struct acpi_table_header header; /* Common ACPI table header */ -}; - -/* ASF subtable header */ - -struct acpi_asf_header { - u8 type; - u8 reserved; - u16 length; -}; - -/* Values for Type field above */ - -enum acpi_asf_type { - ACPI_ASF_TYPE_INFO = 0, - ACPI_ASF_TYPE_ALERT = 1, - ACPI_ASF_TYPE_CONTROL = 2, - ACPI_ASF_TYPE_BOOT = 3, - ACPI_ASF_TYPE_ADDRESS = 4, - ACPI_ASF_TYPE_RESERVED = 5 -}; - -/* - * ASF subtables - */ - -/* 0: ASF Information */ - -struct acpi_asf_info { - struct acpi_asf_header header; - u8 min_reset_value; - u8 min_poll_interval; - u16 system_id; - u32 mfg_id; - u8 flags; - u8 reserved2[3]; -}; - -/* 1: ASF Alerts */ - -struct acpi_asf_alert { - struct acpi_asf_header header; - u8 assert_mask; - u8 deassert_mask; - u8 alerts; - u8 data_length; -}; - -struct acpi_asf_alert_data { - u8 address; - u8 command; - u8 mask; - u8 value; - u8 sensor_type; - u8 type; - u8 offset; - u8 source_type; - u8 severity; - u8 sensor_number; - u8 entity; - u8 instance; -}; - -/* 2: ASF Remote Control */ - -struct acpi_asf_remote { - struct acpi_asf_header header; - u8 controls; - u8 data_length; - u16 reserved2; -}; - -struct acpi_asf_control_data { - u8 function; - u8 address; - u8 command; - u8 value; -}; - -/* 3: ASF RMCP Boot Options */ - -struct acpi_asf_rmcp { - struct acpi_asf_header header; - u8 capabilities[7]; - u8 completion_code; - u32 enterprise_id; - u8 command; - u16 parameter; - u16 boot_options; - u16 oem_parameters; -}; - -/* 4: ASF Address */ - -struct acpi_asf_address { - struct acpi_asf_header header; - u8 eprom_address; - u8 devices; -}; - -/******************************************************************************* - * - * BERT - Boot Error Record Table + * BERT - Boot Error Record Table (ACPI 4.0) + * Version 1 * ******************************************************************************/ @@ -234,38 +122,43 @@ struct acpi_table_bert { u64 address; /* Physical addresss of the error region */ }; -/* Boot Error Region */ +/* Boot Error Region (not a subtable, pointed to by Address field above) */ struct acpi_bert_region { - u32 block_status; - u32 raw_data_offset; - u32 raw_data_length; - u32 data_length; - u32 error_severity; + u32 block_status; /* Type of error information */ + u32 raw_data_offset; /* Offset to raw error data */ + u32 raw_data_length; /* Length of raw error data */ + u32 data_length; /* Length of generic error data */ + u32 error_severity; /* Severity code */ }; -/* block_status Flags */ +/* Values for block_status flags above */ #define ACPI_BERT_UNCORRECTABLE (1) -#define ACPI_BERT_CORRECTABLE (2) -#define ACPI_BERT_MULTIPLE_UNCORRECTABLE (4) -#define ACPI_BERT_MULTIPLE_CORRECTABLE (8) +#define ACPI_BERT_CORRECTABLE (1<<1) +#define ACPI_BERT_MULTIPLE_UNCORRECTABLE (1<<2) +#define ACPI_BERT_MULTIPLE_CORRECTABLE (1<<3) +#define ACPI_BERT_ERROR_ENTRY_COUNT (0xFF<<4) /* 8 bits, error count */ 
-/******************************************************************************* - * - * BOOT - Simple Boot Flag Table - * - ******************************************************************************/ +/* Values for error_severity above */ -struct acpi_table_boot { - struct acpi_table_header header; /* Common ACPI table header */ - u8 cmos_index; /* Index in CMOS RAM for the boot register */ - u8 reserved[3]; +enum acpi_bert_error_severity { + ACPI_BERT_ERROR_CORRECTABLE = 0, + ACPI_BERT_ERROR_FATAL = 1, + ACPI_BERT_ERROR_CORRECTED = 2, + ACPI_BERT_ERROR_NONE = 3, + ACPI_BERT_ERROR_RESERVED = 4 /* 4 and greater are reserved */ }; +/* + * Note: The generic error data that follows the error_severity field above + * uses the struct acpi_hest_generic_data defined under the HEST table below + */ + /******************************************************************************* * - * CPEP - Corrected Platform Error Polling table + * CPEP - Corrected Platform Error Polling table (ACPI 4.0) + * Version 1 * ******************************************************************************/ @@ -277,8 +170,7 @@ struct acpi_table_cpep { /* Subtable */ struct acpi_cpep_polling { - u8 type; - u8 length; + struct acpi_subtable_header header; u8 id; /* Processor ID */ u8 eid; /* Processor EID */ u32 interval; /* Polling interval (msec) */ @@ -286,124 +178,8 @@ struct acpi_cpep_polling { /******************************************************************************* * - * DBGP - Debug Port table - * - ******************************************************************************/ - -struct acpi_table_dbgp { - struct acpi_table_header header; /* Common ACPI table header */ - u8 type; /* 0=full 16550, 1=subset of 16550 */ - u8 reserved[3]; - struct acpi_generic_address debug_port; -}; - -/******************************************************************************* - * - * DMAR - DMA Remapping table - * From "Intel Virtualization Technology for Directed I/O", Sept. 
2007 - * - ******************************************************************************/ - -struct acpi_table_dmar { - struct acpi_table_header header; /* Common ACPI table header */ - u8 width; /* Host Address Width */ - u8 flags; - u8 reserved[10]; -}; - -/* Flags */ - -#define ACPI_DMAR_INTR_REMAP (1) - -/* DMAR subtable header */ - -struct acpi_dmar_header { - u16 type; - u16 length; -}; - -/* Values for subtable type in struct acpi_dmar_header */ - -enum acpi_dmar_type { - ACPI_DMAR_TYPE_HARDWARE_UNIT = 0, - ACPI_DMAR_TYPE_RESERVED_MEMORY = 1, - ACPI_DMAR_TYPE_ATSR = 2, - ACPI_DMAR_TYPE_RESERVED = 3 /* 3 and greater are reserved */ -}; - -struct acpi_dmar_device_scope { - u8 entry_type; - u8 length; - u16 reserved; - u8 enumeration_id; - u8 bus; -}; - -/* Values for entry_type in struct acpi_dmar_device_scope */ - -enum acpi_dmar_scope_type { - ACPI_DMAR_SCOPE_TYPE_NOT_USED = 0, - ACPI_DMAR_SCOPE_TYPE_ENDPOINT = 1, - ACPI_DMAR_SCOPE_TYPE_BRIDGE = 2, - ACPI_DMAR_SCOPE_TYPE_IOAPIC = 3, - ACPI_DMAR_SCOPE_TYPE_HPET = 4, - ACPI_DMAR_SCOPE_TYPE_RESERVED = 5 /* 5 and greater are reserved */ -}; - -struct acpi_dmar_pci_path { - u8 dev; - u8 fn; -}; - -/* - * DMAR Sub-tables, correspond to Type in struct acpi_dmar_header - */ - -/* 0: Hardware Unit Definition */ - -struct acpi_dmar_hardware_unit { - struct acpi_dmar_header header; - u8 flags; - u8 reserved; - u16 segment; - u64 address; /* Register Base Address */ -}; - -/* Flags */ - -#define ACPI_DMAR_INCLUDE_ALL (1) - -/* 1: Reserved Memory Defininition */ - -struct acpi_dmar_reserved_memory { - struct acpi_dmar_header header; - u16 reserved; - u16 segment; - u64 base_address; /* 4_k aligned base address */ - u64 end_address; /* 4_k aligned limit address */ -}; - -/* Flags */ - -#define ACPI_DMAR_ALLOW_ALL (1) - - -/* 2: Root Port ATS Capability Reporting Structure */ - -struct acpi_dmar_atsr { - struct acpi_dmar_header header; - u8 flags; - u8 reserved; - u16 segment; -}; - -/* Flags */ - -#define ACPI_DMAR_ALL_PORTS (1) - -/******************************************************************************* - * * ECDT - Embedded Controller Boot Resources Table + * Version 1 * ******************************************************************************/ @@ -418,14 +194,16 @@ struct acpi_table_ecdt { /******************************************************************************* * - * EINJ - Error Injection Table + * EINJ - Error Injection Table (ACPI 4.0) + * Version 1 * ******************************************************************************/ struct acpi_table_einj { struct acpi_table_header header; /* Common ACPI table header */ u32 header_length; - u32 reserved; + u8 flags; + u8 reserved[3]; u32 entries; }; @@ -435,6 +213,10 @@ struct acpi_einj_entry { struct acpi_whea_header whea_header; /* Common header for WHEA tables */ }; +/* Masks for Flags field above */ + +#define ACPI_EINJ_PRESERVE (1) + /* Values for Action field above */ enum acpi_einj_actions { @@ -470,9 +252,34 @@ struct acpi_einj_trigger { u32 entry_count; }; +/* Command status return values */ + +enum acpi_einj_command_status { + ACPI_EINJ_SUCCESS = 0, + ACPI_EINJ_FAILURE = 1, + ACPI_EINJ_INVALID_ACCESS = 2, + ACPI_EINJ_STATUS_RESERVED = 3 /* 3 and greater are reserved */ +}; + +/* Error types returned from ACPI_EINJ_GET_ERROR_TYPE (bitfield) */ + +#define ACPI_EINJ_PROCESSOR_CORRECTABLE (1) +#define ACPI_EINJ_PROCESSOR_UNCORRECTABLE (1<<1) +#define ACPI_EINJ_PROCESSOR_FATAL (1<<2) +#define ACPI_EINJ_MEMORY_CORRECTABLE (1<<3) +#define ACPI_EINJ_MEMORY_UNCORRECTABLE 
(1<<4) +#define ACPI_EINJ_MEMORY_FATAL (1<<5) +#define ACPI_EINJ_PCIX_CORRECTABLE (1<<6) +#define ACPI_EINJ_PCIX_UNCORRECTABLE (1<<7) +#define ACPI_EINJ_PCIX_FATAL (1<<8) +#define ACPI_EINJ_PLATFORM_CORRECTABLE (1<<9) +#define ACPI_EINJ_PLATFORM_UNCORRECTABLE (1<<10) +#define ACPI_EINJ_PLATFORM_FATAL (1<<11) + /******************************************************************************* * - * ERST - Error Record Serialization Table + * ERST - Error Record Serialization Table (ACPI 4.0) + * Version 1 * ******************************************************************************/ @@ -489,19 +296,23 @@ struct acpi_erst_entry { struct acpi_whea_header whea_header; /* Common header for WHEA tables */ }; +/* Masks for Flags field above */ + +#define ACPI_ERST_PRESERVE (1) + /* Values for Action field above */ enum acpi_erst_actions { - ACPI_ERST_BEGIN_WRITE_OPERATION = 0, - ACPI_ERST_BEGIN_READ_OPERATION = 1, - ACPI_ERST_BETGIN_CLEAR_OPERATION = 2, - ACPI_ERST_END_OPERATION = 3, + ACPI_ERST_BEGIN_WRITE = 0, + ACPI_ERST_BEGIN_READ = 1, + ACPI_ERST_BEGIN_CLEAR = 2, + ACPI_ERST_END = 3, ACPI_ERST_SET_RECORD_OFFSET = 4, ACPI_ERST_EXECUTE_OPERATION = 5, ACPI_ERST_CHECK_BUSY_STATUS = 6, ACPI_ERST_GET_COMMAND_STATUS = 7, - ACPI_ERST_GET_RECORD_IDENTIFIER = 8, - ACPI_ERST_SET_RECORD_IDENTIFIER = 9, + ACPI_ERST_GET_RECORD_ID = 8, + ACPI_ERST_SET_RECORD_ID = 9, ACPI_ERST_GET_RECORD_COUNT = 10, ACPI_ERST_BEGIN_DUMMY_WRIITE = 11, ACPI_ERST_NOT_USED = 12, @@ -536,9 +347,29 @@ enum acpi_erst_instructions { ACPI_ERST_INSTRUCTION_RESERVED = 19 /* 19 and greater are reserved */ }; +/* Command status return values */ + +enum acpi_erst_command_status { + ACPI_ERST_SUCESS = 0, + ACPI_ERST_NO_SPACE = 1, + ACPI_ERST_NOT_AVAILABLE = 2, + ACPI_ERST_FAILURE = 3, + ACPI_ERST_RECORD_EMPTY = 4, + ACPI_ERST_NOT_FOUND = 5, + ACPI_ERST_STATUS_RESERVED = 6 /* 6 and greater are reserved */ +}; + +/* Error Record Serialization Information */ + +struct acpi_erst_info { + u16 signature; /* Should be "ER" */ + u8 data[48]; +}; + /******************************************************************************* * - * HEST - Hardware Error Source Table + * HEST - Hardware Error Source Table (ACPI 4.0) + * Version 1 * ******************************************************************************/ @@ -551,85 +382,69 @@ struct acpi_table_hest { struct acpi_hest_header { u16 type; + u16 source_id; }; /* Values for Type field above for subtables */ enum acpi_hest_types { - ACPI_HEST_TYPE_XPF_MACHINE_CHECK = 0, - ACPI_HEST_TYPE_XPF_CORRECTED_MACHINE_CHECK = 1, - ACPI_HEST_TYPE_XPF_UNUSED = 2, - ACPI_HEST_TYPE_XPF_NON_MASKABLE_INTERRUPT = 3, - ACPI_HEST_TYPE_IPF_CORRECTED_MACHINE_CHECK = 4, - ACPI_HEST_TYPE_IPF_CORRECTED_PLATFORM_ERROR = 5, + ACPI_HEST_TYPE_IA32_CHECK = 0, + ACPI_HEST_TYPE_IA32_CORRECTED_CHECK = 1, + ACPI_HEST_TYPE_IA32_NMI = 2, + ACPI_HEST_TYPE_NOT_USED3 = 3, + ACPI_HEST_TYPE_NOT_USED4 = 4, + ACPI_HEST_TYPE_NOT_USED5 = 5, ACPI_HEST_TYPE_AER_ROOT_PORT = 6, ACPI_HEST_TYPE_AER_ENDPOINT = 7, ACPI_HEST_TYPE_AER_BRIDGE = 8, - ACPI_HEST_TYPE_GENERIC_HARDWARE_ERROR_SOURCE = 9, + ACPI_HEST_TYPE_GENERIC_ERROR = 9, ACPI_HEST_TYPE_RESERVED = 10 /* 10 and greater are reserved */ }; /* - * HEST Sub-subtables + * HEST substructures contained in subtables */ -/* XPF Machine Check Error Bank */ - -struct acpi_hest_xpf_error_bank { +/* + * IA32 Error Bank(s) - Follows the struct acpi_hest_ia_machine_check and + * struct acpi_hest_ia_corrected structures. 
+ */ +struct acpi_hest_ia_error_bank { u8 bank_number; u8 clear_status_on_init; u8 status_format; - u8 config_write_enable; + u8 reserved; u32 control_register; - u64 control_init_data; + u64 control_data; u32 status_register; u32 address_register; u32 misc_register; }; -/* Generic Error Status */ - -struct acpi_hest_generic_status { - u32 block_status; - u32 raw_data_offset; - u32 raw_data_length; - u32 data_length; - u32 error_severity; -}; - -/* Generic Error Data */ - -struct acpi_hest_generic_data { - u8 section_type[16]; - u32 error_severity; - u16 revision; - u8 validation_bits; - u8 flags; - u32 error_data_length; - u8 fru_id[16]; - u8 fru_text[20]; -}; - -/* Common HEST structure for PCI/AER types below (6,7,8) */ +/* Common HEST sub-structure for PCI/AER structures below (6,7,8) */ struct acpi_hest_aer_common { - u16 source_id; - u16 config_write_enable; + u16 reserved1; u8 flags; u8 enabled; - u32 records_to_pre_allocate; + u32 records_to_preallocate; u32 max_sections_per_record; u32 bus; u16 device; u16 function; u16 device_control; - u16 reserved; - u32 uncorrectable_error_mask; - u32 uncorrectable_error_severity; - u32 correctable_error_mask; - u32 advanced_error_capabilities; + u16 reserved2; + u32 uncorrectable_mask; + u32 uncorrectable_severity; + u32 correctable_mask; + u32 advanced_capabilities; }; +/* Masks for HEST Flags fields */ + +#define ACPI_HEST_FIRMWARE_FIRST (1) +#define ACPI_HEST_GLOBAL (1<<1) + /* Hardware Error Notification */ struct acpi_hest_notify { @@ -655,71 +470,59 @@ enum acpi_hest_notify_types { ACPI_HEST_NOTIFY_RESERVED = 5 /* 5 and greater are reserved */ }; +/* Values for config_write_enable bitfield above */ + +#define ACPI_HEST_TYPE (1) +#define ACPI_HEST_POLL_INTERVAL (1<<1) +#define ACPI_HEST_POLL_THRESHOLD_VALUE (1<<2) +#define ACPI_HEST_POLL_THRESHOLD_WINDOW (1<<3) +#define ACPI_HEST_ERR_THRESHOLD_VALUE (1<<4) +#define ACPI_HEST_ERR_THRESHOLD_WINDOW (1<<5) + /* * HEST subtables - * - * From WHEA Design Document, 16 May 2007. - * Note: There is no subtable type 2 in this version of the document, - * and there are two different subtable type 3s. 
*/ - /* 0: XPF Machine Check Exception */ +/* 0: IA32 Machine Check Exception */ -struct acpi_hest_xpf_machine_check { +struct acpi_hest_ia_machine_check { struct acpi_hest_header header; - u16 source_id; - u16 config_write_enable; + u16 reserved1; u8 flags; - u8 reserved1; - u32 records_to_pre_allocate; + u8 enabled; + u32 records_to_preallocate; u32 max_sections_per_record; u64 global_capability_data; u64 global_control_data; u8 num_hardware_banks; - u8 reserved2[7]; + u8 reserved3[7]; }; -/* 1: XPF Corrected Machine Check */ +/* 1: IA32 Corrected Machine Check */ -struct acpi_table_hest_xpf_corrected { +struct acpi_hest_ia_corrected { struct acpi_hest_header header; - u16 source_id; - u16 config_write_enable; + u16 reserved1; u8 flags; u8 enabled; - u32 records_to_pre_allocate; + u32 records_to_preallocate; u32 max_sections_per_record; struct acpi_hest_notify notify; u8 num_hardware_banks; - u8 reserved[3]; + u8 reserved2[3]; }; -/* 3: XPF Non-Maskable Interrupt */ +/* 2: IA32 Non-Maskable Interrupt */ -struct acpi_hest_xpf_nmi { +struct acpi_hest_ia_nmi { struct acpi_hest_header header; - u16 source_id; u32 reserved; - u32 records_to_pre_allocate; + u32 records_to_preallocate; u32 max_sections_per_record; u32 max_raw_data_length; }; -/* 4: IPF Corrected Machine Check */ - -struct acpi_hest_ipf_corrected { - struct acpi_hest_header header; - u8 enabled; - u8 reserved; -}; - -/* 5: IPF Corrected Platform Error */ - -struct acpi_hest_ipf_corrected_platform { - struct acpi_hest_header header; - u8 enabled; - u8 reserved; -}; +/* 3,4,5: Not used */ /* 6: PCI Express Root Port AER */ @@ -741,143 +544,61 @@ struct acpi_hest_aer { struct acpi_hest_aer_bridge { struct acpi_hest_header header; struct acpi_hest_aer_common aer; - u32 secondary_uncorrectable_error_mask; - u32 secondary_uncorrectable_error_severity; - u32 secondary_advanced_capabilities; + u32 uncorrectable_mask2; + u32 uncorrectable_severity2; + u32 advanced_capabilities2; }; /* 9: Generic Hardware Error Source */ struct acpi_hest_generic { struct acpi_hest_header header; - u16 source_id; u16 related_source_id; - u8 config_write_enable; + u8 reserved; u8 enabled; - u32 records_to_pre_allocate; + u32 records_to_preallocate; u32 max_sections_per_record; u32 max_raw_data_length; struct acpi_generic_address error_status_address; struct acpi_hest_notify notify; - u32 error_status_block_length; + u32 error_block_length; }; -/******************************************************************************* - * - * HPET - High Precision Event Timer table - * - ******************************************************************************/ +/* Generic Error Status block */ -struct acpi_table_hpet { - struct acpi_table_header header; /* Common ACPI table header */ - u32 id; /* Hardware ID of event timer block */ - struct acpi_generic_address address; /* Address of event timer block */ - u8 sequence; /* HPET sequence number */ - u16 minimum_tick; /* Main counter min tick, periodic mode */ - u8 flags; +struct acpi_hest_generic_status { + u32 block_status; + u32 raw_data_offset; + u32 raw_data_length; + u32 data_length; + u32 error_severity; }; -/*! 
Flags */ +/* Values for block_status flags above */ -#define ACPI_HPET_PAGE_PROTECT (1) /* 00: No page protection */ -#define ACPI_HPET_PAGE_PROTECT_4 (1<<1) /* 01: 4KB page protected */ -#define ACPI_HPET_PAGE_PROTECT_64 (1<<2) /* 02: 64KB page protected */ +#define ACPI_HEST_UNCORRECTABLE (1) +#define ACPI_HEST_CORRECTABLE (1<<1) +#define ACPI_HEST_MULTIPLE_UNCORRECTABLE (1<<2) +#define ACPI_HEST_MULTIPLE_CORRECTABLE (1<<3) +#define ACPI_HEST_ERROR_ENTRY_COUNT (0xFF<<4) /* 8 bits, error count */ -/*! [End] no source code translation !*/ +/* Generic Error Data entry */ -/******************************************************************************* - * - * IBFT - Boot Firmware Table - * - ******************************************************************************/ - -struct acpi_table_ibft { - struct acpi_table_header header; /* Common ACPI table header */ - u8 reserved[12]; -}; - -/* IBFT common subtable header */ - -struct acpi_ibft_header { - u8 type; - u8 version; - u16 length; - u8 index; +struct acpi_hest_generic_data { + u8 section_type[16]; + u32 error_severity; + u16 revision; + u8 validation_bits; u8 flags; -}; - -/* Values for Type field above */ - -enum acpi_ibft_type { - ACPI_IBFT_TYPE_NOT_USED = 0, - ACPI_IBFT_TYPE_CONTROL = 1, - ACPI_IBFT_TYPE_INITIATOR = 2, - ACPI_IBFT_TYPE_NIC = 3, - ACPI_IBFT_TYPE_TARGET = 4, - ACPI_IBFT_TYPE_EXTENSIONS = 5, - ACPI_IBFT_TYPE_RESERVED = 6 /* 6 and greater are reserved */ -}; - -/* IBFT subtables */ - -struct acpi_ibft_control { - struct acpi_ibft_header header; - u16 extensions; - u16 initiator_offset; - u16 nic0_offset; - u16 target0_offset; - u16 nic1_offset; - u16 target1_offset; -}; - -struct acpi_ibft_initiator { - struct acpi_ibft_header header; - u8 sns_server[16]; - u8 slp_server[16]; - u8 primary_server[16]; - u8 secondary_server[16]; - u16 name_length; - u16 name_offset; -}; - -struct acpi_ibft_nic { - struct acpi_ibft_header header; - u8 ip_address[16]; - u8 subnet_mask_prefix; - u8 origin; - u8 gateway[16]; - u8 primary_dns[16]; - u8 secondary_dns[16]; - u8 dhcp[16]; - u16 vlan; - u8 mac_address[6]; - u16 pci_address; - u16 name_length; - u16 name_offset; -}; - -struct acpi_ibft_target { - struct acpi_ibft_header header; - u8 target_ip_address[16]; - u16 target_ip_socket; - u8 target_boot_lun[8]; - u8 chap_type; - u8 nic_association; - u16 target_name_length; - u16 target_name_offset; - u16 chap_name_length; - u16 chap_name_offset; - u16 chap_secret_length; - u16 chap_secret_offset; - u16 reverse_chap_name_length; - u16 reverse_chap_name_offset; - u16 reverse_chap_secret_length; - u16 reverse_chap_secret_offset; + u32 error_data_length; + u8 fru_id[16]; + u8 fru_text[20]; }; /******************************************************************************* * * MADT - Multiple APIC Description Table + * Version 3 * ******************************************************************************/ @@ -887,16 +608,16 @@ struct acpi_table_madt { u32 flags; }; -/* Flags */ +/* Masks for Flags field above */ -#define ACPI_MADT_PCAT_COMPAT (1) /* 00: System also has dual 8259s */ +#define ACPI_MADT_PCAT_COMPAT (1) /* 00: System also has dual 8259s */ /* Values for PCATCompat flag */ #define ACPI_MADT_DUAL_PIC 0 #define ACPI_MADT_MULTIPLE_APIC 1 -/* Values for subtable type in struct acpi_subtable_header */ +/* Values for MADT subtable type in struct acpi_subtable_header */ enum acpi_madt_type { ACPI_MADT_TYPE_LOCAL_APIC = 0, @@ -1007,11 +728,11 @@ struct acpi_madt_interrupt_source { u32 flags; /* Interrupt Source Flags */ }; -/* Flags 
field above */ +/* Masks for Flags field above */ #define ACPI_MADT_CPEI_OVERRIDE (1) -/* 9: Processor Local X2_APIC (07/2008) */ +/* 9: Processor Local X2APIC (ACPI 4.0) */ struct acpi_madt_local_x2apic { struct acpi_subtable_header header; @@ -1021,7 +742,7 @@ struct acpi_madt_local_x2apic { u32 uid; /* ACPI processor UID */ }; -/* 10: Local X2APIC NMI (07/2008) */ +/* 10: Local X2APIC NMI (ACPI 4.0) */ struct acpi_madt_local_x2apic_nmi { struct acpi_subtable_header header; @@ -1058,28 +779,34 @@ struct acpi_madt_local_x2apic_nmi { /******************************************************************************* * - * MCFG - PCI Memory Mapped Configuration table and sub-table + * MSCT - Maximum System Characteristics Table (ACPI 4.0) + * Version 1 * ******************************************************************************/ -struct acpi_table_mcfg { +struct acpi_table_msct { struct acpi_table_header header; /* Common ACPI table header */ - u8 reserved[8]; + u32 proximity_offset; /* Location of proximity info struct(s) */ + u32 max_proximity_domains; /* Max number of proximity domains */ + u32 max_clock_domains; /* Max number of clock domains */ + u64 max_address; /* Max physical address in system */ }; -/* Subtable */ +/* Subtable - Maximum Proximity Domain Information. Version 1 */ -struct acpi_mcfg_allocation { - u64 address; /* Base address, processor-relative */ - u16 pci_segment; /* PCI segment group number */ - u8 start_bus_number; /* Starting PCI Bus number */ - u8 end_bus_number; /* Final PCI Bus number */ - u32 reserved; +struct acpi_msct_proximity { + u8 revision; + u8 length; + u32 range_start; /* Start of domain range */ + u32 range_end; /* End of domain range */ + u32 processor_capacity; + u64 memory_capacity; /* In bytes */ }; /******************************************************************************* * * SBST - Smart Battery Specification Table + * Version 1 * ******************************************************************************/ @@ -1093,6 +820,7 @@ struct acpi_table_sbst { /******************************************************************************* * * SLIT - System Locality Distance Information Table + * Version 1 * ******************************************************************************/ @@ -1104,60 +832,8 @@ struct acpi_table_slit { /******************************************************************************* * - * SPCR - Serial Port Console Redirection table - * - ******************************************************************************/ - -struct acpi_table_spcr { - struct acpi_table_header header; /* Common ACPI table header */ - u8 interface_type; /* 0=full 16550, 1=subset of 16550 */ - u8 reserved[3]; - struct acpi_generic_address serial_port; - u8 interrupt_type; - u8 pc_interrupt; - u32 interrupt; - u8 baud_rate; - u8 parity; - u8 stop_bits; - u8 flow_control; - u8 terminal_type; - u8 reserved1; - u16 pci_device_id; - u16 pci_vendor_id; - u8 pci_bus; - u8 pci_device; - u8 pci_function; - u32 pci_flags; - u8 pci_segment; - u32 reserved2; -}; - -/******************************************************************************* - * - * SPMI - Server Platform Management Interface table - * - ******************************************************************************/ - -struct acpi_table_spmi { - struct acpi_table_header header; /* Common ACPI table header */ - u8 reserved; - u8 interface_type; - u16 spec_revision; /* Version of IPMI */ - u8 interrupt_type; - u8 gpe_number; /* GPE assigned */ - u8 reserved1; - u8 pci_device_flag; 
- u32 interrupt; - struct acpi_generic_address ipmi_register; - u8 pci_segment; - u8 pci_bus; - u8 pci_device; - u8 pci_function; -}; - -/******************************************************************************* - * * SRAT - System Resource Affinity Table + * Version 3 * ******************************************************************************/ @@ -1192,6 +868,10 @@ struct acpi_srat_cpu_affinity { u32 reserved; /* Reserved, must be zero */ }; +/* Flags */ + +#define ACPI_SRAT_CPU_USE_AFFINITY (1) /* 00: Use affinity structure */ + /* 1: Memory Affinity */ struct acpi_srat_mem_affinity { @@ -1211,7 +891,7 @@ struct acpi_srat_mem_affinity { #define ACPI_SRAT_MEM_HOT_PLUGGABLE (1<<1) /* 01: Memory region is hot pluggable */ #define ACPI_SRAT_MEM_NON_VOLATILE (1<<2) /* 02: Memory region is non-volatile */ -/* 2: Processor Local X2_APIC Affinity (07/2008) */ +/* 2: Processor Local X2_APIC Affinity (ACPI 4.0) */ struct acpi_srat_x2apic_cpu_affinity { struct acpi_subtable_header header; @@ -1219,122 +899,14 @@ struct acpi_srat_x2apic_cpu_affinity { u32 proximity_domain; u32 apic_id; u32 flags; + u32 clock_domain; + u32 reserved2; }; /* Flags for struct acpi_srat_cpu_affinity and struct acpi_srat_x2apic_cpu_affinity */ #define ACPI_SRAT_CPU_ENABLED (1) /* 00: Use affinity structure */ -/******************************************************************************* - * - * TCPA - Trusted Computing Platform Alliance table - * - ******************************************************************************/ - -struct acpi_table_tcpa { - struct acpi_table_header header; /* Common ACPI table header */ - u16 reserved; - u32 max_log_length; /* Maximum length for the event log area */ - u64 log_address; /* Address of the event log area */ -}; - -/******************************************************************************* - * - * UEFI - UEFI Boot optimization Table - * - ******************************************************************************/ - -struct acpi_table_uefi { - struct acpi_table_header header; /* Common ACPI table header */ - u8 identifier[16]; /* UUID identifier */ - u16 data_offset; /* Offset of remaining data in table */ - u8 data; -}; - -/******************************************************************************* - * - * WDAT - Watchdog Action Table - * - ******************************************************************************/ - -struct acpi_table_wdat { - struct acpi_table_header header; /* Common ACPI table header */ - u32 header_length; /* Watchdog Header Length */ - u16 pci_segment; /* PCI Segment number */ - u8 pci_bus; /* PCI Bus number */ - u8 pci_device; /* PCI Device number */ - u8 pci_function; /* PCI Function number */ - u8 reserved[3]; - u32 timer_period; /* Period of one timer count (msec) */ - u32 max_count; /* Maximum counter value supported */ - u32 min_count; /* Minimum counter value */ - u8 flags; - u8 reserved2[3]; - u32 entries; /* Number of watchdog entries that follow */ -}; - -/* WDAT Instruction Entries (actions) */ - -struct acpi_wdat_entry { - struct acpi_whea_header whea_header; /* Common header for WHEA tables */ -}; - -/* Values for Action field above */ - -enum acpi_wdat_actions { - ACPI_WDAT_RESET = 1, - ACPI_WDAT_GET_CURRENT_COUNTDOWN = 4, - ACPI_WDAT_GET_COUNTDOWN = 5, - ACPI_WDAT_SET_COUNTDOWN = 6, - ACPI_WDAT_GET_RUNNING_STATE = 8, - ACPI_WDAT_SET_RUNNING_STATE = 9, - ACPI_WDAT_GET_STOPPED_STATE = 10, - ACPI_WDAT_SET_STOPPED_STATE = 11, - ACPI_WDAT_GET_REBOOT = 16, - ACPI_WDAT_SET_REBOOT = 17, - ACPI_WDAT_GET_SHUTDOWN = 18, 
- ACPI_WDAT_SET_SHUTDOWN = 19, - ACPI_WDAT_GET_STATUS = 32, - ACPI_WDAT_SET_STATUS = 33, - ACPI_WDAT_ACTION_RESERVED = 34 /* 34 and greater are reserved */ -}; - -/* Values for Instruction field above */ - -enum acpi_wdat_instructions { - ACPI_WDAT_READ_VALUE = 0, - ACPI_WDAT_READ_COUNTDOWN = 1, - ACPI_WDAT_WRITE_VALUE = 2, - ACPI_WDAT_WRITE_COUNTDOWN = 3, - ACPI_WDAT_INSTRUCTION_RESERVED = 4, /* 4 and greater are reserved */ - ACPI_WDAT_PRESERVE_REGISTER = 0x80 /* Except for this value */ -}; - -/******************************************************************************* - * - * WDRT - Watchdog Resource Table - * - ******************************************************************************/ - -struct acpi_table_wdrt { - struct acpi_table_header header; /* Common ACPI table header */ - u32 header_length; /* Watchdog Header Length */ - u8 pci_segment; /* PCI Segment number */ - u8 pci_bus; /* PCI Bus number */ - u8 pci_device; /* PCI Device number */ - u8 pci_function; /* PCI Function number */ - u32 timer_period; /* Period of one timer count (msec) */ - u32 max_count; /* Maximum counter value supported */ - u32 min_count; /* Minimum counter value */ - u8 flags; - u8 reserved[3]; - u32 entries; /* Number of watchdog entries that follow */ -}; - -/* Flags */ - -#define ACPI_WDRT_TIMER_ENABLED (1) /* 00: Timer enabled */ - /* Reset to default packing */ #pragma pack() diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h new file mode 100644 index 000000000000..6f3dce9991e1 --- /dev/null +++ b/include/acpi/actbl2.h @@ -0,0 +1,868 @@ +#ifndef __ACTBL2_H__ +#define __ACTBL2_H__ + +/******************************************************************************* + * + * Additional ACPI Tables (2) + * + * These tables are not consumed directly by the ACPICA subsystem, but are + * included here to support device drivers and the AML disassembler. + * + * The tables in this file are defined by third-party specifications, and are + * not defined directly by the ACPI specification itself. + * + ******************************************************************************/ + +/* + * Values for description table header signatures for tables defined in this + * file. Useful because they make it more difficult to inadvertently type in + * the wrong signature. + */ +#define ACPI_SIG_ASF "ASF!" /* Alert Standard Format table */ +#define ACPI_SIG_BOOT "BOOT" /* Simple Boot Flag Table */ +#define ACPI_SIG_DBGP "DBGP" /* Debug Port table */ +#define ACPI_SIG_DMAR "DMAR" /* DMA Remapping table */ +#define ACPI_SIG_HPET "HPET" /* High Precision Event Timer table */ +#define ACPI_SIG_IBFT "IBFT" /* i_sCSI Boot Firmware Table */ +#define ACPI_SIG_IVRS "IVRS" /* I/O Virtualization Reporting Structure */ +#define ACPI_SIG_MCFG "MCFG" /* PCI Memory Mapped Configuration table */ +#define ACPI_SIG_SLIC "SLIC" /* Software Licensing Description Table */ +#define ACPI_SIG_SPCR "SPCR" /* Serial Port Console Redirection table */ +#define ACPI_SIG_SPMI "SPMI" /* Server Platform Management Interface table */ +#define ACPI_SIG_TCPA "TCPA" /* Trusted Computing Platform Alliance table */ +#define ACPI_SIG_UEFI "UEFI" /* Uefi Boot Optimization Table */ +#define ACPI_SIG_WAET "WAET" /* Windows ACPI Emulated devices Table */ +#define ACPI_SIG_WDAT "WDAT" /* Watchdog Action Table */ +#define ACPI_SIG_WDRT "WDRT" /* Watchdog Resource Table */ + +/* + * All tables must be byte-packed to match the ACPI specification, since + * the tables are provided by the system BIOS. 
+ */ +#pragma pack(1) + +/* + * Note about bitfields: The u8 type is used for bitfields in ACPI tables. + * This is the only type that is even remotely portable. Anything else is not + * portable, so do not use any other bitfield types. + */ + +/******************************************************************************* + * + * ASF - Alert Standard Format table (Signature "ASF!") + * Revision 0x10 + * + * Conforms to the Alert Standard Format Specification V2.0, 23 April 2003 + * + ******************************************************************************/ + +struct acpi_table_asf { + struct acpi_table_header header; /* Common ACPI table header */ +}; + +/* ASF subtable header */ + +struct acpi_asf_header { + u8 type; + u8 reserved; + u16 length; +}; + +/* Values for Type field above */ + +enum acpi_asf_type { + ACPI_ASF_TYPE_INFO = 0, + ACPI_ASF_TYPE_ALERT = 1, + ACPI_ASF_TYPE_CONTROL = 2, + ACPI_ASF_TYPE_BOOT = 3, + ACPI_ASF_TYPE_ADDRESS = 4, + ACPI_ASF_TYPE_RESERVED = 5 +}; + +/* + * ASF subtables + */ + +/* 0: ASF Information */ + +struct acpi_asf_info { + struct acpi_asf_header header; + u8 min_reset_value; + u8 min_poll_interval; + u16 system_id; + u32 mfg_id; + u8 flags; + u8 reserved2[3]; +}; + +/* Masks for Flags field above */ + +#define ACPI_ASF_SMBUS_PROTOCOLS (1) + +/* 1: ASF Alerts */ + +struct acpi_asf_alert { + struct acpi_asf_header header; + u8 assert_mask; + u8 deassert_mask; + u8 alerts; + u8 data_length; +}; + +struct acpi_asf_alert_data { + u8 address; + u8 command; + u8 mask; + u8 value; + u8 sensor_type; + u8 type; + u8 offset; + u8 source_type; + u8 severity; + u8 sensor_number; + u8 entity; + u8 instance; +}; + +/* 2: ASF Remote Control */ + +struct acpi_asf_remote { + struct acpi_asf_header header; + u8 controls; + u8 data_length; + u16 reserved2; +}; + +struct acpi_asf_control_data { + u8 function; + u8 address; + u8 command; + u8 value; +}; + +/* 3: ASF RMCP Boot Options */ + +struct acpi_asf_rmcp { + struct acpi_asf_header header; + u8 capabilities[7]; + u8 completion_code; + u32 enterprise_id; + u8 command; + u16 parameter; + u16 boot_options; + u16 oem_parameters; +}; + +/* 4: ASF Address */ + +struct acpi_asf_address { + struct acpi_asf_header header; + u8 eprom_address; + u8 devices; +}; + +/******************************************************************************* + * + * BOOT - Simple Boot Flag Table + * Version 1 + * + * Conforms to the "Simple Boot Flag Specification", Version 2.1 + * + ******************************************************************************/ + +struct acpi_table_boot { + struct acpi_table_header header; /* Common ACPI table header */ + u8 cmos_index; /* Index in CMOS RAM for the boot register */ + u8 reserved[3]; +}; + +/******************************************************************************* + * + * DBGP - Debug Port table + * Version 1 + * + * Conforms to the "Debug Port Specification", Version 1.00, 2/9/2000 + * + ******************************************************************************/ + +struct acpi_table_dbgp { + struct acpi_table_header header; /* Common ACPI table header */ + u8 type; /* 0=full 16550, 1=subset of 16550 */ + u8 reserved[3]; + struct acpi_generic_address debug_port; +}; + +/******************************************************************************* + * + * DMAR - DMA Remapping table + * Version 1 + * + * Conforms to "Intel Virtualization Technology for Directed I/O", + * Version 1.2, Sept. 
2008 + * + ******************************************************************************/ + +struct acpi_table_dmar { + struct acpi_table_header header; /* Common ACPI table header */ + u8 width; /* Host Address Width */ + u8 flags; + u8 reserved[10]; +}; + +/* Masks for Flags field above */ + +#define ACPI_DMAR_INTR_REMAP (1) + +/* DMAR subtable header */ + +struct acpi_dmar_header { + u16 type; + u16 length; +}; + +/* Values for subtable type in struct acpi_dmar_header */ + +enum acpi_dmar_type { + ACPI_DMAR_TYPE_HARDWARE_UNIT = 0, + ACPI_DMAR_TYPE_RESERVED_MEMORY = 1, + ACPI_DMAR_TYPE_ATSR = 2, + ACPI_DMAR_HARDWARE_AFFINITY = 3, + ACPI_DMAR_TYPE_RESERVED = 4 /* 4 and greater are reserved */ +}; + +/* DMAR Device Scope structure */ + +struct acpi_dmar_device_scope { + u8 entry_type; + u8 length; + u16 reserved; + u8 enumeration_id; + u8 bus; +}; + +/* Values for entry_type in struct acpi_dmar_device_scope */ + +enum acpi_dmar_scope_type { + ACPI_DMAR_SCOPE_TYPE_NOT_USED = 0, + ACPI_DMAR_SCOPE_TYPE_ENDPOINT = 1, + ACPI_DMAR_SCOPE_TYPE_BRIDGE = 2, + ACPI_DMAR_SCOPE_TYPE_IOAPIC = 3, + ACPI_DMAR_SCOPE_TYPE_HPET = 4, + ACPI_DMAR_SCOPE_TYPE_RESERVED = 5 /* 5 and greater are reserved */ +}; + +struct acpi_dmar_pci_path { + u8 dev; + u8 fn; +}; + +/* + * DMAR Sub-tables, correspond to Type in struct acpi_dmar_header + */ + +/* 0: Hardware Unit Definition */ + +struct acpi_dmar_hardware_unit { + struct acpi_dmar_header header; + u8 flags; + u8 reserved; + u16 segment; + u64 address; /* Register Base Address */ +}; + +/* Masks for Flags field above */ + +#define ACPI_DMAR_INCLUDE_ALL (1) + +/* 1: Reserved Memory Defininition */ + +struct acpi_dmar_reserved_memory { + struct acpi_dmar_header header; + u16 reserved; + u16 segment; + u64 base_address; /* 4_k aligned base address */ + u64 end_address; /* 4_k aligned limit address */ +}; + +/* Masks for Flags field above */ + +#define ACPI_DMAR_ALLOW_ALL (1) + +/* 2: Root Port ATS Capability Reporting Structure */ + +struct acpi_dmar_atsr { + struct acpi_dmar_header header; + u8 flags; + u8 reserved; + u16 segment; +}; + +/* Masks for Flags field above */ + +#define ACPI_DMAR_ALL_PORTS (1) + +/* 3: Remapping Hardware Static Affinity Structure */ + +struct acpi_dmar_rhsa { + struct acpi_dmar_header header; + u32 reserved; + u64 base_address; + u32 proximity_domain; +}; + +/******************************************************************************* + * + * HPET - High Precision Event Timer table + * Version 1 + * + * Conforms to "IA-PC HPET (High Precision Event Timers) Specification", + * Version 1.0a, October 2004 + * + ******************************************************************************/ + +struct acpi_table_hpet { + struct acpi_table_header header; /* Common ACPI table header */ + u32 id; /* Hardware ID of event timer block */ + struct acpi_generic_address address; /* Address of event timer block */ + u8 sequence; /* HPET sequence number */ + u16 minimum_tick; /* Main counter min tick, periodic mode */ + u8 flags; +}; + +/* Masks for Flags field above */ + +#define ACPI_HPET_PAGE_PROTECT_MASK (3) + +/* Values for Page Protect flags */ + +enum acpi_hpet_page_protect { + ACPI_HPET_NO_PAGE_PROTECT = 0, + ACPI_HPET_PAGE_PROTECT4 = 1, + ACPI_HPET_PAGE_PROTECT64 = 2 +}; + +/******************************************************************************* + * + * IBFT - Boot Firmware Table + * Version 1 + * + * Conforms to "iSCSI Boot Firmware Table (iBFT) as Defined in ACPI 3.0b + * Specification", Version 1.01, March 1, 2007 + * + * Note: It 
appears that this table is not intended to appear in the RSDT/XSDT. + * Therefore, it is not currently supported by the disassembler. + * + ******************************************************************************/ + +struct acpi_table_ibft { + struct acpi_table_header header; /* Common ACPI table header */ + u8 reserved[12]; +}; + +/* IBFT common subtable header */ + +struct acpi_ibft_header { + u8 type; + u8 version; + u16 length; + u8 index; + u8 flags; +}; + +/* Values for Type field above */ + +enum acpi_ibft_type { + ACPI_IBFT_TYPE_NOT_USED = 0, + ACPI_IBFT_TYPE_CONTROL = 1, + ACPI_IBFT_TYPE_INITIATOR = 2, + ACPI_IBFT_TYPE_NIC = 3, + ACPI_IBFT_TYPE_TARGET = 4, + ACPI_IBFT_TYPE_EXTENSIONS = 5, + ACPI_IBFT_TYPE_RESERVED = 6 /* 6 and greater are reserved */ +}; + +/* IBFT subtables */ + +struct acpi_ibft_control { + struct acpi_ibft_header header; + u16 extensions; + u16 initiator_offset; + u16 nic0_offset; + u16 target0_offset; + u16 nic1_offset; + u16 target1_offset; +}; + +struct acpi_ibft_initiator { + struct acpi_ibft_header header; + u8 sns_server[16]; + u8 slp_server[16]; + u8 primary_server[16]; + u8 secondary_server[16]; + u16 name_length; + u16 name_offset; +}; + +struct acpi_ibft_nic { + struct acpi_ibft_header header; + u8 ip_address[16]; + u8 subnet_mask_prefix; + u8 origin; + u8 gateway[16]; + u8 primary_dns[16]; + u8 secondary_dns[16]; + u8 dhcp[16]; + u16 vlan; + u8 mac_address[6]; + u16 pci_address; + u16 name_length; + u16 name_offset; +}; + +struct acpi_ibft_target { + struct acpi_ibft_header header; + u8 target_ip_address[16]; + u16 target_ip_socket; + u8 target_boot_lun[8]; + u8 chap_type; + u8 nic_association; + u16 target_name_length; + u16 target_name_offset; + u16 chap_name_length; + u16 chap_name_offset; + u16 chap_secret_length; + u16 chap_secret_offset; + u16 reverse_chap_name_length; + u16 reverse_chap_name_offset; + u16 reverse_chap_secret_length; + u16 reverse_chap_secret_offset; +}; + +/******************************************************************************* + * + * IVRS - I/O Virtualization Reporting Structure + * Version 1 + * + * Conforms to "AMD I/O Virtualization Technology (IOMMU) Specification", + * Revision 1.26, February 2009. 
+ * + ******************************************************************************/ + +struct acpi_table_ivrs { + struct acpi_table_header header; /* Common ACPI table header */ + u32 info; /* Common virtualization info */ + u64 reserved; +}; + +/* Values for Info field above */ + +#define ACPI_IVRS_PHYSICAL_SIZE 0x00007F00 /* 7 bits, physical address size */ +#define ACPI_IVRS_VIRTUAL_SIZE 0x003F8000 /* 7 bits, virtual address size */ +#define ACPI_IVRS_ATS_RESERVED 0x00400000 /* ATS address translation range reserved */ + +/* IVRS subtable header */ + +struct acpi_ivrs_header { + u8 type; /* Subtable type */ + u8 flags; + u16 length; /* Subtable length */ + u16 device_id; /* ID of IOMMU */ +}; + +/* Values for subtable Type above */ + +enum acpi_ivrs_type { + ACPI_IVRS_TYPE_HARDWARE = 0x10, + ACPI_IVRS_TYPE_MEMORY1 = 0x20, + ACPI_IVRS_TYPE_MEMORY2 = 0x21, + ACPI_IVRS_TYPE_MEMORY3 = 0x22 +}; + +/* Masks for Flags field above for IVHD subtable */ + +#define ACPI_IVHD_TT_ENABLE (1) +#define ACPI_IVHD_PASS_PW (1<<1) +#define ACPI_IVHD_RES_PASS_PW (1<<2) +#define ACPI_IVHD_ISOC (1<<3) +#define ACPI_IVHD_IOTLB (1<<4) + +/* Masks for Flags field above for IVMD subtable */ + +#define ACPI_IVMD_UNITY (1) +#define ACPI_IVMD_READ (1<<1) +#define ACPI_IVMD_WRITE (1<<2) +#define ACPI_IVMD_EXCLUSION_RANGE (1<<3) + +/* + * IVRS subtables, correspond to Type in struct acpi_ivrs_header + */ + +/* 0x10: I/O Virtualization Hardware Definition Block (IVHD) */ + +struct acpi_ivrs_hardware { + struct acpi_ivrs_header header; + u16 capability_offset; /* Offset for IOMMU control fields */ + u64 base_address; /* IOMMU control registers */ + u16 pci_segment_group; + u16 info; /* MSI number and unit ID */ + u32 reserved; +}; + +/* Masks for Info field above */ + +#define ACPI_IVHD_MSI_NUMBER_MASK 0x001F /* 5 bits, MSI message number */ +#define ACPI_IVHD_UNIT_ID_MASK 0x1F00 /* 5 bits, unit_iD */ + +/* + * Device Entries for IVHD subtable, appear after struct acpi_ivrs_hardware structure. + * Upper two bits of the Type field are the (encoded) length of the structure. + * Currently, only 4 and 8 byte entries are defined. 16 and 32 byte entries + * are reserved for future use but not defined. 
+ */ +struct acpi_ivrs_de_header { + u8 type; + u16 id; + u8 data_setting; +}; + +/* Length of device entry is in the top two bits of Type field above */ + +#define ACPI_IVHD_ENTRY_LENGTH 0xC0 + +/* Values for device entry Type field above */ + +enum acpi_ivrs_device_entry_type { + /* 4-byte device entries, all use struct acpi_ivrs_device4 */ + + ACPI_IVRS_TYPE_PAD4 = 0, + ACPI_IVRS_TYPE_ALL = 1, + ACPI_IVRS_TYPE_SELECT = 2, + ACPI_IVRS_TYPE_START = 3, + ACPI_IVRS_TYPE_END = 4, + + /* 8-byte device entries */ + + ACPI_IVRS_TYPE_PAD8 = 64, + ACPI_IVRS_TYPE_NOT_USED = 65, + ACPI_IVRS_TYPE_ALIAS_SELECT = 66, /* Uses struct acpi_ivrs_device8a */ + ACPI_IVRS_TYPE_ALIAS_START = 67, /* Uses struct acpi_ivrs_device8a */ + ACPI_IVRS_TYPE_EXT_SELECT = 70, /* Uses struct acpi_ivrs_device8b */ + ACPI_IVRS_TYPE_EXT_START = 71, /* Uses struct acpi_ivrs_device8b */ + ACPI_IVRS_TYPE_SPECIAL = 72 /* Uses struct acpi_ivrs_device8c */ +}; + +/* Values for Data field above */ + +#define ACPI_IVHD_INIT_PASS (1) +#define ACPI_IVHD_EINT_PASS (1<<1) +#define ACPI_IVHD_NMI_PASS (1<<2) +#define ACPI_IVHD_SYSTEM_MGMT (3<<4) +#define ACPI_IVHD_LINT0_PASS (1<<6) +#define ACPI_IVHD_LINT1_PASS (1<<7) + +/* Types 0-4: 4-byte device entry */ + +struct acpi_ivrs_device4 { + struct acpi_ivrs_de_header header; +}; + +/* Types 66-67: 8-byte device entry */ + +struct acpi_ivrs_device8a { + struct acpi_ivrs_de_header header; + u8 reserved1; + u16 used_id; + u8 reserved2; +}; + +/* Types 70-71: 8-byte device entry */ + +struct acpi_ivrs_device8b { + struct acpi_ivrs_de_header header; + u32 extended_data; +}; + +/* Values for extended_data above */ + +#define ACPI_IVHD_ATS_DISABLED (1<<31) + +/* Type 72: 8-byte device entry */ + +struct acpi_ivrs_device8c { + struct acpi_ivrs_de_header header; + u8 handle; + u16 used_id; + u8 variety; +}; + +/* Values for Variety field above */ + +#define ACPI_IVHD_IOAPIC 1 +#define ACPI_IVHD_HPET 2 + +/* 0x20, 0x21, 0x22: I/O Virtualization Memory Definition Block (IVMD) */ + +struct acpi_ivrs_memory { + struct acpi_ivrs_header header; + u16 aux_data; + u64 reserved; + u64 start_address; + u64 memory_length; +}; + +/******************************************************************************* + * + * MCFG - PCI Memory Mapped Configuration table and sub-table + * Version 1 + * + * Conforms to "PCI Firmware Specification", Revision 3.0, June 20, 2005 + * + ******************************************************************************/ + +struct acpi_table_mcfg { + struct acpi_table_header header; /* Common ACPI table header */ + u8 reserved[8]; +}; + +/* Subtable */ + +struct acpi_mcfg_allocation { + u64 address; /* Base address, processor-relative */ + u16 pci_segment; /* PCI segment group number */ + u8 start_bus_number; /* Starting PCI Bus number */ + u8 end_bus_number; /* Final PCI Bus number */ + u32 reserved; +}; + +/******************************************************************************* + * + * SPCR - Serial Port Console Redirection table + * Version 1 + * + * Conforms to "Serial Port Console Redirection Table", + * Version 1.00, January 11, 2002 + * + ******************************************************************************/ + +struct acpi_table_spcr { + struct acpi_table_header header; /* Common ACPI table header */ + u8 interface_type; /* 0=full 16550, 1=subset of 16550 */ + u8 reserved[3]; + struct acpi_generic_address serial_port; + u8 interrupt_type; + u8 pc_interrupt; + u32 interrupt; + u8 baud_rate; + u8 parity; + u8 stop_bits; + u8 flow_control; + u8 terminal_type; + 
u8 reserved1; + u16 pci_device_id; + u16 pci_vendor_id; + u8 pci_bus; + u8 pci_device; + u8 pci_function; + u32 pci_flags; + u8 pci_segment; + u32 reserved2; +}; + +/* Masks for pci_flags field above */ + +#define ACPI_SPCR_DO_NOT_DISABLE (1) + +/******************************************************************************* + * + * SPMI - Server Platform Management Interface table + * Version 5 + * + * Conforms to "Intelligent Platform Management Interface Specification + * Second Generation v2.0", Document Revision 1.0, February 12, 2004 with + * June 12, 2009 markup. + * + ******************************************************************************/ + +struct acpi_table_spmi { + struct acpi_table_header header; /* Common ACPI table header */ + u8 interface_type; + u8 reserved; /* Must be 1 */ + u16 spec_revision; /* Version of IPMI */ + u8 interrupt_type; + u8 gpe_number; /* GPE assigned */ + u8 reserved1; + u8 pci_device_flag; + u32 interrupt; + struct acpi_generic_address ipmi_register; + u8 pci_segment; + u8 pci_bus; + u8 pci_device; + u8 pci_function; + u8 reserved2; +}; + +/* Values for interface_type above */ + +enum acpi_spmi_interface_types { + ACPI_SPMI_NOT_USED = 0, + ACPI_SPMI_KEYBOARD = 1, + ACPI_SPMI_SMI = 2, + ACPI_SPMI_BLOCK_TRANSFER = 3, + ACPI_SPMI_SMBUS = 4, + ACPI_SPMI_RESERVED = 5 /* 5 and above are reserved */ +}; + +/******************************************************************************* + * + * TCPA - Trusted Computing Platform Alliance table + * Version 1 + * + * Conforms to "TCG PC Specific Implementation Specification", + * Version 1.1, August 18, 2003 + * + ******************************************************************************/ + +struct acpi_table_tcpa { + struct acpi_table_header header; /* Common ACPI table header */ + u16 reserved; + u32 max_log_length; /* Maximum length for the event log area */ + u64 log_address; /* Address of the event log area */ +}; + +/******************************************************************************* + * + * UEFI - UEFI Boot optimization Table + * Version 1 + * + * Conforms to "Unified Extensible Firmware Interface Specification", + * Version 2.3, May 8, 2009 + * + ******************************************************************************/ + +struct acpi_table_uefi { + struct acpi_table_header header; /* Common ACPI table header */ + u8 identifier[16]; /* UUID identifier */ + u16 data_offset; /* Offset of remaining data in table */ +}; + +/******************************************************************************* + * + * WAET - Windows ACPI Emulated devices Table + * Version 1 + * + * Conforms to "Windows ACPI Emulated Devices Table", version 1.0, April 6, 2009 + * + ******************************************************************************/ + +struct acpi_table_waet { + struct acpi_table_header header; /* Common ACPI table header */ + u32 flags; +}; + +/* Masks for Flags field above */ + +#define ACPI_WAET_RTC_NO_ACK (1) /* RTC requires no int acknowledge */ +#define ACPI_WAET_TIMER_ONE_READ (1<<1) /* PM timer requires only one read */ + +/******************************************************************************* + * + * WDAT - Watchdog Action Table + * Version 1 + * + * Conforms to "Hardware Watchdog Timers Design Specification", + * Copyright 2006 Microsoft Corporation. 
+ * + ******************************************************************************/ + +struct acpi_table_wdat { + struct acpi_table_header header; /* Common ACPI table header */ + u32 header_length; /* Watchdog Header Length */ + u16 pci_segment; /* PCI Segment number */ + u8 pci_bus; /* PCI Bus number */ + u8 pci_device; /* PCI Device number */ + u8 pci_function; /* PCI Function number */ + u8 reserved[3]; + u32 timer_period; /* Period of one timer count (msec) */ + u32 max_count; /* Maximum counter value supported */ + u32 min_count; /* Minimum counter value */ + u8 flags; + u8 reserved2[3]; + u32 entries; /* Number of watchdog entries that follow */ +}; + +/* Masks for Flags field above */ + +#define ACPI_WDAT_ENABLED (1) +#define ACPI_WDAT_STOPPED 0x80 + +/* WDAT Instruction Entries (actions) */ + +struct acpi_wdat_entry { + u8 action; + u8 instruction; + u16 reserved; + struct acpi_generic_address register_region; + u32 value; /* Value used with Read/Write register */ + u32 mask; /* Bitmask required for this register instruction */ +}; + +/* Values for Action field above */ + +enum acpi_wdat_actions { + ACPI_WDAT_RESET = 1, + ACPI_WDAT_GET_CURRENT_COUNTDOWN = 4, + ACPI_WDAT_GET_COUNTDOWN = 5, + ACPI_WDAT_SET_COUNTDOWN = 6, + ACPI_WDAT_GET_RUNNING_STATE = 8, + ACPI_WDAT_SET_RUNNING_STATE = 9, + ACPI_WDAT_GET_STOPPED_STATE = 10, + ACPI_WDAT_SET_STOPPED_STATE = 11, + ACPI_WDAT_GET_REBOOT = 16, + ACPI_WDAT_SET_REBOOT = 17, + ACPI_WDAT_GET_SHUTDOWN = 18, + ACPI_WDAT_SET_SHUTDOWN = 19, + ACPI_WDAT_GET_STATUS = 32, + ACPI_WDAT_SET_STATUS = 33, + ACPI_WDAT_ACTION_RESERVED = 34 /* 34 and greater are reserved */ +}; + +/* Values for Instruction field above */ + +enum acpi_wdat_instructions { + ACPI_WDAT_READ_VALUE = 0, + ACPI_WDAT_READ_COUNTDOWN = 1, + ACPI_WDAT_WRITE_VALUE = 2, + ACPI_WDAT_WRITE_COUNTDOWN = 3, + ACPI_WDAT_INSTRUCTION_RESERVED = 4, /* 4 and greater are reserved */ + ACPI_WDAT_PRESERVE_REGISTER = 0x80 /* Except for this value */ +}; + +/******************************************************************************* + * + * WDRT - Watchdog Resource Table + * Version 1 + * + * Conforms to "Watchdog Timer Hardware Requirements for Windows Server 2003", + * Version 1.01, August 28, 2006 + * + ******************************************************************************/ + +struct acpi_table_wdrt { + struct acpi_table_header header; /* Common ACPI table header */ + struct acpi_generic_address control_register; + struct acpi_generic_address count_register; + u16 pci_device_id; + u16 pci_vendor_id; + u8 pci_bus; /* PCI Bus number */ + u8 pci_device; /* PCI Device number */ + u8 pci_function; /* PCI Function number */ + u8 pci_segment; /* PCI Segment number */ + u16 max_count; /* Maximum counter value supported */ + u8 units; +}; + +/* Reset to default packing */ + +#pragma pack() + +#endif /* __ACTBL2_H__ */ diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index 37ba576d06e8..153f12dc3373 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h @@ -288,7 +288,7 @@ typedef u32 acpi_physical_address; /* * Some compilers complain about unused variables. Sometimes we don't want to * use all the variables (for example, _acpi_module_name). 
This allows us - * to to tell the compiler in a per-variable manner that a variable + * to tell the compiler in a per-variable manner that a variable * is unused */ #ifndef ACPI_UNUSED_VAR @@ -338,7 +338,7 @@ typedef u32 acpi_physical_address; /* PM Timer ticks per second (HZ) */ -#define PM_TIMER_FREQUENCY 3579545 +#define PM_TIMER_FREQUENCY 3579545 /******************************************************************************* * @@ -732,7 +732,8 @@ typedef u8 acpi_adr_space_type; #define ACPI_ADR_SPACE_SMBUS (acpi_adr_space_type) 4 #define ACPI_ADR_SPACE_CMOS (acpi_adr_space_type) 5 #define ACPI_ADR_SPACE_PCI_BAR_TARGET (acpi_adr_space_type) 6 -#define ACPI_ADR_SPACE_DATA_TABLE (acpi_adr_space_type) 7 +#define ACPI_ADR_SPACE_IPMI (acpi_adr_space_type) 7 +#define ACPI_ADR_SPACE_DATA_TABLE (acpi_adr_space_type) 8 #define ACPI_ADR_SPACE_FIXED_HARDWARE (acpi_adr_space_type) 127 /* @@ -921,7 +922,7 @@ typedef void (*acpi_notify_handler) (acpi_handle device, u32 value, void *context); typedef -void (*acpi_object_handler) (acpi_handle object, u32 function, void *data); +void (*acpi_object_handler) (acpi_handle object, void *data); typedef acpi_status(*acpi_init_handler) (acpi_handle object, u32 function); @@ -969,38 +970,60 @@ acpi_status(*acpi_walk_callback) (acpi_handle obj_handle, #define ACPI_INTERRUPT_NOT_HANDLED 0x00 #define ACPI_INTERRUPT_HANDLED 0x01 -/* Length of _HID, _UID, _CID, and UUID values */ +/* Length of 32-bit EISAID values when converted back to a string */ + +#define ACPI_EISAID_STRING_SIZE 8 /* Includes null terminator */ + +/* Length of UUID (string) values */ -#define ACPI_DEVICE_ID_LENGTH 0x09 -#define ACPI_MAX_CID_LENGTH 48 #define ACPI_UUID_LENGTH 16 -/* Common string version of device HIDs and UIDs */ +/* Structures used for device/processor HID, UID, CID */ struct acpica_device_id { - char value[ACPI_DEVICE_ID_LENGTH]; + u32 length; /* Length of string + null */ + char *string; }; -/* Common string version of device CIDs */ - -struct acpi_compatible_id { - char value[ACPI_MAX_CID_LENGTH]; +struct acpica_device_id_list { + u32 count; /* Number of IDs in Ids array */ + u32 list_size; /* Size of list, including ID strings */ + struct acpica_device_id ids[1]; /* ID array */ }; -struct acpi_compatible_id_list { - u32 count; - u32 size; - struct acpi_compatible_id id[1]; +/* + * Structure returned from acpi_get_object_info. 
+ * Optimized for both 32- and 64-bit builds + */ +struct acpi_device_info { + u32 info_size; /* Size of info, including ID strings */ + u32 name; /* ACPI object Name */ + acpi_object_type type; /* ACPI object Type */ + u8 param_count; /* If a method, required parameter count */ + u8 valid; /* Indicates which optional fields are valid */ + u8 flags; /* Miscellaneous info */ + u8 highest_dstates[4]; /* _sx_d values: 0xFF indicates not valid */ + u8 lowest_dstates[5]; /* _sx_w values: 0xFF indicates not valid */ + u32 current_status; /* _STA value */ + acpi_integer address; /* _ADR value */ + struct acpica_device_id hardware_id; /* _HID value */ + struct acpica_device_id unique_id; /* _UID value */ + struct acpica_device_id_list compatible_id_list; /* _CID list <must be last> */ }; -/* Structure and flags for acpi_get_object_info */ +/* Values for Flags field above (acpi_get_object_info) */ + +#define ACPI_PCI_ROOT_BRIDGE 0x01 -#define ACPI_VALID_STA 0x0001 -#define ACPI_VALID_ADR 0x0002 -#define ACPI_VALID_HID 0x0004 -#define ACPI_VALID_UID 0x0008 -#define ACPI_VALID_CID 0x0010 -#define ACPI_VALID_SXDS 0x0020 +/* Flags for Valid field above (acpi_get_object_info) */ + +#define ACPI_VALID_STA 0x01 +#define ACPI_VALID_ADR 0x02 +#define ACPI_VALID_HID 0x04 +#define ACPI_VALID_UID 0x08 +#define ACPI_VALID_CID 0x10 +#define ACPI_VALID_SXDS 0x20 +#define ACPI_VALID_SXWS 0x40 /* Flags for _STA method */ @@ -1011,29 +1034,6 @@ struct acpi_compatible_id_list { #define ACPI_STA_DEVICE_OK 0x08 /* Synonym */ #define ACPI_STA_BATTERY_PRESENT 0x10 -#define ACPI_COMMON_OBJ_INFO \ - acpi_object_type type; /* ACPI object type */ \ - acpi_name name /* ACPI object Name */ - -struct acpi_obj_info_header { - ACPI_COMMON_OBJ_INFO; -}; - -/* Structure returned from Get Object Info */ - -struct acpi_device_info { - ACPI_COMMON_OBJ_INFO; - - u32 param_count; /* If a method, required parameter count */ - u32 valid; /* Indicates which fields below are valid */ - u32 current_status; /* _STA value */ - acpi_integer address; /* _ADR value if any */ - struct acpica_device_id hardware_id; /* _HID value if any */ - struct acpica_device_id unique_id; /* _UID value if any */ - u8 highest_dstates[4]; /* _sx_d values: 0xFF indicates not valid */ - struct acpi_compatible_id_list compatibility_id; /* List of _CIDs if any */ -}; - /* Context structs for address space handlers */ struct acpi_pci_id { diff --git a/include/acpi/button.h b/include/acpi/button.h new file mode 100644 index 000000000000..97eea0e4c016 --- /dev/null +++ b/include/acpi/button.h @@ -0,0 +1,25 @@ +#ifndef ACPI_BUTTON_H +#define ACPI_BUTTON_H + +#include <linux/notifier.h> + +#if defined(CONFIG_ACPI_BUTTON) || defined(CONFIG_ACPI_BUTTON_MODULE) +extern int acpi_lid_notifier_register(struct notifier_block *nb); +extern int acpi_lid_notifier_unregister(struct notifier_block *nb); +extern int acpi_lid_open(void); +#else +static inline int acpi_lid_notifier_register(struct notifier_block *nb) +{ + return 0; +} +static inline int acpi_lid_notifier_unregister(struct notifier_block *nb) +{ + return 0; +} +static inline int acpi_lid_open(void) +{ + return 1; +} +#endif /* defined(CONFIG_ACPI_BUTTON) || defined(CONFIG_ACPI_BUTTON_MODULE) */ + +#endif /* ACPI_BUTTON_H */ diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h index 935c5d7fc86e..6aadbf84ae71 100644 --- a/include/acpi/platform/acgcc.h +++ b/include/acpi/platform/acgcc.h @@ -57,7 +57,7 @@ /* * Some compilers complain about unused variables. 
Sometimes we don't want to * use all the variables (for example, _acpi_module_name). This allows us - * to to tell the compiler warning in a per-variable manner that a variable + * to tell the compiler warning in a per-variable manner that a variable * is unused. */ #define ACPI_UNUSED_VAR __attribute__ ((unused)) diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h index fcb8e4b159b1..9d7febde10a1 100644 --- a/include/acpi/platform/aclinux.h +++ b/include/acpi/platform/aclinux.h @@ -149,10 +149,10 @@ static inline void *acpi_os_acquire_object(acpi_cache_t * cache) #define ACPI_FREE(a) kfree(a) /* Used within ACPICA to show where it is safe to preempt execution */ - +#include <linux/hardirq.h> #define ACPI_PREEMPTION_POINT() \ do { \ - if (!irqs_disabled()) \ + if (!in_atomic_preempt_off()) \ cond_resched(); \ } while (0) diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h index 1c1fa422d18a..ca0f239f0e13 100644 --- a/include/asm-generic/cputime.h +++ b/include/asm-generic/cputime.h @@ -7,6 +7,7 @@ typedef unsigned long cputime_t; #define cputime_zero (0UL) +#define cputime_one_jiffy jiffies_to_cputime(1) #define cputime_max ((~0UL >> 1) - 1) #define cputime_add(__a, __b) ((__a) + (__b)) #define cputime_sub(__a, __b) ((__a) - (__b)) diff --git a/include/asm-generic/fcntl.h b/include/asm-generic/fcntl.h index 4d3e48373e74..495dc8af4044 100644 --- a/include/asm-generic/fcntl.h +++ b/include/asm-generic/fcntl.h @@ -74,6 +74,28 @@ #define F_GETSIG 11 /* for sockets. */ #endif +#ifndef CONFIG_64BIT +#ifndef F_GETLK64 +#define F_GETLK64 12 /* using 'struct flock64' */ +#define F_SETLK64 13 +#define F_SETLKW64 14 +#endif +#endif + +#ifndef F_SETOWN_EX +#define F_SETOWN_EX 15 +#define F_GETOWN_EX 16 +#endif + +#define F_OWNER_TID 0 +#define F_OWNER_PID 1 +#define F_OWNER_PGRP 2 + +struct f_owner_ex { + int type; + pid_t pid; +}; + /* for F_[GET|SET]FL */ #define FD_CLOEXEC 1 /* actually anything with low bit set goes */ @@ -126,12 +148,6 @@ struct flock { #ifndef CONFIG_64BIT -#ifndef F_GETLK64 -#define F_GETLK64 12 /* using 'struct flock64' */ -#define F_SETLK64 13 -#define F_SETLKW64 14 -#endif - #ifndef HAVE_ARCH_STRUCT_FLOCK64 #ifndef __ARCH_FLOCK64_PAD #define __ARCH_FLOCK64_PAD diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h index d6c379dc64fa..66d6106a2067 100644 --- a/include/asm-generic/gpio.h +++ b/include/asm-generic/gpio.h @@ -1,6 +1,7 @@ #ifndef _ASM_GENERIC_GPIO_H #define _ASM_GENERIC_GPIO_H +#include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> @@ -141,6 +142,8 @@ extern int __gpio_to_irq(unsigned gpio); * but more typically is configured entirely from userspace. 
*/ extern int gpio_export(unsigned gpio, bool direction_may_change); +extern int gpio_export_link(struct device *dev, const char *name, + unsigned gpio); extern void gpio_unexport(unsigned gpio); #endif /* CONFIG_GPIO_SYSFS */ @@ -185,6 +188,12 @@ static inline int gpio_export(unsigned gpio, bool direction_may_change) return -ENOSYS; } +static inline int gpio_export_link(struct device *dev, const char *name, + unsigned gpio) +{ + return -ENOSYS; +} + static inline void gpio_unexport(unsigned gpio) { } diff --git a/include/asm-generic/hardirq.h b/include/asm-generic/hardirq.h index 23bb4dad4962..62f59080e5cc 100644 --- a/include/asm-generic/hardirq.h +++ b/include/asm-generic/hardirq.h @@ -6,7 +6,7 @@ #include <linux/irq.h> typedef struct { - unsigned long __softirq_pending; + unsigned int __softirq_pending; } ____cacheline_aligned irq_cpustat_t; #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h index eddbce0f9fb9..e5f234a08540 100644 --- a/include/asm-generic/kmap_types.h +++ b/include/asm-generic/kmap_types.h @@ -2,34 +2,35 @@ #define _ASM_GENERIC_KMAP_TYPES_H #ifdef __WITH_KM_FENCE -# define D(n) __KM_FENCE_##n , +# define KMAP_D(n) __KM_FENCE_##n , #else -# define D(n) +# define KMAP_D(n) #endif enum km_type { -D(0) KM_BOUNCE_READ, -D(1) KM_SKB_SUNRPC_DATA, -D(2) KM_SKB_DATA_SOFTIRQ, -D(3) KM_USER0, -D(4) KM_USER1, -D(5) KM_BIO_SRC_IRQ, -D(6) KM_BIO_DST_IRQ, -D(7) KM_PTE0, -D(8) KM_PTE1, -D(9) KM_IRQ0, -D(10) KM_IRQ1, -D(11) KM_SOFTIRQ0, -D(12) KM_SOFTIRQ1, -D(13) KM_SYNC_ICACHE, -D(14) KM_SYNC_DCACHE, -D(15) KM_UML_USERCOPY, /* UML specific, for copy_*_user - used in do_op_one_page */ -D(16) KM_IRQ_PTE, -D(17) KM_NMI, -D(18) KM_NMI_PTE, -D(19) KM_TYPE_NR +KMAP_D(0) KM_BOUNCE_READ, +KMAP_D(1) KM_SKB_SUNRPC_DATA, +KMAP_D(2) KM_SKB_DATA_SOFTIRQ, +KMAP_D(3) KM_USER0, +KMAP_D(4) KM_USER1, +KMAP_D(5) KM_BIO_SRC_IRQ, +KMAP_D(6) KM_BIO_DST_IRQ, +KMAP_D(7) KM_PTE0, +KMAP_D(8) KM_PTE1, +KMAP_D(9) KM_IRQ0, +KMAP_D(10) KM_IRQ1, +KMAP_D(11) KM_SOFTIRQ0, +KMAP_D(12) KM_SOFTIRQ1, +KMAP_D(13) KM_SYNC_ICACHE, +KMAP_D(14) KM_SYNC_DCACHE, +/* UML specific, for copy_*_user - used in do_op_one_page */ +KMAP_D(15) KM_UML_USERCOPY, +KMAP_D(16) KM_IRQ_PTE, +KMAP_D(17) KM_NMI, +KMAP_D(18) KM_NMI_PTE, +KMAP_D(19) KM_TYPE_NR }; -#undef D +#undef KMAP_D #endif diff --git a/include/asm-generic/mman-common.h b/include/asm-generic/mman-common.h index 3b69ad34189a..5ee13b2fd223 100644 --- a/include/asm-generic/mman-common.h +++ b/include/asm-generic/mman-common.h @@ -34,6 +34,10 @@ #define MADV_REMOVE 9 /* remove these pages & resources */ #define MADV_DONTFORK 10 /* don't inherit across fork */ #define MADV_DOFORK 11 /* do inherit across fork */ +#define MADV_HWPOISON 100 /* poison a page for testing */ + +#define MADV_MERGEABLE 12 /* KSM may merge identical pages */ +#define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */ /* compatibility flags */ #define MAP_FILE 0 diff --git a/include/asm-generic/mman.h b/include/asm-generic/mman.h index 7cab4de2bca6..32c8bd6a196d 100644 --- a/include/asm-generic/mman.h +++ b/include/asm-generic/mman.h @@ -11,6 +11,7 @@ #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x10000 /* do not block on IO */ #define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ +#define MAP_HUGETLB 0x40000 /* create a huge page mapping */ #define MCL_CURRENT 1 /* lock all current mappings */ #define 
MCL_FUTURE 2 /* lock all future mappings */ diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h index d083561337f2..b3bfabc258f3 100644 --- a/include/asm-generic/sections.h +++ b/include/asm-generic/sections.h @@ -23,4 +23,20 @@ extern char __ctors_start[], __ctors_end[]; #define dereference_function_descriptor(p) (p) #endif +/* random extra sections (if any). Override + * in asm/sections.h */ +#ifndef arch_is_kernel_text +static inline int arch_is_kernel_text(unsigned long addr) +{ + return 0; +} +#endif + +#ifndef arch_is_kernel_data +static inline int arch_is_kernel_data(unsigned long addr) +{ + return 0; +} +#endif + #endif /* _ASM_GENERIC_SECTIONS_H_ */ diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h index c840719a8c59..942d30b5aab1 100644 --- a/include/asm-generic/siginfo.h +++ b/include/asm-generic/siginfo.h @@ -82,6 +82,7 @@ typedef struct siginfo { #ifdef __ARCH_SI_TRAPNO int _trapno; /* TRAP # which caused the signal */ #endif + short _addr_lsb; /* LSB of the reported address */ } _sigfault; /* SIGPOLL */ @@ -112,6 +113,7 @@ typedef struct siginfo { #ifdef __ARCH_SI_TRAPNO #define si_trapno _sifields._sigfault._trapno #endif +#define si_addr_lsb _sifields._sigfault._addr_lsb #define si_band _sifields._sigpoll._band #define si_fd _sifields._sigpoll._fd @@ -192,7 +194,11 @@ typedef struct siginfo { #define BUS_ADRALN (__SI_FAULT|1) /* invalid address alignment */ #define BUS_ADRERR (__SI_FAULT|2) /* non-existant physical address */ #define BUS_OBJERR (__SI_FAULT|3) /* object specific hardware error */ -#define NSIGBUS 3 +/* hardware memory error consumed on a machine check: action required */ +#define BUS_MCEERR_AR (__SI_FAULT|4) +/* hardware memory error detected in process but not consumed: action optional*/ +#define BUS_MCEERR_AO (__SI_FAULT|5) +#define NSIGBUS 5 /* * SIGTRAP si_codes diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h index ea8087b55ffc..5c122ae6bfa6 100644 --- a/include/asm-generic/syscall.h +++ b/include/asm-generic/syscall.h @@ -1,7 +1,7 @@ /* * Access to user system call parameters and results * - * Copyright (C) 2008 Red Hat, Inc. All rights reserved. + * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions @@ -32,9 +32,13 @@ struct pt_regs; * If @task is not executing a system call, i.e. it's blocked * inside the kernel for a fault or signal, returns -1. * + * Note this returns int even on 64-bit machines. Only 32 bits of + * system call number can be meaningful. If the actual arch value + * is 64 bits, this truncates to 32 bits so 0xffffffff means -1. + * * It's only valid to call this when @task is known to be blocked. 
*/ -long syscall_get_nr(struct task_struct *task, struct pt_regs *regs); +int syscall_get_nr(struct task_struct *task, struct pt_regs *regs); /** * syscall_rollback - roll back registers after an aborted system call diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h index 88bada2ebc4b..510df36dd5d4 100644 --- a/include/asm-generic/topology.h +++ b/include/asm-generic/topology.h @@ -37,9 +37,6 @@ #ifndef parent_node #define parent_node(node) ((void)(node),0) #endif -#ifndef node_to_cpumask -#define node_to_cpumask(node) ((void)node, cpu_online_map) -#endif #ifndef cpumask_of_node #define cpumask_of_node(node) ((void)node, cpu_online_mask) #endif @@ -55,18 +52,4 @@ #endif /* CONFIG_NUMA */ -/* - * returns pointer to cpumask for specified node - * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)" - */ -#ifndef node_to_cpumask_ptr - -#define node_to_cpumask_ptr(v, node) \ - cpumask_t _##v = node_to_cpumask(node); \ - const cpumask_t *v = &_##v - -#define node_to_cpumask_ptr_next(v, node) \ - _##v = node_to_cpumask(node) -#endif - #endif /* _ASM_GENERIC_TOPOLOGY_H */ diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h index 1125e5a1ee5d..d76b66acea95 100644 --- a/include/asm-generic/unistd.h +++ b/include/asm-generic/unistd.h @@ -620,8 +620,8 @@ __SYSCALL(__NR_move_pages, sys_move_pages) #define __NR_rt_tgsigqueueinfo 240 __SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo) -#define __NR_perf_counter_open 241 -__SYSCALL(__NR_perf_counter_open, sys_perf_counter_open) +#define __NR_perf_event_open 241 +__SYSCALL(__NR_perf_event_open, sys_perf_event_open) #undef __NR_syscalls #define __NR_syscalls 242 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 29ca8f53ffbe..b6e818f4b247 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -721,12 +721,12 @@ . = ALIGN(PAGE_SIZE); \ .data : AT(ADDR(.data) - LOAD_OFFSET) { \ INIT_TASK_DATA(inittask) \ + NOSAVE_DATA \ + PAGE_ALIGNED_DATA(pagealigned) \ CACHELINE_ALIGNED_DATA(cacheline) \ READ_MOSTLY_DATA(cacheline) \ DATA_DATA \ CONSTRUCTORS \ - NOSAVE_DATA \ - PAGE_ALIGNED_DATA(pagealigned) \ } #define INIT_TEXT_SECTION(inittext_align) \ diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index ae1e9e166959..b69347b8904f 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -387,6 +387,7 @@ struct drm_crtc { * @get_modes: get mode list for this connector * @set_property: property for this connector may need update * @destroy: make object go away + * @force: notify the driver the connector is forced on * * Each CRTC may have one or more connectors attached to it. 
The functions * below allow the core DRM code to control connectors, enumerate available modes, @@ -401,6 +402,7 @@ struct drm_connector_funcs { int (*set_property)(struct drm_connector *connector, struct drm_property *property, uint64_t val); void (*destroy)(struct drm_connector *connector); + void (*force)(struct drm_connector *connector); }; struct drm_encoder_funcs { @@ -429,6 +431,13 @@ struct drm_encoder { void *helper_private; }; +enum drm_connector_force { + DRM_FORCE_UNSPECIFIED, + DRM_FORCE_OFF, + DRM_FORCE_ON, /* force on analog part normally */ + DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */ +}; + /** * drm_connector - central DRM connector control structure * @crtc: CRTC this connector is currently connected to, NULL if none @@ -478,9 +487,12 @@ struct drm_connector { void *helper_private; + /* forced on connector */ + enum drm_connector_force force; uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER]; uint32_t force_encoder_id; struct drm_encoder *encoder; /* currently active encoder */ + void *fb_helper_private; }; /** @@ -746,7 +758,7 @@ extern int drm_mode_gamma_set_ioctl(struct drm_device *dev, extern bool drm_detect_hdmi_monitor(struct edid *edid); extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh, - bool reduced, bool interlaced); + bool reduced, bool interlaced, bool margins); extern struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh, bool interlaced, int margins); diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index 4c8dacaf4f58..b29e20168b5f 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h @@ -39,6 +39,7 @@ #include <linux/fb.h> +#include "drm_fb_helper.h" struct drm_crtc_helper_funcs { /* * Control power levels on the CRTC. 
If the mode passed in is @@ -60,6 +61,9 @@ struct drm_crtc_helper_funcs { /* Move the crtc on the current fb to the given position *optional* */ int (*mode_set_base)(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb); + + /* reload the current crtc LUT */ + void (*load_lut)(struct drm_crtc *crtc); }; struct drm_encoder_helper_funcs { @@ -119,10 +123,11 @@ static inline void drm_encoder_helper_add(struct drm_encoder *encoder, encoder->helper_private = (void *)funcs; } -static inline void drm_connector_helper_add(struct drm_connector *connector, +static inline int drm_connector_helper_add(struct drm_connector *connector, const struct drm_connector_helper_funcs *funcs) { connector->helper_private = (void *)funcs; + return drm_fb_helper_add_connector(connector); } extern int drm_helper_resume_force_mode(struct drm_device *dev); diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index 88fffbdfa26f..58c892a2cbfa 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h @@ -35,9 +35,30 @@ struct drm_fb_helper_crtc { struct drm_mode_set mode_set; }; + struct drm_fb_helper_funcs { void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, int regno); + void (*gamma_get)(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, int regno); +}; + +/* mode specified on the command line */ +struct drm_fb_helper_cmdline_mode { + bool specified; + bool refresh_specified; + bool bpp_specified; + int xres, yres; + int bpp; + int refresh; + bool rb; + bool interlace; + bool cvt; + bool margins; +}; + +struct drm_fb_helper_connector { + struct drm_fb_helper_cmdline_mode cmdline_mode; }; struct drm_fb_helper { @@ -52,11 +73,14 @@ struct drm_fb_helper { }; int drm_fb_helper_single_fb_probe(struct drm_device *dev, + int preferred_bpp, int (*fb_create)(struct drm_device *dev, uint32_t fb_width, uint32_t fb_height, uint32_t surface_width, uint32_t surface_height, + uint32_t surface_depth, + uint32_t surface_bpp, struct drm_framebuffer **fb_ptr)); int drm_fb_helper_init_crtc_count(struct drm_fb_helper *helper, int crtc_count, int max_conn); @@ -77,6 +101,11 @@ int drm_fb_helper_setcolreg(unsigned regno, void drm_fb_helper_restore(void); void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb, uint32_t fb_width, uint32_t fb_height); -void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch); +void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, + uint32_t depth); + +int drm_fb_helper_add_connector(struct drm_connector *connector); +int drm_fb_helper_parse_command_line(struct drm_device *dev); +int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info); #endif diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 853508499d20..e6f3b120f51a 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h @@ -80,7 +80,7 @@ {0x1002, 0x5158, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \ {0x1002, 0x5159, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ {0x1002, 0x515A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ - {0x1002, 0x515E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ + {0x1002, 0x515E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_SINGLE_CRTC}, \ {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ @@ -113,7 +113,7 @@ {0x1002, 0x5962, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ {0x1002, 
0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ {0x1002, 0x5965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ - {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ + {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_SINGLE_CRTC}, \ {0x1002, 0x5a41, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \ {0x1002, 0x5a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ {0x1002, 0x5a61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \ @@ -552,6 +552,7 @@ {0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ {0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ {0x8086, 0x2e32, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ + {0x8086, 0x2e42, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ {0x8086, 0xa001, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ {0x8086, 0xa011, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ {0x8086, 0x35e8, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index 8e1e92583fbc..7e0cb1da92e6 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h @@ -185,6 +185,7 @@ typedef struct _drm_i915_sarea { #define DRM_I915_GEM_GET_APERTURE 0x23 #define DRM_I915_GEM_MMAP_GTT 0x24 #define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25 +#define DRM_I915_GEM_MADVISE 0x26 #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) @@ -221,6 +222,7 @@ typedef struct _drm_i915_sarea { #define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling) #define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture) #define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_intel_get_pipe_from_crtc_id) +#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise) /* Allow drivers to submit batchbuffers directly to hardware, relying * on the security mechanisms provided by hardware. @@ -667,4 +669,21 @@ struct drm_i915_get_pipe_from_crtc_id { __u32 pipe; }; +#define I915_MADV_WILLNEED 0 +#define I915_MADV_DONTNEED 1 +#define __I915_MADV_PURGED 2 /* internal state */ + +struct drm_i915_gem_madvise { + /** Handle of the buffer to change the backing store advice */ + __u32 handle; + + /* Advice: either the buffer will be needed again in the near future, + * or wont be and could be discarded under memory pressure. + */ + __u32 madv; + + /** Whether the backing store still exists. 
*/ + __u32 retained; +}; + #endif /* _I915_DRM_H_ */ diff --git a/include/linux/Kbuild b/include/linux/Kbuild index cff4a101f266..5a5385749e16 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -126,6 +126,7 @@ header-y += nfs_mount.h header-y += nl80211.h header-y += param.h header-y += pci_regs.h +header-y += perf_event.h header-y += pfkeyv2.h header-y += pg.h header-y += phantom.h @@ -329,6 +330,7 @@ unifdef-y += scc.h unifdef-y += sched.h unifdef-y += screen_info.h unifdef-y += sdla.h +unifdef-y += securebits.h unifdef-y += selinux_netlink.h unifdef-y += sem.h unifdef-y += serial_core.h @@ -363,6 +365,7 @@ unifdef-y += utsname.h unifdef-y += videodev2.h unifdef-y += videodev.h unifdef-y += virtio_config.h +unifdef-y += virtio_ids.h unifdef-y += virtio_blk.h unifdef-y += virtio_net.h unifdef-y += virtio_9p.h diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 34321cfffeab..dfcd920c3e54 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -41,8 +41,6 @@ #include <acpi/acpi_drivers.h> #include <acpi/acpi_numa.h> #include <asm/acpi.h> -#include <linux/dmi.h> - enum acpi_irq_model_id { ACPI_IRQ_MODEL_PIC = 0, @@ -219,10 +217,8 @@ static inline int acpi_video_display_switch_support(void) #endif /* defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE) */ extern int acpi_blacklisted(void); -#ifdef CONFIG_DMI extern void acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d); extern int acpi_osi_setup(char *str); -#endif #ifdef CONFIG_ACPI_NUMA int acpi_get_pxm(acpi_handle handle); @@ -292,7 +288,10 @@ void __init acpi_s4_no_nvs(void); extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags); extern void acpi_early_init(void); -#else /* CONFIG_ACPI */ +#else /* !CONFIG_ACPI */ + +#define acpi_disabled 1 + static inline void acpi_early_init(void) { } static inline int early_acpi_boot_init(void) @@ -331,5 +330,11 @@ static inline int acpi_check_mem_region(resource_size_t start, return 0; } +struct acpi_table_header; +static inline int acpi_table_parse(char *id, + int (*handler)(struct acpi_table_header *)) +{ + return -1; +} #endif /* !CONFIG_ACPI */ #endif /*_LINUX_ACPI_H*/ diff --git a/include/linux/agp_backend.h b/include/linux/agp_backend.h index 880130f7311f..9101ed64f803 100644 --- a/include/linux/agp_backend.h +++ b/include/linux/agp_backend.h @@ -53,7 +53,7 @@ struct agp_kern_info { int current_memory; bool cant_use_aperture; unsigned long page_mask; - struct vm_operations_struct *vm_ops; + const struct vm_operations_struct *vm_ops; }; /* diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h new file mode 100644 index 000000000000..6b4241748dda --- /dev/null +++ b/include/linux/amba/mmci.h @@ -0,0 +1,18 @@ +/* + * include/linux/amba/mmci.h + */ +#ifndef AMBA_MMCI_H +#define AMBA_MMCI_H + +#include <linux/mmc/host.h> + +struct mmci_platform_data { + unsigned int ocr_mask; /* available voltages */ + u32 (*translate_vdd)(struct device *, unsigned int); + unsigned int (*status)(struct device *); + int gpio_wp; + int gpio_cd; + unsigned long capabilities; +}; + +#endif diff --git a/include/linux/amba/pl022.h b/include/linux/amba/pl022.h index dcad0ffd1755..e4836c6b3dd7 100644 --- a/include/linux/amba/pl022.h +++ b/include/linux/amba/pl022.h @@ -136,12 +136,12 @@ enum ssp_tx_level_trig { /** * enum SPI Clock Phase - clock phase (Motorola SPI interface only) - * @SSP_CLK_RISING_EDGE: Receive data on rising edge - * @SSP_CLK_FALLING_EDGE: Receive data on falling edge + * @SSP_CLK_FIRST_EDGE: Receive data on first 
edge transition (actual direction depends on polarity) + * @SSP_CLK_SECOND_EDGE: Receive data on second edge transition (actual direction depends on polarity) */ enum ssp_spi_clk_phase { - SSP_CLK_RISING_EDGE, - SSP_CLK_FALLING_EDGE + SSP_CLK_FIRST_EDGE, + SSP_CLK_SECOND_EDGE }; /** diff --git a/include/linux/anon_inodes.h b/include/linux/anon_inodes.h index e0a0cdc2da43..69a21e0ebd33 100644 --- a/include/linux/anon_inodes.h +++ b/include/linux/anon_inodes.h @@ -8,6 +8,9 @@ #ifndef _LINUX_ANON_INODES_H #define _LINUX_ANON_INODES_H +struct file *anon_inode_getfile(const char *name, + const struct file_operations *fops, + void *priv, int flags); int anon_inode_getfd(const char *name, const struct file_operations *fops, void *priv, int flags); diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h index 5fc2ef8d97fa..a1c486a88e88 100644 --- a/include/linux/async_tx.h +++ b/include/linux/async_tx.h @@ -58,25 +58,60 @@ struct dma_chan_ref { * array. * @ASYNC_TX_ACK: immediately ack the descriptor, precludes setting up a * dependency chain - * @ASYNC_TX_DEP_ACK: ack the dependency descriptor. Useful for chaining. + * @ASYNC_TX_FENCE: specify that the next operation in the dependency + * chain uses this operation's result as an input */ enum async_tx_flags { ASYNC_TX_XOR_ZERO_DST = (1 << 0), ASYNC_TX_XOR_DROP_DST = (1 << 1), - ASYNC_TX_ACK = (1 << 3), - ASYNC_TX_DEP_ACK = (1 << 4), + ASYNC_TX_ACK = (1 << 2), + ASYNC_TX_FENCE = (1 << 3), +}; + +/** + * struct async_submit_ctl - async_tx submission/completion modifiers + * @flags: submission modifiers + * @depend_tx: parent dependency of the current operation being submitted + * @cb_fn: callback routine to run at operation completion + * @cb_param: parameter for the callback routine + * @scribble: caller provided space for dma/page address conversions + */ +struct async_submit_ctl { + enum async_tx_flags flags; + struct dma_async_tx_descriptor *depend_tx; + dma_async_tx_callback cb_fn; + void *cb_param; + void *scribble; }; #ifdef CONFIG_DMA_ENGINE #define async_tx_issue_pending_all dma_issue_pending_all + +/** + * async_tx_issue_pending - send pending descriptor to the hardware channel + * @tx: descriptor handle to retrieve hardware context + * + * Note: any dependent operations will have already been issued by + * async_tx_channel_switch, or (in the case of no channel switch) will + * be already pending on this channel. 
+ */ +static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx) +{ + if (likely(tx)) { + struct dma_chan *chan = tx->chan; + struct dma_device *dma = chan->device; + + dma->device_issue_pending(chan); + } +} #ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL #include <asm/async_tx.h> #else #define async_tx_find_channel(dep, type, dst, dst_count, src, src_count, len) \ __async_tx_find_channel(dep, type) struct dma_chan * -__async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, - enum dma_transaction_type tx_type); +__async_tx_find_channel(struct async_submit_ctl *submit, + enum dma_transaction_type tx_type); #endif /* CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL */ #else static inline void async_tx_issue_pending_all(void) @@ -84,10 +119,16 @@ static inline void async_tx_issue_pending_all(void) do { } while (0); } +static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx) +{ + do { } while (0); +} + static inline struct dma_chan * -async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, - enum dma_transaction_type tx_type, struct page **dst, int dst_count, - struct page **src, int src_count, size_t len) +async_tx_find_channel(struct async_submit_ctl *submit, + enum dma_transaction_type tx_type, struct page **dst, + int dst_count, struct page **src, int src_count, + size_t len) { return NULL; } @@ -99,46 +140,70 @@ async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, * @cb_fn_param: parameter to pass to the callback routine */ static inline void -async_tx_sync_epilog(dma_async_tx_callback cb_fn, void *cb_fn_param) +async_tx_sync_epilog(struct async_submit_ctl *submit) { - if (cb_fn) - cb_fn(cb_fn_param); + if (submit->cb_fn) + submit->cb_fn(submit->cb_param); } -void -async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, - enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx, - dma_async_tx_callback cb_fn, void *cb_fn_param); +typedef union { + unsigned long addr; + struct page *page; + dma_addr_t dma; +} addr_conv_t; + +static inline void +init_async_submit(struct async_submit_ctl *args, enum async_tx_flags flags, + struct dma_async_tx_descriptor *tx, + dma_async_tx_callback cb_fn, void *cb_param, + addr_conv_t *scribble) +{ + args->flags = flags; + args->depend_tx = tx; + args->cb_fn = cb_fn; + args->cb_param = cb_param; + args->scribble = scribble; +} + +void async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, + struct async_submit_ctl *submit); struct dma_async_tx_descriptor * async_xor(struct page *dest, struct page **src_list, unsigned int offset, - int src_cnt, size_t len, enum async_tx_flags flags, - struct dma_async_tx_descriptor *depend_tx, - dma_async_tx_callback cb_fn, void *cb_fn_param); + int src_cnt, size_t len, struct async_submit_ctl *submit); struct dma_async_tx_descriptor * -async_xor_zero_sum(struct page *dest, struct page **src_list, - unsigned int offset, int src_cnt, size_t len, - u32 *result, enum async_tx_flags flags, - struct dma_async_tx_descriptor *depend_tx, - dma_async_tx_callback cb_fn, void *cb_fn_param); +async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, + int src_cnt, size_t len, enum sum_check_flags *result, + struct async_submit_ctl *submit); struct dma_async_tx_descriptor * async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, - unsigned int src_offset, size_t len, enum async_tx_flags flags, - struct dma_async_tx_descriptor *depend_tx, - dma_async_tx_callback cb_fn, void *cb_fn_param); + 
unsigned int src_offset, size_t len, + struct async_submit_ctl *submit); struct dma_async_tx_descriptor * async_memset(struct page *dest, int val, unsigned int offset, - size_t len, enum async_tx_flags flags, - struct dma_async_tx_descriptor *depend_tx, - dma_async_tx_callback cb_fn, void *cb_fn_param); + size_t len, struct async_submit_ctl *submit); + +struct dma_async_tx_descriptor *async_trigger_callback(struct async_submit_ctl *submit); + +struct dma_async_tx_descriptor * +async_gen_syndrome(struct page **blocks, unsigned int offset, int src_cnt, + size_t len, struct async_submit_ctl *submit); + +struct dma_async_tx_descriptor * +async_syndrome_val(struct page **blocks, unsigned int offset, int src_cnt, + size_t len, enum sum_check_flags *pqres, struct page *spare, + struct async_submit_ctl *submit); + +struct dma_async_tx_descriptor * +async_raid6_2data_recov(int src_num, size_t bytes, int faila, int failb, + struct page **ptrs, struct async_submit_ctl *submit); struct dma_async_tx_descriptor * -async_trigger_callback(enum async_tx_flags flags, - struct dma_async_tx_descriptor *depend_tx, - dma_async_tx_callback cb_fn, void *cb_fn_param); +async_raid6_datap_recov(int src_num, size_t bytes, int faila, + struct page **ptrs, struct async_submit_ctl *submit); void async_tx_quiesce(struct dma_async_tx_descriptor **tx); #endif /* _ASYNC_TX_H_ */ diff --git a/include/linux/ata.h b/include/linux/ata.h index 6299a259ed19..4fb357312b3b 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -334,9 +334,12 @@ enum { SETFEATURES_SATA_DISABLE = 0x90, /* Disable use of SATA feature */ /* SETFEATURE Sector counts for SATA features */ - SATA_AN = 0x05, /* Asynchronous Notification */ - SATA_DIPM = 0x03, /* Device Initiated Power Management */ - SATA_FPDMA_AA = 0x02, /* DMA Setup FIS Auto-Activate */ + SATA_FPDMA_OFFSET = 0x01, /* FPDMA non-zero buffer offsets */ + SATA_FPDMA_AA = 0x02, /* FPDMA Setup FIS Auto-Activate */ + SATA_DIPM = 0x03, /* Device Initiated Power Management */ + SATA_FPDMA_IN_ORDER = 0x04, /* FPDMA in-order data delivery */ + SATA_AN = 0x05, /* Asynchronous Notification */ + SATA_SSP = 0x06, /* Software Settings Preservation */ /* feature values for SET_MAX */ ATA_SET_MAX_ADDR = 0x00, diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h index 086e5c362d3a..817b23705c91 100644 --- a/include/linux/atmdev.h +++ b/include/linux/atmdev.h @@ -397,7 +397,7 @@ struct atmdev_ops { /* only send is required */ int (*getsockopt)(struct atm_vcc *vcc,int level,int optname, void __user *optval,int optlen); int (*setsockopt)(struct atm_vcc *vcc,int level,int optname, - void __user *optval,int optlen); + void __user *optval,unsigned int optlen); int (*send)(struct atm_vcc *vcc,struct sk_buff *skb); int (*send_oam)(struct atm_vcc *vcc,void *cell,int flags); void (*phy_put)(struct atm_dev *dev,unsigned char value, diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 0ee33c2e6129..b449e738533a 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -101,7 +101,8 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent, const char *fmt, ...); int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); void bdi_unregister(struct backing_dev_info *bdi); -void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages); +void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb, + long nr_pages); int bdi_writeback_task(struct bdi_writeback *wb); int bdi_has_dirty_io(struct backing_dev_info *bdi); diff 
--git a/include/linux/backlight.h b/include/linux/backlight.h index 79ca2da81c87..0f5f57858a23 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -27,6 +27,11 @@ * Any other use of the locks below is probably wrong. */ +enum backlight_update_reason { + BACKLIGHT_UPDATE_HOTKEY, + BACKLIGHT_UPDATE_SYSFS, +}; + struct backlight_device; struct fb_info; @@ -100,6 +105,8 @@ static inline void backlight_update_status(struct backlight_device *bd) extern struct backlight_device *backlight_device_register(const char *name, struct device *dev, void *devdata, struct backlight_ops *ops); extern void backlight_device_unregister(struct backlight_device *bd); +extern void backlight_force_update(struct backlight_device *bd, + enum backlight_update_reason reason); #define to_backlight_device(obj) container_of(obj, struct backlight_device, dev) diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 2046b5b8af48..aece486ac734 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -120,7 +120,7 @@ extern int copy_strings_kernel(int argc,char ** argv,struct linux_binprm *bprm); extern int prepare_bprm_creds(struct linux_binprm *bprm); extern void install_exec_creds(struct linux_binprm *bprm); extern void do_coredump(long signr, int exit_code, struct pt_regs *regs); -extern int set_binfmt(struct linux_binfmt *new); +extern void set_binfmt(struct linux_binfmt *new); extern void free_bprm(struct linux_binprm *); #endif /* __KERNEL__ */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index e23a86cae5ac..221cecd86bd3 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -82,7 +82,6 @@ enum rq_cmd_type_bits { enum { REQ_LB_OP_EJECT = 0x40, /* eject request */ REQ_LB_OP_FLUSH = 0x41, /* flush request */ - REQ_LB_OP_DISCARD = 0x42, /* discard sectors */ }; /* @@ -261,7 +260,6 @@ typedef void (request_fn_proc) (struct request_queue *q); typedef int (make_request_fn) (struct request_queue *q, struct bio *bio); typedef int (prep_rq_fn) (struct request_queue *, struct request *); typedef void (unplug_fn) (struct request_queue *); -typedef int (prepare_discard_fn) (struct request_queue *, struct request *); struct bio_vec; struct bvec_merge_data { @@ -313,6 +311,7 @@ struct queue_limits { unsigned int alignment_offset; unsigned int io_min; unsigned int io_opt; + unsigned int max_discard_sectors; unsigned short logical_block_size; unsigned short max_hw_segments; @@ -340,7 +339,6 @@ struct request_queue make_request_fn *make_request_fn; prep_rq_fn *prep_rq_fn; unplug_fn *unplug_fn; - prepare_discard_fn *prepare_discard_fn; merge_bvec_fn *merge_bvec_fn; prepare_flush_fn *prepare_flush_fn; softirq_done_fn *softirq_done_fn; @@ -460,6 +458,7 @@ struct request_queue #define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ #define QUEUE_FLAG_IO_STAT 15 /* do IO stats */ #define QUEUE_FLAG_CQ 16 /* hardware does queuing */ +#define QUEUE_FLAG_DISCARD 17 /* supports DISCARD */ #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ (1 << QUEUE_FLAG_CLUSTER) | \ @@ -591,6 +590,7 @@ enum { #define blk_queue_flushing(q) ((q)->ordseq) #define blk_queue_stackable(q) \ test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) +#define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags) #define blk_fs_request(rq) ((rq)->cmd_type == REQ_TYPE_FS) #define blk_pc_request(rq) ((rq)->cmd_type == REQ_TYPE_BLOCK_PC) @@ -929,6 +929,8 @@ extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); extern void 
blk_queue_max_phys_segments(struct request_queue *, unsigned short); extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short); extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); +extern void blk_queue_max_discard_sectors(struct request_queue *q, + unsigned int max_discard_sectors); extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); extern void blk_queue_physical_block_size(struct request_queue *, unsigned short); extern void blk_queue_alignment_offset(struct request_queue *q, @@ -955,7 +957,6 @@ extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *); extern void blk_queue_dma_alignment(struct request_queue *, int); extern void blk_queue_update_dma_alignment(struct request_queue *, int); extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); -extern void blk_queue_set_discard(struct request_queue *, prepare_discard_fn *); extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); @@ -1080,25 +1081,37 @@ static inline unsigned int queue_physical_block_size(struct request_queue *q) return q->limits.physical_block_size; } +static inline int bdev_physical_block_size(struct block_device *bdev) +{ + return queue_physical_block_size(bdev_get_queue(bdev)); +} + static inline unsigned int queue_io_min(struct request_queue *q) { return q->limits.io_min; } +static inline int bdev_io_min(struct block_device *bdev) +{ + return queue_io_min(bdev_get_queue(bdev)); +} + static inline unsigned int queue_io_opt(struct request_queue *q) { return q->limits.io_opt; } +static inline int bdev_io_opt(struct block_device *bdev) +{ + return queue_io_opt(bdev_get_queue(bdev)); +} + static inline int queue_alignment_offset(struct request_queue *q) { - if (q && q->limits.misaligned) + if (q->limits.misaligned) return -1; - if (q && q->limits.alignment_offset) - return q->limits.alignment_offset; - - return 0; + return q->limits.alignment_offset; } static inline int queue_sector_alignment_offset(struct request_queue *q, @@ -1108,6 +1121,19 @@ static inline int queue_sector_alignment_offset(struct request_queue *q, & (q->limits.io_min - 1); } +static inline int bdev_alignment_offset(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + + if (q->limits.misaligned) + return -1; + + if (bdev != bdev->bd_contains) + return bdev->bd_part->alignment_offset; + + return q->limits.alignment_offset; +} + static inline int queue_dma_alignment(struct request_queue *q) { return q ? q->dma_alignment : 511; diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index 7e4350ece0f8..3b73b9992b26 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h @@ -198,6 +198,7 @@ extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, char __user *arg); extern int blk_trace_startstop(struct request_queue *q, int start); extern int blk_trace_remove(struct request_queue *q); +extern void blk_trace_remove_sysfs(struct device *dev); extern int blk_trace_init_sysfs(struct device *dev); extern struct attribute_group blk_trace_attr_group; @@ -211,6 +212,7 @@ extern struct attribute_group blk_trace_attr_group; # define blk_trace_startstop(q, start) (-ENOTTY) # define blk_trace_remove(q) (-ENOTTY) # define blk_add_trace_msg(q, fmt, ...) 
do { } while (0) +# define blk_trace_remove_sysfs(dev) do { } while (0) static inline int blk_trace_init_sysfs(struct device *dev) { return 0; diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index bc3ab7073695..b10ec49ee2dd 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -53,6 +53,7 @@ extern void free_bootmem_node(pg_data_t *pgdat, unsigned long addr, unsigned long size); extern void free_bootmem(unsigned long addr, unsigned long size); +extern void free_bootmem_late(unsigned long addr, unsigned long size); /* * Flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE, @@ -132,9 +133,6 @@ static inline void *alloc_remap(int nid, unsigned long size) } #endif /* CONFIG_HAVE_ARCH_ALLOC_REMAP */ -extern unsigned long __meminitdata nr_kernel_pages; -extern unsigned long __meminitdata nr_all_pages; - extern void *alloc_large_system_hash(const char *tablename, unsigned long bucketsize, unsigned long numentries, @@ -145,6 +143,8 @@ extern void *alloc_large_system_hash(const char *tablename, unsigned long limit); #define HASH_EARLY 0x00000001 /* Allocating during early boot? */ +#define HASH_SMALL 0x00000002 /* sub-page allocation allowed, min + * shift passed via *_hash_shift */ /* Only NUMA needs hash distribution. 64bit NUMA architectures have * sufficient vmalloc space. diff --git a/include/linux/capability.h b/include/linux/capability.h index c3021105edc0..39e5ff512fbe 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h @@ -7,7 +7,7 @@ * * See here for the libcap library ("POSIX draft" compliance): * - * ftp://linux.kernel.org/pub/linux/libs/security/linux-privs/kernel-2.6/ + * ftp://www.kernel.org/pub/linux/libs/security/linux-privs/kernel-2.6/ */ #ifndef _LINUX_CAPABILITY_H @@ -92,9 +92,7 @@ struct vfs_cap_data { #define _KERNEL_CAPABILITY_VERSION _LINUX_CAPABILITY_VERSION_3 #define _KERNEL_CAPABILITY_U32S _LINUX_CAPABILITY_U32S_3 -#ifdef CONFIG_SECURITY_FILE_CAPABILITIES extern int file_caps_enabled; -#endif typedef struct kernel_cap_struct { __u32 cap[_KERNEL_CAPABILITY_U32S]; diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 90bba9e62286..0008dee66514 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -37,7 +37,7 @@ extern void cgroup_exit(struct task_struct *p, int run_callbacks); extern int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry); -extern struct file_operations proc_cgroup_operations; +extern const struct file_operations proc_cgroup_operations; /* Define the enumeration of all cgroup subsystems */ #define SUBSYS(_x) _x ## _subsys_id, @@ -141,6 +141,38 @@ enum { CGRP_WAIT_ON_RMDIR, }; +/* which pidlist file are we talking about? */ +enum cgroup_filetype { + CGROUP_FILE_PROCS, + CGROUP_FILE_TASKS, +}; + +/* + * A pidlist is a list of pids that virtually represents the contents of one + * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists, + * a pair (one each for procs, tasks) for each pid namespace that's relevant + * to the cgroup. + */ +struct cgroup_pidlist { + /* + * used to find which pidlist is wanted. doesn't change as long as + * this particular list stays in the list. 
+ */ + struct { enum cgroup_filetype type; struct pid_namespace *ns; } key; + /* array of xids */ + pid_t *list; + /* how many elements the above list has */ + int length; + /* how many files are using the current array */ + int use_count; + /* each of these stored in a list by its cgroup */ + struct list_head links; + /* pointer to the cgroup we belong to, for list removal purposes */ + struct cgroup *owner; + /* protects the other fields */ + struct rw_semaphore mutex; +}; + struct cgroup { unsigned long flags; /* "unsigned long" so bitops work */ @@ -179,11 +211,12 @@ struct cgroup { */ struct list_head release_list; - /* pids_mutex protects pids_list and cached pid arrays. */ - struct rw_semaphore pids_mutex; - - /* Linked list of struct cgroup_pids */ - struct list_head pids_list; + /* + * list of pidlists, up to two for each namespace (one for procs, one + * for tasks); created on demand. + */ + struct list_head pidlists; + struct mutex pidlist_mutex; /* For RCU-protected deletion */ struct rcu_head rcu_head; @@ -227,6 +260,9 @@ struct css_set { * during subsystem registration (at boot time). */ struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT]; + + /* For RCU-protected deletion */ + struct rcu_head rcu_head; }; /* @@ -389,10 +425,11 @@ struct cgroup_subsys { struct cgroup *cgrp); int (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp); void (*destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp); - int (*can_attach)(struct cgroup_subsys *ss, - struct cgroup *cgrp, struct task_struct *tsk); + int (*can_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp, + struct task_struct *tsk, bool threadgroup); void (*attach)(struct cgroup_subsys *ss, struct cgroup *cgrp, - struct cgroup *old_cgrp, struct task_struct *tsk); + struct cgroup *old_cgrp, struct task_struct *tsk, + bool threadgroup); void (*fork)(struct cgroup_subsys *ss, struct task_struct *task); void (*exit)(struct cgroup_subsys *ss, struct task_struct *task); int (*populate)(struct cgroup_subsys *ss, diff --git a/include/linux/cn_proc.h b/include/linux/cn_proc.h index b8125b2eb665..47dac5ea8d3a 100644 --- a/include/linux/cn_proc.h +++ b/include/linux/cn_proc.h @@ -52,6 +52,7 @@ struct proc_event { PROC_EVENT_EXEC = 0x00000002, PROC_EVENT_UID = 0x00000004, PROC_EVENT_GID = 0x00000040, + PROC_EVENT_SID = 0x00000080, /* "next" should be 0x00000400 */ /* "last" is the last process event: exit */ PROC_EVENT_EXIT = 0x80000000 @@ -89,6 +90,11 @@ struct proc_event { } e; } id; + struct sid_proc_event { + __kernel_pid_t process_pid; + __kernel_pid_t process_tgid; + } sid; + struct exit_proc_event { __kernel_pid_t process_pid; __kernel_pid_t process_tgid; @@ -102,6 +108,7 @@ struct proc_event { void proc_fork_connector(struct task_struct *task); void proc_exec_connector(struct task_struct *task); void proc_id_connector(struct task_struct *task, int which_id); +void proc_sid_connector(struct task_struct *task); void proc_exit_connector(struct task_struct *task); #else static inline void proc_fork_connector(struct task_struct *task) @@ -114,6 +121,9 @@ static inline void proc_id_connector(struct task_struct *task, int which_id) {} +static inline void proc_sid_connector(struct task_struct *task) +{} + static inline void proc_exit_connector(struct task_struct *task) {} #endif /* CONFIG_PROC_EVENTS */ diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h index 450fa597c94d..ab3af40a53c6 100644 --- a/include/linux/compiler-gcc4.h +++ b/include/linux/compiler-gcc4.h @@ -36,4 +36,18 @@ the kernel context */ 
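/*
 * Illustrative sketch (not from the patch): a controller's can_attach()
 * callback now also learns whether an entire threadgroup is being moved.
 * example_can_attach() and the policy it applies are hypothetical.
 */
#include <linux/errno.h>
#include <linux/cgroup.h>

static int example_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
                              struct task_struct *tsk, bool threadgroup)
{
        if (threadgroup)
                return -EINVAL; /* e.g. only allow moving single tasks */
        return 0;
}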
#define __cold __attribute__((__cold__)) + +#if __GNUC_MINOR__ >= 5 +/* + * Mark a position in code as unreachable. This can be used to + * suppress control flow warnings after asm blocks that transfer + * control elsewhere. + * + * Early snapshots of gcc 4.5 don't support this and we can't detect + * this in the preprocessor, but we can live with this because they're + * unreleased. Really, we need to have autoconf for the kernel. + */ +#define unreachable() __builtin_unreachable() +#endif + #endif diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 04fb5135b4e1..59f208926d13 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -144,6 +144,11 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); # define barrier() __memory_barrier() #endif +/* Unreachable code */ +#ifndef unreachable +# define unreachable() do { } while (1) +#endif + #ifndef RELOC_HIDE # define RELOC_HIDE(ptr, off) \ ({ unsigned long __ptr; \ diff --git a/include/linux/configfs.h b/include/linux/configfs.h index 7f627775c947..ddb7a97c78c2 100644 --- a/include/linux/configfs.h +++ b/include/linux/configfs.h @@ -27,8 +27,8 @@ * * configfs Copyright (C) 2005 Oracle. All rights reserved. * - * Please read Documentation/filesystems/configfs.txt before using the - * configfs interface, ESPECIALLY the parts about reference counts and + * Please read Documentation/filesystems/configfs/configfs.txt before using + * the configfs interface, ESPECIALLY the parts about reference counts and * item destructors. */ diff --git a/include/linux/connector.h b/include/linux/connector.h index 47ebf416f512..3a14615fd35c 100644 --- a/include/linux/connector.h +++ b/include/linux/connector.h @@ -132,11 +132,8 @@ struct cn_callback_id { }; struct cn_callback_data { - void (*destruct_data) (void *); - void *ddata; - - void *callback_priv; - void (*callback) (struct cn_msg *); + struct sk_buff *skb; + void (*callback) (struct cn_msg *, struct netlink_skb_parms *); void *free; }; @@ -167,11 +164,11 @@ struct cn_dev { struct cn_queue_dev *cbdev; }; -int cn_add_callback(struct cb_id *, char *, void (*callback) (struct cn_msg *)); +int cn_add_callback(struct cb_id *, char *, void (*callback) (struct cn_msg *, struct netlink_skb_parms *)); void cn_del_callback(struct cb_id *); int cn_netlink_send(struct cn_msg *, u32, gfp_t); -int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(struct cn_msg *)); +int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(struct cn_msg *, struct netlink_skb_parms *)); void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id); int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work); diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 44717eb47639..79a2340d83cd 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -291,8 +291,15 @@ struct global_attr { int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); int cpufreq_update_policy(unsigned int cpu); +#ifdef CONFIG_CPU_FREQ /* query the current CPU frequency (in kHz). If zero, cpufreq couldn't detect it */ unsigned int cpufreq_get(unsigned int cpu); +#else +static inline unsigned int cpufreq_get(unsigned int cpu) +{ + return 0; +} +#endif /* query the last known CPU freq (in kHz). 
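/*
 * Illustrative sketch (not from the patch): the typical use of the new
 * unreachable() annotation is after an asm block that never falls through,
 * so gcc >= 4.5 stops warning about the missing return path (the fallback
 * definition keeps older compilers quiet too). The x86 indirect jump below
 * is a hypothetical example.
 */
static unsigned long example_tail_jump(unsigned long addr)
{
        asm volatile("jmp *%0" : : "r" (addr));
        unreachable();  /* control was transferred above, never returns here */
}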
If zero, cpufreq couldn't detect it */ #ifdef CONFIG_CPU_FREQ diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 796df12091b7..789cf5f920ce 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -3,444 +3,37 @@ /* * Cpumasks provide a bitmap suitable for representing the - * set of CPU's in a system, one bit position per CPU number. - * - * The new cpumask_ ops take a "struct cpumask *"; the old ones - * use cpumask_t. - * - * See detailed comments in the file linux/bitmap.h describing the - * data type on which these cpumasks are based. - * - * For details of cpumask_scnprintf() and cpumask_parse_user(), - * see bitmap_scnprintf() and bitmap_parse_user() in lib/bitmap.c. - * For details of cpulist_scnprintf() and cpulist_parse(), see - * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c. - * For details of cpu_remap(), see bitmap_bitremap in lib/bitmap.c - * For details of cpus_remap(), see bitmap_remap in lib/bitmap.c. - * For details of cpus_onto(), see bitmap_onto in lib/bitmap.c. - * For details of cpus_fold(), see bitmap_fold in lib/bitmap.c. - * - * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . - * Note: The alternate operations with the suffix "_nr" are used - * to limit the range of the loop to nr_cpu_ids instead of - * NR_CPUS when NR_CPUS > 64 for performance reasons. - * If NR_CPUS is <= 64 then most assembler bitmask - * operators execute faster with a constant range, so - * the operator will continue to use NR_CPUS. - * - * Another consideration is that nr_cpu_ids is initialized - * to NR_CPUS and isn't lowered until the possible cpus are - * discovered (including any disabled cpus). So early uses - * will span the entire range of NR_CPUS. - * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . - * - * The obsolescent cpumask operations are: - * - * void cpu_set(cpu, mask) turn on bit 'cpu' in mask - * void cpu_clear(cpu, mask) turn off bit 'cpu' in mask - * void cpus_setall(mask) set all bits - * void cpus_clear(mask) clear all bits - * int cpu_isset(cpu, mask) true iff bit 'cpu' set in mask - * int cpu_test_and_set(cpu, mask) test and set bit 'cpu' in mask - * - * int cpus_and(dst, src1, src2) dst = src1 & src2 [intersection] - * void cpus_or(dst, src1, src2) dst = src1 | src2 [union] - * void cpus_xor(dst, src1, src2) dst = src1 ^ src2 - * int cpus_andnot(dst, src1, src2) dst = src1 & ~src2 - * void cpus_complement(dst, src) dst = ~src - * - * int cpus_equal(mask1, mask2) Does mask1 == mask2? - * int cpus_intersects(mask1, mask2) Do mask1 and mask2 intersect? - * int cpus_subset(mask1, mask2) Is mask1 a subset of mask2? - * int cpus_empty(mask) Is mask empty (no bits sets)? - * int cpus_full(mask) Is mask full (all bits sets)? 
- * int cpus_weight(mask) Hamming weigh - number of set bits - * int cpus_weight_nr(mask) Same using nr_cpu_ids instead of NR_CPUS - * - * void cpus_shift_right(dst, src, n) Shift right - * void cpus_shift_left(dst, src, n) Shift left - * - * int first_cpu(mask) Number lowest set bit, or NR_CPUS - * int next_cpu(cpu, mask) Next cpu past 'cpu', or NR_CPUS - * int next_cpu_nr(cpu, mask) Next cpu past 'cpu', or nr_cpu_ids - * - * cpumask_t cpumask_of_cpu(cpu) Return cpumask with bit 'cpu' set - * (can be used as an lvalue) - * CPU_MASK_ALL Initializer - all bits set - * CPU_MASK_NONE Initializer - no bits set - * unsigned long *cpus_addr(mask) Array of unsigned long's in mask - * - * CPUMASK_ALLOC kmalloc's a structure that is a composite of many cpumask_t - * variables, and CPUMASK_PTR provides pointers to each field. - * - * The structure should be defined something like this: - * struct my_cpumasks { - * cpumask_t mask1; - * cpumask_t mask2; - * }; - * - * Usage is then: - * CPUMASK_ALLOC(my_cpumasks); - * CPUMASK_PTR(mask1, my_cpumasks); - * CPUMASK_PTR(mask2, my_cpumasks); - * - * --- DO NOT reference cpumask_t pointers until this check --- - * if (my_cpumasks == NULL) - * "kmalloc failed"... - * - * References are now pointers to the cpumask_t variables (*mask1, ...) - * - *if NR_CPUS > BITS_PER_LONG - * CPUMASK_ALLOC(m) Declares and allocates struct m *m = - * kmalloc(sizeof(*m), GFP_KERNEL) - * CPUMASK_FREE(m) Macro for kfree(m) - *else - * CPUMASK_ALLOC(m) Declares struct m _m, *m = &_m - * CPUMASK_FREE(m) Nop - *endif - * CPUMASK_PTR(v, m) Declares cpumask_t *v = &(m->v) - * ------------------------------------------------------------------------ - * - * int cpumask_scnprintf(buf, len, mask) Format cpumask for printing - * int cpumask_parse_user(ubuf, ulen, mask) Parse ascii string as cpumask - * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing - * int cpulist_parse(buf, map) Parse ascii string as cpulist - * int cpu_remap(oldbit, old, new) newbit = map(old, new)(oldbit) - * void cpus_remap(dst, src, old, new) *dst = map(old, new)(src) - * void cpus_onto(dst, orig, relmap) *dst = orig relative to relmap - * void cpus_fold(dst, orig, sz) dst bits = orig bits mod sz - * - * for_each_cpu_mask(cpu, mask) for-loop cpu over mask using NR_CPUS - * for_each_cpu_mask_nr(cpu, mask) for-loop cpu over mask using nr_cpu_ids - * - * int num_online_cpus() Number of online CPUs - * int num_possible_cpus() Number of all possible CPUs - * int num_present_cpus() Number of present CPUs - * - * int cpu_online(cpu) Is some cpu online? - * int cpu_possible(cpu) Is some cpu possible? - * int cpu_present(cpu) Is some cpu present (can schedule)? - * - * int any_online_cpu(mask) First online cpu in mask - * - * for_each_possible_cpu(cpu) for-loop cpu over cpu_possible_map - * for_each_online_cpu(cpu) for-loop cpu over cpu_online_map - * for_each_present_cpu(cpu) for-loop cpu over cpu_present_map - * - * Subtlety: - * 1) The 'type-checked' form of cpu_isset() causes gcc (3.3.2, anyway) - * to generate slightly worse code. Note for example the additional - * 40 lines of assembly code compiling the "for each possible cpu" - * loops buried in the disk_stat_read() macros calls when compiling - * drivers/block/genhd.c (arch i386, CONFIG_SMP=y). So use a simple - * one-line #define for cpu_isset(), instead of wrapping an inline - * inside a macro, the way we do the other calls. + * set of CPU's in a system, one bit position per CPU number. 
In general, + * only nr_cpu_ids (<= NR_CPUS) bits are valid. */ - #include <linux/kernel.h> #include <linux/threads.h> #include <linux/bitmap.h> typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t; -extern cpumask_t _unused_cpumask_arg_; - -#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS -#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst)) -static inline void __cpu_set(int cpu, volatile cpumask_t *dstp) -{ - set_bit(cpu, dstp->bits); -} - -#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst)) -static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp) -{ - clear_bit(cpu, dstp->bits); -} - -#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS) -static inline void __cpus_setall(cpumask_t *dstp, int nbits) -{ - bitmap_fill(dstp->bits, nbits); -} - -#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS) -static inline void __cpus_clear(cpumask_t *dstp, int nbits) -{ - bitmap_zero(dstp->bits, nbits); -} - -/* No static inline type checking - see Subtlety (1) above. */ -#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits) - -#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask)) -static inline int __cpu_test_and_set(int cpu, cpumask_t *addr) -{ - return test_and_set_bit(cpu, addr->bits); -} - -#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS) -static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p, - const cpumask_t *src2p, int nbits) -{ - return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits); -} - -#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS) -static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p, - const cpumask_t *src2p, int nbits) -{ - bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits); -} - -#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS) -static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p, - const cpumask_t *src2p, int nbits) -{ - bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits); -} - -#define cpus_andnot(dst, src1, src2) \ - __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS) -static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p, - const cpumask_t *src2p, int nbits) -{ - return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits); -} - -#define cpus_complement(dst, src) __cpus_complement(&(dst), &(src), NR_CPUS) -static inline void __cpus_complement(cpumask_t *dstp, - const cpumask_t *srcp, int nbits) -{ - bitmap_complement(dstp->bits, srcp->bits, nbits); -} - -#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS) -static inline int __cpus_equal(const cpumask_t *src1p, - const cpumask_t *src2p, int nbits) -{ - return bitmap_equal(src1p->bits, src2p->bits, nbits); -} - -#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS) -static inline int __cpus_intersects(const cpumask_t *src1p, - const cpumask_t *src2p, int nbits) -{ - return bitmap_intersects(src1p->bits, src2p->bits, nbits); -} - -#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS) -static inline int __cpus_subset(const cpumask_t *src1p, - const cpumask_t *src2p, int nbits) -{ - return bitmap_subset(src1p->bits, src2p->bits, nbits); -} - -#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS) -static inline int __cpus_empty(const cpumask_t *srcp, int nbits) -{ - return bitmap_empty(srcp->bits, nbits); -} - -#define cpus_full(cpumask) __cpus_full(&(cpumask), NR_CPUS) -static inline int __cpus_full(const cpumask_t *srcp, int nbits) 
-{ - return bitmap_full(srcp->bits, nbits); -} - -#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS) -static inline int __cpus_weight(const cpumask_t *srcp, int nbits) -{ - return bitmap_weight(srcp->bits, nbits); -} - -#define cpus_shift_right(dst, src, n) \ - __cpus_shift_right(&(dst), &(src), (n), NR_CPUS) -static inline void __cpus_shift_right(cpumask_t *dstp, - const cpumask_t *srcp, int n, int nbits) -{ - bitmap_shift_right(dstp->bits, srcp->bits, n, nbits); -} - -#define cpus_shift_left(dst, src, n) \ - __cpus_shift_left(&(dst), &(src), (n), NR_CPUS) -static inline void __cpus_shift_left(cpumask_t *dstp, - const cpumask_t *srcp, int n, int nbits) -{ - bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); -} -#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ /** - * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask * - * @bitmap: the bitmap - * - * There are a few places where cpumask_var_t isn't appropriate and - * static cpumasks must be used (eg. very early boot), yet we don't - * expose the definition of 'struct cpumask'. + * cpumask_bits - get the bits in a cpumask + * @maskp: the struct cpumask * * - * This does the conversion, and can be used as a constant initializer. + * You should only assume nr_cpu_ids bits of this mask are valid. This is + * a macro so it's const-correct. */ -#define to_cpumask(bitmap) \ - ((struct cpumask *)(1 ? (bitmap) \ - : (void *)sizeof(__check_is_bitmap(bitmap)))) - -static inline int __check_is_bitmap(const unsigned long *bitmap) -{ - return 1; -} - -/* - * Special-case data structure for "single bit set only" constant CPU masks. - * - * We pre-generate all the 64 (or 32) possible bit positions, with enough - * padding to the left and the right, and return the constant pointer - * appropriately offset. - */ -extern const unsigned long - cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)]; - -static inline const struct cpumask *get_cpu_mask(unsigned int cpu) -{ - const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG]; - p -= cpu / BITS_PER_LONG; - return to_cpumask(p); -} - -#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS -/* - * In cases where we take the address of the cpumask immediately, - * gcc optimizes it out (it's a constant) and there's no huge stack - * variable created: - */ -#define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu)) - - -#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS) - -#if NR_CPUS <= BITS_PER_LONG - -#define CPU_MASK_ALL \ -(cpumask_t) { { \ - [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ -} } - -#define CPU_MASK_ALL_PTR (&CPU_MASK_ALL) - -#else - -#define CPU_MASK_ALL \ -(cpumask_t) { { \ - [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \ - [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ -} } - -/* cpu_mask_all is in init/main.c */ -extern cpumask_t cpu_mask_all; -#define CPU_MASK_ALL_PTR (&cpu_mask_all) - -#endif - -#define CPU_MASK_NONE \ -(cpumask_t) { { \ - [0 ... 
BITS_TO_LONGS(NR_CPUS)-1] = 0UL \ -} } - -#define CPU_MASK_CPU0 \ -(cpumask_t) { { \ - [0] = 1UL \ -} } - -#define cpus_addr(src) ((src).bits) - -#if NR_CPUS > BITS_PER_LONG -#define CPUMASK_ALLOC(m) struct m *m = kmalloc(sizeof(*m), GFP_KERNEL) -#define CPUMASK_FREE(m) kfree(m) -#else -#define CPUMASK_ALLOC(m) struct m _m, *m = &_m -#define CPUMASK_FREE(m) -#endif -#define CPUMASK_PTR(v, m) cpumask_t *v = &(m->v) - -#define cpu_remap(oldbit, old, new) \ - __cpu_remap((oldbit), &(old), &(new), NR_CPUS) -static inline int __cpu_remap(int oldbit, - const cpumask_t *oldp, const cpumask_t *newp, int nbits) -{ - return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits); -} - -#define cpus_remap(dst, src, old, new) \ - __cpus_remap(&(dst), &(src), &(old), &(new), NR_CPUS) -static inline void __cpus_remap(cpumask_t *dstp, const cpumask_t *srcp, - const cpumask_t *oldp, const cpumask_t *newp, int nbits) -{ - bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits); -} - -#define cpus_onto(dst, orig, relmap) \ - __cpus_onto(&(dst), &(orig), &(relmap), NR_CPUS) -static inline void __cpus_onto(cpumask_t *dstp, const cpumask_t *origp, - const cpumask_t *relmapp, int nbits) -{ - bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits); -} - -#define cpus_fold(dst, orig, sz) \ - __cpus_fold(&(dst), &(orig), sz, NR_CPUS) -static inline void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp, - int sz, int nbits) -{ - bitmap_fold(dstp->bits, origp->bits, sz, nbits); -} -#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ +#define cpumask_bits(maskp) ((maskp)->bits) #if NR_CPUS == 1 - #define nr_cpu_ids 1 -#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS -#define first_cpu(src) ({ (void)(src); 0; }) -#define next_cpu(n, src) ({ (void)(src); 1; }) -#define any_online_cpu(mask) 0 -#define for_each_cpu_mask(cpu, mask) \ - for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) -#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ -#else /* NR_CPUS > 1 */ - +#else extern int nr_cpu_ids; -#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS -int __first_cpu(const cpumask_t *srcp); -int __next_cpu(int n, const cpumask_t *srcp); -int __any_online_cpu(const cpumask_t *mask); - -#define first_cpu(src) __first_cpu(&(src)) -#define next_cpu(n, src) __next_cpu((n), &(src)) -#define any_online_cpu(mask) __any_online_cpu(&(mask)) -#define for_each_cpu_mask(cpu, mask) \ - for ((cpu) = -1; \ - (cpu) = next_cpu((cpu), (mask)), \ - (cpu) < NR_CPUS; ) -#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ #endif -#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS -#if NR_CPUS <= 64 - -#define next_cpu_nr(n, src) next_cpu(n, src) -#define cpus_weight_nr(cpumask) cpus_weight(cpumask) -#define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask) - -#else /* NR_CPUS > 64 */ - -int __next_cpu_nr(int n, const cpumask_t *srcp); -#define next_cpu_nr(n, src) __next_cpu_nr((n), &(src)) -#define cpus_weight_nr(cpumask) __cpus_weight(&(cpumask), nr_cpu_ids) -#define for_each_cpu_mask_nr(cpu, mask) \ - for ((cpu) = -1; \ - (cpu) = next_cpu_nr((cpu), (mask)), \ - (cpu) < nr_cpu_ids; ) - -#endif /* NR_CPUS > 64 */ -#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ +#ifdef CONFIG_CPUMASK_OFFSTACK +/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also, + * not all bits may be allocated. 
*/ +#define nr_cpumask_bits nr_cpu_ids +#else +#define nr_cpumask_bits NR_CPUS +#endif /* * The following particular system cpumasks and operations manage @@ -487,12 +80,6 @@ extern const struct cpumask *const cpu_online_mask; extern const struct cpumask *const cpu_present_mask; extern const struct cpumask *const cpu_active_mask; -/* These strip const, as traditionally they weren't const. */ -#define cpu_possible_map (*(cpumask_t *)cpu_possible_mask) -#define cpu_online_map (*(cpumask_t *)cpu_online_mask) -#define cpu_present_map (*(cpumask_t *)cpu_present_mask) -#define cpu_active_map (*(cpumask_t *)cpu_active_mask) - #if NR_CPUS > 1 #define num_online_cpus() cpumask_weight(cpu_online_mask) #define num_possible_cpus() cpumask_weight(cpu_possible_mask) @@ -511,35 +98,6 @@ extern const struct cpumask *const cpu_active_mask; #define cpu_active(cpu) ((cpu) == 0) #endif -#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) - -/* These are the new versions of the cpumask operators: passed by pointer. - * The older versions will be implemented in terms of these, then deleted. */ -#define cpumask_bits(maskp) ((maskp)->bits) - -#if NR_CPUS <= BITS_PER_LONG -#define CPU_BITS_ALL \ -{ \ - [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ -} - -#else /* NR_CPUS > BITS_PER_LONG */ - -#define CPU_BITS_ALL \ -{ \ - [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \ - [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ -} -#endif /* NR_CPUS > BITS_PER_LONG */ - -#ifdef CONFIG_CPUMASK_OFFSTACK -/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also, - * not all bits may be allocated. */ -#define nr_cpumask_bits nr_cpu_ids -#else -#define nr_cpumask_bits NR_CPUS -#endif - /* verify cpu argument to cpumask_* operators */ static inline unsigned int cpumask_check(unsigned int cpu) { @@ -715,6 +273,18 @@ static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask) } /** + * cpumask_test_and_clear_cpu - atomically test and clear a cpu in a cpumask + * @cpu: cpu number (< nr_cpu_ids) + * @cpumask: the cpumask pointer + * + * test_and_clear_bit wrapper for cpumasks. + */ +static inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask) +{ + return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask)); +} + +/** * cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask * @dstp: the cpumask pointer */ @@ -1088,4 +658,241 @@ void set_cpu_active(unsigned int cpu, bool active); void init_cpu_present(const struct cpumask *src); void init_cpu_possible(const struct cpumask *src); void init_cpu_online(const struct cpumask *src); + +/** + * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask * + * @bitmap: the bitmap + * + * There are a few places where cpumask_var_t isn't appropriate and + * static cpumasks must be used (eg. very early boot), yet we don't + * expose the definition of 'struct cpumask'. + * + * This does the conversion, and can be used as a constant initializer. + */ +#define to_cpumask(bitmap) \ + ((struct cpumask *)(1 ? (bitmap) \ + : (void *)sizeof(__check_is_bitmap(bitmap)))) + +static inline int __check_is_bitmap(const unsigned long *bitmap) +{ + return 1; +} + +/* + * Special-case data structure for "single bit set only" constant CPU masks. + * + * We pre-generate all the 64 (or 32) possible bit positions, with enough + * padding to the left and the right, and return the constant pointer + * appropriately offset. 
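/*
 * Illustrative sketch (not from the patch): the new atomic test-and-clear
 * wrapper lets a consumer claim one CPU's worth of work out of a pending
 * mask. example_claim_cpu() and the mask it scans are hypothetical.
 */
#include <linux/cpumask.h>

static int example_claim_cpu(struct cpumask *pending)
{
        unsigned int cpu;

        for_each_cpu(cpu, pending)
                if (cpumask_test_and_clear_cpu(cpu, pending))
                        return cpu;
        return -1;
}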
+ */ +extern const unsigned long + cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)]; + +static inline const struct cpumask *get_cpu_mask(unsigned int cpu) +{ + const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG]; + p -= cpu / BITS_PER_LONG; + return to_cpumask(p); +} + +#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) + +#if NR_CPUS <= BITS_PER_LONG +#define CPU_BITS_ALL \ +{ \ + [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ +} + +#else /* NR_CPUS > BITS_PER_LONG */ + +#define CPU_BITS_ALL \ +{ \ + [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \ + [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ +} +#endif /* NR_CPUS > BITS_PER_LONG */ + +/* + * + * From here down, all obsolete. Use cpumask_ variants! + * + */ +#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS +/* These strip const, as traditionally they weren't const. */ +#define cpu_possible_map (*(cpumask_t *)cpu_possible_mask) +#define cpu_online_map (*(cpumask_t *)cpu_online_mask) +#define cpu_present_map (*(cpumask_t *)cpu_present_mask) +#define cpu_active_map (*(cpumask_t *)cpu_active_mask) + +#define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu)) + +#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS) + +#if NR_CPUS <= BITS_PER_LONG + +#define CPU_MASK_ALL \ +(cpumask_t) { { \ + [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ +} } + +#else + +#define CPU_MASK_ALL \ +(cpumask_t) { { \ + [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \ + [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ +} } + +#endif + +#define CPU_MASK_NONE \ +(cpumask_t) { { \ + [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \ +} } + +#define CPU_MASK_CPU0 \ +(cpumask_t) { { \ + [0] = 1UL \ +} } + +#if NR_CPUS == 1 +#define first_cpu(src) ({ (void)(src); 0; }) +#define next_cpu(n, src) ({ (void)(src); 1; }) +#define any_online_cpu(mask) 0 +#define for_each_cpu_mask(cpu, mask) \ + for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) +#else /* NR_CPUS > 1 */ +int __first_cpu(const cpumask_t *srcp); +int __next_cpu(int n, const cpumask_t *srcp); +int __any_online_cpu(const cpumask_t *mask); + +#define first_cpu(src) __first_cpu(&(src)) +#define next_cpu(n, src) __next_cpu((n), &(src)) +#define any_online_cpu(mask) __any_online_cpu(&(mask)) +#define for_each_cpu_mask(cpu, mask) \ + for ((cpu) = -1; \ + (cpu) = next_cpu((cpu), (mask)), \ + (cpu) < NR_CPUS; ) +#endif /* SMP */ + +#if NR_CPUS <= 64 + +#define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask) + +#else /* NR_CPUS > 64 */ + +int __next_cpu_nr(int n, const cpumask_t *srcp); +#define for_each_cpu_mask_nr(cpu, mask) \ + for ((cpu) = -1; \ + (cpu) = __next_cpu_nr((cpu), &(mask)), \ + (cpu) < nr_cpu_ids; ) + +#endif /* NR_CPUS > 64 */ + +#define cpus_addr(src) ((src).bits) + +#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst)) +static inline void __cpu_set(int cpu, volatile cpumask_t *dstp) +{ + set_bit(cpu, dstp->bits); +} + +#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst)) +static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp) +{ + clear_bit(cpu, dstp->bits); +} + +#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS) +static inline void __cpus_setall(cpumask_t *dstp, int nbits) +{ + bitmap_fill(dstp->bits, nbits); +} + +#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS) +static inline void __cpus_clear(cpumask_t *dstp, int nbits) +{ + bitmap_zero(dstp->bits, nbits); +} + +/* No static inline type checking - see Subtlety (1) above. 
*/ +#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits) + +#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask)) +static inline int __cpu_test_and_set(int cpu, cpumask_t *addr) +{ + return test_and_set_bit(cpu, addr->bits); +} + +#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS) +static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p, + const cpumask_t *src2p, int nbits) +{ + return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits); +} + +#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS) +static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p, + const cpumask_t *src2p, int nbits) +{ + bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits); +} + +#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS) +static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p, + const cpumask_t *src2p, int nbits) +{ + bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits); +} + +#define cpus_andnot(dst, src1, src2) \ + __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS) +static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p, + const cpumask_t *src2p, int nbits) +{ + return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits); +} + +#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS) +static inline int __cpus_equal(const cpumask_t *src1p, + const cpumask_t *src2p, int nbits) +{ + return bitmap_equal(src1p->bits, src2p->bits, nbits); +} + +#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS) +static inline int __cpus_intersects(const cpumask_t *src1p, + const cpumask_t *src2p, int nbits) +{ + return bitmap_intersects(src1p->bits, src2p->bits, nbits); +} + +#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS) +static inline int __cpus_subset(const cpumask_t *src1p, + const cpumask_t *src2p, int nbits) +{ + return bitmap_subset(src1p->bits, src2p->bits, nbits); +} + +#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS) +static inline int __cpus_empty(const cpumask_t *srcp, int nbits) +{ + return bitmap_empty(srcp->bits, nbits); +} + +#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS) +static inline int __cpus_weight(const cpumask_t *srcp, int nbits) +{ + return bitmap_weight(srcp->bits, nbits); +} + +#define cpus_shift_left(dst, src, n) \ + __cpus_shift_left(&(dst), &(src), (n), NR_CPUS) +static inline void __cpus_shift_left(cpumask_t *dstp, + const cpumask_t *srcp, int n, int nbits) +{ + bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); +} +#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ + #endif /* __LINUX_CPUMASK_H */ diff --git a/include/linux/cred.h b/include/linux/cred.h index fb371601a3b4..4e3387a89cb9 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -176,23 +176,7 @@ extern void __invalid_creds(const struct cred *, const char *, unsigned); extern void __validate_process_creds(struct task_struct *, const char *, unsigned); -static inline bool creds_are_invalid(const struct cred *cred) -{ - if (cred->magic != CRED_MAGIC) - return true; - if (atomic_read(&cred->usage) < atomic_read(&cred->subscribers)) - return true; -#ifdef CONFIG_SECURITY_SELINUX - if (selinux_is_enabled()) { - if ((unsigned long) cred->security < PAGE_SIZE) - return true; - if ((*(u32 *)cred->security & 0xffffff00) == - (POISON_FREE << 24 | POISON_FREE << 16 | POISON_FREE << 8)) - return true; - } -#endif - return false; -} +extern bool 
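/*
 * Illustrative sketch (not from the patch): the same intersection written
 * with an obsolete by-value call and with the preferred pointer-based API
 * that the relocated "obsolete" section points to. example_intersect() is
 * hypothetical.
 */
#include <linux/cpumask.h>

static void example_intersect(struct cpumask *dst)
{
        /* obsolete spelling: cpus_and(*dst, cpu_online_map, cpu_present_map); */
        cpumask_and(dst, cpu_online_mask, cpu_present_mask);
}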
creds_are_invalid(const struct cred *cred); static inline void __validate_creds(const struct cred *cred, const char *file, unsigned line) diff --git a/include/linux/dca.h b/include/linux/dca.h index 9c20c7e87d0a..d27a7a05718d 100644 --- a/include/linux/dca.h +++ b/include/linux/dca.h @@ -20,6 +20,9 @@ */ #ifndef DCA_H #define DCA_H + +#include <linux/pci.h> + /* DCA Provider API */ /* DCA Notifier Interface */ @@ -36,6 +39,12 @@ struct dca_provider { int id; }; +struct dca_domain { + struct list_head node; + struct list_head dca_providers; + struct pci_bus *pci_rc; +}; + struct dca_ops { int (*add_requester) (struct dca_provider *, struct device *); int (*remove_requester) (struct dca_provider *, struct device *); @@ -47,7 +56,7 @@ struct dca_ops { struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size); void free_dca_provider(struct dca_provider *dca); int register_dca_provider(struct dca_provider *dca, struct device *dev); -void unregister_dca_provider(struct dca_provider *dca); +void unregister_dca_provider(struct dca_provider *dca, struct device *dev); static inline void *dca_priv(struct dca_provider *dca) { diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index eb5c2ba2f81a..fc1b930f246c 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -9,7 +9,7 @@ * 2 as published by the Free Software Foundation. * * debugfs is for people to use instead of /proc or /sys. - * See Documentation/DocBook/kernel-api for more details. + * See Documentation/DocBook/filesystems for more details. */ #ifndef _DEBUGFS_H_ diff --git a/include/linux/device.h b/include/linux/device.h index aca31bf7d8ed..2ea3e4921812 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -124,7 +124,9 @@ struct device_driver { struct bus_type *bus; struct module *owner; - const char *mod_name; /* used for built-in modules */ + const char *mod_name; /* used for built-in modules */ + + bool suppress_bind_attrs; /* disables bind/unbind via sysfs */ int (*probe) (struct device *dev); int (*remove) (struct device *dev); diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index ffefba81c818..2b9f2ac7ed60 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -48,19 +48,20 @@ enum dma_status { /** * enum dma_transaction_type - DMA transaction types/indexes + * + * Note: The DMA_ASYNC_TX capability is not to be set by drivers. It is + * automatically set as dma devices are registered. */ enum dma_transaction_type { DMA_MEMCPY, DMA_XOR, - DMA_PQ_XOR, - DMA_DUAL_XOR, - DMA_PQ_UPDATE, - DMA_ZERO_SUM, - DMA_PQ_ZERO_SUM, + DMA_PQ, + DMA_XOR_VAL, + DMA_PQ_VAL, DMA_MEMSET, - DMA_MEMCPY_CRC32C, DMA_INTERRUPT, DMA_PRIVATE, + DMA_ASYNC_TX, DMA_SLAVE, }; @@ -70,18 +71,25 @@ enum dma_transaction_type { /** * enum dma_ctrl_flags - DMA flags to augment operation preparation, - * control completion, and communicate status. + * control completion, and communicate status. * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of - * this transaction + * this transaction * @DMA_CTRL_ACK - the descriptor cannot be reused until the client - * acknowledges receipt, i.e. has has a chance to establish any - * dependency chains + * acknowledges receipt, i.e. 
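/*
 * Illustrative sketch (not from the patch): a driver that cannot cope with
 * manual rebinding can now hide the sysfs bind/unbind attributes via the
 * new suppress_bind_attrs field. example_probe() and the platform driver
 * shown are hypothetical.
 */
#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
        return 0;
}

static struct platform_driver example_driver = {
        .probe  = example_probe,
        .driver = {
                .name                   = "example",
                .owner                  = THIS_MODULE,
                .suppress_bind_attrs    = true,
        },
};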
has has a chance to establish any dependency + * chains * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s) * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s) * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single * (if not set, do the source dma-unmapping as page) * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single * (if not set, do the destination dma-unmapping as page) + * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q + * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P + * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as + * sources that were the result of a previous operation, in the case of a PQ + * operation it continues the calculation with new sources + * @DMA_PREP_FENCE - tell the driver that subsequent operations depend + * on the result of this operation */ enum dma_ctrl_flags { DMA_PREP_INTERRUPT = (1 << 0), @@ -90,9 +98,32 @@ enum dma_ctrl_flags { DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3), DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4), DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5), + DMA_PREP_PQ_DISABLE_P = (1 << 6), + DMA_PREP_PQ_DISABLE_Q = (1 << 7), + DMA_PREP_CONTINUE = (1 << 8), + DMA_PREP_FENCE = (1 << 9), }; /** + * enum sum_check_bits - bit position of pq_check_flags + */ +enum sum_check_bits { + SUM_CHECK_P = 0, + SUM_CHECK_Q = 1, +}; + +/** + * enum pq_check_flags - result of async_{xor,pq}_zero_sum operations + * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise + * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise + */ +enum sum_check_flags { + SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P), + SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q), +}; + + +/** * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t. 
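/*
 * Illustrative sketch (not from the patch): interpreting the result word
 * filled in by an asynchronous P/Q validate. example_report() is
 * hypothetical; pqres would come from a device_prep_dma_pq_val() completion.
 */
#include <linux/kernel.h>
#include <linux/dmaengine.h>

static void example_report(enum sum_check_flags pqres)
{
        if (pqres & SUM_CHECK_P_RESULT)
                pr_err("xor parity (P) mismatch\n");
        if (pqres & SUM_CHECK_Q_RESULT)
                pr_err("reed-solomon syndrome (Q) mismatch\n");
}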
* See linux/cpumask.h */ @@ -180,8 +211,6 @@ typedef void (*dma_async_tx_callback)(void *dma_async_param); * @flags: flags to augment operation preparation, control completion, and * communicate status * @phys: physical address of the descriptor - * @tx_list: driver common field for operations that require multiple - * descriptors * @chan: target channel for this operation * @tx_submit: set the prepared descriptor(s) to be executed by the engine * @callback: routine to call after this operation is complete @@ -195,7 +224,6 @@ struct dma_async_tx_descriptor { dma_cookie_t cookie; enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */ dma_addr_t phys; - struct list_head tx_list; struct dma_chan *chan; dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); dma_async_tx_callback callback; @@ -213,6 +241,11 @@ struct dma_async_tx_descriptor { * @global_node: list_head for global dma_device_list * @cap_mask: one or more dma_capability flags * @max_xor: maximum number of xor sources, 0 if no capability + * @max_pq: maximum number of PQ sources and PQ-continue capability + * @copy_align: alignment shift for memcpy operations + * @xor_align: alignment shift for xor operations + * @pq_align: alignment shift for pq operations + * @fill_align: alignment shift for memset operations * @dev_id: unique device ID * @dev: struct device reference for dma mapping api * @device_alloc_chan_resources: allocate resources and return the @@ -220,7 +253,9 @@ struct dma_async_tx_descriptor { * @device_free_chan_resources: release DMA channel's resources * @device_prep_dma_memcpy: prepares a memcpy operation * @device_prep_dma_xor: prepares a xor operation - * @device_prep_dma_zero_sum: prepares a zero_sum operation + * @device_prep_dma_xor_val: prepares a xor validation operation + * @device_prep_dma_pq: prepares a pq operation + * @device_prep_dma_pq_val: prepares a pqzero_sum operation * @device_prep_dma_memset: prepares a memset operation * @device_prep_dma_interrupt: prepares an end of chain interrupt operation * @device_prep_slave_sg: prepares a slave dma operation @@ -235,7 +270,13 @@ struct dma_device { struct list_head channels; struct list_head global_node; dma_cap_mask_t cap_mask; - int max_xor; + unsigned short max_xor; + unsigned short max_pq; + u8 copy_align; + u8 xor_align; + u8 pq_align; + u8 fill_align; + #define DMA_HAS_PQ_CONTINUE (1 << 15) int dev_id; struct device *dev; @@ -249,9 +290,17 @@ struct dma_device { struct dma_async_tx_descriptor *(*device_prep_dma_xor)( struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt, size_t len, unsigned long flags); - struct dma_async_tx_descriptor *(*device_prep_dma_zero_sum)( + struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)( struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt, - size_t len, u32 *result, unsigned long flags); + size_t len, enum sum_check_flags *result, unsigned long flags); + struct dma_async_tx_descriptor *(*device_prep_dma_pq)( + struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, + unsigned int src_cnt, const unsigned char *scf, + size_t len, unsigned long flags); + struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)( + struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, + unsigned int src_cnt, const unsigned char *scf, size_t len, + enum sum_check_flags *pqres, unsigned long flags); struct dma_async_tx_descriptor *(*device_prep_dma_memset)( struct dma_chan *chan, dma_addr_t dest, int value, size_t len, unsigned long flags); @@ -270,6 +319,96 @@ struct dma_device 
{ void (*device_issue_pending)(struct dma_chan *chan); }; +static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len) +{ + size_t mask; + + if (!align) + return true; + mask = (1 << align) - 1; + if (mask & (off1 | off2 | len)) + return false; + return true; +} + +static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1, + size_t off2, size_t len) +{ + return dmaengine_check_align(dev->copy_align, off1, off2, len); +} + +static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1, + size_t off2, size_t len) +{ + return dmaengine_check_align(dev->xor_align, off1, off2, len); +} + +static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1, + size_t off2, size_t len) +{ + return dmaengine_check_align(dev->pq_align, off1, off2, len); +} + +static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1, + size_t off2, size_t len) +{ + return dmaengine_check_align(dev->fill_align, off1, off2, len); +} + +static inline void +dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue) +{ + dma->max_pq = maxpq; + if (has_pq_continue) + dma->max_pq |= DMA_HAS_PQ_CONTINUE; +} + +static inline bool dmaf_continue(enum dma_ctrl_flags flags) +{ + return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE; +} + +static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags) +{ + enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P; + + return (flags & mask) == mask; +} + +static inline bool dma_dev_has_pq_continue(struct dma_device *dma) +{ + return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE; +} + +static unsigned short dma_dev_to_maxpq(struct dma_device *dma) +{ + return dma->max_pq & ~DMA_HAS_PQ_CONTINUE; +} + +/* dma_maxpq - reduce maxpq in the face of continued operations + * @dma - dma device with PQ capability + * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set + * + * When an engine does not support native continuation we need 3 extra + * source slots to reuse P and Q with the following coefficients: + * 1/ {00} * P : remove P from Q', but use it as a source for P' + * 2/ {01} * Q : use Q to continue Q' calculation + * 3/ {00} * Q : subtract Q from P' to cancel (2) + * + * In the case where P is disabled we only need 1 extra source: + * 1/ {01} * Q : use Q to continue Q' calculation + */ +static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags) +{ + if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags)) + return dma_dev_to_maxpq(dma); + else if (dmaf_p_disabled_continue(flags)) + return dma_dev_to_maxpq(dma) - 1; + else if (dmaf_continue(flags)) + return dma_dev_to_maxpq(dma) - 3; + BUG(); +} + /* --- public DMA engine API --- */ #ifdef CONFIG_DMA_ENGINE @@ -299,7 +438,11 @@ static inline void net_dmaengine_put(void) #ifdef CONFIG_ASYNC_TX_DMA #define async_dmaengine_get() dmaengine_get() #define async_dmaengine_put() dmaengine_put() +#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH +#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX) +#else #define async_dma_find_channel(type) dma_find_channel(type) +#endif /* CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH */ #else static inline void async_dmaengine_get(void) { @@ -312,7 +455,7 @@ async_dma_find_channel(enum dma_transaction_type type) { return NULL; } -#endif +#endif /* CONFIG_ASYNC_TX_DMA */ dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest, void *src, size_t len); diff --git a/include/linux/dmar.h b/include/linux/dmar.h index 
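/*
 * Illustrative sketch (not from the patch): the source-count arithmetic
 * dma_maxpq() performs for a hypothetical engine that advertises 8 PQ
 * sources but has no native continuation support.
 */
#include <linux/dmaengine.h>

static void example_maxpq(struct dma_device *dma)
{
        dma_set_maxpq(dma, 8, 0);

        /* dma_maxpq(dma, 0)                                         == 8 */
        /* dma_maxpq(dma, DMA_PREP_CONTINUE)                         == 5 */
        /* dma_maxpq(dma, DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P) == 7 */
}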
4a2b162c256a..5de4c9e5856d 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -208,16 +208,9 @@ struct dmar_atsr_unit { u8 include_all:1; /* include all ports */ }; -/* Intel DMAR initialization functions */ extern int intel_iommu_init(void); -#else -static inline int intel_iommu_init(void) -{ -#ifdef CONFIG_INTR_REMAP - return dmar_dev_scope_init(); -#else - return -ENODEV; -#endif -} -#endif /* !CONFIG_DMAR */ +#else /* !CONFIG_DMAR: */ +static inline int intel_iommu_init(void) { return -ENODEV; } +#endif /* CONFIG_DMAR */ + #endif /* __DMAR_H__ */ diff --git a/include/linux/elf.h b/include/linux/elf.h index 45a937be6d38..90a4ed0ea0e5 100644 --- a/include/linux/elf.h +++ b/include/linux/elf.h @@ -361,6 +361,7 @@ typedef struct elf64_shdr { #define NT_PPC_VSX 0x102 /* PowerPC VSX registers */ #define NT_386_TLS 0x200 /* i386 TLS slots (struct user_desc) */ #define NT_386_IOPERM 0x201 /* x86 io permission bitmap (1=deny) */ +#define NT_PRXSTATUS 0x300 /* s390 upper register halves */ /* Note header in a PT_NOTE section */ diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h index 3b85ba6479f4..94dd10366a78 100644 --- a/include/linux/eventfd.h +++ b/include/linux/eventfd.h @@ -27,6 +27,7 @@ #ifdef CONFIG_EVENTFD +struct file *eventfd_file_create(unsigned int count, int flags); struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx); void eventfd_ctx_put(struct eventfd_ctx *ctx); struct file *eventfd_fget(int fd); @@ -40,6 +41,11 @@ int eventfd_signal(struct eventfd_ctx *ctx, int n); * Ugly ugly ugly error layer to support modules that uses eventfd but * pretend to work in !CONFIG_EVENTFD configurations. Namely, AIO. */ +static inline struct file *eventfd_file_create(unsigned int count, int flags) +{ + return ERR_PTR(-ENOSYS); +} + static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd) { return ERR_PTR(-ENOSYS); diff --git a/include/linux/ext3_fs_i.h b/include/linux/ext3_fs_i.h index ca1bfe90004f..93e7428156ba 100644 --- a/include/linux/ext3_fs_i.h +++ b/include/linux/ext3_fs_i.h @@ -137,6 +137,14 @@ struct ext3_inode_info { * by other means, so we have truncate_mutex. */ struct mutex truncate_mutex; + + /* + * Transactions that contain inode's metadata needed to complete + * fsync and fdatasync, respectively. 
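/*
 * Illustrative sketch (not from the patch): the new eventfd_file_create()
 * hands back a struct file without installing a file descriptor, which an
 * in-kernel user can hold a reference to. example_get_eventfd_file() is
 * hypothetical.
 */
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/eventfd.h>

static struct file *example_get_eventfd_file(void)
{
        struct file *file = eventfd_file_create(0, 0);

        if (IS_ERR(file))
                return NULL;    /* ERR_PTR(-ENOSYS) when eventfd is not built in */
        return file;
}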
+ */ + atomic_t i_sync_tid; + atomic_t i_datasync_tid; + struct inode vfs_inode; }; diff --git a/include/linux/fb.h b/include/linux/fb.h index f847df9e99b6..de9c722e7b90 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -133,6 +133,7 @@ struct dentry; #define FB_ACCEL_NEOMAGIC_NM2230 96 /* NeoMagic NM2230 */ #define FB_ACCEL_NEOMAGIC_NM2360 97 /* NeoMagic NM2360 */ #define FB_ACCEL_NEOMAGIC_NM2380 98 /* NeoMagic NM2380 */ +#define FB_ACCEL_PXA3XX 99 /* PXA3xx */ #define FB_ACCEL_SAVAGE4 0x80 /* S3 Savage4 */ #define FB_ACCEL_SAVAGE3D 0x81 /* S3 Savage3D */ @@ -668,12 +669,6 @@ struct fb_ops { /* perform fb specific mmap */ int (*fb_mmap)(struct fb_info *info, struct vm_area_struct *vma); - /* save current hardware state */ - void (*fb_save_state)(struct fb_info *info); - - /* restore saved state */ - void (*fb_restore_state)(struct fb_info *info); - /* get capability given var */ void (*fb_get_caps)(struct fb_info *info, struct fb_blit_caps *caps, struct fb_var_screeninfo *var); diff --git a/include/linux/firewire.h b/include/linux/firewire.h index 192d1e43c43c..7e1d4dec83e7 100644 --- a/include/linux/firewire.h +++ b/include/linux/firewire.h @@ -134,20 +134,6 @@ struct fw_card { u32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4]; }; -static inline struct fw_card *fw_card_get(struct fw_card *card) -{ - kref_get(&card->kref); - - return card; -} - -void fw_card_release(struct kref *kref); - -static inline void fw_card_put(struct fw_card *card) -{ - kref_put(&card->kref, fw_card_release); -} - struct fw_attribute_group { struct attribute_group *groups[2]; struct attribute_group group; diff --git a/include/linux/flex_array.h b/include/linux/flex_array.h index 45ff18491514..1d747f72298b 100644 --- a/include/linux/flex_array.h +++ b/include/linux/flex_array.h @@ -31,10 +31,32 @@ struct flex_array { }; }; -#define FLEX_ARRAY_INIT(size, total) { { {\ - .element_size = (size), \ - .total_nr_elements = (total), \ -} } } +/* Number of bytes left in base struct flex_array, excluding metadata */ +#define FLEX_ARRAY_BASE_BYTES_LEFT \ + (FLEX_ARRAY_BASE_SIZE - offsetof(struct flex_array, parts)) + +/* Number of pointers in base to struct flex_array_part pages */ +#define FLEX_ARRAY_NR_BASE_PTRS \ + (FLEX_ARRAY_BASE_BYTES_LEFT / sizeof(struct flex_array_part *)) + +/* Number of elements of size that fit in struct flex_array_part */ +#define FLEX_ARRAY_ELEMENTS_PER_PART(size) \ + (FLEX_ARRAY_PART_SIZE / size) + +/* + * Defines a statically allocated flex array and ensures its parameters are + * valid. 
+ */ +#define DEFINE_FLEX_ARRAY(__arrayname, __element_size, __total) \ + struct flex_array __arrayname = { { { \ + .element_size = (__element_size), \ + .total_nr_elements = (__total), \ + } } }; \ + static inline void __arrayname##_invalid_parameter(void) \ + { \ + BUILD_BUG_ON((__total) > FLEX_ARRAY_NR_BASE_PTRS * \ + FLEX_ARRAY_ELEMENTS_PER_PART(__element_size)); \ + } struct flex_array *flex_array_alloc(int element_size, unsigned int total, gfp_t flags); @@ -44,6 +66,8 @@ void flex_array_free(struct flex_array *fa); void flex_array_free_parts(struct flex_array *fa); int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src, gfp_t flags); +int flex_array_clear(struct flex_array *fa, unsigned int element_nr); void *flex_array_get(struct flex_array *fa, unsigned int element_nr); +int flex_array_shrink(struct flex_array *fa); #endif /* _FLEX_ARRAY_H */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 90162fb3bf04..2620a8c63571 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -300,6 +300,10 @@ struct inodes_stat_t { #define BLKTRACESTOP _IO(0x12,117) #define BLKTRACETEARDOWN _IO(0x12,118) #define BLKDISCARD _IO(0x12,119) +#define BLKIOMIN _IO(0x12,120) +#define BLKIOOPT _IO(0x12,121) +#define BLKALIGNOFF _IO(0x12,122) +#define BLKPBSZGET _IO(0x12,123) #define BMAP_IOCTL 1 /* obsolete - kept for compatibility */ #define FIBMAP _IO(0x00,1) /* bmap access */ @@ -595,6 +599,7 @@ struct address_space_operations { int (*launder_page) (struct page *); int (*is_partially_uptodate) (struct page *, read_descriptor_t *, unsigned long); + int (*error_remove_page)(struct address_space *, struct page *); }; /* @@ -640,7 +645,6 @@ struct block_device { struct super_block * bd_super; int bd_openers; struct mutex bd_mutex; /* open/close mutex */ - struct semaphore bd_mount_sem; struct list_head bd_inodes; void * bd_holder; int bd_holders; @@ -1066,8 +1070,8 @@ struct file_lock { struct fasync_struct * fl_fasync; /* for lease break notifications */ unsigned long fl_break_time; /* for nonblocking lease breaks */ - struct file_lock_operations *fl_ops; /* Callbacks for filesystems */ - struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */ + const struct file_lock_operations *fl_ops; /* Callbacks for filesystems */ + const struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */ union { struct nfs_lock_info nfs_fl; struct nfs4_lock_info nfs4_fl; @@ -1315,11 +1319,11 @@ struct super_block { unsigned long s_blocksize; unsigned char s_blocksize_bits; unsigned char s_dirt; - unsigned long long s_maxbytes; /* Max file size */ + loff_t s_maxbytes; /* Max file size */ struct file_system_type *s_type; const struct super_operations *s_op; - struct dquot_operations *dq_op; - struct quotactl_ops *s_qcop; + const struct dquot_operations *dq_op; + const struct quotactl_ops *s_qcop; const struct export_operations *s_export_op; unsigned long s_flags; unsigned long s_magic; @@ -2156,6 +2160,7 @@ extern ino_t iunique(struct super_block *, ino_t); extern int inode_needs_sync(struct inode *inode); extern void generic_delete_inode(struct inode *inode); extern void generic_drop_inode(struct inode *inode); +extern int generic_detach_inode(struct inode *inode); extern struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval, int (*test)(struct inode *, void *), @@ -2334,6 +2339,7 @@ extern void get_filesystem(struct file_system_type *fs); extern void put_filesystem(struct file_system_type *fs); extern struct file_system_type 
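/*
 * Illustrative sketch (not from the patch): a statically defined flex array
 * of 1024 u32 counters; the macro's BUILD_BUG_ON rejects totals that cannot
 * fit in the base plus part pages. example_counters and example_store() are
 * hypothetical.
 */
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/flex_array.h>

DEFINE_FLEX_ARRAY(example_counters, sizeof(u32), 1024);

static int example_store(unsigned int nr, u32 val)
{
        return flex_array_put(&example_counters, nr, &val, GFP_KERNEL);
}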
*get_fs_type(const char *name); extern struct super_block *get_super(struct block_device *); +extern struct super_block *get_active_super(struct block_device *bdev); extern struct super_block *user_get_super(dev_t); extern void drop_super(struct super_block *sb); @@ -2381,7 +2387,8 @@ extern int buffer_migrate_page(struct address_space *, #define buffer_migrate_page NULL #endif -extern int inode_change_ok(struct inode *, struct iattr *); +extern int inode_change_ok(const struct inode *, struct iattr *); +extern int inode_newsize_ok(const struct inode *, loff_t offset); extern int __must_check inode_setattr(struct inode *, struct iattr *); extern void file_update_time(struct file *file); @@ -2443,7 +2450,7 @@ static int __fops ## _open(struct inode *inode, struct file *file) \ __simple_attr_check_format(__fmt, 0ull); \ return simple_attr_open(inode, file, __get, __set, __fmt); \ } \ -static struct file_operations __fops = { \ +static const struct file_operations __fops = { \ .owner = THIS_MODULE, \ .open = __fops ## _open, \ .release = simple_attr_release, \ @@ -2467,7 +2474,7 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos); struct ctl_table; -int proc_nr_files(struct ctl_table *table, int write, struct file *filp, +int proc_nr_files(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); int __init get_filesystem_list(char *buf); diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index 84d3532dd3ea..7be0c6fbe880 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h @@ -91,6 +91,8 @@ struct fscache_operation { #define FSCACHE_OP_WAITING 4 /* cleared when op is woken */ #define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */ #define FSCACHE_OP_DEAD 6 /* op is now dead */ +#define FSCACHE_OP_DEC_READ_CNT 7 /* decrement object->n_reads on destruction */ +#define FSCACHE_OP_KEEP_FLAGS 0xc0 /* flags to keep when repurposing an op */ atomic_t usage; unsigned debug_id; /* debugging ID */ @@ -102,6 +104,16 @@ struct fscache_operation { /* operation releaser */ fscache_operation_release_t release; + +#ifdef CONFIG_SLOW_WORK_PROC + const char *name; /* operation name */ + const char *state; /* operation state */ +#define fscache_set_op_name(OP, N) do { (OP)->name = (N); } while(0) +#define fscache_set_op_state(OP, S) do { (OP)->state = (S); } while(0) +#else +#define fscache_set_op_name(OP, N) do { } while(0) +#define fscache_set_op_state(OP, S) do { } while(0) +#endif }; extern atomic_t fscache_op_debug_id; @@ -125,6 +137,7 @@ static inline void fscache_operation_init(struct fscache_operation *op, op->debug_id = atomic_inc_return(&fscache_op_debug_id); op->release = release; INIT_LIST_HEAD(&op->pend_link); + fscache_set_op_state(op, "Init"); } /** @@ -221,8 +234,10 @@ struct fscache_cache_ops { struct fscache_object *(*alloc_object)(struct fscache_cache *cache, struct fscache_cookie *cookie); - /* look up the object for a cookie */ - void (*lookup_object)(struct fscache_object *object); + /* look up the object for a cookie + * - return -ETIMEDOUT to be requeued + */ + int (*lookup_object)(struct fscache_object *object); /* finished looking up */ void (*lookup_complete)(struct fscache_object *object); @@ -297,12 +312,14 @@ struct fscache_cookie { atomic_t usage; /* number of users of this cookie */ atomic_t n_children; /* number of children of this cookie */ spinlock_t lock; + spinlock_t stores_lock; /* lock on page store tree */ struct hlist_head 
backing_objects; /* object(s) backing this file/index */ const struct fscache_cookie_def *def; /* definition */ struct fscache_cookie *parent; /* parent of this entry */ void *netfs_data; /* back pointer to netfs */ struct radix_tree_root stores; /* pages to be stored on this cookie */ #define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */ +#define FSCACHE_COOKIE_STORING_TAG 1 /* pages tag: writing to cache */ unsigned long flags; #define FSCACHE_COOKIE_LOOKING_UP 0 /* T if non-index cookie being looked up still */ @@ -337,6 +354,7 @@ struct fscache_object { FSCACHE_OBJECT_RECYCLING, /* retiring object */ FSCACHE_OBJECT_WITHDRAWING, /* withdrawing object */ FSCACHE_OBJECT_DEAD, /* object is now dead */ + FSCACHE_OBJECT__NSTATES } state; int debug_id; /* debugging ID */ @@ -345,6 +363,7 @@ struct fscache_object { int n_obj_ops; /* number of object ops outstanding on object */ int n_in_progress; /* number of ops in progress */ int n_exclusive; /* number of exclusive ops queued */ + atomic_t n_reads; /* number of read ops in progress */ spinlock_t lock; /* state and operations lock */ unsigned long lookup_jif; /* time at which lookup started */ @@ -358,6 +377,7 @@ struct fscache_object { #define FSCACHE_OBJECT_EV_RELEASE 4 /* T if netfs requested object release */ #define FSCACHE_OBJECT_EV_RETIRE 5 /* T if netfs requested object retirement */ #define FSCACHE_OBJECT_EV_WITHDRAW 6 /* T if cache requested object withdrawal */ +#define FSCACHE_OBJECT_EVENTS_MASK 0x7f /* mask of all events*/ unsigned long flags; #define FSCACHE_OBJECT_LOCK 0 /* T if object is busy being processed */ @@ -373,7 +393,11 @@ struct fscache_object { struct list_head dependents; /* FIFO of dependent objects */ struct list_head dep_link; /* link in parent's dependents list */ struct list_head pending_ops; /* unstarted operations on this object */ +#ifdef CONFIG_FSCACHE_OBJECT_LIST + struct rb_node objlist_link; /* link in global object list */ +#endif pgoff_t store_limit; /* current storage limit */ + loff_t store_limit_l; /* current storage limit */ }; extern const char *fscache_object_states[]; @@ -383,6 +407,10 @@ extern const char *fscache_object_states[]; (obj)->state >= FSCACHE_OBJECT_AVAILABLE && \ (obj)->state < FSCACHE_OBJECT_DYING) +#define fscache_object_is_dead(obj) \ + (test_bit(FSCACHE_IOERROR, &(obj)->cache->flags) && \ + (obj)->state >= FSCACHE_OBJECT_DYING) + extern const struct slow_work_ops fscache_object_slow_work_ops; /** @@ -414,6 +442,7 @@ void fscache_object_init(struct fscache_object *object, object->events = object->event_mask = 0; object->flags = 0; object->store_limit = 0; + object->store_limit_l = 0; object->cache = cache; object->cookie = cookie; object->parent = NULL; @@ -422,6 +451,12 @@ void fscache_object_init(struct fscache_object *object, extern void fscache_object_lookup_negative(struct fscache_object *object); extern void fscache_obtained_object(struct fscache_object *object); +#ifdef CONFIG_FSCACHE_OBJECT_LIST +extern void fscache_object_destroy(struct fscache_object *object); +#else +#define fscache_object_destroy(object) do {} while(0) +#endif + /** * fscache_object_destroyed - Note destruction of an object in a cache * @cache: The cache from which the object came @@ -460,6 +495,7 @@ static inline void fscache_object_lookup_error(struct fscache_object *object) static inline void fscache_set_store_limit(struct fscache_object *object, loff_t i_size) { + object->store_limit_l = i_size; object->store_limit = i_size >> PAGE_SHIFT; if (i_size & ~PAGE_MASK) 
object->store_limit++; diff --git a/include/linux/fscache.h b/include/linux/fscache.h index 6d8ee466e0a0..595ce49288b7 100644 --- a/include/linux/fscache.h +++ b/include/linux/fscache.h @@ -202,6 +202,8 @@ extern int __fscache_write_page(struct fscache_cookie *, struct page *, gfp_t); extern void __fscache_uncache_page(struct fscache_cookie *, struct page *); extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *); extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *); +extern bool __fscache_maybe_release_page(struct fscache_cookie *, struct page *, + gfp_t); /** * fscache_register_netfs - Register a filesystem as desiring caching services @@ -615,4 +617,29 @@ void fscache_wait_on_page_write(struct fscache_cookie *cookie, __fscache_wait_on_page_write(cookie, page); } +/** + * fscache_maybe_release_page - Consider releasing a page, cancelling a store + * @cookie: The cookie representing the cache object + * @page: The netfs page that is being cached. + * @gfp: The gfp flags passed to releasepage() + * + * Consider releasing a page for the vmscan algorithm, on behalf of the netfs's + * releasepage() call. A storage request on the page may be cancelled if it is + * not currently being processed. + * + * The function returns true if the page no longer has a storage request on it, + * and false if a storage request is left in place. If true is returned, the + * page will have been passed to fscache_uncache_page(). If false is returned + * the page cannot be freed yet. + */ +static inline +bool fscache_maybe_release_page(struct fscache_cookie *cookie, + struct page *page, + gfp_t gfp) +{ + if (fscache_cookie_valid(cookie) && PageFsCache(page)) + return __fscache_maybe_release_page(cookie, page, gfp); + return false; +} + #endif /* _LINUX_FSCACHE_H */ diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 3c0924a18daf..0b4f97d24d7f 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -19,7 +19,7 @@ extern int ftrace_enabled; extern int ftrace_enable_sysctl(struct ctl_table *table, int write, - struct file *filp, void __user *buffer, size_t *lenp, + void __user *buffer, size_t *lenp, loff_t *ppos); typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip); @@ -94,7 +94,7 @@ static inline void ftrace_start(void) { } extern int stack_tracer_enabled; int stack_trace_sysctl(struct ctl_table *table, int write, - struct file *file, void __user *buffer, size_t *lenp, + void __user *buffer, size_t *lenp, loff_t *ppos); #endif @@ -241,7 +241,7 @@ extern void ftrace_enable_daemon(void); # define ftrace_set_filter(buf, len, reset) do { } while (0) # define ftrace_disable_daemon() do { } while (0) # define ftrace_enable_daemon() do { } while (0) -static inline void ftrace_release(void *start, unsigned long size) { } +static inline void ftrace_release_mod(struct module *mod) {} static inline int register_ftrace_command(struct ftrace_func_command *cmd) { return -EINVAL; diff --git a/include/linux/futex.h b/include/linux/futex.h index 34956c8fdebf..1e5a26d79232 100644 --- a/include/linux/futex.h +++ b/include/linux/futex.h @@ -4,11 +4,6 @@ #include <linux/compiler.h> #include <linux/types.h> -struct inode; -struct mm_struct; -struct task_struct; -union ktime; - /* Second argument to futex syscall */ @@ -38,8 +33,8 @@ union ktime; #define FUTEX_LOCK_PI_PRIVATE (FUTEX_LOCK_PI | FUTEX_PRIVATE_FLAG) #define FUTEX_UNLOCK_PI_PRIVATE (FUTEX_UNLOCK_PI | FUTEX_PRIVATE_FLAG) #define FUTEX_TRYLOCK_PI_PRIVATE (FUTEX_TRYLOCK_PI | 
FUTEX_PRIVATE_FLAG) -#define FUTEX_WAIT_BITSET_PRIVATE (FUTEX_WAIT_BITS | FUTEX_PRIVATE_FLAG) -#define FUTEX_WAKE_BITSET_PRIVATE (FUTEX_WAKE_BITS | FUTEX_PRIVATE_FLAG) +#define FUTEX_WAIT_BITSET_PRIVATE (FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG) +#define FUTEX_WAKE_BITSET_PRIVATE (FUTEX_WAKE_BITSET | FUTEX_PRIVATE_FLAG) #define FUTEX_WAIT_REQUEUE_PI_PRIVATE (FUTEX_WAIT_REQUEUE_PI | \ FUTEX_PRIVATE_FLAG) #define FUTEX_CMP_REQUEUE_PI_PRIVATE (FUTEX_CMP_REQUEUE_PI | \ @@ -129,6 +124,11 @@ struct robust_list_head { #define FUTEX_BITSET_MATCH_ANY 0xffffffff #ifdef __KERNEL__ +struct inode; +struct mm_struct; +struct task_struct; +union ktime; + long do_futex(u32 __user *uaddr, int op, u32 val, union ktime *timeout, u32 __user *uaddr2, u32 val2, u32 val3); diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 109d179adb93..297df45ffd0a 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -151,7 +151,7 @@ struct gendisk { struct disk_part_tbl *part_tbl; struct hd_struct part0; - struct block_device_operations *fops; + const struct block_device_operations *fops; struct request_queue *queue; void *private_data; diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 7c777a0da17a..557bdad320b6 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -220,7 +220,7 @@ static inline enum zone_type gfp_zone(gfp_t flags) ((1 << ZONES_SHIFT) - 1); if (__builtin_constant_p(bit)) - BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1); + MAYBE_BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1); else { #ifdef CONFIG_DEBUG_VM BUG_ON((GFP_ZONE_BAD >> bit) & 1); @@ -326,7 +326,6 @@ void free_pages_exact(void *virt, size_t size); extern void __free_pages(struct page *page, unsigned int order); extern void free_pages(unsigned long addr, unsigned int order); extern void free_hot_page(struct page *page); -extern void free_cold_page(struct page *page); #define __free_page(page) __free_pages((page), 0) #define free_page(addr) free_pages((addr),0) @@ -336,18 +335,6 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp); void drain_all_pages(void); void drain_local_pages(void *dummy); -extern bool oom_killer_disabled; - -static inline void oom_killer_disable(void) -{ - oom_killer_disabled = true; -} - -static inline void oom_killer_enable(void) -{ - oom_killer_disabled = false; -} - extern gfp_t gfp_allowed_mask; static inline void set_gfp_allowed_mask(gfp_t mask) diff --git a/include/linux/gfs2_ondisk.h b/include/linux/gfs2_ondisk.h index b80c88dedbbb..81f90a59cda6 100644 --- a/include/linux/gfs2_ondisk.h +++ b/include/linux/gfs2_ondisk.h @@ -81,7 +81,11 @@ struct gfs2_meta_header { __be32 mh_type; __be64 __pad0; /* Was generation number in gfs1 */ __be32 mh_format; - __be32 __pad1; /* Was incarnation number in gfs1 */ + /* This union is to keep userspace happy */ + union { + __be32 mh_jid; /* Was incarnation number in gfs1 */ + __be32 __pad1; + }; }; /* diff --git a/include/linux/gpio.h b/include/linux/gpio.h index e10c49a5b96e..059bd189d35d 100644 --- a/include/linux/gpio.h +++ b/include/linux/gpio.h @@ -12,6 +12,8 @@ #include <linux/types.h> #include <linux/errno.h> +struct device; + /* * Some platforms don't support the GPIO programming interface. 
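A hedged sketch of how a netfs's releasepage() might use the fscache_maybe_release_page() helper added in the fscache.h hunk above; the cookie lookup below is a made-up placeholder for whatever per-inode state a real filesystem keeps:

#include <linux/fscache.h>
#include <linux/mm.h>

/* stand-in for the netfs's own per-inode cookie accessor */
static struct fscache_cookie *demo_page_cookie(struct page *page)
{
        return NULL;
}

static int demo_releasepage(struct page *page, gfp_t gfp)
{
        /* only consult FS-Cache when the page is marked as being cached */
        if (PageFsCache(page) &&
            !fscache_maybe_release_page(demo_page_cookie(page), page, gfp))
                return 0;       /* a store is still queued; the VM must keep the page */

        return 1;               /* no store pending; the page may be released */
}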
* @@ -89,6 +91,15 @@ static inline int gpio_export(unsigned gpio, bool direction_may_change) return -EINVAL; } +static inline int gpio_export_link(struct device *dev, const char *name, + unsigned gpio) +{ + /* GPIO can never have been exported */ + WARN_ON(1); + return -EINVAL; +} + + static inline void gpio_unexport(unsigned gpio) { /* GPIO can never have been exported */ diff --git a/include/linux/hid.h b/include/linux/hid.h index a0ebdace7baa..10f628416740 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -494,6 +494,7 @@ struct hid_device { /* device report descriptor */ /* hiddev event handler */ int (*hiddev_connect)(struct hid_device *, unsigned int); + void (*hiddev_disconnect)(struct hid_device *); void (*hiddev_hid_event) (struct hid_device *, struct hid_field *field, struct hid_usage *, __s32); void (*hiddev_report_event) (struct hid_device *, struct hid_report *); @@ -691,6 +692,7 @@ struct hid_device *hid_allocate_device(void); int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size); int hid_check_keys_pressed(struct hid_device *hid); int hid_connect(struct hid_device *hid, unsigned int connect_mask); +void hid_disconnect(struct hid_device *hid); /** * hid_map_usage - map usage input bits @@ -800,6 +802,7 @@ static inline int __must_check hid_hw_start(struct hid_device *hdev, */ static inline void hid_hw_stop(struct hid_device *hdev) { + hid_disconnect(hdev); hdev->ll_driver->stop(hdev); } diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 5cbc620bdfe0..41a59afc70fa 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -3,15 +3,15 @@ #include <linux/fs.h> +struct ctl_table; +struct user_struct; + #ifdef CONFIG_HUGETLB_PAGE #include <linux/mempolicy.h> #include <linux/shm.h> #include <asm/tlbflush.h> -struct ctl_table; -struct user_struct; - int PageHuge(struct page *page); static inline int is_vm_hugetlb_page(struct vm_area_struct *vma) @@ -20,11 +20,13 @@ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma) } void reset_vma_resv_huge_pages(struct vm_area_struct *vma); -int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *); -int hugetlb_overcommit_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *); -int hugetlb_treat_movable_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *); +int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); +int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); +int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *); -int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, int *, int, int); +int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, + struct page **, struct vm_area_struct **, + unsigned long *, int *, int, unsigned int flags); void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long, struct page *); void __unmap_hugepage_range(struct vm_area_struct *, @@ -110,6 +112,21 @@ static inline void hugetlb_report_meminfo(struct seq_file *m) #endif /* !CONFIG_HUGETLB_PAGE */ +#define HUGETLB_ANON_FILE "anon_hugepage" + +enum { + /* + * The file will be used as an shm file so shmfs accounting rules + * apply + */ + HUGETLB_SHMFS_INODE = 1, + /* + * The file is 
being created on the internal vfs mount and shmfs + * accounting rules do not apply + */ + HUGETLB_ANONHUGE_INODE = 2, +}; + #ifdef CONFIG_HUGETLBFS struct hugetlbfs_config { uid_t uid; @@ -146,9 +163,9 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb) } extern const struct file_operations hugetlbfs_file_operations; -extern struct vm_operations_struct hugetlb_vm_ops; +extern const struct vm_operations_struct hugetlb_vm_ops; struct file *hugetlb_file_setup(const char *name, size_t size, int acct, - struct user_struct **user); + struct user_struct **user, int creat_flags); int hugetlb_get_quota(struct address_space *mapping, long delta); void hugetlb_put_quota(struct address_space *mapping, long delta); @@ -170,7 +187,11 @@ static inline void set_file_hugepages(struct file *file) #define is_file_hugepages(file) 0 #define set_file_hugepages(file) BUG() -#define hugetlb_file_setup(name,size,acct,user) ERR_PTR(-ENOSYS) +static inline struct file *hugetlb_file_setup(const char *name, size_t size, + int acctflag, struct user_struct **user, int creat_flags) +{ + return ERR_PTR(-ENOSYS); +} #endif /* !CONFIG_HUGETLBFS */ @@ -185,7 +206,8 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, #define HSTATE_NAME_LEN 32 /* Defines one hugetlb page size */ struct hstate { - int hugetlb_next_nid; + int next_nid_to_alloc; + int next_nid_to_free; unsigned int order; unsigned long mask; unsigned long max_huge_pages; diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h index c9087de5c6c6..e844a0b18695 100644 --- a/include/linux/i2c-id.h +++ b/include/linux/i2c-id.h @@ -28,17 +28,6 @@ identify a legacy client. If you don't need them, just don't set them. */ /* - * ---- Driver types ----------------------------------------------------- - */ - -#define I2C_DRIVERID_MSP3400 1 -#define I2C_DRIVERID_TUNER 2 -#define I2C_DRIVERID_TDA7432 27 /* Stereo sound processor */ -#define I2C_DRIVERID_TVAUDIO 29 /* Generic TV sound driver */ -#define I2C_DRIVERID_SAA711X 73 /* saa711x video encoders */ -#define I2C_DRIVERID_INFRARED 75 /* I2C InfraRed on Video boards */ - -/* * ---- Adapter types ---------------------------------------------------- */ diff --git a/include/linux/i2c-pnx.h b/include/linux/i2c-pnx.h index f13255e06406..9eb07bbc6522 100644 --- a/include/linux/i2c-pnx.h +++ b/include/linux/i2c-pnx.h @@ -21,7 +21,7 @@ struct i2c_pnx_mif { int mode; /* Interface mode */ struct completion complete; /* I/O completion */ struct timer_list timer; /* Timeout */ - char * buf; /* Data buffer */ + u8 * buf; /* Data buffer */ int len; /* Length of data buffer */ }; diff --git a/include/linux/i2c.h b/include/linux/i2c.h index f4784c0fe975..7b40cda57a70 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -98,7 +98,6 @@ extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client, /** * struct i2c_driver - represent an I2C device driver - * @id: Unique driver ID (optional) * @class: What kind of i2c device we instantiate (for detect) * @attach_adapter: Callback for bus addition (for legacy drivers) * @detach_adapter: Callback for bus removal (for legacy drivers) @@ -135,7 +134,6 @@ extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client, * not allowed. 
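For the reworked hugetlb_file_setup() above, which now takes a creat_flags argument so the internal-mount (HUGETLB_ANONHUGE_INODE) case can skip shmfs accounting, a rough caller sketch; the length and flag choice are illustrative only:

#include <linux/hugetlb.h>

static struct file *demo_anon_hugepage_file(size_t len)
{
        struct user_struct *user = NULL;

        /* file on the internal hugetlbfs mount: shmfs accounting does not apply */
        return hugetlb_file_setup(HUGETLB_ANON_FILE, len, 0, &user,
                                  HUGETLB_ANONHUGE_INODE); /* ERR_PTR() on failure */
}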
*/ struct i2c_driver { - int id; unsigned int class; /* Notifies the driver that a new bus has appeared or is about to be @@ -363,6 +361,24 @@ static inline void i2c_set_adapdata(struct i2c_adapter *dev, void *data) dev_set_drvdata(&dev->dev, data); } +/** + * i2c_lock_adapter - Prevent access to an I2C bus segment + * @adapter: Target I2C bus segment + */ +static inline void i2c_lock_adapter(struct i2c_adapter *adapter) +{ + mutex_lock(&adapter->bus_lock); +} + +/** + * i2c_unlock_adapter - Reauthorize access to an I2C bus segment + * @adapter: Target I2C bus segment + */ +static inline void i2c_unlock_adapter(struct i2c_adapter *adapter) +{ + mutex_unlock(&adapter->bus_lock); +} + /*flags for the client struct: */ #define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */ #define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */ diff --git a/include/linux/i2c/adp5588.h b/include/linux/i2c/adp5588.h new file mode 100644 index 000000000000..fc5db826b48e --- /dev/null +++ b/include/linux/i2c/adp5588.h @@ -0,0 +1,92 @@ +/* + * Analog Devices ADP5588 I/O Expander and QWERTY Keypad Controller + * + * Copyright 2009 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. + */ + +#ifndef _ADP5588_H +#define _ADP5588_H + +#define DEV_ID 0x00 /* Device ID */ +#define CFG 0x01 /* Configuration Register1 */ +#define INT_STAT 0x02 /* Interrupt Status Register */ +#define KEY_LCK_EC_STAT 0x03 /* Key Lock and Event Counter Register */ +#define Key_EVENTA 0x04 /* Key Event Register A */ +#define Key_EVENTB 0x05 /* Key Event Register B */ +#define Key_EVENTC 0x06 /* Key Event Register C */ +#define Key_EVENTD 0x07 /* Key Event Register D */ +#define Key_EVENTE 0x08 /* Key Event Register E */ +#define Key_EVENTF 0x09 /* Key Event Register F */ +#define Key_EVENTG 0x0A /* Key Event Register G */ +#define Key_EVENTH 0x0B /* Key Event Register H */ +#define Key_EVENTI 0x0C /* Key Event Register I */ +#define Key_EVENTJ 0x0D /* Key Event Register J */ +#define KP_LCK_TMR 0x0E /* Keypad Lock1 to Lock2 Timer */ +#define UNLOCK1 0x0F /* Unlock Key1 */ +#define UNLOCK2 0x10 /* Unlock Key2 */ +#define GPIO_INT_STAT1 0x11 /* GPIO Interrupt Status */ +#define GPIO_INT_STAT2 0x12 /* GPIO Interrupt Status */ +#define GPIO_INT_STAT3 0x13 /* GPIO Interrupt Status */ +#define GPIO_DAT_STAT1 0x14 /* GPIO Data Status, Read twice to clear */ +#define GPIO_DAT_STAT2 0x15 /* GPIO Data Status, Read twice to clear */ +#define GPIO_DAT_STAT3 0x16 /* GPIO Data Status, Read twice to clear */ +#define GPIO_DAT_OUT1 0x17 /* GPIO DATA OUT */ +#define GPIO_DAT_OUT2 0x18 /* GPIO DATA OUT */ +#define GPIO_DAT_OUT3 0x19 /* GPIO DATA OUT */ +#define GPIO_INT_EN1 0x1A /* GPIO Interrupt Enable */ +#define GPIO_INT_EN2 0x1B /* GPIO Interrupt Enable */ +#define GPIO_INT_EN3 0x1C /* GPIO Interrupt Enable */ +#define KP_GPIO1 0x1D /* Keypad or GPIO Selection */ +#define KP_GPIO2 0x1E /* Keypad or GPIO Selection */ +#define KP_GPIO3 0x1F /* Keypad or GPIO Selection */ +#define GPI_EM1 0x20 /* GPI Event Mode 1 */ +#define GPI_EM2 0x21 /* GPI Event Mode 2 */ +#define GPI_EM3 0x22 /* GPI Event Mode 3 */ +#define GPIO_DIR1 0x23 /* GPIO Data Direction */ +#define GPIO_DIR2 0x24 /* GPIO Data Direction */ +#define GPIO_DIR3 0x25 /* GPIO Data Direction */ +#define GPIO_INT_LVL1 0x26 /* GPIO Edge/Level Detect */ +#define GPIO_INT_LVL2 0x27 /* GPIO Edge/Level Detect */ +#define GPIO_INT_LVL3 0x28 /* GPIO Edge/Level Detect */ +#define Debounce_DIS1 0x29 /* Debounce Disable */ +#define Debounce_DIS2 0x2A /* Debounce Disable */ +#define 
Debounce_DIS3 0x2B /* Debounce Disable */ +#define GPIO_PULL1 0x2C /* GPIO Pull Disable */ +#define GPIO_PULL2 0x2D /* GPIO Pull Disable */ +#define GPIO_PULL3 0x2E /* GPIO Pull Disable */ +#define CMP_CFG_STAT 0x30 /* Comparator Configuration and Status Register */ +#define CMP_CONFG_SENS1 0x31 /* Sensor1 Comparator Configuration Register */ +#define CMP_CONFG_SENS2 0x32 /* L2 Light Sensor Reference Level, Output Falling for Sensor 1 */ +#define CMP1_LVL2_TRIP 0x33 /* L2 Light Sensor Hysteresis (Active when Output Rising) for Sensor 1 */ +#define CMP1_LVL2_HYS 0x34 /* L3 Light Sensor Reference Level, Output Falling For Sensor 1 */ +#define CMP1_LVL3_TRIP 0x35 /* L3 Light Sensor Hysteresis (Active when Output Rising) For Sensor 1 */ +#define CMP1_LVL3_HYS 0x36 /* Sensor 2 Comparator Configuration Register */ +#define CMP2_LVL2_TRIP 0x37 /* L2 Light Sensor Reference Level, Output Falling for Sensor 2 */ +#define CMP2_LVL2_HYS 0x38 /* L2 Light Sensor Hysteresis (Active when Output Rising) for Sensor 2 */ +#define CMP2_LVL3_TRIP 0x39 /* L3 Light Sensor Reference Level, Output Falling For Sensor 2 */ +#define CMP2_LVL3_HYS 0x3A /* L3 Light Sensor Hysteresis (Active when Output Rising) For Sensor 2 */ +#define CMP1_ADC_DAT_R1 0x3B /* Comparator 1 ADC data Register1 */ +#define CMP1_ADC_DAT_R2 0x3C /* Comparator 1 ADC data Register2 */ +#define CMP2_ADC_DAT_R1 0x3D /* Comparator 2 ADC data Register1 */ +#define CMP2_ADC_DAT_R2 0x3E /* Comparator 2 ADC data Register2 */ + +#define ADP5588_DEVICE_ID_MASK 0xF + +/* Put one of these structures in i2c_board_info platform_data */ + +#define ADP5588_KEYMAPSIZE 80 + +struct adp5588_kpad_platform_data { + int rows; /* Number of rows */ + int cols; /* Number of columns */ + const unsigned short *keymap; /* Pointer to keymap */ + unsigned short keymapsize; /* Keymap size */ + unsigned repeat:1; /* Enable key repeat */ + unsigned en_keylock:1; /* Enable Key Lock feature */ + unsigned short unlock_key1; /* Unlock Key 1 */ + unsigned short unlock_key2; /* Unlock Key 2 */ +}; + +#endif diff --git a/include/linux/i2c/mcs5000_ts.h b/include/linux/i2c/mcs5000_ts.h new file mode 100644 index 000000000000..5a117b5ca15e --- /dev/null +++ b/include/linux/i2c/mcs5000_ts.h @@ -0,0 +1,24 @@ +/* + * mcs5000_ts.h + * + * Copyright (C) 2009 Samsung Electronics Co.Ltd + * Author: Joonyoung Shim <jy0922.shim@samsung.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __LINUX_MCS5000_TS_H +#define __LINUX_MCS5000_TS_H + +/* platform data for the MELFAS MCS-5000 touchscreen driver */ +struct mcs5000_ts_platform_data { + void (*cfg_pin)(void); + int x_size; + int y_size; +}; + +#endif /* __LINUX_MCS5000_TS_H */ diff --git a/include/linux/i2c/twl4030.h b/include/linux/i2c/twl4030.h index 2d02dfd7076c..508824ee35e6 100644 --- a/include/linux/i2c/twl4030.h +++ b/include/linux/i2c/twl4030.h @@ -349,11 +349,11 @@ struct twl4030_madc_platform_data { int irq_line; }; -/* Boards have uniqe mappings of {col, row} --> keycode. - * Column and row are 4 bits, but range only from 0..7. +/* Boards have uniqe mappings of {row, col} --> keycode. + * Column and row are 8 bits each, but range only from 0..7. * a PERSISTENT_KEY is "always on" and never reported. 
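The new adp5588.h asks board code to hang an adp5588_kpad_platform_data off i2c_board_info; a sketch of what that might look like, with the keymap contents, I2C address and device name chosen purely for illustration:

#include <linux/kernel.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/i2c/adp5588.h>

static const unsigned short demo_adp5588_keymap[ADP5588_KEYMAPSIZE] = {
        [0] = KEY_A,                    /* first key event slot */
        [1] = KEY_B,
        /* remaining slots default to 0 (unused) */
};

static struct adp5588_kpad_platform_data demo_adp5588_pdata = {
        .rows           = 8,
        .cols           = 10,
        .keymap         = demo_adp5588_keymap,
        .keymapsize     = ARRAY_SIZE(demo_adp5588_keymap),
        .repeat         = 1,
};

static struct i2c_board_info demo_board_info __initdata = {
        I2C_BOARD_INFO("adp5588-keys", 0x34),   /* name and address are assumptions */
        .platform_data  = &demo_adp5588_pdata,
};

/* registered from board init, e.g. i2c_register_board_info(0, &demo_board_info, 1); */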
*/ -#define PERSISTENT_KEY(c, r) KEY((c), (r), KEY_RESERVED) +#define PERSISTENT_KEY(r, c) KEY((r), (c), KEY_RESERVED) struct twl4030_keypad_data { const struct matrix_keymap_data *keymap_data; diff --git a/include/linux/i8042.h b/include/linux/i8042.h index 7907a72403ee..60c3360ef6ad 100644 --- a/include/linux/i8042.h +++ b/include/linux/i8042.h @@ -7,6 +7,7 @@ * the Free Software Foundation. */ +#include <linux/types.h> /* * Standard commands. @@ -30,6 +31,35 @@ #define I8042_CMD_MUX_PFX 0x0090 #define I8042_CMD_MUX_SEND 0x1090 +struct serio; + +#if defined(CONFIG_SERIO_I8042) || defined(CONFIG_SERIO_I8042_MODULE) + +void i8042_lock_chip(void); +void i8042_unlock_chip(void); int i8042_command(unsigned char *param, int command); +bool i8042_check_port_owner(const struct serio *); + +#else + +void i8042_lock_chip(void) +{ +} + +void i8042_unlock_chip(void) +{ +} + +int i8042_command(unsigned char *param, int command) +{ + return -ENOSYS; +} + +bool i8042_check_port_owner(const struct serio *serio) +{ + return false; +} + +#endif #endif diff --git a/include/linux/if_tunnel.h b/include/linux/if_tunnel.h index 5eb9b0f857e0..5a9aae4adb44 100644 --- a/include/linux/if_tunnel.h +++ b/include/linux/if_tunnel.h @@ -44,7 +44,7 @@ struct ip_tunnel_prl { __u16 flags; __u16 __reserved; __u32 datalen; - __u32 rs_delay; + __u32 __reserved2; /* data follows */ }; diff --git a/include/linux/init.h b/include/linux/init.h index 400adbb45414..ff8bde520d03 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -271,6 +271,7 @@ void __init parse_early_options(char *cmdline); #else /* MODULE */ /* Don't use these in modules, but some people do... */ +#define early_initcall(fn) module_init(fn) #define core_initcall(fn) module_init(fn) #define postcore_initcall(fn) module_init(fn) #define arch_initcall(fn) module_init(fn) diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 9e7f2e8fc66e..8d10aa7fd4c9 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -83,16 +83,12 @@ extern struct group_info init_groups; #define INIT_IDS #endif -#ifdef CONFIG_SECURITY_FILE_CAPABILITIES /* * Because of the reduced scope of CAP_SETPCAP when filesystem * capabilities are in effect, it is safe to allow CAP_SETPCAP to * be available in the default configuration. 
*/ # define CAP_INIT_BSET CAP_FULL_SET -#else -# define CAP_INIT_BSET CAP_INIT_EFF_SET -#endif #ifdef CONFIG_TREE_PREEMPT_RCU #define INIT_TASK_RCU_PREEMPT(tsk) \ @@ -106,13 +102,13 @@ extern struct group_info init_groups; extern struct cred init_cred; -#ifdef CONFIG_PERF_COUNTERS -# define INIT_PERF_COUNTERS(tsk) \ - .perf_counter_mutex = \ - __MUTEX_INITIALIZER(tsk.perf_counter_mutex), \ - .perf_counter_list = LIST_HEAD_INIT(tsk.perf_counter_list), +#ifdef CONFIG_PERF_EVENTS +# define INIT_PERF_EVENTS(tsk) \ + .perf_event_mutex = \ + __MUTEX_INITIALIZER(tsk.perf_event_mutex), \ + .perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list), #else -# define INIT_PERF_COUNTERS(tsk) +# define INIT_PERF_EVENTS(tsk) #endif /* @@ -178,7 +174,7 @@ extern struct cred init_cred; }, \ .dirties = INIT_PROP_LOCAL_SINGLE(dirties), \ INIT_IDS \ - INIT_PERF_COUNTERS(tsk) \ + INIT_PERF_EVENTS(tsk) \ INIT_TRACE_IRQFLAGS \ INIT_LOCKDEP \ INIT_FTRACE_GRAPH \ diff --git a/include/linux/input.h b/include/linux/input.h index 8b3bc3e0d146..c2b1a7d244d9 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -1123,7 +1123,7 @@ struct input_dev { struct mutex mutex; unsigned int users; - int going_away; + bool going_away; struct device dev; @@ -1377,6 +1377,10 @@ extern struct class input_class; * methods; erase() is optional. set_gain() and set_autocenter() need * only be implemented if driver sets up FF_GAIN and FF_AUTOCENTER * bits. + * + * Note that playback(), set_gain() and set_autocenter() are called with + * dev->event_lock spinlock held and interrupts off and thus may not + * sleep. */ struct ff_device { int (*upload)(struct input_dev *dev, struct ff_effect *effect, diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 482dc91fd53a..4f0a72a9740c 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -360,4 +360,6 @@ extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep, extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); +extern int dmar_ir_support(void); + #endif diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 8e9e151f811e..7ca72b74eec7 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -10,7 +10,6 @@ #include <linux/irqreturn.h> #include <linux/irqnr.h> #include <linux/hardirq.h> -#include <linux/sched.h> #include <linux/irqflags.h> #include <linux/smp.h> #include <linux/percpu.h> @@ -84,7 +83,6 @@ typedef irqreturn_t (*irq_handler_t)(int, void *); * struct irqaction - per interrupt action descriptor * @handler: interrupt handler function * @flags: flags (see IRQF_* above) - * @mask: no comment as it is useless and about to be removed * @name: name of the device * @dev_id: cookie to identify the device * @next: pointer to the next irqaction for shared interrupts @@ -97,7 +95,6 @@ typedef irqreturn_t (*irq_handler_t)(int, void *); struct irqaction { irq_handler_t handler; unsigned long flags; - cpumask_t mask; const char *name; void *dev_id; struct irqaction *next; @@ -612,6 +609,7 @@ extern void debug_poll_all_shared_irqs(void); static inline void debug_poll_all_shared_irqs(void) { } #endif +struct seq_file; int show_interrupts(struct seq_file *p, void *v); struct irq_desc; diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 786e7b8cece9..83aa81297ea3 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -184,5 +184,9 @@ extern void __devm_release_region(struct device *dev, struct resource *parent, extern int 
iomem_map_sanity_check(resource_size_t addr, unsigned long size); extern int iomem_is_exclusive(u64 addr); +extern int +walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, + void *arg, int (*func)(unsigned long, unsigned long, void *)); + #endif /* __ASSEMBLY__ */ #endif /* _LINUX_IOPORT_H */ diff --git a/include/linux/iova.h b/include/linux/iova.h index 228f6c94b69c..76a0759e88ec 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h @@ -28,7 +28,6 @@ struct iova { /* holds all the iova translations for a domain */ struct iova_domain { - spinlock_t iova_alloc_lock;/* Lock to protect iova allocation */ spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */ struct rb_root rbroot; /* iova domain rbtree root */ struct rb_node *cached32_node; /* Save last alloced node */ diff --git a/include/linux/ipc.h b/include/linux/ipc.h index b8826107b518..3b1594d662b0 100644 --- a/include/linux/ipc.h +++ b/include/linux/ipc.h @@ -78,8 +78,6 @@ struct ipc_kludge { #define IPCCALL(version,op) ((version)<<16 | (op)) #ifdef __KERNEL__ - -#include <linux/kref.h> #include <linux/spinlock.h> #define IPCMNI 32768 /* <= MAX_INT limit for ipc arrays (including sysctl changes) */ diff --git a/include/linux/isdn_ppp.h b/include/linux/isdn_ppp.h index 4c218ee7587a..8687a7dc0632 100644 --- a/include/linux/isdn_ppp.h +++ b/include/linux/isdn_ppp.h @@ -157,7 +157,7 @@ typedef struct { typedef struct { int mp_mrru; /* unused */ - struct sk_buff_head frags; /* fragments sl list */ + struct sk_buff * frags; /* fragments sl list -- use skb->next */ long frames; /* number of frames in the frame list */ unsigned int seq; /* last processed packet seq #: any packets * with smaller seq # will be dropped diff --git a/include/linux/jbd.h b/include/linux/jbd.h index a1187a0c99b4..331530cd3cc6 100644 --- a/include/linux/jbd.h +++ b/include/linux/jbd.h @@ -556,7 +556,7 @@ struct transaction_s * This transaction is being forced and some process is * waiting for it to finish. */ - int t_synchronous_commit:1; + unsigned int t_synchronous_commit:1; }; /** diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 52695d3dfd0b..f1011f7f3d41 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -464,9 +464,9 @@ struct handle_s */ struct transaction_chp_stats_s { unsigned long cs_chp_time; - unsigned long cs_forced_to_close; - unsigned long cs_written; - unsigned long cs_dropped; + __u32 cs_forced_to_close; + __u32 cs_written; + __u32 cs_dropped; }; /* The transaction_t type is the guts of the journaling mechanism. 
It @@ -668,23 +668,16 @@ struct transaction_run_stats_s { unsigned long rs_flushing; unsigned long rs_logging; - unsigned long rs_handle_count; - unsigned long rs_blocks; - unsigned long rs_blocks_logged; + __u32 rs_handle_count; + __u32 rs_blocks; + __u32 rs_blocks_logged; }; struct transaction_stats_s { - int ts_type; unsigned long ts_tid; - union { - struct transaction_run_stats_s run; - struct transaction_chp_stats_s chp; - } u; + struct transaction_run_stats_s run; }; -#define JBD2_STATS_RUN 1 -#define JBD2_STATS_CHECKPOINT 2 - static inline unsigned long jbd2_time_diff(unsigned long start, unsigned long end) { @@ -988,12 +981,6 @@ struct journal_s /* * Journal statistics */ - struct transaction_stats_s *j_history; - int j_history_max; - int j_history_cur; - /* - * Protect the transactions statistics history - */ spinlock_t j_history_lock; struct proc_dir_entry *j_proc_entry; struct transaction_stats_s j_stats; diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 21d0d822c716..3fa4c590cf12 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -145,7 +145,7 @@ extern int _cond_resched(void); #define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0) #define abs(x) ({ \ - int __x = (x); \ + long __x = (x); \ (__x < 0) ? -__x : __x; \ }) @@ -245,14 +245,16 @@ extern int __printk_ratelimit(const char *func); extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, unsigned int interval_msec); +extern int printk_delay_msec; + /* * Print a one-time message (analogous to WARN_ONCE() et al): */ #define printk_once(x...) ({ \ - static int __print_once = 1; \ + static bool __print_once = true; \ \ if (__print_once) { \ - __print_once = 0; \ + __print_once = false; \ printk(x); \ } \ }) @@ -656,6 +658,12 @@ extern int do_sysinfo(struct sysinfo *info); #endif /* __KERNEL__ */ +#ifndef __EXPORTED_HEADERS__ +#ifndef __KERNEL__ +#warning Attempt to use kernel headers from user space, see http://kernelnewbies.org/KernelHeaders +#endif /* __KERNEL__ */ +#endif /* __EXPORTED_HEADERS__ */ + #define SI_LOAD_SHIFT 16 struct sysinfo { long uptime; /* Seconds since boot */ @@ -675,13 +683,17 @@ struct sysinfo { }; /* Force a compilation error if condition is true */ -#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) +#define BUILD_BUG_ON(condition) ((void)BUILD_BUG_ON_ZERO(condition)) + +/* Force a compilation error if condition is constant and true */ +#define MAYBE_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)])) /* Force a compilation error if condition is true, but also produce a result (of value 0 and type size_t), so the expression can be used e.g. in a structure initializer (or where-ever else comma expressions aren't permitted). 
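As a small, assumption-laden illustration of the initializer use case the comment above describes: BUILD_BUG_ON_ZERO() evaluates to 0 when its condition is false, so it can be folded into a constant initializer while still breaking the build if the invariant is ever violated.

#include <linux/kernel.h>

static const int demo_vals[4] = { 1, 2, 3, 4 };

struct demo_table {
        int nr;
        const int *vals;
};

/* compilation fails if demo_vals ever stops holding exactly 4 entries */
static const struct demo_table demo_table = {
        .nr     = ARRAY_SIZE(demo_vals) +
                  BUILD_BUG_ON_ZERO(ARRAY_SIZE(demo_vals) != 4),
        .vals   = demo_vals,
};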
*/ -#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1) +#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); })) +#define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:-!!(e); })) /* Trap pasters of __FUNCTION__ at compile-time */ #define __FUNCTION__ (__func__) diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h index dc2fd545db00..e880d4cf9e22 100644 --- a/include/linux/kmemcheck.h +++ b/include/linux/kmemcheck.h @@ -144,10 +144,15 @@ static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size) int name##_end[0]; #define kmemcheck_annotate_bitfield(ptr, name) \ - do if (ptr) { \ - int _n = (long) &((ptr)->name##_end) \ + do { \ + int _n; \ + \ + if (!ptr) \ + break; \ + \ + _n = (long) &((ptr)->name##_end) \ - (long) &((ptr)->name##_begin); \ - BUILD_BUG_ON(_n < 0); \ + MAYBE_BUILD_BUG_ON(_n < 0); \ \ kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \ } while (0) diff --git a/include/linux/kref.h b/include/linux/kref.h index 0cef6badd6fb..b0cb0ebad9e6 100644 --- a/include/linux/kref.h +++ b/include/linux/kref.h @@ -16,7 +16,6 @@ #define _KREF_H_ #include <linux/types.h> -#include <asm/atomic.h> struct kref { atomic_t refcount; diff --git a/include/linux/ksm.h b/include/linux/ksm.h new file mode 100644 index 000000000000..a485c14ecd5d --- /dev/null +++ b/include/linux/ksm.h @@ -0,0 +1,79 @@ +#ifndef __LINUX_KSM_H +#define __LINUX_KSM_H +/* + * Memory merging support. + * + * This code enables dynamic sharing of identical pages found in different + * memory areas, even if they are not shared by fork(). + */ + +#include <linux/bitops.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/vmstat.h> + +#ifdef CONFIG_KSM +int ksm_madvise(struct vm_area_struct *vma, unsigned long start, + unsigned long end, int advice, unsigned long *vm_flags); +int __ksm_enter(struct mm_struct *mm); +void __ksm_exit(struct mm_struct *mm); + +static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) +{ + if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) + return __ksm_enter(mm); + return 0; +} + +static inline void ksm_exit(struct mm_struct *mm) +{ + if (test_bit(MMF_VM_MERGEABLE, &mm->flags)) + __ksm_exit(mm); +} + +/* + * A KSM page is one of those write-protected "shared pages" or "merged pages" + * which KSM maps into multiple mms, wherever identical anonymous page content + * is found in VM_MERGEABLE vmas. It's a PageAnon page, with NULL anon_vma. + */ +static inline int PageKsm(struct page *page) +{ + return ((unsigned long)page->mapping == PAGE_MAPPING_ANON); +} + +/* + * But we have to avoid the checking which page_add_anon_rmap() performs. 
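A hedged sketch of how the new ksm.h hooks are meant to be called: the first wrapper mimics what an madvise(MADV_MERGEABLE) handler would do, and the second shows where ksm_fork() fits on the fork path. Function names and call sites here are illustrative, not the actual mm/ code.

#include <linux/ksm.h>
#include <linux/mman.h>

/* MADV_MERGEABLE handling: let KSM adjust the vma flags */
static int demo_madvise_mergeable(struct vm_area_struct *vma,
                                  unsigned long start, unsigned long end)
{
        unsigned long new_flags = vma->vm_flags;
        int err;

        err = ksm_madvise(vma, start, end, MADV_MERGEABLE, &new_flags);
        if (err)
                return err;

        vma->vm_flags = new_flags;      /* VM_MERGEABLE set on success */
        return 0;
}

/* fork path: child mm inherits KSM participation from the parent */
static int demo_dup_mm_hook(struct mm_struct *mm, struct mm_struct *oldmm)
{
        return ksm_fork(mm, oldmm);
}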
+ */ +static inline void page_add_ksm_rmap(struct page *page) +{ + if (atomic_inc_and_test(&page->_mapcount)) { + page->mapping = (void *) PAGE_MAPPING_ANON; + __inc_zone_page_state(page, NR_ANON_PAGES); + } +} +#else /* !CONFIG_KSM */ + +static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start, + unsigned long end, int advice, unsigned long *vm_flags) +{ + return 0; +} + +static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) +{ + return 0; +} + +static inline void ksm_exit(struct mm_struct *mm) +{ +} + +static inline int PageKsm(struct page *page) +{ + return 0; +} + +/* No stub required for page_add_ksm_rmap(page) */ +#endif /* !CONFIG_KSM */ + +#endif diff --git a/include/linux/libata.h b/include/linux/libata.h index 76319bf03e37..87698640c091 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -418,6 +418,17 @@ enum { ATA_TIMING_ACTIVE | ATA_TIMING_RECOVER | ATA_TIMING_DMACK_HOLD | ATA_TIMING_CYCLE | ATA_TIMING_UDMA, + + /* ACPI constants */ + ATA_ACPI_FILTER_SETXFER = 1 << 0, + ATA_ACPI_FILTER_LOCK = 1 << 1, + ATA_ACPI_FILTER_DIPM = 1 << 2, + ATA_ACPI_FILTER_FPDMA_OFFSET = 1 << 3, /* FPDMA non-zero offset */ + ATA_ACPI_FILTER_FPDMA_AA = 1 << 4, /* FPDMA auto activate */ + + ATA_ACPI_FILTER_DEFAULT = ATA_ACPI_FILTER_SETXFER | + ATA_ACPI_FILTER_LOCK | + ATA_ACPI_FILTER_DIPM, }; enum ata_xfer_mask { @@ -587,6 +598,7 @@ struct ata_device { #ifdef CONFIG_ATA_ACPI acpi_handle acpi_handle; union acpi_object *gtf_cache; + unsigned int gtf_filter; #endif /* n_sector is CLEAR_BEGIN, read comment above CLEAR_BEGIN */ u64 n_sectors; /* size of device, if ATA */ diff --git a/include/linux/libps2.h b/include/linux/libps2.h index fcf5fbe6a50c..79603a6c356f 100644 --- a/include/linux/libps2.h +++ b/include/linux/libps2.h @@ -44,6 +44,8 @@ struct ps2dev { void ps2_init(struct ps2dev *ps2dev, struct serio *serio); int ps2_sendbyte(struct ps2dev *ps2dev, unsigned char byte, int timeout); void ps2_drain(struct ps2dev *ps2dev, int maxbytes, int timeout); +void ps2_begin_command(struct ps2dev *ps2dev); +void ps2_end_command(struct ps2dev *ps2dev); int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command); int ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command); int ps2_handle_ack(struct ps2dev *ps2dev, unsigned char data); diff --git a/include/linux/linkage.h b/include/linux/linkage.h index 691f59171c6c..5126cceb6ae9 100644 --- a/include/linux/linkage.h +++ b/include/linux/linkage.h @@ -57,6 +57,7 @@ #ifdef __ASSEMBLY__ +#ifndef LINKER_SCRIPT #define ALIGN __ALIGN #define ALIGN_STR __ALIGN_STR @@ -66,6 +67,7 @@ ALIGN; \ name: #endif +#endif /* LINKER_SCRIPT */ #ifndef WEAK #define WEAK(name) \ diff --git a/include/linux/lis3lv02d.h b/include/linux/lis3lv02d.h index ad651f4e45ac..3cc2f2c53e4c 100644 --- a/include/linux/lis3lv02d.h +++ b/include/linux/lis3lv02d.h @@ -32,8 +32,17 @@ struct lis3lv02d_platform_data { #define LIS3_IRQ2_DATA_READY (4 << 3) #define LIS3_IRQ2_CLICK (7 << 3) #define LIS3_IRQ_OPEN_DRAIN (1 << 6) -#define LIS3_IRQ_ACTIVE_HIGH (1 << 7) +#define LIS3_IRQ_ACTIVE_LOW (1 << 7) unsigned char irq_cfg; + +#define LIS3_WAKEUP_X_LO (1 << 0) +#define LIS3_WAKEUP_X_HI (1 << 1) +#define LIS3_WAKEUP_Y_LO (1 << 2) +#define LIS3_WAKEUP_Y_HI (1 << 3) +#define LIS3_WAKEUP_Z_LO (1 << 4) +#define LIS3_WAKEUP_Z_HI (1 << 5) + unsigned char wakeup_flags; + unsigned char wakeup_thresh; }; #endif /* __LIS3LV02D_H_ */ diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index 
c325b187966b..a34dea46b629 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -338,49 +338,6 @@ static inline int nlm_privileged_requester(const struct svc_rqst *rqstp) } } -static inline int __nlm_cmp_addr4(const struct sockaddr *sap1, - const struct sockaddr *sap2) -{ - const struct sockaddr_in *sin1 = (const struct sockaddr_in *)sap1; - const struct sockaddr_in *sin2 = (const struct sockaddr_in *)sap2; - return sin1->sin_addr.s_addr == sin2->sin_addr.s_addr; -} - -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) -static inline int __nlm_cmp_addr6(const struct sockaddr *sap1, - const struct sockaddr *sap2) -{ - const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sap1; - const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sap2; - return ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr); -} -#else /* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */ -static inline int __nlm_cmp_addr6(const struct sockaddr *sap1, - const struct sockaddr *sap2) -{ - return 0; -} -#endif /* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */ - -/* - * Compare two host addresses - * - * Return TRUE if the addresses are the same; otherwise FALSE. - */ -static inline int nlm_cmp_addr(const struct sockaddr *sap1, - const struct sockaddr *sap2) -{ - if (sap1->sa_family == sap2->sa_family) { - switch (sap1->sa_family) { - case AF_INET: - return __nlm_cmp_addr4(sap1, sap2); - case AF_INET6: - return __nlm_cmp_addr6(sap1, sap2); - } - } - return 0; -} - /* * Compare two NLM locks. * When the second lock is of type F_UNLCK, this acts like a wildcard. @@ -395,7 +352,7 @@ static inline int nlm_compare_locks(const struct file_lock *fl1, &&(fl1->fl_type == fl2->fl_type || fl2->fl_type == F_UNLCK); } -extern struct lock_manager_operations nlmsvc_lock_operations; +extern const struct lock_manager_operations nlmsvc_lock_operations; #endif /* __KERNEL__ */ diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h index 190c37854870..f78f83d7663f 100644 --- a/include/linux/lsm_audit.h +++ b/include/linux/lsm_audit.h @@ -26,14 +26,15 @@ /* Auxiliary data to use in generating the audit record. 
*/ struct common_audit_data { - char type; -#define LSM_AUDIT_DATA_FS 1 -#define LSM_AUDIT_DATA_NET 2 -#define LSM_AUDIT_DATA_CAP 3 -#define LSM_AUDIT_DATA_IPC 4 -#define LSM_AUDIT_DATA_TASK 5 -#define LSM_AUDIT_DATA_KEY 6 -#define LSM_AUDIT_NO_AUDIT 7 + char type; +#define LSM_AUDIT_DATA_FS 1 +#define LSM_AUDIT_DATA_NET 2 +#define LSM_AUDIT_DATA_CAP 3 +#define LSM_AUDIT_DATA_IPC 4 +#define LSM_AUDIT_DATA_TASK 5 +#define LSM_AUDIT_DATA_KEY 6 +#define LSM_AUDIT_NO_AUDIT 7 +#define LSM_AUDIT_DATA_KMOD 8 struct task_struct *tsk; union { struct { @@ -66,6 +67,7 @@ struct common_audit_data { char *key_desc; } key_struct; #endif + char *kmod_name; } u; /* this union contains LSM specific data */ union { diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h index 536ca12442ca..78c3bed1c3f5 100644 --- a/include/linux/mISDNif.h +++ b/include/linux/mISDNif.h @@ -104,7 +104,7 @@ #define DL_UNITDATA_IND 0x3108 #define DL_INFORMATION_IND 0x0008 -/* intern layer 2 managment */ +/* intern layer 2 management */ #define MDL_ASSIGN_REQ 0x1804 #define MDL_ASSIGN_IND 0x1904 #define MDL_REMOVE_REQ 0x1A04 diff --git a/include/linux/magic.h b/include/linux/magic.h index 1923327b9869..76285e01b39e 100644 --- a/include/linux/magic.h +++ b/include/linux/magic.h @@ -12,7 +12,9 @@ #define SYSFS_MAGIC 0x62656572 #define SECURITYFS_MAGIC 0x73636673 #define SELINUX_MAGIC 0xf97cff8c +#define RAMFS_MAGIC 0x858458f6 /* some random number */ #define TMPFS_MAGIC 0x01021994 +#define HUGETLBFS_MAGIC 0x958458f6 /* some random number */ #define SQUASHFS_MAGIC 0x73717368 #define EFS_SUPER_MAGIC 0x414A53 #define EXT2_SUPER_MAGIC 0xEF53 @@ -53,4 +55,8 @@ #define INOTIFYFS_SUPER_MAGIC 0x2BAD1DEA #define STACK_END_MAGIC 0x57AC6E9D + +#define DEVPTS_SUPER_MAGIC 0x1cd1 +#define SOCKFS_MAGIC 0x534F434B + #endif /* __LINUX_MAGIC_H__ */ diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index e46a0734ab6e..bf9213b2db8f 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -118,6 +118,9 @@ static inline bool mem_cgroup_disabled(void) extern bool mem_cgroup_oom_called(struct task_struct *task); void mem_cgroup_update_mapped_file_stat(struct page *page, int val); +unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, + gfp_t gfp_mask, int nid, + int zid); #else /* CONFIG_CGROUP_MEM_RES_CTLR */ struct mem_cgroup; @@ -276,6 +279,13 @@ static inline void mem_cgroup_update_mapped_file_stat(struct page *page, { } +static inline +unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, + gfp_t gfp_mask, int nid, int zid) +{ + return 0; +} + #endif /* CONFIG_CGROUP_MEM_CONT */ #endif /* _LINUX_MEMCONTROL_H */ diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index d95f72e79b82..fed969281a41 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -191,14 +191,6 @@ static inline void register_page_bootmem_info_node(struct pglist_data *pgdat) #endif /* ! CONFIG_MEMORY_HOTPLUG */ -/* - * Walk through all memory which is registered as resource. 
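The walk_memory_resource() walker removed here is superseded by the walk_system_ram_range() declaration added to ioport.h earlier in this diff; a small sketch of the replacement's calling convention, with the callback and its bookkeeping invented for the example:

#include <linux/ioport.h>

/* callback: tally the pages contributed by each System RAM range */
static int demo_count_ram(unsigned long start_pfn, unsigned long nr_pages,
                          void *arg)
{
        unsigned long *total = arg;

        *total += nr_pages;
        return 0;               /* a non-zero return stops the walk */
}

static unsigned long demo_ram_pages_in(unsigned long start_pfn,
                                       unsigned long nr_pages)
{
        unsigned long total = 0;

        walk_system_ram_range(start_pfn, nr_pages, &total, demo_count_ram);
        return total;
}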
- * arg is (start_pfn, nr_pages, private_arg_pointer) - */ -extern int walk_memory_resource(unsigned long start_pfn, - unsigned long nr_pages, void *arg, - int (*func)(unsigned long, unsigned long, void *)); - #ifdef CONFIG_MEMORY_HOTREMOVE extern int is_mem_section_removable(unsigned long pfn, unsigned long nr_pages); diff --git a/include/linux/mempool.h b/include/linux/mempool.h index 9be484d11283..7c08052e3321 100644 --- a/include/linux/mempool.h +++ b/include/linux/mempool.h @@ -47,22 +47,16 @@ mempool_create_slab_pool(int min_nr, struct kmem_cache *kc) } /* - * 2 mempool_alloc_t's and a mempool_free_t to kmalloc/kzalloc and kfree - * the amount of memory specified by pool_data + * a mempool_alloc_t and a mempool_free_t to kmalloc and kfree the + * amount of memory specified by pool_data */ void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data); -void *mempool_kzalloc(gfp_t gfp_mask, void *pool_data); void mempool_kfree(void *element, void *pool_data); static inline mempool_t *mempool_create_kmalloc_pool(int min_nr, size_t size) { return mempool_create(min_nr, mempool_kmalloc, mempool_kfree, (void *) size); } -static inline mempool_t *mempool_create_kzalloc_pool(int min_nr, size_t size) -{ - return mempool_create(min_nr, mempool_kzalloc, mempool_kfree, - (void *) size); -} /* * A mempool_alloc_t and mempool_free_t for a simple page allocator that diff --git a/include/linux/mfd/da903x.h b/include/linux/mfd/da903x.h index 115dbe965082..c63b65c94429 100644 --- a/include/linux/mfd/da903x.h +++ b/include/linux/mfd/da903x.h @@ -1,7 +1,7 @@ #ifndef __LINUX_PMIC_DA903X_H #define __LINUX_PMIC_DA903X_H -/* Unified sub device IDs for DA9030/DA9034 */ +/* Unified sub device IDs for DA9030/DA9034/DA9035 */ enum { DA9030_ID_LED_1, DA9030_ID_LED_2, @@ -57,6 +57,8 @@ enum { DA9034_ID_LDO13, DA9034_ID_LDO14, DA9034_ID_LDO15, + + DA9035_ID_BUCK3, }; /* diff --git a/include/linux/mfd/wm831x/pmu.h b/include/linux/mfd/wm831x/pmu.h new file mode 100644 index 000000000000..b18cbb027bc3 --- /dev/null +++ b/include/linux/mfd/wm831x/pmu.h @@ -0,0 +1,189 @@ +/* + * include/linux/mfd/wm831x/pmu.h -- PMU for WM831x + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ + +#ifndef __MFD_WM831X_PMU_H__ +#define __MFD_WM831X_PMU_H__ + +/* + * R16387 (0x4003) - Power State + */ +#define WM831X_CHIP_ON 0x8000 /* CHIP_ON */ +#define WM831X_CHIP_ON_MASK 0x8000 /* CHIP_ON */ +#define WM831X_CHIP_ON_SHIFT 15 /* CHIP_ON */ +#define WM831X_CHIP_ON_WIDTH 1 /* CHIP_ON */ +#define WM831X_CHIP_SLP 0x4000 /* CHIP_SLP */ +#define WM831X_CHIP_SLP_MASK 0x4000 /* CHIP_SLP */ +#define WM831X_CHIP_SLP_SHIFT 14 /* CHIP_SLP */ +#define WM831X_CHIP_SLP_WIDTH 1 /* CHIP_SLP */ +#define WM831X_REF_LP 0x1000 /* REF_LP */ +#define WM831X_REF_LP_MASK 0x1000 /* REF_LP */ +#define WM831X_REF_LP_SHIFT 12 /* REF_LP */ +#define WM831X_REF_LP_WIDTH 1 /* REF_LP */ +#define WM831X_PWRSTATE_DLY_MASK 0x0C00 /* PWRSTATE_DLY - [11:10] */ +#define WM831X_PWRSTATE_DLY_SHIFT 10 /* PWRSTATE_DLY - [11:10] */ +#define WM831X_PWRSTATE_DLY_WIDTH 2 /* PWRSTATE_DLY - [11:10] */ +#define WM831X_SWRST_DLY 0x0200 /* SWRST_DLY */ +#define WM831X_SWRST_DLY_MASK 0x0200 /* SWRST_DLY */ +#define WM831X_SWRST_DLY_SHIFT 9 /* SWRST_DLY */ +#define WM831X_SWRST_DLY_WIDTH 1 /* SWRST_DLY */ +#define WM831X_USB100MA_STARTUP_MASK 0x0030 /* USB100MA_STARTUP - [5:4] */ +#define WM831X_USB100MA_STARTUP_SHIFT 4 /* USB100MA_STARTUP - [5:4] */ +#define WM831X_USB100MA_STARTUP_WIDTH 2 /* USB100MA_STARTUP - [5:4] */ +#define WM831X_USB_CURR_STS 0x0008 /* USB_CURR_STS */ +#define WM831X_USB_CURR_STS_MASK 0x0008 /* USB_CURR_STS */ +#define WM831X_USB_CURR_STS_SHIFT 3 /* USB_CURR_STS */ +#define WM831X_USB_CURR_STS_WIDTH 1 /* USB_CURR_STS */ +#define WM831X_USB_ILIM_MASK 0x0007 /* USB_ILIM - [2:0] */ +#define WM831X_USB_ILIM_SHIFT 0 /* USB_ILIM - [2:0] */ +#define WM831X_USB_ILIM_WIDTH 3 /* USB_ILIM - [2:0] */ + +/* + * R16397 (0x400D) - System Status + */ +#define WM831X_THW_STS 0x8000 /* THW_STS */ +#define WM831X_THW_STS_MASK 0x8000 /* THW_STS */ +#define WM831X_THW_STS_SHIFT 15 /* THW_STS */ +#define WM831X_THW_STS_WIDTH 1 /* THW_STS */ +#define WM831X_PWR_SRC_BATT 0x0400 /* PWR_SRC_BATT */ +#define WM831X_PWR_SRC_BATT_MASK 0x0400 /* PWR_SRC_BATT */ +#define WM831X_PWR_SRC_BATT_SHIFT 10 /* PWR_SRC_BATT */ +#define WM831X_PWR_SRC_BATT_WIDTH 1 /* PWR_SRC_BATT */ +#define WM831X_PWR_WALL 0x0200 /* PWR_WALL */ +#define WM831X_PWR_WALL_MASK 0x0200 /* PWR_WALL */ +#define WM831X_PWR_WALL_SHIFT 9 /* PWR_WALL */ +#define WM831X_PWR_WALL_WIDTH 1 /* PWR_WALL */ +#define WM831X_PWR_USB 0x0100 /* PWR_USB */ +#define WM831X_PWR_USB_MASK 0x0100 /* PWR_USB */ +#define WM831X_PWR_USB_SHIFT 8 /* PWR_USB */ +#define WM831X_PWR_USB_WIDTH 1 /* PWR_USB */ +#define WM831X_MAIN_STATE_MASK 0x001F /* MAIN_STATE - [4:0] */ +#define WM831X_MAIN_STATE_SHIFT 0 /* MAIN_STATE - [4:0] */ +#define WM831X_MAIN_STATE_WIDTH 5 /* MAIN_STATE - [4:0] */ + +/* + * R16456 (0x4048) - Charger Control 1 + */ +#define WM831X_CHG_ENA 0x8000 /* CHG_ENA */ +#define WM831X_CHG_ENA_MASK 0x8000 /* CHG_ENA */ +#define WM831X_CHG_ENA_SHIFT 15 /* CHG_ENA */ +#define WM831X_CHG_ENA_WIDTH 1 /* CHG_ENA */ +#define WM831X_CHG_FRC 0x4000 /* CHG_FRC */ +#define WM831X_CHG_FRC_MASK 0x4000 /* CHG_FRC */ +#define WM831X_CHG_FRC_SHIFT 14 /* CHG_FRC */ +#define WM831X_CHG_FRC_WIDTH 1 /* CHG_FRC */ +#define WM831X_CHG_ITERM_MASK 0x1C00 /* CHG_ITERM - [12:10] */ +#define WM831X_CHG_ITERM_SHIFT 10 /* CHG_ITERM - [12:10] */ +#define WM831X_CHG_ITERM_WIDTH 3 /* CHG_ITERM - [12:10] */ +#define WM831X_CHG_FAST 0x0020 /* CHG_FAST */ +#define WM831X_CHG_FAST_MASK 0x0020 /* CHG_FAST */ +#define WM831X_CHG_FAST_SHIFT 5 /* CHG_FAST */ +#define WM831X_CHG_FAST_WIDTH 1 /* CHG_FAST */ +#define 
WM831X_CHG_IMON_ENA 0x0002 /* CHG_IMON_ENA */ +#define WM831X_CHG_IMON_ENA_MASK 0x0002 /* CHG_IMON_ENA */ +#define WM831X_CHG_IMON_ENA_SHIFT 1 /* CHG_IMON_ENA */ +#define WM831X_CHG_IMON_ENA_WIDTH 1 /* CHG_IMON_ENA */ +#define WM831X_CHG_CHIP_TEMP_MON 0x0001 /* CHG_CHIP_TEMP_MON */ +#define WM831X_CHG_CHIP_TEMP_MON_MASK 0x0001 /* CHG_CHIP_TEMP_MON */ +#define WM831X_CHG_CHIP_TEMP_MON_SHIFT 0 /* CHG_CHIP_TEMP_MON */ +#define WM831X_CHG_CHIP_TEMP_MON_WIDTH 1 /* CHG_CHIP_TEMP_MON */ + +/* + * R16457 (0x4049) - Charger Control 2 + */ +#define WM831X_CHG_OFF_MSK 0x4000 /* CHG_OFF_MSK */ +#define WM831X_CHG_OFF_MSK_MASK 0x4000 /* CHG_OFF_MSK */ +#define WM831X_CHG_OFF_MSK_SHIFT 14 /* CHG_OFF_MSK */ +#define WM831X_CHG_OFF_MSK_WIDTH 1 /* CHG_OFF_MSK */ +#define WM831X_CHG_TIME_MASK 0x0F00 /* CHG_TIME - [11:8] */ +#define WM831X_CHG_TIME_SHIFT 8 /* CHG_TIME - [11:8] */ +#define WM831X_CHG_TIME_WIDTH 4 /* CHG_TIME - [11:8] */ +#define WM831X_CHG_TRKL_ILIM_MASK 0x00C0 /* CHG_TRKL_ILIM - [7:6] */ +#define WM831X_CHG_TRKL_ILIM_SHIFT 6 /* CHG_TRKL_ILIM - [7:6] */ +#define WM831X_CHG_TRKL_ILIM_WIDTH 2 /* CHG_TRKL_ILIM - [7:6] */ +#define WM831X_CHG_VSEL_MASK 0x0030 /* CHG_VSEL - [5:4] */ +#define WM831X_CHG_VSEL_SHIFT 4 /* CHG_VSEL - [5:4] */ +#define WM831X_CHG_VSEL_WIDTH 2 /* CHG_VSEL - [5:4] */ +#define WM831X_CHG_FAST_ILIM_MASK 0x000F /* CHG_FAST_ILIM - [3:0] */ +#define WM831X_CHG_FAST_ILIM_SHIFT 0 /* CHG_FAST_ILIM - [3:0] */ +#define WM831X_CHG_FAST_ILIM_WIDTH 4 /* CHG_FAST_ILIM - [3:0] */ + +/* + * R16458 (0x404A) - Charger Status + */ +#define WM831X_BATT_OV_STS 0x8000 /* BATT_OV_STS */ +#define WM831X_BATT_OV_STS_MASK 0x8000 /* BATT_OV_STS */ +#define WM831X_BATT_OV_STS_SHIFT 15 /* BATT_OV_STS */ +#define WM831X_BATT_OV_STS_WIDTH 1 /* BATT_OV_STS */ +#define WM831X_CHG_STATE_MASK 0x7000 /* CHG_STATE - [14:12] */ +#define WM831X_CHG_STATE_SHIFT 12 /* CHG_STATE - [14:12] */ +#define WM831X_CHG_STATE_WIDTH 3 /* CHG_STATE - [14:12] */ +#define WM831X_BATT_HOT_STS 0x0800 /* BATT_HOT_STS */ +#define WM831X_BATT_HOT_STS_MASK 0x0800 /* BATT_HOT_STS */ +#define WM831X_BATT_HOT_STS_SHIFT 11 /* BATT_HOT_STS */ +#define WM831X_BATT_HOT_STS_WIDTH 1 /* BATT_HOT_STS */ +#define WM831X_BATT_COLD_STS 0x0400 /* BATT_COLD_STS */ +#define WM831X_BATT_COLD_STS_MASK 0x0400 /* BATT_COLD_STS */ +#define WM831X_BATT_COLD_STS_SHIFT 10 /* BATT_COLD_STS */ +#define WM831X_BATT_COLD_STS_WIDTH 1 /* BATT_COLD_STS */ +#define WM831X_CHG_TOPOFF 0x0200 /* CHG_TOPOFF */ +#define WM831X_CHG_TOPOFF_MASK 0x0200 /* CHG_TOPOFF */ +#define WM831X_CHG_TOPOFF_SHIFT 9 /* CHG_TOPOFF */ +#define WM831X_CHG_TOPOFF_WIDTH 1 /* CHG_TOPOFF */ +#define WM831X_CHG_ACTIVE 0x0100 /* CHG_ACTIVE */ +#define WM831X_CHG_ACTIVE_MASK 0x0100 /* CHG_ACTIVE */ +#define WM831X_CHG_ACTIVE_SHIFT 8 /* CHG_ACTIVE */ +#define WM831X_CHG_ACTIVE_WIDTH 1 /* CHG_ACTIVE */ +#define WM831X_CHG_TIME_ELAPSED_MASK 0x00FF /* CHG_TIME_ELAPSED - [7:0] */ +#define WM831X_CHG_TIME_ELAPSED_SHIFT 0 /* CHG_TIME_ELAPSED - [7:0] */ +#define WM831X_CHG_TIME_ELAPSED_WIDTH 8 /* CHG_TIME_ELAPSED - [7:0] */ + +#define WM831X_CHG_STATE_OFF (0 << WM831X_CHG_STATE_SHIFT) +#define WM831X_CHG_STATE_TRICKLE (1 << WM831X_CHG_STATE_SHIFT) +#define WM831X_CHG_STATE_FAST (2 << WM831X_CHG_STATE_SHIFT) +#define WM831X_CHG_STATE_TRICKLE_OT (3 << WM831X_CHG_STATE_SHIFT) +#define WM831X_CHG_STATE_FAST_OT (4 << WM831X_CHG_STATE_SHIFT) +#define WM831X_CHG_STATE_DEFECTIVE (5 << WM831X_CHG_STATE_SHIFT) + +/* + * R16459 (0x404B) - Backup Charger Control + */ +#define WM831X_BKUP_CHG_ENA 0x8000 /* 
BKUP_CHG_ENA */ +#define WM831X_BKUP_CHG_ENA_MASK 0x8000 /* BKUP_CHG_ENA */ +#define WM831X_BKUP_CHG_ENA_SHIFT 15 /* BKUP_CHG_ENA */ +#define WM831X_BKUP_CHG_ENA_WIDTH 1 /* BKUP_CHG_ENA */ +#define WM831X_BKUP_CHG_STS 0x4000 /* BKUP_CHG_STS */ +#define WM831X_BKUP_CHG_STS_MASK 0x4000 /* BKUP_CHG_STS */ +#define WM831X_BKUP_CHG_STS_SHIFT 14 /* BKUP_CHG_STS */ +#define WM831X_BKUP_CHG_STS_WIDTH 1 /* BKUP_CHG_STS */ +#define WM831X_BKUP_CHG_MODE 0x1000 /* BKUP_CHG_MODE */ +#define WM831X_BKUP_CHG_MODE_MASK 0x1000 /* BKUP_CHG_MODE */ +#define WM831X_BKUP_CHG_MODE_SHIFT 12 /* BKUP_CHG_MODE */ +#define WM831X_BKUP_CHG_MODE_WIDTH 1 /* BKUP_CHG_MODE */ +#define WM831X_BKUP_BATT_DET_ENA 0x0800 /* BKUP_BATT_DET_ENA */ +#define WM831X_BKUP_BATT_DET_ENA_MASK 0x0800 /* BKUP_BATT_DET_ENA */ +#define WM831X_BKUP_BATT_DET_ENA_SHIFT 11 /* BKUP_BATT_DET_ENA */ +#define WM831X_BKUP_BATT_DET_ENA_WIDTH 1 /* BKUP_BATT_DET_ENA */ +#define WM831X_BKUP_BATT_STS 0x0400 /* BKUP_BATT_STS */ +#define WM831X_BKUP_BATT_STS_MASK 0x0400 /* BKUP_BATT_STS */ +#define WM831X_BKUP_BATT_STS_SHIFT 10 /* BKUP_BATT_STS */ +#define WM831X_BKUP_BATT_STS_WIDTH 1 /* BKUP_BATT_STS */ +#define WM831X_BKUP_CHG_VLIM 0x0010 /* BKUP_CHG_VLIM */ +#define WM831X_BKUP_CHG_VLIM_MASK 0x0010 /* BKUP_CHG_VLIM */ +#define WM831X_BKUP_CHG_VLIM_SHIFT 4 /* BKUP_CHG_VLIM */ +#define WM831X_BKUP_CHG_VLIM_WIDTH 1 /* BKUP_CHG_VLIM */ +#define WM831X_BKUP_CHG_ILIM_MASK 0x0003 /* BKUP_CHG_ILIM - [1:0] */ +#define WM831X_BKUP_CHG_ILIM_SHIFT 0 /* BKUP_CHG_ILIM - [1:0] */ +#define WM831X_BKUP_CHG_ILIM_WIDTH 2 /* BKUP_CHG_ILIM - [1:0] */ + +#endif diff --git a/include/linux/mfd/wm831x/regulator.h b/include/linux/mfd/wm831x/regulator.h index f95466343fb2..955d30fc6a27 100644 --- a/include/linux/mfd/wm831x/regulator.h +++ b/include/linux/mfd/wm831x/regulator.h @@ -1212,7 +1212,7 @@ #define WM831X_LDO1_OK_SHIFT 0 /* LDO1_OK */ #define WM831X_LDO1_OK_WIDTH 1 /* LDO1_OK */ -#define WM831X_ISINK_MAX_ISEL 56 -extern int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL]; +#define WM831X_ISINK_MAX_ISEL 55 +extern int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL + 1]; #endif diff --git a/include/linux/mfd/wm831x/status.h b/include/linux/mfd/wm831x/status.h new file mode 100644 index 000000000000..6bc090d0e3ac --- /dev/null +++ b/include/linux/mfd/wm831x/status.h @@ -0,0 +1,34 @@ +/* + * include/linux/mfd/wm831x/status.h -- Status LEDs for WM831x + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ + +#ifndef __MFD_WM831X_STATUS_H__ +#define __MFD_WM831X_STATUS_H__ + +#define WM831X_LED_SRC_MASK 0xC000 /* LED_SRC - [15:14] */ +#define WM831X_LED_SRC_SHIFT 14 /* LED_SRC - [15:14] */ +#define WM831X_LED_SRC_WIDTH 2 /* LED_SRC - [15:14] */ +#define WM831X_LED_MODE_MASK 0x0300 /* LED_MODE - [9:8] */ +#define WM831X_LED_MODE_SHIFT 8 /* LED_MODE - [9:8] */ +#define WM831X_LED_MODE_WIDTH 2 /* LED_MODE - [9:8] */ +#define WM831X_LED_SEQ_LEN_MASK 0x0030 /* LED_SEQ_LEN - [5:4] */ +#define WM831X_LED_SEQ_LEN_SHIFT 4 /* LED_SEQ_LEN - [5:4] */ +#define WM831X_LED_SEQ_LEN_WIDTH 2 /* LED_SEQ_LEN - [5:4] */ +#define WM831X_LED_DUR_MASK 0x000C /* LED_DUR - [3:2] */ +#define WM831X_LED_DUR_SHIFT 2 /* LED_DUR - [3:2] */ +#define WM831X_LED_DUR_WIDTH 2 /* LED_DUR - [3:2] */ +#define WM831X_LED_DUTY_CYC_MASK 0x0003 /* LED_DUTY_CYC - [1:0] */ +#define WM831X_LED_DUTY_CYC_SHIFT 0 /* LED_DUTY_CYC - [1:0] */ +#define WM831X_LED_DUTY_CYC_WIDTH 2 /* LED_DUTY_CYC - [1:0] */ + +#endif diff --git a/include/linux/mm.h b/include/linux/mm.h index 9a72cc78e6b8..24c395694f4d 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -25,6 +25,7 @@ extern unsigned long max_mapnr; #endif extern unsigned long num_physpages; +extern unsigned long totalram_pages; extern void * high_memory; extern int page_cluster; @@ -103,6 +104,7 @@ extern unsigned int kobjsize(const void *objp); #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */ #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */ #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */ +#define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */ #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */ #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS @@ -283,6 +285,14 @@ static inline int is_vmalloc_addr(const void *x) return 0; #endif } +#ifdef CONFIG_MMU +extern int is_vmalloc_or_module_addr(const void *x); +#else +static inline int is_vmalloc_or_module_addr(const void *x) +{ + return 0; +} +#endif static inline struct page *compound_head(struct page *page) { @@ -685,11 +695,12 @@ static inline int page_mapped(struct page *page) #define VM_FAULT_SIGBUS 0x0002 #define VM_FAULT_MAJOR 0x0004 #define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */ +#define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned page */ #define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */ #define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */ -#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS) +#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON) /* * Can be called by the pagefault handler when it gets a VM_FAULT_OOM. 
@@ -700,17 +711,8 @@ extern void pagefault_out_of_memory(void); extern void show_free_areas(void); -#ifdef CONFIG_SHMEM -extern int shmem_lock(struct file *file, int lock, struct user_struct *user); -#else -static inline int shmem_lock(struct file *file, int lock, - struct user_struct *user) -{ - return 0; -} -#endif +int shmem_lock(struct file *file, int lock, struct user_struct *user); struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags); - int shmem_zero_setup(struct vm_area_struct *); #ifndef CONFIG_MMU @@ -790,8 +792,14 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping, unmap_mapping_range(mapping, holebegin, holelen, 0); } -extern int vmtruncate(struct inode * inode, loff_t offset); -extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end); +extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new); +extern int vmtruncate(struct inode *inode, loff_t offset); +extern int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end); + +int truncate_inode_page(struct address_space *mapping, struct page *page); +int generic_error_remove_page(struct address_space *mapping, struct page *page); + +int invalidate_inode_page(struct page *page); #ifdef CONFIG_MMU extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, @@ -815,6 +823,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, struct page **pages, struct vm_area_struct **vmas); int get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages); +struct page *get_dump_page(unsigned long addr); extern int try_to_release_page(struct page * page, gfp_t gfp_mask); extern void do_invalidatepage(struct page *page, unsigned long offset); @@ -1058,6 +1067,8 @@ extern void setup_per_cpu_pageset(void); static inline void setup_per_cpu_pageset(void) {} #endif +extern void zone_pcp_update(struct zone *zone); + /* nommu.c */ extern atomic_long_t mmap_pages_allocated; @@ -1226,7 +1237,8 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address, #define FOLL_WRITE 0x01 /* check pte is writable */ #define FOLL_TOUCH 0x02 /* mark page accessed */ #define FOLL_GET 0x04 /* do get_page on page */ -#define FOLL_ANON 0x08 /* give ZERO_PAGE if no pgtable */ +#define FOLL_DUMP 0x08 /* give error on hole if it would be zero */ +#define FOLL_FORCE 0x10 /* get_user_pages read/write w/o permission */ typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, void *data); @@ -1274,7 +1286,7 @@ int in_gate_area_no_task(unsigned long addr); #define in_gate_area(task, addr) ({(void)task; in_gate_area_no_task(addr);}) #endif /* __HAVE_ARCH_GATE_AREA */ -int drop_caches_sysctl_handler(struct ctl_table *, int, struct file *, +int drop_caches_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask, unsigned long lru_pages); @@ -1303,5 +1315,12 @@ void vmemmap_populate_print_last(void); extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim, size_t size); extern void refund_locked_memory(struct mm_struct *mm, size_t size); + +extern void memory_failure(unsigned long pfn, int trapno); +extern int __memory_failure(unsigned long pfn, int trapno, int ref); +extern int sysctl_memory_failure_early_kill; +extern int sysctl_memory_failure_recovery; +extern atomic_long_t mce_bad_pages; + #endif /* __KERNEL__ */ #endif /* _LINUX_MM_H */ diff --git a/include/linux/mm_inline.h 
b/include/linux/mm_inline.h index 7fbb97267556..8835b877b8db 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -5,7 +5,7 @@ * page_is_file_cache - should the page be on a file LRU or anon LRU? * @page: the page to test * - * Returns LRU_FILE if @page is page cache page backed by a regular filesystem, + * Returns 1 if @page is page cache page backed by a regular filesystem, * or 0 if @page is anonymous, tmpfs or otherwise ram or swap backed. * Used by functions that manipulate the LRU lists, to sort a page * onto the right LRU list. @@ -16,11 +16,7 @@ */ static inline int page_is_file_cache(struct page *page) { - if (PageSwapBacked(page)) - return 0; - - /* The page is page cache backed by a normal filesystem. */ - return LRU_FILE; + return !PageSwapBacked(page); } static inline void @@ -39,21 +35,36 @@ del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l) mem_cgroup_del_lru_list(page, l); } +/** + * page_lru_base_type - which LRU list type should a page be on? + * @page: the page to test + * + * Used for LRU list index arithmetic. + * + * Returns the base LRU type - file or anon - @page should be on. + */ +static inline enum lru_list page_lru_base_type(struct page *page) +{ + if (page_is_file_cache(page)) + return LRU_INACTIVE_FILE; + return LRU_INACTIVE_ANON; +} + static inline void del_page_from_lru(struct zone *zone, struct page *page) { - enum lru_list l = LRU_BASE; + enum lru_list l; list_del(&page->lru); if (PageUnevictable(page)) { __ClearPageUnevictable(page); l = LRU_UNEVICTABLE; } else { + l = page_lru_base_type(page); if (PageActive(page)) { __ClearPageActive(page); l += LRU_ACTIVE; } - l += page_is_file_cache(page); } __dec_zone_state(zone, NR_LRU_BASE + l); mem_cgroup_del_lru_list(page, l); @@ -68,14 +79,14 @@ del_page_from_lru(struct zone *zone, struct page *page) */ static inline enum lru_list page_lru(struct page *page) { - enum lru_list lru = LRU_BASE; + enum lru_list lru; if (PageUnevictable(page)) lru = LRU_UNEVICTABLE; else { + lru = page_lru_base_type(page); if (PageActive(page)) lru += LRU_ACTIVE; - lru += page_is_file_cache(page); } return lru; diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 0042090a4d70..84a524afb3dc 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -171,7 +171,7 @@ struct vm_area_struct { struct anon_vma *anon_vma; /* Serialized by page_table_lock */ /* Function pointers to deal with this struct. 
*/ - struct vm_operations_struct * vm_ops; + const struct vm_operations_struct *vm_ops; /* Information about our backing store: */ unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE @@ -240,6 +240,8 @@ struct mm_struct { unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */ + struct linux_binfmt *binfmt; + cpumask_t cpu_vm_mask; /* Architecture-specific MM context */ @@ -259,11 +261,10 @@ struct mm_struct { unsigned long flags; /* Must use atomic bitops to access the bits */ struct core_state *core_state; /* coredumping support */ - - /* aio bits */ +#ifdef CONFIG_AIO spinlock_t ioctx_lock; struct hlist_head ioctx_list; - +#endif #ifdef CONFIG_MM_OWNER /* * "owner" points to a task that is regarded as the canonical diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 403aa505f27e..2ee22e8af110 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -40,6 +40,8 @@ struct mmc_csd { }; struct mmc_ext_csd { + u8 rev; + unsigned int sa_timeout; /* Units: 100ns */ unsigned int hs_max_dtr; unsigned int sectors; }; @@ -62,7 +64,8 @@ struct sdio_cccr { low_speed:1, wide_bus:1, high_power:1, - high_speed:1; + high_speed:1, + disable_cd:1; }; struct sdio_cis { @@ -94,6 +97,8 @@ struct mmc_card { #define MMC_STATE_READONLY (1<<1) /* card is read-only */ #define MMC_STATE_HIGHSPEED (1<<2) /* card is in high speed mode */ #define MMC_STATE_BLOCKADDR (1<<3) /* card uses block-addressing */ + unsigned int quirks; /* card quirks */ +#define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */ u32 raw_cid[4]; /* raw card CID */ u32 raw_csd[4]; /* raw card CSD */ @@ -129,6 +134,11 @@ struct mmc_card { #define mmc_card_set_highspeed(c) ((c)->state |= MMC_STATE_HIGHSPEED) #define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR) +static inline int mmc_card_lenient_fn0(const struct mmc_card *c) +{ + return c->quirks & MMC_QUIRK_LENIENT_FN0; +} + #define mmc_card_name(c) ((c)->cid.prod_name) #define mmc_card_id(c) (dev_name(&(c)->dev)) diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index 7ac8b500d55c..e4898e9eeb59 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -139,6 +139,7 @@ extern unsigned int mmc_align_data_size(struct mmc_card *, unsigned int); extern int __mmc_claim_host(struct mmc_host *host, atomic_t *abort); extern void mmc_release_host(struct mmc_host *host); +extern int mmc_try_claim_host(struct mmc_host *host); /** * mmc_claim_host - exclusively claim a host diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 3e7615e9087e..eaf36364b7d4 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -11,6 +11,7 @@ #define LINUX_MMC_HOST_H #include <linux/leds.h> +#include <linux/sched.h> #include <linux/mmc/core.h> @@ -51,6 +52,35 @@ struct mmc_ios { }; struct mmc_host_ops { + /* + * Hosts that support power saving can use the 'enable' and 'disable' + * methods to exit and enter power saving states. 'enable' is called + * when the host is claimed and 'disable' is called (or scheduled with + * a delay) when the host is released. The 'disable' is scheduled if + * the disable delay set by 'mmc_set_disable_delay()' is non-zero, + * otherwise 'disable' is called immediately. 'disable' may be + * scheduled repeatedly, to permit ever greater power saving at the + * expense of ever greater latency to re-enable. Rescheduling is + * determined by the return value of the 'disable' method. 
A positive + * value gives the delay in milliseconds. + * + * In the case where a host function (like set_ios) may be called + * with or without the host claimed, enabling and disabling can be + * done directly and will nest correctly. Call 'mmc_host_enable()' and + * 'mmc_host_lazy_disable()' for this purpose, but note that these + * functions must be paired. + * + * Alternatively, 'mmc_host_enable()' may be paired with + * 'mmc_host_disable()' which calls 'disable' immediately. In this + * case the 'disable' method will be called with 'lazy' set to 0. + * This is mainly useful for error paths. + * + * Because lazy disable may be called from a work queue, the 'disable' + * method must claim the host when 'lazy' != 0, which will work + * correctly because recursion is detected and handled. + */ + int (*enable)(struct mmc_host *host); + int (*disable)(struct mmc_host *host, int lazy); void (*request)(struct mmc_host *host, struct mmc_request *req); /* * Avoid calling these three functions too often or in a "fast path", @@ -118,6 +148,9 @@ struct mmc_host { #define MMC_CAP_SPI (1 << 4) /* Talks only SPI protocols */ #define MMC_CAP_NEEDS_POLL (1 << 5) /* Needs polling for card-detection */ #define MMC_CAP_8_BIT_DATA (1 << 6) /* Can the host do 8 bit transfers */ +#define MMC_CAP_DISABLE (1 << 7) /* Can the host be disabled */ +#define MMC_CAP_NONREMOVABLE (1 << 8) /* Nonremovable e.g. eMMC */ +#define MMC_CAP_WAIT_WHILE_BUSY (1 << 9) /* Waits while card is busy */ /* host specific block data */ unsigned int max_seg_size; /* see blk_queue_max_segment_size */ @@ -142,9 +175,18 @@ struct mmc_host { unsigned int removed:1; /* host is being removed */ #endif + /* Only used with MMC_CAP_DISABLE */ + int enabled; /* host is enabled */ + int nesting_cnt; /* "enable" nesting count */ + int en_dis_recurs; /* detect recursion */ + unsigned int disable_delay; /* disable delay in msecs */ + struct delayed_work disable; /* disabling work */ + struct mmc_card *card; /* device attached to this host */ wait_queue_head_t wq; + struct task_struct *claimer; /* task that has host claimed */ + int claim_cnt; /* "claim" nesting count */ struct delayed_work detect; @@ -183,6 +225,9 @@ static inline void *mmc_priv(struct mmc_host *host) extern int mmc_suspend_host(struct mmc_host *, pm_message_t); extern int mmc_resume_host(struct mmc_host *); +extern void mmc_power_save_host(struct mmc_host *host); +extern void mmc_power_restore_host(struct mmc_host *host); + extern void mmc_detect_change(struct mmc_host *, unsigned long delay); extern void mmc_request_done(struct mmc_host *, struct mmc_request *); @@ -197,5 +242,19 @@ struct regulator; int mmc_regulator_get_ocrmask(struct regulator *supply); int mmc_regulator_set_ocr(struct regulator *supply, unsigned short vdd_bit); +int mmc_card_awake(struct mmc_host *host); +int mmc_card_sleep(struct mmc_host *host); +int mmc_card_can_sleep(struct mmc_host *host); + +int mmc_host_enable(struct mmc_host *host); +int mmc_host_disable(struct mmc_host *host); +int mmc_host_lazy_disable(struct mmc_host *host); + +static inline void mmc_set_disable_delay(struct mmc_host *host, + unsigned int disable_delay) +{ + host->disable_delay = disable_delay; +} + #endif diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h index 14b81f3e5232..c02c8db73701 100644 --- a/include/linux/mmc/mmc.h +++ b/include/linux/mmc/mmc.h @@ -31,6 +31,7 @@ #define MMC_ALL_SEND_CID 2 /* bcr R2 */ #define MMC_SET_RELATIVE_ADDR 3 /* ac [31:16] RCA R1 */ #define MMC_SET_DSR 4 /* bc [31:16] RCA */ +#define 
MMC_SLEEP_AWAKE 5 /* ac [31:16] RCA 15:flg R1b */ #define MMC_SWITCH 6 /* ac [31:0] See below R1b */ #define MMC_SELECT_CARD 7 /* ac [31:16] RCA R1 */ #define MMC_SEND_EXT_CSD 8 /* adtc R1 */ @@ -127,6 +128,7 @@ #define R1_STATUS(x) (x & 0xFFFFE000) #define R1_CURRENT_STATE(x) ((x & 0x00001E00) >> 9) /* sx, b (4 bits) */ #define R1_READY_FOR_DATA (1 << 8) /* sx, a */ +#define R1_SWITCH_ERROR (1 << 7) /* sx, c */ #define R1_APP_CMD (1 << 5) /* sr, c */ /* @@ -254,6 +256,7 @@ struct _mmc_csd { #define EXT_CSD_CARD_TYPE 196 /* RO */ #define EXT_CSD_REV 192 /* RO */ #define EXT_CSD_SEC_CNT 212 /* RO, 4 bytes */ +#define EXT_CSD_S_A_TIMEOUT 217 /* * EXT_CSD field definitions diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h index 451bdfc85830..ac3ab683fec6 100644 --- a/include/linux/mmc/sdio_func.h +++ b/include/linux/mmc/sdio_func.h @@ -67,6 +67,7 @@ struct sdio_func { #define sdio_get_drvdata(f) dev_get_drvdata(&(f)->dev) #define sdio_set_drvdata(f,d) dev_set_drvdata(&(f)->dev, d) +#define dev_to_sdio_func(d) container_of(d, struct sdio_func, dev) /* * SDIO function device driver @@ -81,6 +82,8 @@ struct sdio_driver { struct device_driver drv; }; +#define to_sdio_driver(d) container_of(d, struct sdio_driver, drv) + /** * SDIO_DEVICE - macro used to describe a specific SDIO device * @vend: the 16 bit manufacturer code diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h new file mode 100644 index 000000000000..70fffeba7495 --- /dev/null +++ b/include/linux/mmu_context.h @@ -0,0 +1,9 @@ +#ifndef _LINUX_MMU_CONTEXT_H +#define _LINUX_MMU_CONTEXT_H + +struct mm_struct; + +void use_mm(struct mm_struct *mm); +void unuse_mm(struct mm_struct *mm); + +#endif diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index b77486d152cd..4e02ee2b071e 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -62,6 +62,15 @@ struct mmu_notifier_ops { unsigned long address); /* + * change_pte is called in cases that pte mapping to page is changed: + * for example, when ksm remaps pte to point to a new shared page. 
+ */ + void (*change_pte)(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long address, + pte_t pte); + + /* * Before this is invoked any secondary MMU is still ok to * read/write to the page previously pointed to by the Linux * pte because the page hasn't been freed yet and it won't be @@ -154,6 +163,8 @@ extern void __mmu_notifier_mm_destroy(struct mm_struct *mm); extern void __mmu_notifier_release(struct mm_struct *mm); extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm, unsigned long address); +extern void __mmu_notifier_change_pte(struct mm_struct *mm, + unsigned long address, pte_t pte); extern void __mmu_notifier_invalidate_page(struct mm_struct *mm, unsigned long address); extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm, @@ -175,6 +186,13 @@ static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm, return 0; } +static inline void mmu_notifier_change_pte(struct mm_struct *mm, + unsigned long address, pte_t pte) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_change_pte(mm, address, pte); +} + static inline void mmu_notifier_invalidate_page(struct mm_struct *mm, unsigned long address) { @@ -236,6 +254,16 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) __young; \ }) +#define set_pte_at_notify(__mm, __address, __ptep, __pte) \ +({ \ + struct mm_struct *___mm = __mm; \ + unsigned long ___address = __address; \ + pte_t ___pte = __pte; \ + \ + set_pte_at(___mm, ___address, __ptep, ___pte); \ + mmu_notifier_change_pte(___mm, ___address, ___pte); \ +}) + #else /* CONFIG_MMU_NOTIFIER */ static inline void mmu_notifier_release(struct mm_struct *mm) @@ -248,6 +276,11 @@ static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm, return 0; } +static inline void mmu_notifier_change_pte(struct mm_struct *mm, + unsigned long address, pte_t pte) +{ +} + static inline void mmu_notifier_invalidate_page(struct mm_struct *mm, unsigned long address) { @@ -273,6 +306,7 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) #define ptep_clear_flush_young_notify ptep_clear_flush_young #define ptep_clear_flush_notify ptep_clear_flush +#define set_pte_at_notify set_pte_at #endif /* CONFIG_MMU_NOTIFIER */ diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 889598537370..6f7561730d88 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -38,6 +38,7 @@ #define MIGRATE_UNMOVABLE 0 #define MIGRATE_RECLAIMABLE 1 #define MIGRATE_MOVABLE 2 +#define MIGRATE_PCPTYPES 3 /* the number of types on the pcp lists */ #define MIGRATE_RESERVE 3 #define MIGRATE_ISOLATE 4 /* can't allocate from here */ #define MIGRATE_TYPES 5 @@ -94,11 +95,15 @@ enum zone_stat_item { NR_SLAB_RECLAIMABLE, NR_SLAB_UNRECLAIMABLE, NR_PAGETABLE, /* used for pagetables */ + NR_KERNEL_STACK, + /* Second 128 byte cacheline */ NR_UNSTABLE_NFS, /* NFS unstable pages */ NR_BOUNCE, NR_VMSCAN_WRITE, - /* Second 128 byte cacheline */ NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */ + NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */ + NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */ + NR_SHMEM, /* shmem pages (included tmpfs/GEM pages) */ #ifdef CONFIG_NUMA NUMA_HIT, /* allocated in intended node */ NUMA_MISS, /* allocated in non intended node */ @@ -165,7 +170,9 @@ struct per_cpu_pages { int count; /* number of pages in the list */ int high; /* high watermark, emptying needed */ int batch; /* chunk size for buddy add/remove */ - struct list_head list; /* the list of pages */ + + /* Lists 
of pages, one per migrate type stored on the pcp-lists */ + struct list_head lists[MIGRATE_PCPTYPES]; }; struct per_cpu_pageset { @@ -269,6 +276,11 @@ struct zone_reclaim_stat { */ unsigned long recent_rotated[2]; unsigned long recent_scanned[2]; + + /* + * accumulated for batching + */ + unsigned long nr_saved_scan[NR_LRU_LISTS]; }; struct zone { @@ -323,7 +335,6 @@ struct zone { spinlock_t lru_lock; struct zone_lru { struct list_head list; - unsigned long nr_saved_scan; /* accumulated for batching */ } lru[NR_LRU_LISTS]; struct zone_reclaim_stat reclaim_stat; @@ -744,21 +755,20 @@ static inline int is_dma(struct zone *zone) /* These two functions are used to setup the per zone pages min values */ struct ctl_table; -struct file; -int min_free_kbytes_sysctl_handler(struct ctl_table *, int, struct file *, +int min_free_kbytes_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1]; -int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *, +int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); -int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file *, +int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int, - struct file *, void __user *, size_t *, loff_t *); + void __user *, size_t *, loff_t *); int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int, - struct file *, void __user *, size_t *, loff_t *); + void __user *, size_t *, loff_t *); extern int numa_zonelist_order_handler(struct ctl_table *, int, - struct file *, void __user *, size_t *, loff_t *); + void __user *, size_t *, loff_t *); extern char numa_zonelist_order[]; #define NUMA_ZONELIST_ORDER_LEN 16 /* string buffer size */ diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 1bf5900ffe43..f58e9d836f32 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -399,6 +399,17 @@ struct i2c_device_id { __attribute__((aligned(sizeof(kernel_ulong_t)))); }; +/* spi */ + +#define SPI_NAME_SIZE 32 +#define SPI_MODULE_PREFIX "spi:" + +struct spi_device_id { + char name[SPI_NAME_SIZE]; + kernel_ulong_t driver_data /* Data private to the driver */ + __attribute__((aligned(sizeof(kernel_ulong_t)))); +}; + /* dmi */ enum dmi_field { DMI_NONE, diff --git a/include/linux/module.h b/include/linux/module.h index 1c755b2f937d..482efc865acf 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -128,7 +128,10 @@ extern struct module __this_module; */ #define MODULE_LICENSE(_license) MODULE_INFO(license, _license) -/* Author, ideally of form NAME[, NAME]*[ and NAME] */ +/* + * Author(s), use "Name <email>" or just "Name", for multiple + * authors use multiple MODULE_AUTHOR() statements/lines. + */ #define MODULE_AUTHOR(_author) MODULE_INFO(author, _author) /* What your module does. */ @@ -308,10 +311,14 @@ struct module #endif #ifdef CONFIG_KALLSYMS - /* We keep the symbol and string tables for kallsyms. */ - Elf_Sym *symtab; - unsigned int num_symtab; - char *strtab; + /* + * We keep the symbol and string tables for kallsyms. + * The core_* fields below are temporary, loader-only (they + * could really be discarded after module init). 
+ */ + Elf_Sym *symtab, *core_symtab; + unsigned int num_symtab, core_num_syms; + char *strtab, *core_strtab; /* Section attributes */ struct module_sect_attrs *sect_attrs; diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index 6547c3cdbc4c..82a9124f7d75 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h @@ -37,7 +37,6 @@ typedef int (*param_set_fn)(const char *val, struct kernel_param *kp); typedef int (*param_get_fn)(char *buffer, struct kernel_param *kp); /* Flag bits for kernel_param.flags */ -#define KPARAM_KMALLOCED 1 #define KPARAM_ISBOOL 2 struct kernel_param { diff --git a/include/linux/mroute.h b/include/linux/mroute.h index 0d45b4e8d367..08bc776d05e2 100644 --- a/include/linux/mroute.h +++ b/include/linux/mroute.h @@ -145,14 +145,14 @@ static inline int ip_mroute_opt(int opt) #endif #ifdef CONFIG_IP_MROUTE -extern int ip_mroute_setsockopt(struct sock *, int, char __user *, int); +extern int ip_mroute_setsockopt(struct sock *, int, char __user *, unsigned int); extern int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *); extern int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg); extern int ip_mr_init(void); #else static inline int ip_mroute_setsockopt(struct sock *sock, - int optname, char __user *optval, int optlen) + int optname, char __user *optval, unsigned int optlen) { return -ENOPROTOOPT; } diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h index 43dc97e32183..b191865a6ca3 100644 --- a/include/linux/mroute6.h +++ b/include/linux/mroute6.h @@ -134,7 +134,7 @@ static inline int ip6_mroute_opt(int opt) struct sock; #ifdef CONFIG_IPV6_MROUTE -extern int ip6_mroute_setsockopt(struct sock *, int, char __user *, int); +extern int ip6_mroute_setsockopt(struct sock *, int, char __user *, unsigned int); extern int ip6_mroute_getsockopt(struct sock *, int, char __user *, int __user *); extern int ip6_mr_input(struct sk_buff *skb); extern int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg); @@ -143,7 +143,7 @@ extern void ip6_mr_cleanup(void); #else static inline int ip6_mroute_setsockopt(struct sock *sock, - int optname, char __user *optval, int optlen) + int optname, char __user *optval, unsigned int optlen) { return -ENOPROTOOPT; } diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 4030ebada49e..7a232a9bdd62 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -121,6 +121,7 @@ typedef enum { NAND_ECC_SOFT, NAND_ECC_HW, NAND_ECC_HW_SYNDROME, + NAND_ECC_HW_OOB_FIRST, } nand_ecc_modes_t; /* @@ -271,13 +272,13 @@ struct nand_ecc_ctrl { uint8_t *calc_ecc); int (*read_page_raw)(struct mtd_info *mtd, struct nand_chip *chip, - uint8_t *buf); + uint8_t *buf, int page); void (*write_page_raw)(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf); int (*read_page)(struct mtd_info *mtd, struct nand_chip *chip, - uint8_t *buf); + uint8_t *buf, int page); int (*read_subpage)(struct mtd_info *mtd, struct nand_chip *chip, uint32_t offs, uint32_t len, diff --git a/include/linux/mtd/nand_ecc.h b/include/linux/mtd/nand_ecc.h index 090da505425d..052ea8ca2434 100644 --- a/include/linux/mtd/nand_ecc.h +++ b/include/linux/mtd/nand_ecc.h @@ -21,6 +21,12 @@ struct mtd_info; int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code); /* + * Detect and correct a 1 bit error for eccsize byte block + */ +int __nand_correct_data(u_char *dat, u_char *read_ecc, u_char *calc_ecc, + unsigned int eccsize); + +/* * Detect and correct a 1 bit error for 
256 byte block */ int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc); diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h index 8ed873374381..4e49f3350678 100644 --- a/include/linux/mtd/onenand.h +++ b/include/linux/mtd/onenand.h @@ -214,4 +214,12 @@ unsigned onenand_block(struct onenand_chip *this, loff_t addr); loff_t onenand_addr(struct onenand_chip *this, int block); int flexonenand_region(struct mtd_info *mtd, loff_t addr); +struct mtd_partition; + +struct onenand_platform_data { + void (*mmcontrol)(struct mtd_info *mtd, int sync_read); + struct mtd_partition *parts; + unsigned int nr_parts; +}; + #endif /* __LINUX_MTD_ONENAND_H */ diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h index 86a6bbef6465..acadbf53a69f 100644 --- a/include/linux/mtd/onenand_regs.h +++ b/include/linux/mtd/onenand_regs.h @@ -207,6 +207,9 @@ #define ONENAND_ECC_2BIT (1 << 1) #define ONENAND_ECC_2BIT_ALL (0xAAAA) #define FLEXONENAND_UNCORRECTABLE_ERROR (0x1010) +#define ONENAND_ECC_3BIT (1 << 2) +#define ONENAND_ECC_4BIT (1 << 3) +#define ONENAND_ECC_4BIT_UNCORRECTABLE (0x1010) /* * One-Time Programmable (OTP) diff --git a/include/linux/namei.h b/include/linux/namei.h index d870ae2faedc..ec0f607b364a 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -40,7 +40,7 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND}; * - follow links at the end * - require a directory * - ending slashes ok even for nonexistent files - * - internal "there are more path compnents" flag + * - internal "there are more path components" flag * - locked when lookup done with dcache_lock held * - dentry cache is untrusted; force a real lookup */ diff --git a/include/linux/net.h b/include/linux/net.h index df20f680f455..d7e26e30c8c2 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -178,11 +178,11 @@ struct proto_ops { int (*listen) (struct socket *sock, int len); int (*shutdown) (struct socket *sock, int flags); int (*setsockopt)(struct socket *sock, int level, - int optname, char __user *optval, int optlen); + int optname, char __user *optval, unsigned int optlen); int (*getsockopt)(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen); int (*compat_setsockopt)(struct socket *sock, int level, - int optname, char __user *optval, int optlen); + int optname, char __user *optval, unsigned int optlen); int (*compat_getsockopt)(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen); int (*sendmsg) (struct kiocb *iocb, struct socket *sock, @@ -256,7 +256,7 @@ extern int kernel_getpeername(struct socket *sock, struct sockaddr *addr, extern int kernel_getsockopt(struct socket *sock, int level, int optname, char *optval, int *optlen); extern int kernel_setsockopt(struct socket *sock, int level, int optname, - char *optval, int optlen); + char *optval, unsigned int optlen); extern int kernel_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags); extern int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg); @@ -313,7 +313,7 @@ SOCKCALL_WRAP(name, compat_ioctl, (struct socket *sock, unsigned int cmd, \ SOCKCALL_WRAP(name, listen, (struct socket *sock, int len), (sock, len)) \ SOCKCALL_WRAP(name, shutdown, (struct socket *sock, int flags), (sock, flags)) \ SOCKCALL_WRAP(name, setsockopt, (struct socket *sock, int level, int optname, \ - char __user *optval, int optlen), (sock, level, optname, optval, optlen)) \ + 
char __user *optval, unsigned int optlen), (sock, level, optname, optval, optlen)) \ SOCKCALL_WRAP(name, getsockopt, (struct socket *sock, int level, int optname, \ char __user *optval, int __user *optlen), (sock, level, optname, optval, optlen)) \ SOCKCALL_WRAP(name, sendmsg, (struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t len), \ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 94958c109761..812a5f3c2abe 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -557,7 +557,7 @@ struct netdev_queue { * Callback uses when the transmitter has not made any progress * for dev->watchdog ticks. * - * struct net_device_stats* (*get_stats)(struct net_device *dev); + * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); * Called when a user wants to get the network device usage * statistics. If not defined, the counters in dev->stats will * be used. diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 48cfe51bfddc..6132b5e6d9d3 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -221,12 +221,12 @@ __ret;}) /* Call setsockopt() */ int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, - int len); + unsigned int len); int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, int *len); int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, - char __user *opt, int len); + char __user *opt, unsigned int len); int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, int *len); diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 080f6ba9e73a..ab5d3126831f 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -187,6 +187,7 @@ extern struct sock *netlink_kernel_create(struct net *net, extern void netlink_kernel_release(struct sock *sk); extern int __netlink_change_ngroups(struct sock *sk, unsigned int groups); extern int netlink_change_ngroups(struct sock *sk, unsigned int groups); +extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group); extern void netlink_clear_multicast_users(struct sock *sk, unsigned int group); extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err); extern int netlink_has_listeners(struct sock *sk, unsigned int group); diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 33b283601f62..c4c060208109 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -234,7 +234,7 @@ enum nfs_opnum4 { Needs to be updated if more operations are defined in future.*/ #define FIRST_NFS4_OP OP_ACCESS -#define LAST_NFS4_OP OP_RELEASE_LOCKOWNER +#define LAST_NFS4_OP OP_RECLAIM_COMPLETE enum nfsstat4 { NFS4_OK = 0, diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index f6b90240dd41..d09db1bc9083 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -40,7 +40,6 @@ #ifdef __KERNEL__ #include <linux/in.h> -#include <linux/kref.h> #include <linux/mm.h> #include <linux/pagemap.h> #include <linux/rbtree.h> diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h index 2b49d676d0c9..510ffdd5020e 100644 --- a/include/linux/nfsd/nfsd.h +++ b/include/linux/nfsd/nfsd.h @@ -56,8 +56,11 @@ extern struct svc_version nfsd_version2, nfsd_version3, extern u32 nfsd_supported_minorversion; extern struct mutex nfsd_mutex; extern struct svc_serv *nfsd_serv; +extern spinlock_t nfsd_drc_lock; +extern unsigned int nfsd_drc_max_mem; +extern unsigned int nfsd_drc_mem_used; -extern struct seq_operations 
nfs_exports_op; +extern const struct seq_operations nfs_exports_op; /* * Function prototypes. @@ -163,7 +166,7 @@ extern int nfsd_max_blksize; extern unsigned int max_delegations; int nfs4_state_init(void); void nfsd4_free_slabs(void); -void nfs4_state_start(void); +int nfs4_state_start(void); void nfs4_state_shutdown(void); time_t nfs4_lease_time(void); void nfs4_reset_lease(time_t leasetime); @@ -171,7 +174,7 @@ int nfs4_reset_recoverydir(char *recdir); #else static inline int nfs4_state_init(void) { return 0; } static inline void nfsd4_free_slabs(void) { } -static inline void nfs4_state_start(void) { } +static inline int nfs4_state_start(void) { return 0; } static inline void nfs4_state_shutdown(void) { } static inline time_t nfs4_lease_time(void) { return 0; } static inline void nfs4_reset_lease(time_t leasetime) { } diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h index 57ab2ed08459..b38d11324189 100644 --- a/include/linux/nfsd/state.h +++ b/include/linux/nfsd/state.h @@ -60,6 +60,12 @@ typedef struct { #define si_stateownerid si_opaque.so_stateownerid #define si_fileid si_opaque.so_fileid +struct nfsd4_cb_sequence { + /* args/res */ + u32 cbs_minorversion; + struct nfs4_client *cbs_clp; +}; + struct nfs4_delegation { struct list_head dl_perfile; struct list_head dl_perclnt; @@ -81,38 +87,35 @@ struct nfs4_delegation { /* client delegation callback info */ struct nfs4_cb_conn { /* SETCLIENTID info */ - u32 cb_addr; - unsigned short cb_port; + struct sockaddr_storage cb_addr; + size_t cb_addrlen; u32 cb_prog; u32 cb_minorversion; u32 cb_ident; /* minorversion 0 only */ /* RPC client info */ atomic_t cb_set; /* successful CB_NULL call */ struct rpc_clnt * cb_client; - struct rpc_cred * cb_cred; }; -/* Maximum number of slots per session. 128 is useful for long haul TCP */ -#define NFSD_MAX_SLOTS_PER_SESSION 128 -/* Maximum number of pages per slot cache entry */ -#define NFSD_PAGES_PER_SLOT 1 +/* Maximum number of slots per session. 
160 is useful for long haul TCP */ +#define NFSD_MAX_SLOTS_PER_SESSION 160 /* Maximum number of operations per session compound */ #define NFSD_MAX_OPS_PER_COMPOUND 16 - -struct nfsd4_cache_entry { - __be32 ce_status; - struct kvec ce_datav; /* encoded NFSv4.1 data in rq_res.head[0] */ - struct page *ce_respages[NFSD_PAGES_PER_SLOT + 1]; - int ce_cachethis; - short ce_resused; - int ce_opcnt; - int ce_rpchdrlen; -}; +/* Maximum session per slot cache size */ +#define NFSD_SLOT_CACHE_SIZE 1024 +/* Maximum number of NFSD_SLOT_CACHE_SIZE slots per session */ +#define NFSD_CACHE_SIZE_SLOTS_PER_SESSION 32 +#define NFSD_MAX_MEM_PER_SESSION \ + (NFSD_CACHE_SIZE_SLOTS_PER_SESSION * NFSD_SLOT_CACHE_SIZE) struct nfsd4_slot { - bool sl_inuse; - u32 sl_seqid; - struct nfsd4_cache_entry sl_cache_entry; + bool sl_inuse; + bool sl_cachethis; + u16 sl_opcnt; + u32 sl_seqid; + __be32 sl_status; + u32 sl_datalen; + char sl_data[]; }; struct nfsd4_channel_attrs { @@ -126,6 +129,25 @@ struct nfsd4_channel_attrs { u32 rdma_attrs; }; +struct nfsd4_create_session { + clientid_t clientid; + struct nfs4_sessionid sessionid; + u32 seqid; + u32 flags; + struct nfsd4_channel_attrs fore_channel; + struct nfsd4_channel_attrs back_channel; + u32 callback_prog; + u32 uid; + u32 gid; +}; + +/* The single slot clientid cache structure */ +struct nfsd4_clid_slot { + u32 sl_seqid; + __be32 sl_status; + struct nfsd4_create_session sl_cr_ses; +}; + struct nfsd4_session { struct kref se_ref; struct list_head se_hash; /* hash by sessionid */ @@ -135,7 +157,7 @@ struct nfsd4_session { struct nfs4_sessionid se_sessionid; struct nfsd4_channel_attrs se_fchannel; struct nfsd4_channel_attrs se_bchannel; - struct nfsd4_slot se_slots[]; /* forward channel slots */ + struct nfsd4_slot *se_slots[]; /* forward channel slots */ }; static inline void @@ -180,7 +202,7 @@ struct nfs4_client { char cl_recdir[HEXDIR_LEN]; /* recovery dir */ nfs4_verifier cl_verifier; /* generated by client */ time_t cl_time; /* time of last lease renewal */ - __be32 cl_addr; /* client ipaddress */ + struct sockaddr_storage cl_addr; /* client ipaddress */ u32 cl_flavor; /* setclientid pseudoflavor */ char *cl_principal; /* setclientid principal name */ struct svc_cred cl_cred; /* setclientid principal */ @@ -192,9 +214,17 @@ struct nfs4_client { /* for nfs41 */ struct list_head cl_sessions; - struct nfsd4_slot cl_slot; /* create_session slot */ + struct nfsd4_clid_slot cl_cs_slot; /* create_session slot */ u32 cl_exchange_flags; struct nfs4_sessionid cl_sessionid; + + /* for nfs41 callbacks */ + /* We currently support a single back channel with a single slot */ + unsigned long cl_cb_slot_busy; + u32 cl_cb_seq_nr; + struct svc_xprt *cl_cb_xprt; /* 4.1 callback transport */ + struct rpc_wait_queue cl_cb_waitq; /* backchannel callers may */ + /* wait here for slots */ }; /* struct nfs4_client_reset @@ -345,6 +375,7 @@ extern int nfs4_in_grace(void); extern __be32 nfs4_check_open_reclaim(clientid_t *clid); extern void put_nfs4_client(struct nfs4_client *clp); extern void nfs4_free_stateowner(struct kref *kref); +extern int set_callback_cred(void); extern void nfsd4_probe_callback(struct nfs4_client *clp); extern void nfsd4_cb_recall(struct nfs4_delegation *dp); extern void nfs4_put_delegation(struct nfs4_delegation *dp); diff --git a/include/linux/nfsd/xdr4.h b/include/linux/nfsd/xdr4.h index 2bacf7535069..73164c2b3d29 100644 --- a/include/linux/nfsd/xdr4.h +++ b/include/linux/nfsd/xdr4.h @@ -51,7 +51,7 @@ struct nfsd4_compound_state { /* For sessions DRC */ struct 
nfsd4_session *session; struct nfsd4_slot *slot; - __be32 *statp; + __be32 *datap; size_t iovlen; u32 minorversion; u32 status; @@ -366,18 +366,6 @@ struct nfsd4_exchange_id { int spa_how; }; -struct nfsd4_create_session { - clientid_t clientid; - struct nfs4_sessionid sessionid; - u32 seqid; - u32 flags; - struct nfsd4_channel_attrs fore_channel; - struct nfsd4_channel_attrs back_channel; - u32 callback_prog; - u32 uid; - u32 gid; -}; - struct nfsd4_sequence { struct nfs4_sessionid sessionid; /* request/response */ u32 seqid; /* request/response */ @@ -479,13 +467,12 @@ struct nfsd4_compoundres { static inline bool nfsd4_is_solo_sequence(struct nfsd4_compoundres *resp) { struct nfsd4_compoundargs *args = resp->rqstp->rq_argp; - return args->opcnt == 1; + return resp->opcnt == 1 && args->ops[0].opnum == OP_SEQUENCE; } static inline bool nfsd4_not_cached(struct nfsd4_compoundres *resp) { - return !resp->cstate.slot->sl_cache_entry.ce_cachethis || - nfsd4_is_solo_sequence(resp); + return !resp->cstate.slot->sl_cachethis || nfsd4_is_solo_sequence(resp); } #define NFS4_SVC_XDRSIZE sizeof(struct nfsd4_compoundargs) diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h index 79fec6af3f9f..ce520402e840 100644 --- a/include/linux/nilfs2_fs.h +++ b/include/linux/nilfs2_fs.h @@ -425,15 +425,6 @@ struct nilfs_dat_entry { }; /** - * struct nilfs_dat_group_desc - block group descriptor - * @dg_nfrees: number of free virtual block numbers in block group - */ -struct nilfs_dat_group_desc { - __le32 dg_nfrees; -}; - - -/** * struct nilfs_snapshot_list - snapshot list * @ssl_next: next checkpoint number on snapshot list * @ssl_prev: previous checkpoint number on snapshot list diff --git a/include/linux/oom.h b/include/linux/oom.h index a7979baf1e39..6aac5fe4f6f1 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -30,5 +30,16 @@ extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order); extern int register_oom_notifier(struct notifier_block *nb); extern int unregister_oom_notifier(struct notifier_block *nb); +extern bool oom_killer_disabled; + +static inline void oom_killer_disable(void) +{ + oom_killer_disabled = true; +} + +static inline void oom_killer_enable(void) +{ + oom_killer_disabled = false; +} #endif /* __KERNEL__*/ #endif /* _INCLUDE_LINUX_OOM_H */ diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 2b87acfc5f87..6b202b173955 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -51,6 +51,9 @@ * PG_buddy is set to indicate that the page is free and in the buddy system * (see mm/page_alloc.c). * + * PG_hwpoison indicates that a page got corrupted in hardware and contains + * data with incorrect ECC bits that triggered a machine check. Accessing is + * not safe since it may cause another machine check. Don't touch! */ /* @@ -102,6 +105,9 @@ enum pageflags { #ifdef CONFIG_ARCH_USES_PG_UNCACHED PG_uncached, /* Page has been mapped as uncached */ #endif +#ifdef CONFIG_MEMORY_FAILURE + PG_hwpoison, /* hardware poisoned page. 
Don't touch */ +#endif __NR_PAGEFLAGS, /* Filesystems */ @@ -158,6 +164,9 @@ static inline int TestSetPage##uname(struct page *page) \ static inline int TestClearPage##uname(struct page *page) \ { return test_and_clear_bit(PG_##lname, &page->flags); } +#define __TESTCLEARFLAG(uname, lname) \ +static inline int __TestClearPage##uname(struct page *page) \ + { return __test_and_clear_bit(PG_##lname, &page->flags); } #define PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname) \ SETPAGEFLAG(uname, lname) CLEARPAGEFLAG(uname, lname) @@ -184,6 +193,9 @@ static inline void __ClearPage##uname(struct page *page) { } #define TESTCLEARFLAG_FALSE(uname) \ static inline int TestClearPage##uname(struct page *page) { return 0; } +#define __TESTCLEARFLAG_FALSE(uname) \ +static inline int __TestClearPage##uname(struct page *page) { return 0; } + struct page; /* forward declaration */ TESTPAGEFLAG(Locked, locked) TESTSETFLAG(Locked, locked) @@ -250,11 +262,11 @@ PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable) #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT #define MLOCK_PAGES 1 PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked) - TESTSCFLAG(Mlocked, mlocked) + TESTSCFLAG(Mlocked, mlocked) __TESTCLEARFLAG(Mlocked, mlocked) #else #define MLOCK_PAGES 0 -PAGEFLAG_FALSE(Mlocked) - SETPAGEFLAG_NOOP(Mlocked) TESTCLEARFLAG_FALSE(Mlocked) +PAGEFLAG_FALSE(Mlocked) SETPAGEFLAG_NOOP(Mlocked) + TESTCLEARFLAG_FALSE(Mlocked) __TESTCLEARFLAG_FALSE(Mlocked) #endif #ifdef CONFIG_ARCH_USES_PG_UNCACHED @@ -263,6 +275,15 @@ PAGEFLAG(Uncached, uncached) PAGEFLAG_FALSE(Uncached) #endif +#ifdef CONFIG_MEMORY_FAILURE +PAGEFLAG(HWPoison, hwpoison) +TESTSETFLAG(HWPoison, hwpoison) +#define __PG_HWPOISON (1UL << PG_hwpoison) +#else +PAGEFLAG_FALSE(HWPoison) +#define __PG_HWPOISON 0 +#endif + static inline int PageUptodate(struct page *page) { int ret = test_bit(PG_uptodate, &(page)->flags); @@ -387,7 +408,7 @@ static inline void __ClearPageTail(struct page *page) 1 << PG_private | 1 << PG_private_2 | \ 1 << PG_buddy | 1 << PG_writeback | 1 << PG_reserved | \ 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ - 1 << PG_unevictable | __PG_MLOCKED) + 1 << PG_unevictable | __PG_MLOCKED | __PG_HWPOISON) /* * Flags checked when a page is prepped for return by the page allocator. @@ -396,8 +417,8 @@ static inline void __ClearPageTail(struct page *page) */ #define PAGE_FLAGS_CHECK_AT_PREP ((1 << NR_PAGEFLAGS) - 1) -#endif /* !__GENERATING_BOUNDS_H */ - +#define PAGE_FLAGS_PRIVATE \ + (1 << PG_private | 1 << PG_private_2) /** * page_has_private - Determine if page has private stuff * @page: The page to be checked @@ -405,8 +426,11 @@ static inline void __ClearPageTail(struct page *page) * Determine if a page has private stuff, indicating that release routines * should be invoked upon it. */ -#define page_has_private(page) \ - ((page)->flags & ((1 << PG_private) | \ - (1 << PG_private_2))) +static inline int page_has_private(struct page *page) +{ + return !!(page->flags & PAGE_FLAGS_PRIVATE); +} + +#endif /* !__GENERATING_BOUNDS_H */ #endif /* PAGE_FLAGS_H */ diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h index ada779f24178..4b938d4f3ac2 100644 --- a/include/linux/page_cgroup.h +++ b/include/linux/page_cgroup.h @@ -38,6 +38,7 @@ enum { PCG_LOCK, /* page cgroup is locked */ PCG_CACHE, /* charged as cache */ PCG_USED, /* this object is in use. 
*/ + PCG_ACCT_LRU, /* page has been accounted for */ }; #define TESTPCGFLAG(uname, lname) \ @@ -52,11 +53,23 @@ static inline void SetPageCgroup##uname(struct page_cgroup *pc)\ static inline void ClearPageCgroup##uname(struct page_cgroup *pc) \ { clear_bit(PCG_##lname, &pc->flags); } +#define TESTCLEARPCGFLAG(uname, lname) \ +static inline int TestClearPageCgroup##uname(struct page_cgroup *pc) \ + { return test_and_clear_bit(PCG_##lname, &pc->flags); } + /* Cache flag is set only once (at allocation) */ TESTPCGFLAG(Cache, CACHE) +CLEARPCGFLAG(Cache, CACHE) +SETPCGFLAG(Cache, CACHE) TESTPCGFLAG(Used, USED) CLEARPCGFLAG(Used, USED) +SETPCGFLAG(Used, USED) + +SETPCGFLAG(AcctLRU, ACCT_LRU) +CLEARPCGFLAG(AcctLRU, ACCT_LRU) +TESTPCGFLAG(AcctLRU, ACCT_LRU) +TESTCLEARPCGFLAG(AcctLRU, ACCT_LRU) static inline int page_cgroup_nid(struct page_cgroup *pc) { diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 3b6b788fe2b5..daecca3c8300 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -379,9 +379,6 @@ #define PCI_DEVICE_ID_ATI_IXP600_IDE 0x438c #define PCI_DEVICE_ID_ATI_IXP700_SATA 0x4390 #define PCI_DEVICE_ID_ATI_IXP700_IDE 0x439c -/* AMD SB Chipset */ -#define PCI_DEVICE_ID_AMD_SB900_IDE 0x780c -#define PCI_DEVICE_ID_AMD_SB900_SATA_IDE 0x7800 #define PCI_VENDOR_ID_VLSI 0x1004 #define PCI_DEVICE_ID_VLSI_82C592 0x0005 @@ -485,6 +482,9 @@ #define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM_PCIE 0x0361 #define PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL 0x252 +#define PCI_SUBVENDOR_ID_IBM 0x1014 +#define PCI_SUBDEVICE_ID_IBM_SATURN_SERIAL_ONE_PORT 0x03d4 + #define PCI_VENDOR_ID_UNISYS 0x1018 #define PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR 0x001C @@ -543,6 +543,7 @@ #define PCI_DEVICE_ID_AMD_8131_BRIDGE 0x7450 #define PCI_DEVICE_ID_AMD_8131_APIC 0x7451 #define PCI_DEVICE_ID_AMD_8132_BRIDGE 0x7458 +#define PCI_DEVICE_ID_AMD_HUDSON2_SMBUS 0x780b #define PCI_DEVICE_ID_AMD_CS5535_IDE 0x208F #define PCI_DEVICE_ID_AMD_CS5536_ISA 0x2090 #define PCI_DEVICE_ID_AMD_CS5536_FLASH 0x2091 @@ -552,9 +553,10 @@ #define PCI_DEVICE_ID_AMD_CS5536_UDC 0x2096 #define PCI_DEVICE_ID_AMD_CS5536_UOC 0x2097 #define PCI_DEVICE_ID_AMD_CS5536_IDE 0x209A - #define PCI_DEVICE_ID_AMD_LX_VIDEO 0x2081 #define PCI_DEVICE_ID_AMD_LX_AES 0x2082 +#define PCI_DEVICE_ID_AMD_HUDSON2_IDE 0x780c +#define PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE 0x7800 #define PCI_VENDOR_ID_TRIDENT 0x1023 #define PCI_DEVICE_ID_TRIDENT_4DWAVE_DX 0x2000 @@ -775,6 +777,7 @@ #define PCI_DEVICE_ID_TI_X515 0x8036 #define PCI_DEVICE_ID_TI_XX12 0x8039 #define PCI_DEVICE_ID_TI_XX12_FM 0x803b +#define PCI_DEVICE_ID_TI_XIO2000A 0x8231 #define PCI_DEVICE_ID_TI_1130 0xac12 #define PCI_DEVICE_ID_TI_1031 0xac13 #define PCI_DEVICE_ID_TI_1131 0xac15 @@ -1630,6 +1633,8 @@ #define PCI_DEVICE_ID_O2_6730 0x673a #define PCI_DEVICE_ID_O2_6832 0x6832 #define PCI_DEVICE_ID_O2_6836 0x6836 +#define PCI_DEVICE_ID_O2_6812 0x6872 +#define PCI_DEVICE_ID_O2_6933 0x6933 #define PCI_VENDOR_ID_3DFX 0x121a #define PCI_DEVICE_ID_3DFX_VOODOO 0x0001 @@ -1953,6 +1958,8 @@ #define PCI_DEVICE_ID_LAVA_DSERIAL 0x0100 /* 2x 16550 */ #define PCI_DEVICE_ID_LAVA_QUATRO_A 0x0101 /* 2x 16550, half of 4 port */ #define PCI_DEVICE_ID_LAVA_QUATRO_B 0x0102 /* 2x 16550, half of 4 port */ +#define PCI_DEVICE_ID_LAVA_QUATTRO_A 0x0120 /* 2x 16550A, half of 4 port */ +#define PCI_DEVICE_ID_LAVA_QUATTRO_B 0x0121 /* 2x 16550A, half of 4 port */ #define PCI_DEVICE_ID_LAVA_OCTO_A 0x0180 /* 4x 16550A, half of 8 port */ #define PCI_DEVICE_ID_LAVA_OCTO_B 0x0181 /* 4x 16550A, half of 8 port */ #define 
PCI_DEVICE_ID_LAVA_PORT_PLUS 0x0200 /* 2x 16650 */ @@ -2160,6 +2167,10 @@ #define PCI_DEVICE_ID_ADDIDATA_APCI7420_3 0x700D #define PCI_DEVICE_ID_ADDIDATA_APCI7300_3 0x700E #define PCI_DEVICE_ID_ADDIDATA_APCI7800_3 0x700F +#define PCI_DEVICE_ID_ADDIDATA_APCIe7300 0x7010 +#define PCI_DEVICE_ID_ADDIDATA_APCIe7420 0x7011 +#define PCI_DEVICE_ID_ADDIDATA_APCIe7500 0x7012 +#define PCI_DEVICE_ID_ADDIDATA_APCIe7800 0x7013 #define PCI_VENDOR_ID_PDC 0x15e9 @@ -2526,6 +2537,16 @@ #define PCI_DEVICE_ID_INTEL_E7525_MCH 0x359e #define PCI_DEVICE_ID_INTEL_IOAT_CNB 0x360b #define PCI_DEVICE_ID_INTEL_FBD_CNB 0x360c +#define PCI_DEVICE_ID_INTEL_IOAT_JSF0 0x3710 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF1 0x3711 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF2 0x3712 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF3 0x3713 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF4 0x3714 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF5 0x3715 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF6 0x3716 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF7 0x3717 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF8 0x3718 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF9 0x3719 #define PCI_DEVICE_ID_INTEL_ICH10_0 0x3a14 #define PCI_DEVICE_ID_INTEL_ICH10_1 0x3a16 #define PCI_DEVICE_ID_INTEL_ICH10_2 0x3a18 diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 740caad09a44..7b7fbf433cff 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -1,5 +1,9 @@ /* - * Performance counters: + * NOTE: this file will be removed in a future kernel release, it is + * provided as a courtesy copy of user-space code that relies on the + * old (pre-rename) symbols and constants. + * + * Performance events: * * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de> * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar @@ -131,19 +135,19 @@ enum perf_counter_sample_format { * as specified by attr.read_format: * * struct read_format { - * { u64 value; - * { u64 time_enabled; } && PERF_FORMAT_ENABLED - * { u64 time_running; } && PERF_FORMAT_RUNNING - * { u64 id; } && PERF_FORMAT_ID - * } && !PERF_FORMAT_GROUP + * { u64 value; + * { u64 time_enabled; } && PERF_FORMAT_ENABLED + * { u64 time_running; } && PERF_FORMAT_RUNNING + * { u64 id; } && PERF_FORMAT_ID + * } && !PERF_FORMAT_GROUP * - * { u64 nr; - * { u64 time_enabled; } && PERF_FORMAT_ENABLED - * { u64 time_running; } && PERF_FORMAT_RUNNING - * { u64 value; - * { u64 id; } && PERF_FORMAT_ID - * } cntr[nr]; - * } && PERF_FORMAT_GROUP + * { u64 nr; + * { u64 time_enabled; } && PERF_FORMAT_ENABLED + * { u64 time_running; } && PERF_FORMAT_RUNNING + * { u64 value; + * { u64 id; } && PERF_FORMAT_ID + * } cntr[nr]; + * } && PERF_FORMAT_GROUP * }; */ enum perf_counter_read_format { @@ -314,9 +318,9 @@ enum perf_event_type { /* * struct { - * struct perf_event_header header; - * u64 id; - * u64 lost; + * struct perf_event_header header; + * u64 id; + * u64 lost; * }; */ PERF_EVENT_LOST = 2, @@ -357,17 +361,17 @@ enum perf_event_type { * struct perf_event_header header; * u32 pid, ppid; * u32 tid, ptid; - * { u64 time; } && PERF_SAMPLE_TIME + * u64 time; * }; */ PERF_EVENT_FORK = 7, /* * struct { - * struct perf_event_header header; - * u32 pid, tid; + * struct perf_event_header header; + * u32 pid, tid; * - * struct read_format values; + * struct read_format values; * }; */ PERF_EVENT_READ = 8, @@ -383,23 +387,23 @@ enum perf_event_type { * { u64 id; } && PERF_SAMPLE_ID * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID * { u32 cpu, res; } && PERF_SAMPLE_CPU - * { u64 period; } && PERF_SAMPLE_PERIOD + * { u64 period; } && PERF_SAMPLE_PERIOD * * { 
struct read_format values; } && PERF_SAMPLE_READ * * { u64 nr, * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN * - * # - * # The RAW record below is opaque data wrt the ABI - * # - * # That is, the ABI doesn't make any promises wrt to - * # the stability of its content, it may vary depending - * # on event, hardware, kernel version and phase of - * # the moon. - * # - * # In other words, PERF_SAMPLE_RAW contents are not an ABI. - * # + * # + * # The RAW record below is opaque data wrt the ABI + * # + * # That is, the ABI doesn't make any promises wrt to + * # the stability of its content, it may vary depending + * # on event, hardware, kernel version and phase of + * # the moon. + * # + * # In other words, PERF_SAMPLE_RAW contents are not an ABI. + * # * * { u32 size; * char data[size];}&& PERF_SAMPLE_RAW @@ -422,437 +426,16 @@ enum perf_callchain_context { PERF_CONTEXT_MAX = (__u64)-4095, }; -#define PERF_FLAG_FD_NO_GROUP (1U << 0) -#define PERF_FLAG_FD_OUTPUT (1U << 1) - -#ifdef __KERNEL__ -/* - * Kernel-internal data types and definitions: - */ - -#ifdef CONFIG_PERF_COUNTERS -# include <asm/perf_counter.h> -#endif - -#include <linux/list.h> -#include <linux/mutex.h> -#include <linux/rculist.h> -#include <linux/rcupdate.h> -#include <linux/spinlock.h> -#include <linux/hrtimer.h> -#include <linux/fs.h> -#include <linux/pid_namespace.h> -#include <asm/atomic.h> - -#define PERF_MAX_STACK_DEPTH 255 - -struct perf_callchain_entry { - __u64 nr; - __u64 ip[PERF_MAX_STACK_DEPTH]; -}; - -struct perf_raw_record { - u32 size; - void *data; -}; - -struct task_struct; - -/** - * struct hw_perf_counter - performance counter hardware details: - */ -struct hw_perf_counter { -#ifdef CONFIG_PERF_COUNTERS - union { - struct { /* hardware */ - u64 config; - unsigned long config_base; - unsigned long counter_base; - int idx; - }; - union { /* software */ - atomic64_t count; - struct hrtimer hrtimer; - }; - }; - atomic64_t prev_count; - u64 sample_period; - u64 last_period; - atomic64_t period_left; - u64 interrupts; - - u64 freq_count; - u64 freq_interrupts; - u64 freq_stamp; -#endif -}; - -struct perf_counter; - -/** - * struct pmu - generic performance monitoring unit - */ -struct pmu { - int (*enable) (struct perf_counter *counter); - void (*disable) (struct perf_counter *counter); - void (*read) (struct perf_counter *counter); - void (*unthrottle) (struct perf_counter *counter); -}; - -/** - * enum perf_counter_active_state - the states of a counter - */ -enum perf_counter_active_state { - PERF_COUNTER_STATE_ERROR = -2, - PERF_COUNTER_STATE_OFF = -1, - PERF_COUNTER_STATE_INACTIVE = 0, - PERF_COUNTER_STATE_ACTIVE = 1, -}; - -struct file; - -struct perf_mmap_data { - struct rcu_head rcu_head; - int nr_pages; /* nr of data pages */ - int writable; /* are we writable */ - int nr_locked; /* nr pages mlocked */ - - atomic_t poll; /* POLL_ for wakeups */ - atomic_t events; /* event limit */ - - atomic_long_t head; /* write position */ - atomic_long_t done_head; /* completed head */ - - atomic_t lock; /* concurrent writes */ - atomic_t wakeup; /* needs a wakeup */ - atomic_t lost; /* nr records lost */ - - long watermark; /* wakeup watermark */ - - struct perf_counter_mmap_page *user_page; - void *data_pages[0]; -}; - -struct perf_pending_entry { - struct perf_pending_entry *next; - void (*func)(struct perf_pending_entry *); -}; - -/** - * struct perf_counter - performance counter kernel representation: - */ -struct perf_counter { -#ifdef CONFIG_PERF_COUNTERS - struct list_head list_entry; - struct list_head 
event_entry; - struct list_head sibling_list; - int nr_siblings; - struct perf_counter *group_leader; - struct perf_counter *output; - const struct pmu *pmu; - - enum perf_counter_active_state state; - atomic64_t count; - - /* - * These are the total time in nanoseconds that the counter - * has been enabled (i.e. eligible to run, and the task has - * been scheduled in, if this is a per-task counter) - * and running (scheduled onto the CPU), respectively. - * - * They are computed from tstamp_enabled, tstamp_running and - * tstamp_stopped when the counter is in INACTIVE or ACTIVE state. - */ - u64 total_time_enabled; - u64 total_time_running; - - /* - * These are timestamps used for computing total_time_enabled - * and total_time_running when the counter is in INACTIVE or - * ACTIVE state, measured in nanoseconds from an arbitrary point - * in time. - * tstamp_enabled: the notional time when the counter was enabled - * tstamp_running: the notional time when the counter was scheduled on - * tstamp_stopped: in INACTIVE state, the notional time when the - * counter was scheduled off. - */ - u64 tstamp_enabled; - u64 tstamp_running; - u64 tstamp_stopped; - - struct perf_counter_attr attr; - struct hw_perf_counter hw; - - struct perf_counter_context *ctx; - struct file *filp; - - /* - * These accumulate total time (in nanoseconds) that children - * counters have been enabled and running, respectively. - */ - atomic64_t child_total_time_enabled; - atomic64_t child_total_time_running; - - /* - * Protect attach/detach and child_list: - */ - struct mutex child_mutex; - struct list_head child_list; - struct perf_counter *parent; - - int oncpu; - int cpu; - - struct list_head owner_entry; - struct task_struct *owner; - - /* mmap bits */ - struct mutex mmap_mutex; - atomic_t mmap_count; - struct perf_mmap_data *data; - - /* poll related */ - wait_queue_head_t waitq; - struct fasync_struct *fasync; - - /* delayed work for NMIs and such */ - int pending_wakeup; - int pending_kill; - int pending_disable; - struct perf_pending_entry pending; - - atomic_t event_limit; - - void (*destroy)(struct perf_counter *); - struct rcu_head rcu_head; - - struct pid_namespace *ns; - u64 id; -#endif -}; - -/** - * struct perf_counter_context - counter context structure - * - * Used as a container for task counters and CPU counters as well: - */ -struct perf_counter_context { - /* - * Protect the states of the counters in the list, - * nr_active, and the list: - */ - spinlock_t lock; - /* - * Protect the list of counters. Locking either mutex or lock - * is sufficient to ensure the list doesn't change; to change - * the list you need to lock both the mutex and the spinlock. - */ - struct mutex mutex; - - struct list_head counter_list; - struct list_head event_list; - int nr_counters; - int nr_active; - int is_active; - int nr_stat; - atomic_t refcount; - struct task_struct *task; - - /* - * Context clock, runs when context enabled. - */ - u64 time; - u64 timestamp; - - /* - * These fields let us detect when two contexts have both - * been cloned (inherited) from a common ancestor. 
- */ - struct perf_counter_context *parent_ctx; - u64 parent_gen; - u64 generation; - int pin_count; - struct rcu_head rcu_head; -}; - -/** - * struct perf_counter_cpu_context - per cpu counter context structure - */ -struct perf_cpu_context { - struct perf_counter_context ctx; - struct perf_counter_context *task_ctx; - int active_oncpu; - int max_pertask; - int exclusive; - - /* - * Recursion avoidance: - * - * task, softirq, irq, nmi context - */ - int recursion[4]; -}; - -struct perf_output_handle { - struct perf_counter *counter; - struct perf_mmap_data *data; - unsigned long head; - unsigned long offset; - int nmi; - int sample; - int locked; - unsigned long flags; -}; - -#ifdef CONFIG_PERF_COUNTERS +#define PERF_FLAG_FD_NO_GROUP (1U << 0) +#define PERF_FLAG_FD_OUTPUT (1U << 1) /* - * Set by architecture code: + * In case some app still references the old symbols: */ -extern int perf_max_counters; - -extern const struct pmu *hw_perf_counter_init(struct perf_counter *counter); - -extern void perf_counter_task_sched_in(struct task_struct *task, int cpu); -extern void perf_counter_task_sched_out(struct task_struct *task, - struct task_struct *next, int cpu); -extern void perf_counter_task_tick(struct task_struct *task, int cpu); -extern int perf_counter_init_task(struct task_struct *child); -extern void perf_counter_exit_task(struct task_struct *child); -extern void perf_counter_free_task(struct task_struct *task); -extern void set_perf_counter_pending(void); -extern void perf_counter_do_pending(void); -extern void perf_counter_print_debug(void); -extern void __perf_disable(void); -extern bool __perf_enable(void); -extern void perf_disable(void); -extern void perf_enable(void); -extern int perf_counter_task_disable(void); -extern int perf_counter_task_enable(void); -extern int hw_perf_group_sched_in(struct perf_counter *group_leader, - struct perf_cpu_context *cpuctx, - struct perf_counter_context *ctx, int cpu); -extern void perf_counter_update_userpage(struct perf_counter *counter); - -struct perf_sample_data { - u64 type; - - u64 ip; - struct { - u32 pid; - u32 tid; - } tid_entry; - u64 time; - u64 addr; - u64 id; - u64 stream_id; - struct { - u32 cpu; - u32 reserved; - } cpu_entry; - u64 period; - struct perf_callchain_entry *callchain; - struct perf_raw_record *raw; -}; -extern void perf_output_sample(struct perf_output_handle *handle, - struct perf_event_header *header, - struct perf_sample_data *data, - struct perf_counter *counter); -extern void perf_prepare_sample(struct perf_event_header *header, - struct perf_sample_data *data, - struct perf_counter *counter, - struct pt_regs *regs); +#define __NR_perf_counter_open __NR_perf_event_open -extern int perf_counter_overflow(struct perf_counter *counter, int nmi, - struct perf_sample_data *data, - struct pt_regs *regs); +#define PR_TASK_PERF_COUNTERS_DISABLE PR_TASK_PERF_EVENTS_DISABLE +#define PR_TASK_PERF_COUNTERS_ENABLE PR_TASK_PERF_EVENTS_ENABLE -/* - * Return 1 for a software counter, 0 for a hardware counter - */ -static inline int is_software_counter(struct perf_counter *counter) -{ - return (counter->attr.type != PERF_TYPE_RAW) && - (counter->attr.type != PERF_TYPE_HARDWARE) && - (counter->attr.type != PERF_TYPE_HW_CACHE); -} - -extern atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX]; - -extern void __perf_swcounter_event(u32, u64, int, struct pt_regs *, u64); - -static inline void -perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr) -{ - if (atomic_read(&perf_swcounter_enabled[event])) - 
__perf_swcounter_event(event, nr, nmi, regs, addr); -} - -extern void __perf_counter_mmap(struct vm_area_struct *vma); - -static inline void perf_counter_mmap(struct vm_area_struct *vma) -{ - if (vma->vm_flags & VM_EXEC) - __perf_counter_mmap(vma); -} - -extern void perf_counter_comm(struct task_struct *tsk); -extern void perf_counter_fork(struct task_struct *tsk); - -extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs); - -extern int sysctl_perf_counter_paranoid; -extern int sysctl_perf_counter_mlock; -extern int sysctl_perf_counter_sample_rate; - -extern void perf_counter_init(void); -extern void perf_tpcounter_event(int event_id, u64 addr, u64 count, - void *record, int entry_size); - -#ifndef perf_misc_flags -#define perf_misc_flags(regs) (user_mode(regs) ? PERF_EVENT_MISC_USER : \ - PERF_EVENT_MISC_KERNEL) -#define perf_instruction_pointer(regs) instruction_pointer(regs) -#endif - -extern int perf_output_begin(struct perf_output_handle *handle, - struct perf_counter *counter, unsigned int size, - int nmi, int sample); -extern void perf_output_end(struct perf_output_handle *handle); -extern void perf_output_copy(struct perf_output_handle *handle, - const void *buf, unsigned int len); -#else -static inline void -perf_counter_task_sched_in(struct task_struct *task, int cpu) { } -static inline void -perf_counter_task_sched_out(struct task_struct *task, - struct task_struct *next, int cpu) { } -static inline void -perf_counter_task_tick(struct task_struct *task, int cpu) { } -static inline int perf_counter_init_task(struct task_struct *child) { return 0; } -static inline void perf_counter_exit_task(struct task_struct *child) { } -static inline void perf_counter_free_task(struct task_struct *task) { } -static inline void perf_counter_do_pending(void) { } -static inline void perf_counter_print_debug(void) { } -static inline void perf_disable(void) { } -static inline void perf_enable(void) { } -static inline int perf_counter_task_disable(void) { return -EINVAL; } -static inline int perf_counter_task_enable(void) { return -EINVAL; } - -static inline void -perf_swcounter_event(u32 event, u64 nr, int nmi, - struct pt_regs *regs, u64 addr) { } - -static inline void perf_counter_mmap(struct vm_area_struct *vma) { } -static inline void perf_counter_comm(struct task_struct *tsk) { } -static inline void perf_counter_fork(struct task_struct *tsk) { } -static inline void perf_counter_init(void) { } - -#endif - -#define perf_output_put(handle, x) \ - perf_output_copy((handle), &(x), sizeof(x)) - -#endif /* __KERNEL__ */ #endif /* _LINUX_PERF_COUNTER_H */ diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h new file mode 100644 index 000000000000..9e7012689a84 --- /dev/null +++ b/include/linux/perf_event.h @@ -0,0 +1,863 @@ +/* + * Performance events: + * + * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de> + * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar + * Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra + * + * Data type definitions, declarations, prototypes. 
+ * + * Started by: Thomas Gleixner and Ingo Molnar + * + * For licencing details see kernel-base/COPYING + */ +#ifndef _LINUX_PERF_EVENT_H +#define _LINUX_PERF_EVENT_H + +#include <linux/types.h> +#include <linux/ioctl.h> +#include <asm/byteorder.h> + +/* + * User-space ABI bits: + */ + +/* + * attr.type + */ +enum perf_type_id { + PERF_TYPE_HARDWARE = 0, + PERF_TYPE_SOFTWARE = 1, + PERF_TYPE_TRACEPOINT = 2, + PERF_TYPE_HW_CACHE = 3, + PERF_TYPE_RAW = 4, + + PERF_TYPE_MAX, /* non-ABI */ +}; + +/* + * Generalized performance event event_id types, used by the + * attr.event_id parameter of the sys_perf_event_open() + * syscall: + */ +enum perf_hw_id { + /* + * Common hardware events, generalized by the kernel: + */ + PERF_COUNT_HW_CPU_CYCLES = 0, + PERF_COUNT_HW_INSTRUCTIONS = 1, + PERF_COUNT_HW_CACHE_REFERENCES = 2, + PERF_COUNT_HW_CACHE_MISSES = 3, + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, + PERF_COUNT_HW_BRANCH_MISSES = 5, + PERF_COUNT_HW_BUS_CYCLES = 6, + + PERF_COUNT_HW_MAX, /* non-ABI */ +}; + +/* + * Generalized hardware cache events: + * + * { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x + * { read, write, prefetch } x + * { accesses, misses } + */ +enum perf_hw_cache_id { + PERF_COUNT_HW_CACHE_L1D = 0, + PERF_COUNT_HW_CACHE_L1I = 1, + PERF_COUNT_HW_CACHE_LL = 2, + PERF_COUNT_HW_CACHE_DTLB = 3, + PERF_COUNT_HW_CACHE_ITLB = 4, + PERF_COUNT_HW_CACHE_BPU = 5, + + PERF_COUNT_HW_CACHE_MAX, /* non-ABI */ +}; + +enum perf_hw_cache_op_id { + PERF_COUNT_HW_CACHE_OP_READ = 0, + PERF_COUNT_HW_CACHE_OP_WRITE = 1, + PERF_COUNT_HW_CACHE_OP_PREFETCH = 2, + + PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */ +}; + +enum perf_hw_cache_op_result_id { + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0, + PERF_COUNT_HW_CACHE_RESULT_MISS = 1, + + PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */ +}; + +/* + * Special "software" events provided by the kernel, even if the hardware + * does not support performance events. These events measure various + * physical and sw events of the kernel (and allow the profiling of them as + * well): + */ +enum perf_sw_ids { + PERF_COUNT_SW_CPU_CLOCK = 0, + PERF_COUNT_SW_TASK_CLOCK = 1, + PERF_COUNT_SW_PAGE_FAULTS = 2, + PERF_COUNT_SW_CONTEXT_SWITCHES = 3, + PERF_COUNT_SW_CPU_MIGRATIONS = 4, + PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, + + PERF_COUNT_SW_MAX, /* non-ABI */ +}; + +/* + * Bits that can be set in attr.sample_type to request information + * in the overflow packets. 
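A short sketch (not from the patch): the three cache dimensions enumerated above are conventionally packed into a single config value for PERF_TYPE_HW_CACHE as id | (op << 8) | (result << 16). That packing is the established sys_perf_event_open() convention rather than something spelled out in this header, so treat it as an assumption here.

	/* L1 data-cache read misses */
	__u64 config = PERF_COUNT_HW_CACHE_L1D |
		       (PERF_COUNT_HW_CACHE_OP_READ     <<  8) |
		       (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
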
+ */ +enum perf_event_sample_format { + PERF_SAMPLE_IP = 1U << 0, + PERF_SAMPLE_TID = 1U << 1, + PERF_SAMPLE_TIME = 1U << 2, + PERF_SAMPLE_ADDR = 1U << 3, + PERF_SAMPLE_READ = 1U << 4, + PERF_SAMPLE_CALLCHAIN = 1U << 5, + PERF_SAMPLE_ID = 1U << 6, + PERF_SAMPLE_CPU = 1U << 7, + PERF_SAMPLE_PERIOD = 1U << 8, + PERF_SAMPLE_STREAM_ID = 1U << 9, + PERF_SAMPLE_RAW = 1U << 10, + + PERF_SAMPLE_MAX = 1U << 11, /* non-ABI */ +}; + +/* + * The format of the data returned by read() on a perf event fd, + * as specified by attr.read_format: + * + * struct read_format { + * { u64 value; + * { u64 time_enabled; } && PERF_FORMAT_ENABLED + * { u64 time_running; } && PERF_FORMAT_RUNNING + * { u64 id; } && PERF_FORMAT_ID + * } && !PERF_FORMAT_GROUP + * + * { u64 nr; + * { u64 time_enabled; } && PERF_FORMAT_ENABLED + * { u64 time_running; } && PERF_FORMAT_RUNNING + * { u64 value; + * { u64 id; } && PERF_FORMAT_ID + * } cntr[nr]; + * } && PERF_FORMAT_GROUP + * }; + */ +enum perf_event_read_format { + PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0, + PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1, + PERF_FORMAT_ID = 1U << 2, + PERF_FORMAT_GROUP = 1U << 3, + + PERF_FORMAT_MAX = 1U << 4, /* non-ABI */ +}; + +#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */ + +/* + * Hardware event_id to monitor via a performance monitoring event: + */ +struct perf_event_attr { + + /* + * Major type: hardware/software/tracepoint/etc. + */ + __u32 type; + + /* + * Size of the attr structure, for fwd/bwd compat. + */ + __u32 size; + + /* + * Type specific configuration information. + */ + __u64 config; + + union { + __u64 sample_period; + __u64 sample_freq; + }; + + __u64 sample_type; + __u64 read_format; + + __u64 disabled : 1, /* off by default */ + inherit : 1, /* children inherit it */ + pinned : 1, /* must always be on PMU */ + exclusive : 1, /* only group on PMU */ + exclude_user : 1, /* don't count user */ + exclude_kernel : 1, /* ditto kernel */ + exclude_hv : 1, /* ditto hypervisor */ + exclude_idle : 1, /* don't count when idle */ + mmap : 1, /* include mmap data */ + comm : 1, /* include comm data */ + freq : 1, /* use freq, not period */ + inherit_stat : 1, /* per task counts */ + enable_on_exec : 1, /* next exec enables */ + task : 1, /* trace fork/exit */ + watermark : 1, /* wakeup_watermark */ + + __reserved_1 : 49; + + union { + __u32 wakeup_events; /* wakeup every n events */ + __u32 wakeup_watermark; /* bytes before wakeup */ + }; + __u32 __reserved_2; + + __u64 __reserved_3; +}; + +/* + * Ioctls that can be done on a perf event fd: + */ +#define PERF_EVENT_IOC_ENABLE _IO ('$', 0) +#define PERF_EVENT_IOC_DISABLE _IO ('$', 1) +#define PERF_EVENT_IOC_REFRESH _IO ('$', 2) +#define PERF_EVENT_IOC_RESET _IO ('$', 3) +#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, u64) +#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5) + +enum perf_event_ioc_flags { + PERF_IOC_FLAG_GROUP = 1U << 0, +}; + +/* + * Structure of the page that can be mapped via mmap + */ +struct perf_event_mmap_page { + __u32 version; /* version number of this structure */ + __u32 compat_version; /* lowest version this is compat with */ + + /* + * Bits needed to read the hw events in user-space. + * + * u32 seq; + * s64 count; + * + * do { + * seq = pc->lock; + * + * barrier() + * if (pc->index) { + * count = pmc_read(pc->index - 1); + * count += pc->offset; + * } else + * goto regular_read; + * + * barrier(); + * } while (pc->lock != seq); + * + * NOTE: for obvious reason this only works on self-monitoring + * processes. 
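An illustrative user-space sketch (not part of the patch) fleshing out the seqlock-style read loop documented in the perf_event_mmap_page comment above. It assumes the event has already been opened and its first page mmap()ed; pmc_read() is a hypothetical stand-in for an architecture-specific counter read (e.g. RDPMC on x86) and barrier() for the required barrier — neither is provided by this header, and a portable program can always fall back to read(2) on the event fd instead.

#include <stdint.h>

extern uint64_t pmc_read(unsigned int idx);		/* hypothetical, arch-specific */
#define barrier() __asm__ __volatile__("" ::: "memory")

static uint64_t mmap_read_self(volatile struct perf_event_mmap_page *pc)
{
	uint32_t seq;
	uint64_t count;

	do {
		seq = pc->lock;
		barrier();
		if (pc->index)				/* event is live on a hardware counter */
			count = pmc_read(pc->index - 1) + pc->offset;
		else
			count = 0;			/* not on a PMC: use read(2) instead */
		barrier();
	} while (pc->lock != seq);			/* retry if the kernel updated the page */

	return count;
}

As the NOTE above says, this only makes sense for a process reading its own counters.
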
+ */ + __u32 lock; /* seqlock for synchronization */ + __u32 index; /* hardware event identifier */ + __s64 offset; /* add to hardware event value */ + __u64 time_enabled; /* time event active */ + __u64 time_running; /* time event on cpu */ + + /* + * Hole for extension of the self monitor capabilities + */ + + __u64 __reserved[123]; /* align to 1k */ + + /* + * Control data for the mmap() data buffer. + * + * User-space reading the @data_head value should issue an rmb(), on + * SMP capable platforms, after reading this value -- see + * perf_event_wakeup(). + * + * When the mapping is PROT_WRITE the @data_tail value should be + * written by userspace to reflect the last read data. In this case + * the kernel will not over-write unread data. + */ + __u64 data_head; /* head in the data section */ + __u64 data_tail; /* user-space written tail */ +}; + +#define PERF_RECORD_MISC_CPUMODE_MASK (3 << 0) +#define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0) +#define PERF_RECORD_MISC_KERNEL (1 << 0) +#define PERF_RECORD_MISC_USER (2 << 0) +#define PERF_RECORD_MISC_HYPERVISOR (3 << 0) + +struct perf_event_header { + __u32 type; + __u16 misc; + __u16 size; +}; + +enum perf_event_type { + + /* + * The MMAP events record the PROT_EXEC mappings so that we can + * correlate userspace IPs to code. They have the following structure: + * + * struct { + * struct perf_event_header header; + * + * u32 pid, tid; + * u64 addr; + * u64 len; + * u64 pgoff; + * char filename[]; + * }; + */ + PERF_RECORD_MMAP = 1, + + /* + * struct { + * struct perf_event_header header; + * u64 id; + * u64 lost; + * }; + */ + PERF_RECORD_LOST = 2, + + /* + * struct { + * struct perf_event_header header; + * + * u32 pid, tid; + * char comm[]; + * }; + */ + PERF_RECORD_COMM = 3, + + /* + * struct { + * struct perf_event_header header; + * u32 pid, ppid; + * u32 tid, ptid; + * u64 time; + * }; + */ + PERF_RECORD_EXIT = 4, + + /* + * struct { + * struct perf_event_header header; + * u64 time; + * u64 id; + * u64 stream_id; + * }; + */ + PERF_RECORD_THROTTLE = 5, + PERF_RECORD_UNTHROTTLE = 6, + + /* + * struct { + * struct perf_event_header header; + * u32 pid, ppid; + * u32 tid, ptid; + * u64 time; + * }; + */ + PERF_RECORD_FORK = 7, + + /* + * struct { + * struct perf_event_header header; + * u32 pid, tid; + * + * struct read_format values; + * }; + */ + PERF_RECORD_READ = 8, + + /* + * struct { + * struct perf_event_header header; + * + * { u64 ip; } && PERF_SAMPLE_IP + * { u32 pid, tid; } && PERF_SAMPLE_TID + * { u64 time; } && PERF_SAMPLE_TIME + * { u64 addr; } && PERF_SAMPLE_ADDR + * { u64 id; } && PERF_SAMPLE_ID + * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID + * { u32 cpu, res; } && PERF_SAMPLE_CPU + * { u64 period; } && PERF_SAMPLE_PERIOD + * + * { struct read_format values; } && PERF_SAMPLE_READ + * + * { u64 nr, + * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN + * + * # + * # The RAW record below is opaque data wrt the ABI + * # + * # That is, the ABI doesn't make any promises wrt to + * # the stability of its content, it may vary depending + * # on event, hardware, kernel version and phase of + * # the moon. + * # + * # In other words, PERF_SAMPLE_RAW contents are not an ABI. 
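A minimal consumer sketch (not from the patch) of the data_head/data_tail protocol described in the struct comment above. It assumes the event was mmap()ed PROT_READ|PROT_WRITE with one metadata page plus a power-of-two data area, that data points just past the metadata page, and that rmb()/mb() are the platform barriers the comment calls for (placeholders below); records that wrap the buffer edge are ignored for brevity.

#include <stdint.h>

#define rmb() __asm__ __volatile__("" ::: "memory")	/* placeholder barrier */
#define mb()  __asm__ __volatile__("" ::: "memory")	/* placeholder barrier */

static void drain_ring(struct perf_event_mmap_page *pg, char *data, uint64_t data_size)
{
	uint64_t head = pg->data_head;
	uint64_t tail = pg->data_tail;

	rmb();					/* read data_head before the records it covers */

	while (tail < head) {
		struct perf_event_header *hdr =
			(struct perf_event_header *)(data + (tail & (data_size - 1)));

		/* ... dispatch on hdr->type (PERF_RECORD_SAMPLE, PERF_RECORD_MMAP, ...) ... */
		tail += hdr->size;
	}

	mb();					/* finish reading before publishing data_tail */
	pg->data_tail = tail;			/* kernel may now overwrite what we consumed */
}
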
+ * # + * + * { u32 size; + * char data[size];}&& PERF_SAMPLE_RAW + * }; + */ + PERF_RECORD_SAMPLE = 9, + + PERF_RECORD_MAX, /* non-ABI */ +}; + +enum perf_callchain_context { + PERF_CONTEXT_HV = (__u64)-32, + PERF_CONTEXT_KERNEL = (__u64)-128, + PERF_CONTEXT_USER = (__u64)-512, + + PERF_CONTEXT_GUEST = (__u64)-2048, + PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176, + PERF_CONTEXT_GUEST_USER = (__u64)-2560, + + PERF_CONTEXT_MAX = (__u64)-4095, +}; + +#define PERF_FLAG_FD_NO_GROUP (1U << 0) +#define PERF_FLAG_FD_OUTPUT (1U << 1) + +#ifdef __KERNEL__ +/* + * Kernel-internal data types and definitions: + */ + +#ifdef CONFIG_PERF_EVENTS +# include <asm/perf_event.h> +#endif + +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/rculist.h> +#include <linux/rcupdate.h> +#include <linux/spinlock.h> +#include <linux/hrtimer.h> +#include <linux/fs.h> +#include <linux/pid_namespace.h> +#include <linux/workqueue.h> +#include <asm/atomic.h> + +#define PERF_MAX_STACK_DEPTH 255 + +struct perf_callchain_entry { + __u64 nr; + __u64 ip[PERF_MAX_STACK_DEPTH]; +}; + +struct perf_raw_record { + u32 size; + void *data; +}; + +struct task_struct; + +/** + * struct hw_perf_event - performance event hardware details: + */ +struct hw_perf_event { +#ifdef CONFIG_PERF_EVENTS + union { + struct { /* hardware */ + u64 config; + unsigned long config_base; + unsigned long event_base; + int idx; + }; + struct { /* software */ + s64 remaining; + struct hrtimer hrtimer; + }; + }; + atomic64_t prev_count; + u64 sample_period; + u64 last_period; + atomic64_t period_left; + u64 interrupts; + + u64 freq_count; + u64 freq_interrupts; + u64 freq_stamp; +#endif +}; + +struct perf_event; + +/** + * struct pmu - generic performance monitoring unit + */ +struct pmu { + int (*enable) (struct perf_event *event); + void (*disable) (struct perf_event *event); + void (*read) (struct perf_event *event); + void (*unthrottle) (struct perf_event *event); +}; + +/** + * enum perf_event_active_state - the states of a event + */ +enum perf_event_active_state { + PERF_EVENT_STATE_ERROR = -2, + PERF_EVENT_STATE_OFF = -1, + PERF_EVENT_STATE_INACTIVE = 0, + PERF_EVENT_STATE_ACTIVE = 1, +}; + +struct file; + +struct perf_mmap_data { + struct rcu_head rcu_head; +#ifdef CONFIG_PERF_USE_VMALLOC + struct work_struct work; +#endif + int data_order; + int nr_pages; /* nr of data pages */ + int writable; /* are we writable */ + int nr_locked; /* nr pages mlocked */ + + atomic_t poll; /* POLL_ for wakeups */ + atomic_t events; /* event_id limit */ + + atomic_long_t head; /* write position */ + atomic_long_t done_head; /* completed head */ + + atomic_t lock; /* concurrent writes */ + atomic_t wakeup; /* needs a wakeup */ + atomic_t lost; /* nr records lost */ + + long watermark; /* wakeup watermark */ + + struct perf_event_mmap_page *user_page; + void *data_pages[0]; +}; + +struct perf_pending_entry { + struct perf_pending_entry *next; + void (*func)(struct perf_pending_entry *); +}; + +/** + * struct perf_event - performance event kernel representation: + */ +struct perf_event { +#ifdef CONFIG_PERF_EVENTS + struct list_head group_entry; + struct list_head event_entry; + struct list_head sibling_list; + int nr_siblings; + struct perf_event *group_leader; + struct perf_event *output; + const struct pmu *pmu; + + enum perf_event_active_state state; + atomic64_t count; + + /* + * These are the total time in nanoseconds that the event + * has been enabled (i.e. 
eligible to run, and the task has + * been scheduled in, if this is a per-task event) + * and running (scheduled onto the CPU), respectively. + * + * They are computed from tstamp_enabled, tstamp_running and + * tstamp_stopped when the event is in INACTIVE or ACTIVE state. + */ + u64 total_time_enabled; + u64 total_time_running; + + /* + * These are timestamps used for computing total_time_enabled + * and total_time_running when the event is in INACTIVE or + * ACTIVE state, measured in nanoseconds from an arbitrary point + * in time. + * tstamp_enabled: the notional time when the event was enabled + * tstamp_running: the notional time when the event was scheduled on + * tstamp_stopped: in INACTIVE state, the notional time when the + * event was scheduled off. + */ + u64 tstamp_enabled; + u64 tstamp_running; + u64 tstamp_stopped; + + struct perf_event_attr attr; + struct hw_perf_event hw; + + struct perf_event_context *ctx; + struct file *filp; + + /* + * These accumulate total time (in nanoseconds) that children + * events have been enabled and running, respectively. + */ + atomic64_t child_total_time_enabled; + atomic64_t child_total_time_running; + + /* + * Protect attach/detach and child_list: + */ + struct mutex child_mutex; + struct list_head child_list; + struct perf_event *parent; + + int oncpu; + int cpu; + + struct list_head owner_entry; + struct task_struct *owner; + + /* mmap bits */ + struct mutex mmap_mutex; + atomic_t mmap_count; + struct perf_mmap_data *data; + + /* poll related */ + wait_queue_head_t waitq; + struct fasync_struct *fasync; + + /* delayed work for NMIs and such */ + int pending_wakeup; + int pending_kill; + int pending_disable; + struct perf_pending_entry pending; + + atomic_t event_limit; + + void (*destroy)(struct perf_event *); + struct rcu_head rcu_head; + + struct pid_namespace *ns; + u64 id; +#endif +}; + +/** + * struct perf_event_context - event context structure + * + * Used as a container for task events and CPU events as well: + */ +struct perf_event_context { + /* + * Protect the states of the events in the list, + * nr_active, and the list: + */ + spinlock_t lock; + /* + * Protect the list of events. Locking either mutex or lock + * is sufficient to ensure the list doesn't change; to change + * the list you need to lock both the mutex and the spinlock. + */ + struct mutex mutex; + + struct list_head group_list; + struct list_head event_list; + int nr_events; + int nr_active; + int is_active; + int nr_stat; + atomic_t refcount; + struct task_struct *task; + + /* + * Context clock, runs when context enabled. + */ + u64 time; + u64 timestamp; + + /* + * These fields let us detect when two contexts have both + * been cloned (inherited) from a common ancestor. 
+ */ + struct perf_event_context *parent_ctx; + u64 parent_gen; + u64 generation; + int pin_count; + struct rcu_head rcu_head; +}; + +/** + * struct perf_event_cpu_context - per cpu event context structure + */ +struct perf_cpu_context { + struct perf_event_context ctx; + struct perf_event_context *task_ctx; + int active_oncpu; + int max_pertask; + int exclusive; + + /* + * Recursion avoidance: + * + * task, softirq, irq, nmi context + */ + int recursion[4]; +}; + +struct perf_output_handle { + struct perf_event *event; + struct perf_mmap_data *data; + unsigned long head; + unsigned long offset; + int nmi; + int sample; + int locked; + unsigned long flags; +}; + +#ifdef CONFIG_PERF_EVENTS + +/* + * Set by architecture code: + */ +extern int perf_max_events; + +extern const struct pmu *hw_perf_event_init(struct perf_event *event); + +extern void perf_event_task_sched_in(struct task_struct *task, int cpu); +extern void perf_event_task_sched_out(struct task_struct *task, + struct task_struct *next, int cpu); +extern void perf_event_task_tick(struct task_struct *task, int cpu); +extern int perf_event_init_task(struct task_struct *child); +extern void perf_event_exit_task(struct task_struct *child); +extern void perf_event_free_task(struct task_struct *task); +extern void set_perf_event_pending(void); +extern void perf_event_do_pending(void); +extern void perf_event_print_debug(void); +extern void __perf_disable(void); +extern bool __perf_enable(void); +extern void perf_disable(void); +extern void perf_enable(void); +extern int perf_event_task_disable(void); +extern int perf_event_task_enable(void); +extern int hw_perf_group_sched_in(struct perf_event *group_leader, + struct perf_cpu_context *cpuctx, + struct perf_event_context *ctx, int cpu); +extern void perf_event_update_userpage(struct perf_event *event); + +struct perf_sample_data { + u64 type; + + u64 ip; + struct { + u32 pid; + u32 tid; + } tid_entry; + u64 time; + u64 addr; + u64 id; + u64 stream_id; + struct { + u32 cpu; + u32 reserved; + } cpu_entry; + u64 period; + struct perf_callchain_entry *callchain; + struct perf_raw_record *raw; +}; + +extern void perf_output_sample(struct perf_output_handle *handle, + struct perf_event_header *header, + struct perf_sample_data *data, + struct perf_event *event); +extern void perf_prepare_sample(struct perf_event_header *header, + struct perf_sample_data *data, + struct perf_event *event, + struct pt_regs *regs); + +extern int perf_event_overflow(struct perf_event *event, int nmi, + struct perf_sample_data *data, + struct pt_regs *regs); + +/* + * Return 1 for a software event, 0 for a hardware event + */ +static inline int is_software_event(struct perf_event *event) +{ + return (event->attr.type != PERF_TYPE_RAW) && + (event->attr.type != PERF_TYPE_HARDWARE) && + (event->attr.type != PERF_TYPE_HW_CACHE); +} + +extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; + +extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64); + +static inline void +perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) +{ + if (atomic_read(&perf_swevent_enabled[event_id])) + __perf_sw_event(event_id, nr, nmi, regs, addr); +} + +extern void __perf_event_mmap(struct vm_area_struct *vma); + +static inline void perf_event_mmap(struct vm_area_struct *vma) +{ + if (vma->vm_flags & VM_EXEC) + __perf_event_mmap(vma); +} + +extern void perf_event_comm(struct task_struct *tsk); +extern void perf_event_fork(struct task_struct *tsk); + +extern struct perf_callchain_entry 
*perf_callchain(struct pt_regs *regs); + +extern int sysctl_perf_event_paranoid; +extern int sysctl_perf_event_mlock; +extern int sysctl_perf_event_sample_rate; + +extern void perf_event_init(void); +extern void perf_tp_event(int event_id, u64 addr, u64 count, + void *record, int entry_size); + +#ifndef perf_misc_flags +#define perf_misc_flags(regs) (user_mode(regs) ? PERF_RECORD_MISC_USER : \ + PERF_RECORD_MISC_KERNEL) +#define perf_instruction_pointer(regs) instruction_pointer(regs) +#endif + +extern int perf_output_begin(struct perf_output_handle *handle, + struct perf_event *event, unsigned int size, + int nmi, int sample); +extern void perf_output_end(struct perf_output_handle *handle); +extern void perf_output_copy(struct perf_output_handle *handle, + const void *buf, unsigned int len); +#else +static inline void +perf_event_task_sched_in(struct task_struct *task, int cpu) { } +static inline void +perf_event_task_sched_out(struct task_struct *task, + struct task_struct *next, int cpu) { } +static inline void +perf_event_task_tick(struct task_struct *task, int cpu) { } +static inline int perf_event_init_task(struct task_struct *child) { return 0; } +static inline void perf_event_exit_task(struct task_struct *child) { } +static inline void perf_event_free_task(struct task_struct *task) { } +static inline void perf_event_do_pending(void) { } +static inline void perf_event_print_debug(void) { } +static inline void perf_disable(void) { } +static inline void perf_enable(void) { } +static inline int perf_event_task_disable(void) { return -EINVAL; } +static inline int perf_event_task_enable(void) { return -EINVAL; } + +static inline void +perf_sw_event(u32 event_id, u64 nr, int nmi, + struct pt_regs *regs, u64 addr) { } + +static inline void perf_event_mmap(struct vm_area_struct *vma) { } +static inline void perf_event_comm(struct task_struct *tsk) { } +static inline void perf_event_fork(struct task_struct *tsk) { } +static inline void perf_event_init(void) { } + +#endif + +#define perf_output_put(handle, x) \ + perf_output_copy((handle), &(x), sizeof(x)) + +#endif /* __KERNEL__ */ +#endif /* _LINUX_PERF_EVENT_H */ diff --git a/include/linux/phonet.h b/include/linux/phonet.h index 1ef5a0781831..e5126cff9b2a 100644 --- a/include/linux/phonet.h +++ b/include/linux/phonet.h @@ -38,6 +38,7 @@ #define PNPIPE_IFINDEX 2 #define PNADDR_ANY 0 +#define PNADDR_BROADCAST 0xFC #define PNPORT_RESOURCE_ROUTING 0 /* Values for PNPIPE_ENCAP option */ diff --git a/include/linux/pnp.h b/include/linux/pnp.h index b063c7328ba5..fddfafaed024 100644 --- a/include/linux/pnp.h +++ b/include/linux/pnp.h @@ -360,6 +360,7 @@ struct pnp_driver { unsigned int flags; int (*probe) (struct pnp_dev *dev, const struct pnp_device_id *dev_id); void (*remove) (struct pnp_dev *dev); + void (*shutdown) (struct pnp_dev *dev); int (*suspend) (struct pnp_dev *dev, pm_message_t state); int (*resume) (struct pnp_dev *dev); struct device_driver driver; diff --git a/include/linux/poison.h b/include/linux/poison.h index 6729f7dcd60e..7fc194aef8c2 100644 --- a/include/linux/poison.h +++ b/include/linux/poison.h @@ -65,6 +65,9 @@ #define MUTEX_DEBUG_INIT 0x11 #define MUTEX_DEBUG_FREE 0x22 +/********** lib/flex_array.c **********/ +#define FLEX_ARRAY_FREE 0x6c /* for use-after-free poisoning */ + /********** security/ **********/ #define KEY_DESTROY 0xbd diff --git a/include/linux/poll.h b/include/linux/poll.h index fa287f25138d..6673743946f7 100644 --- a/include/linux/poll.h +++ b/include/linux/poll.h @@ -6,10 +6,10 @@ #ifdef __KERNEL__ 
#include <linux/compiler.h> +#include <linux/ktime.h> #include <linux/wait.h> #include <linux/string.h> #include <linux/fs.h> -#include <linux/sched.h> #include <asm/uaccess.h> /* ~832 bytes of stack space used max in sys_select/sys_poll before allocating diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h index 065a3652a3ea..67608161df6b 100644 --- a/include/linux/posix_acl.h +++ b/include/linux/posix_acl.h @@ -147,6 +147,20 @@ static inline void forget_cached_acl(struct inode *inode, int type) if (old != ACL_NOT_CACHED) posix_acl_release(old); } + +static inline void forget_all_cached_acls(struct inode *inode) +{ + struct posix_acl *old_access, *old_default; + spin_lock(&inode->i_lock); + old_access = inode->i_acl; + old_default = inode->i_default_acl; + inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED; + spin_unlock(&inode->i_lock); + if (old_access != ACL_NOT_CACHED) + posix_acl_release(old_access); + if (old_default != ACL_NOT_CACHED) + posix_acl_release(old_default); +} #endif static inline void cache_no_acl(struct inode *inode) diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 594c494ac3f0..b5d096d3a9be 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -39,6 +39,13 @@ enum { }; enum { + POWER_SUPPLY_CHARGE_TYPE_UNKNOWN = 0, + POWER_SUPPLY_CHARGE_TYPE_NONE, + POWER_SUPPLY_CHARGE_TYPE_TRICKLE, + POWER_SUPPLY_CHARGE_TYPE_FAST, +}; + +enum { POWER_SUPPLY_HEALTH_UNKNOWN = 0, POWER_SUPPLY_HEALTH_GOOD, POWER_SUPPLY_HEALTH_OVERHEAT, @@ -58,9 +65,19 @@ enum { POWER_SUPPLY_TECHNOLOGY_LiMn, }; +enum { + POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN = 0, + POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL, + POWER_SUPPLY_CAPACITY_LEVEL_LOW, + POWER_SUPPLY_CAPACITY_LEVEL_NORMAL, + POWER_SUPPLY_CAPACITY_LEVEL_HIGH, + POWER_SUPPLY_CAPACITY_LEVEL_FULL, +}; + enum power_supply_property { /* Properties of type `int' */ POWER_SUPPLY_PROP_STATUS = 0, + POWER_SUPPLY_PROP_CHARGE_TYPE, POWER_SUPPLY_PROP_HEALTH, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_ONLINE, @@ -89,6 +106,7 @@ enum power_supply_property { POWER_SUPPLY_PROP_ENERGY_NOW, POWER_SUPPLY_PROP_ENERGY_AVG, POWER_SUPPLY_PROP_CAPACITY, /* in percents! */ + POWER_SUPPLY_PROP_CAPACITY_LEVEL, POWER_SUPPLY_PROP_TEMP, POWER_SUPPLY_PROP_TEMP_AMBIENT, POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW, @@ -126,6 +144,7 @@ struct power_supply { enum power_supply_property psp, union power_supply_propval *val); void (*external_power_changed)(struct power_supply *psy); + void (*set_charged)(struct power_supply *psy); /* For APM emulation, think legacy userspace. */ int use_for_apm; @@ -165,8 +184,10 @@ struct power_supply_info { int use_for_apm; }; +extern struct power_supply *power_supply_get_by_name(char *name); extern void power_supply_changed(struct power_supply *psy); extern int power_supply_am_i_supplied(struct power_supply *psy); +extern int power_supply_set_battery_charged(struct power_supply *psy); #if defined(CONFIG_POWER_SUPPLY) || defined(CONFIG_POWER_SUPPLY_MODULE) extern int power_supply_is_system_supplied(void); diff --git a/include/linux/prctl.h b/include/linux/prctl.h index b00df4c79c63..a3baeb2c2161 100644 --- a/include/linux/prctl.h +++ b/include/linux/prctl.h @@ -85,7 +85,21 @@ #define PR_SET_TIMERSLACK 29 #define PR_GET_TIMERSLACK 30 -#define PR_TASK_PERF_COUNTERS_DISABLE 31 -#define PR_TASK_PERF_COUNTERS_ENABLE 32 +#define PR_TASK_PERF_EVENTS_DISABLE 31 +#define PR_TASK_PERF_EVENTS_ENABLE 32 + +/* + * Set early/late kill mode for hwpoison memory corruption. 
+ * This influences when the process gets killed on a memory corruption. + */ +#define PR_MCE_KILL 33 +# define PR_MCE_KILL_CLEAR 0 +# define PR_MCE_KILL_SET 1 + +# define PR_MCE_KILL_LATE 0 +# define PR_MCE_KILL_EARLY 1 +# define PR_MCE_KILL_DEFAULT 2 + +#define PR_MCE_KILL_GET 34 #endif /* _LINUX_PRCTL_H */ diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index e6e77d31c418..379eaed72d4b 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -78,10 +78,19 @@ struct proc_dir_entry { struct list_head pde_openers; /* who did ->open, but not ->release */ }; +enum kcore_type { + KCORE_TEXT, + KCORE_VMALLOC, + KCORE_RAM, + KCORE_VMEMMAP, + KCORE_OTHER, +}; + struct kcore_list { - struct kcore_list *next; + struct list_head list; unsigned long addr; size_t size; + int type; }; struct vmcore { @@ -233,11 +242,12 @@ static inline void dup_mm_exe_file(struct mm_struct *oldmm, #endif /* CONFIG_PROC_FS */ #if !defined(CONFIG_PROC_KCORE) -static inline void kclist_add(struct kcore_list *new, void *addr, size_t size) +static inline void +kclist_add(struct kcore_list *new, void *addr, size_t size, int type) { } #else -extern void kclist_add(struct kcore_list *, void *, size_t); +extern void kclist_add(struct kcore_list *, void *, size_t, int type); #endif union proc_op { diff --git a/include/linux/quota.h b/include/linux/quota.h index 78c48895b12a..ce9a9b2e5cd4 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -376,6 +376,17 @@ static inline unsigned int dquot_generic_flag(unsigned int flags, int type) return flags >> _DQUOT_STATE_FLAGS; } +#ifdef CONFIG_QUOTA_NETLINK_INTERFACE +extern void quota_send_warning(short type, unsigned int id, dev_t dev, + const char warntype); +#else +static inline void quota_send_warning(short type, unsigned int id, dev_t dev, + const char warntype) +{ + return; +} +#endif /* CONFIG_QUOTA_NETLINK_INTERFACE */ + struct quota_info { unsigned int flags; /* Flags for diskquotas on this device */ struct mutex dqio_mutex; /* lock device while I/O in progress */ diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index 26361c4c037a..3ebb23153640 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -135,8 +135,8 @@ static inline int sb_any_quota_active(struct super_block *sb) /* * Operations supported for diskquotas. 
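A user-space sketch (not from the patch) of the PR_MCE_KILL interface added to prctl.h above, assuming the usual prctl(2) convention of passing unused arguments as 0, that PR_MCE_KILL_GET reports the current mode via the return value, and that the build environment exposes the new constants.

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	/* opt in to early SIGBUS delivery when our memory is found corrupted */
	if (prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0))
		perror("PR_MCE_KILL");

	/* read back the current per-process policy */
	printf("mce kill policy: %d\n", prctl(PR_MCE_KILL_GET, 0, 0, 0, 0));
	return 0;
}
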
*/ -extern struct dquot_operations dquot_operations; -extern struct quotactl_ops vfs_quotactl_ops; +extern const struct dquot_operations dquot_operations; +extern const struct quotactl_ops vfs_quotactl_ops; #define sb_dquot_ops (&dquot_operations) #define sb_quotactl_ops (&vfs_quotactl_ops) diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h index 37aaf2b39863..4e768dda87b0 100644 --- a/include/linux/ramfs.h +++ b/include/linux/ramfs.h @@ -17,7 +17,7 @@ extern int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma); #endif extern const struct file_operations ramfs_file_operations; -extern struct vm_operations_struct generic_file_vm_ops; +extern const struct vm_operations_struct generic_file_vm_ops; extern int __init init_rootfs(void); #endif diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 6fe0363724e9..3ebd0b7bcb08 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -77,7 +77,7 @@ extern int rcu_scheduler_active; #error "Unknown RCU implementation specified to kernel configuration" #endif -#define RCU_HEAD_INIT { .next = NULL, .func = NULL } +#define RCU_HEAD_INIT { .next = NULL, .func = NULL } #define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT #define INIT_RCU_HEAD(ptr) do { \ (ptr)->next = NULL; (ptr)->func = NULL; \ @@ -129,12 +129,6 @@ static inline void rcu_read_lock(void) rcu_read_acquire(); } -/** - * rcu_read_unlock - marks the end of an RCU read-side critical section. - * - * See rcu_read_lock() for more information. - */ - /* * So where is rcu_write_lock()? It does not exist, as there is no * way for writers to lock out RCU readers. This is a feature, not @@ -144,6 +138,12 @@ static inline void rcu_read_lock(void) * used as well. RCU does not care how the writers keep out of each * others' way, as long as they do so. */ + +/** + * rcu_read_unlock - marks the end of an RCU read-side critical section. + * + * See rcu_read_lock() for more information. + */ static inline void rcu_read_unlock(void) { rcu_read_release(); @@ -196,6 +196,8 @@ static inline void rcu_read_lock_sched(void) __acquire(RCU_SCHED); rcu_read_acquire(); } + +/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */ static inline notrace void rcu_read_lock_sched_notrace(void) { preempt_disable_notrace(); @@ -213,6 +215,8 @@ static inline void rcu_read_unlock_sched(void) __release(RCU_SCHED); preempt_enable(); } + +/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. 
*/ static inline notrace void rcu_read_unlock_sched_notrace(void) { __release(RCU_SCHED); diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 37682770e9d2..9642c6bcb399 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -30,10 +30,14 @@ #ifndef __LINUX_RCUTREE_H #define __LINUX_RCUTREE_H +struct notifier_block; + extern void rcu_sched_qs(int cpu); extern void rcu_bh_qs(int cpu); - +extern int rcu_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu); extern int rcu_needs_cpu(int cpu); +extern int rcu_expedited_torture_stats(char *page); #ifdef CONFIG_TREE_PREEMPT_RCU @@ -72,11 +76,7 @@ static inline void __rcu_read_unlock_bh(void) extern void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); - -static inline void synchronize_rcu_expedited(void) -{ - synchronize_sched_expedited(); -} +extern void synchronize_rcu_expedited(void); static inline void synchronize_rcu_bh_expedited(void) { @@ -85,16 +85,11 @@ static inline void synchronize_rcu_bh_expedited(void) extern void __rcu_init(void); extern void rcu_check_callbacks(int cpu, int user); -extern void rcu_restart_cpu(int cpu); extern long rcu_batches_completed(void); extern long rcu_batches_completed_bh(void); extern long rcu_batches_completed_sched(void); -static inline void rcu_init_sched(void) -{ -} - #ifdef CONFIG_NO_HZ void rcu_enter_nohz(void); void rcu_exit_nohz(void); @@ -107,7 +102,7 @@ static inline void rcu_exit_nohz(void) } #endif /* CONFIG_NO_HZ */ -/* A context switch is a grace period for rcutree. */ +/* A context switch is a grace period for RCU-sched and RCU-bh. */ static inline int rcu_blocking_is_gp(void) { return num_online_cpus() == 1; diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 277f4b964df5..490c5b37b6d7 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -125,6 +125,8 @@ struct regulator_bulk_data { /* regulator get and put */ struct regulator *__must_check regulator_get(struct device *dev, const char *id); +struct regulator *__must_check regulator_get_exclusive(struct device *dev, + const char *id); void regulator_put(struct regulator *regulator); /* regulator output control and status */ @@ -144,6 +146,8 @@ void regulator_bulk_free(int num_consumers, int regulator_count_voltages(struct regulator *regulator); int regulator_list_voltage(struct regulator *regulator, unsigned selector); +int regulator_is_supported_voltage(struct regulator *regulator, + int min_uV, int max_uV); int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV); int regulator_get_voltage(struct regulator *regulator); int regulator_set_current_limit(struct regulator *regulator, diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index ce1be708ca16..31f2055eae28 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -37,7 +37,8 @@ enum regulator_status { * * @enable: Configure the regulator as enabled. * @disable: Configure the regulator as disabled. - * @is_enabled: Return 1 if the regulator is enabled, 0 otherwise. + * @is_enabled: Return 1 if the regulator is enabled, 0 if not. + * May also return negative errno. * * @set_voltage: Set the voltage for the regulator within the range specified. * The driver should select the voltage closest to min_uV. 
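A consumer-side sketch (not from the patch) of the helpers newly declared in regulator/consumer.h above; the "vdd" supply name and the 1.8 V values are invented for illustration.

#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int board_setup_vdd(struct device *dev)
{
	struct regulator *reg = regulator_get_exclusive(dev, "vdd");	/* sole user of the supply */
	int ret = 0;

	if (IS_ERR(reg))
		return PTR_ERR(reg);

	if (regulator_is_supported_voltage(reg, 1800000, 1800000) > 0)
		ret = regulator_set_voltage(reg, 1800000, 1800000);

	regulator_put(reg);
	return ret;
}
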
@@ -162,6 +163,8 @@ struct regulator_desc { struct regulator_dev { struct regulator_desc *desc; int use_count; + int open_count; + int exclusive; /* lists we belong to */ struct list_head list; /* list of all regulators */ diff --git a/include/linux/regulator/fixed.h b/include/linux/regulator/fixed.h index 91b4da31f1b5..e94a4a1c7c8a 100644 --- a/include/linux/regulator/fixed.h +++ b/include/linux/regulator/fixed.h @@ -5,6 +5,9 @@ * * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> * + * Copyright (c) 2009 Nokia Corporation + * Roger Quadros <ext-roger.quadros@nokia.com> + * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the @@ -16,9 +19,30 @@ struct regulator_init_data; +/** + * struct fixed_voltage_config - fixed_voltage_config structure + * @supply_name: Name of the regulator supply + * @microvolts: Output voltage of regulator + * @gpio: GPIO to use for enable control + * set to -EINVAL if not used + * @enable_high: Polarity of enable GPIO + * 1 = Active high, 0 = Active low + * @enabled_at_boot: Whether regulator has been enabled at + * boot or not. 1 = Yes, 0 = No + * This is used to keep the regulator at + * the default state + * @init_data: regulator_init_data + * + * This structure contains fixed voltage regulator configuration + * information that must be passed by platform code to the fixed + * voltage regulator driver. + */ struct fixed_voltage_config { const char *supply_name; int microvolts; + int gpio; + unsigned enable_high:1; + unsigned enabled_at_boot:1; struct regulator_init_data *init_data; }; diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index bac64fa390f2..87f5f176d4ef 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h @@ -41,7 +41,7 @@ struct regulator; #define REGULATOR_CHANGE_DRMS 0x10 /** - * struct regulator_state - regulator state during low power syatem states + * struct regulator_state - regulator state during low power system states * * This describes a regulators state during a system wide low power state. * @@ -117,25 +117,37 @@ struct regulation_constraints { /* mode to set on startup */ unsigned int initial_mode; - /* constriant flags */ + /* constraint flags */ unsigned always_on:1; /* regulator never off when system is on */ unsigned boot_on:1; /* bootloader/firmware enabled regulator */ - unsigned apply_uV:1; /* apply uV constraint iff min == max */ + unsigned apply_uV:1; /* apply uV constraint if min == max */ }; /** * struct regulator_consumer_supply - supply -> device mapping * - * This maps a supply name to a device. + * This maps a supply name to a device. Only one of dev or dev_name + * can be specified. Use of dev_name allows support for buses which + * make struct device available late such as I2C and is the preferred + * form. * * @dev: Device structure for the consumer. + * @dev_name: Result of dev_name() for the consumer. * @supply: Name for the supply. */ struct regulator_consumer_supply { struct device *dev; /* consumer */ + const char *dev_name; /* dev_name() for consumer */ const char *supply; /* consumer supply - e.g. "vcc" */ }; +/* Initialize struct regulator_consumer_supply */ +#define REGULATOR_SUPPLY(_name, _dev_name) \ +{ \ + .supply = _name, \ + .dev_name = _dev_name, \ +} + /** * struct regulator_init_data - regulator platform initialisation data. 
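A hypothetical board-file sketch (not from the patch) tying the documented fixed_voltage_config fields to the new REGULATOR_SUPPLY() initializer; the device and supply names are invented, and the regulator_init_data fields (.constraints, .consumer_supplies, ...) are assumed from the existing machine interface.

#include <linux/kernel.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/fixed.h>

static struct regulator_consumer_supply board_vmmc_consumers[] = {
	REGULATOR_SUPPLY("vmmc", "mmc0"),	/* consumer matched by dev_name() */
};

static struct regulator_init_data board_vmmc_init = {
	.constraints = {
		.valid_ops_mask		= REGULATOR_CHANGE_STATUS,
	},
	.num_consumer_supplies	= ARRAY_SIZE(board_vmmc_consumers),
	.consumer_supplies	= board_vmmc_consumers,
};

static struct fixed_voltage_config board_vmmc_config = {
	.supply_name	 = "vmmc_fixed",
	.microvolts	 = 3000000,	/* 3.0 V output */
	.gpio		 = -EINVAL,	/* no enable GPIO */
	.enabled_at_boot = 1,
	.init_data	 = &board_vmmc_init,
};
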
* @@ -166,6 +178,12 @@ struct regulator_init_data { int regulator_suspend_prepare(suspend_state_t state); +#ifdef CONFIG_REGULATOR void regulator_has_full_constraints(void); +#else +static inline void regulator_has_full_constraints(void) +{ +} +#endif #endif diff --git a/include/linux/regulator/max1586.h b/include/linux/regulator/max1586.h index 44563192bf16..de9a7fae20be 100644 --- a/include/linux/regulator/max1586.h +++ b/include/linux/regulator/max1586.h @@ -36,7 +36,7 @@ * max1586_subdev_data - regulator data * @id: regulator Id (either MAX1586_V3 or MAX1586_V6) * @name: regulator cute name (example for V3: "vcc_core") - * @platform_data: regulator init data (contraints, supplies, ...) + * @platform_data: regulator init data (constraints, supplies, ...) */ struct max1586_subdev_data { int id; @@ -46,7 +46,7 @@ struct max1586_subdev_data { /** * max1586_platform_data - platform data for max1586 - * @num_subdevs: number of regultors used (may be 1 or 2) + * @num_subdevs: number of regulators used (may be 1 or 2) * @subdevs: regulator used * At most, there will be a regulator for V3 and one for V6 voltages. * @v3_gain: gain on the V3 voltage output multiplied by 1e6. diff --git a/include/linux/relay.h b/include/linux/relay.h index 953fc055e875..14a86bc7102b 100644 --- a/include/linux/relay.h +++ b/include/linux/relay.h @@ -140,7 +140,7 @@ struct rchan_callbacks * cause relay_open() to create a single global buffer rather * than the default set of per-cpu buffers. * - * See Documentation/filesystems/relayfs.txt for more info. + * See Documentation/filesystems/relay.txt for more info. */ struct dentry *(*create_buf_file)(const char *filename, struct dentry *parent, diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h index 511f42fc6816..fcb9884df618 100644 --- a/include/linux/res_counter.h +++ b/include/linux/res_counter.h @@ -35,6 +35,10 @@ struct res_counter { */ unsigned long long limit; /* + * the limit that usage can be exceed + */ + unsigned long long soft_limit; + /* * the number of unsuccessful attempts to consume the resource */ unsigned long long failcnt; @@ -87,6 +91,7 @@ enum { RES_MAX_USAGE, RES_LIMIT, RES_FAILCNT, + RES_SOFT_LIMIT, }; /* @@ -132,6 +137,36 @@ static inline bool res_counter_limit_check_locked(struct res_counter *cnt) return false; } +static inline bool res_counter_soft_limit_check_locked(struct res_counter *cnt) +{ + if (cnt->usage < cnt->soft_limit) + return true; + + return false; +} + +/** + * Get the difference between the usage and the soft limit + * @cnt: The counter + * + * Returns 0 if usage is less than or equal to soft limit + * The difference between usage and soft limit, otherwise. + */ +static inline unsigned long long +res_counter_soft_limit_excess(struct res_counter *cnt) +{ + unsigned long long excess; + unsigned long flags; + + spin_lock_irqsave(&cnt->lock, flags); + if (cnt->usage <= cnt->soft_limit) + excess = 0; + else + excess = cnt->usage - cnt->soft_limit; + spin_unlock_irqrestore(&cnt->lock, flags); + return excess; +} + /* * Helper function to detect if the cgroup is within it's limit or * not. 
It's currently called from cgroup_rss_prepare() @@ -147,6 +182,17 @@ static inline bool res_counter_check_under_limit(struct res_counter *cnt) return ret; } +static inline bool res_counter_check_under_soft_limit(struct res_counter *cnt) +{ + bool ret; + unsigned long flags; + + spin_lock_irqsave(&cnt->lock, flags); + ret = res_counter_soft_limit_check_locked(cnt); + spin_unlock_irqrestore(&cnt->lock, flags); + return ret; +} + static inline void res_counter_reset_max(struct res_counter *cnt) { unsigned long flags; @@ -180,4 +226,16 @@ static inline int res_counter_set_limit(struct res_counter *cnt, return ret; } +static inline int +res_counter_set_soft_limit(struct res_counter *cnt, + unsigned long long soft_limit) +{ + unsigned long flags; + + spin_lock_irqsave(&cnt->lock, flags); + cnt->soft_limit = soft_limit; + spin_unlock_irqrestore(&cnt->lock, flags); + return 0; +} + #endif diff --git a/include/linux/rmap.h b/include/linux/rmap.h index bf116d0dbf23..cb0ba7032609 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -71,21 +71,29 @@ void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned lon void page_add_file_rmap(struct page *); void page_remove_rmap(struct page *); -#ifdef CONFIG_DEBUG_VM -void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address); -#else -static inline void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) +static inline void page_dup_rmap(struct page *page) { atomic_inc(&page->_mapcount); } -#endif /* * Called from mm/vmscan.c to handle paging out */ int page_referenced(struct page *, int is_locked, struct mem_cgroup *cnt, unsigned long *vm_flags); -int try_to_unmap(struct page *, int ignore_refs); +enum ttu_flags { + TTU_UNMAP = 0, /* unmap mode */ + TTU_MIGRATION = 1, /* migration mode */ + TTU_MUNLOCK = 2, /* munlock mode */ + TTU_ACTION_MASK = 0xff, + + TTU_IGNORE_MLOCK = (1 << 8), /* ignore mlock */ + TTU_IGNORE_ACCESS = (1 << 9), /* don't age */ + TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */ +}; +#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK) + +int try_to_unmap(struct page *, enum ttu_flags flags); /* * Called from mm/filemap_xip.c to unmap empty zero page @@ -112,6 +120,13 @@ int page_mkclean(struct page *); */ int try_to_munlock(struct page *); +/* + * Called by memory-failure.c to kill processes. 
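A sketch (not from the patch) of how a caller such as the hwpoison code might combine the new ttu_flags added to rmap.h above; the SWAP_SUCCESS return convention is assumed from the existing try_to_unmap() interface.

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/rmap.h>

static int unmap_poisoned_page(struct page *page)
{
	/* TTU_ACTION(ttu) recovers the mode (TTU_UNMAP); the high bits are modifiers */
	enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;

	if (try_to_unmap(page, ttu) != SWAP_SUCCESS)
		return -EBUSY;		/* still mapped somewhere */

	return 0;
}
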
+ */ +struct anon_vma *page_lock_anon_vma(struct page *page); +void page_unlock_anon_vma(struct anon_vma *anon_vma); +int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma); + #else /* !CONFIG_MMU */ #define anon_vma_init() do {} while (0) diff --git a/include/linux/sched.h b/include/linux/sched.h index 115af05ecabd..49be8f7c05f6 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -100,7 +100,7 @@ struct robust_list_head; struct bio; struct fs_struct; struct bts_context; -struct perf_counter_context; +struct perf_event_context; /* * List of flags we want to share for kernel threads, @@ -140,6 +140,10 @@ extern int nr_processes(void); extern unsigned long nr_running(void); extern unsigned long nr_uninterruptible(void); extern unsigned long nr_iowait(void); +extern unsigned long nr_iowait_cpu(void); +extern unsigned long this_cpu_load(void); + + extern void calc_global_load(void); extern u64 cpu_nr_migrations(int cpu); @@ -305,7 +309,7 @@ extern void softlockup_tick(void); extern void touch_softlockup_watchdog(void); extern void touch_all_softlockup_watchdogs(void); extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write, - struct file *filp, void __user *buffer, + void __user *buffer, size_t *lenp, loff_t *ppos); extern unsigned int softlockup_panic; extern int softlockup_thresh; @@ -327,7 +331,7 @@ extern unsigned long sysctl_hung_task_check_count; extern unsigned long sysctl_hung_task_timeout_secs; extern unsigned long sysctl_hung_task_warnings; extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, - struct file *filp, void __user *buffer, + void __user *buffer, size_t *lenp, loff_t *ppos); #endif @@ -422,6 +426,15 @@ static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm) return max(mm->hiwater_rss, get_mm_rss(mm)); } +static inline void setmax_mm_hiwater_rss(unsigned long *maxrss, + struct mm_struct *mm) +{ + unsigned long hiwater_rss = get_mm_hiwater_rss(mm); + + if (*maxrss < hiwater_rss) + *maxrss = hiwater_rss; +} + static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm) { return max(mm->hiwater_vm, mm->total_vm); @@ -434,7 +447,9 @@ extern int get_dumpable(struct mm_struct *mm); /* dumpable bits */ #define MMF_DUMPABLE 0 /* core dump is permitted */ #define MMF_DUMP_SECURELY 1 /* core file is readable only by root */ + #define MMF_DUMPABLE_BITS 2 +#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1) /* coredump filter bits */ #define MMF_DUMP_ANON_PRIVATE 2 @@ -444,6 +459,7 @@ extern int get_dumpable(struct mm_struct *mm); #define MMF_DUMP_ELF_HEADERS 6 #define MMF_DUMP_HUGETLB_PRIVATE 7 #define MMF_DUMP_HUGETLB_SHARED 8 + #define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS #define MMF_DUMP_FILTER_BITS 7 #define MMF_DUMP_FILTER_MASK \ @@ -457,6 +473,10 @@ extern int get_dumpable(struct mm_struct *mm); #else # define MMF_DUMP_MASK_DEFAULT_ELF 0 #endif + /* leave room for more dump flags */ +#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */ + +#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK) struct sighand_struct { atomic_t count; @@ -473,6 +493,13 @@ struct pacct_struct { unsigned long ac_minflt, ac_majflt; }; +struct cpu_itimer { + cputime_t expires; + cputime_t incr; + u32 error; + u32 incr_error; +}; + /** * struct task_cputime - collected CPU time counts * @utime: time spent in user mode, in &cputime_t units @@ -567,9 +594,12 @@ struct signal_struct { struct pid *leader_pid; ktime_t it_real_incr; - /* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */ - 
cputime_t it_prof_expires, it_virt_expires; - cputime_t it_prof_incr, it_virt_incr; + /* + * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use + * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these + * values are defined to 0 and 1 respectively + */ + struct cpu_itimer it[2]; /* * Thread group totals for process CPU timers. @@ -601,6 +631,7 @@ struct signal_struct { unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt; unsigned long inblock, oublock, cinblock, coublock; + unsigned long maxrss, cmaxrss; struct task_io_accounting ioac; /* @@ -632,6 +663,8 @@ struct signal_struct { unsigned audit_tty; struct tty_audit_buf *tty_audit_buf; #endif + + int oom_adj; /* OOM kill score adjustment (bit shift) */ }; /* Context switch must be unlocked if interrupts are to be enabled */ @@ -701,7 +734,7 @@ struct user_struct { #endif #endif -#ifdef CONFIG_PERF_COUNTERS +#ifdef CONFIG_PERF_EVENTS atomic_long_t locked_vm; #endif }; @@ -1214,7 +1247,6 @@ struct task_struct { * a short time */ unsigned char fpu_counter; - s8 oomkilladj; /* OOM kill score adjustment (bit shift). */ #ifdef CONFIG_BLK_DEV_IO_TRACE unsigned int btrace_seq; #endif @@ -1239,7 +1271,6 @@ struct task_struct { struct mm_struct *mm, *active_mm; /* task state */ - struct linux_binfmt *binfmt; int exit_state; int exit_code, exit_signal; int pdeath_signal; /* The signal sent when the parent dies */ @@ -1390,17 +1421,17 @@ struct task_struct { #endif #ifdef CONFIG_TRACE_IRQFLAGS unsigned int irq_events; - int hardirqs_enabled; unsigned long hardirq_enable_ip; - unsigned int hardirq_enable_event; unsigned long hardirq_disable_ip; + unsigned int hardirq_enable_event; unsigned int hardirq_disable_event; - int softirqs_enabled; + int hardirqs_enabled; + int hardirq_context; unsigned long softirq_disable_ip; - unsigned int softirq_disable_event; unsigned long softirq_enable_ip; + unsigned int softirq_disable_event; unsigned int softirq_enable_event; - int hardirq_context; + int softirqs_enabled; int softirq_context; #endif #ifdef CONFIG_LOCKDEP @@ -1451,10 +1482,10 @@ struct task_struct { struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; #endif -#ifdef CONFIG_PERF_COUNTERS - struct perf_counter_context *perf_counter_ctxp; - struct mutex perf_counter_mutex; - struct list_head perf_counter_list; +#ifdef CONFIG_PERF_EVENTS + struct perf_event_context *perf_event_ctxp; + struct mutex perf_event_mutex; + struct list_head perf_event_list; #endif #ifdef CONFIG_NUMA struct mempolicy *mempolicy; /* Protected by alloc_lock */ @@ -1507,6 +1538,7 @@ struct task_struct { /* bitmask of trace recursion */ unsigned long trace_recursion; #endif /* CONFIG_TRACING */ + unsigned long stack_start; }; /* Future-safe accessor for struct task_struct's cpus_allowed. 
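With the cpu_itimer change above, per-process ITIMER_PROF/ITIMER_VIRTUAL state lives in signal_struct.it[] and is indexed by CPUCLOCK_PROF/CPUCLOCK_VIRT rather than read from the old it_prof_*/it_virt_* fields. A minimal sketch of a reader, assuming the usual cputime helpers; the function name is invented for illustration:

#include <linux/posix-timers.h>	/* CPUCLOCK_PROF == 0, CPUCLOCK_VIRT == 1 */

static inline int example_prof_itimer_armed(struct signal_struct *sig)
{
	const struct cpu_itimer *it = &sig->it[CPUCLOCK_PROF];

	/* armed if either the expiry time or the reload interval is set */
	return !cputime_eq(it->expires, cputime_zero) ||
	       !cputime_eq(it->incr, cputime_zero);
}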
*/ @@ -1702,6 +1734,7 @@ extern cputime_t task_gtime(struct task_struct *p); #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ #define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */ +#define PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */ #define PF_SUPERPRIV 0x00000100 /* used super-user privileges */ #define PF_DUMPCORE 0x00000200 /* dumped core */ #define PF_SIGNALED 0x00000400 /* killed by a signal */ @@ -1713,7 +1746,7 @@ extern cputime_t task_gtime(struct task_struct *p); #define PF_FROZEN 0x00010000 /* frozen for system suspend */ #define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ #define PF_KSWAPD 0x00040000 /* I am kswapd */ -#define PF_SWAPOFF 0x00080000 /* I am in swapoff */ +#define PF_OOM_ORIGIN 0x00080000 /* Allocating much memory to others */ #define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ #define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */ @@ -1721,6 +1754,7 @@ extern cputime_t task_gtime(struct task_struct *p); #define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */ #define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */ #define PF_THREAD_BOUND 0x04000000 /* Thread bound to specific cpu */ +#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezeable */ @@ -1784,10 +1818,13 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p, return 0; } #endif + +#ifndef CONFIG_CPUMASK_OFFSTACK static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) { return set_cpus_allowed_ptr(p, &new_mask); } +#endif /* * Architectures can set this to 1 if they have specified @@ -1870,7 +1907,7 @@ extern unsigned int sysctl_sched_time_avg; extern unsigned int sysctl_timer_migration; int sched_nr_latency_handler(struct ctl_table *table, int write, - struct file *file, void __user *buffer, size_t *length, + void __user *buffer, size_t *length, loff_t *ppos); #endif #ifdef CONFIG_SCHED_DEBUG @@ -1888,7 +1925,7 @@ extern unsigned int sysctl_sched_rt_period; extern int sysctl_sched_rt_runtime; int sched_rt_handler(struct ctl_table *table, int write, - struct file *filp, void __user *buffer, size_t *lenp, + void __user *buffer, size_t *lenp, loff_t *ppos); extern unsigned int sysctl_sched_compat_yield; @@ -2023,6 +2060,7 @@ extern int kill_pgrp(struct pid *pid, int sig, int priv); extern int kill_pid(struct pid *pid, int sig, int priv); extern int kill_proc_info(int, struct siginfo *, pid_t); extern int do_notify_parent(struct task_struct *, int); +extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); extern void force_sig(int, struct task_struct *); extern void force_sig_specific(int, struct task_struct *); extern int send_sig(int, struct task_struct *, int); @@ -2300,7 +2338,10 @@ static inline int signal_pending(struct task_struct *p) return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING)); } -extern int __fatal_signal_pending(struct task_struct *p); +static inline int __fatal_signal_pending(struct task_struct *p) +{ + return unlikely(sigismember(&p->pending.signal, SIGKILL)); +} static inline int fatal_signal_pending(struct task_struct *p) { diff --git 
a/include/linux/securebits.h b/include/linux/securebits.h index d2c5ed845bcc..33406174cbe8 100644 --- a/include/linux/securebits.h +++ b/include/linux/securebits.h @@ -1,6 +1,15 @@ #ifndef _LINUX_SECUREBITS_H #define _LINUX_SECUREBITS_H 1 +/* Each securesetting is implemented using two bits. One bit specifies + whether the setting is on or off. The other bit specify whether the + setting is locked or not. A setting which is locked cannot be + changed from user-level. */ +#define issecure_mask(X) (1 << (X)) +#ifdef __KERNEL__ +#define issecure(X) (issecure_mask(X) & current_cred_xxx(securebits)) +#endif + #define SECUREBITS_DEFAULT 0x00000000 /* When set UID 0 has no special privileges. When unset, we support @@ -12,6 +21,9 @@ #define SECURE_NOROOT 0 #define SECURE_NOROOT_LOCKED 1 /* make bit-0 immutable */ +#define SECBIT_NOROOT (issecure_mask(SECURE_NOROOT)) +#define SECBIT_NOROOT_LOCKED (issecure_mask(SECURE_NOROOT_LOCKED)) + /* When set, setuid to/from uid 0 does not trigger capability-"fixup". When unset, to provide compatiblility with old programs relying on set*uid to gain/lose privilege, transitions to/from uid 0 cause @@ -19,6 +31,10 @@ #define SECURE_NO_SETUID_FIXUP 2 #define SECURE_NO_SETUID_FIXUP_LOCKED 3 /* make bit-2 immutable */ +#define SECBIT_NO_SETUID_FIXUP (issecure_mask(SECURE_NO_SETUID_FIXUP)) +#define SECBIT_NO_SETUID_FIXUP_LOCKED \ + (issecure_mask(SECURE_NO_SETUID_FIXUP_LOCKED)) + /* When set, a process can retain its capabilities even after transitioning to a non-root user (the set-uid fixup suppressed by bit 2). Bit-4 is cleared when a process calls exec(); setting both @@ -27,12 +43,8 @@ #define SECURE_KEEP_CAPS 4 #define SECURE_KEEP_CAPS_LOCKED 5 /* make bit-4 immutable */ -/* Each securesetting is implemented using two bits. One bit specifies - whether the setting is on or off. The other bit specify whether the - setting is locked or not. A setting which is locked cannot be - changed from user-level. */ -#define issecure_mask(X) (1 << (X)) -#define issecure(X) (issecure_mask(X) & current_cred_xxx(securebits)) +#define SECBIT_KEEP_CAPS (issecure_mask(SECURE_KEEP_CAPS)) +#define SECBIT_KEEP_CAPS_LOCKED (issecure_mask(SECURE_KEEP_CAPS_LOCKED)) #define SECURE_ALL_BITS (issecure_mask(SECURE_NOROOT) | \ issecure_mask(SECURE_NO_SETUID_FIXUP) | \ diff --git a/include/linux/security.h b/include/linux/security.h index d050b66ab9ef..466cbadbd1ef 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -133,7 +133,7 @@ static inline unsigned long round_hint_to_min(unsigned long hint) return PAGE_ALIGN(mmap_min_addr); return hint; } -extern int mmap_min_addr_handler(struct ctl_table *table, int write, struct file *filp, +extern int mmap_min_addr_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); #ifdef CONFIG_SECURITY @@ -447,6 +447,22 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @new_dir contains the path structure for parent of the new link. * @new_dentry contains the dentry structure of the new link. * Return 0 if permission is granted. + * @path_chmod: + * Check for permission to change DAC's permission of a file or directory. + * @dentry contains the dentry structure. + * @mnt contains the vfsmnt structure. + * @mode contains DAC's mode. + * Return 0 if permission is granted. + * @path_chown: + * Check for permission to change owner/group of a file or directory. + * @path contains the path structure. + * @uid contains new owner's ID. + * @gid contains new group's ID. 
+ * Return 0 if permission is granted. + * @path_chroot: + * Check for permission to change root directory. + * @path contains the path structure. + * Return 0 if permission is granted. * @inode_readlink: * Check the permission to read the symbolic link. * @dentry contains the dentry structure for the file link. @@ -690,6 +706,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @kernel_module_request: * Ability to trigger the kernel to automatically upcall to userspace for * userspace to load a kernel module with the given name. + * @kmod_name name of the module requested by the kernel * Return 0 if successful. * @task_setuid: * Check permission before setting one or more of the user identity @@ -1488,6 +1505,10 @@ struct security_operations { struct dentry *new_dentry); int (*path_rename) (struct path *old_dir, struct dentry *old_dentry, struct path *new_dir, struct dentry *new_dentry); + int (*path_chmod) (struct dentry *dentry, struct vfsmount *mnt, + mode_t mode); + int (*path_chown) (struct path *path, uid_t uid, gid_t gid); + int (*path_chroot) (struct path *path); #endif int (*inode_alloc_security) (struct inode *inode); @@ -1557,7 +1578,7 @@ struct security_operations { void (*cred_transfer)(struct cred *new, const struct cred *old); int (*kernel_act_as)(struct cred *new, u32 secid); int (*kernel_create_files_as)(struct cred *new, struct inode *inode); - int (*kernel_module_request)(void); + int (*kernel_module_request)(char *kmod_name); int (*task_setuid) (uid_t id0, uid_t id1, uid_t id2, int flags); int (*task_fix_setuid) (struct cred *new, const struct cred *old, int flags); @@ -1822,7 +1843,7 @@ void security_commit_creds(struct cred *new, const struct cred *old); void security_transfer_creds(struct cred *new, const struct cred *old); int security_kernel_act_as(struct cred *new, u32 secid); int security_kernel_create_files_as(struct cred *new, struct inode *inode); -int security_kernel_module_request(void); +int security_kernel_module_request(char *kmod_name); int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags); int security_task_fix_setuid(struct cred *new, const struct cred *old, int flags); @@ -2387,7 +2408,7 @@ static inline int security_kernel_create_files_as(struct cred *cred, return 0; } -static inline int security_kernel_module_request(void) +static inline int security_kernel_module_request(char *kmod_name) { return 0; } @@ -2952,6 +2973,10 @@ int security_path_link(struct dentry *old_dentry, struct path *new_dir, struct dentry *new_dentry); int security_path_rename(struct path *old_dir, struct dentry *old_dentry, struct path *new_dir, struct dentry *new_dentry); +int security_path_chmod(struct dentry *dentry, struct vfsmount *mnt, + mode_t mode); +int security_path_chown(struct path *path, uid_t uid, gid_t gid); +int security_path_chroot(struct path *path); #else /* CONFIG_SECURITY_PATH */ static inline int security_path_unlink(struct path *dir, struct dentry *dentry) { @@ -3001,6 +3026,23 @@ static inline int security_path_rename(struct path *old_dir, { return 0; } + +static inline int security_path_chmod(struct dentry *dentry, + struct vfsmount *mnt, + mode_t mode) +{ + return 0; +} + +static inline int security_path_chown(struct path *path, uid_t uid, gid_t gid) +{ + return 0; +} + +static inline int security_path_chroot(struct path *path) +{ + return 0; +} #endif /* CONFIG_SECURITY_PATH */ #ifdef CONFIG_KEYS diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index 0c6a86b79596..8366d8f12e53 100644 --- 
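The securebits.h rework earlier in this region keeps issecure() kernel-only and exposes SECBIT_* masks so the header can also be consumed from userspace. A hedged userspace sketch, assuming the caller holds CAP_SETPCAP; the function name is invented:

#include <sys/prctl.h>
#include <linux/prctl.h>	/* PR_SET_SECUREBITS */
#include <linux/securebits.h>	/* SECBIT_* masks added above */

/* Turn on SECURE_KEEP_CAPS and lock it; returns 0, or -1 with errno set. */
int example_lock_keep_caps(void)
{
	return prctl(PR_SET_SECUREBITS,
		     SECBIT_KEEP_CAPS | SECBIT_KEEP_CAPS_LOCKED, 0, 0, 0);
}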
a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -35,6 +35,44 @@ struct seq_operations { #define SEQ_SKIP 1 +/** + * seq_get_buf - get buffer to write arbitrary data to + * @m: the seq_file handle + * @bufp: the beginning of the buffer is stored here + * + * Return the number of bytes available in the buffer, or zero if + * there's no space. + */ +static inline size_t seq_get_buf(struct seq_file *m, char **bufp) +{ + BUG_ON(m->count > m->size); + if (m->count < m->size) + *bufp = m->buf + m->count; + else + *bufp = NULL; + + return m->size - m->count; +} + +/** + * seq_commit - commit data to the buffer + * @m: the seq_file handle + * @num: the number of bytes to commit + * + * Commit @num bytes of data written to a buffer previously acquired + * by seq_buf_get. To signal an error condition, or that the data + * didn't fit in the available space, pass a negative @num value. + */ +static inline void seq_commit(struct seq_file *m, int num) +{ + if (num < 0) { + m->count = m->size; + } else { + BUG_ON(m->count + num > m->size); + m->count += num; + } +} + char *mangle_path(char *s, char *p, char *esc); int seq_open(struct file *, const struct seq_operations *); ssize_t seq_read(struct file *, char __user *, size_t, loff_t *); diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index d58e460844dd..db532ce288be 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -176,6 +176,9 @@ /* Qualcomm MSM SoCs */ #define PORT_MSM 88 +/* BCM63xx family SoCs */ +#define PORT_BCM63XX 89 + #ifdef __KERNEL__ #include <linux/compiler.h> @@ -477,7 +480,7 @@ static inline int uart_handle_break(struct uart_port *port) /** * uart_handle_dcd_change - handle a change of carrier detect state - * @port: uart_port structure for the open port + * @uport: uart_port structure for the open port * @status: new carrier detect status, nonzero if active */ static inline void @@ -503,7 +506,7 @@ uart_handle_dcd_change(struct uart_port *uport, unsigned int status) /** * uart_handle_cts_change - handle a change of clear-to-send state - * @port: uart_port structure for the open port + * @uport: uart_port structure for the open port * @status: new clear to send status, nonzero if active */ static inline void diff --git a/include/linux/sfi.h b/include/linux/sfi.h new file mode 100644 index 000000000000..9a6f7607174e --- /dev/null +++ b/include/linux/sfi.h @@ -0,0 +1,206 @@ +/* sfi.h Simple Firmware Interface */ + +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2009 Intel Corporation. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + The full GNU General Public License is included in this distribution + in the file called LICENSE.GPL. + + BSD LICENSE + + Copyright(c) 2009 Intel Corporation. All rights reserved. 
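The seq_get_buf()/seq_commit() pair added to seq_file.h lets a show() method hand its buffer to code that formats in place. A minimal sketch of that pattern; example_format() is an assumed helper that returns the number of bytes written, or a negative value when the data does not fit:

static int example_show(struct seq_file *m, void *v)
{
	char *buf;
	size_t size = seq_get_buf(m, &buf);	/* bytes available, 0 if the buffer is full */
	int used = -1;				/* negative means "did not fit" */

	if (size)
		used = example_format(buf, size);

	seq_commit(m, used);	/* a negative count marks the buffer full so the core can retry larger */
	return 0;
}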
+ + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +#ifndef _LINUX_SFI_H +#define _LINUX_SFI_H + +/* Table signatures reserved by the SFI specification */ +#define SFI_SIG_SYST "SYST" +#define SFI_SIG_FREQ "FREQ" +#define SFI_SIG_IDLE "IDLE" +#define SFI_SIG_CPUS "CPUS" +#define SFI_SIG_MTMR "MTMR" +#define SFI_SIG_MRTC "MRTC" +#define SFI_SIG_MMAP "MMAP" +#define SFI_SIG_APIC "APIC" +#define SFI_SIG_XSDT "XSDT" +#define SFI_SIG_WAKE "WAKE" +#define SFI_SIG_SPIB "SPIB" +#define SFI_SIG_I2CB "I2CB" +#define SFI_SIG_GPEM "GPEM" + +#define SFI_SIGNATURE_SIZE 4 +#define SFI_OEM_ID_SIZE 6 +#define SFI_OEM_TABLE_ID_SIZE 8 + +#define SFI_SYST_SEARCH_BEGIN 0x000E0000 +#define SFI_SYST_SEARCH_END 0x000FFFFF + +#define SFI_GET_NUM_ENTRIES(ptable, entry_type) \ + ((ptable->header.len - sizeof(struct sfi_table_header)) / \ + (sizeof(entry_type))) +/* + * Table structures must be byte-packed to match the SFI specification, + * as they are provided by the BIOS. 
+ */ +struct sfi_table_header { + char sig[SFI_SIGNATURE_SIZE]; + u32 len; + u8 rev; + u8 csum; + char oem_id[SFI_OEM_ID_SIZE]; + char oem_table_id[SFI_OEM_TABLE_ID_SIZE]; +} __packed; + +struct sfi_table_simple { + struct sfi_table_header header; + u64 pentry[1]; +} __packed; + +/* Comply with UEFI spec 2.1 */ +struct sfi_mem_entry { + u32 type; + u64 phys_start; + u64 virt_start; + u64 pages; + u64 attrib; +} __packed; + +struct sfi_cpu_table_entry { + u32 apic_id; +} __packed; + +struct sfi_cstate_table_entry { + u32 hint; /* MWAIT hint */ + u32 latency; /* latency in ms */ +} __packed; + +struct sfi_apic_table_entry { + u64 phys_addr; /* phy base addr for APIC reg */ +} __packed; + +struct sfi_freq_table_entry { + u32 freq_mhz; /* in MHZ */ + u32 latency; /* transition latency in ms */ + u32 ctrl_val; /* value to write to PERF_CTL */ +} __packed; + +struct sfi_wake_table_entry { + u64 phys_addr; /* pointer to where the wake vector locates */ +} __packed; + +struct sfi_timer_table_entry { + u64 phys_addr; /* phy base addr for the timer */ + u32 freq_hz; /* in HZ */ + u32 irq; +} __packed; + +struct sfi_rtc_table_entry { + u64 phys_addr; /* phy base addr for the RTC */ + u32 irq; +} __packed; + +struct sfi_spi_table_entry { + u16 host_num; /* attached to host 0, 1...*/ + u16 cs; /* chip select */ + u16 irq_info; + char name[16]; + u8 dev_info[10]; +} __packed; + +struct sfi_i2c_table_entry { + u16 host_num; + u16 addr; /* slave addr */ + u16 irq_info; + char name[16]; + u8 dev_info[10]; +} __packed; + +struct sfi_gpe_table_entry { + u16 logical_id; /* logical id */ + u16 phys_id; /* physical GPE id */ +} __packed; + + +typedef int (*sfi_table_handler) (struct sfi_table_header *table); + +#ifdef CONFIG_SFI +extern void __init sfi_init(void); +extern int __init sfi_platform_init(void); +extern void __init sfi_init_late(void); +extern int sfi_table_parse(char *signature, char *oem_id, char *oem_table_id, + sfi_table_handler handler); + +extern int sfi_disabled; +static inline void disable_sfi(void) +{ + sfi_disabled = 1; +} + +#else /* !CONFIG_SFI */ + +static inline void sfi_init(void) +{ +} + +static inline void sfi_init_late(void) +{ +} + +#define sfi_disabled 0 + +static inline int sfi_table_parse(char *signature, char *oem_id, + char *oem_table_id, + sfi_table_handler handler) +{ + return -1; +} + +#endif /* !CONFIG_SFI */ + +#endif /*_LINUX_SFI_H*/ diff --git a/include/linux/sfi_acpi.h b/include/linux/sfi_acpi.h new file mode 100644 index 000000000000..c4a5a8cd4469 --- /dev/null +++ b/include/linux/sfi_acpi.h @@ -0,0 +1,93 @@ +/* sfi.h Simple Firmware Interface */ + +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2009 Intel Corporation. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
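sfi_table_parse() above hands any table matching the given signature (and optional OEM ids) to a caller-supplied sfi_table_handler. A sketch of walking the CPUS table with SFI_GET_NUM_ENTRIES(); the printout and function names are illustrative only:

static int __init example_parse_cpus(struct sfi_table_header *table)
{
	struct sfi_table_simple *sb = (struct sfi_table_simple *)table;
	struct sfi_cpu_table_entry *pentry;
	int i, num;

	pentry = (struct sfi_cpu_table_entry *)sb->pentry;
	num = SFI_GET_NUM_ENTRIES(sb, struct sfi_cpu_table_entry);

	for (i = 0; i < num; i++, pentry++)
		pr_info("SFI CPU %d: APIC id %u\n", i, pentry->apic_id);

	return 0;
}

static int __init example_sfi_setup(void)
{
	/* NULL oem_id/oem_table_id: match on the signature alone */
	return sfi_table_parse(SFI_SIG_CPUS, NULL, NULL, example_parse_cpus);
}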
+ The full GNU General Public License is included in this distribution + in the file called LICENSE.GPL. + + BSD LICENSE + + Copyright(c) 2009 Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +#ifndef _LINUX_SFI_ACPI_H +#define _LINUX_SFI_ACPI_H + +#ifdef CONFIG_SFI +#include <acpi/acpi.h> /* struct acpi_table_header */ + +extern int sfi_acpi_table_parse(char *signature, char *oem_id, + char *oem_table_id, + int (*handler)(struct acpi_table_header *)); + +static inline int acpi_sfi_table_parse(char *signature, + int (*handler)(struct acpi_table_header *)) +{ + if (!acpi_table_parse(signature, handler)) + return 0; + + return sfi_acpi_table_parse(signature, NULL, NULL, handler); +} +#else /* !CONFIG_SFI */ + +static inline int sfi_acpi_table_parse(char *signature, char *oem_id, + char *oem_table_id, + int (*handler)(struct acpi_table_header *)) +{ + return -1; +} + +static inline int acpi_sfi_table_parse(char *signature, + int (*handler)(struct acpi_table_header *)) +{ + return acpi_table_parse(signature, handler); +} +#endif /* !CONFIG_SFI */ + +#endif /*_LINUX_SFI_ACPI_H*/ diff --git a/include/linux/signal.h b/include/linux/signal.h index c7552836bd95..ab9272cc270c 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -233,6 +233,8 @@ static inline int valid_signal(unsigned long sig) } extern int next_signal(struct sigpending *pending, sigset_t *mask); +extern int do_send_sig_info(int sig, struct siginfo *info, + struct task_struct *p, bool group); extern int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p); extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *); extern long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index df7b23ac66e6..bcdd6606f468 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -354,8 +354,8 @@ struct sk_buff { ipvs_property:1, peeked:1, nf_trace:1; + __be16 protocol:16; kmemcheck_bitfield_end(flags1); - __be16 protocol; void (*destructor)(struct sk_buff *skb); #if 
defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) @@ -367,7 +367,6 @@ struct sk_buff { #endif int iif; - __u16 queue_mapping; #ifdef CONFIG_NET_SCHED __u16 tc_index; /* traffic control index */ #ifdef CONFIG_NET_CLS_ACT @@ -376,6 +375,7 @@ struct sk_buff { #endif kmemcheck_bitfield_begin(flags2); + __u16 queue_mapping:16; #ifdef CONFIG_IPV6_NDISC_NODETYPE __u8 ndisc_nodetype:2; #endif @@ -1757,6 +1757,8 @@ extern int skb_copy_datagram_const_iovec(const struct sk_buff *from, int to_offset, int size); extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb); +extern void skb_free_datagram_locked(struct sock *sk, + struct sk_buff *skb); extern int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags); extern __wsum skb_checksum(const struct sk_buff *skb, int offset, diff --git a/include/linux/slow-work.h b/include/linux/slow-work.h index b65c8881f07a..13337bf6c3f5 100644 --- a/include/linux/slow-work.h +++ b/include/linux/slow-work.h @@ -17,13 +17,20 @@ #ifdef CONFIG_SLOW_WORK #include <linux/sysctl.h> +#include <linux/timer.h> struct slow_work; +#ifdef CONFIG_SLOW_WORK_DEBUG +struct seq_file; +#endif /* * The operations used to support slow work items */ struct slow_work_ops { + /* owner */ + struct module *owner; + /* get a ref on a work item * - return 0 if successful, -ve if not */ @@ -34,6 +41,11 @@ struct slow_work_ops { /* execute a work item */ void (*execute)(struct slow_work *work); + +#ifdef CONFIG_SLOW_WORK_DEBUG + /* describe a work item for debugfs */ + void (*desc)(struct slow_work *work, struct seq_file *m); +#endif }; /* @@ -42,13 +54,24 @@ struct slow_work_ops { * queued */ struct slow_work { + struct module *owner; /* the owning module */ unsigned long flags; #define SLOW_WORK_PENDING 0 /* item pending (further) execution */ #define SLOW_WORK_EXECUTING 1 /* item currently executing */ #define SLOW_WORK_ENQ_DEFERRED 2 /* item enqueue deferred */ #define SLOW_WORK_VERY_SLOW 3 /* item is very slow */ +#define SLOW_WORK_CANCELLING 4 /* item is being cancelled, don't enqueue */ +#define SLOW_WORK_DELAYED 5 /* item is struct delayed_slow_work with active timer */ const struct slow_work_ops *ops; /* operations table for this item */ struct list_head link; /* link in queue */ +#ifdef CONFIG_SLOW_WORK_DEBUG + struct timespec mark; /* jiffies at which queued or exec begun */ +#endif +}; + +struct delayed_slow_work { + struct slow_work work; + struct timer_list timer; }; /** @@ -67,6 +90,20 @@ static inline void slow_work_init(struct slow_work *work, } /** + * slow_work_init - Initialise a delayed slow work item + * @work: The work item to initialise + * @ops: The operations to use to handle the slow work item + * + * Initialise a delayed slow work item. + */ +static inline void delayed_slow_work_init(struct delayed_slow_work *dwork, + const struct slow_work_ops *ops) +{ + init_timer(&dwork->timer); + slow_work_init(&dwork->work, ops); +} + +/** * vslow_work_init - Initialise a very slow work item * @work: The work item to initialise * @ops: The operations to use to handle the slow work item @@ -83,9 +120,40 @@ static inline void vslow_work_init(struct slow_work *work, INIT_LIST_HEAD(&work->link); } +/** + * slow_work_is_queued - Determine if a slow work item is on the work queue + * work: The work item to test + * + * Determine if the specified slow-work item is on the work queue. This + * returns true if it is actually on the queue. 
+ * + * If the item is executing and has been marked for requeue when execution + * finishes, then false will be returned. + * + * Anyone wishing to wait for completion of execution can wait on the + * SLOW_WORK_EXECUTING bit. + */ +static inline bool slow_work_is_queued(struct slow_work *work) +{ + unsigned long flags = work->flags; + return flags & SLOW_WORK_PENDING && !(flags & SLOW_WORK_EXECUTING); +} + extern int slow_work_enqueue(struct slow_work *work); -extern int slow_work_register_user(void); -extern void slow_work_unregister_user(void); +extern void slow_work_cancel(struct slow_work *work); +extern int slow_work_register_user(struct module *owner); +extern void slow_work_unregister_user(struct module *owner); + +extern int delayed_slow_work_enqueue(struct delayed_slow_work *dwork, + unsigned long delay); + +static inline void delayed_slow_work_cancel(struct delayed_slow_work *dwork) +{ + slow_work_cancel(&dwork->work); +} + +extern bool slow_work_sleep_till_thread_needed(struct slow_work *work, + signed long *_timeout); #ifdef CONFIG_SYSCTL extern ctl_table slow_work_sysctls[]; diff --git a/include/linux/smp.h b/include/linux/smp.h index 9e3d8af09207..7a0570e6a596 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -73,18 +73,12 @@ int smp_call_function(void(*func)(void *info), void *info, int wait); void smp_call_function_many(const struct cpumask *mask, void (*func)(void *info), void *info, bool wait); -/* Deprecated: Use smp_call_function_many which takes a pointer to the mask. */ -static inline int -smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info, - int wait) -{ - smp_call_function_many(&mask, func, info, wait); - return 0; -} - void __smp_call_function_single(int cpuid, struct call_single_data *data, int wait); +int smp_call_function_any(const struct cpumask *mask, + void (*func)(void *info), void *info, int wait); + /* * Generic and arch helpers */ @@ -144,13 +138,17 @@ static inline int up_smp_call_function(void (*func)(void *), void *info) static inline void smp_send_reschedule(int cpu) { } #define num_booting_cpus() 1 #define smp_prepare_boot_cpu() do {} while (0) -#define smp_call_function_mask(mask, func, info, wait) \ - (up_smp_call_function(func, info)) #define smp_call_function_many(mask, func, info, wait) \ (up_smp_call_function(func, info)) -static inline void init_call_single_data(void) +static inline void init_call_single_data(void) { } + +static inline int +smp_call_function_any(const struct cpumask *mask, void (*func)(void *info), + void *info, int wait) { + return smp_call_function_single(0, func, info, wait); } + #endif /* !SMP */ /* diff --git a/include/linux/socket.h b/include/linux/socket.h index 3b461dffe244..3273a0c5043b 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -16,7 +16,7 @@ struct __kernel_sockaddr_storage { /* _SS_MAXSIZE value minus size of ss_family */ } __attribute__ ((aligned(_K_SS_ALIGNSIZE))); /* force desired alignment */ -#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) +#ifdef __KERNEL__ #include <asm/socket.h> /* arch-dependent defines */ #include <linux/sockios.h> /* the SIOCxxx I/O controls */ @@ -101,21 +101,6 @@ struct cmsghdr { ((char *)(cmsg) - (char *)(mhdr)->msg_control))) /* - * This mess will go away with glibc - */ - -#ifdef __KERNEL__ -#define __KINLINE static inline -#elif defined(__GNUC__) -#define __KINLINE static __inline__ -#elif defined(__cplusplus) -#define __KINLINE static inline -#else -#define __KINLINE static -#endif - - -/* * Get the 
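The slow-work changes above add module ownership and delayed items. A hedged sketch of a caller; every name here is invented, and a real get_ref/put_ref pair would pin whatever object embeds the work item:

static int example_work_get_ref(struct slow_work *work)
{
	return 0;			/* take a reference; 0 on success, -ve on failure */
}

static void example_work_put_ref(struct slow_work *work)
{
}

static void example_work_execute(struct slow_work *work)
{
	/* the long-running part goes here */
}

static const struct slow_work_ops example_work_ops = {
	.owner	 = THIS_MODULE,
	.get_ref = example_work_get_ref,
	.put_ref = example_work_put_ref,
	.execute = example_work_execute,
};

static struct delayed_slow_work example_work;

static int __init example_slow_work_start(void)
{
	int ret = slow_work_register_user(THIS_MODULE);

	if (ret < 0)
		return ret;

	delayed_slow_work_init(&example_work, &example_work_ops);
	return delayed_slow_work_enqueue(&example_work, 5 * HZ);	/* run in ~5 seconds */
}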
next cmsg header * * PLEASE, do not touch this function. If you think, that it is @@ -128,7 +113,7 @@ struct cmsghdr { * ancillary object DATA. --ANK (980731) */ -__KINLINE struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size, +static inline struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size, struct cmsghdr *__cmsg) { struct cmsghdr * __ptr; @@ -140,7 +125,7 @@ __KINLINE struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size, return __ptr; } -__KINLINE struct cmsghdr * cmsg_nxthdr (struct msghdr *__msg, struct cmsghdr *__cmsg) +static inline struct cmsghdr * cmsg_nxthdr (struct msghdr *__msg, struct cmsghdr *__cmsg) { return __cmsg_nxthdr(__msg->msg_control, __msg->msg_controllen, __cmsg); } diff --git a/include/linux/spi/lms283gf05.h b/include/linux/spi/lms283gf05.h new file mode 100644 index 000000000000..555d254e6606 --- /dev/null +++ b/include/linux/spi/lms283gf05.h @@ -0,0 +1,28 @@ +/* + * lms283gf05.h - Platform glue for Samsung LMS283GF05 LCD + * + * Copyright (C) 2009 Marek Vasut <marek.vasut@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +#ifndef _INCLUDE_LINUX_SPI_LMS283GF05_H_ +#define _INCLUDE_LINUX_SPI_LMS283GF05_H_ + +struct lms283gf05_pdata { + unsigned long reset_gpio; + bool reset_inverted; +}; + +#endif /* _INCLUDE_LINUX_SPI_LMS283GF05_H_ */ diff --git a/include/linux/spi/mc33880.h b/include/linux/spi/mc33880.h new file mode 100644 index 000000000000..82ffccd6fbe5 --- /dev/null +++ b/include/linux/spi/mc33880.h @@ -0,0 +1,10 @@ +#ifndef LINUX_SPI_MC33880_H +#define LINUX_SPI_MC33880_H + +struct mc33880_platform_data { + /* number assigned to the first GPIO */ + unsigned base; +}; + +#endif + diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index c47c4b4da97e..97b60b37f445 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -20,6 +20,7 @@ #define __LINUX_SPI_H #include <linux/device.h> +#include <linux/mod_devicetable.h> /* * INTERFACES between SPI master-side drivers and SPI infrastructure. @@ -86,7 +87,7 @@ struct spi_device { int irq; void *controller_state; void *controller_data; - char modalias[32]; + char modalias[SPI_NAME_SIZE]; /* * likely need more hooks for more protocol options affecting how @@ -145,6 +146,7 @@ struct spi_message; /** * struct spi_driver - Host side "protocol" driver + * @id_table: List of SPI devices supported by this driver * @probe: Binds this driver to the spi device. Drivers can verify * that the device is actually present, and may need to configure * characteristics (such as bits_per_word) which weren't needed for @@ -170,6 +172,7 @@ struct spi_message; * MMC, RTC, filesystem character device nodes, and hardware monitoring. 
*/ struct spi_driver { + const struct spi_device_id *id_table; int (*probe)(struct spi_device *spi); int (*remove)(struct spi_device *spi); void (*shutdown)(struct spi_device *spi); @@ -207,6 +210,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * each slave has a chipselect signal, but it's common that not * every chipselect is connected to a slave. * @dma_alignment: SPI controller constraint on DMA buffers alignment. + * @mode_bits: flags understood by this controller driver + * @flags: other constraints relevant to this driver * @setup: updates the device mode and clocking records used by a * device's SPI controller; protocol code may call this. This * must fail if an unrecognized or unsupported mode is requested. @@ -253,6 +258,8 @@ struct spi_master { /* other constraints relevant to this driver */ u16 flags; #define SPI_MASTER_HALF_DUPLEX BIT(0) /* can't do full duplex */ +#define SPI_MASTER_NO_RX BIT(1) /* can't do buffer read */ +#define SPI_MASTER_NO_TX BIT(2) /* can't do buffer write */ /* Setup mode and clock, etc (spi driver may call many times). * @@ -533,42 +540,7 @@ static inline void spi_message_free(struct spi_message *m) } extern int spi_setup(struct spi_device *spi); - -/** - * spi_async - asynchronous SPI transfer - * @spi: device with which data will be exchanged - * @message: describes the data transfers, including completion callback - * Context: any (irqs may be blocked, etc) - * - * This call may be used in_irq and other contexts which can't sleep, - * as well as from task contexts which can sleep. - * - * The completion callback is invoked in a context which can't sleep. - * Before that invocation, the value of message->status is undefined. - * When the callback is issued, message->status holds either zero (to - * indicate complete success) or a negative error code. After that - * callback returns, the driver which issued the transfer request may - * deallocate the associated memory; it's no longer in use by any SPI - * core or controller driver code. - * - * Note that although all messages to a spi_device are handled in - * FIFO order, messages may go to different devices in other orders. - * Some device might be higher priority, or have various "hard" access - * time requirements, for example. - * - * On detection of any fault during the transfer, processing of - * the entire message is aborted, and the device is deselected. - * Until returning from the associated message completion callback, - * no other spi_message queued to that device will be processed. - * (This rule applies equally to all the synchronous transfer calls, - * which are wrappers around this core asynchronous primitive.) 
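struct spi_driver now carries an id_table, so one protocol driver can bind several device names and tell them apart via driver_data. A sketch with made-up chip names (remove() still returns int in this era); registration would go through spi_register_driver():

static const struct spi_device_id example_spi_ids[] = {
	{ "example-chip-a", 0 },
	{ "example-chip-b", 1 },	/* driver_data distinguishes variants */
	{ }
};
MODULE_DEVICE_TABLE(spi, example_spi_ids);

static int example_spi_probe(struct spi_device *spi)
{
	/* spi_get_device_id() (declared further down) returns the matched entry */
	const struct spi_device_id *id = spi_get_device_id(spi);

	dev_info(&spi->dev, "probed variant %lu\n", (unsigned long)id->driver_data);
	return 0;
}

static int example_spi_remove(struct spi_device *spi)
{
	return 0;
}

static struct spi_driver example_spi_driver = {
	.id_table = example_spi_ids,
	.probe	  = example_spi_probe,
	.remove	  = example_spi_remove,
	.driver	  = {
		.name  = "example-spi",
		.owner = THIS_MODULE,
	},
};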
- */ -static inline int -spi_async(struct spi_device *spi, struct spi_message *message) -{ - message->spi = spi; - return spi->master->transfer(spi, message); -} +extern int spi_async(struct spi_device *spi, struct spi_message *message); /*---------------------------------------------------------------------------*/ @@ -732,7 +704,7 @@ struct spi_board_info { * controller_data goes to spi_device.controller_data, * irq is copied too */ - char modalias[32]; + char modalias[SPI_NAME_SIZE]; const void *platform_data; void *controller_data; int irq; @@ -800,4 +772,7 @@ spi_unregister_device(struct spi_device *spi) device_unregister(&spi->dev); } +extern const struct spi_device_id * +spi_get_device_id(const struct spi_device *sdev); + #endif /* __LINUX_SPI_H */ diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index f0ca7a7a1757..71dccfeb0d88 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -79,8 +79,6 @@ */ #include <linux/spinlock_types.h> -extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock); - /* * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them): */ @@ -102,7 +100,7 @@ do { \ #else # define spin_lock_init(lock) \ - do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0) + do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0) #endif #ifdef CONFIG_DEBUG_SPINLOCK @@ -116,7 +114,7 @@ do { \ } while (0) #else # define rwlock_init(lock) \ - do { *(lock) = RW_LOCK_UNLOCKED; } while (0) + do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0) #endif #define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock) diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index 7a7e18fc2415..8264a7f459bc 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h @@ -60,137 +60,118 @@ void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) __releases(lock); -/* - * We inline the unlock functions in the nondebug case: - */ -#if !defined(CONFIG_DEBUG_SPINLOCK) && !defined(CONFIG_PREEMPT) -#define __always_inline__spin_unlock -#define __always_inline__read_unlock -#define __always_inline__write_unlock -#define __always_inline__spin_unlock_irq -#define __always_inline__read_unlock_irq -#define __always_inline__write_unlock_irq -#endif - -#ifndef CONFIG_DEBUG_SPINLOCK -#ifndef CONFIG_GENERIC_LOCKBREAK - -#ifdef __always_inline__spin_lock +#ifdef CONFIG_INLINE_SPIN_LOCK #define _spin_lock(lock) __spin_lock(lock) #endif -#ifdef __always_inline__read_lock +#ifdef CONFIG_INLINE_READ_LOCK #define _read_lock(lock) __read_lock(lock) #endif -#ifdef __always_inline__write_lock +#ifdef CONFIG_INLINE_WRITE_LOCK #define _write_lock(lock) __write_lock(lock) #endif -#ifdef __always_inline__spin_lock_bh +#ifdef CONFIG_INLINE_SPIN_LOCK_BH #define _spin_lock_bh(lock) __spin_lock_bh(lock) #endif -#ifdef __always_inline__read_lock_bh +#ifdef CONFIG_INLINE_READ_LOCK_BH #define _read_lock_bh(lock) __read_lock_bh(lock) #endif -#ifdef __always_inline__write_lock_bh +#ifdef CONFIG_INLINE_WRITE_LOCK_BH #define _write_lock_bh(lock) __write_lock_bh(lock) #endif -#ifdef __always_inline__spin_lock_irq +#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ #define _spin_lock_irq(lock) __spin_lock_irq(lock) #endif -#ifdef __always_inline__read_lock_irq +#ifdef CONFIG_INLINE_READ_LOCK_IRQ #define _read_lock_irq(lock) __read_lock_irq(lock) #endif -#ifdef __always_inline__write_lock_irq +#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ #define 
_write_lock_irq(lock) __write_lock_irq(lock) #endif -#ifdef __always_inline__spin_lock_irqsave +#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE #define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock) #endif -#ifdef __always_inline__read_lock_irqsave +#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE #define _read_lock_irqsave(lock) __read_lock_irqsave(lock) #endif -#ifdef __always_inline__write_lock_irqsave +#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE #define _write_lock_irqsave(lock) __write_lock_irqsave(lock) #endif -#endif /* !CONFIG_GENERIC_LOCKBREAK */ - -#ifdef __always_inline__spin_trylock +#ifdef CONFIG_INLINE_SPIN_TRYLOCK #define _spin_trylock(lock) __spin_trylock(lock) #endif -#ifdef __always_inline__read_trylock +#ifdef CONFIG_INLINE_READ_TRYLOCK #define _read_trylock(lock) __read_trylock(lock) #endif -#ifdef __always_inline__write_trylock +#ifdef CONFIG_INLINE_WRITE_TRYLOCK #define _write_trylock(lock) __write_trylock(lock) #endif -#ifdef __always_inline__spin_trylock_bh +#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH #define _spin_trylock_bh(lock) __spin_trylock_bh(lock) #endif -#ifdef __always_inline__spin_unlock +#ifdef CONFIG_INLINE_SPIN_UNLOCK #define _spin_unlock(lock) __spin_unlock(lock) #endif -#ifdef __always_inline__read_unlock +#ifdef CONFIG_INLINE_READ_UNLOCK #define _read_unlock(lock) __read_unlock(lock) #endif -#ifdef __always_inline__write_unlock +#ifdef CONFIG_INLINE_WRITE_UNLOCK #define _write_unlock(lock) __write_unlock(lock) #endif -#ifdef __always_inline__spin_unlock_bh +#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH #define _spin_unlock_bh(lock) __spin_unlock_bh(lock) #endif -#ifdef __always_inline__read_unlock_bh +#ifdef CONFIG_INLINE_READ_UNLOCK_BH #define _read_unlock_bh(lock) __read_unlock_bh(lock) #endif -#ifdef __always_inline__write_unlock_bh +#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH #define _write_unlock_bh(lock) __write_unlock_bh(lock) #endif -#ifdef __always_inline__spin_unlock_irq +#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ #define _spin_unlock_irq(lock) __spin_unlock_irq(lock) #endif -#ifdef __always_inline__read_unlock_irq +#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ #define _read_unlock_irq(lock) __read_unlock_irq(lock) #endif -#ifdef __always_inline__write_unlock_irq +#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ #define _write_unlock_irq(lock) __write_unlock_irq(lock) #endif -#ifdef __always_inline__spin_unlock_irqrestore +#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE #define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags) #endif -#ifdef __always_inline__read_unlock_irqrestore +#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE #define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags) #endif -#ifdef __always_inline__write_unlock_irqrestore +#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE #define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags) #endif -#endif /* CONFIG_DEBUG_SPINLOCK */ - static inline int __spin_trylock(spinlock_t *lock) { preempt_disable(); diff --git a/include/linux/string.h b/include/linux/string.h index 489019ef1694..b8508868d5ad 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -62,7 +62,7 @@ extern char * strnchr(const char *, size_t, int); #ifndef __HAVE_ARCH_STRRCHR extern char * strrchr(const char *,int); #endif -extern char * strstrip(char *); +extern char * __must_check strstrip(char *); #ifndef __HAVE_ARCH_STRSTR extern char * strstr(const char *,const char *); #endif diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h index 3f632182d8eb..996df4dac7d4 100644 --- 
a/include/linux/sunrpc/auth.h +++ b/include/linux/sunrpc/auth.h @@ -111,7 +111,7 @@ struct rpc_credops { void (*crdestroy)(struct rpc_cred *); int (*crmatch)(struct auth_cred *, struct rpc_cred *, int); - void (*crbind)(struct rpc_task *, struct rpc_cred *); + void (*crbind)(struct rpc_task *, struct rpc_cred *, int); __be32 * (*crmarshal)(struct rpc_task *, __be32 *); int (*crrefresh)(struct rpc_task *); __be32 * (*crvalidate)(struct rpc_task *, __be32 *); @@ -140,7 +140,7 @@ struct rpc_cred * rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred * void rpcauth_init_cred(struct rpc_cred *, const struct auth_cred *, struct rpc_auth *, const struct rpc_credops *); struct rpc_cred * rpcauth_lookupcred(struct rpc_auth *, int); void rpcauth_bindcred(struct rpc_task *, struct rpc_cred *, int); -void rpcauth_generic_bind_cred(struct rpc_task *, struct rpc_cred *); +void rpcauth_generic_bind_cred(struct rpc_task *, struct rpc_cred *, int); void put_rpccred(struct rpc_cred *); void rpcauth_unbindcred(struct rpc_task *); __be32 * rpcauth_marshcred(struct rpc_task *, __be32 *); diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index ab3f6e90caa5..8ed9642a5a76 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -22,6 +22,7 @@ #include <linux/sunrpc/timer.h> #include <asm/signal.h> #include <linux/path.h> +#include <net/ipv6.h> struct rpc_inode; @@ -113,6 +114,7 @@ struct rpc_create_args { rpc_authflavor_t authflavor; unsigned long flags; char *client_name; + struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ }; /* Values for "flags" field */ @@ -188,5 +190,117 @@ static inline void rpc_set_port(struct sockaddr *sap, #define IPV6_SCOPE_DELIMITER '%' #define IPV6_SCOPE_ID_LEN sizeof("%nnnnnnnnnn") +static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1, + const struct sockaddr *sap2) +{ + const struct sockaddr_in *sin1 = (const struct sockaddr_in *)sap1; + const struct sockaddr_in *sin2 = (const struct sockaddr_in *)sap2; + + return sin1->sin_addr.s_addr == sin2->sin_addr.s_addr; +} + +static inline bool __rpc_copy_addr4(struct sockaddr *dst, + const struct sockaddr *src) +{ + const struct sockaddr_in *ssin = (struct sockaddr_in *) src; + struct sockaddr_in *dsin = (struct sockaddr_in *) dst; + + dsin->sin_family = ssin->sin_family; + dsin->sin_addr.s_addr = ssin->sin_addr.s_addr; + return true; +} + +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1, + const struct sockaddr *sap2) +{ + const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sap1; + const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sap2; + return ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr); +} + +static inline bool __rpc_copy_addr6(struct sockaddr *dst, + const struct sockaddr *src) +{ + const struct sockaddr_in6 *ssin6 = (const struct sockaddr_in6 *) src; + struct sockaddr_in6 *dsin6 = (struct sockaddr_in6 *) dst; + + dsin6->sin6_family = ssin6->sin6_family; + ipv6_addr_copy(&dsin6->sin6_addr, &ssin6->sin6_addr); + return true; +} +#else /* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */ +static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1, + const struct sockaddr *sap2) +{ + return false; +} + +static inline bool __rpc_copy_addr6(struct sockaddr *dst, + const struct sockaddr *src) +{ + return false; +} +#endif /* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */ + +/** + * rpc_cmp_addr - compare the address portion of two sockaddrs. 
+ * @sap1: first sockaddr + * @sap2: second sockaddr + * + * Just compares the family and address portion. Ignores port, scope, etc. + * Returns true if the addrs are equal, false if they aren't. + */ +static inline bool rpc_cmp_addr(const struct sockaddr *sap1, + const struct sockaddr *sap2) +{ + if (sap1->sa_family == sap2->sa_family) { + switch (sap1->sa_family) { + case AF_INET: + return __rpc_cmp_addr4(sap1, sap2); + case AF_INET6: + return __rpc_cmp_addr6(sap1, sap2); + } + } + return false; +} + +/** + * rpc_copy_addr - copy the address portion of one sockaddr to another + * @dst: destination sockaddr + * @src: source sockaddr + * + * Just copies the address portion and family. Ignores port, scope, etc. + * Caller is responsible for making certain that dst is large enough to hold + * the address in src. Returns true if address family is supported. Returns + * false otherwise. + */ +static inline bool rpc_copy_addr(struct sockaddr *dst, + const struct sockaddr *src) +{ + switch (src->sa_family) { + case AF_INET: + return __rpc_copy_addr4(dst, src); + case AF_INET6: + return __rpc_copy_addr6(dst, src); + } + return false; +} + +/** + * rpc_get_scope_id - return scopeid for a given sockaddr + * @sa: sockaddr to get scopeid from + * + * Returns the value of the sin6_scope_id for AF_INET6 addrs, or 0 if + * not an AF_INET6 address. + */ +static inline u32 rpc_get_scope_id(const struct sockaddr *sa) +{ + if (sa->sa_family != AF_INET6) + return 0; + + return ((struct sockaddr_in6 *) sa)->sin6_scope_id; +} + #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_CLNT_H */ diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index ea8009695c69..52e8cb0a7569 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -94,8 +94,6 @@ struct svc_serv { struct module * sv_module; /* optional module to count when * adding threads */ svc_thread_fn sv_function; /* main function for threads */ - unsigned int sv_drc_max_pages; /* Total pages for DRC */ - unsigned int sv_drc_pages_used;/* DRC pages used */ #if defined(CONFIG_NFS_V4_1) struct list_head sv_cb_list; /* queue for callback requests * that arrive over the same diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index 2223ae0b5ed5..5f4e18b3ce73 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -65,6 +65,7 @@ struct svc_xprt { size_t xpt_locallen; /* length of address */ struct sockaddr_storage xpt_remote; /* remote peer's address */ size_t xpt_remotelen; /* length of address */ + struct rpc_wait_queue xpt_bc_pending; /* backchannel wait queue */ }; int svc_reg_xprt_class(struct svc_xprt_class *); diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h index 04dba23c59f2..1b353a76c304 100644 --- a/include/linux/sunrpc/svcsock.h +++ b/include/linux/sunrpc/svcsock.h @@ -28,6 +28,7 @@ struct svc_sock { /* private TCP part */ u32 sk_reclen; /* length of record */ u32 sk_tcplen; /* current read length */ + struct rpc_xprt *sk_bc_xprt; /* NFSv4.1 backchannel xprt */ }; /* diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 7da466ba4b0d..f5cc0898bc53 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -11,6 +11,7 @@ #include <linux/uio.h> #include <asm/byteorder.h> +#include <asm/unaligned.h> #include <linux/scatterlist.h> /* @@ -117,14 +118,14 @@ static inline __be32 *xdr_encode_array(__be32 *p, const void *s, unsigned int le static inline __be32 * xdr_encode_hyper(__be32 *p, __u64 val) { - 
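rpc_cmp_addr() and rpc_copy_addr() above deal only with the family and address portion of a sockaddr. A small sketch of remembering the last peer seen; the static cache and function name are invented for illustration:

static struct sockaddr_storage example_last_peer;

static bool example_same_peer_as_last(const struct sockaddr *sap)
{
	bool same = rpc_cmp_addr((const struct sockaddr *)&example_last_peer, sap);

	if (!same)
		/* sockaddr_storage is large enough for any supported family */
		rpc_copy_addr((struct sockaddr *)&example_last_peer, sap);

	return same;
}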
*(__be64 *)p = cpu_to_be64(val); + put_unaligned_be64(val, p); return p + 2; } static inline __be32 * xdr_decode_hyper(__be32 *p, __u64 *valp) { - *valp = be64_to_cpup((__be64 *)p); + *valp = get_unaligned_be64(p); return p + 2; } diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index c090df442572..6f9457a75b8f 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -124,6 +124,23 @@ struct rpc_xprt_ops { void (*print_stats)(struct rpc_xprt *xprt, struct seq_file *seq); }; +/* + * RPC transport identifiers + * + * To preserve compatibility with the historical use of raw IP protocol + * id's for transport selection, UDP and TCP identifiers are specified + * with the previous values. No such restriction exists for new transports, + * except that they may not collide with these values (17 and 6, + * respectively). + */ +#define XPRT_TRANSPORT_BC (1 << 31) +enum xprt_transports { + XPRT_TRANSPORT_UDP = IPPROTO_UDP, + XPRT_TRANSPORT_TCP = IPPROTO_TCP, + XPRT_TRANSPORT_BC_TCP = IPPROTO_TCP | XPRT_TRANSPORT_BC, + XPRT_TRANSPORT_RDMA = 256 +}; + struct rpc_xprt { struct kref kref; /* Reference count */ struct rpc_xprt_ops * ops; /* transport methods */ @@ -179,6 +196,7 @@ struct rpc_xprt { spinlock_t reserve_lock; /* lock slot table */ u32 xid; /* Next XID value to use */ struct rpc_task * snd_task; /* Task blocked in send */ + struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ #if defined(CONFIG_NFS_V4_1) struct svc_serv *bc_serv; /* The RPC service which will */ /* process the callback */ @@ -231,6 +249,7 @@ struct xprt_create { struct sockaddr * srcaddr; /* optional local address */ struct sockaddr * dstaddr; /* remote peer address */ size_t addrlen; + struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ }; struct xprt_class { diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h index 54a379c9e8eb..c2f04e1ae159 100644 --- a/include/linux/sunrpc/xprtrdma.h +++ b/include/linux/sunrpc/xprtrdma.h @@ -41,11 +41,6 @@ #define _LINUX_SUNRPC_XPRTRDMA_H /* - * RPC transport identifier for RDMA - */ -#define XPRT_TRANSPORT_RDMA 256 - -/* * rpcbind (v3+) RDMA netid. */ #define RPCBIND_NETID_RDMA "rdma" diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h index c2a46c45c8f7..3f14a02e9cc0 100644 --- a/include/linux/sunrpc/xprtsock.h +++ b/include/linux/sunrpc/xprtsock.h @@ -13,17 +13,6 @@ int init_socket_xprt(void); void cleanup_socket_xprt(void); /* - * RPC transport identifiers for UDP, TCP - * - * To preserve compatibility with the historical use of raw IP protocol - * id's for transport selection, these are specified with the previous - * values. No such restriction exists for new transports, except that - * they may not collide with these values (17 and 6, respectively). 
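With the transport identifiers consolidated into enum xprt_transports, callers pick a transport symbolically rather than by raw IP protocol number. A tentative sketch of client creation under that assumption; the placeholder rpc_program and server name are invented and not functional as written:

static struct rpc_program example_rpc_program;	/* placeholder; a real one carries version tables */

static struct rpc_clnt *example_create_client(struct sockaddr *sap, size_t salen)
{
	struct rpc_create_args args = {
		.protocol	= XPRT_TRANSPORT_TCP,	/* rather than raw IPPROTO_TCP */
		.address	= sap,
		.addrsize	= salen,
		.servername	= "example-server",
		.program	= &example_rpc_program,
		.version	= 1,
		.authflavor	= RPC_AUTH_UNIX,
	};

	return rpc_create(&args);
}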
- */ -#define XPRT_TRANSPORT_UDP IPPROTO_UDP -#define XPRT_TRANSPORT_TCP IPPROTO_TCP - -/* * RPC slot table sizes for UDP, TCP transports */ extern unsigned int xprt_udp_slot_table_entries; diff --git a/include/linux/suspend.h b/include/linux/suspend.h index cd15df6c63cd..5e781d824e6d 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -301,6 +301,8 @@ static inline int unregister_pm_notifier(struct notifier_block *nb) #define pm_notifier(fn, pri) do { (void)(fn); } while (0) #endif /* !CONFIG_PM_SLEEP */ +extern struct mutex pm_mutex; + #ifndef CONFIG_HIBERNATION static inline void register_nosave_region(unsigned long b, unsigned long e) { @@ -308,8 +310,23 @@ static inline void register_nosave_region(unsigned long b, unsigned long e) static inline void register_nosave_region_late(unsigned long b, unsigned long e) { } -#endif -extern struct mutex pm_mutex; +static inline void lock_system_sleep(void) {} +static inline void unlock_system_sleep(void) {} + +#else + +/* Let some subsystems like memory hotadd exclude hibernation */ + +static inline void lock_system_sleep(void) +{ + mutex_lock(&pm_mutex); +} + +static inline void unlock_system_sleep(void) +{ + mutex_unlock(&pm_mutex); +} +#endif #endif /* _LINUX_SUSPEND_H */ diff --git a/include/linux/swap.h b/include/linux/swap.h index 7c15334f3ff2..4ec90019c1a4 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -34,15 +34,37 @@ static inline int current_is_kswapd(void) * the type/offset into the pte as 5/27 as well. */ #define MAX_SWAPFILES_SHIFT 5 -#ifndef CONFIG_MIGRATION -#define MAX_SWAPFILES (1 << MAX_SWAPFILES_SHIFT) + +/* + * Use some of the swap files numbers for other purposes. This + * is a convenient way to hook into the VM to trigger special + * actions on faults. + */ + +/* + * NUMA node memory migration support + */ +#ifdef CONFIG_MIGRATION +#define SWP_MIGRATION_NUM 2 +#define SWP_MIGRATION_READ (MAX_SWAPFILES + SWP_HWPOISON_NUM) +#define SWP_MIGRATION_WRITE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1) +#else +#define SWP_MIGRATION_NUM 0 +#endif + +/* + * Handling of hardware poisoned pages with memory corruption. + */ +#ifdef CONFIG_MEMORY_FAILURE +#define SWP_HWPOISON_NUM 1 +#define SWP_HWPOISON MAX_SWAPFILES #else -/* Use last two entries for page migration swap entries */ -#define MAX_SWAPFILES ((1 << MAX_SWAPFILES_SHIFT)-2) -#define SWP_MIGRATION_READ MAX_SWAPFILES -#define SWP_MIGRATION_WRITE (MAX_SWAPFILES + 1) +#define SWP_HWPOISON_NUM 0 #endif +#define MAX_SWAPFILES \ + ((1 << MAX_SWAPFILES_SHIFT) - SWP_MIGRATION_NUM - SWP_HWPOISON_NUM) + /* * Magic header for a swap area. 
The first part of the union is * what the swap magic looks like for the old (limited to 128MB) @@ -217,6 +239,11 @@ extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, gfp_t gfp_mask, bool noswap, unsigned int swappiness); +extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, + gfp_t gfp_mask, bool noswap, + unsigned int swappiness, + struct zone *zone, + int nid); extern int __isolate_lru_page(struct page *page, int mode, int file); extern unsigned long shrink_all_memory(unsigned long nr_pages); extern int vm_swappiness; @@ -240,7 +267,7 @@ extern int page_evictable(struct page *page, struct vm_area_struct *vma); extern void scan_mapping_unevictable_pages(struct address_space *); extern unsigned long scan_unevictable_pages; -extern int scan_unevictable_handler(struct ctl_table *, int, struct file *, +extern int scan_unevictable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); extern int scan_unevictable_register_node(struct node *node); extern void scan_unevictable_unregister_node(struct node *node); @@ -419,10 +446,22 @@ static inline swp_entry_t get_swap_page(void) } /* linux/mm/thrash.c */ -#define put_swap_token(mm) do { } while (0) -#define grab_swap_token(mm) do { } while (0) -#define has_swap_token(mm) 0 -#define disable_swap_token() do { } while (0) +static inline void put_swap_token(struct mm_struct *mm) +{ +} + +static inline void grab_swap_token(struct mm_struct *mm) +{ +} + +static inline int has_swap_token(struct mm_struct *mm) +{ + return 0; +} + +static inline void disable_swap_token(void) +{ +} static inline void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent) diff --git a/include/linux/swapops.h b/include/linux/swapops.h index 6ec39ab27b4b..cd42e30b7c6e 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h @@ -131,3 +131,41 @@ static inline int is_write_migration_entry(swp_entry_t entry) #endif +#ifdef CONFIG_MEMORY_FAILURE +/* + * Support for hardware poisoned pages + */ +static inline swp_entry_t make_hwpoison_entry(struct page *page) +{ + BUG_ON(!PageLocked(page)); + return swp_entry(SWP_HWPOISON, page_to_pfn(page)); +} + +static inline int is_hwpoison_entry(swp_entry_t entry) +{ + return swp_type(entry) == SWP_HWPOISON; +} +#else + +static inline swp_entry_t make_hwpoison_entry(struct page *page) +{ + return swp_entry(0, 0); +} + +static inline int is_hwpoison_entry(swp_entry_t swp) +{ + return 0; +} +#endif + +#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION) +static inline int non_swap_entry(swp_entry_t entry) +{ + return swp_type(entry) >= MAX_SWAPFILES; +} +#else +static inline int non_swap_entry(swp_entry_t entry) +{ + return 0; +} +#endif diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 73b1f1cec423..febedcf67c7e 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -7,6 +7,8 @@ struct device; struct dma_attrs; struct scatterlist; +extern int swiotlb_force; + /* * Maximum allowable number of contiguous slabs to map, * must be a power of 2. What is the appropriate value ? 
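non_swap_entry() in the swapops.h hunk above works purely off the type field: any type at or beyond MAX_SWAPFILES denotes a software-defined entry (hwpoison or migration), not a reference into a swap device. A simplified userspace model of that test; the real bit layout in swapops.h is pte- and arch-specific, so only the comparison rule carries over:

	#include <stdio.h>

	#define MAX_SWAPFILES_SHIFT	5
	#define MAX_SWAPFILES		29	/* both config options on, as above */
	#define SWP_OFFSET_BITS		(sizeof(unsigned long) * 8 - MAX_SWAPFILES_SHIFT)

	typedef struct { unsigned long val; } swp_entry_t;

	static swp_entry_t swp_entry(unsigned long type, unsigned long offset)
	{
		swp_entry_t e = { (type << SWP_OFFSET_BITS) | offset };
		return e;
	}

	static unsigned long swp_type(swp_entry_t e)
	{
		return e.val >> SWP_OFFSET_BITS;
	}

	static int non_swap_entry(swp_entry_t e)
	{
		return swp_type(e) >= MAX_SWAPFILES;
	}

	int main(void)
	{
		printf("%d %d\n",
		       non_swap_entry(swp_entry(3, 1234)),	/* 0: ordinary swap entry */
		       non_swap_entry(swp_entry(29, 1234)));	/* 1: hwpoison entry */
		return 0;
	}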
@@ -20,8 +22,7 @@ struct scatterlist; */ #define IO_TLB_SHIFT 11 -extern void -swiotlb_init(void); +extern void swiotlb_init(int verbose); extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size, @@ -88,4 +89,11 @@ swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr); extern int swiotlb_dma_supported(struct device *hwdev, u64 mask); +#ifdef CONFIG_SWIOTLB +extern void __init swiotlb_free(void); +#else +static inline void swiotlb_free(void) { } +#endif + +extern void swiotlb_print_info(void); #endif /* __LINUX_SWIOTLB_H */ diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 7d9803cbb20f..a990ace1a838 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -55,7 +55,7 @@ struct compat_timeval; struct robust_list_head; struct getcpu_cache; struct old_linux_dirent; -struct perf_counter_attr; +struct perf_event_attr; #include <linux/types.h> #include <linux/aio_abi.h> @@ -460,8 +460,7 @@ asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name, void __user *data); asmlinkage long sys_umount(char __user *name, int flags); asmlinkage long sys_oldumount(char __user *name); -asmlinkage long sys_truncate(const char __user *path, - unsigned long length); +asmlinkage long sys_truncate(const char __user *path, long length); asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length); asmlinkage long sys_stat(char __user *filename, struct __old_kernel_stat __user *statbuf); @@ -877,7 +876,7 @@ asmlinkage long sys_ppoll(struct pollfd __user *, unsigned int, int kernel_execve(const char *filename, char *const argv[], char *const envp[]); -asmlinkage long sys_perf_counter_open( - struct perf_counter_attr __user *attr_uptr, +asmlinkage long sys_perf_event_open( + struct perf_event_attr __user *attr_uptr, pid_t pid, int cpu, int group_fd, unsigned long flags); #endif diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index e76d3b22a466..1e4743ee6831 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -29,7 +29,6 @@ #include <linux/types.h> #include <linux/compiler.h> -struct file; struct completion; #define CTL_MAXNAME 10 /* how many path components do we allow in a @@ -977,25 +976,25 @@ typedef int ctl_handler (struct ctl_table *table, void __user *oldval, size_t __user *oldlenp, void __user *newval, size_t newlen); -typedef int proc_handler (struct ctl_table *ctl, int write, struct file * filp, +typedef int proc_handler (struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos); -extern int proc_dostring(struct ctl_table *, int, struct file *, +extern int proc_dostring(struct ctl_table *, int, void __user *, size_t *, loff_t *); -extern int proc_dointvec(struct ctl_table *, int, struct file *, +extern int proc_dointvec(struct ctl_table *, int, void __user *, size_t *, loff_t *); -extern int proc_dointvec_minmax(struct ctl_table *, int, struct file *, +extern int proc_dointvec_minmax(struct ctl_table *, int, void __user *, size_t *, loff_t *); -extern int proc_dointvec_jiffies(struct ctl_table *, int, struct file *, +extern int proc_dointvec_jiffies(struct ctl_table *, int, void __user *, size_t *, loff_t *); -extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int, struct file *, +extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int, void __user *, size_t *, loff_t *); -extern int proc_dointvec_ms_jiffies(struct ctl_table *, int, struct file *, +extern int proc_dointvec_ms_jiffies(struct ctl_table *, int, void __user *, size_t *, loff_t *); -extern 
int proc_doulongvec_minmax(struct ctl_table *, int, struct file *, +extern int proc_doulongvec_minmax(struct ctl_table *, int, void __user *, size_t *, loff_t *); extern int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int, - struct file *, void __user *, size_t *, loff_t *); + void __user *, size_t *, loff_t *); extern int do_sysctl (int __user *name, int nlen, void __user *oldval, size_t __user *oldlenp, diff --git a/include/linux/time.h b/include/linux/time.h index 56787c093345..fe04e5ef6a59 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -155,6 +155,34 @@ extern void timekeeping_leap_insert(int leapsecond); struct tms; extern void do_sys_times(struct tms *); +/* + * Similar to the struct tm in userspace <time.h>, but it needs to be here so + * that the kernel source is self contained. + */ +struct tm { + /* + * the number of seconds after the minute, normally in the range + * 0 to 59, but can be up to 60 to allow for leap seconds + */ + int tm_sec; + /* the number of minutes after the hour, in the range 0 to 59*/ + int tm_min; + /* the number of hours past midnight, in the range 0 to 23 */ + int tm_hour; + /* the day of the month, in the range 1 to 31 */ + int tm_mday; + /* the number of months since January, in the range 0 to 11 */ + int tm_mon; + /* the number of years since 1900 */ + long tm_year; + /* the number of days since Sunday, in the range 0 to 6 */ + int tm_wday; + /* the number of days since January 1, in the range 0 to 365 */ + int tm_yday; +}; + +void time_to_tm(time_t totalsecs, int offset, struct tm *result); + /** * timespec_to_ns - Convert timespec to nanoseconds * @ts: pointer to the timespec variable to be converted diff --git a/include/linux/topology.h b/include/linux/topology.h index 809b26c07090..57e63579bfdd 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h @@ -129,7 +129,7 @@ int arch_update_cpu_topology(void); | 1*SD_BALANCE_FORK \ | 0*SD_BALANCE_WAKE \ | 1*SD_WAKE_AFFINE \ - | 1*SD_PREFER_LOCAL \ + | 0*SD_PREFER_LOCAL \ | 0*SD_SHARE_CPUPOWER \ | 1*SD_SHARE_PKG_RESOURCES \ | 0*SD_SERIALIZE \ @@ -162,7 +162,7 @@ int arch_update_cpu_topology(void); | 1*SD_BALANCE_FORK \ | 0*SD_BALANCE_WAKE \ | 1*SD_WAKE_AFFINE \ - | 1*SD_PREFER_LOCAL \ + | 0*SD_PREFER_LOCAL \ | 0*SD_SHARE_CPUPOWER \ | 0*SD_SHARE_PKG_RESOURCES \ | 0*SD_SERIALIZE \ @@ -211,12 +211,6 @@ int arch_update_cpu_topology(void); #ifndef topology_core_id #define topology_core_id(cpu) ((void)(cpu), 0) #endif -#ifndef topology_thread_siblings -#define topology_thread_siblings(cpu) cpumask_of_cpu(cpu) -#endif -#ifndef topology_core_siblings -#define topology_core_siblings(cpu) cpumask_of_cpu(cpu) -#endif #ifndef topology_thread_cpumask #define topology_thread_cpumask(cpu) cpumask_of(cpu) #endif diff --git a/include/linux/tpm.h b/include/linux/tpm.h index 3338b3f5c21a..ac5d1c1285d9 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h @@ -27,9 +27,16 @@ */ #define TPM_ANY_NUM 0xFFFF -#if defined(CONFIG_TCG_TPM) +#if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE) extern int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf); extern int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash); +#else +static inline int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf) { + return -ENODEV; +} +static inline int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash) { + return -ENODEV; +} #endif #endif diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h index c134dd1fe6b6..09077f6ed128 100644 --- a/include/linux/trace_seq.h 
+++ b/include/linux/trace_seq.h @@ -7,7 +7,7 @@ /* * Trace sequences are used to allow a function to call several other functions - * to create a string of data to use (up to a max of PAGE_SIZE. + * to create a string of data to use (up to a max of PAGE_SIZE). */ struct trace_seq { diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index 17ba82efa483..1eb44a924e56 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -1,7 +1,7 @@ /* * Tracing hooks * - * Copyright (C) 2008 Red Hat, Inc. All rights reserved. + * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions @@ -463,22 +463,38 @@ static inline int tracehook_get_signal(struct task_struct *task, /** * tracehook_notify_jctl - report about job control stop/continue - * @notify: nonzero if this is the last thread in the group to stop + * @notify: zero, %CLD_STOPPED or %CLD_CONTINUED * @why: %CLD_STOPPED or %CLD_CONTINUED * * This is called when we might call do_notify_parent_cldstop(). - * It's called when about to stop for job control; we are already in - * %TASK_STOPPED state, about to call schedule(). It's also called when - * a delayed %CLD_STOPPED or %CLD_CONTINUED report is ready to be made. * - * Return nonzero to generate a %SIGCHLD with @why, which is - * normal if @notify is nonzero. + * @notify is zero if we would not ordinarily send a %SIGCHLD, + * or is the %CLD_STOPPED or %CLD_CONTINUED .si_code for %SIGCHLD. * - * Called with no locks held. + * @why is %CLD_STOPPED when about to stop for job control; + * we are already in %TASK_STOPPED state, about to call schedule(). + * It might also be that we have just exited (check %PF_EXITING), + * but need to report that a group-wide stop is complete. + * + * @why is %CLD_CONTINUED when waking up after job control stop and + * ready to make a delayed @notify report. + * + * Return the %CLD_* value for %SIGCHLD, or zero to generate no signal. + * + * Called with the siglock held. */ static inline int tracehook_notify_jctl(int notify, int why) { - return notify || (current->ptrace & PT_PTRACED); + return notify ?: (current->ptrace & PT_PTRACED) ? why : 0; +} + +/** + * tracehook_finish_jctl - report about return from job control stop + * + * This is called by do_signal_stop() after wakeup. + */ +static inline void tracehook_finish_jctl(void) +{ } #define DEATH_REAP -1 diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 63a3f7a80580..2aac8a83e89b 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -4,7 +4,7 @@ /* * Kernel Tracepoint API. * - * See Documentation/tracepoint.txt. + * See Documentation/trace/tracepoints.txt. * * (C) Copyright 2008 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca> * @@ -36,7 +36,7 @@ struct tracepoint { #ifndef DECLARE_TRACE #define TP_PROTO(args...) args -#define TP_ARGS(args...) args +#define TP_ARGS(args...) args #ifdef CONFIG_TRACEPOINTS diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index 3566129384a4..b08677982525 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h @@ -45,8 +45,16 @@ * * void (*shutdown)(struct tty_struct * tty); * - * This routine is called when a particular tty device is closed for - * the last time freeing up the resources. + * This routine is called synchronously when a particular tty device + * is closed for the last time freeing up the resources. 
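The tracehook_notify_jctl() rework above changes the return value from a boolean into the %SIGCHLD si_code itself. A userspace restatement of that decision (CLD_* values written out locally for illustration; the ptrace test is reduced to a flag) to make the ?: expression explicit:

	#include <stdio.h>

	#define CLD_STOPPED	5	/* siginfo si_code values, shown for illustration */
	#define CLD_CONTINUED	6

	/* a non-zero @notify wins; otherwise a ptraced task still reports @why */
	static int notify_jctl(int notify, int why, int ptraced)
	{
		return notify ? notify : (ptraced ? why : 0);
	}

	int main(void)
	{
		printf("%d\n", notify_jctl(0, CLD_STOPPED, 1));			/* 5: ptraced, report stop */
		printf("%d\n", notify_jctl(0, CLD_STOPPED, 0));			/* 0: no SIGCHLD */
		printf("%d\n", notify_jctl(CLD_CONTINUED, CLD_CONTINUED, 0));	/* 6: explicit notify */
		return 0;
	}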
+ * + * + * void (*cleanup)(struct tty_struct * tty); + * + * This routine is called asynchronously when a particular tty device + * is closed for the last time freeing up the resources. This is + * actually the second part of shutdown for routines that might sleep. + * * * int (*write)(struct tty_struct * tty, * const unsigned char *buf, int count); @@ -233,6 +241,7 @@ struct tty_operations { int (*open)(struct tty_struct * tty, struct file * filp); void (*close)(struct tty_struct * tty, struct file * filp); void (*shutdown)(struct tty_struct *tty); + void (*cleanup)(struct tty_struct *tty); int (*write)(struct tty_struct * tty, const unsigned char *buf, int count); int (*put_char)(struct tty_struct *tty, unsigned char ch); diff --git a/include/linux/ucb1400.h b/include/linux/ucb1400.h index ae779bb8cc0f..adb44066680c 100644 --- a/include/linux/ucb1400.h +++ b/include/linux/ucb1400.h @@ -26,6 +26,7 @@ #include <sound/ac97_codec.h> #include <linux/mutex.h> #include <linux/platform_device.h> +#include <linux/gpio.h> /* * UCB1400 AC-link registers @@ -82,6 +83,17 @@ #define UCB_ID 0x7e #define UCB_ID_1400 0x4304 +struct ucb1400_gpio_data { + int gpio_offset; + int (*gpio_setup)(struct device *dev, int ngpio); + int (*gpio_teardown)(struct device *dev, int ngpio); +}; + +struct ucb1400_gpio { + struct gpio_chip gc; + struct snd_ac97 *ac97; +}; + struct ucb1400_ts { struct input_dev *ts_idev; struct task_struct *ts_task; @@ -95,6 +107,7 @@ struct ucb1400_ts { struct ucb1400 { struct platform_device *ucb1400_ts; + struct platform_device *ucb1400_gpio; }; static inline u16 ucb1400_reg_read(struct snd_ac97 *ac97, u16 reg) @@ -147,4 +160,10 @@ static inline void ucb1400_adc_disable(struct snd_ac97 *ac97) unsigned int ucb1400_adc_read(struct snd_ac97 *ac97, u16 adc_channel, int adcsync); +#ifdef CONFIG_GPIO_UCB1400 +void __init ucb1400_gpio_set_data(struct ucb1400_gpio_data *data); +#else +static inline void ucb1400_gpio_set_data(struct ucb1400_gpio_data *data) {} +#endif + #endif diff --git a/include/linux/unaligned/be_byteshift.h b/include/linux/unaligned/be_byteshift.h index 46dd12c5709e..9356b24223ac 100644 --- a/include/linux/unaligned/be_byteshift.h +++ b/include/linux/unaligned/be_byteshift.h @@ -1,7 +1,7 @@ #ifndef _LINUX_UNALIGNED_BE_BYTESHIFT_H #define _LINUX_UNALIGNED_BE_BYTESHIFT_H -#include <linux/kernel.h> +#include <linux/types.h> static inline u16 __get_unaligned_be16(const u8 *p) { diff --git a/include/linux/unaligned/le_byteshift.h b/include/linux/unaligned/le_byteshift.h index 59777e951baf..be376fb79b64 100644 --- a/include/linux/unaligned/le_byteshift.h +++ b/include/linux/unaligned/le_byteshift.h @@ -1,7 +1,7 @@ #ifndef _LINUX_UNALIGNED_LE_BYTESHIFT_H #define _LINUX_UNALIGNED_LE_BYTESHIFT_H -#include <linux/kernel.h> +#include <linux/types.h> static inline u16 __get_unaligned_le16(const u8 *p) { diff --git a/include/linux/usb.h b/include/linux/usb.h index a8fe05f224e5..a34fa89f1474 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -195,7 +195,7 @@ struct usb_interface { struct device dev; /* interface specific device info */ struct device *usb_dev; - int pm_usage_cnt; /* usage counter for autosuspend */ + atomic_t pm_usage_cnt; /* usage counter for autosuspend */ struct work_struct reset_ws; /* for resets in atomic context */ }; #define to_usb_interface(d) container_of(d, struct usb_interface, dev) @@ -551,13 +551,13 @@ extern void usb_autopm_put_interface_async(struct usb_interface *intf); static inline void usb_autopm_enable(struct usb_interface *intf) { - 
intf->pm_usage_cnt = 0; + atomic_set(&intf->pm_usage_cnt, 0); usb_autopm_set_interface(intf); } static inline void usb_autopm_disable(struct usb_interface *intf) { - intf->pm_usage_cnt = 1; + atomic_set(&intf->pm_usage_cnt, 1); usb_autopm_set_interface(intf); } @@ -1036,9 +1036,10 @@ typedef void (*usb_complete_t)(struct urb *); * @transfer_flags: A variety of flags may be used to affect how URB * submission, unlinking, or operation are handled. Different * kinds of URB can use different flags. - * @transfer_buffer: This identifies the buffer to (or from) which - * the I/O request will be performed (unless URB_NO_TRANSFER_DMA_MAP - * is set). This buffer must be suitable for DMA; allocate it with + * @transfer_buffer: This identifies the buffer to (or from) which the I/O + * request will be performed unless URB_NO_TRANSFER_DMA_MAP is set + * (however, do not leave garbage in transfer_buffer even then). + * This buffer must be suitable for DMA; allocate it with * kmalloc() or equivalent. For transfers to "in" endpoints, contents * of this buffer will be modified. This buffer is used for the data * stage of control transfers. @@ -1071,7 +1072,7 @@ typedef void (*usb_complete_t)(struct urb *); * @start_frame: Returns the initial frame for isochronous transfers. * @number_of_packets: Lists the number of ISO transfer buffers. * @interval: Specifies the polling interval for interrupt or isochronous - * transfers. The units are frames (milliseconds) for for full and low + * transfers. The units are frames (milliseconds) for full and low * speed devices, and microframes (1/8 millisecond) for highspeed ones. * @error_count: Returns the number of ISO transfers that reported errors. * @context: For use in completion functions. This normally points to @@ -1104,9 +1105,15 @@ typedef void (*usb_complete_t)(struct urb *); * allocate a DMA buffer with usb_buffer_alloc() or call usb_buffer_map(). * When these transfer flags are provided, host controller drivers will * attempt to use the dma addresses found in the transfer_dma and/or - * setup_dma fields rather than determining a dma address themselves. (Note - * that transfer_buffer and setup_packet must still be set because not all - * host controllers use DMA, nor do virtual root hubs). + * setup_dma fields rather than determining a dma address themselves. + * + * Note that transfer_buffer must still be set if the controller + * does not support DMA (as indicated by bus.uses_dma) and when talking + * to root hub. If you have to trasfer between highmem zone and the device + * on such controller, create a bounce buffer or bail out with an error. + * If transfer_buffer cannot be set (is in highmem) and the controller is DMA + * capable, assign NULL to it, so that usbmon knows not to use the value. + * The setup_packet must always be set, so it cannot be located in highmem. 
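The expanded URB kerneldoc above spells out when transfer_buffer must stay set even though the HCD consults transfer_dma. A hedged driver-side sketch of the common case, a coherent buffer from usb_buffer_alloc() plus URB_NO_TRANSFER_DMA_MAP; the function and parameter names are made up, and the completion/teardown paths that would free buf and urb are omitted:

	#include <linux/errno.h>
	#include <linux/string.h>
	#include <linux/slab.h>
	#include <linux/usb.h>

	static int example_send_bulk(struct usb_device *udev, unsigned int ep,
				     const void *data, int len,
				     usb_complete_t done, void *ctx)
	{
		struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);
		void *buf;
		int ret;

		if (!urb)
			return -ENOMEM;
		buf = usb_buffer_alloc(udev, len, GFP_KERNEL, &urb->transfer_dma);
		if (!buf) {
			usb_free_urb(urb);
			return -ENOMEM;
		}
		memcpy(buf, data, len);

		/* transfer_buffer stays valid even though the HCD uses transfer_dma */
		usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, ep),
				  buf, len, done, ctx);
		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

		ret = usb_submit_urb(urb, GFP_KERNEL);
		if (ret) {
			usb_buffer_free(udev, len, buf, urb->transfer_dma);
			usb_free_urb(urb);
		}
		return ret;
	}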
* * Initialization: * diff --git a/include/linux/usb/audio.h b/include/linux/usb/audio.h index b5744bc218ab..eaf9dffe0a01 100644 --- a/include/linux/usb/audio.h +++ b/include/linux/usb/audio.h @@ -24,88 +24,78 @@ #define USB_SUBCLASS_AUDIOCONTROL 0x01 #define USB_SUBCLASS_AUDIOSTREAMING 0x02 #define USB_SUBCLASS_MIDISTREAMING 0x03 -#define USB_SUBCLASS_VENDOR_SPEC 0xff - -/* A.5 Audio Class-Specific AC interface Descriptor Subtypes*/ -#define HEADER 0x01 -#define INPUT_TERMINAL 0x02 -#define OUTPUT_TERMINAL 0x03 -#define MIXER_UNIT 0x04 -#define SELECTOR_UNIT 0x05 -#define FEATURE_UNIT 0x06 -#define PROCESSING_UNIT 0x07 -#define EXTENSION_UNIT 0x08 - -#define AS_GENERAL 0x01 -#define FORMAT_TYPE 0x02 -#define FORMAT_SPECIFIC 0x03 - -#define EP_GENERAL 0x01 - -#define MS_GENERAL 0x01 -#define MIDI_IN_JACK 0x02 -#define MIDI_OUT_JACK 0x03 - -/* endpoint attributes */ -#define EP_ATTR_MASK 0x0c -#define EP_ATTR_ASYNC 0x04 -#define EP_ATTR_ADAPTIVE 0x08 -#define EP_ATTR_SYNC 0x0c - -/* cs endpoint attributes */ -#define EP_CS_ATTR_SAMPLE_RATE 0x01 -#define EP_CS_ATTR_PITCH_CONTROL 0x02 -#define EP_CS_ATTR_FILL_MAX 0x80 - -/* Audio Class specific Request Codes */ -#define USB_AUDIO_SET_INTF 0x21 -#define USB_AUDIO_SET_ENDPOINT 0x22 -#define USB_AUDIO_GET_INTF 0xa1 -#define USB_AUDIO_GET_ENDPOINT 0xa2 - -#define SET_ 0x00 -#define GET_ 0x80 - -#define _CUR 0x1 -#define _MIN 0x2 -#define _MAX 0x3 -#define _RES 0x4 -#define _MEM 0x5 - -#define SET_CUR (SET_ | _CUR) -#define GET_CUR (GET_ | _CUR) -#define SET_MIN (SET_ | _MIN) -#define GET_MIN (GET_ | _MIN) -#define SET_MAX (SET_ | _MAX) -#define GET_MAX (GET_ | _MAX) -#define SET_RES (SET_ | _RES) -#define GET_RES (GET_ | _RES) -#define SET_MEM (SET_ | _MEM) -#define GET_MEM (GET_ | _MEM) - -#define GET_STAT 0xff - -#define USB_AC_TERMINAL_UNDEFINED 0x100 -#define USB_AC_TERMINAL_STREAMING 0x101 -#define USB_AC_TERMINAL_VENDOR_SPEC 0x1FF + +/* A.5 Audio Class-Specific AC Interface Descriptor Subtypes */ +#define UAC_HEADER 0x01 +#define UAC_INPUT_TERMINAL 0x02 +#define UAC_OUTPUT_TERMINAL 0x03 +#define UAC_MIXER_UNIT 0x04 +#define UAC_SELECTOR_UNIT 0x05 +#define UAC_FEATURE_UNIT 0x06 +#define UAC_PROCESSING_UNIT 0x07 +#define UAC_EXTENSION_UNIT 0x08 + +/* A.6 Audio Class-Specific AS Interface Descriptor Subtypes */ +#define UAC_AS_GENERAL 0x01 +#define UAC_FORMAT_TYPE 0x02 +#define UAC_FORMAT_SPECIFIC 0x03 + +/* A.8 Audio Class-Specific Endpoint Descriptor Subtypes */ +#define UAC_EP_GENERAL 0x01 + +/* A.9 Audio Class-Specific Request Codes */ +#define UAC_SET_ 0x00 +#define UAC_GET_ 0x80 + +#define UAC__CUR 0x1 +#define UAC__MIN 0x2 +#define UAC__MAX 0x3 +#define UAC__RES 0x4 +#define UAC__MEM 0x5 + +#define UAC_SET_CUR (UAC_SET_ | UAC__CUR) +#define UAC_GET_CUR (UAC_GET_ | UAC__CUR) +#define UAC_SET_MIN (UAC_SET_ | UAC__MIN) +#define UAC_GET_MIN (UAC_GET_ | UAC__MIN) +#define UAC_SET_MAX (UAC_SET_ | UAC__MAX) +#define UAC_GET_MAX (UAC_GET_ | UAC__MAX) +#define UAC_SET_RES (UAC_SET_ | UAC__RES) +#define UAC_GET_RES (UAC_GET_ | UAC__RES) +#define UAC_SET_MEM (UAC_SET_ | UAC__MEM) +#define UAC_GET_MEM (UAC_GET_ | UAC__MEM) + +#define UAC_GET_STAT 0xff + +/* MIDI - A.1 MS Class-Specific Interface Descriptor Subtypes */ +#define UAC_MS_HEADER 0x01 +#define UAC_MIDI_IN_JACK 0x02 +#define UAC_MIDI_OUT_JACK 0x03 + +/* MIDI - A.1 MS Class-Specific Endpoint Descriptor Subtypes */ +#define UAC_MS_GENERAL 0x01 + +/* Terminals - 2.1 USB Terminal Types */ +#define UAC_TERMINAL_UNDEFINED 0x100 +#define UAC_TERMINAL_STREAMING 0x101 +#define 
UAC_TERMINAL_VENDOR_SPEC 0x1FF /* Terminal Control Selectors */ /* 4.3.2 Class-Specific AC Interface Descriptor */ -struct usb_ac_header_descriptor { +struct uac_ac_header_descriptor { __u8 bLength; /* 8 + n */ __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ - __u8 bDescriptorSubtype; /* USB_MS_HEADER */ + __u8 bDescriptorSubtype; /* UAC_MS_HEADER */ __le16 bcdADC; /* 0x0100 */ __le16 wTotalLength; /* includes Unit and Terminal desc. */ __u8 bInCollection; /* n */ __u8 baInterfaceNr[]; /* [n] */ } __attribute__ ((packed)); -#define USB_DT_AC_HEADER_SIZE(n) (8 + (n)) +#define UAC_DT_AC_HEADER_SIZE(n) (8 + (n)) /* As above, but more useful for defining your own descriptors: */ -#define DECLARE_USB_AC_HEADER_DESCRIPTOR(n) \ -struct usb_ac_header_descriptor_##n { \ +#define DECLARE_UAC_AC_HEADER_DESCRIPTOR(n) \ +struct uac_ac_header_descriptor_##n { \ __u8 bLength; \ __u8 bDescriptorType; \ __u8 bDescriptorSubtype; \ @@ -116,7 +106,7 @@ struct usb_ac_header_descriptor_##n { \ } __attribute__ ((packed)) /* 4.3.2.1 Input Terminal Descriptor */ -struct usb_input_terminal_descriptor { +struct uac_input_terminal_descriptor { __u8 bLength; /* in bytes: 12 */ __u8 bDescriptorType; /* CS_INTERFACE descriptor type */ __u8 bDescriptorSubtype; /* INPUT_TERMINAL descriptor subtype */ @@ -129,18 +119,19 @@ struct usb_input_terminal_descriptor { __u8 iTerminal; } __attribute__ ((packed)); -#define USB_DT_AC_INPUT_TERMINAL_SIZE 12 +#define UAC_DT_INPUT_TERMINAL_SIZE 12 -#define USB_AC_INPUT_TERMINAL_UNDEFINED 0x200 -#define USB_AC_INPUT_TERMINAL_MICROPHONE 0x201 -#define USB_AC_INPUT_TERMINAL_DESKTOP_MICROPHONE 0x202 -#define USB_AC_INPUT_TERMINAL_PERSONAL_MICROPHONE 0x203 -#define USB_AC_INPUT_TERMINAL_OMNI_DIR_MICROPHONE 0x204 -#define USB_AC_INPUT_TERMINAL_MICROPHONE_ARRAY 0x205 -#define USB_AC_INPUT_TERMINAL_PROC_MICROPHONE_ARRAY 0x206 +/* Terminals - 2.2 Input Terminal Types */ +#define UAC_INPUT_TERMINAL_UNDEFINED 0x200 +#define UAC_INPUT_TERMINAL_MICROPHONE 0x201 +#define UAC_INPUT_TERMINAL_DESKTOP_MICROPHONE 0x202 +#define UAC_INPUT_TERMINAL_PERSONAL_MICROPHONE 0x203 +#define UAC_INPUT_TERMINAL_OMNI_DIR_MICROPHONE 0x204 +#define UAC_INPUT_TERMINAL_MICROPHONE_ARRAY 0x205 +#define UAC_INPUT_TERMINAL_PROC_MICROPHONE_ARRAY 0x206 /* 4.3.2.2 Output Terminal Descriptor */ -struct usb_output_terminal_descriptor { +struct uac_output_terminal_descriptor { __u8 bLength; /* in bytes: 9 */ __u8 bDescriptorType; /* CS_INTERFACE descriptor type */ __u8 bDescriptorSubtype; /* OUTPUT_TERMINAL descriptor subtype */ @@ -151,23 +142,24 @@ struct usb_output_terminal_descriptor { __u8 iTerminal; } __attribute__ ((packed)); -#define USB_DT_AC_OUTPUT_TERMINAL_SIZE 9 +#define UAC_DT_OUTPUT_TERMINAL_SIZE 9 -#define USB_AC_OUTPUT_TERMINAL_UNDEFINED 0x300 -#define USB_AC_OUTPUT_TERMINAL_SPEAKER 0x301 -#define USB_AC_OUTPUT_TERMINAL_HEADPHONES 0x302 -#define USB_AC_OUTPUT_TERMINAL_HEAD_MOUNTED_DISPLAY_AUDIO 0x303 -#define USB_AC_OUTPUT_TERMINAL_DESKTOP_SPEAKER 0x304 -#define USB_AC_OUTPUT_TERMINAL_ROOM_SPEAKER 0x305 -#define USB_AC_OUTPUT_TERMINAL_COMMUNICATION_SPEAKER 0x306 -#define USB_AC_OUTPUT_TERMINAL_LOW_FREQ_EFFECTS_SPEAKER 0x307 +/* Terminals - 2.3 Output Terminal Types */ +#define UAC_OUTPUT_TERMINAL_UNDEFINED 0x300 +#define UAC_OUTPUT_TERMINAL_SPEAKER 0x301 +#define UAC_OUTPUT_TERMINAL_HEADPHONES 0x302 +#define UAC_OUTPUT_TERMINAL_HEAD_MOUNTED_DISPLAY_AUDIO 0x303 +#define UAC_OUTPUT_TERMINAL_DESKTOP_SPEAKER 0x304 +#define UAC_OUTPUT_TERMINAL_ROOM_SPEAKER 0x305 +#define UAC_OUTPUT_TERMINAL_COMMUNICATION_SPEAKER 0x306 
+#define UAC_OUTPUT_TERMINAL_LOW_FREQ_EFFECTS_SPEAKER 0x307 /* Set bControlSize = 2 as default setting */ -#define USB_DT_AC_FEATURE_UNIT_SIZE(ch) (7 + ((ch) + 1) * 2) +#define UAC_DT_FEATURE_UNIT_SIZE(ch) (7 + ((ch) + 1) * 2) /* As above, but more useful for defining your own descriptors: */ -#define DECLARE_USB_AC_FEATURE_UNIT_DESCRIPTOR(ch) \ -struct usb_ac_feature_unit_descriptor_##ch { \ +#define DECLARE_UAC_FEATURE_UNIT_DESCRIPTOR(ch) \ +struct uac_feature_unit_descriptor_##ch { \ __u8 bLength; \ __u8 bDescriptorType; \ __u8 bDescriptorSubtype; \ @@ -179,7 +171,7 @@ struct usb_ac_feature_unit_descriptor_##ch { \ } __attribute__ ((packed)) /* 4.5.2 Class-Specific AS Interface Descriptor */ -struct usb_as_header_descriptor { +struct uac_as_header_descriptor { __u8 bLength; /* in bytes: 7 */ __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ __u8 bDescriptorSubtype; /* AS_GENERAL */ @@ -188,16 +180,17 @@ struct usb_as_header_descriptor { __le16 wFormatTag; /* The Audio Data Format */ } __attribute__ ((packed)); -#define USB_DT_AS_HEADER_SIZE 7 +#define UAC_DT_AS_HEADER_SIZE 7 -#define USB_AS_AUDIO_FORMAT_TYPE_I_UNDEFINED 0x0 -#define USB_AS_AUDIO_FORMAT_TYPE_I_PCM 0x1 -#define USB_AS_AUDIO_FORMAT_TYPE_I_PCM8 0x2 -#define USB_AS_AUDIO_FORMAT_TYPE_I_IEEE_FLOAT 0x3 -#define USB_AS_AUDIO_FORMAT_TYPE_I_ALAW 0x4 -#define USB_AS_AUDIO_FORMAT_TYPE_I_MULAW 0x5 +/* Formats - A.1.1 Audio Data Format Type I Codes */ +#define UAC_FORMAT_TYPE_I_UNDEFINED 0x0 +#define UAC_FORMAT_TYPE_I_PCM 0x1 +#define UAC_FORMAT_TYPE_I_PCM8 0x2 +#define UAC_FORMAT_TYPE_I_IEEE_FLOAT 0x3 +#define UAC_FORMAT_TYPE_I_ALAW 0x4 +#define UAC_FORMAT_TYPE_I_MULAW 0x5 -struct usb_as_format_type_i_continuous_descriptor { +struct uac_format_type_i_continuous_descriptor { __u8 bLength; /* in bytes: 8 + (ns * 3) */ __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ __u8 bDescriptorSubtype; /* FORMAT_TYPE */ @@ -210,9 +203,9 @@ struct usb_as_format_type_i_continuous_descriptor { __u8 tUpperSamFreq[3]; } __attribute__ ((packed)); -#define USB_AS_FORMAT_TYPE_I_CONTINUOUS_DESC_SIZE 14 +#define UAC_FORMAT_TYPE_I_CONTINUOUS_DESC_SIZE 14 -struct usb_as_formate_type_i_discrete_descriptor { +struct uac_format_type_i_discrete_descriptor { __u8 bLength; /* in bytes: 8 + (ns * 3) */ __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ __u8 bDescriptorSubtype; /* FORMAT_TYPE */ @@ -224,8 +217,8 @@ struct usb_as_formate_type_i_discrete_descriptor { __u8 tSamFreq[][3]; } __attribute__ ((packed)); -#define DECLARE_USB_AS_FORMAT_TYPE_I_DISCRETE_DESC(n) \ -struct usb_as_formate_type_i_discrete_descriptor_##n { \ +#define DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(n) \ +struct uac_format_type_i_discrete_descriptor_##n { \ __u8 bLength; \ __u8 bDescriptorType; \ __u8 bDescriptorSubtype; \ @@ -237,18 +230,15 @@ struct usb_as_formate_type_i_discrete_descriptor_##n { \ __u8 tSamFreq[n][3]; \ } __attribute__ ((packed)) -#define USB_AS_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(n) (8 + (n * 3)) - -#define USB_AS_FORMAT_TYPE_UNDEFINED 0x0 -#define USB_AS_FORMAT_TYPE_I 0x1 -#define USB_AS_FORMAT_TYPE_II 0x2 -#define USB_AS_FORMAT_TYPE_III 0x3 +#define UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(n) (8 + (n * 3)) -#define USB_AS_ENDPOINT_ASYNC (1 << 2) -#define USB_AS_ENDPOINT_ADAPTIVE (2 << 2) -#define USB_AS_ENDPOINT_SYNC (3 << 2) +/* Formats - A.2 Format Type Codes */ +#define UAC_FORMAT_TYPE_UNDEFINED 0x0 +#define UAC_FORMAT_TYPE_I 0x1 +#define UAC_FORMAT_TYPE_II 0x2 +#define UAC_FORMAT_TYPE_III 0x3 -struct usb_as_iso_endpoint_descriptor { +struct uac_iso_endpoint_descriptor { __u8 
bLength; /* in bytes: 7 */ __u8 bDescriptorType; /* USB_DT_CS_ENDPOINT */ __u8 bDescriptorSubtype; /* EP_GENERAL */ @@ -256,30 +246,37 @@ struct usb_as_iso_endpoint_descriptor { __u8 bLockDelayUnits; __le16 wLockDelay; }; -#define USB_AS_ISO_ENDPOINT_DESC_SIZE 7 - -#define FU_CONTROL_UNDEFINED 0x00 -#define MUTE_CONTROL 0x01 -#define VOLUME_CONTROL 0x02 -#define BASS_CONTROL 0x03 -#define MID_CONTROL 0x04 -#define TREBLE_CONTROL 0x05 -#define GRAPHIC_EQUALIZER_CONTROL 0x06 -#define AUTOMATIC_GAIN_CONTROL 0x07 -#define DELAY_CONTROL 0x08 -#define BASS_BOOST_CONTROL 0x09 -#define LOUDNESS_CONTROL 0x0a - -#define FU_MUTE (1 << (MUTE_CONTROL - 1)) -#define FU_VOLUME (1 << (VOLUME_CONTROL - 1)) -#define FU_BASS (1 << (BASS_CONTROL - 1)) -#define FU_MID (1 << (MID_CONTROL - 1)) -#define FU_TREBLE (1 << (TREBLE_CONTROL - 1)) -#define FU_GRAPHIC_EQ (1 << (GRAPHIC_EQUALIZER_CONTROL - 1)) -#define FU_AUTO_GAIN (1 << (AUTOMATIC_GAIN_CONTROL - 1)) -#define FU_DELAY (1 << (DELAY_CONTROL - 1)) -#define FU_BASS_BOOST (1 << (BASS_BOOST_CONTROL - 1)) -#define FU_LOUDNESS (1 << (LOUDNESS_CONTROL - 1)) +#define UAC_ISO_ENDPOINT_DESC_SIZE 7 + +#define UAC_EP_CS_ATTR_SAMPLE_RATE 0x01 +#define UAC_EP_CS_ATTR_PITCH_CONTROL 0x02 +#define UAC_EP_CS_ATTR_FILL_MAX 0x80 + +/* A.10.2 Feature Unit Control Selectors */ +#define UAC_FU_CONTROL_UNDEFINED 0x00 +#define UAC_MUTE_CONTROL 0x01 +#define UAC_VOLUME_CONTROL 0x02 +#define UAC_BASS_CONTROL 0x03 +#define UAC_MID_CONTROL 0x04 +#define UAC_TREBLE_CONTROL 0x05 +#define UAC_GRAPHIC_EQUALIZER_CONTROL 0x06 +#define UAC_AUTOMATIC_GAIN_CONTROL 0x07 +#define UAC_DELAY_CONTROL 0x08 +#define UAC_BASS_BOOST_CONTROL 0x09 +#define UAC_LOUDNESS_CONTROL 0x0a + +#define UAC_FU_MUTE (1 << (UAC_MUTE_CONTROL - 1)) +#define UAC_FU_VOLUME (1 << (UAC_VOLUME_CONTROL - 1)) +#define UAC_FU_BASS (1 << (UAC_BASS_CONTROL - 1)) +#define UAC_FU_MID (1 << (UAC_MID_CONTROL - 1)) +#define UAC_FU_TREBLE (1 << (UAC_TREBLE_CONTROL - 1)) +#define UAC_FU_GRAPHIC_EQ (1 << (UAC_GRAPHIC_EQUALIZER_CONTROL - 1)) +#define UAC_FU_AUTO_GAIN (1 << (UAC_AUTOMATIC_GAIN_CONTROL - 1)) +#define UAC_FU_DELAY (1 << (UAC_DELAY_CONTROL - 1)) +#define UAC_FU_BASS_BOOST (1 << (UAC_BASS_BOOST_CONTROL - 1)) +#define UAC_FU_LOUDNESS (1 << (UAC_LOUDNESS_CONTROL - 1)) + +#ifdef __KERNEL__ struct usb_audio_control { struct list_head list; @@ -290,18 +287,6 @@ struct usb_audio_control { int (*get)(struct usb_audio_control *con, u8 cmd); }; -static inline int generic_set_cmd(struct usb_audio_control *con, u8 cmd, int value) -{ - con->data[cmd] = value; - - return 0; -} - -static inline int generic_get_cmd(struct usb_audio_control *con, u8 cmd) -{ - return con->data[cmd]; -} - struct usb_audio_control_selector { struct list_head list; struct list_head control; @@ -311,4 +296,6 @@ struct usb_audio_control_selector { struct usb_descriptor_header *desc; }; +#endif /* __KERNEL__ */ + #endif /* __LINUX_USB_AUDIO_H */ diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h index 93223638f702..94012e649d86 100644 --- a/include/linux/usb/ch9.h +++ b/include/linux/usb/ch9.h @@ -258,6 +258,8 @@ struct usb_device_descriptor { #define USB_CLASS_APP_SPEC 0xfe #define USB_CLASS_VENDOR_SPEC 0xff +#define USB_SUBCLASS_VENDOR_SPEC 0xff + /*-------------------------------------------------------------------------*/ /* USB_DT_CONFIG: Configuration descriptor information. 
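The UAC_FU_* masks introduced in the audio.h hunk above are simply the Feature Unit control selectors shifted into bmaControls bit positions. A two-line userspace illustration (only the mute and volume selectors are reproduced) of how a descriptor would advertise mute plus volume on a channel:

	#include <stdio.h>

	#define UAC_MUTE_CONTROL	0x01
	#define UAC_VOLUME_CONTROL	0x02
	#define UAC_FU_MUTE		(1 << (UAC_MUTE_CONTROL - 1))
	#define UAC_FU_VOLUME		(1 << (UAC_VOLUME_CONTROL - 1))

	int main(void)
	{
		/* mute + volume => bmaControls = 0x0003 for that channel */
		unsigned int bma = UAC_FU_MUTE | UAC_FU_VOLUME;

		printf("bmaControls = 0x%04x\n", bma);
		return 0;
	}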
@@ -348,6 +350,12 @@ struct usb_endpoint_descriptor { #define USB_ENDPOINT_NUMBER_MASK 0x0f /* in bEndpointAddress */ #define USB_ENDPOINT_DIR_MASK 0x80 +#define USB_ENDPOINT_SYNCTYPE 0x0c +#define USB_ENDPOINT_SYNC_NONE (0 << 2) +#define USB_ENDPOINT_SYNC_ASYNC (1 << 2) +#define USB_ENDPOINT_SYNC_ADAPTIVE (2 << 2) +#define USB_ENDPOINT_SYNC_SYNC (3 << 2) + #define USB_ENDPOINT_XFERTYPE_MASK 0x03 /* in bmAttributes */ #define USB_ENDPOINT_XFER_CONTROL 0 #define USB_ENDPOINT_XFER_ISOC 1 diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h index 5b88e36c9103..af4b86f3aca3 100644 --- a/include/linux/usb/ehci_def.h +++ b/include/linux/usb/ehci_def.h @@ -105,6 +105,7 @@ struct ehci_regs { #define PORT_WKDISC_E (1<<21) /* wake on disconnect (enable) */ #define PORT_WKCONN_E (1<<20) /* wake on connect (enable) */ /* 19:16 for port testing */ +#define PORT_TEST_PKT (0x4<<16) /* Port Test Control - packet test */ #define PORT_LED_OFF (0<<14) #define PORT_LED_AMBER (1<<14) #define PORT_LED_GREEN (2<<14) @@ -132,6 +133,19 @@ struct ehci_regs { #define USBMODE_CM_HC (3<<0) /* host controller mode */ #define USBMODE_CM_IDLE (0<<0) /* idle state */ +/* Moorestown has some non-standard registers, partially due to the fact that + * its EHCI controller has both TT and LPM support. HOSTPCx are extentions to + * PORTSCx + */ +#define HOSTPC0 0x84 /* HOSTPC extension */ +#define HOSTPC_PHCD (1<<22) /* Phy clock disable */ +#define HOSTPC_PSPD (3<<25) /* Port speed detection */ +#define USBMODE_EX 0xc8 /* USB Device mode extension */ +#define USBMODE_EX_VBPS (1<<5) /* VBus Power Select On */ +#define USBMODE_EX_HC (3<<0) /* host controller mode */ +#define TXFILLTUNING 0x24 /* TX FIFO Tuning register */ +#define TXFIFO_DEFAULT (8<<16) /* FIFO burst threshold 8 */ + /* Appendix C, Debug port ... intended for use with special "debug devices" * that can help if there's no serial console. (nonstandard enumeration.) */ @@ -157,4 +171,25 @@ struct ehci_dbg_port { #define DBGP_EPADDR(dev, ep) (((dev)<<8)|(ep)) } __attribute__ ((packed)); +#ifdef CONFIG_EARLY_PRINTK_DBGP +#include <linux/init.h> +extern int __init early_dbgp_init(char *s); +extern struct console early_dbgp_console; +#endif /* CONFIG_EARLY_PRINTK_DBGP */ + +#ifdef CONFIG_EARLY_PRINTK_DBGP +/* Call backs from ehci host driver to ehci debug driver */ +extern int dbgp_external_startup(void); +extern int dbgp_reset_prep(void); +#else +static inline int dbgp_reset_prep(void) +{ + return 1; +} +static inline int dbgp_external_startup(void) +{ + return -1; +} +#endif + #endif /* __LINUX_USB_EHCI_DEF_H */ diff --git a/include/linux/usb/isp1362.h b/include/linux/usb/isp1362.h new file mode 100644 index 000000000000..642684bb9292 --- /dev/null +++ b/include/linux/usb/isp1362.h @@ -0,0 +1,46 @@ +/* + * board initialization code should put one of these into dev->platform_data + * and place the isp1362 onto platform_bus. 
+ */ + +#ifndef __LINUX_USB_ISP1362_H__ +#define __LINUX_USB_ISP1362_H__ + +struct isp1362_platform_data { + /* Enable internal pulldown resistors on downstream ports */ + unsigned sel15Kres:1; + /* Clock cannot be stopped */ + unsigned clknotstop:1; + /* On-chip overcurrent protection */ + unsigned oc_enable:1; + /* INT output polarity */ + unsigned int_act_high:1; + /* INT edge or level triggered */ + unsigned int_edge_triggered:1; + /* DREQ output polarity */ + unsigned dreq_act_high:1; + /* DACK input polarity */ + unsigned dack_act_high:1; + /* chip can be resumed via H_WAKEUP pin */ + unsigned remote_wakeup_connected:1; + /* Switch or not to switch (keep always powered) */ + unsigned no_power_switching:1; + /* Ganged port power switching (0) or individual port power switching (1) */ + unsigned power_switching_mode:1; + /* Given port_power, msec/2 after power on till power good */ + u8 potpg; + /* Hardware reset set/clear */ + void (*reset) (struct device *dev, int set); + /* Clock start/stop */ + void (*clock) (struct device *dev, int start); + /* Inter-io delay (ns). The chip is picky about access timings; it + * expects at least: + * 110ns delay between consecutive accesses to DATA_REG, + * 300ns delay between access to ADDR_REG and DATA_REG (registers) + * 462ns delay between access to ADDR_REG and DATA_REG (buffer memory) + * WE MUST NOT be activated during these intervals (even without CS!) + */ + void (*delay) (struct device *dev, unsigned int delay); +}; + +#endif diff --git a/include/linux/usb/isp1760.h b/include/linux/usb/isp1760.h new file mode 100644 index 000000000000..de7de53c5531 --- /dev/null +++ b/include/linux/usb/isp1760.h @@ -0,0 +1,18 @@ +/* + * board initialization should put one of these into dev->platform_data + * and place the isp1760 onto platform_bus named "isp1760-hcd". + */ + +#ifndef __LINUX_USB_ISP1760_H +#define __LINUX_USB_ISP1760_H + +struct isp1760_platform_data { + unsigned is_isp1761:1; /* Chip is ISP1761 */ + unsigned bus_width_16:1; /* 16/32-bit data bus width */ + unsigned port1_otg:1; /* Port 1 supports OTG */ + unsigned analog_oc:1; /* Analog overcurrent */ + unsigned dack_polarity_high:1; /* DACK active high */ + unsigned dreq_polarity_high:1; /* DREQ active high */ +}; + +#endif /* __LINUX_USB_ISP1760_H */ diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index 7b85e327af91..ce911ebf91e8 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h @@ -59,6 +59,7 @@ enum port_dev_state { * @bulk_out_buffer: pointer to the bulk out buffer for this port. * @bulk_out_size: the size of the bulk_out_buffer, in bytes. * @write_urb: pointer to the bulk out struct urb for this port. + * @write_fifo: kfifo used to buffer outgoing data * @write_urb_busy: port`s writing status * @bulk_out_endpointAddress: endpoint address for the bulk out pipe for this * port. 
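struct isp1362_platform_data added above is meant to be filled in by board code and handed over via dev->platform_data. A hedged board-file sketch; the GPIO reset hook, the chosen field values and the "isp1362-hcd" device name are assumptions for illustration, and the MEM/IRQ resources are omitted for brevity. The device would then be registered with platform_device_register():

	#include <linux/platform_device.h>
	#include <linux/usb/isp1362.h>

	static void myboard_isp1362_reset(struct device *dev, int set)
	{
		/* toggle the GPIO wired to the chip's reset line here */
	}

	static struct isp1362_platform_data myboard_isp1362_pdata = {
		.sel15Kres		 = 1,
		.oc_enable		 = 1,
		.int_act_high		 = 0,
		.int_edge_triggered	 = 0,
		.remote_wakeup_connected = 1,
		.reset			 = myboard_isp1362_reset,
	};

	static struct platform_device myboard_isp1362_device = {
		.name	= "isp1362-hcd",
		.id	= 0,
		.dev	= {
			.platform_data = &myboard_isp1362_pdata,
		},
	};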
@@ -96,6 +97,7 @@ struct usb_serial_port { unsigned char *bulk_out_buffer; int bulk_out_size; struct urb *write_urb; + struct kfifo *write_fifo; int write_urb_busy; __u8 bulk_out_endpointAddress; @@ -148,6 +150,7 @@ struct usb_serial { struct usb_interface *interface; unsigned char disconnected:1; unsigned char suspending:1; + unsigned char attached:1; unsigned char minor; unsigned char num_ports; unsigned char num_port_pointers; diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index bb69e256cd16..f81473052059 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -89,6 +89,7 @@ struct driver_info { #define FLAG_FRAMING_AX 0x0040 /* AX88772/178 packets */ #define FLAG_WLAN 0x0080 /* use "wlan%d" names */ #define FLAG_AVOID_UNLINK_URBS 0x0100 /* don't unlink urbs at usbnet_stop() */ +#define FLAG_SEND_ZLP 0x0200 /* hw requires ZLPs are sent */ /* init device ... can sleep, or cause probe() failure */ diff --git a/include/linux/usbdevice_fs.h b/include/linux/usbdevice_fs.h index 0044d9b4cb85..b2a7d8ba6ee3 100644 --- a/include/linux/usbdevice_fs.h +++ b/include/linux/usbdevice_fs.h @@ -77,6 +77,7 @@ struct usbdevfs_connectinfo { #define USBDEVFS_URB_SHORT_NOT_OK 0x01 #define USBDEVFS_URB_ISO_ASAP 0x02 +#define USBDEVFS_URB_BULK_CONTINUATION 0x04 #define USBDEVFS_URB_NO_FSBR 0x20 #define USBDEVFS_URB_ZERO_PACKET 0x40 #define USBDEVFS_URB_NO_INTERRUPT 0x80 @@ -175,4 +176,6 @@ struct usbdevfs_ioctl32 { #define USBDEVFS_CLEAR_HALT _IOR('U', 21, unsigned int) #define USBDEVFS_DISCONNECT _IO('U', 22) #define USBDEVFS_CONNECT _IO('U', 23) +#define USBDEVFS_CLAIM_PORT _IOR('U', 24, unsigned int) +#define USBDEVFS_RELEASE_PORT _IOR('U', 25, unsigned int) #endif /* _LINUX_USBDEVICE_FS_H */ diff --git a/include/linux/utsname.h b/include/linux/utsname.h index 3656b300de3a..69f39974c041 100644 --- a/include/linux/utsname.h +++ b/include/linux/utsname.h @@ -36,7 +36,6 @@ struct new_utsname { #include <linux/kref.h> #include <linux/nsproxy.h> #include <linux/err.h> -#include <asm/atomic.h> struct uts_namespace { struct kref kref; diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h index e81c64af80c1..2dfaa293ae8c 100644 --- a/include/linux/vgaarb.h +++ b/include/linux/vgaarb.h @@ -1,5 +1,6 @@ /* - * vgaarb.c + * The VGA aribiter manages VGA space routing and VGA resource decode to + * allow multiple VGA devices to be used in a system in a safe way. * * (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org> * (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com> @@ -41,7 +42,7 @@ * interrupts at any time. */ extern void vga_set_legacy_decoding(struct pci_dev *pdev, - unsigned int decodes); + unsigned int decodes); /** * vga_get - acquire & locks VGA resources @@ -193,8 +194,17 @@ static inline int vga_conflicts(struct pci_dev *p1, struct pci_dev *p2) * They driver will get a callback when VGA arbitration is first used * by userspace since we some older X servers have issues. 
*/ +#if defined(CONFIG_VGA_ARB) int vga_client_register(struct pci_dev *pdev, void *cookie, void (*irq_set_state)(void *cookie, bool state), unsigned int (*set_vga_decode)(void *cookie, bool state)); +#else +static inline int vga_client_register(struct pci_dev *pdev, void *cookie, + void (*irq_set_state)(void *cookie, bool state), + unsigned int (*set_vga_decode)(void *cookie, bool state)) +{ + return 0; +} +#endif #endif /* LINUX_VGA_H */ diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 4fca4f5440ba..057a2e010758 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -34,7 +34,7 @@ struct virtqueue { * out_num: the number of sg readable by other side * in_num: the number of sg which are writable (after readable ones) * data: the token identifying the buffer. - * Returns 0 or an error. + * Returns remaining capacity of queue (sg segments) or a negative error. * @kick: update after add_buf * vq: the struct virtqueue * After one or more add_buf calls, invoke this to kick the other side. diff --git a/include/linux/virtio_9p.h b/include/linux/virtio_9p.h index b3c4a60ceeb3..095e10d148b4 100644 --- a/include/linux/virtio_9p.h +++ b/include/linux/virtio_9p.h @@ -2,10 +2,9 @@ #define _LINUX_VIRTIO_9P_H /* This header is BSD licensed so anyone can use the definitions to implement * compatible drivers/servers. */ +#include <linux/virtio_ids.h> #include <linux/virtio_config.h> -/* The ID for virtio console */ -#define VIRTIO_ID_9P 9 /* Maximum number of virtio channels per partition (1 for now) */ #define MAX_9P_CHAN 1 diff --git a/include/linux/virtio_balloon.h b/include/linux/virtio_balloon.h index 8726ff77763e..1418f048cb34 100644 --- a/include/linux/virtio_balloon.h +++ b/include/linux/virtio_balloon.h @@ -2,11 +2,9 @@ #define _LINUX_VIRTIO_BALLOON_H /* This header is BSD licensed so anyone can use the definitions to implement * compatible drivers/servers. */ +#include <linux/virtio_ids.h> #include <linux/virtio_config.h> -/* The ID for virtio_balloon */ -#define VIRTIO_ID_BALLOON 5 - /* The feature bitmap for virtio balloon */ #define VIRTIO_BALLOON_F_MUST_TELL_HOST 0 /* Tell before reclaiming pages */ diff --git a/include/linux/virtio_blk.h b/include/linux/virtio_blk.h index 8dab9f2b8832..fd294c56d571 100644 --- a/include/linux/virtio_blk.h +++ b/include/linux/virtio_blk.h @@ -3,11 +3,9 @@ /* This header is BSD licensed so anyone can use the definitions to implement * compatible drivers/servers. */ #include <linux/types.h> +#include <linux/virtio_ids.h> #include <linux/virtio_config.h> -/* The ID for virtio_block */ -#define VIRTIO_ID_BLOCK 2 - /* Feature bits */ #define VIRTIO_BLK_F_BARRIER 0 /* Does host support barriers? */ #define VIRTIO_BLK_F_SIZE_MAX 1 /* Indicates maximum segment size */ @@ -16,9 +14,7 @@ #define VIRTIO_BLK_F_RO 5 /* Disk is read-only */ #define VIRTIO_BLK_F_BLK_SIZE 6 /* Block size of disk is available*/ #define VIRTIO_BLK_F_SCSI 7 /* Supports scsi command passthru */ -#define VIRTIO_BLK_F_IDENTIFY 8 /* ATA IDENTIFY supported */ - -#define VIRTIO_BLK_ID_BYTES (sizeof(__u16[256])) /* IDENTIFY DATA */ +#define VIRTIO_BLK_F_FLUSH 9 /* Cache flush command support */ struct virtio_blk_config { /* The capacity (in 512-byte sectors). */ @@ -35,9 +31,19 @@ struct virtio_blk_config { } geometry; /* block size of device (if VIRTIO_BLK_F_BLK_SIZE) */ __u32 blk_size; - __u8 identify[VIRTIO_BLK_ID_BYTES]; } __attribute__((packed)); +/* + * Command types + * + * Usage is a bit tricky as some bits are used as flags and some are not. 
+ * + * Rules: + * VIRTIO_BLK_T_OUT may be combined with VIRTIO_BLK_T_SCSI_CMD or + * VIRTIO_BLK_T_BARRIER. VIRTIO_BLK_T_FLUSH is a command of its own + * and may not be combined with any of the other flags. + */ + /* These two define direction. */ #define VIRTIO_BLK_T_IN 0 #define VIRTIO_BLK_T_OUT 1 @@ -45,6 +51,9 @@ struct virtio_blk_config { /* This bit says it's a scsi command, not an actual read or write. */ #define VIRTIO_BLK_T_SCSI_CMD 2 +/* Cache flush command */ +#define VIRTIO_BLK_T_FLUSH 4 + /* Barrier before this op. */ #define VIRTIO_BLK_T_BARRIER 0x80000000 diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index e547e3c8ee9a..0093dd7c1d6f 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -109,8 +109,7 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev, unsigned int fbit) { /* Did you forget to fix assumptions on max features? */ - if (__builtin_constant_p(fbit)) - BUILD_BUG_ON(fbit >= 32); + MAYBE_BUILD_BUG_ON(fbit >= 32); if (fbit < VIRTIO_TRANSPORT_F_START) virtio_check_driver_offered_feature(vdev, fbit); diff --git a/include/linux/virtio_console.h b/include/linux/virtio_console.h index dc161115ae35..fe885174cc1f 100644 --- a/include/linux/virtio_console.h +++ b/include/linux/virtio_console.h @@ -1,13 +1,11 @@ #ifndef _LINUX_VIRTIO_CONSOLE_H #define _LINUX_VIRTIO_CONSOLE_H #include <linux/types.h> +#include <linux/virtio_ids.h> #include <linux/virtio_config.h> /* This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so * anyone can use the definitions to implement compatible drivers/servers. */ -/* The ID for virtio console */ -#define VIRTIO_ID_CONSOLE 3 - /* Feature bits */ #define VIRTIO_CONSOLE_F_SIZE 0 /* Does host provide console size? */ diff --git a/include/linux/virtio_ids.h b/include/linux/virtio_ids.h new file mode 100644 index 000000000000..06660c0a78d7 --- /dev/null +++ b/include/linux/virtio_ids.h @@ -0,0 +1,17 @@ +#ifndef _LINUX_VIRTIO_IDS_H +#define _LINUX_VIRTIO_IDS_H +/* + * Virtio IDs + * + * This header is BSD licensed so anyone can use the definitions to implement + * compatible drivers/servers. + */ + +#define VIRTIO_ID_NET 1 /* virtio net */ +#define VIRTIO_ID_BLOCK 2 /* virtio block */ +#define VIRTIO_ID_CONSOLE 3 /* virtio console */ +#define VIRTIO_ID_RNG 4 /* virtio ring */ +#define VIRTIO_ID_BALLOON 5 /* virtio balloon */ +#define VIRTIO_ID_9P 9 /* 9p virtio console */ + +#endif /* _LINUX_VIRTIO_IDS_H */ diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index d8dd539c9f48..085e42298ce5 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -3,12 +3,10 @@ /* This header is BSD licensed so anyone can use the definitions to implement * compatible drivers/servers. */ #include <linux/types.h> +#include <linux/virtio_ids.h> #include <linux/virtio_config.h> #include <linux/if_ether.h> -/* The ID for virtio_net */ -#define VIRTIO_ID_NET 1 - /* The feature bitmap for virtio net */ #define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */ #define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */ diff --git a/include/linux/virtio_rng.h b/include/linux/virtio_rng.h index 1a85dab8a940..c4d5de896f0c 100644 --- a/include/linux/virtio_rng.h +++ b/include/linux/virtio_rng.h @@ -2,9 +2,7 @@ #define _LINUX_VIRTIO_RNG_H /* This header is BSD licensed so anyone can use the definitions to implement * compatible drivers/servers. 
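The virtio_blk command-type comment above separates flag-like values (direction, barrier) from stand-alone commands such as VIRTIO_BLK_T_FLUSH. A tiny userspace illustration of the two request-header types a driver would build under those rules:

	#include <stdio.h>

	#define VIRTIO_BLK_T_IN		0
	#define VIRTIO_BLK_T_OUT	1
	#define VIRTIO_BLK_T_SCSI_CMD	2
	#define VIRTIO_BLK_T_FLUSH	4
	#define VIRTIO_BLK_T_BARRIER	0x80000000

	int main(void)
	{
		/* a barriered write combines the OUT direction with the barrier flag */
		unsigned int barrier_write = VIRTIO_BLK_T_OUT | VIRTIO_BLK_T_BARRIER;

		/* a cache flush is a command of its own, never OR'ed with other bits */
		unsigned int flush = VIRTIO_BLK_T_FLUSH;

		printf("0x%08x 0x%08x\n", barrier_write, flush);
		return 0;
	}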
*/ +#include <linux/virtio_ids.h> #include <linux/virtio_config.h> -/* The ID for virtio_rng */ -#define VIRTIO_ID_RNG 4 - #endif /* _LINUX_VIRTIO_RNG_H */ diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 81a97cf8f0a0..2d0f222388a8 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -166,15 +166,8 @@ static inline unsigned long zone_page_state(struct zone *zone, return x; } -extern unsigned long global_lru_pages(void); - -static inline unsigned long zone_lru_pages(struct zone *zone) -{ - return (zone_page_state(zone, NR_ACTIVE_ANON) - + zone_page_state(zone, NR_ACTIVE_FILE) - + zone_page_state(zone, NR_INACTIVE_ANON) - + zone_page_state(zone, NR_INACTIVE_FILE)); -} +extern unsigned long global_reclaimable_pages(void); +extern unsigned long zone_reclaimable_pages(struct zone *zone); #ifdef CONFIG_NUMA /* @@ -210,11 +203,6 @@ extern void zone_statistics(struct zone *, struct zone *); #endif /* CONFIG_NUMA */ -#define __add_zone_page_state(__z, __i, __d) \ - __mod_zone_page_state(__z, __i, __d) -#define __sub_zone_page_state(__z, __i, __d) \ - __mod_zone_page_state(__z, __i,-(__d)) - #define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d) #define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d)) diff --git a/include/linux/vt.h b/include/linux/vt.h index 7afca0d72139..7ffa11f06232 100644 --- a/include/linux/vt.h +++ b/include/linux/vt.h @@ -70,8 +70,8 @@ struct vt_event { #define VT_EVENT_UNBLANK 0x0004 /* Screen unblank */ #define VT_EVENT_RESIZE 0x0008 /* Resize display */ #define VT_MAX_EVENT 0x000F - unsigned int old; /* Old console */ - unsigned int new; /* New console (if changing) */ + unsigned int oldev; /* Old console */ + unsigned int newev; /* New console (if changing) */ unsigned int pad[4]; /* Padding for expansion */ }; diff --git a/include/linux/wm97xx.h b/include/linux/wm97xx.h index 0c9878123d5f..38e8c4d9289e 100644 --- a/include/linux/wm97xx.h +++ b/include/linux/wm97xx.h @@ -293,6 +293,24 @@ struct wm97xx { u16 suspend_mode; /* PRP in suspend mode */ }; +struct wm97xx_batt_pdata { + int batt_aux; + int temp_aux; + int charge_gpio; + int min_voltage; + int max_voltage; + int batt_div; + int batt_mult; + int temp_div; + int temp_mult; + int batt_tech; + char *batt_name; +}; + +struct wm97xx_pdata { + struct wm97xx_batt_pdata *batt_pdata; /* battery data */ +}; + /* * Codec GPIO access (not supported on WM9705) * This can be used to set/get codec GPIO and Virtual GPIO status. diff --git a/include/linux/wm97xx_batt.h b/include/linux/wm97xx_batt.h index 9681d1ab0e4f..a1d6419c2ff8 100644 --- a/include/linux/wm97xx_batt.h +++ b/include/linux/wm97xx_batt.h @@ -3,22 +3,12 @@ #include <linux/wm97xx.h> -struct wm97xx_batt_info { - int batt_aux; - int temp_aux; - int charge_gpio; - int min_voltage; - int max_voltage; - int batt_div; - int batt_mult; - int temp_div; - int temp_mult; - int batt_tech; - char *batt_name; -}; +#warning This file will be removed soon, use wm97xx.h instead! 
+ +#define wm97xx_batt_info wm97xx_batt_pdata #ifdef CONFIG_BATTERY_WM97XX -void __init wm97xx_bat_set_pdata(struct wm97xx_batt_info *data); +void wm97xx_bat_set_pdata(struct wm97xx_batt_info *data); #else static inline void wm97xx_bat_set_pdata(struct wm97xx_batt_info *data) {} #endif diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 6273fa97b527..cf24c20de9e4 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -94,7 +94,7 @@ struct execute_work { /* * initialize all of a work item in one go * - * NOTE! No point in using "atomic_long_set()": useing a direct + * NOTE! No point in using "atomic_long_set()": using a direct * assignment of the work data initializer allows the compiler * to generate better code. */ @@ -207,6 +207,7 @@ extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, extern void flush_workqueue(struct workqueue_struct *wq); extern void flush_scheduled_work(void); +extern void flush_delayed_work(struct delayed_work *work); extern int schedule_work(struct work_struct *work); extern int schedule_work_on(int cpu, struct work_struct *work); diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 75cf58666ff9..66ebddcff664 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -110,21 +110,20 @@ extern int laptop_mode; extern unsigned long determine_dirtyable_memory(void); extern int dirty_background_ratio_handler(struct ctl_table *table, int write, - struct file *filp, void __user *buffer, size_t *lenp, + void __user *buffer, size_t *lenp, loff_t *ppos); extern int dirty_background_bytes_handler(struct ctl_table *table, int write, - struct file *filp, void __user *buffer, size_t *lenp, + void __user *buffer, size_t *lenp, loff_t *ppos); extern int dirty_ratio_handler(struct ctl_table *table, int write, - struct file *filp, void __user *buffer, size_t *lenp, + void __user *buffer, size_t *lenp, loff_t *ppos); extern int dirty_bytes_handler(struct ctl_table *table, int write, - struct file *filp, void __user *buffer, size_t *lenp, + void __user *buffer, size_t *lenp, loff_t *ppos); struct ctl_table; -struct file; -int dirty_writeback_centisecs_handler(struct ctl_table *, int, struct file *, +int dirty_writeback_centisecs_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); void get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty, diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h index b77c1478c99f..a7fb54808a23 100644 --- a/include/net/9p/9p.h +++ b/include/net/9p/9p.h @@ -38,6 +38,8 @@ * @P9_DEBUG_SLABS: memory management tracing * @P9_DEBUG_FCALL: verbose dump of protocol messages * @P9_DEBUG_FID: fid allocation/deallocation tracking + * @P9_DEBUG_PKT: packet marshalling/unmarshalling + * @P9_DEBUG_FSC: FS-cache tracing * * These flags are passed at mount time to turn on various levels of * verbosity and tracing which will be output to the system logs. 
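The writeback knobs above follow the tree-wide change that drops struct file * from the proc_handler prototype. A hedged sketch of what a sysctl handler looks like under the new signature; my_knob and my_knob_handler are illustrative names, and the corresponding ctl_table entry's .data is assumed to point at my_knob:

	#include <linux/kernel.h>
	#include <linux/sysctl.h>

	static int my_knob;

	static int my_knob_handler(struct ctl_table *table, int write,
				   void __user *buffer, size_t *lenp, loff_t *ppos)
	{
		int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

		if (write && !ret)
			pr_info("my_knob set to %d\n", my_knob);
		return ret;
	}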
@@ -54,6 +56,7 @@ enum p9_debug_flags { P9_DEBUG_FCALL = (1<<8), P9_DEBUG_FID = (1<<9), P9_DEBUG_PKT = (1<<10), + P9_DEBUG_FSC = (1<<11), }; #ifdef CONFIG_NET_9P_DEBUG diff --git a/include/net/9p/client.h b/include/net/9p/client.h index e26812274b75..fb00b329f0d3 100644 --- a/include/net/9p/client.h +++ b/include/net/9p/client.h @@ -159,8 +159,7 @@ struct p9_client { * @qid: the &p9_qid server identifier this handle points to * @iounit: the server reported maximum transaction size for this file * @uid: the numeric uid of the local user who owns this handle - * @aux: transport specific information (unused?) - * @rdir_fpos: tracks offset of file position when reading directory contents + * @rdir: readdir accounting structure (allocated on demand) * @flist: per-client-instance fid tracking * @dlist: per-dentry fid tracking * @@ -174,9 +173,9 @@ struct p9_fid { struct p9_qid qid; u32 iounit; uid_t uid; - void *aux; - int rdir_fpos; + void *rdir; + struct list_head flist; struct list_head dlist; /* list of all fids attached to a dentry */ }; diff --git a/include/net/compat.h b/include/net/compat.h index 5bbf8bf9efea..7c3002832d05 100644 --- a/include/net/compat.h +++ b/include/net/compat.h @@ -40,8 +40,8 @@ extern int put_cmsg_compat(struct msghdr*, int, int, int, void *); extern int cmsghdr_from_user_compat_to_kern(struct msghdr *, struct sock *, unsigned char *, int); -extern int compat_mc_setsockopt(struct sock *, int, int, char __user *, int, - int (*)(struct sock *, int, int, char __user *, int)); +extern int compat_mc_setsockopt(struct sock *, int, int, char __user *, unsigned int, + int (*)(struct sock *, int, int, char __user *, unsigned int)); extern int compat_mc_getsockopt(struct sock *, int, int, char __user *, int __user *, int (*)(struct sock *, int, int, char __user *, int __user *)); diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 03cffd9f64e3..696d6e4ce68a 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -48,13 +48,13 @@ struct inet_connection_sock_af_ops { u16 net_header_len; u16 sockaddr_len; int (*setsockopt)(struct sock *sk, int level, int optname, - char __user *optval, int optlen); + char __user *optval, unsigned int optlen); int (*getsockopt)(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); #ifdef CONFIG_COMPAT int (*compat_setsockopt)(struct sock *sk, int level, int optname, - char __user *optval, int optlen); + char __user *optval, unsigned int optlen); int (*compat_getsockopt)(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); @@ -332,5 +332,5 @@ extern void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr); extern int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); extern int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, int optlen); + char __user *optval, unsigned int optlen); #endif /* _INET_CONNECTION_SOCK_H */ diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h index b63b80fac567..f93ad90a601b 100644 --- a/include/net/inet_timewait_sock.h +++ b/include/net/inet_timewait_sock.h @@ -130,11 +130,11 @@ struct inet_timewait_sock { __u16 tw_num; kmemcheck_bitfield_begin(flags); /* And these are ours. 
*/ - __u8 tw_ipv6only:1, - tw_transparent:1; - /* 14 bits hole, try to pack */ + unsigned int tw_ipv6only : 1, + tw_transparent : 1, + tw_pad : 14, /* 14 bits hole */ + tw_ipv6_offset : 16; kmemcheck_bitfield_end(flags); - __u16 tw_ipv6_offset; unsigned long tw_ttd; struct inet_bind_bucket *tw_tb; struct hlist_node tw_death_node; diff --git a/include/net/ip.h b/include/net/ip.h index 72c36926c26d..2f47e5482b55 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -381,10 +381,10 @@ extern int ip_options_rcv_srr(struct sk_buff *skb); extern void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb); extern int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc); -extern int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, int optlen); +extern int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen); extern int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); extern int compat_ip_setsockopt(struct sock *sk, int level, - int optname, char __user *optval, int optlen); + int optname, char __user *optval, unsigned int optlen); extern int compat_ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); extern int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct sock *)); @@ -399,7 +399,7 @@ extern void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport, * fed into the routing cache should use these handlers. */ int ipv4_doint_and_flush(ctl_table *ctl, int write, - struct file* filp, void __user *buffer, + void __user *buffer, size_t *lenp, loff_t *ppos); int ipv4_doint_and_flush_strategy(ctl_table *table, void __user *oldval, size_t __user *oldlenp, diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index ef91fe924ba4..4d22fabc7719 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -210,7 +210,8 @@ extern struct fib_table *fib_get_table(struct net *net, u32 id); extern const struct nla_policy rtm_ipv4_policy[]; extern void ip_fib_init(void); extern int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif, - struct net_device *dev, __be32 *spec_dst, u32 *itag); + struct net_device *dev, __be32 *spec_dst, + u32 *itag, u32 mark); extern void fib_select_default(struct net *net, const struct flowi *flp, struct fib_result *res); diff --git a/include/net/ipip.h b/include/net/ipip.h index 5d3036fa1511..87acf8f3a155 100644 --- a/include/net/ipip.h +++ b/include/net/ipip.h @@ -12,7 +12,6 @@ struct ip_tunnel struct ip_tunnel *next; struct net_device *dev; - int recursion; /* Depth of hard_start_xmit recursion */ int err_count; /* Number of arrived ICMP errors */ unsigned long err_time; /* Time when the last ICMP error arrived */ @@ -28,18 +27,11 @@ struct ip_tunnel unsigned int prl_count; /* # of entries in PRL */ }; -/* ISATAP: default interval between RS in secondy */ -#define IPTUNNEL_RS_DEFAULT_DELAY (900) - struct ip_tunnel_prl_entry { struct ip_tunnel_prl_entry *next; __be32 addr; u16 flags; - unsigned long rs_delay; - struct timer_list rs_timer; - struct ip_tunnel *tunnel; - spinlock_t lock; }; #define IPTUNNEL_XMIT() do { \ diff --git a/include/net/ipv6.h b/include/net/ipv6.h index ad9a51130254..8c31d8a0c1fe 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -550,7 +550,7 @@ extern int ipv6_find_tlv(struct sk_buff *skb, int offset, int type); extern int ipv6_setsockopt(struct sock *sk, int level, int optname, char __user *optval, - int 
optlen); + unsigned int optlen); extern int ipv6_getsockopt(struct sock *sk, int level, int optname, char __user *optval, @@ -559,7 +559,7 @@ extern int compat_ipv6_setsockopt(struct sock *sk, int level, int optname, char __user *optval, - int optlen); + unsigned int optlen); extern int compat_ipv6_getsockopt(struct sock *sk, int level, int optname, diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 466859b285e1..998c30fc8981 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1283,6 +1283,12 @@ enum ieee80211_filter_flags { * * These flags are used with the ampdu_action() callback in * &struct ieee80211_ops to indicate which action is needed. + * + * Note that drivers MUST be able to deal with a TX aggregation + * session being stopped even before they OK'ed starting it by + * calling ieee80211_start_tx_ba_cb(_irqsafe), because the peer + * might receive the addBA frame and send a delBA right away! + * * @IEEE80211_AMPDU_RX_START: start Rx aggregation * @IEEE80211_AMPDU_RX_STOP: stop Rx aggregation * @IEEE80211_AMPDU_TX_START: start Tx aggregation @@ -1669,6 +1675,8 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw); * to this function and ieee80211_rx_irqsafe() may not be mixed for a * single hardware. * + * Note that right now, this function must be called with softirqs disabled. + * * @hw: the hardware this frame came in on * @skb: the buffer to receive, owned by mac80211 after this call */ diff --git a/include/net/ndisc.h b/include/net/ndisc.h index 1459ed3e2697..f76f22d05721 100644 --- a/include/net/ndisc.h +++ b/include/net/ndisc.h @@ -55,7 +55,6 @@ enum { #include <net/neighbour.h> struct ctl_table; -struct file; struct inet6_dev; struct net_device; struct net_proto_family; @@ -139,7 +138,6 @@ extern int igmp6_event_report(struct sk_buff *skb); #ifdef CONFIG_SYSCTL extern int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, - struct file * filp, void __user *buffer, size_t *lenp, loff_t *ppos); diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index cbdd6284996d..5cf7270e3ffc 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -255,11 +255,9 @@ static inline bool nf_ct_kill(struct nf_conn *ct) } /* These are for NAT. Icky. */ -/* Update TCP window tracking data when NAT mangles the packet */ -extern void nf_conntrack_tcp_update(const struct sk_buff *skb, - unsigned int dataoff, - struct nf_conn *ct, int dir, - s16 offset); +extern s16 (*nf_ct_nat_offset)(const struct nf_conn *ct, + enum ip_conntrack_dir dir, + u32 seq); /* Fake conntrack entry for untracked connections */ extern struct nf_conn nf_conntrack_untracked; diff --git a/include/net/netfilter/nf_nat_helper.h b/include/net/netfilter/nf_nat_helper.h index 237a961f40e1..4222220920a5 100644 --- a/include/net/netfilter/nf_nat_helper.h +++ b/include/net/netfilter/nf_nat_helper.h @@ -32,4 +32,8 @@ extern int (*nf_nat_seq_adjust_hook)(struct sk_buff *skb, * to port ct->master->saved_proto. 
*/ extern void nf_nat_follow_master(struct nf_conn *ct, struct nf_conntrack_expect *this); + +extern s16 nf_nat_get_offset(const struct nf_conn *ct, + enum ip_conntrack_dir dir, + u32 seq); #endif diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 42d00ced5eb8..0a474568b003 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -544,7 +544,7 @@ struct sctp_af { int level, int optname, char __user *optval, - int optlen); + unsigned int optlen); int (*getsockopt) (struct sock *sk, int level, int optname, @@ -554,7 +554,7 @@ struct sctp_af { int level, int optname, char __user *optval, - int optlen); + unsigned int optlen); int (*compat_getsockopt) (struct sock *sk, int level, int optname, @@ -893,7 +893,6 @@ struct sctp_transport { */ /* RTO : The current retransmission timeout value. */ unsigned long rto; - unsigned long last_rto; __u32 rtt; /* This is the most recent RTT. */ @@ -1980,7 +1979,7 @@ void sctp_assoc_set_primary(struct sctp_association *, void sctp_assoc_del_nonprimary_peers(struct sctp_association *, struct sctp_transport *); int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *, - gfp_t); + sctp_scope_t, gfp_t); int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *, struct sctp_cookie*, gfp_t gfp); diff --git a/include/net/sock.h b/include/net/sock.h index 950409dcec3d..9f96394f694e 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -226,12 +226,12 @@ struct sock { #define sk_prot __sk_common.skc_prot #define sk_net __sk_common.skc_net kmemcheck_bitfield_begin(flags); - unsigned char sk_shutdown : 2, - sk_no_check : 2, - sk_userlocks : 4; + unsigned int sk_shutdown : 2, + sk_no_check : 2, + sk_userlocks : 4, + sk_protocol : 8, + sk_type : 16; kmemcheck_bitfield_end(flags); - unsigned char sk_protocol; - unsigned short sk_type; int sk_rcvbuf; socket_lock_t sk_lock; /* @@ -624,7 +624,7 @@ struct proto { void (*shutdown)(struct sock *sk, int how); int (*setsockopt)(struct sock *sk, int level, int optname, char __user *optval, - int optlen); + unsigned int optlen); int (*getsockopt)(struct sock *sk, int level, int optname, char __user *optval, int __user *option); @@ -632,7 +632,7 @@ struct proto { int (*compat_setsockopt)(struct sock *sk, int level, int optname, char __user *optval, - int optlen); + unsigned int optlen); int (*compat_getsockopt)(struct sock *sk, int level, int optname, char __user *optval, @@ -951,7 +951,7 @@ extern void sock_rfree(struct sk_buff *skb); extern int sock_setsockopt(struct socket *sock, int level, int op, char __user *optval, - int optlen); + unsigned int optlen); extern int sock_getsockopt(struct socket *sock, int level, int op, char __user *optval, @@ -993,7 +993,7 @@ extern int sock_no_shutdown(struct socket *, int); extern int sock_no_getsockopt(struct socket *, int , int, char __user *, int __user *); extern int sock_no_setsockopt(struct socket *, int, int, - char __user *, int); + char __user *, unsigned int); extern int sock_no_sendmsg(struct kiocb *, struct socket *, struct msghdr *, size_t); extern int sock_no_recvmsg(struct kiocb *, struct socket *, @@ -1015,11 +1015,11 @@ extern int sock_common_getsockopt(struct socket *sock, int level, int optname, extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags); extern int sock_common_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, int optlen); + char __user *optval, unsigned int optlen); extern int 
compat_sock_common_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen); extern int compat_sock_common_setsockopt(struct socket *sock, int level, - int optname, char __user *optval, int optlen); + int optname, char __user *optval, unsigned int optlen); extern void sk_common_release(struct sock *sk); diff --git a/include/net/tcp.h b/include/net/tcp.h index 56b76027b85e..03a49c703377 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -394,13 +394,13 @@ extern int tcp_getsockopt(struct sock *sk, int level, int __user *optlen); extern int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, - int optlen); + unsigned int optlen); extern int compat_tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); extern int compat_tcp_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, int optlen); + char __user *optval, unsigned int optlen); extern void tcp_set_keepalive(struct sock *sk, int val); extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, diff --git a/include/net/udp.h b/include/net/udp.h index 5fb029f817a3..f98abd2ce709 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -144,7 +144,7 @@ extern unsigned int udp_poll(struct file *file, struct socket *sock, extern int udp_lib_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); extern int udp_lib_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, int optlen, + char __user *optval, unsigned int optlen, int (*push_pending_frames)(struct sock *)); extern struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, diff --git a/include/net/wext.h b/include/net/wext.h index 6d76a39a9c5b..3f2b94de2cfa 100644 --- a/include/net/wext.h +++ b/include/net/wext.h @@ -14,6 +14,7 @@ extern int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cm void __user *arg); extern int compat_wext_handle_ioctl(struct net *net, unsigned int cmd, unsigned long arg); +extern struct iw_statistics *get_wireless_stats(struct net_device *dev); #else static inline int wext_proc_init(struct net *net) { diff --git a/include/pcmcia/cs.h b/include/pcmcia/cs.h index 904468a191ef..afc2bfb9e917 100644 --- a/include/pcmcia/cs.h +++ b/include/pcmcia/cs.h @@ -15,6 +15,10 @@ #ifndef _LINUX_CS_H #define _LINUX_CS_H +#ifdef __KERNEL__ +#include <linux/interrupt.h> +#endif + /* For AccessConfigurationRegister */ typedef struct conf_reg_t { u_char Function; @@ -111,11 +115,9 @@ typedef struct io_req_t { /* For RequestIRQ and ReleaseIRQ */ typedef struct irq_req_t { - u_int Attributes; - u_int AssignedIRQ; - u_int IRQInfo1, IRQInfo2; /* IRQInfo2 is ignored */ - void *Handler; - void *Instance; + u_int Attributes; + u_int AssignedIRQ; + irq_handler_t Handler; } irq_req_t; /* Attributes for RequestIRQ and ReleaseIRQ */ @@ -125,7 +127,7 @@ typedef struct irq_req_t { #define IRQ_TYPE_DYNAMIC_SHARING 0x02 #define IRQ_FORCED_PULSE 0x04 #define IRQ_FIRST_SHARED 0x08 -#define IRQ_HANDLE_PRESENT 0x10 +//#define IRQ_HANDLE_PRESENT 0x10 #define IRQ_PULSE_ALLOCATED 0x100 /* Bits in IRQInfo1 field */ diff --git a/include/pcmcia/cs_types.h b/include/pcmcia/cs_types.h index 315965a37930..f5e3b8386c8f 100644 --- a/include/pcmcia/cs_types.h +++ b/include/pcmcia/cs_types.h @@ -26,8 +26,7 @@ typedef u_int event_t; typedef u_char cisdata_t; typedef u_short page_t; -struct window_t; -typedef struct window_t *window_handle_t; +typedef unsigned long 
window_handle_t; struct region_t; typedef struct region_t *memory_handle_t; diff --git a/include/pcmcia/ds.h b/include/pcmcia/ds.h index a2be80b9a095..d403c12f7978 100644 --- a/include/pcmcia/ds.h +++ b/include/pcmcia/ds.h @@ -34,6 +34,7 @@ struct pcmcia_socket; struct pcmcia_device; struct config_t; +struct net_device; /* dynamic device IDs for PCMCIA device drivers. See * Documentation/pcmcia/driver.txt for details. @@ -137,65 +138,39 @@ struct pcmcia_device { #define to_pcmcia_dev(n) container_of(n, struct pcmcia_device, dev) #define to_pcmcia_drv(n) container_of(n, struct pcmcia_driver, drv) -/* deprecated -- don't use! */ -#define handle_to_dev(handle) (handle->dev) - -/* (deprecated) error reporting by PCMCIA devices. Use dev_printk() - * or dev_dbg() directly in the driver, without referring to pcmcia_error_func() - * and/or pcmcia_error_ret() for those functions will go away soon. - */ -enum service { - AccessConfigurationRegister, AddSocketServices, - AdjustResourceInfo, CheckEraseQueue, CloseMemory, CopyMemory, - DeregisterClient, DeregisterEraseQueue, GetCardServicesInfo, - GetClientInfo, GetConfigurationInfo, GetEventMask, - GetFirstClient, GetFirstPartion, GetFirstRegion, GetFirstTuple, - GetNextClient, GetNextPartition, GetNextRegion, GetNextTuple, - GetStatus, GetTupleData, MapLogSocket, MapLogWindow, MapMemPage, - MapPhySocket, MapPhyWindow, ModifyConfiguration, ModifyWindow, - OpenMemory, ParseTuple, ReadMemory, RegisterClient, - RegisterEraseQueue, RegisterMTD, RegisterTimer, - ReleaseConfiguration, ReleaseExclusive, ReleaseIO, ReleaseIRQ, - ReleaseSocketMask, ReleaseWindow, ReplaceSocketServices, - RequestConfiguration, RequestExclusive, RequestIO, RequestIRQ, - RequestSocketMask, RequestWindow, ResetCard, ReturnSSEntry, - SetEventMask, SetRegion, ValidateCIS, VendorSpecific, - WriteMemory, BindDevice, BindMTD, ReportError, - SuspendCard, ResumeCard, EjectCard, InsertCard, ReplaceCIS, - GetFirstWindow, GetNextWindow, GetMemPage -}; -const char *pcmcia_error_func(int func); -const char *pcmcia_error_ret(int ret); - -#define cs_error(p_dev, func, ret) \ - { \ - dev_printk(KERN_NOTICE, &p_dev->dev, \ - "%s : %s\n", \ - pcmcia_error_func(func), \ - pcmcia_error_ret(ret)); \ - } - -/* CIS access. - * Use the pcmcia_* versions in PCMCIA drivers +/* + * CIS access. + * + * Please use the following functions to access CIS tuples: + * - pcmcia_get_tuple() + * - pcmcia_loop_tuple() + * - pcmcia_get_mac_from_cis() + * + * To parse a tuple_t, pcmcia_parse_tuple() exists. Its interface + * might change in future. */ -int pcmcia_parse_tuple(tuple_t *tuple, cisparse_t *parse); -int pccard_get_first_tuple(struct pcmcia_socket *s, unsigned int function, - tuple_t *tuple); -#define pcmcia_get_first_tuple(p_dev, tuple) \ - pccard_get_first_tuple(p_dev->socket, p_dev->func, tuple) +/* get the very first CIS entry of type @code. Note that buf is pointer + * to u8 *buf; and that you need to kfree(buf) afterwards. 
*/ +size_t pcmcia_get_tuple(struct pcmcia_device *p_dev, cisdata_t code, + u8 **buf); -int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function, - tuple_t *tuple); -#define pcmcia_get_next_tuple(p_dev, tuple) \ - pccard_get_next_tuple(p_dev->socket, p_dev->func, tuple) +/* loop over CIS entries */ +int pcmcia_loop_tuple(struct pcmcia_device *p_dev, cisdata_t code, + int (*loop_tuple) (struct pcmcia_device *p_dev, + tuple_t *tuple, + void *priv_data), + void *priv_data); -int pccard_get_tuple_data(struct pcmcia_socket *s, tuple_t *tuple); -#define pcmcia_get_tuple_data(p_dev, tuple) \ - pccard_get_tuple_data(p_dev->socket, tuple) +/* get the MAC address from CISTPL_FUNCE */ +int pcmcia_get_mac_from_cis(struct pcmcia_device *p_dev, + struct net_device *dev); +/* parse a tuple_t */ +int pcmcia_parse_tuple(tuple_t *tuple, cisparse_t *parse); + /* loop CIS entries for valid configuration */ int pcmcia_loop_config(struct pcmcia_device *p_dev, int (*conf_check) (struct pcmcia_device *p_dev, @@ -221,12 +196,11 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req); int pcmcia_request_configuration(struct pcmcia_device *p_dev, config_req_t *req); -int pcmcia_request_window(struct pcmcia_device **p_dev, win_req_t *req, +int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_handle_t *wh); -int pcmcia_release_window(window_handle_t win); - -int pcmcia_get_mem_page(window_handle_t win, memreq_t *req); -int pcmcia_map_mem_page(window_handle_t win, memreq_t *req); +int pcmcia_release_window(struct pcmcia_device *p_dev, window_handle_t win); +int pcmcia_map_mem_page(struct pcmcia_device *p_dev, window_handle_t win, + memreq_t *req); int pcmcia_modify_configuration(struct pcmcia_device *p_dev, modconf_t *mod); void pcmcia_disable_device(struct pcmcia_device *p_dev); diff --git a/include/pcmcia/ss.h b/include/pcmcia/ss.h index 9a3b49865173..7c23be706f12 100644 --- a/include/pcmcia/ss.h +++ b/include/pcmcia/ss.h @@ -107,15 +107,6 @@ typedef struct io_window_t { struct resource *res; } io_window_t; -#define WINDOW_MAGIC 0xB35C -typedef struct window_t { - u_short magic; - u_short index; - struct pcmcia_device *handle; - struct pcmcia_socket *sock; - pccard_mem_map ctl; -} window_t; - /* Maximum number of IO windows per socket */ #define MAX_IO_WIN 2 @@ -155,7 +146,7 @@ struct pcmcia_socket { u_int Config; } irq; io_window_t io[MAX_IO_WIN]; - window_t win[MAX_WIN]; + pccard_mem_map win[MAX_WIN]; struct list_head cis_cache; size_t fake_cis_len; u8 *fake_cis; @@ -172,7 +163,7 @@ struct pcmcia_socket { u_int irq_mask; u_int map_size; u_int io_offset; - u_char pci_irq; + u_int pci_irq; struct pci_dev * cb_dev; @@ -262,6 +253,8 @@ struct pcmcia_socket { struct device dev; /* data internal to the socket driver */ void *driver_data; + /* status of the card during resume from a system sleep state */ + int resume_status; }; @@ -279,7 +272,9 @@ extern struct pccard_resource_ops pccard_iodyn_ops; extern struct pccard_resource_ops pccard_nonstatic_ops; /* socket drivers are expected to use these callbacks in their .drv struct */ -extern int pcmcia_socket_dev_suspend(struct device *dev, pm_message_t state); +extern int pcmcia_socket_dev_suspend(struct device *dev); +extern void pcmcia_socket_dev_early_resume(struct device *dev); +extern void pcmcia_socket_dev_late_resume(struct device *dev); extern int pcmcia_socket_dev_resume(struct device *dev); /* socket drivers use this callback in their IRQ handler */ diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h index 
938858304300..c8f94e8db69c 100644 --- a/include/rdma/ib_cm.h +++ b/include/rdma/ib_cm.h @@ -482,7 +482,7 @@ int ib_send_cm_rej(struct ib_cm_id *cm_id, * message. * @cm_id: Connection identifier associated with the connection message. * @service_timeout: The lower 5-bits specify the maximum time required for - * the sender to reply to to the connection message. The upper 3-bits + * the sender to reply to the connection message. The upper 3-bits * specify additional control flags. * @private_data: Optional user-defined private data sent with the * message receipt acknowledgement. diff --git a/include/scsi/fc/fc_fc2.h b/include/scsi/fc/fc_fc2.h index cff8a8c22f50..f87777d0d5bd 100644 --- a/include/scsi/fc/fc_fc2.h +++ b/include/scsi/fc/fc_fc2.h @@ -92,8 +92,7 @@ struct fc_esb { __u8 _esb_resvd[4]; __u8 esb_service_params[112]; /* TBD */ __u8 esb_seq_status[8]; /* sequence statuses, 8 bytes each */ -} __attribute__((packed));; - +} __attribute__((packed)); /* * Define expected size for ASSERTs. diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h index 887e57e3e223..a72edd4eceec 100644 --- a/include/scsi/libiscsi.h +++ b/include/scsi/libiscsi.h @@ -303,6 +303,7 @@ struct iscsi_session { int cmds_max; /* size of cmds array */ struct iscsi_task **cmds; /* Original Cmds arr */ struct iscsi_pool cmdpool; /* PDU's pool */ + void *dd_data; /* LLD private data */ }; enum { @@ -363,7 +364,7 @@ extern int iscsi_target_alloc(struct scsi_target *starget); */ extern struct iscsi_cls_session * iscsi_session_setup(struct iscsi_transport *, struct Scsi_Host *shost, - uint16_t, int, uint32_t, unsigned int); + uint16_t, int, int, uint32_t, unsigned int); extern void iscsi_session_teardown(struct iscsi_cls_session *); extern void iscsi_session_recovery_timedout(struct iscsi_cls_session *); extern int iscsi_set_param(struct iscsi_cls_conn *cls_conn, diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h index 084478e14d24..34c46ab5c31b 100644 --- a/include/scsi/scsi.h +++ b/include/scsi/scsi.h @@ -129,6 +129,9 @@ struct scsi_cmnd; #define MI_REPORT_TARGET_PGS 0x0a /* values for maintenance out */ #define MO_SET_TARGET_PGS 0x0a +/* values for variable length command */ +#define READ_32 0x09 +#define WRITE_32 0x0b /* Values for T10/04-262r7 */ #define ATA_16 0x85 /* 16-byte pass-thru */ diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index 3878d1dc7f59..a5e885a111df 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h @@ -229,10 +229,6 @@ enum scsi_prot_operations { /* OS-HBA: Protected, HBA-Target: Protected */ SCSI_PROT_READ_PASS, SCSI_PROT_WRITE_PASS, - - /* OS-HBA: Protected, HBA-Target: Protected, checksum conversion */ - SCSI_PROT_READ_CONVERT, - SCSI_PROT_WRITE_CONVERT, }; static inline void scsi_set_prot_op(struct scsi_cmnd *scmd, unsigned char op) diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 9af48cbf0036..f097ae340bc1 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -145,6 +145,7 @@ struct scsi_device { unsigned retry_hwerror:1; /* Retry HARDWARE_ERROR */ unsigned last_sector_bug:1; /* do not use multisector accesses on SD_LAST_BUGGY_SECTORS */ + unsigned is_visible:1; /* is the device visible in sysfs */ DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */ struct list_head event_list; /* asserted events */ diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index b62a097b3ecb..47941fc5aba7 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -797,24 
+797,24 @@ static inline unsigned int scsi_host_get_prot(struct Scsi_Host *shost) static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type) { - switch (target_type) { - case 1: return shost->prot_capabilities & SHOST_DIF_TYPE1_PROTECTION; - case 2: return shost->prot_capabilities & SHOST_DIF_TYPE2_PROTECTION; - case 3: return shost->prot_capabilities & SHOST_DIF_TYPE3_PROTECTION; - } + static unsigned char cap[] = { 0, + SHOST_DIF_TYPE1_PROTECTION, + SHOST_DIF_TYPE2_PROTECTION, + SHOST_DIF_TYPE3_PROTECTION }; - return 0; + return shost->prot_capabilities & cap[target_type] ? target_type : 0; } static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsigned int target_type) { - switch (target_type) { - case 0: return shost->prot_capabilities & SHOST_DIX_TYPE0_PROTECTION; - case 1: return shost->prot_capabilities & SHOST_DIX_TYPE1_PROTECTION; - case 2: return shost->prot_capabilities & SHOST_DIX_TYPE2_PROTECTION; - case 3: return shost->prot_capabilities & SHOST_DIX_TYPE3_PROTECTION; - } +#if defined(CONFIG_BLK_DEV_INTEGRITY) + static unsigned char cap[] = { SHOST_DIX_TYPE0_PROTECTION, + SHOST_DIX_TYPE1_PROTECTION, + SHOST_DIX_TYPE2_PROTECTION, + SHOST_DIX_TYPE3_PROTECTION }; + return shost->prot_capabilities & cap[target_type]; +#endif return 0; } diff --git a/include/trace/events/block.h b/include/trace/events/block.h index d86af94691c2..00405b5f624a 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h @@ -488,6 +488,39 @@ TRACE_EVENT(block_remap, (unsigned long long)__entry->old_sector) ); +TRACE_EVENT(block_rq_remap, + + TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev, + sector_t from), + + TP_ARGS(q, rq, dev, from), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( sector_t, sector ) + __field( unsigned int, nr_sector ) + __field( dev_t, old_dev ) + __field( sector_t, old_sector ) + __array( char, rwbs, 6 ) + ), + + TP_fast_assign( + __entry->dev = disk_devt(rq->rq_disk); + __entry->sector = blk_rq_pos(rq); + __entry->nr_sector = blk_rq_sectors(rq); + __entry->old_dev = dev; + __entry->old_sector = from; + blk_fill_rwbs_rq(__entry->rwbs, rq); + ), + + TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, + (unsigned long long)__entry->sector, + __entry->nr_sector, + MAJOR(__entry->old_dev), MINOR(__entry->old_dev), + (unsigned long long)__entry->old_sector) +); + #endif /* _TRACE_BLOCK_H */ /* This part must be outside protection */ diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index c1bd8f1e8b94..d09550bf3f95 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -11,6 +11,7 @@ struct ext4_allocation_context; struct ext4_allocation_request; struct ext4_prealloc_space; struct ext4_inode_info; +struct mpage_da_data; #define EXT4_I(inode) (container_of(inode, struct ext4_inode_info, vfs_inode)) @@ -236,6 +237,7 @@ TRACE_EVENT(ext4_da_writepages, __field( char, for_kupdate ) __field( char, for_reclaim ) __field( char, range_cyclic ) + __field( pgoff_t, writeback_index ) ), TP_fast_assign( @@ -249,15 +251,17 @@ TRACE_EVENT(ext4_da_writepages, __entry->for_kupdate = wbc->for_kupdate; __entry->for_reclaim = wbc->for_reclaim; __entry->range_cyclic = wbc->range_cyclic; + __entry->writeback_index = inode->i_mapping->writeback_index; ), - TP_printk("dev %s ino %lu nr_to_write %ld pages_skipped %ld range_start %llu range_end %llu nonblocking %d for_kupdate %d for_reclaim %d range_cyclic %d", 
+ TP_printk("dev %s ino %lu nr_to_write %ld pages_skipped %ld range_start %llu range_end %llu nonblocking %d for_kupdate %d for_reclaim %d range_cyclic %d writeback_index %lu", jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, __entry->nr_to_write, __entry->pages_skipped, __entry->range_start, __entry->range_end, __entry->nonblocking, __entry->for_kupdate, __entry->for_reclaim, - __entry->range_cyclic) + __entry->range_cyclic, + (unsigned long) __entry->writeback_index) ); TRACE_EVENT(ext4_da_write_pages, @@ -309,6 +313,7 @@ TRACE_EVENT(ext4_da_writepages_result, __field( char, encountered_congestion ) __field( char, more_io ) __field( char, no_nrwrite_index_update ) + __field( pgoff_t, writeback_index ) ), TP_fast_assign( @@ -320,14 +325,16 @@ TRACE_EVENT(ext4_da_writepages_result, __entry->encountered_congestion = wbc->encountered_congestion; __entry->more_io = wbc->more_io; __entry->no_nrwrite_index_update = wbc->no_nrwrite_index_update; + __entry->writeback_index = inode->i_mapping->writeback_index; ), - TP_printk("dev %s ino %lu ret %d pages_written %d pages_skipped %ld congestion %d more_io %d no_nrwrite_index_update %d", + TP_printk("dev %s ino %lu ret %d pages_written %d pages_skipped %ld congestion %d more_io %d no_nrwrite_index_update %d writeback_index %lu", jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, __entry->ret, __entry->pages_written, __entry->pages_skipped, __entry->encountered_congestion, __entry->more_io, - __entry->no_nrwrite_index_update) + __entry->no_nrwrite_index_update, + (unsigned long) __entry->writeback_index) ); TRACE_EVENT(ext4_da_write_begin, @@ -737,6 +744,169 @@ TRACE_EVENT(ext4_alloc_da_blocks, __entry->data_blocks, __entry->meta_blocks) ); +TRACE_EVENT(ext4_mballoc_alloc, + TP_PROTO(struct ext4_allocation_context *ac), + + TP_ARGS(ac), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( __u16, found ) + __field( __u16, groups ) + __field( __u16, buddy ) + __field( __u16, flags ) + __field( __u16, tail ) + __field( __u8, cr ) + __field( __u32, orig_logical ) + __field( int, orig_start ) + __field( __u32, orig_group ) + __field( int, orig_len ) + __field( __u32, goal_logical ) + __field( int, goal_start ) + __field( __u32, goal_group ) + __field( int, goal_len ) + __field( __u32, result_logical ) + __field( int, result_start ) + __field( __u32, result_group ) + __field( int, result_len ) + ), + + TP_fast_assign( + __entry->dev = ac->ac_inode->i_sb->s_dev; + __entry->ino = ac->ac_inode->i_ino; + __entry->found = ac->ac_found; + __entry->flags = ac->ac_flags; + __entry->groups = ac->ac_groups_scanned; + __entry->buddy = ac->ac_buddy; + __entry->tail = ac->ac_tail; + __entry->cr = ac->ac_criteria; + __entry->orig_logical = ac->ac_o_ex.fe_logical; + __entry->orig_start = ac->ac_o_ex.fe_start; + __entry->orig_group = ac->ac_o_ex.fe_group; + __entry->orig_len = ac->ac_o_ex.fe_len; + __entry->goal_logical = ac->ac_g_ex.fe_logical; + __entry->goal_start = ac->ac_g_ex.fe_start; + __entry->goal_group = ac->ac_g_ex.fe_group; + __entry->goal_len = ac->ac_g_ex.fe_len; + __entry->result_logical = ac->ac_f_ex.fe_logical; + __entry->result_start = ac->ac_f_ex.fe_start; + __entry->result_group = ac->ac_f_ex.fe_group; + __entry->result_len = ac->ac_f_ex.fe_len; + ), + + TP_printk("dev %s inode %lu orig %u/%d/%u@%u goal %u/%d/%u@%u " + "result %u/%d/%u@%u blks %u grps %u cr %u flags 0x%04x " + "tail %u broken %u", + jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, + __entry->orig_group, 
__entry->orig_start, + __entry->orig_len, __entry->orig_logical, + __entry->goal_group, __entry->goal_start, + __entry->goal_len, __entry->goal_logical, + __entry->result_group, __entry->result_start, + __entry->result_len, __entry->result_logical, + __entry->found, __entry->groups, __entry->cr, + __entry->flags, __entry->tail, + __entry->buddy ? 1 << __entry->buddy : 0) +); + +TRACE_EVENT(ext4_mballoc_prealloc, + TP_PROTO(struct ext4_allocation_context *ac), + + TP_ARGS(ac), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( __u32, orig_logical ) + __field( int, orig_start ) + __field( __u32, orig_group ) + __field( int, orig_len ) + __field( __u32, result_logical ) + __field( int, result_start ) + __field( __u32, result_group ) + __field( int, result_len ) + ), + + TP_fast_assign( + __entry->dev = ac->ac_inode->i_sb->s_dev; + __entry->ino = ac->ac_inode->i_ino; + __entry->orig_logical = ac->ac_o_ex.fe_logical; + __entry->orig_start = ac->ac_o_ex.fe_start; + __entry->orig_group = ac->ac_o_ex.fe_group; + __entry->orig_len = ac->ac_o_ex.fe_len; + __entry->result_logical = ac->ac_b_ex.fe_logical; + __entry->result_start = ac->ac_b_ex.fe_start; + __entry->result_group = ac->ac_b_ex.fe_group; + __entry->result_len = ac->ac_b_ex.fe_len; + ), + + TP_printk("dev %s inode %lu orig %u/%d/%u@%u result %u/%d/%u@%u", + jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, + __entry->orig_group, __entry->orig_start, + __entry->orig_len, __entry->orig_logical, + __entry->result_group, __entry->result_start, + __entry->result_len, __entry->result_logical) +); + +TRACE_EVENT(ext4_mballoc_discard, + TP_PROTO(struct ext4_allocation_context *ac), + + TP_ARGS(ac), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( __u32, result_logical ) + __field( int, result_start ) + __field( __u32, result_group ) + __field( int, result_len ) + ), + + TP_fast_assign( + __entry->dev = ac->ac_inode->i_sb->s_dev; + __entry->ino = ac->ac_inode->i_ino; + __entry->result_logical = ac->ac_b_ex.fe_logical; + __entry->result_start = ac->ac_b_ex.fe_start; + __entry->result_group = ac->ac_b_ex.fe_group; + __entry->result_len = ac->ac_b_ex.fe_len; + ), + + TP_printk("dev %s inode %lu extent %u/%d/%u@%u ", + jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, + __entry->result_group, __entry->result_start, + __entry->result_len, __entry->result_logical) +); + +TRACE_EVENT(ext4_mballoc_free, + TP_PROTO(struct ext4_allocation_context *ac), + + TP_ARGS(ac), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( __u32, result_logical ) + __field( int, result_start ) + __field( __u32, result_group ) + __field( int, result_len ) + ), + + TP_fast_assign( + __entry->dev = ac->ac_inode->i_sb->s_dev; + __entry->ino = ac->ac_inode->i_ino; + __entry->result_logical = ac->ac_b_ex.fe_logical; + __entry->result_start = ac->ac_b_ex.fe_start; + __entry->result_group = ac->ac_b_ex.fe_group; + __entry->result_len = ac->ac_b_ex.fe_len; + ), + + TP_printk("dev %s inode %lu extent %u/%d/%u@%u ", + jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, + __entry->result_group, __entry->result_start, + __entry->result_len, __entry->result_logical) +); + #endif /* _TRACE_EXT4_H */ /* This part must be outside protection */ diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h index b851f0b4701c..3c60b75adb9e 100644 --- a/include/trace/events/jbd2.h +++ b/include/trace/events/jbd2.h @@ -7,6 +7,9 @@ #include <linux/jbd2.h> #include 
<linux/tracepoint.h> +struct transaction_chp_stats_s; +struct transaction_run_stats_s; + TRACE_EVENT(jbd2_checkpoint, TP_PROTO(journal_t *journal, int result), @@ -162,6 +165,81 @@ TRACE_EVENT(jbd2_submit_inode_data, jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino) ); +TRACE_EVENT(jbd2_run_stats, + TP_PROTO(dev_t dev, unsigned long tid, + struct transaction_run_stats_s *stats), + + TP_ARGS(dev, tid, stats), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( unsigned long, tid ) + __field( unsigned long, wait ) + __field( unsigned long, running ) + __field( unsigned long, locked ) + __field( unsigned long, flushing ) + __field( unsigned long, logging ) + __field( __u32, handle_count ) + __field( __u32, blocks ) + __field( __u32, blocks_logged ) + ), + + TP_fast_assign( + __entry->dev = dev; + __entry->tid = tid; + __entry->wait = stats->rs_wait; + __entry->running = stats->rs_running; + __entry->locked = stats->rs_locked; + __entry->flushing = stats->rs_flushing; + __entry->logging = stats->rs_logging; + __entry->handle_count = stats->rs_handle_count; + __entry->blocks = stats->rs_blocks; + __entry->blocks_logged = stats->rs_blocks_logged; + ), + + TP_printk("dev %s tid %lu wait %u running %u locked %u flushing %u " + "logging %u handle_count %u blocks %u blocks_logged %u", + jbd2_dev_to_name(__entry->dev), __entry->tid, + jiffies_to_msecs(__entry->wait), + jiffies_to_msecs(__entry->running), + jiffies_to_msecs(__entry->locked), + jiffies_to_msecs(__entry->flushing), + jiffies_to_msecs(__entry->logging), + __entry->handle_count, __entry->blocks, + __entry->blocks_logged) +); + +TRACE_EVENT(jbd2_checkpoint_stats, + TP_PROTO(dev_t dev, unsigned long tid, + struct transaction_chp_stats_s *stats), + + TP_ARGS(dev, tid, stats), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( unsigned long, tid ) + __field( unsigned long, chp_time ) + __field( __u32, forced_to_close ) + __field( __u32, written ) + __field( __u32, dropped ) + ), + + TP_fast_assign( + __entry->dev = dev; + __entry->tid = tid; + __entry->chp_time = stats->cs_chp_time; + __entry->forced_to_close= stats->cs_forced_to_close; + __entry->written = stats->cs_written; + __entry->dropped = stats->cs_dropped; + ), + + TP_printk("dev %s tid %lu chp_time %u forced_to_close %u " + "written %u dropped %u", + jbd2_dev_to_name(__entry->dev), __entry->tid, + jiffies_to_msecs(__entry->chp_time), + __entry->forced_to_close, __entry->written, __entry->dropped) +); + #endif /* _TRACE_JBD2_H */ /* This part must be outside protection */ diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h index 1493c541f9c4..eaf46bdd18a5 100644 --- a/include/trace/events/kmem.h +++ b/include/trace/events/kmem.h @@ -225,6 +225,169 @@ TRACE_EVENT(kmem_cache_free, TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr) ); + +TRACE_EVENT(mm_page_free_direct, + + TP_PROTO(struct page *page, unsigned int order), + + TP_ARGS(page, order), + + TP_STRUCT__entry( + __field( struct page *, page ) + __field( unsigned int, order ) + ), + + TP_fast_assign( + __entry->page = page; + __entry->order = order; + ), + + TP_printk("page=%p pfn=%lu order=%d", + __entry->page, + page_to_pfn(__entry->page), + __entry->order) +); + +TRACE_EVENT(mm_pagevec_free, + + TP_PROTO(struct page *page, int cold), + + TP_ARGS(page, cold), + + TP_STRUCT__entry( + __field( struct page *, page ) + __field( int, cold ) + ), + + TP_fast_assign( + __entry->page = page; + __entry->cold = cold; + ), + + TP_printk("page=%p pfn=%lu order=0 cold=%d", + 
__entry->page, + page_to_pfn(__entry->page), + __entry->cold) +); + +TRACE_EVENT(mm_page_alloc, + + TP_PROTO(struct page *page, unsigned int order, + gfp_t gfp_flags, int migratetype), + + TP_ARGS(page, order, gfp_flags, migratetype), + + TP_STRUCT__entry( + __field( struct page *, page ) + __field( unsigned int, order ) + __field( gfp_t, gfp_flags ) + __field( int, migratetype ) + ), + + TP_fast_assign( + __entry->page = page; + __entry->order = order; + __entry->gfp_flags = gfp_flags; + __entry->migratetype = migratetype; + ), + + TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s", + __entry->page, + page_to_pfn(__entry->page), + __entry->order, + __entry->migratetype, + show_gfp_flags(__entry->gfp_flags)) +); + +TRACE_EVENT(mm_page_alloc_zone_locked, + + TP_PROTO(struct page *page, unsigned int order, int migratetype), + + TP_ARGS(page, order, migratetype), + + TP_STRUCT__entry( + __field( struct page *, page ) + __field( unsigned int, order ) + __field( int, migratetype ) + ), + + TP_fast_assign( + __entry->page = page; + __entry->order = order; + __entry->migratetype = migratetype; + ), + + TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d", + __entry->page, + page_to_pfn(__entry->page), + __entry->order, + __entry->migratetype, + __entry->order == 0) +); + +TRACE_EVENT(mm_page_pcpu_drain, + + TP_PROTO(struct page *page, int order, int migratetype), + + TP_ARGS(page, order, migratetype), + + TP_STRUCT__entry( + __field( struct page *, page ) + __field( int, order ) + __field( int, migratetype ) + ), + + TP_fast_assign( + __entry->page = page; + __entry->order = order; + __entry->migratetype = migratetype; + ), + + TP_printk("page=%p pfn=%lu order=%d migratetype=%d", + __entry->page, + page_to_pfn(__entry->page), + __entry->order, + __entry->migratetype) +); + +TRACE_EVENT(mm_page_alloc_extfrag, + + TP_PROTO(struct page *page, + int alloc_order, int fallback_order, + int alloc_migratetype, int fallback_migratetype), + + TP_ARGS(page, + alloc_order, fallback_order, + alloc_migratetype, fallback_migratetype), + + TP_STRUCT__entry( + __field( struct page *, page ) + __field( int, alloc_order ) + __field( int, fallback_order ) + __field( int, alloc_migratetype ) + __field( int, fallback_migratetype ) + ), + + TP_fast_assign( + __entry->page = page; + __entry->alloc_order = alloc_order; + __entry->fallback_order = fallback_order; + __entry->alloc_migratetype = alloc_migratetype; + __entry->fallback_migratetype = fallback_migratetype; + ), + + TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d", + __entry->page, + page_to_pfn(__entry->page), + __entry->alloc_order, + __entry->fallback_order, + pageblock_order, + __entry->alloc_migratetype, + __entry->fallback_migratetype, + __entry->fallback_order < pageblock_order, + __entry->alloc_migratetype == __entry->fallback_migratetype) +); + #endif /* _TRACE_KMEM_H */ /* This part must be outside protection */ diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h new file mode 100644 index 000000000000..1844c48d640e --- /dev/null +++ b/include/trace/events/timer.h @@ -0,0 +1,342 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM timer + +#if !defined(_TRACE_TIMER_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_TIMER_H + +#include <linux/tracepoint.h> +#include <linux/hrtimer.h> +#include <linux/timer.h> + +/** + * timer_init - called when the timer is initialized + * @timer: pointer to 
struct timer_list + */ +TRACE_EVENT(timer_init, + + TP_PROTO(struct timer_list *timer), + + TP_ARGS(timer), + + TP_STRUCT__entry( + __field( void *, timer ) + ), + + TP_fast_assign( + __entry->timer = timer; + ), + + TP_printk("timer %p", __entry->timer) +); + +/** + * timer_start - called when the timer is started + * @timer: pointer to struct timer_list + * @expires: the timers expiry time + */ +TRACE_EVENT(timer_start, + + TP_PROTO(struct timer_list *timer, unsigned long expires), + + TP_ARGS(timer, expires), + + TP_STRUCT__entry( + __field( void *, timer ) + __field( void *, function ) + __field( unsigned long, expires ) + __field( unsigned long, now ) + ), + + TP_fast_assign( + __entry->timer = timer; + __entry->function = timer->function; + __entry->expires = expires; + __entry->now = jiffies; + ), + + TP_printk("timer %p: func %pf, expires %lu, timeout %ld", + __entry->timer, __entry->function, __entry->expires, + (long)__entry->expires - __entry->now) +); + +/** + * timer_expire_entry - called immediately before the timer callback + * @timer: pointer to struct timer_list + * + * Allows to determine the timer latency. + */ +TRACE_EVENT(timer_expire_entry, + + TP_PROTO(struct timer_list *timer), + + TP_ARGS(timer), + + TP_STRUCT__entry( + __field( void *, timer ) + __field( unsigned long, now ) + ), + + TP_fast_assign( + __entry->timer = timer; + __entry->now = jiffies; + ), + + TP_printk("timer %p: now %lu", __entry->timer, __entry->now) +); + +/** + * timer_expire_exit - called immediately after the timer callback returns + * @timer: pointer to struct timer_list + * + * When used in combination with the timer_expire_entry tracepoint we can + * determine the runtime of the timer callback function. + * + * NOTE: Do NOT derefernce timer in TP_fast_assign. The pointer might + * be invalid. We solely track the pointer. + */ +TRACE_EVENT(timer_expire_exit, + + TP_PROTO(struct timer_list *timer), + + TP_ARGS(timer), + + TP_STRUCT__entry( + __field(void *, timer ) + ), + + TP_fast_assign( + __entry->timer = timer; + ), + + TP_printk("timer %p", __entry->timer) +); + +/** + * timer_cancel - called when the timer is canceled + * @timer: pointer to struct timer_list + */ +TRACE_EVENT(timer_cancel, + + TP_PROTO(struct timer_list *timer), + + TP_ARGS(timer), + + TP_STRUCT__entry( + __field( void *, timer ) + ), + + TP_fast_assign( + __entry->timer = timer; + ), + + TP_printk("timer %p", __entry->timer) +); + +/** + * hrtimer_init - called when the hrtimer is initialized + * @timer: pointer to struct hrtimer + * @clockid: the hrtimers clock + * @mode: the hrtimers mode + */ +TRACE_EVENT(hrtimer_init, + + TP_PROTO(struct hrtimer *timer, clockid_t clockid, + enum hrtimer_mode mode), + + TP_ARGS(timer, clockid, mode), + + TP_STRUCT__entry( + __field( void *, timer ) + __field( clockid_t, clockid ) + __field( enum hrtimer_mode, mode ) + ), + + TP_fast_assign( + __entry->timer = timer; + __entry->clockid = clockid; + __entry->mode = mode; + ), + + TP_printk("hrtimer %p, clockid %s, mode %s", __entry->timer, + __entry->clockid == CLOCK_REALTIME ? + "CLOCK_REALTIME" : "CLOCK_MONOTONIC", + __entry->mode == HRTIMER_MODE_ABS ? 
+ "HRTIMER_MODE_ABS" : "HRTIMER_MODE_REL") +); + +/** + * hrtimer_start - called when the hrtimer is started + * @timer: pointer to struct hrtimer + */ +TRACE_EVENT(hrtimer_start, + + TP_PROTO(struct hrtimer *timer), + + TP_ARGS(timer), + + TP_STRUCT__entry( + __field( void *, timer ) + __field( void *, function ) + __field( s64, expires ) + __field( s64, softexpires ) + ), + + TP_fast_assign( + __entry->timer = timer; + __entry->function = timer->function; + __entry->expires = hrtimer_get_expires(timer).tv64; + __entry->softexpires = hrtimer_get_softexpires(timer).tv64; + ), + + TP_printk("hrtimer %p, func %pf, expires %llu, softexpires %llu", + __entry->timer, __entry->function, + (unsigned long long)ktime_to_ns((ktime_t) { + .tv64 = __entry->expires }), + (unsigned long long)ktime_to_ns((ktime_t) { + .tv64 = __entry->softexpires })) +); + +/** + * htimmer_expire_entry - called immediately before the hrtimer callback + * @timer: pointer to struct hrtimer + * @now: pointer to variable which contains current time of the + * timers base. + * + * Allows to determine the timer latency. + */ +TRACE_EVENT(hrtimer_expire_entry, + + TP_PROTO(struct hrtimer *timer, ktime_t *now), + + TP_ARGS(timer, now), + + TP_STRUCT__entry( + __field( void *, timer ) + __field( s64, now ) + ), + + TP_fast_assign( + __entry->timer = timer; + __entry->now = now->tv64; + ), + + TP_printk("hrtimer %p, now %llu", __entry->timer, + (unsigned long long)ktime_to_ns((ktime_t) { + .tv64 = __entry->now })) + ); + +/** + * hrtimer_expire_exit - called immediately after the hrtimer callback returns + * @timer: pointer to struct hrtimer + * + * When used in combination with the hrtimer_expire_entry tracepoint we can + * determine the runtime of the callback function. + */ +TRACE_EVENT(hrtimer_expire_exit, + + TP_PROTO(struct hrtimer *timer), + + TP_ARGS(timer), + + TP_STRUCT__entry( + __field( void *, timer ) + ), + + TP_fast_assign( + __entry->timer = timer; + ), + + TP_printk("hrtimer %p", __entry->timer) +); + +/** + * hrtimer_cancel - called when the hrtimer is canceled + * @timer: pointer to struct hrtimer + */ +TRACE_EVENT(hrtimer_cancel, + + TP_PROTO(struct hrtimer *timer), + + TP_ARGS(timer), + + TP_STRUCT__entry( + __field( void *, timer ) + ), + + TP_fast_assign( + __entry->timer = timer; + ), + + TP_printk("hrtimer %p", __entry->timer) +); + +/** + * itimer_state - called when itimer is started or canceled + * @which: name of the interval timer + * @value: the itimers value, itimer is canceled if value->it_value is + * zero, otherwise it is started + * @expires: the itimers expiry time + */ +TRACE_EVENT(itimer_state, + + TP_PROTO(int which, const struct itimerval *const value, + cputime_t expires), + + TP_ARGS(which, value, expires), + + TP_STRUCT__entry( + __field( int, which ) + __field( cputime_t, expires ) + __field( long, value_sec ) + __field( long, value_usec ) + __field( long, interval_sec ) + __field( long, interval_usec ) + ), + + TP_fast_assign( + __entry->which = which; + __entry->expires = expires; + __entry->value_sec = value->it_value.tv_sec; + __entry->value_usec = value->it_value.tv_usec; + __entry->interval_sec = value->it_interval.tv_sec; + __entry->interval_usec = value->it_interval.tv_usec; + ), + + TP_printk("which %d, expires %lu, it_value %lu.%lu, it_interval %lu.%lu", + __entry->which, __entry->expires, + __entry->value_sec, __entry->value_usec, + __entry->interval_sec, __entry->interval_usec) +); + +/** + * itimer_expire - called when itimer expires + * @which: type of the interval timer + 
* @pid: pid of the process which owns the timer + * @now: current time, used to calculate the latency of itimer + */ +TRACE_EVENT(itimer_expire, + + TP_PROTO(int which, struct pid *pid, cputime_t now), + + TP_ARGS(which, pid, now), + + TP_STRUCT__entry( + __field( int , which ) + __field( pid_t, pid ) + __field( cputime_t, now ) + ), + + TP_fast_assign( + __entry->which = which; + __entry->now = now; + __entry->pid = pid_nr(pid); + ), + + TP_printk("which %d, pid %d, now %lu", __entry->which, + (int) __entry->pid, __entry->now) +); + +#endif /* _TRACE_TIMER_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h index fcfd9a1e4b96..e4612dbd7ba6 100644 --- a/include/trace/events/workqueue.h +++ b/include/trace/events/workqueue.h @@ -26,7 +26,7 @@ TRACE_EVENT(workqueue_insertion, __entry->func = work->func; ), - TP_printk("thread=%s:%d func=%pF", __entry->thread_comm, + TP_printk("thread=%s:%d func=%pf", __entry->thread_comm, __entry->thread_pid, __entry->func) ); @@ -48,7 +48,7 @@ TRACE_EVENT(workqueue_execution, __entry->func = work->func; ), - TP_printk("thread=%s:%d func=%pF", __entry->thread_comm, + TP_printk("thread=%s:%d func=%pf", __entry->thread_comm, __entry->thread_pid, __entry->func) ); diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index a0361cb69769..cc0d9667e182 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -378,7 +378,7 @@ static inline int ftrace_get_offsets_##call( \ #ifdef CONFIG_EVENT_PROFILE /* - * Generate the functions needed for tracepoint perf_counter support. + * Generate the functions needed for tracepoint perf_event support. * * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later * @@ -644,7 +644,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ * { * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets; * struct ftrace_event_call *event_call = &event_<call>; - * extern void perf_tpcounter_event(int, u64, u64, void *, int); + * extern void perf_tp_event(int, u64, u64, void *, int); * struct ftrace_raw_##call *entry; * u64 __addr = 0, __count = 1; * unsigned long irq_flags; @@ -690,7 +690,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ * * <assign> <- affect our values * - * perf_tpcounter_event(event_call->id, __addr, __count, entry, + * perf_tp_event(event_call->id, __addr, __count, entry, * __entry_size); <- submit them to perf counter * * } @@ -710,7 +710,7 @@ static void ftrace_profile_##call(proto) \ { \ struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ struct ftrace_event_call *event_call = &event_##call; \ - extern void perf_tpcounter_event(int, u64, u64, void *, int); \ + extern void perf_tp_event(int, u64, u64, void *, int); \ struct ftrace_raw_##call *entry; \ u64 __addr = 0, __count = 1; \ unsigned long irq_flags; \ @@ -755,7 +755,7 @@ static void ftrace_profile_##call(proto) \ \ { assign; } \ \ - perf_tpcounter_event(event_call->id, __addr, __count, entry, \ + perf_tp_event(event_call->id, __addr, __count, entry, \ __entry_size); \ \ end: \ diff --git a/include/video/da8xx-fb.h b/include/video/da8xx-fb.h new file mode 100644 index 000000000000..c051a50ed528 --- /dev/null +++ b/include/video/da8xx-fb.h @@ -0,0 +1,103 @@ +/* + * Header file for TI DA8XX LCD controller platform data. + * + * Copyright (C) 2008-2009 MontaVista Software Inc. 
+ * Copyright (C) 2008-2009 Texas Instruments Inc + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#ifndef DA8XX_FB_H +#define DA8XX_FB_H + +enum panel_type { + QVGA = 0 +}; + +enum panel_shade { + MONOCHROME = 0, + COLOR_ACTIVE, + COLOR_PASSIVE, +}; + +enum raster_load_mode { + LOAD_DATA = 1, + LOAD_PALETTE, +}; + +struct display_panel { + enum panel_type panel_type; /* QVGA */ + int max_bpp; + int min_bpp; + enum panel_shade panel_shade; +}; + +struct da8xx_lcdc_platform_data { + const char manu_name[10]; + void *controller_data; + const char type[25]; +}; + +struct lcd_ctrl_config { + const struct display_panel *p_disp_panel; + + /* AC Bias Pin Frequency */ + int ac_bias; + + /* AC Bias Pin Transitions per Interrupt */ + int ac_bias_intrpt; + + /* DMA burst size */ + int dma_burst_sz; + + /* Bits per pixel */ + int bpp; + + /* FIFO DMA Request Delay */ + int fdd; + + /* TFT Alternative Signal Mapping (Only for active) */ + unsigned char tft_alt_mode; + + /* 12 Bit Per Pixel (5-6-5) Mode (Only for passive) */ + unsigned char stn_565_mode; + + /* Mono 8-bit Mode: 1=D0-D7 or 0=D0-D3 */ + unsigned char mono_8bit_mode; + + /* Invert line clock */ + unsigned char invert_line_clock; + + /* Invert frame clock */ + unsigned char invert_frm_clock; + + /* Horizontal and Vertical Sync Edge: 0=rising 1=falling */ + unsigned char sync_edge; + + /* Horizontal and Vertical Sync: Control: 0=ignore */ + unsigned char sync_ctrl; + + /* Raster Data Order Select: 1=Most-to-least 0=Least-to-most */ + unsigned char raster_order; +}; + +struct lcd_sync_arg { + int back_porch; + int front_porch; + int pulse_width; +}; + +/* ioctls */ +#define FBIOGET_CONTRAST _IOR('F', 1, int) +#define FBIOPUT_CONTRAST _IOW('F', 2, int) +#define FBIGET_BRIGHTNESS _IOR('F', 3, int) +#define FBIPUT_BRIGHTNESS _IOW('F', 3, int) +#define FBIGET_COLOR _IOR('F', 5, int) +#define FBIPUT_COLOR _IOW('F', 6, int) +#define FBIPUT_HSYNC _IOW('F', 9, int) +#define FBIPUT_VSYNC _IOW('F', 10, int) + +#endif /* ifndef DA8XX_FB_H */ + |
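
As a hedged sketch of how the structures declared in this new header are meant to be filled in, a hypothetical board file might describe a QVGA panel as below. Every identifier and value here (example_panel, example_lcd_cfg, example_lcdc_pdata, the "example" strings, and all timing numbers) is a placeholder chosen for illustration, not real panel data; the only interface assumed is the header added above.

#include <video/da8xx-fb.h>

/* hypothetical panel description using the fields of struct display_panel */
static const struct display_panel example_panel = {
	.panel_type	= QVGA,
	.max_bpp	= 16,
	.min_bpp	= 16,
	.panel_shade	= COLOR_ACTIVE,
};

/* hypothetical controller configuration; the numbers are placeholders */
static struct lcd_ctrl_config example_lcd_cfg = {
	.p_disp_panel	= &example_panel,
	.ac_bias	= 255,
	.ac_bias_intrpt	= 0,
	.dma_burst_sz	= 16,
	.bpp		= 16,
	.fdd		= 255,
	.tft_alt_mode	= 0,
	.sync_edge	= 0,
	.sync_ctrl	= 1,
	.raster_order	= 0,
};

/* hypothetical platform data handed to the LCDC device by board code */
static struct da8xx_lcdc_platform_data example_lcdc_pdata = {
	.manu_name	 = "example",		/* fits the 10-byte field */
	.controller_data = &example_lcd_cfg,
	.type		 = "example_qvga",	/* panel identifier string */
};

How this platform data is registered with the LCD controller device is board-specific and outside the scope of this header.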