author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2012-02-10 10:58:25 -0800
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2012-02-10 10:58:25 -0800
commit     b91867f2ee5c84b550f95ce54c91b180f70f48cb (patch)
tree       bd7a5be4484d539af13e6b345320455e989612cf /drivers/staging
parent     b05ee6bf9e6c7acc38dca1466b63bb24ae5df6f3 (diff)
parent     9196dc1129fbb3ecf93027224a6bdbc86d086e3a (diff)
Merge tag 'staging-3.3-rc3' into staging-next
This was done to resolve some merge issues with the following files that had
changed in both branches:
  drivers/staging/rtl8712/rtl871x_sta_mgt.c
  drivers/staging/tidspbridge/rmgr/drv_interface.c
  drivers/staging/zcache/zcache-main.c

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/staging')
-rw-r--r--  drivers/staging/Kconfig | 4
-rw-r--r--  drivers/staging/Makefile | 3
-rw-r--r--  drivers/staging/android/Kconfig | 26
-rw-r--r--  drivers/staging/android/Makefile | 2
-rw-r--r--  drivers/staging/android/TODO | 2
-rw-r--r--  drivers/staging/android/alarm-dev.c | 297
-rw-r--r--  drivers/staging/android/alarm.c | 601
-rw-r--r--  drivers/staging/android/android_alarm.h | 121
-rw-r--r--  drivers/staging/android/ashmem.c | 4
-rw-r--r--  drivers/staging/android/binder.c | 2
-rw-r--r--  drivers/staging/android/logger.c | 73
-rw-r--r--  drivers/staging/android/lowmemorykiller.c | 30
-rw-r--r--  drivers/staging/android/ram_console.c | 7
-rw-r--r--  drivers/staging/android/timed_gpio.c | 6
-rw-r--r--  drivers/staging/bcm/Bcmchar.c | 41
-rw-r--r--  drivers/staging/bcm/CmHost.c | 3094
-rw-r--r--  drivers/staging/bcm/led_control.h | 74
-rw-r--r--  drivers/staging/comedi/Kconfig | 5
-rw-r--r--  drivers/staging/comedi/drivers/adv_pci_dio.c | 29
-rw-r--r--  drivers/staging/comedi/drivers/dt2801.c | 12
-rw-r--r--  drivers/staging/comedi/drivers/dt9812.c | 4
-rw-r--r--  drivers/staging/comedi/drivers/me4000.c | 12
-rw-r--r--  drivers/staging/comedi/drivers/ni_pcidio.c | 61
-rw-r--r--  drivers/staging/comedi/drivers/ni_pcimio.c | 27
-rw-r--r--  drivers/staging/crystalhd/crystalhd_hw.c | 3
-rw-r--r--  drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c | 6
-rw-r--r--  drivers/staging/hv/storvsc_drv.c | 1020
-rw-r--r--  drivers/staging/iio/Makefile | 2
-rw-r--r--  drivers/staging/iio/accel/adis16201_ring.c | 2
-rw-r--r--  drivers/staging/iio/accel/adis16203_ring.c | 2
-rw-r--r--  drivers/staging/iio/accel/adis16204_ring.c | 2
-rw-r--r--  drivers/staging/iio/accel/adis16209_ring.c | 2
-rw-r--r--  drivers/staging/iio/accel/adis16240_ring.c | 2
-rw-r--r--  drivers/staging/iio/accel/lis3l02dq.h | 2
-rw-r--r--  drivers/staging/iio/accel/lis3l02dq_ring.c | 2
-rw-r--r--  drivers/staging/iio/adc/ad7192.c | 45
-rw-r--r--  drivers/staging/iio/adc/ad7298_ring.c | 3
-rw-r--r--  drivers/staging/iio/adc/ad7476_ring.c | 2
-rw-r--r--  drivers/staging/iio/adc/ad7606_core.c | 83
-rw-r--r--  drivers/staging/iio/adc/ad7606_ring.c | 2
-rw-r--r--  drivers/staging/iio/adc/ad7793.c | 2
-rw-r--r--  drivers/staging/iio/adc/ad7887_ring.c | 2
-rw-r--r--  drivers/staging/iio/adc/ad799x_ring.c | 2
-rw-r--r--  drivers/staging/iio/adc/adt7310.c | 21
-rw-r--r--  drivers/staging/iio/adc/adt7410.c | 21
-rw-r--r--  drivers/staging/iio/adc/max1363_ring.c | 2
-rw-r--r--  drivers/staging/iio/dac/ad5446.c | 33
-rw-r--r--  drivers/staging/iio/dds/ad9834.c | 53
-rw-r--r--  drivers/staging/iio/gyro/adis16260_ring.c | 2
-rw-r--r--  drivers/staging/iio/iio_core.h | 4
-rw-r--r--  drivers/staging/iio/iio_simple_dummy_buffer.c | 2
-rw-r--r--  drivers/staging/iio/impedance-analyzer/ad5933.c | 3
-rw-r--r--  drivers/staging/iio/imu/adis16400_ring.c | 2
-rw-r--r--  drivers/staging/iio/industrialio-core.c | 458
-rw-r--r--  drivers/staging/iio/industrialio-event.c | 454
-rw-r--r--  drivers/staging/iio/kfifo_buf.c | 46
-rw-r--r--  drivers/staging/iio/kfifo_buf.h | 2
-rw-r--r--  drivers/staging/iio/light/isl29018.c | 7
-rw-r--r--  drivers/staging/iio/magnetometer/ak8975.c | 8
-rw-r--r--  drivers/staging/iio/meter/ade7758_ring.c | 2
-rw-r--r--  drivers/staging/iio/ring_sw.c | 22
-rw-r--r--  drivers/staging/iio/ring_sw.h | 5
-rw-r--r--  drivers/staging/line6/capture.c | 54
-rw-r--r--  drivers/staging/line6/capture.h | 2
-rw-r--r--  drivers/staging/line6/driver.c | 2
-rw-r--r--  drivers/staging/line6/pcm.c | 109
-rw-r--r--  drivers/staging/line6/pcm.h | 167
-rw-r--r--  drivers/staging/line6/playback.c | 68
-rw-r--r--  drivers/staging/line6/playback.h | 2
-rw-r--r--  drivers/staging/line6/toneport.c | 12
-rw-r--r--  drivers/staging/line6/usbdefs.h | 44
-rw-r--r--  drivers/staging/media/easycap/easycap_main.c | 1
-rw-r--r--  drivers/staging/mei/TODO | 3
-rw-r--r--  drivers/staging/mei/hw.h | 2
-rw-r--r--  drivers/staging/mei/init.c | 2
-rw-r--r--  drivers/staging/mei/interface.c | 25
-rw-r--r--  drivers/staging/mei/interface.h | 5
-rw-r--r--  drivers/staging/mei/interrupt.c | 20
-rw-r--r--  drivers/staging/mei/iorw.c | 2
-rw-r--r--  drivers/staging/mei/main.c | 2
-rw-r--r--  drivers/staging/mei/mei-amt-version.c | 479
-rw-r--r--  drivers/staging/mei/mei.h | 125
-rw-r--r--  drivers/staging/mei/mei_dev.h | 8
-rw-r--r--  drivers/staging/mei/mei_version.h | 2
-rw-r--r--  drivers/staging/mei/wd.c | 2
-rw-r--r--  drivers/staging/nvec/Kconfig | 6
-rw-r--r--  drivers/staging/nvec/nvec.c | 19
-rw-r--r--  drivers/staging/nvec/nvec_ps2.c | 53
-rw-r--r--  drivers/staging/omapdrm/omap_gem_helpers.c | 2
-rw-r--r--  drivers/staging/quickstart/quickstart.c | 365
-rw-r--r--  drivers/staging/rtl8192e/rtllib_rx.c | 2
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c | 2
-rw-r--r--  drivers/staging/rtl8712/Kconfig | 7
-rw-r--r--  drivers/staging/rtl8712/drv_types.h | 1
-rw-r--r--  drivers/staging/rtl8712/os_intfs.c | 1
-rw-r--r--  drivers/staging/rtl8712/osdep_service.h | 17
-rw-r--r--  drivers/staging/rtl8712/rtl8712_recv.c | 2
-rw-r--r--  drivers/staging/rtl8712/rtl871x_io.c | 1
-rw-r--r--  drivers/staging/rtl8712/rtl871x_io.h | 1
-rw-r--r--  drivers/staging/rtl8712/rtl871x_pwrctrl.c | 11
-rw-r--r--  drivers/staging/rtl8712/rtl871x_pwrctrl.h | 1
-rw-r--r--  drivers/staging/rtl8712/rtl871x_recv.c | 1
-rw-r--r--  drivers/staging/rtl8712/rtl871x_recv.h | 3
-rw-r--r--  drivers/staging/rtl8712/rtl871x_sta_mgt.c | 4
-rw-r--r--  drivers/staging/rtl8712/rtl871x_xmit.c | 3
-rw-r--r--  drivers/staging/rtl8712/rtl871x_xmit.h | 3
-rw-r--r--  drivers/staging/rtl8712/sta_info.h | 4
-rw-r--r--  drivers/staging/rtl8712/usb_intf.c | 1
-rw-r--r--  drivers/staging/rts5139/ms.h | 4
-rw-r--r--  drivers/staging/rts5139/rts51x_chip.c | 14
-rw-r--r--  drivers/staging/rts5139/rts51x_chip.h | 6
-rw-r--r--  drivers/staging/rts5139/rts51x_fop.h | 2
-rw-r--r--  drivers/staging/rts5139/rts51x_transport.c | 2
-rw-r--r--  drivers/staging/rts5139/rts51x_transport.h | 2
-rw-r--r--  drivers/staging/rts5139/sd_cprm.c | 2
-rw-r--r--  drivers/staging/sbe-2t3e3/intr.c | 2
-rw-r--r--  drivers/staging/sep/Kconfig | 3
-rw-r--r--  drivers/staging/sep/Makefile | 5
-rw-r--r--  drivers/staging/sep/TODO | 5
-rw-r--r--  drivers/staging/sep/sep_crypto.c | 4054
-rw-r--r--  drivers/staging/sep/sep_crypto.h | 359
-rw-r--r--  drivers/staging/sep/sep_dev.h | 98
-rw-r--r--  drivers/staging/sep/sep_driver.c | 2932
-rw-r--r--  drivers/staging/sep/sep_driver_api.h | 293
-rw-r--r--  drivers/staging/sep/sep_driver_config.h | 79
-rw-r--r--  drivers/staging/sep/sep_driver_hw_defs.h | 185
-rw-r--r--  drivers/staging/sep/sep_main.c | 4517
-rw-r--r--  drivers/staging/sep/sep_trace_events.h | 188
-rw-r--r--  drivers/staging/telephony/Kconfig | 47
-rw-r--r--  drivers/staging/telephony/Makefile | 7
-rw-r--r--  drivers/staging/telephony/TODO | 10
-rw-r--r--  drivers/staging/telephony/ixj-ver.h | 4
-rw-r--r--  drivers/staging/telephony/ixj.c | 10552
-rw-r--r--  drivers/staging/telephony/ixj.h | 1322
-rw-r--r--  drivers/staging/telephony/ixj_pcmcia.c | 187
-rw-r--r--  drivers/staging/telephony/phonedev.c | 167
-rw-r--r--  drivers/staging/tidspbridge/Kconfig | 6
-rw-r--r--  drivers/staging/tidspbridge/Makefile | 4
-rw-r--r--  drivers/staging/tidspbridge/core/chnl_sm.c | 34
-rw-r--r--  drivers/staging/tidspbridge/core/dsp-clock.c | 3
-rw-r--r--  drivers/staging/tidspbridge/core/io_sm.c | 29
-rw-r--r--  drivers/staging/tidspbridge/core/msg_sm.c | 3
-rw-r--r--  drivers/staging/tidspbridge/core/tiomap3430.c | 9
-rw-r--r--  drivers/staging/tidspbridge/core/tiomap3430_pwr.c | 1
-rw-r--r--  drivers/staging/tidspbridge/core/tiomap_io.c | 18
-rw-r--r--  drivers/staging/tidspbridge/gen/uuidutil.c | 7
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/dbc.h | 46
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/io_sm.h | 2
-rw-r--r--  drivers/staging/tidspbridge/pmgr/chnl.c | 19
-rw-r--r--  drivers/staging/tidspbridge/pmgr/cmm.c | 68
-rw-r--r--  drivers/staging/tidspbridge/pmgr/cod.c | 74
-rw-r--r--  drivers/staging/tidspbridge/pmgr/dbll.c | 113
-rw-r--r--  drivers/staging/tidspbridge/pmgr/dev.c | 132
-rw-r--r--  drivers/staging/tidspbridge/pmgr/dmm.c | 14
-rw-r--r--  drivers/staging/tidspbridge/pmgr/dspapi.c | 7
-rw-r--r--  drivers/staging/tidspbridge/pmgr/io.c | 17
-rw-r--r--  drivers/staging/tidspbridge/pmgr/msg.c | 17
-rw-r--r--  drivers/staging/tidspbridge/rmgr/dbdcd.c | 88
-rw-r--r--  drivers/staging/tidspbridge/rmgr/disp.c | 44
-rw-r--r--  drivers/staging/tidspbridge/rmgr/drv.c | 48
-rw-r--r--  drivers/staging/tidspbridge/rmgr/drv_interface.c | 357
-rw-r--r--  drivers/staging/tidspbridge/rmgr/drv_interface.h | 28
-rw-r--r--  drivers/staging/tidspbridge/rmgr/dspdrv.c | 5
-rw-r--r--  drivers/staging/tidspbridge/rmgr/mgr.c | 36
-rw-r--r--  drivers/staging/tidspbridge/rmgr/nldr.c | 71
-rw-r--r--  drivers/staging/tidspbridge/rmgr/node.c | 98
-rw-r--r--  drivers/staging/tidspbridge/rmgr/proc.c | 91
-rw-r--r--  drivers/staging/tidspbridge/rmgr/rmm.c | 36
-rw-r--r--  drivers/staging/tidspbridge/rmgr/strm.c | 86
-rw-r--r--  drivers/staging/usbip/stub.h | 1
-rw-r--r--  drivers/staging/usbip/stub_rx.c | 9
-rw-r--r--  drivers/staging/usbip/usbip_common.c | 11
-rw-r--r--  drivers/staging/usbip/usbip_common.h | 2
-rw-r--r--  drivers/staging/usbip/vhci_hcd.c | 39
-rw-r--r--  drivers/staging/usbip/vhci_rx.c | 3
-rw-r--r--  drivers/staging/vme/vme.h | 2
-rw-r--r--  drivers/staging/vt6655/ioctl.c | 23
-rw-r--r--  drivers/staging/vt6656/iwctl.c | 230
-rw-r--r--  drivers/staging/vt6656/iwctl.h | 13
-rw-r--r--  drivers/staging/vt6656/main_usb.c | 13
-rw-r--r--  drivers/staging/vt6656/wpactl.c | 937
-rw-r--r--  drivers/staging/wlan-ng/prism2mgmt.c | 1
-rw-r--r--  drivers/staging/xgifb/XGI_main.h | 78
-rw-r--r--  drivers/staging/xgifb/XGI_main_26.c | 208
-rw-r--r--  drivers/staging/xgifb/XGIfb.h | 2
-rw-r--r--  drivers/staging/xgifb/vb_def.h | 178
-rw-r--r--  drivers/staging/xgifb/vb_init.c | 20
-rw-r--r--  drivers/staging/xgifb/vb_setmode.c | 836
-rw-r--r--  drivers/staging/xgifb/vb_struct.h | 79
-rw-r--r--  drivers/staging/xgifb/vb_table.h | 346
-rw-r--r--  drivers/staging/xgifb/vgatypes.h | 9
-rw-r--r--  drivers/staging/zcache/Kconfig | 9
-rw-r--r--  drivers/staging/zcache/zcache-main.c | 231
-rw-r--r--  drivers/staging/zram/Kconfig | 6
-rw-r--r--  drivers/staging/zram/Makefile | 1
-rw-r--r--  drivers/staging/zram/xvmalloc.c | 510
-rw-r--r--  drivers/staging/zram/xvmalloc.h | 30
-rw-r--r--  drivers/staging/zram/xvmalloc_int.h | 95
-rw-r--r--  drivers/staging/zram/zram_drv.c | 89
-rw-r--r--  drivers/staging/zram/zram_drv.h | 10
-rw-r--r--  drivers/staging/zram/zram_sysfs.c | 2
-rw-r--r--  drivers/staging/zsmalloc/Kconfig | 11
-rw-r--r--  drivers/staging/zsmalloc/Makefile | 3
-rw-r--r--  drivers/staging/zsmalloc/zsmalloc-main.c | 756
-rw-r--r--  drivers/staging/zsmalloc/zsmalloc.h | 31
-rw-r--r--  drivers/staging/zsmalloc/zsmalloc_int.h | 126
206 files changed, 29190 insertions, 10937 deletions
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 9e6347249783..fae2e242025c 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -88,6 +88,8 @@ source "drivers/staging/zram/Kconfig"
source "drivers/staging/zcache/Kconfig"
+source "drivers/staging/zsmalloc/Kconfig"
+
source "drivers/staging/wlags49_h2/Kconfig"
source "drivers/staging/wlags49_h25/Kconfig"
@@ -128,4 +130,6 @@ source "drivers/staging/omapdrm/Kconfig"
source "drivers/staging/android/Kconfig"
+source "drivers/staging/telephony/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 943e14830753..84a6db25cc82 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -34,8 +34,8 @@ obj-$(CONFIG_VME_BUS) += vme/
obj-$(CONFIG_DX_SEP) += sep/
obj-$(CONFIG_IIO) += iio/
obj-$(CONFIG_ZRAM) += zram/
-obj-$(CONFIG_XVMALLOC) += zram/
obj-$(CONFIG_ZCACHE) += zcache/
+obj-$(CONFIG_ZSMALLOC) += zsmalloc/
obj-$(CONFIG_WLAGS49_H2) += wlags49_h2/
obj-$(CONFIG_WLAGS49_H25) += wlags49_h25/
obj-$(CONFIG_FB_SM7XX) += sm7xx/
@@ -55,3 +55,4 @@ obj-$(CONFIG_INTEL_MEI) += mei/
obj-$(CONFIG_MFD_NVEC) += nvec/
obj-$(CONFIG_DRM_OMAP) += omapdrm/
obj-$(CONFIG_ANDROID) += android/
+obj-$(CONFIG_PHONE) += telephony/
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index fef3580ce8de..f3b7c759bcfd 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -102,6 +102,32 @@ config ANDROID_LOW_MEMORY_KILLER
source "drivers/staging/android/switch/Kconfig"
+config ANDROID_INTF_ALARM
+ bool "Android alarm driver"
+ depends on RTC_CLASS
+ default n
+ help
+ Provides non-wakeup and rtc backed wakeup alarms based on rtc or
+ elapsed realtime, and a non-wakeup alarm on the monotonic clock.
+ Also provides an interface to set the wall time which must be used
+ for elapsed realtime to work.
+
+config ANDROID_INTF_ALARM_DEV
+ bool "Android alarm device"
+ depends on ANDROID_INTF_ALARM
+ default y
+ help
+ Exports the alarm interface to user-space.
+
+config ANDROID_ALARM_OLDDRV_COMPAT
+ bool "Android Alarm compatability with old drivers"
+ depends on ANDROID_INTF_ALARM
+ default n
+ help
+ Provides preprocessor alias to aid compatibility with
+ older out-of-tree drivers that use the Android Alarm
+ in-kernel API. This will be removed eventually.
+
endif # if ANDROID
endmenu
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index 5fcc24ffdd58..785f26999108 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -6,3 +6,5 @@ obj-$(CONFIG_ANDROID_TIMED_OUTPUT) += timed_output.o
obj-$(CONFIG_ANDROID_TIMED_GPIO) += timed_gpio.o
obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o
obj-$(CONFIG_ANDROID_SWITCH) += switch/
+obj-$(CONFIG_ANDROID_INTF_ALARM) += alarm.o
+obj-$(CONFIG_ANDROID_INTF_ALARM_DEV) += alarm-dev.o
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
index e59c5be4be2b..b15fb0d6b152 100644
--- a/drivers/staging/android/TODO
+++ b/drivers/staging/android/TODO
@@ -3,7 +3,7 @@ TODO:
- sparse fixes
- rename files to be not so "generic"
- make sure things build as modules properly
- - add proper arch dependancies as needed
+ - add proper arch dependencies as needed
- audit userspace interfaces to make sure they are sane
Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
diff --git a/drivers/staging/android/alarm-dev.c b/drivers/staging/android/alarm-dev.c
new file mode 100644
index 000000000000..03efb34cbe2e
--- /dev/null
+++ b/drivers/staging/android/alarm-dev.c
@@ -0,0 +1,297 @@
+/* drivers/rtc/alarm-dev.c
+ *
+ * Copyright (C) 2007-2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/time.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include "android_alarm.h"
+
+/* XXX - Hack out wakelocks, while they are out of tree */
+struct wake_lock {
+ int i;
+};
+#define wake_lock(x)
+#define wake_lock_timeout(x, y)
+#define wake_unlock(x)
+#define WAKE_LOCK_SUSPEND 0
+#define wake_lock_init(x, y, z) ((x)->i = 1)
+#define wake_lock_destroy(x)
+
+#define ANDROID_ALARM_PRINT_INFO (1U << 0)
+#define ANDROID_ALARM_PRINT_IO (1U << 1)
+#define ANDROID_ALARM_PRINT_INT (1U << 2)
+
+
+static int debug_mask = ANDROID_ALARM_PRINT_INFO;
+module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define pr_alarm(debug_level_mask, args...) \
+ do { \
+ if (debug_mask & ANDROID_ALARM_PRINT_##debug_level_mask) { \
+ pr_info(args); \
+ } \
+ } while (0)
+
+#define ANDROID_ALARM_WAKEUP_MASK ( \
+ ANDROID_ALARM_RTC_WAKEUP_MASK | \
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK)
+
+/* support old userspace code */
+#define ANDROID_ALARM_SET_OLD _IOW('a', 2, time_t) /* set alarm */
+#define ANDROID_ALARM_SET_AND_WAIT_OLD _IOW('a', 3, time_t)
+
+static int alarm_opened;
+static DEFINE_SPINLOCK(alarm_slock);
+static struct wake_lock alarm_wake_lock;
+static DECLARE_WAIT_QUEUE_HEAD(alarm_wait_queue);
+static uint32_t alarm_pending;
+static uint32_t alarm_enabled;
+static uint32_t wait_pending;
+
+static struct android_alarm alarms[ANDROID_ALARM_TYPE_COUNT];
+
+static long alarm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int rv = 0;
+ unsigned long flags;
+ struct timespec new_alarm_time;
+ struct timespec new_rtc_time;
+ struct timespec tmp_time;
+ enum android_alarm_type alarm_type = ANDROID_ALARM_IOCTL_TO_TYPE(cmd);
+ uint32_t alarm_type_mask = 1U << alarm_type;
+
+ if (alarm_type >= ANDROID_ALARM_TYPE_COUNT)
+ return -EINVAL;
+
+ if (ANDROID_ALARM_BASE_CMD(cmd) != ANDROID_ALARM_GET_TIME(0)) {
+ if ((file->f_flags & O_ACCMODE) == O_RDONLY)
+ return -EPERM;
+ if (file->private_data == NULL &&
+ cmd != ANDROID_ALARM_SET_RTC) {
+ spin_lock_irqsave(&alarm_slock, flags);
+ if (alarm_opened) {
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ return -EBUSY;
+ }
+ alarm_opened = 1;
+ file->private_data = (void *)1;
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ }
+ }
+
+ switch (ANDROID_ALARM_BASE_CMD(cmd)) {
+ case ANDROID_ALARM_CLEAR(0):
+ spin_lock_irqsave(&alarm_slock, flags);
+ pr_alarm(IO, "alarm %d clear\n", alarm_type);
+ android_alarm_try_to_cancel(&alarms[alarm_type]);
+ if (alarm_pending) {
+ alarm_pending &= ~alarm_type_mask;
+ if (!alarm_pending && !wait_pending)
+ wake_unlock(&alarm_wake_lock);
+ }
+ alarm_enabled &= ~alarm_type_mask;
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ break;
+
+ case ANDROID_ALARM_SET_OLD:
+ case ANDROID_ALARM_SET_AND_WAIT_OLD:
+ if (get_user(new_alarm_time.tv_sec, (int __user *)arg)) {
+ rv = -EFAULT;
+ goto err1;
+ }
+ new_alarm_time.tv_nsec = 0;
+ goto from_old_alarm_set;
+
+ case ANDROID_ALARM_SET_AND_WAIT(0):
+ case ANDROID_ALARM_SET(0):
+ if (copy_from_user(&new_alarm_time, (void __user *)arg,
+ sizeof(new_alarm_time))) {
+ rv = -EFAULT;
+ goto err1;
+ }
+from_old_alarm_set:
+ spin_lock_irqsave(&alarm_slock, flags);
+ pr_alarm(IO, "alarm %d set %ld.%09ld\n", alarm_type,
+ new_alarm_time.tv_sec, new_alarm_time.tv_nsec);
+ alarm_enabled |= alarm_type_mask;
+ android_alarm_start_range(&alarms[alarm_type],
+ timespec_to_ktime(new_alarm_time),
+ timespec_to_ktime(new_alarm_time));
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ if (ANDROID_ALARM_BASE_CMD(cmd) != ANDROID_ALARM_SET_AND_WAIT(0)
+ && cmd != ANDROID_ALARM_SET_AND_WAIT_OLD)
+ break;
+ /* fall through */
+ case ANDROID_ALARM_WAIT:
+ spin_lock_irqsave(&alarm_slock, flags);
+ pr_alarm(IO, "alarm wait\n");
+ if (!alarm_pending && wait_pending) {
+ wake_unlock(&alarm_wake_lock);
+ wait_pending = 0;
+ }
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ rv = wait_event_interruptible(alarm_wait_queue, alarm_pending);
+ if (rv)
+ goto err1;
+ spin_lock_irqsave(&alarm_slock, flags);
+ rv = alarm_pending;
+ wait_pending = 1;
+ alarm_pending = 0;
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ break;
+ case ANDROID_ALARM_SET_RTC:
+ if (copy_from_user(&new_rtc_time, (void __user *)arg,
+ sizeof(new_rtc_time))) {
+ rv = -EFAULT;
+ goto err1;
+ }
+ rv = android_alarm_set_rtc(new_rtc_time);
+ spin_lock_irqsave(&alarm_slock, flags);
+ alarm_pending |= ANDROID_ALARM_TIME_CHANGE_MASK;
+ wake_up(&alarm_wait_queue);
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ if (rv < 0)
+ goto err1;
+ break;
+ case ANDROID_ALARM_GET_TIME(0):
+ switch (alarm_type) {
+ case ANDROID_ALARM_RTC_WAKEUP:
+ case ANDROID_ALARM_RTC:
+ getnstimeofday(&tmp_time);
+ break;
+ case ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP:
+ case ANDROID_ALARM_ELAPSED_REALTIME:
+ tmp_time =
+ ktime_to_timespec(alarm_get_elapsed_realtime());
+ break;
+ case ANDROID_ALARM_TYPE_COUNT:
+ case ANDROID_ALARM_SYSTEMTIME:
+ ktime_get_ts(&tmp_time);
+ break;
+ }
+ if (copy_to_user((void __user *)arg, &tmp_time,
+ sizeof(tmp_time))) {
+ rv = -EFAULT;
+ goto err1;
+ }
+ break;
+
+ default:
+ rv = -EINVAL;
+ goto err1;
+ }
+err1:
+ return rv;
+}
+
+static int alarm_open(struct inode *inode, struct file *file)
+{
+ file->private_data = NULL;
+ return 0;
+}
+
+static int alarm_release(struct inode *inode, struct file *file)
+{
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ if (file->private_data != 0) {
+ for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++) {
+ uint32_t alarm_type_mask = 1U << i;
+ if (alarm_enabled & alarm_type_mask) {
+ pr_alarm(INFO, "alarm_release: clear alarm, "
+ "pending %d\n",
+ !!(alarm_pending & alarm_type_mask));
+ alarm_enabled &= ~alarm_type_mask;
+ }
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ android_alarm_cancel(&alarms[i]);
+ spin_lock_irqsave(&alarm_slock, flags);
+ }
+ if (alarm_pending | wait_pending) {
+ if (alarm_pending)
+ pr_alarm(INFO, "alarm_release: clear "
+ "pending alarms %x\n", alarm_pending);
+ wake_unlock(&alarm_wake_lock);
+ wait_pending = 0;
+ alarm_pending = 0;
+ }
+ alarm_opened = 0;
+ }
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ return 0;
+}
+
+static void alarm_triggered(struct android_alarm *alarm)
+{
+ unsigned long flags;
+ uint32_t alarm_type_mask = 1U << alarm->type;
+
+ pr_alarm(INT, "alarm_triggered type %d\n", alarm->type);
+ spin_lock_irqsave(&alarm_slock, flags);
+ if (alarm_enabled & alarm_type_mask) {
+ wake_lock_timeout(&alarm_wake_lock, 5 * HZ);
+ alarm_enabled &= ~alarm_type_mask;
+ alarm_pending |= alarm_type_mask;
+ wake_up(&alarm_wait_queue);
+ }
+ spin_unlock_irqrestore(&alarm_slock, flags);
+}
+
+static const struct file_operations alarm_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = alarm_ioctl,
+ .open = alarm_open,
+ .release = alarm_release,
+};
+
+static struct miscdevice alarm_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "alarm",
+ .fops = &alarm_fops,
+};
+
+static int __init alarm_dev_init(void)
+{
+ int err;
+ int i;
+
+ err = misc_register(&alarm_device);
+ if (err)
+ return err;
+
+ for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++)
+ android_alarm_init(&alarms[i], i, alarm_triggered);
+ wake_lock_init(&alarm_wake_lock, WAKE_LOCK_SUSPEND, "alarm");
+
+ return 0;
+}
+
+static void __exit alarm_dev_exit(void)
+{
+ misc_deregister(&alarm_device);
+ wake_lock_destroy(&alarm_wake_lock);
+}
+
+module_init(alarm_dev_init);
+module_exit(alarm_dev_exit);
+
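The misc device registered above ("alarm") is driven entirely through the ioctls handled in alarm_ioctl(). As a rough user-space sketch of that interface (not part of this patch; the /dev/alarm node name and the install path of the header are assumptions), reading the elapsed-realtime clock could look like this:

/* hypothetical user-space probe of the alarm misc device */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "android_alarm.h"	/* ioctl definitions from this patch; also pulls in struct timespec */

int main(void)
{
	struct timespec ts;
	int fd = open("/dev/alarm", O_RDONLY);	/* GET_TIME is allowed on a read-only fd */

	if (fd < 0) {
		perror("open /dev/alarm");
		return 1;
	}
	if (ioctl(fd, ANDROID_ALARM_GET_TIME(ANDROID_ALARM_ELAPSED_REALTIME), &ts) < 0) {
		perror("ANDROID_ALARM_GET_TIME");
		close(fd);
		return 1;
	}
	printf("elapsed realtime: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
	close(fd);
	return 0;
}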
diff --git a/drivers/staging/android/alarm.c b/drivers/staging/android/alarm.c
new file mode 100644
index 000000000000..c68950b9e08f
--- /dev/null
+++ b/drivers/staging/android/alarm.c
@@ -0,0 +1,601 @@
+/* drivers/rtc/alarm.c
+ *
+ * Copyright (C) 2007-2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/time.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include "android_alarm.h"
+
+/* XXX - Hack out wakelocks, while they are out of tree */
+struct wake_lock {
+ int i;
+};
+#define wake_lock(x)
+#define wake_lock_timeout(x, y)
+#define wake_unlock(x)
+#define WAKE_LOCK_SUSPEND 0
+#define wake_lock_init(x, y, z) ((x)->i = 1)
+#define wake_lock_destroy(x)
+
+#define ANDROID_ALARM_PRINT_ERROR (1U << 0)
+#define ANDROID_ALARM_PRINT_INIT_STATUS (1U << 1)
+#define ANDROID_ALARM_PRINT_TSET (1U << 2)
+#define ANDROID_ALARM_PRINT_CALL (1U << 3)
+#define ANDROID_ALARM_PRINT_SUSPEND (1U << 4)
+#define ANDROID_ALARM_PRINT_INT (1U << 5)
+#define ANDROID_ALARM_PRINT_FLOW (1U << 6)
+
+static int debug_mask = ANDROID_ALARM_PRINT_ERROR | \
+ ANDROID_ALARM_PRINT_INIT_STATUS;
+module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define pr_alarm(debug_level_mask, args...) \
+ do { \
+ if (debug_mask & ANDROID_ALARM_PRINT_##debug_level_mask) { \
+ pr_info(args); \
+ } \
+ } while (0)
+
+#define ANDROID_ALARM_WAKEUP_MASK ( \
+ ANDROID_ALARM_RTC_WAKEUP_MASK | \
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK)
+
+/* support old userspace code */
+#define ANDROID_ALARM_SET_OLD _IOW('a', 2, time_t) /* set alarm */
+#define ANDROID_ALARM_SET_AND_WAIT_OLD _IOW('a', 3, time_t)
+
+struct alarm_queue {
+ struct rb_root alarms;
+ struct rb_node *first;
+ struct hrtimer timer;
+ ktime_t delta;
+ bool stopped;
+ ktime_t stopped_time;
+};
+
+static struct rtc_device *alarm_rtc_dev;
+static DEFINE_SPINLOCK(alarm_slock);
+static DEFINE_MUTEX(alarm_setrtc_mutex);
+static struct wake_lock alarm_rtc_wake_lock;
+static struct platform_device *alarm_platform_dev;
+struct alarm_queue alarms[ANDROID_ALARM_TYPE_COUNT];
+static bool suspended;
+
+static void update_timer_locked(struct alarm_queue *base, bool head_removed)
+{
+ struct android_alarm *alarm;
+ bool is_wakeup = base == &alarms[ANDROID_ALARM_RTC_WAKEUP] ||
+ base == &alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP];
+
+ if (base->stopped) {
+ pr_alarm(FLOW, "changed alarm while setting the wall time\n");
+ return;
+ }
+
+ if (is_wakeup && !suspended && head_removed)
+ wake_unlock(&alarm_rtc_wake_lock);
+
+ if (!base->first)
+ return;
+
+ alarm = container_of(base->first, struct android_alarm, node);
+
+ pr_alarm(FLOW, "selected alarm, type %d, func %pF at %lld\n",
+ alarm->type, alarm->function, ktime_to_ns(alarm->expires));
+
+ if (is_wakeup && suspended) {
+ pr_alarm(FLOW, "changed alarm while suspended\n");
+ wake_lock_timeout(&alarm_rtc_wake_lock, 1 * HZ);
+ return;
+ }
+
+ hrtimer_try_to_cancel(&base->timer);
+ base->timer.node.expires = ktime_add(base->delta, alarm->expires);
+ base->timer._softexpires = ktime_add(base->delta, alarm->softexpires);
+ hrtimer_start_expires(&base->timer, HRTIMER_MODE_ABS);
+}
+
+static void alarm_enqueue_locked(struct android_alarm *alarm)
+{
+ struct alarm_queue *base = &alarms[alarm->type];
+ struct rb_node **link = &base->alarms.rb_node;
+ struct rb_node *parent = NULL;
+ struct android_alarm *entry;
+ int leftmost = 1;
+ bool was_first = false;
+
+ pr_alarm(FLOW, "added alarm, type %d, func %pF at %lld\n",
+ alarm->type, alarm->function, ktime_to_ns(alarm->expires));
+
+ if (base->first == &alarm->node) {
+ base->first = rb_next(&alarm->node);
+ was_first = true;
+ }
+ if (!RB_EMPTY_NODE(&alarm->node)) {
+ rb_erase(&alarm->node, &base->alarms);
+ RB_CLEAR_NODE(&alarm->node);
+ }
+
+ while (*link) {
+ parent = *link;
+ entry = rb_entry(parent, struct android_alarm, node);
+ /*
+ * We don't care about collisions. Nodes with
+ * the same expiry time stay together.
+ */
+ if (alarm->expires.tv64 < entry->expires.tv64) {
+ link = &(*link)->rb_left;
+ } else {
+ link = &(*link)->rb_right;
+ leftmost = 0;
+ }
+ }
+ if (leftmost)
+ base->first = &alarm->node;
+ if (leftmost || was_first)
+ update_timer_locked(base, was_first);
+
+ rb_link_node(&alarm->node, parent, link);
+ rb_insert_color(&alarm->node, &base->alarms);
+}
+
+/**
+ * android_alarm_init - initialize an alarm
+ * @alarm: the alarm to be initialized
+ * @type: the alarm type to be used
+ * @function: alarm callback function
+ */
+void android_alarm_init(struct android_alarm *alarm,
+ enum android_alarm_type type, void (*function)(struct android_alarm *))
+{
+ RB_CLEAR_NODE(&alarm->node);
+ alarm->type = type;
+ alarm->function = function;
+
+ pr_alarm(FLOW, "created alarm, type %d, func %pF\n", type, function);
+}
+
+
+/**
+ * android_alarm_start_range - (re)start an alarm
+ * @alarm: the alarm to be added
+ * @start: earliest expiry time
+ * @end: expiry time
+ */
+void android_alarm_start_range(struct android_alarm *alarm, ktime_t start,
+ ktime_t end)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ alarm->softexpires = start;
+ alarm->expires = end;
+ alarm_enqueue_locked(alarm);
+ spin_unlock_irqrestore(&alarm_slock, flags);
+}
+
+/**
+ * android_alarm_try_to_cancel - try to deactivate an alarm
+ * @alarm: alarm to stop
+ *
+ * Returns:
+ * 0 when the alarm was not active
+ * 1 when the alarm was active
+ * -1 when the alarm may currently be executing the callback function and
+ * cannot be stopped (it may also be inactive)
+ */
+int android_alarm_try_to_cancel(struct android_alarm *alarm)
+{
+ struct alarm_queue *base = &alarms[alarm->type];
+ unsigned long flags;
+ bool first = false;
+ int ret = 0;
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ if (!RB_EMPTY_NODE(&alarm->node)) {
+ pr_alarm(FLOW, "canceled alarm, type %d, func %pF at %lld\n",
+ alarm->type, alarm->function,
+ ktime_to_ns(alarm->expires));
+ ret = 1;
+ if (base->first == &alarm->node) {
+ base->first = rb_next(&alarm->node);
+ first = true;
+ }
+ rb_erase(&alarm->node, &base->alarms);
+ RB_CLEAR_NODE(&alarm->node);
+ if (first)
+ update_timer_locked(base, true);
+ } else
+ pr_alarm(FLOW, "tried to cancel alarm, type %d, func %pF\n",
+ alarm->type, alarm->function);
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ if (!ret && hrtimer_callback_running(&base->timer))
+ ret = -1;
+ return ret;
+}
+
+/**
+ * android_alarm_cancel - cancel an alarm and wait for the handler to finish.
+ * @alarm: the alarm to be cancelled
+ *
+ * Returns:
+ * 0 when the alarm was not active
+ * 1 when the alarm was active
+ */
+int android_alarm_cancel(struct android_alarm *alarm)
+{
+ for (;;) {
+ int ret = android_alarm_try_to_cancel(alarm);
+ if (ret >= 0)
+ return ret;
+ cpu_relax();
+ }
+}
+
+/**
+ * alarm_set_rtc - set the kernel and rtc walltime
+ * @new_time: timespec value containing the new time
+ */
+int android_alarm_set_rtc(struct timespec new_time)
+{
+ int i;
+ int ret;
+ unsigned long flags;
+ struct rtc_time rtc_new_rtc_time;
+ struct timespec tmp_time;
+
+ rtc_time_to_tm(new_time.tv_sec, &rtc_new_rtc_time);
+
+ pr_alarm(TSET, "set rtc %ld %ld - rtc %02d:%02d:%02d %02d/%02d/%04d\n",
+ new_time.tv_sec, new_time.tv_nsec,
+ rtc_new_rtc_time.tm_hour, rtc_new_rtc_time.tm_min,
+ rtc_new_rtc_time.tm_sec, rtc_new_rtc_time.tm_mon + 1,
+ rtc_new_rtc_time.tm_mday,
+ rtc_new_rtc_time.tm_year + 1900);
+
+ mutex_lock(&alarm_setrtc_mutex);
+ spin_lock_irqsave(&alarm_slock, flags);
+ wake_lock(&alarm_rtc_wake_lock);
+ getnstimeofday(&tmp_time);
+ for (i = 0; i < ANDROID_ALARM_SYSTEMTIME; i++) {
+ hrtimer_try_to_cancel(&alarms[i].timer);
+ alarms[i].stopped = true;
+ alarms[i].stopped_time = timespec_to_ktime(tmp_time);
+ }
+ alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP].delta =
+ alarms[ANDROID_ALARM_ELAPSED_REALTIME].delta =
+ ktime_sub(alarms[ANDROID_ALARM_ELAPSED_REALTIME].delta,
+ timespec_to_ktime(timespec_sub(tmp_time, new_time)));
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ ret = do_settimeofday(&new_time);
+ spin_lock_irqsave(&alarm_slock, flags);
+ for (i = 0; i < ANDROID_ALARM_SYSTEMTIME; i++) {
+ alarms[i].stopped = false;
+ update_timer_locked(&alarms[i], false);
+ }
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ if (ret < 0) {
+ pr_alarm(ERROR, "alarm_set_rtc: Failed to set time\n");
+ goto err;
+ }
+ if (!alarm_rtc_dev) {
+ pr_alarm(ERROR,
+ "alarm_set_rtc: no RTC, time will be lost on reboot\n");
+ goto err;
+ }
+ ret = rtc_set_time(alarm_rtc_dev, &rtc_new_rtc_time);
+ if (ret < 0)
+ pr_alarm(ERROR, "alarm_set_rtc: "
+ "Failed to set RTC, time will be lost on reboot\n");
+err:
+ wake_unlock(&alarm_rtc_wake_lock);
+ mutex_unlock(&alarm_setrtc_mutex);
+ return ret;
+}
+
+/**
+ * alarm_get_elapsed_realtime - get the elapsed real time in ktime_t format
+ *
+ * returns the time in ktime_t format
+ */
+ktime_t alarm_get_elapsed_realtime(void)
+{
+ ktime_t now;
+ unsigned long flags;
+ struct alarm_queue *base = &alarms[ANDROID_ALARM_ELAPSED_REALTIME];
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ now = base->stopped ? base->stopped_time : ktime_get_real();
+ now = ktime_sub(now, base->delta);
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ return now;
+}
+
+static enum hrtimer_restart alarm_timer_triggered(struct hrtimer *timer)
+{
+ struct alarm_queue *base;
+ struct android_alarm *alarm;
+ unsigned long flags;
+ ktime_t now;
+
+ spin_lock_irqsave(&alarm_slock, flags);
+
+ base = container_of(timer, struct alarm_queue, timer);
+ now = base->stopped ? base->stopped_time : hrtimer_cb_get_time(timer);
+ now = ktime_sub(now, base->delta);
+
+ pr_alarm(INT, "alarm_timer_triggered type %ld at %lld\n",
+ base - alarms, ktime_to_ns(now));
+
+ while (base->first) {
+ alarm = container_of(base->first, struct android_alarm, node);
+ if (alarm->softexpires.tv64 > now.tv64) {
+ pr_alarm(FLOW, "don't call alarm, %pF, %lld (s %lld)\n",
+ alarm->function, ktime_to_ns(alarm->expires),
+ ktime_to_ns(alarm->softexpires));
+ break;
+ }
+ base->first = rb_next(&alarm->node);
+ rb_erase(&alarm->node, &base->alarms);
+ RB_CLEAR_NODE(&alarm->node);
+ pr_alarm(CALL, "call alarm, type %d, func %pF, %lld (s %lld)\n",
+ alarm->type, alarm->function,
+ ktime_to_ns(alarm->expires),
+ ktime_to_ns(alarm->softexpires));
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ alarm->function(alarm);
+ spin_lock_irqsave(&alarm_slock, flags);
+ }
+ if (!base->first)
+ pr_alarm(FLOW, "no more alarms of type %ld\n", base - alarms);
+ update_timer_locked(base, true);
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ return HRTIMER_NORESTART;
+}
+
+static void alarm_triggered_func(void *p)
+{
+ struct rtc_device *rtc = alarm_rtc_dev;
+ if (!(rtc->irq_data & RTC_AF))
+ return;
+ pr_alarm(INT, "rtc alarm triggered\n");
+ wake_lock_timeout(&alarm_rtc_wake_lock, 1 * HZ);
+}
+
+static int alarm_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ int err = 0;
+ unsigned long flags;
+ struct rtc_wkalrm rtc_alarm;
+ struct rtc_time rtc_current_rtc_time;
+ unsigned long rtc_current_time;
+ unsigned long rtc_alarm_time;
+ struct timespec rtc_delta;
+ struct timespec wall_time;
+ struct alarm_queue *wakeup_queue = NULL;
+ struct alarm_queue *tmp_queue = NULL;
+
+ pr_alarm(SUSPEND, "alarm_suspend(%p, %d)\n", pdev, state.event);
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ suspended = true;
+ spin_unlock_irqrestore(&alarm_slock, flags);
+
+ hrtimer_cancel(&alarms[ANDROID_ALARM_RTC_WAKEUP].timer);
+ hrtimer_cancel(&alarms[
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP].timer);
+
+ tmp_queue = &alarms[ANDROID_ALARM_RTC_WAKEUP];
+ if (tmp_queue->first)
+ wakeup_queue = tmp_queue;
+ tmp_queue = &alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP];
+ if (tmp_queue->first && (!wakeup_queue ||
+ hrtimer_get_expires(&tmp_queue->timer).tv64 <
+ hrtimer_get_expires(&wakeup_queue->timer).tv64))
+ wakeup_queue = tmp_queue;
+ if (wakeup_queue) {
+ rtc_read_time(alarm_rtc_dev, &rtc_current_rtc_time);
+ getnstimeofday(&wall_time);
+ rtc_tm_to_time(&rtc_current_rtc_time, &rtc_current_time);
+ set_normalized_timespec(&rtc_delta,
+ wall_time.tv_sec - rtc_current_time,
+ wall_time.tv_nsec);
+
+ rtc_alarm_time = timespec_sub(ktime_to_timespec(
+ hrtimer_get_expires(&wakeup_queue->timer)),
+ rtc_delta).tv_sec;
+
+ rtc_time_to_tm(rtc_alarm_time, &rtc_alarm.time);
+ rtc_alarm.enabled = 1;
+ rtc_set_alarm(alarm_rtc_dev, &rtc_alarm);
+ rtc_read_time(alarm_rtc_dev, &rtc_current_rtc_time);
+ rtc_tm_to_time(&rtc_current_rtc_time, &rtc_current_time);
+ pr_alarm(SUSPEND,
+ "rtc alarm set at %ld, now %ld, rtc delta %ld.%09ld\n",
+ rtc_alarm_time, rtc_current_time,
+ rtc_delta.tv_sec, rtc_delta.tv_nsec);
+ if (rtc_current_time + 1 >= rtc_alarm_time) {
+ pr_alarm(SUSPEND, "alarm about to go off\n");
+ memset(&rtc_alarm, 0, sizeof(rtc_alarm));
+ rtc_alarm.enabled = 0;
+ rtc_set_alarm(alarm_rtc_dev, &rtc_alarm);
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ suspended = false;
+ wake_lock_timeout(&alarm_rtc_wake_lock, 2 * HZ);
+ update_timer_locked(&alarms[ANDROID_ALARM_RTC_WAKEUP],
+ false);
+ update_timer_locked(&alarms[
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP], false);
+ err = -EBUSY;
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ }
+ }
+ return err;
+}
+
+static int alarm_resume(struct platform_device *pdev)
+{
+ struct rtc_wkalrm alarm;
+ unsigned long flags;
+
+ pr_alarm(SUSPEND, "alarm_resume(%p)\n", pdev);
+
+ memset(&alarm, 0, sizeof(alarm));
+ alarm.enabled = 0;
+ rtc_set_alarm(alarm_rtc_dev, &alarm);
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ suspended = false;
+ update_timer_locked(&alarms[ANDROID_ALARM_RTC_WAKEUP], false);
+ update_timer_locked(&alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP],
+ false);
+ spin_unlock_irqrestore(&alarm_slock, flags);
+
+ return 0;
+}
+
+static struct rtc_task alarm_rtc_task = {
+ .func = alarm_triggered_func
+};
+
+static int rtc_alarm_add_device(struct device *dev,
+ struct class_interface *class_intf)
+{
+ int err;
+ struct rtc_device *rtc = to_rtc_device(dev);
+
+ mutex_lock(&alarm_setrtc_mutex);
+
+ if (alarm_rtc_dev) {
+ err = -EBUSY;
+ goto err1;
+ }
+
+ alarm_platform_dev =
+ platform_device_register_simple("alarm", -1, NULL, 0);
+ if (IS_ERR(alarm_platform_dev)) {
+ err = PTR_ERR(alarm_platform_dev);
+ goto err2;
+ }
+ err = rtc_irq_register(rtc, &alarm_rtc_task);
+ if (err)
+ goto err3;
+ alarm_rtc_dev = rtc;
+ pr_alarm(INIT_STATUS, "using rtc device, %s, for alarms", rtc->name);
+ mutex_unlock(&alarm_setrtc_mutex);
+
+ return 0;
+
+err3:
+ platform_device_unregister(alarm_platform_dev);
+err2:
+err1:
+ mutex_unlock(&alarm_setrtc_mutex);
+ return err;
+}
+
+static void rtc_alarm_remove_device(struct device *dev,
+ struct class_interface *class_intf)
+{
+ if (dev == &alarm_rtc_dev->dev) {
+ pr_alarm(INIT_STATUS, "lost rtc device for alarms");
+ rtc_irq_unregister(alarm_rtc_dev, &alarm_rtc_task);
+ platform_device_unregister(alarm_platform_dev);
+ alarm_rtc_dev = NULL;
+ }
+}
+
+static struct class_interface rtc_alarm_interface = {
+ .add_dev = &rtc_alarm_add_device,
+ .remove_dev = &rtc_alarm_remove_device,
+};
+
+static struct platform_driver alarm_driver = {
+ .suspend = alarm_suspend,
+ .resume = alarm_resume,
+ .driver = {
+ .name = "alarm"
+ }
+};
+
+static int __init alarm_late_init(void)
+{
+ unsigned long flags;
+ struct timespec tmp_time, system_time;
+
+ /* this needs to run after the rtc is read at boot */
+ spin_lock_irqsave(&alarm_slock, flags);
+ /* We read the current rtc and system time so we can later calculate
+ * elapsed realtime to be (boot_systemtime + rtc - boot_rtc) ==
+ * (rtc - (boot_rtc - boot_systemtime))
+ */
+ getnstimeofday(&tmp_time);
+ ktime_get_ts(&system_time);
+ alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP].delta =
+ alarms[ANDROID_ALARM_ELAPSED_REALTIME].delta =
+ timespec_to_ktime(timespec_sub(tmp_time, system_time));
+
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ return 0;
+}
+
+static int __init alarm_driver_init(void)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < ANDROID_ALARM_SYSTEMTIME; i++) {
+ hrtimer_init(&alarms[i].timer,
+ CLOCK_REALTIME, HRTIMER_MODE_ABS);
+ alarms[i].timer.function = alarm_timer_triggered;
+ }
+ hrtimer_init(&alarms[ANDROID_ALARM_SYSTEMTIME].timer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ alarms[ANDROID_ALARM_SYSTEMTIME].timer.function = alarm_timer_triggered;
+ err = platform_driver_register(&alarm_driver);
+ if (err < 0)
+ goto err1;
+ wake_lock_init(&alarm_rtc_wake_lock, WAKE_LOCK_SUSPEND, "alarm_rtc");
+ rtc_alarm_interface.class = rtc_class;
+ err = class_interface_register(&rtc_alarm_interface);
+ if (err < 0)
+ goto err2;
+
+ return 0;
+
+err2:
+ wake_lock_destroy(&alarm_rtc_wake_lock);
+ platform_driver_unregister(&alarm_driver);
+err1:
+ return err;
+}
+
+static void __exit alarm_exit(void)
+{
+ class_interface_unregister(&rtc_alarm_interface);
+ wake_lock_destroy(&alarm_rtc_wake_lock);
+ platform_driver_unregister(&alarm_driver);
+}
+
+late_initcall(alarm_late_init);
+module_init(alarm_driver_init);
+module_exit(alarm_exit);
+
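Other kernel code would consume the API implemented above through android_alarm_init() and android_alarm_start_range(). A minimal in-kernel sketch (not from this patch; the callback name and the 10-second expiry are made up for illustration):

/* hypothetical in-kernel user of the android_alarm API */
#include <linux/kernel.h>
#include <linux/ktime.h>
#include "android_alarm.h"

static struct android_alarm demo_alarm;

static void demo_alarm_fired(struct android_alarm *alarm)
{
	pr_info("android alarm fired, type %d\n", alarm->type);
}

static void demo_alarm_arm(void)
{
	/* expire ~10 s of elapsed realtime from now */
	ktime_t expires = ktime_add_ns(alarm_get_elapsed_realtime(),
				       10ULL * NSEC_PER_SEC);

	android_alarm_init(&demo_alarm, ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
			   demo_alarm_fired);
	android_alarm_start_range(&demo_alarm, expires, expires);
}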
diff --git a/drivers/staging/android/android_alarm.h b/drivers/staging/android/android_alarm.h
new file mode 100644
index 000000000000..6eecbde2ef6f
--- /dev/null
+++ b/drivers/staging/android/android_alarm.h
@@ -0,0 +1,121 @@
+/* include/linux/android_alarm.h
+ *
+ * Copyright (C) 2006-2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_ANDROID_ALARM_H
+#define _LINUX_ANDROID_ALARM_H
+
+#include <linux/ioctl.h>
+#include <linux/time.h>
+
+enum android_alarm_type {
+ /* return code bit numbers or set alarm arg */
+ ANDROID_ALARM_RTC_WAKEUP,
+ ANDROID_ALARM_RTC,
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
+ ANDROID_ALARM_ELAPSED_REALTIME,
+ ANDROID_ALARM_SYSTEMTIME,
+
+ ANDROID_ALARM_TYPE_COUNT,
+
+ /* return code bit numbers */
+ /* ANDROID_ALARM_TIME_CHANGE = 16 */
+};
+
+#ifdef __KERNEL__
+
+#include <linux/ktime.h>
+#include <linux/rbtree.h>
+
+/*
+ * The alarm interface is similar to the hrtimer interface but adds support
+ * for wakeup from suspend. It also adds an elapsed realtime clock that can
+ * be used for periodic timers that need to keep running while the system is
+ * suspended and not be disrupted when the wall time is set.
+ */
+
+/**
+ * struct alarm - the basic alarm structure
+ * @node: red black tree node for time ordered insertion
+ * @type: alarm type. rtc/elapsed-realtime/systemtime, wakeup/non-wakeup.
+ * @softexpires: the absolute earliest expiry time of the alarm.
+ * @expires: the absolute expiry time.
+ * @function: alarm expiry callback function
+ *
+ * The alarm structure must be initialized by alarm_init()
+ *
+ */
+
+struct android_alarm {
+ struct rb_node node;
+ enum android_alarm_type type;
+ ktime_t softexpires;
+ ktime_t expires;
+ void (*function)(struct android_alarm *);
+};
+
+void android_alarm_init(struct android_alarm *alarm,
+ enum android_alarm_type type, void (*function)(struct android_alarm *));
+void android_alarm_start_range(struct android_alarm *alarm, ktime_t start,
+ ktime_t end);
+int android_alarm_try_to_cancel(struct android_alarm *alarm);
+int android_alarm_cancel(struct android_alarm *alarm);
+ktime_t alarm_get_elapsed_realtime(void);
+
+/* set rtc while preserving elapsed realtime */
+int android_alarm_set_rtc(const struct timespec ts);
+
+#ifdef CONFIG_ANDROID_ALARM_OLDDRV_COMPAT
+/*
+ * Some older drivers depend on the old API,
+ * so provide compatibility macros for now.
+ */
+#define alarm android_alarm
+#define alarm_init(x, y, z) android_alarm_init(x, y, z)
+#define alarm_start_range(x, y, z) android_alarm_start_range(x, y, z)
+#define alarm_try_to_cancel(x) android_alarm_try_to_cancel(x)
+#define alarm_cancel(x) android_alarm_cancel(x)
+#define alarm_set_rtc(x) android_alarm_set_rtc(x)
+#endif
+
+
+#endif
+
+enum android_alarm_return_flags {
+ ANDROID_ALARM_RTC_WAKEUP_MASK = 1U << ANDROID_ALARM_RTC_WAKEUP,
+ ANDROID_ALARM_RTC_MASK = 1U << ANDROID_ALARM_RTC,
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK =
+ 1U << ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
+ ANDROID_ALARM_ELAPSED_REALTIME_MASK =
+ 1U << ANDROID_ALARM_ELAPSED_REALTIME,
+ ANDROID_ALARM_SYSTEMTIME_MASK = 1U << ANDROID_ALARM_SYSTEMTIME,
+ ANDROID_ALARM_TIME_CHANGE_MASK = 1U << 16
+};
+
+/* Disable alarm */
+#define ANDROID_ALARM_CLEAR(type) _IO('a', 0 | ((type) << 4))
+
+/* Ack last alarm and wait for next */
+#define ANDROID_ALARM_WAIT _IO('a', 1)
+
+#define ALARM_IOW(c, type, size) _IOW('a', (c) | ((type) << 4), size)
+/* Set alarm */
+#define ANDROID_ALARM_SET(type) ALARM_IOW(2, type, struct timespec)
+#define ANDROID_ALARM_SET_AND_WAIT(type) ALARM_IOW(3, type, struct timespec)
+#define ANDROID_ALARM_GET_TIME(type) ALARM_IOW(4, type, struct timespec)
+#define ANDROID_ALARM_SET_RTC _IOW('a', 5, struct timespec)
+#define ANDROID_ALARM_BASE_CMD(cmd) (cmd & ~(_IOC(0, 0, 0xf0, 0)))
+#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd) (_IOC_NR(cmd) >> 4)
+
+#endif
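The ioctl helpers above pack the alarm type into bits 4-7 of the ioctl number field and the base command into bits 0-3. As a worked example (not part of the patch): ANDROID_ALARM_SET_AND_WAIT(ANDROID_ALARM_ELAPSED_REALTIME) uses type 3, so _IOC_NR(cmd) is 0x33; ANDROID_ALARM_IOCTL_TO_TYPE(cmd) shifts that right by four to recover type 3, while ANDROID_ALARM_BASE_CMD(cmd) masks off the 0xf0 type bits so the switch in alarm_ioctl() can compare against ANDROID_ALARM_SET_AND_WAIT(0).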
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 99052bfd3a2d..9f1f27e7c86e 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -315,7 +315,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
get_file(asma->file);
/*
- * XXX - Reworked to use shmem_zero_setup() instead of
+ * XXX - Reworked to use shmem_zero_setup() instead of
* shmem_set_file while we're in staging. -jstultz
*/
if (vma->vm_flags & VM_SHARED) {
@@ -680,7 +680,7 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return ret;
}
-static struct file_operations ashmem_fops = {
+static const struct file_operations ashmem_fops = {
.owner = THIS_MODULE,
.open = ashmem_open,
.release = ashmem_release,
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index f0b7e6605ab5..094e18c495c5 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/staging/android/binder.c
@@ -258,7 +258,7 @@ struct binder_ref {
};
struct binder_buffer {
- struct list_head entry; /* free and allocated entries by addesss */
+ struct list_head entry; /* free and allocated entries by address */
struct rb_node rb_node; /* free entry by size or allocated entry */
/* by address */
unsigned free:1;
diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c
index ffc2d043dd8e..0d2367f2c15f 100644
--- a/drivers/staging/android/logger.c
+++ b/drivers/staging/android/logger.c
@@ -60,7 +60,11 @@ struct logger_reader {
};
/* logger_offset - returns index 'n' into the log via (optimized) modulus */
-#define logger_offset(n) ((n) & (log->size - 1))
+size_t logger_offset(struct logger_log *log, size_t n)
+{
+ return n & (log->size-1);
+}
+
/*
* file_get_log - Given a file structure, return the associated log
@@ -89,20 +93,24 @@ static inline struct logger_log *file_get_log(struct file *file)
* get_entry_len - Grabs the length of the payload of the next entry starting
* from 'off'.
*
+ * An entry length is 2 bytes (16 bits) in host endian order.
+ * In the log, the length does not include the size of the log entry structure.
+ * This function returns the size including the log entry structure.
+ *
* Caller needs to hold log->mutex.
*/
static __u32 get_entry_len(struct logger_log *log, size_t off)
{
__u16 val;
- switch (log->size - off) {
- case 1:
- memcpy(&val, log->buffer + off, 1);
- memcpy(((char *) &val) + 1, log->buffer, 1);
- break;
- default:
- memcpy(&val, log->buffer + off, 2);
- }
+ /* copy 2 bytes from buffer, in memcpy order, */
+ /* handling possible wrap at end of buffer */
+
+ ((__u8 *)&val)[0] = log->buffer[off];
+ if (likely(off+1 < log->size))
+ ((__u8 *)&val)[1] = log->buffer[off+1];
+ else
+ ((__u8 *)&val)[1] = log->buffer[0];
return sizeof(struct logger_entry) + val;
}
@@ -137,7 +145,7 @@ static ssize_t do_read_log_to_user(struct logger_log *log,
if (copy_to_user(buf + len, log->buffer, count - len))
return -EFAULT;
- reader->r_off = logger_offset(reader->r_off + count);
+ reader->r_off = logger_offset(log, reader->r_off + count);
return count;
}
@@ -164,9 +172,10 @@ static ssize_t logger_read(struct file *file, char __user *buf,
start:
while (1) {
+ mutex_lock(&log->mutex);
+
prepare_to_wait(&log->wq, &wait, TASK_INTERRUPTIBLE);
- mutex_lock(&log->mutex);
ret = (log->w_off == reader->r_off);
mutex_unlock(&log->mutex);
if (!ret)
@@ -225,7 +234,7 @@ static size_t get_next_entry(struct logger_log *log, size_t off, size_t len)
do {
size_t nr = get_entry_len(log, off);
- off = logger_offset(off + nr);
+ off = logger_offset(log, off + nr);
count += nr;
} while (count < len);
@@ -233,16 +242,28 @@ static size_t get_next_entry(struct logger_log *log, size_t off, size_t len)
}
/*
- * clock_interval - is a < c < b in mod-space? Put another way, does the line
- * from a to b cross c?
+ * is_between - is a < c < b, accounting for wrapping of a, b, and c
+ * positions in the buffer
+ *
+ * That is, if a<b, check for c between a and b
+ * and if a>b, check for c outside (not between) a and b
+ *
+ * |------- a xxxxxxxx b --------|
+ * c^
+ *
+ * |xxxxx b --------- a xxxxxxxxx|
+ * c^
+ * or c^
*/
-static inline int clock_interval(size_t a, size_t b, size_t c)
+static inline int is_between(size_t a, size_t b, size_t c)
{
- if (b < a) {
- if (a < c || b >= c)
+ if (a < b) {
+ /* is c between a and b? */
+ if (a < c && c <= b)
return 1;
} else {
- if (a < c && b >= c)
+ /* is c outside of b through a? */
+ if (c <= b || a < c)
return 1;
}
@@ -260,14 +281,14 @@ static inline int clock_interval(size_t a, size_t b, size_t c)
static void fix_up_readers(struct logger_log *log, size_t len)
{
size_t old = log->w_off;
- size_t new = logger_offset(old + len);
+ size_t new = logger_offset(log, old + len);
struct logger_reader *reader;
- if (clock_interval(old, new, log->head))
+ if (is_between(old, new, log->head))
log->head = get_next_entry(log, log->head, len);
list_for_each_entry(reader, &log->readers, list)
- if (clock_interval(old, new, reader->r_off))
+ if (is_between(old, new, reader->r_off))
reader->r_off = get_next_entry(log, reader->r_off, len);
}
@@ -286,7 +307,7 @@ static void do_write_log(struct logger_log *log, const void *buf, size_t count)
if (count != len)
memcpy(log->buffer, buf + len, count - len);
- log->w_off = logger_offset(log->w_off + count);
+ log->w_off = logger_offset(log, log->w_off + count);
}
@@ -309,9 +330,15 @@ static ssize_t do_write_log_from_user(struct logger_log *log,
if (count != len)
if (copy_from_user(log->buffer, buf + len, count - len))
+ /*
+ * Note that by not updating w_off, this abandons the
+ * portion of the new entry that *was* successfully
+ * copied, just above. This is intentional to avoid
+ * message corruption from missing fragments.
+ */
return -EFAULT;
- log->w_off = logger_offset(log->w_off + count);
+ log->w_off = logger_offset(log, log->w_off + count);
return count;
}
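The renamed is_between() helper above answers whether offset c is passed over when the write position advances from a to b in the circular log, including the case where the write wraps. A worked illustration with made-up numbers (the log size here is hypothetical): in a 256-byte log, moving from a = 240 to b = 16 wraps, so a > b and the "c <= b || a < c" branch applies; c = 250 and c = 8 both count as between (their entries were overwritten), while c = 100 does not. fix_up_readers() uses exactly this test to push the head and any reader offsets forward past the overwritten entries.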
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index efc7dc1f4831..8b8a537ed063 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -34,6 +34,7 @@
#include <linux/mm.h>
#include <linux/oom.h>
#include <linux/sched.h>
+#include <linux/rcupdate.h>
#include <linux/profile.h>
#include <linux/notifier.h>
@@ -82,7 +83,7 @@ task_notify_func(struct notifier_block *self, unsigned long val, void *data)
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
- struct task_struct *p;
+ struct task_struct *tsk;
struct task_struct *selected = NULL;
int rem = 0;
int tasksize;
@@ -134,25 +135,24 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
}
selected_oom_adj = min_adj;
- read_lock(&tasklist_lock);
- for_each_process(p) {
- struct mm_struct *mm;
- struct signal_struct *sig;
+ rcu_read_lock();
+ for_each_process(tsk) {
+ struct task_struct *p;
int oom_adj;
- task_lock(p);
- mm = p->mm;
- sig = p->signal;
- if (!mm || !sig) {
- task_unlock(p);
+ if (tsk->flags & PF_KTHREAD)
continue;
- }
- oom_adj = sig->oom_adj;
+
+ p = find_lock_task_mm(tsk);
+ if (!p)
+ continue;
+
+ oom_adj = p->signal->oom_adj;
if (oom_adj < min_adj) {
task_unlock(p);
continue;
}
- tasksize = get_mm_rss(mm);
+ tasksize = get_mm_rss(p->mm);
task_unlock(p);
if (tasksize <= 0)
continue;
@@ -183,12 +183,12 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
lowmem_deathpending_timeout = jiffies + HZ;
task_handoff_register(&task_nb);
#endif
- force_sig(SIGKILL, selected);
+ send_sig(SIGKILL, selected, 0);
rem -= selected_tasksize;
}
lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
sc->nr_to_scan, sc->gfp_mask, rem);
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
return rem;
}
diff --git a/drivers/staging/android/ram_console.c b/drivers/staging/android/ram_console.c
index 6d4d67924f22..fabd398ec1bc 100644
--- a/drivers/staging/android/ram_console.c
+++ b/drivers/staging/android/ram_console.c
@@ -351,7 +351,7 @@ static int ram_console_driver_probe(struct platform_device *pdev)
"%lx\n", res, pdev->num_resources, res ? res->flags : 0);
return -ENXIO;
}
- buffer_size = res->end - res->start + 1;
+ buffer_size = resource_size(res);
start = res->start;
printk(KERN_INFO "ram_console: got buffer at %zx, size %zx\n",
start, buffer_size);
@@ -411,15 +411,14 @@ static int __init ram_console_late_init(void)
if (ram_console_old_log == NULL)
return 0;
#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
- ram_console_old_log = kmalloc(ram_console_old_log_size, GFP_KERNEL);
+ ram_console_old_log = kmemdup(ram_console_old_log_init_buffer,
+ ram_console_old_log_size, GFP_KERNEL);
if (ram_console_old_log == NULL) {
printk(KERN_ERR
"ram_console: failed to allocate buffer for old log\n");
ram_console_old_log_size = 0;
return 0;
}
- memcpy(ram_console_old_log,
- ram_console_old_log_init_buffer, ram_console_old_log_size);
#endif
entry = create_proc_entry("last_kmsg", S_IFREG | S_IRUGO, NULL);
if (!entry) {
diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c
index a64481c3e86d..bc723eff11af 100644
--- a/drivers/staging/android/timed_gpio.c
+++ b/drivers/staging/android/timed_gpio.c
@@ -29,9 +29,9 @@ struct timed_gpio_data {
struct timed_output_dev dev;
struct hrtimer timer;
spinlock_t lock;
- unsigned gpio;
- int max_timeout;
- u8 active_low;
+ unsigned gpio;
+ int max_timeout;
+ u8 active_low;
};
static enum hrtimer_restart gpio_timer_func(struct hrtimer *timer)
diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c
index 179707b5e7c7..cf3059216958 100644
--- a/drivers/staging/bcm/Bcmchar.c
+++ b/drivers/staging/bcm/Bcmchar.c
@@ -728,14 +728,10 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if (IoBuffer.InputLength > MAX_CNTL_PKT_SIZE)
return -EINVAL;
- pvBuffer = kmalloc(IoBuffer.InputLength, GFP_KERNEL);
- if (!pvBuffer)
- return -ENOMEM;
-
- if (copy_from_user(pvBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength)) {
- kfree(pvBuffer);
- return -EFAULT;
- }
+ pvBuffer = memdup_user(IoBuffer.InputBuffer,
+ IoBuffer.InputLength);
+ if (IS_ERR(pvBuffer))
+ return PTR_ERR(pvBuffer);
down(&Adapter->LowPowerModeSync);
Status = wait_event_interruptible_timeout(Adapter->lowpower_mode_wait_queue,
@@ -1140,15 +1136,10 @@ cntrlEnd:
if (IoBuffer.InputLength < sizeof(ULONG) * 2)
return -EINVAL;
- pvBuffer = kmalloc(IoBuffer.InputLength, GFP_KERNEL);
- if (!pvBuffer)
- return -ENOMEM;
-
- /* Get WrmBuffer structure */
- if (copy_from_user(pvBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength)) {
- kfree(pvBuffer);
- return -EFAULT;
- }
+ pvBuffer = memdup_user(IoBuffer.InputBuffer,
+ IoBuffer.InputLength);
+ if (IS_ERR(pvBuffer))
+ return PTR_ERR(pvBuffer);
pBulkBuffer = (PBULKWRM_BUFFER)pvBuffer;
@@ -1302,20 +1293,18 @@ cntrlEnd:
/*
* Deny the access if the offset crosses the cal area limit.
*/
+ if (stNVMReadWrite.uiNumBytes > Adapter->uiNVMDSDSize)
+ return STATUS_FAILURE;
- if ((stNVMReadWrite.uiOffset + stNVMReadWrite.uiNumBytes) > Adapter->uiNVMDSDSize) {
+ if (stNVMReadWrite.uiOffset > Adapter->uiNVMDSDSize - stNVMReadWrite.uiNumBytes) {
/* BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Can't allow access beyond NVM Size: 0x%x 0x%x\n", stNVMReadWrite.uiOffset, stNVMReadWrite.uiNumBytes); */
return STATUS_FAILURE;
}
- pReadData = kzalloc(stNVMReadWrite.uiNumBytes, GFP_KERNEL);
- if (!pReadData)
- return -ENOMEM;
-
- if (copy_from_user(pReadData, stNVMReadWrite.pBuffer, stNVMReadWrite.uiNumBytes)) {
- kfree(pReadData);
- return -EFAULT;
- }
+ pReadData = memdup_user(stNVMReadWrite.pBuffer,
+ stNVMReadWrite.uiNumBytes);
+ if (IS_ERR(pReadData))
+ return PTR_ERR(pReadData);
do_gettimeofday(&tv0);
if (IOCTL_BCM_NVM_READ == cmd) {
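The three hunks above replace open-coded kmalloc() + copy_from_user() sequences with memdup_user(), which performs the allocation and copy in one call and reports failure through an ERR_PTR. A generic sketch of the idiom being adopted (the function and parameter names are placeholders, not from this driver):

/* sketch of the memdup_user() pattern; names are illustrative only */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static int handle_user_buffer(const void __user *ubuf, size_t len)
{
	void *kbuf;

	kbuf = memdup_user(ubuf, len);	/* kmalloc() + copy_from_user() */
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);	/* -ENOMEM or -EFAULT */

	/* ... operate on kbuf ... */

	kfree(kbuf);
	return 0;
}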
diff --git a/drivers/staging/bcm/CmHost.c b/drivers/staging/bcm/CmHost.c
index c0ee95a71343..522d0052e839 100644
--- a/drivers/staging/bcm/CmHost.c
+++ b/drivers/staging/bcm/CmHost.c
@@ -1,431 +1,359 @@
/************************************************************
-* CMHOST.C
-* This file contains the routines for handling Connection
-* Management.
-************************************************************/
+ * CMHOST.C
+ * This file contains the routines for handling Connection
+ * Management.
+ ************************************************************/
-//#define CONN_MSG
+/* #define CONN_MSG */
#include "headers.h"
-typedef enum _E_CLASSIFIER_ACTION
-{
+enum E_CLASSIFIER_ACTION {
eInvalidClassifierAction,
eAddClassifier,
eReplaceClassifier,
eDeleteClassifier
-}E_CLASSIFIER_ACTION;
+};
-static ULONG GetNextTargetBufferLocation(PMINI_ADAPTER Adapter,B_UINT16 tid);
+static ULONG GetNextTargetBufferLocation(PMINI_ADAPTER Adapter, B_UINT16 tid);
/************************************************************
-* Function - SearchSfid
-*
-* Description - This routinue would search QOS queues having
-* specified SFID as input parameter.
-*
-* Parameters - Adapter: Pointer to the Adapter structure
-* uiSfid : Given SFID for matching
-*
-* Returns - Queue index for this SFID(If matched)
- Else Invalid Queue Index(If Not matched)
-************************************************************/
-INT SearchSfid(PMINI_ADAPTER Adapter,UINT uiSfid)
+ * Function - SearchSfid
+ *
+ * Description - This routinue would search QOS queues having
+ * specified SFID as input parameter.
+ *
+ * Parameters - Adapter: Pointer to the Adapter structure
+ * uiSfid : Given SFID for matching
+ *
+ * Returns - Queue index for this SFID(If matched)
+ * Else Invalid Queue Index(If Not matched)
+ ************************************************************/
+int SearchSfid(PMINI_ADAPTER Adapter, UINT uiSfid)
{
- INT iIndex=0;
- for(iIndex=(NO_OF_QUEUES-1); iIndex>=0; iIndex--)
- if(Adapter->PackInfo[iIndex].ulSFID==uiSfid)
- return iIndex;
+ int i;
+
+ for (i = (NO_OF_QUEUES-1); i >= 0; i--)
+ if (Adapter->PackInfo[i].ulSFID == uiSfid)
+ return i;
+
return NO_OF_QUEUES+1;
}
/***************************************************************
-* Function - SearchFreeSfid
-*
-* Description - This routinue would search Free available SFID.
-*
-* Parameter - Adapter: Pointer to the Adapter structure
-*
-* Returns - Queue index for the free SFID
-* Else returns Invalid Index.
-****************************************************************/
-static INT SearchFreeSfid(PMINI_ADAPTER Adapter)
+ * Function -SearchFreeSfid
+ *
+ * Description - This routinue would search Free available SFID.
+ *
+ * Parameter - Adapter: Pointer to the Adapter structure
+ *
+ * Returns - Queue index for the free SFID
+ * Else returns Invalid Index.
+ ****************************************************************/
+static int SearchFreeSfid(PMINI_ADAPTER Adapter)
{
- UINT uiIndex=0;
+ int i;
+
+ for (i = 0; i < (NO_OF_QUEUES-1); i++)
+ if (Adapter->PackInfo[i].ulSFID == 0)
+ return i;
- for(uiIndex=0; uiIndex < (NO_OF_QUEUES-1); uiIndex++)
- if(Adapter->PackInfo[uiIndex].ulSFID==0)
- return uiIndex;
return NO_OF_QUEUES+1;
}
/*
-Function: SearchClsid
-Description: This routinue would search Classifier having specified ClassifierID as input parameter
-Input parameters: PMINI_ADAPTER Adapter - Adapter Context
- unsigned int uiSfid - The SF in which the classifier is to searched
- B_UINT16 uiClassifierID - The classifier ID to be searched
-Return: int :Classifier table index of matching entry
-*/
-
-static int SearchClsid(PMINI_ADAPTER Adapter,ULONG ulSFID,B_UINT16 uiClassifierID)
+ * Function: SearchClsid
+ * Description: This routinue would search Classifier having specified ClassifierID as input parameter
+ * Input parameters: PMINI_ADAPTER Adapter - Adapter Context
+ * unsigned int uiSfid - The SF in which the classifier is to searched
+ * B_UINT16 uiClassifierID - The classifier ID to be searched
+ * Return: int :Classifier table index of matching entry
+ */
+static int SearchClsid(PMINI_ADAPTER Adapter, ULONG ulSFID, B_UINT16 uiClassifierID)
{
- unsigned int uiClassifierIndex = 0;
- for(uiClassifierIndex=0;uiClassifierIndex<MAX_CLASSIFIERS;uiClassifierIndex++)
- {
- if((Adapter->astClassifierTable[uiClassifierIndex].bUsed) &&
- (Adapter->astClassifierTable[uiClassifierIndex].uiClassifierRuleIndex == uiClassifierID)&&
- (Adapter->astClassifierTable[uiClassifierIndex].ulSFID == ulSFID))
- return uiClassifierIndex;
+ int i;
+
+ for (i = 0; i < MAX_CLASSIFIERS; i++) {
+ if ((Adapter->astClassifierTable[i].bUsed) &&
+ (Adapter->astClassifierTable[i].uiClassifierRuleIndex == uiClassifierID) &&
+ (Adapter->astClassifierTable[i].ulSFID == ulSFID))
+ return i;
}
+
return MAX_CLASSIFIERS+1;
}
-/**
-@ingroup ctrl_pkt_functions
-This routinue would search Free available Classifier entry in classifier table.
-@return free Classifier Entry index in classifier table for specified SF
-*/
-static int SearchFreeClsid(PMINI_ADAPTER Adapter /**Adapter Context*/
- )
+/*
+ * @ingroup ctrl_pkt_functions
+ * This routinue would search Free available Classifier entry in classifier table.
+ * @return free Classifier Entry index in classifier table for specified SF
+ */
+static int SearchFreeClsid(PMINI_ADAPTER Adapter /**Adapter Context*/)
{
- unsigned int uiClassifierIndex = 0;
- for(uiClassifierIndex=0;uiClassifierIndex<MAX_CLASSIFIERS;uiClassifierIndex++)
- {
- if(!Adapter->astClassifierTable[uiClassifierIndex].bUsed)
- return uiClassifierIndex;
+ int i;
+
+ for (i = 0; i < MAX_CLASSIFIERS; i++) {
+ if (!Adapter->astClassifierTable[i].bUsed)
+ return i;
}
+
return MAX_CLASSIFIERS+1;
}
static VOID deleteSFBySfid(PMINI_ADAPTER Adapter, UINT uiSearchRuleIndex)
{
- //deleting all the packet held in the SF
- flush_queue(Adapter,uiSearchRuleIndex);
+ /* deleting all the packet held in the SF */
+ flush_queue(Adapter, uiSearchRuleIndex);
- //Deleting the all classifiers for this SF
- DeleteAllClassifiersForSF(Adapter,uiSearchRuleIndex);
+ /* Deleting the all classifiers for this SF */
+ DeleteAllClassifiersForSF(Adapter, uiSearchRuleIndex);
- //Resetting only MIBS related entries in the SF
+ /* Resetting only MIBS related entries in the SF */
memset((PVOID)&Adapter->PackInfo[uiSearchRuleIndex], 0, sizeof(S_MIBS_SERVICEFLOW_TABLE));
}
static inline VOID
-CopyIpAddrToClassifier(S_CLASSIFIER_RULE *pstClassifierEntry ,
- B_UINT8 u8IpAddressLen , B_UINT8 *pu8IpAddressMaskSrc ,
- BOOLEAN bIpVersion6 , E_IPADDR_CONTEXT eIpAddrContext)
+CopyIpAddrToClassifier(S_CLASSIFIER_RULE *pstClassifierEntry,
+ B_UINT8 u8IpAddressLen, B_UINT8 *pu8IpAddressMaskSrc,
+ BOOLEAN bIpVersion6, E_IPADDR_CONTEXT eIpAddrContext)
{
- UINT ucLoopIndex=0;
- UINT nSizeOfIPAddressInBytes = IP_LENGTH_OF_ADDRESS;
- UCHAR *ptrClassifierIpAddress = NULL;
- UCHAR *ptrClassifierIpMask = NULL;
- PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
+ int i = 0;
+ UINT nSizeOfIPAddressInBytes = IP_LENGTH_OF_ADDRESS;
+ UCHAR *ptrClassifierIpAddress = NULL;
+ UCHAR *ptrClassifierIpMask = NULL;
+ PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
- if(bIpVersion6)
- {
+ if (bIpVersion6)
nSizeOfIPAddressInBytes = IPV6_ADDRESS_SIZEINBYTES;
- }
- //Destination Ip Address
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Ip Address Range Length:0x%X ",
- u8IpAddressLen);
- if((bIpVersion6?(IPV6_ADDRESS_SIZEINBYTES * MAX_IP_RANGE_LENGTH * 2):
- (TOTAL_MASKED_ADDRESS_IN_BYTES)) >= u8IpAddressLen)
- {
+
+ /* Destination Ip Address */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Ip Address Range Length:0x%X ", u8IpAddressLen);
+ if ((bIpVersion6 ? (IPV6_ADDRESS_SIZEINBYTES * MAX_IP_RANGE_LENGTH * 2) :
+ (TOTAL_MASKED_ADDRESS_IN_BYTES)) >= u8IpAddressLen) {
/*
- //checking both the mask and address togethor in Classification.
- //So length will be : TotalLengthInBytes/nSizeOfIPAddressInBytes * 2
- //(nSizeOfIPAddressInBytes for address and nSizeOfIPAddressInBytes for mask)
- */
- if(eIpAddrContext == eDestIpAddress)
- {
- pstClassifierEntry->ucIPDestinationAddressLength =
- u8IpAddressLen/(nSizeOfIPAddressInBytes * 2);
- if(bIpVersion6)
- {
- ptrClassifierIpAddress =
- pstClassifierEntry->stDestIpAddress.ucIpv6Address;
- ptrClassifierIpMask =
- pstClassifierEntry->stDestIpAddress.ucIpv6Mask;
- }
- else
- {
- ptrClassifierIpAddress =
- pstClassifierEntry->stDestIpAddress.ucIpv4Address;
- ptrClassifierIpMask =
- pstClassifierEntry->stDestIpAddress.ucIpv4Mask;
- }
- }
- else if(eIpAddrContext == eSrcIpAddress)
- {
- pstClassifierEntry->ucIPSourceAddressLength =
- u8IpAddressLen/(nSizeOfIPAddressInBytes * 2);
- if(bIpVersion6)
- {
- ptrClassifierIpAddress =
- pstClassifierEntry->stSrcIpAddress.ucIpv6Address;
- ptrClassifierIpMask =
- pstClassifierEntry->stSrcIpAddress.ucIpv6Mask;
+ * checking both the mask and address togethor in Classification.
+ * So length will be : TotalLengthInBytes/nSizeOfIPAddressInBytes * 2
+ * (nSizeOfIPAddressInBytes for address and nSizeOfIPAddressInBytes for mask)
+ */
+ if (eIpAddrContext == eDestIpAddress) {
+ pstClassifierEntry->ucIPDestinationAddressLength = u8IpAddressLen/(nSizeOfIPAddressInBytes * 2);
+ if (bIpVersion6) {
+ ptrClassifierIpAddress = pstClassifierEntry->stDestIpAddress.ucIpv6Address;
+ ptrClassifierIpMask = pstClassifierEntry->stDestIpAddress.ucIpv6Mask;
+ } else {
+ ptrClassifierIpAddress = pstClassifierEntry->stDestIpAddress.ucIpv4Address;
+ ptrClassifierIpMask = pstClassifierEntry->stDestIpAddress.ucIpv4Mask;
}
- else
- {
- ptrClassifierIpAddress =
- pstClassifierEntry->stSrcIpAddress.ucIpv4Address;
- ptrClassifierIpMask =
- pstClassifierEntry->stSrcIpAddress.ucIpv4Mask;
+ } else if (eIpAddrContext == eSrcIpAddress) {
+ pstClassifierEntry->ucIPSourceAddressLength = u8IpAddressLen/(nSizeOfIPAddressInBytes * 2);
+ if (bIpVersion6) {
+ ptrClassifierIpAddress = pstClassifierEntry->stSrcIpAddress.ucIpv6Address;
+ ptrClassifierIpMask = pstClassifierEntry->stSrcIpAddress.ucIpv6Mask;
+ } else {
+ ptrClassifierIpAddress = pstClassifierEntry->stSrcIpAddress.ucIpv4Address;
+ ptrClassifierIpMask = pstClassifierEntry->stSrcIpAddress.ucIpv4Mask;
}
}
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Address Length:0x%X \n",
- pstClassifierEntry->ucIPDestinationAddressLength);
- while((u8IpAddressLen>= nSizeOfIPAddressInBytes) &&
- (ucLoopIndex < MAX_IP_RANGE_LENGTH))
- {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Address Length:0x%X\n", pstClassifierEntry->ucIPDestinationAddressLength);
+ while ((u8IpAddressLen >= nSizeOfIPAddressInBytes) && (i < MAX_IP_RANGE_LENGTH)) {
memcpy(ptrClassifierIpAddress +
- (ucLoopIndex * nSizeOfIPAddressInBytes),
- (pu8IpAddressMaskSrc+(ucLoopIndex*nSizeOfIPAddressInBytes*2)),
+ (i * nSizeOfIPAddressInBytes),
+ (pu8IpAddressMaskSrc+(i*nSizeOfIPAddressInBytes*2)),
nSizeOfIPAddressInBytes);
- if(!bIpVersion6)
- {
- if(eIpAddrContext == eSrcIpAddress)
- {
- pstClassifierEntry->stSrcIpAddress.ulIpv4Addr[ucLoopIndex]=
- ntohl(pstClassifierEntry->stSrcIpAddress.
- ulIpv4Addr[ucLoopIndex]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Src Ip Address:0x%luX ",pstClassifierEntry->stSrcIpAddress.ulIpv4Addr[ucLoopIndex]);
- }
- else if(eIpAddrContext == eDestIpAddress)
- {
- pstClassifierEntry->stDestIpAddress.ulIpv4Addr[ucLoopIndex]= ntohl(pstClassifierEntry->stDestIpAddress.
- ulIpv4Addr[ucLoopIndex]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Dest Ip Address:0x%luX ",pstClassifierEntry->stDestIpAddress.ulIpv4Addr[ucLoopIndex]);
+
+ if (!bIpVersion6) {
+ if (eIpAddrContext == eSrcIpAddress) {
+ pstClassifierEntry->stSrcIpAddress.ulIpv4Addr[i] = ntohl(pstClassifierEntry->stSrcIpAddress.ulIpv4Addr[i]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Src Ip Address:0x%luX ",
+ pstClassifierEntry->stSrcIpAddress.ulIpv4Addr[i]);
+ } else if (eIpAddrContext == eDestIpAddress) {
+ pstClassifierEntry->stDestIpAddress.ulIpv4Addr[i] = ntohl(pstClassifierEntry->stDestIpAddress.ulIpv4Addr[i]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Dest Ip Address:0x%luX ",
+ pstClassifierEntry->stDestIpAddress.ulIpv4Addr[i]);
}
}
- u8IpAddressLen-=nSizeOfIPAddressInBytes;
- if(u8IpAddressLen >= nSizeOfIPAddressInBytes)
- {
+ u8IpAddressLen -= nSizeOfIPAddressInBytes;
+ if (u8IpAddressLen >= nSizeOfIPAddressInBytes) {
memcpy(ptrClassifierIpMask +
- (ucLoopIndex * nSizeOfIPAddressInBytes),
+ (i * nSizeOfIPAddressInBytes),
(pu8IpAddressMaskSrc+nSizeOfIPAddressInBytes +
- (ucLoopIndex*nSizeOfIPAddressInBytes*2)),
+ (i*nSizeOfIPAddressInBytes*2)),
nSizeOfIPAddressInBytes);
- if(!bIpVersion6)
- {
- if(eIpAddrContext == eSrcIpAddress)
- {
- pstClassifierEntry->stSrcIpAddress.
- ulIpv4Mask[ucLoopIndex]=
- ntohl(pstClassifierEntry->stSrcIpAddress.
- ulIpv4Mask[ucLoopIndex]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Src Ip Mask Address:0x%luX ",pstClassifierEntry->stSrcIpAddress.ulIpv4Mask[ucLoopIndex]);
- }
- else if(eIpAddrContext == eDestIpAddress)
- {
- pstClassifierEntry->stDestIpAddress.
- ulIpv4Mask[ucLoopIndex] =
- ntohl(pstClassifierEntry->stDestIpAddress.
- ulIpv4Mask[ucLoopIndex]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Dest Ip Mask Address:0x%luX ",pstClassifierEntry->stDestIpAddress.ulIpv4Mask[ucLoopIndex]);
+
+ if (!bIpVersion6) {
+ if (eIpAddrContext == eSrcIpAddress) {
+ pstClassifierEntry->stSrcIpAddress.ulIpv4Mask[i] =
+ ntohl(pstClassifierEntry->stSrcIpAddress.ulIpv4Mask[i]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Src Ip Mask Address:0x%luX ",
+ pstClassifierEntry->stSrcIpAddress.ulIpv4Mask[i]);
+ } else if (eIpAddrContext == eDestIpAddress) {
+ pstClassifierEntry->stDestIpAddress.ulIpv4Mask[i] =
+ ntohl(pstClassifierEntry->stDestIpAddress.ulIpv4Mask[i]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Dest Ip Mask Address:0x%luX ",
+ pstClassifierEntry->stDestIpAddress.ulIpv4Mask[i]);
}
}
- u8IpAddressLen-=nSizeOfIPAddressInBytes;
- }
- if(0==u8IpAddressLen)
- {
- pstClassifierEntry->bDestIpValid=TRUE;
+ u8IpAddressLen -= nSizeOfIPAddressInBytes;
}
- ucLoopIndex++;
+ if (u8IpAddressLen == 0)
+ pstClassifierEntry->bDestIpValid = TRUE;
+
+ i++;
}
- if(bIpVersion6)
- {
- //Restore EndianNess of Struct
- for(ucLoopIndex =0 ; ucLoopIndex < MAX_IP_RANGE_LENGTH * 4 ;
- ucLoopIndex++)
- {
- if(eIpAddrContext == eSrcIpAddress)
- {
- pstClassifierEntry->stSrcIpAddress.ulIpv6Addr[ucLoopIndex]=
- ntohl(pstClassifierEntry->stSrcIpAddress.
- ulIpv6Addr[ucLoopIndex]);
- pstClassifierEntry->stSrcIpAddress.ulIpv6Mask[ucLoopIndex]= ntohl(pstClassifierEntry->stSrcIpAddress.
- ulIpv6Mask[ucLoopIndex]);
- }
- else if(eIpAddrContext == eDestIpAddress)
- {
- pstClassifierEntry->stDestIpAddress.ulIpv6Addr[ucLoopIndex]= ntohl(pstClassifierEntry->stDestIpAddress.
- ulIpv6Addr[ucLoopIndex]);
- pstClassifierEntry->stDestIpAddress.ulIpv6Mask[ucLoopIndex]= ntohl(pstClassifierEntry->stDestIpAddress.
- ulIpv6Mask[ucLoopIndex]);
+ if (bIpVersion6) {
+ /* Restore EndianNess of Struct */
+ for (i = 0; i < MAX_IP_RANGE_LENGTH * 4; i++) {
+ if (eIpAddrContext == eSrcIpAddress) {
+ pstClassifierEntry->stSrcIpAddress.ulIpv6Addr[i] = ntohl(pstClassifierEntry->stSrcIpAddress.ulIpv6Addr[i]);
+ pstClassifierEntry->stSrcIpAddress.ulIpv6Mask[i] = ntohl(pstClassifierEntry->stSrcIpAddress.ulIpv6Mask[i]);
+ } else if (eIpAddrContext == eDestIpAddress) {
+ pstClassifierEntry->stDestIpAddress.ulIpv6Addr[i] = ntohl(pstClassifierEntry->stDestIpAddress.ulIpv6Addr[i]);
+ pstClassifierEntry->stDestIpAddress.ulIpv6Mask[i] = ntohl(pstClassifierEntry->stDestIpAddress.ulIpv6Mask[i]);
}
}
}
}
}
-
-void ClearTargetDSXBuffer(PMINI_ADAPTER Adapter,B_UINT16 TID,BOOLEAN bFreeAll)
+void ClearTargetDSXBuffer(PMINI_ADAPTER Adapter, B_UINT16 TID, BOOLEAN bFreeAll)
{
- ULONG ulIndex;
- for(ulIndex=0; ulIndex < Adapter->ulTotalTargetBuffersAvailable; ulIndex++)
- {
- if(Adapter->astTargetDsxBuffer[ulIndex].valid)
+ int i;
+
+ for (i = 0; i < Adapter->ulTotalTargetBuffersAvailable; i++) {
+ if (Adapter->astTargetDsxBuffer[i].valid)
continue;
- if ((bFreeAll) || (Adapter->astTargetDsxBuffer[ulIndex].tid == TID)){
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "ClearTargetDSXBuffer: found tid %d buffer cleared %lx\n",
- TID, Adapter->astTargetDsxBuffer[ulIndex].ulTargetDsxBuffer);
- Adapter->astTargetDsxBuffer[ulIndex].valid=1;
- Adapter->astTargetDsxBuffer[ulIndex].tid=0;
+
+ if ((bFreeAll) || (Adapter->astTargetDsxBuffer[i].tid == TID)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "ClearTargetDSXBuffer: found tid %d buffer cleared %lx\n",
+ TID, Adapter->astTargetDsxBuffer[i].ulTargetDsxBuffer);
+ Adapter->astTargetDsxBuffer[i].valid = 1;
+ Adapter->astTargetDsxBuffer[i].tid = 0;
Adapter->ulFreeTargetBufferCnt++;
- }
+ }
}
}
-/**
-@ingroup ctrl_pkt_functions
-copy classifier rule into the specified SF index
-*/
-static inline VOID CopyClassifierRuleToSF(PMINI_ADAPTER Adapter,stConvergenceSLTypes *psfCSType,UINT uiSearchRuleIndex,UINT nClassifierIndex)
+/*
+ * @ingroup ctrl_pkt_functions
+ * copy classifier rule into the specified SF index
+ */
+static inline VOID CopyClassifierRuleToSF(PMINI_ADAPTER Adapter, stConvergenceSLTypes *psfCSType, UINT uiSearchRuleIndex, UINT nClassifierIndex)
{
S_CLASSIFIER_RULE *pstClassifierEntry = NULL;
- //VOID *pvPhsContext = NULL;
- UINT ucLoopIndex=0;
- //UCHAR ucProtocolLength=0;
- //ULONG ulPhsStatus;
-
+ /* VOID *pvPhsContext = NULL; */
+ int i;
+ /* UCHAR ucProtocolLength=0; */
+ /* ULONG ulPhsStatus; */
- if(Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value == 0 ||
+ if (Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value == 0 ||
nClassifierIndex > (MAX_CLASSIFIERS-1))
return;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Storing Classifier Rule Index : %X",
+ ntohs(psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex));
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Storing Classifier Rule Index : %X",ntohs(psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex));
-
- if(nClassifierIndex > MAX_CLASSIFIERS-1)
+ if (nClassifierIndex > MAX_CLASSIFIERS-1)
return;
pstClassifierEntry = &Adapter->astClassifierTable[nClassifierIndex];
- if(pstClassifierEntry)
- {
- //Store if Ipv6
- pstClassifierEntry->bIpv6Protocol =
- (Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6)?TRUE:FALSE;
-
- //Destinaiton Port
- pstClassifierEntry->ucDestPortRangeLength=psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength/4;
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Destination Port Range Length:0x%X ",pstClassifierEntry->ucDestPortRangeLength);
- if( MAX_PORT_RANGE >= psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength)
- {
- for(ucLoopIndex=0;ucLoopIndex<(pstClassifierEntry->ucDestPortRangeLength);ucLoopIndex++)
- {
- pstClassifierEntry->usDestPortRangeLo[ucLoopIndex] =
- *((PUSHORT)(psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange+ucLoopIndex));
- pstClassifierEntry->usDestPortRangeHi[ucLoopIndex] =
- *((PUSHORT)(psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange+2+ucLoopIndex));
- pstClassifierEntry->usDestPortRangeLo[ucLoopIndex]=ntohs(pstClassifierEntry->usDestPortRangeLo[ucLoopIndex]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Destination Port Range Lo:0x%X ",pstClassifierEntry->usDestPortRangeLo[ucLoopIndex]);
- pstClassifierEntry->usDestPortRangeHi[ucLoopIndex]=ntohs(pstClassifierEntry->usDestPortRangeHi[ucLoopIndex]);
+ if (pstClassifierEntry) {
+ /* Store if Ipv6 */
+ pstClassifierEntry->bIpv6Protocol = (Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6) ? TRUE : FALSE;
+
+ /* Destinaiton Port */
+ pstClassifierEntry->ucDestPortRangeLength = psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength / 4;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Destination Port Range Length:0x%X ", pstClassifierEntry->ucDestPortRangeLength);
+
+ if (psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength <= MAX_PORT_RANGE) {
+ for (i = 0; i < (pstClassifierEntry->ucDestPortRangeLength); i++) {
+ pstClassifierEntry->usDestPortRangeLo[i] = *((PUSHORT)(psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange+i));
+ pstClassifierEntry->usDestPortRangeHi[i] =
+ *((PUSHORT)(psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange+2+i));
+ pstClassifierEntry->usDestPortRangeLo[i] = ntohs(pstClassifierEntry->usDestPortRangeLo[i]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Destination Port Range Lo:0x%X ",
+ pstClassifierEntry->usDestPortRangeLo[i]);
+ pstClassifierEntry->usDestPortRangeHi[i] = ntohs(pstClassifierEntry->usDestPortRangeHi[i]);
}
+ } else {
+ pstClassifierEntry->ucDestPortRangeLength = 0;
}
- else
- {
- pstClassifierEntry->ucDestPortRangeLength=0;
- }
- //Source Port
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Source Port Range Length:0x%X ",psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
- if(MAX_PORT_RANGE >=
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength)
- {
- pstClassifierEntry->ucSrcPortRangeLength =
- psfCSType->cCPacketClassificationRule.
- u8ProtocolSourcePortRangeLength/4;
- for(ucLoopIndex = 0; ucLoopIndex <
- (pstClassifierEntry->ucSrcPortRangeLength); ucLoopIndex++)
- {
- pstClassifierEntry->usSrcPortRangeLo[ucLoopIndex] =
- *((PUSHORT)(psfCSType->cCPacketClassificationRule.
- u8ProtocolSourcePortRange+ucLoopIndex));
- pstClassifierEntry->usSrcPortRangeHi[ucLoopIndex] =
- *((PUSHORT)(psfCSType->cCPacketClassificationRule.
- u8ProtocolSourcePortRange+2+ucLoopIndex));
- pstClassifierEntry->usSrcPortRangeLo[ucLoopIndex] =
- ntohs(pstClassifierEntry->usSrcPortRangeLo[ucLoopIndex]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Source Port Range Lo:0x%X ",pstClassifierEntry->usSrcPortRangeLo[ucLoopIndex]);
- pstClassifierEntry->usSrcPortRangeHi[ucLoopIndex]=ntohs(pstClassifierEntry->usSrcPortRangeHi[ucLoopIndex]);
+
+ /* Source Port */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Source Port Range Length:0x%X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
+ if (psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength <= MAX_PORT_RANGE) {
+ pstClassifierEntry->ucSrcPortRangeLength = psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength/4;
+ for (i = 0; i < (pstClassifierEntry->ucSrcPortRangeLength); i++) {
+ pstClassifierEntry->usSrcPortRangeLo[i] =
+ *((PUSHORT)(psfCSType->cCPacketClassificationRule.
+ u8ProtocolSourcePortRange+i));
+ pstClassifierEntry->usSrcPortRangeHi[i] =
+ *((PUSHORT)(psfCSType->cCPacketClassificationRule.
+ u8ProtocolSourcePortRange+2+i));
+ pstClassifierEntry->usSrcPortRangeLo[i] =
+ ntohs(pstClassifierEntry->usSrcPortRangeLo[i]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Source Port Range Lo:0x%X ",
+ pstClassifierEntry->usSrcPortRangeLo[i]);
+ pstClassifierEntry->usSrcPortRangeHi[i] = ntohs(pstClassifierEntry->usSrcPortRangeHi[i]);
}
}
- //Destination Ip Address and Mask
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Ip Destination Parameters : ");
-
+ /* Destination Ip Address and Mask */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Ip Destination Parameters : ");
CopyIpAddrToClassifier(pstClassifierEntry,
- psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength,
- psfCSType->cCPacketClassificationRule.u8IPDestinationAddress,
- (Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6)?
- TRUE:FALSE, eDestIpAddress);
+ psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength,
+ psfCSType->cCPacketClassificationRule.u8IPDestinationAddress,
+ (Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6) ?
+ TRUE : FALSE, eDestIpAddress);
- //Source Ip Address and Mask
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Ip Source Parameters : ");
+ /* Source Ip Address and Mask */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Ip Source Parameters : ");
CopyIpAddrToClassifier(pstClassifierEntry,
- psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength,
- psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress,
- (Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6)?TRUE:FALSE,
- eSrcIpAddress);
-
- //TOS
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"TOS Length:0x%X ",psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength);
- if(3 == psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength)
- {
- pstClassifierEntry->ucIPTypeOfServiceLength =
- psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength;
- pstClassifierEntry->ucTosLow =
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0];
- pstClassifierEntry->ucTosHigh =
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1];
- pstClassifierEntry->ucTosMask =
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2];
+ psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength,
+ psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress,
+ (Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6) ? TRUE : FALSE,
+ eSrcIpAddress);
+
+ /* TOS */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "TOS Length:0x%X ", psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength);
+ if (psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength == 3) {
+ pstClassifierEntry->ucIPTypeOfServiceLength = psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength;
+ pstClassifierEntry->ucTosLow = psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0];
+ pstClassifierEntry->ucTosHigh = psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1];
+ pstClassifierEntry->ucTosMask = psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2];
pstClassifierEntry->bTOSValid = TRUE;
}
- if(psfCSType->cCPacketClassificationRule.u8Protocol == 0)
- {
- //we didn't get protocol field filled in by the BS
- pstClassifierEntry->ucProtocolLength=0;
- }
- else
- {
- pstClassifierEntry->ucProtocolLength=1;// 1 valid protocol
+ if (psfCSType->cCPacketClassificationRule.u8Protocol == 0) {
+ /* we didn't get protocol field filled in by the BS */
+ pstClassifierEntry->ucProtocolLength = 0;
+ } else {
+ pstClassifierEntry->ucProtocolLength = 1; /* 1 valid protocol */
}
- pstClassifierEntry->ucProtocol[0] =
- psfCSType->cCPacketClassificationRule.u8Protocol;
-
- pstClassifierEntry->u8ClassifierRulePriority =
- psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority;
-
- //store the classifier rule ID and set this classifier entry as valid
- pstClassifierEntry->ucDirection =
- Adapter->PackInfo[uiSearchRuleIndex].ucDirection;
- pstClassifierEntry->uiClassifierRuleIndex = ntohs(psfCSType->
- cCPacketClassificationRule.u16PacketClassificationRuleIndex);
- pstClassifierEntry->usVCID_Value =
- Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value;
- pstClassifierEntry->ulSFID =
- Adapter->PackInfo[uiSearchRuleIndex].ulSFID;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Search Index %d Dir: %d, Index: %d, Vcid: %d\n",
- uiSearchRuleIndex, pstClassifierEntry->ucDirection,
- pstClassifierEntry->uiClassifierRuleIndex,
- pstClassifierEntry->usVCID_Value);
-
- if(psfCSType->cCPacketClassificationRule.u8AssociatedPHSI)
- {
+ pstClassifierEntry->ucProtocol[0] = psfCSType->cCPacketClassificationRule.u8Protocol;
+ pstClassifierEntry->u8ClassifierRulePriority = psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority;
+
+ /* store the classifier rule ID and set this classifier entry as valid */
+ pstClassifierEntry->ucDirection = Adapter->PackInfo[uiSearchRuleIndex].ucDirection;
+ pstClassifierEntry->uiClassifierRuleIndex = ntohs(psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
+ pstClassifierEntry->usVCID_Value = Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value;
+ pstClassifierEntry->ulSFID = Adapter->PackInfo[uiSearchRuleIndex].ulSFID;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Search Index %d Dir: %d, Index: %d, Vcid: %d\n",
+ uiSearchRuleIndex, pstClassifierEntry->ucDirection,
+ pstClassifierEntry->uiClassifierRuleIndex,
+ pstClassifierEntry->usVCID_Value);
+
+ if (psfCSType->cCPacketClassificationRule.u8AssociatedPHSI)
pstClassifierEntry->u8AssociatedPHSI = psfCSType->cCPacketClassificationRule.u8AssociatedPHSI;
- }
- //Copy ETH CS Parameters
+ /* Copy ETH CS Parameters */
pstClassifierEntry->ucEthCSSrcMACLen = (psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddressLength);
- memcpy(pstClassifierEntry->au8EThCSSrcMAC,psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress,MAC_ADDRESS_SIZE);
- memcpy(pstClassifierEntry->au8EThCSSrcMACMask,psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress+MAC_ADDRESS_SIZE,MAC_ADDRESS_SIZE);
+ memcpy(pstClassifierEntry->au8EThCSSrcMAC, psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress, MAC_ADDRESS_SIZE);
+ memcpy(pstClassifierEntry->au8EThCSSrcMACMask, psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress + MAC_ADDRESS_SIZE, MAC_ADDRESS_SIZE);
pstClassifierEntry->ucEthCSDestMACLen = (psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
- memcpy(pstClassifierEntry->au8EThCSDestMAC,psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress,MAC_ADDRESS_SIZE);
- memcpy(pstClassifierEntry->au8EThCSDestMACMask,psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress+MAC_ADDRESS_SIZE,MAC_ADDRESS_SIZE);
+ memcpy(pstClassifierEntry->au8EThCSDestMAC, psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress, MAC_ADDRESS_SIZE);
+ memcpy(pstClassifierEntry->au8EThCSDestMACMask, psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress + MAC_ADDRESS_SIZE, MAC_ADDRESS_SIZE);
pstClassifierEntry->ucEtherTypeLen = (psfCSType->cCPacketClassificationRule.u8EthertypeLength);
- memcpy(pstClassifierEntry->au8EthCSEtherType,psfCSType->cCPacketClassificationRule.u8Ethertype,NUM_ETHERTYPE_BYTES);
+ memcpy(pstClassifierEntry->au8EthCSEtherType, psfCSType->cCPacketClassificationRule.u8Ethertype, NUM_ETHERTYPE_BYTES);
memcpy(pstClassifierEntry->usUserPriority, &psfCSType->cCPacketClassificationRule.u16UserPriority, 2);
pstClassifierEntry->usVLANID = ntohs(psfCSType->cCPacketClassificationRule.u16VLANID);
pstClassifierEntry->usValidityBitMap = ntohs(psfCSType->cCPacketClassificationRule.u16ValidityBitMap);
@@ -434,244 +362,199 @@ static inline VOID CopyClassifierRuleToSF(PMINI_ADAPTER Adapter,stConvergenceSLT
}
}
-
-/**
-@ingroup ctrl_pkt_functions
-*/
-static inline VOID DeleteClassifierRuleFromSF(PMINI_ADAPTER Adapter,UINT uiSearchRuleIndex,UINT nClassifierIndex)
+/*
+ * @ingroup ctrl_pkt_functions
+ */
+static inline VOID DeleteClassifierRuleFromSF(PMINI_ADAPTER Adapter, UINT uiSearchRuleIndex, UINT nClassifierIndex)
{
S_CLASSIFIER_RULE *pstClassifierEntry = NULL;
- B_UINT16 u16PacketClassificationRuleIndex;
- USHORT usVCID;
- //VOID *pvPhsContext = NULL;
- //ULONG ulPhsStatus;
+ B_UINT16 u16PacketClassificationRuleIndex;
+ USHORT usVCID;
+ /* VOID *pvPhsContext = NULL; */
+ /*ULONG ulPhsStatus; */
usVCID = Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value;
- if(nClassifierIndex > MAX_CLASSIFIERS-1)
+ if (nClassifierIndex > MAX_CLASSIFIERS-1)
return;
- if(usVCID == 0)
+ if (usVCID == 0)
return;
u16PacketClassificationRuleIndex = Adapter->astClassifierTable[nClassifierIndex].uiClassifierRuleIndex;
-
-
pstClassifierEntry = &Adapter->astClassifierTable[nClassifierIndex];
- if(pstClassifierEntry)
- {
+ if (pstClassifierEntry) {
pstClassifierEntry->bUsed = FALSE;
pstClassifierEntry->uiClassifierRuleIndex = 0;
- memset(pstClassifierEntry,0,sizeof(S_CLASSIFIER_RULE));
+ memset(pstClassifierEntry, 0, sizeof(S_CLASSIFIER_RULE));
- //Delete the PHS Rule for this classifier
- PhsDeleteClassifierRule(
- &Adapter->stBCMPhsContext,
- usVCID,
- u16PacketClassificationRuleIndex);
+ /* Delete the PHS Rule for this classifier */
+ PhsDeleteClassifierRule(&Adapter->stBCMPhsContext, usVCID, u16PacketClassificationRuleIndex);
}
}
-/**
-@ingroup ctrl_pkt_functions
-*/
-VOID DeleteAllClassifiersForSF(PMINI_ADAPTER Adapter,UINT uiSearchRuleIndex)
+/*
+ * @ingroup ctrl_pkt_functions
+ */
+VOID DeleteAllClassifiersForSF(PMINI_ADAPTER Adapter, UINT uiSearchRuleIndex)
{
S_CLASSIFIER_RULE *pstClassifierEntry = NULL;
- UINT nClassifierIndex;
- //B_UINT16 u16PacketClassificationRuleIndex;
- USHORT ulVCID;
- //VOID *pvPhsContext = NULL;
- //ULONG ulPhsStatus;
+ int i;
+ /* B_UINT16 u16PacketClassificationRuleIndex; */
+ USHORT ulVCID;
+ /* VOID *pvPhsContext = NULL; */
+ /* ULONG ulPhsStatus; */
ulVCID = Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value;
- if(ulVCID == 0)
+ if (ulVCID == 0)
return;
+ for (i = 0; i < MAX_CLASSIFIERS; i++) {
+ if (Adapter->astClassifierTable[i].usVCID_Value == ulVCID) {
+ pstClassifierEntry = &Adapter->astClassifierTable[i];
- for(nClassifierIndex =0 ; nClassifierIndex < MAX_CLASSIFIERS ; nClassifierIndex++)
- {
- if(Adapter->astClassifierTable[nClassifierIndex].usVCID_Value == ulVCID)
- {
- pstClassifierEntry = &Adapter->astClassifierTable[nClassifierIndex];
- if(pstClassifierEntry->bUsed)
- {
- DeleteClassifierRuleFromSF(Adapter,uiSearchRuleIndex,nClassifierIndex);
- }
+ if (pstClassifierEntry->bUsed)
+ DeleteClassifierRuleFromSF(Adapter, uiSearchRuleIndex, i);
}
}
- //Delete All Phs Rules Associated with this SF
- PhsDeleteSFRules(
- &Adapter->stBCMPhsContext,
- ulVCID);
-
+ /* Delete All Phs Rules Associated with this SF */
+ PhsDeleteSFRules(&Adapter->stBCMPhsContext, ulVCID);
}
-
-/**
-This routinue copies the Connection Management
-related data into the Adapter structure.
-@ingroup ctrl_pkt_functions
-*/
-
-static VOID CopyToAdapter( register PMINI_ADAPTER Adapter, /**<Pointer to the Adapter structure*/
- register pstServiceFlowParamSI psfLocalSet, /**<Pointer to the ServiceFlowParamSI structure*/
- register UINT uiSearchRuleIndex, /**<Index of Queue, to which this data belongs*/
- register UCHAR ucDsxType,
- stLocalSFAddIndicationAlt *pstAddIndication)
-{
- //UCHAR ucProtocolLength=0;
- ULONG ulSFID;
- UINT nClassifierIndex = 0;
- E_CLASSIFIER_ACTION eClassifierAction = eInvalidClassifierAction;
- B_UINT16 u16PacketClassificationRuleIndex=0;
- UINT nIndex=0;
+/*
+ * This routinue copies the Connection Management
+ * related data into the Adapter structure.
+ * @ingroup ctrl_pkt_functions
+ */
+static VOID CopyToAdapter(register PMINI_ADAPTER Adapter, /* <Pointer to the Adapter structure */
+ register pstServiceFlowParamSI psfLocalSet, /* <Pointer to the ServiceFlowParamSI structure */
+ register UINT uiSearchRuleIndex, /* <Index of Queue, to which this data belongs */
+ register UCHAR ucDsxType,
+ stLocalSFAddIndicationAlt *pstAddIndication) {
+
+ /* UCHAR ucProtocolLength = 0; */
+ ULONG ulSFID;
+ UINT nClassifierIndex = 0;
+ enum E_CLASSIFIER_ACTION eClassifierAction = eInvalidClassifierAction;
+ B_UINT16 u16PacketClassificationRuleIndex = 0;
+ int i;
stConvergenceSLTypes *psfCSType = NULL;
S_PHS_RULE sPhsRule;
USHORT uVCID = Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value;
UINT UGIValue = 0;
-
- Adapter->PackInfo[uiSearchRuleIndex].bValid=TRUE;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Search Rule Index = %d\n", uiSearchRuleIndex);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"%s: SFID= %x ",__FUNCTION__, ntohl(psfLocalSet->u32SFID));
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Updating Queue %d",uiSearchRuleIndex);
+ Adapter->PackInfo[uiSearchRuleIndex].bValid = TRUE;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Search Rule Index = %d\n", uiSearchRuleIndex);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "%s: SFID= %x ", __func__, ntohl(psfLocalSet->u32SFID));
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Updating Queue %d", uiSearchRuleIndex);
ulSFID = ntohl(psfLocalSet->u32SFID);
- //Store IP Version used
- //Get The Version Of IP used (IPv6 or IPv4) from CSSpecification field of SF
+ /* Store IP Version used */
+ /* Get The Version Of IP used (IPv6 or IPv4) from CSSpecification field of SF */
Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = 0;
Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport = 0;
- /*Enable IP/ETh CS Support As Required*/
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"CopyToAdapter : u8CSSpecification : %X\n",psfLocalSet->u8CSSpecification);
- switch(psfLocalSet->u8CSSpecification)
+ /* Enable IP/ETh CS Support As Required */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "CopyToAdapter : u8CSSpecification : %X\n", psfLocalSet->u8CSSpecification);
+ switch (psfLocalSet->u8CSSpecification) {
+ case eCSPacketIPV4:
{
- case eCSPacketIPV4:
- {
- Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV4_CS;
- break;
- }
- case eCSPacketIPV6:
- {
- Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV6_CS;
- break;
- }
-
- case eCS802_3PacketEthernet:
- case eCS802_1QPacketVLAN:
- {
- Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport = ETH_CS_802_3;
- break;
- }
-
- case eCSPacketIPV4Over802_1QVLAN:
- case eCSPacketIPV4Over802_3Ethernet:
- {
- Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV4_CS;
- Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport = ETH_CS_802_3;
- break;
- }
-
- case eCSPacketIPV6Over802_1QVLAN:
- case eCSPacketIPV6Over802_3Ethernet:
- {
- Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV6_CS;
- Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport = ETH_CS_802_3;
- break;
- }
-
- default:
- {
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Error in value of CS Classification.. setting default to IP CS\n");
- Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV4_CS;
- break;
- }
+ Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV4_CS;
+ break;
+ }
+ case eCSPacketIPV6:
+ {
+ Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV6_CS;
+ break;
+ }
+ case eCS802_3PacketEthernet:
+ case eCS802_1QPacketVLAN:
+ {
+ Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport = ETH_CS_802_3;
+ break;
+ }
+ case eCSPacketIPV4Over802_1QVLAN:
+ case eCSPacketIPV4Over802_3Ethernet:
+ {
+ Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV4_CS;
+ Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport = ETH_CS_802_3;
+ break;
+ }
+ case eCSPacketIPV6Over802_1QVLAN:
+ case eCSPacketIPV6Over802_3Ethernet:
+ {
+ Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV6_CS;
+ Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport = ETH_CS_802_3;
+ break;
+ }
+ default:
+ {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Error in value of CS Classification.. setting default to IP CS\n");
+ Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV4_CS;
+ break;
+ }
}
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"CopyToAdapter : Queue No : %X ETH CS Support : %X , IP CS Support : %X \n",
- uiSearchRuleIndex,
- Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport,
- Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "CopyToAdapter : Queue No : %X ETH CS Support : %X , IP CS Support : %X\n",
+ uiSearchRuleIndex,
+ Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport,
+ Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport);
- //Store IP Version used
- //Get The Version Of IP used (IPv6 or IPv4) from CSSpecification field of SF
- if(Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport == IPV6_CS)
- {
+ /* Store IP Version used */
+ /* Get The Version Of IP used (IPv6 or IPv4) from CSSpecification field of SF */
+ if (Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport == IPV6_CS)
Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion = IPV6;
- }
else
- {
Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion = IPV4;
- }
/* To ensure that the ETH CS code doesn't gets executed if the BS doesn't supports ETH CS */
- if(!Adapter->bETHCSEnabled)
+ if (!Adapter->bETHCSEnabled)
Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport = 0;
- if(psfLocalSet->u8ServiceClassNameLength > 0 &&
- psfLocalSet->u8ServiceClassNameLength < 32)
- {
- memcpy(Adapter->PackInfo[uiSearchRuleIndex].ucServiceClassName,
- psfLocalSet->u8ServiceClassName,
- psfLocalSet->u8ServiceClassNameLength);
- }
- Adapter->PackInfo[uiSearchRuleIndex].u8QueueType =
- psfLocalSet->u8ServiceFlowSchedulingType;
+ if (psfLocalSet->u8ServiceClassNameLength > 0 && psfLocalSet->u8ServiceClassNameLength < 32)
+ memcpy(Adapter->PackInfo[uiSearchRuleIndex].ucServiceClassName, psfLocalSet->u8ServiceClassName, psfLocalSet->u8ServiceClassNameLength);
- if(Adapter->PackInfo[uiSearchRuleIndex].u8QueueType==BE &&
- Adapter->PackInfo[uiSearchRuleIndex].ucDirection)
- {
- Adapter->usBestEffortQueueIndex=uiSearchRuleIndex;
- }
+ Adapter->PackInfo[uiSearchRuleIndex].u8QueueType = psfLocalSet->u8ServiceFlowSchedulingType;
+
+ if (Adapter->PackInfo[uiSearchRuleIndex].u8QueueType == BE && Adapter->PackInfo[uiSearchRuleIndex].ucDirection)
+ Adapter->usBestEffortQueueIndex = uiSearchRuleIndex;
Adapter->PackInfo[uiSearchRuleIndex].ulSFID = ntohl(psfLocalSet->u32SFID);
Adapter->PackInfo[uiSearchRuleIndex].u8TrafficPriority = psfLocalSet->u8TrafficPriority;
- //copy all the classifier in the Service Flow param structure
- for(nIndex=0; nIndex<psfLocalSet->u8TotalClassifiers; nIndex++)
- {
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Classifier index =%d",nIndex);
- psfCSType = &psfLocalSet->cConvergenceSLTypes[nIndex];
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Classifier index =%d",nIndex);
-
- if(psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority)
- {
- Adapter->PackInfo[uiSearchRuleIndex].bClassifierPriority=TRUE;
- }
+ /* copy all the classifier in the Service Flow param structure */
+ for (i = 0; i < psfLocalSet->u8TotalClassifiers; i++) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Classifier index =%d", i);
+ psfCSType = &psfLocalSet->cConvergenceSLTypes[i];
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Classifier index =%d", i);
- if(psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority)
- {
- Adapter->PackInfo[uiSearchRuleIndex].bClassifierPriority=TRUE;
- }
+ if (psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority)
+ Adapter->PackInfo[uiSearchRuleIndex].bClassifierPriority = TRUE;
+ if (psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority)
+ Adapter->PackInfo[uiSearchRuleIndex].bClassifierPriority = TRUE;
- if(ucDsxType== DSA_ACK)
- {
+ if (ucDsxType == DSA_ACK) {
eClassifierAction = eAddClassifier;
- }
- else if(ucDsxType == DSC_ACK)
- {
- switch(psfCSType->u8ClassfierDSCAction)
- {
- case 0://DSC Add Classifier
+ } else if (ucDsxType == DSC_ACK) {
+ switch (psfCSType->u8ClassfierDSCAction) {
+ case 0: /* DSC Add Classifier */
{
eClassifierAction = eAddClassifier;
}
break;
- case 1://DSC Replace Classifier
+ case 1: /* DSC Replace Classifier */
{
eClassifierAction = eReplaceClassifier;
}
break;
- case 2://DSC Delete Classifier
+ case 2: /* DSC Delete Classifier */
{
eClassifierAction = eDeleteClassifier;
-
}
break;
default:
@@ -683,163 +566,133 @@ static VOID CopyToAdapter( register PMINI_ADAPTER Adapter, /**<Pointer to the A
u16PacketClassificationRuleIndex = ntohs(psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
- switch(eClassifierAction)
- {
+ switch (eClassifierAction) {
case eAddClassifier:
{
- //Get a Free Classifier Index From Classifier table for this SF to add the Classifier
- //Contained in this message
- nClassifierIndex = SearchClsid(Adapter,ulSFID,u16PacketClassificationRuleIndex);
+ /* Get a Free Classifier Index From Classifier table for this SF to add the Classifier */
+ /* Contained in this message */
+ nClassifierIndex = SearchClsid(Adapter, ulSFID, u16PacketClassificationRuleIndex);
- if(nClassifierIndex > MAX_CLASSIFIERS)
- {
+ if (nClassifierIndex > MAX_CLASSIFIERS) {
nClassifierIndex = SearchFreeClsid(Adapter);
- if(nClassifierIndex > MAX_CLASSIFIERS)
- {
- //Failed To get a free Entry
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Error Failed To get a free Classifier Entry");
+ if (nClassifierIndex > MAX_CLASSIFIERS) {
+ /* Failed To get a free Entry */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Error Failed To get a free Classifier Entry");
break;
}
- //Copy the Classifier Rule for this service flow into our Classifier table maintained per SF.
- CopyClassifierRuleToSF(Adapter,psfCSType,uiSearchRuleIndex,nClassifierIndex);
- }
-
- else
- {
- //This Classifier Already Exists and it is invalid to Add Classifier with existing PCRI
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"CopyToAdapter : Error The Specified Classifier Already Exists \
- and attempted To Add Classifier with Same PCRI : 0x%x\n", u16PacketClassificationRuleIndex);
+ /* Copy the Classifier Rule for this service flow into our Classifier table maintained per SF. */
+ CopyClassifierRuleToSF(Adapter, psfCSType, uiSearchRuleIndex, nClassifierIndex);
+ } else {
+ /* This Classifier Already Exists and it is invalid to Add Classifier with existing PCRI */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
+ "CopyToAdapter: Error The Specified Classifier Already Exists and attempted To Add Classifier with Same PCRI : 0x%x\n",
+ u16PacketClassificationRuleIndex);
}
}
break;
-
case eReplaceClassifier:
{
- //Get the Classifier Index From Classifier table for this SF and replace existing Classifier
- //with the new classifier Contained in this message
- nClassifierIndex = SearchClsid(Adapter,ulSFID,u16PacketClassificationRuleIndex);
- if(nClassifierIndex > MAX_CLASSIFIERS)
- {
- //Failed To search the classifier
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Error Search for Classifier To be replaced failed");
+ /* Get the Classifier Index From Classifier table for this SF and replace existing Classifier */
+ /* with the new classifier Contained in this message */
+ nClassifierIndex = SearchClsid(Adapter, ulSFID, u16PacketClassificationRuleIndex);
+ if (nClassifierIndex > MAX_CLASSIFIERS) {
+ /* Failed To search the classifier */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Error Search for Classifier To be replaced failed");
break;
}
- //Copy the Classifier Rule for this service flow into our Classifier table maintained per SF.
- CopyClassifierRuleToSF(Adapter,psfCSType,uiSearchRuleIndex,nClassifierIndex);
+ /* Copy the Classifier Rule for this service flow into our Classifier table maintained per SF. */
+ CopyClassifierRuleToSF(Adapter, psfCSType, uiSearchRuleIndex, nClassifierIndex);
}
break;
-
case eDeleteClassifier:
{
- //Get the Classifier Index From Classifier table for this SF and replace existing Classifier
- //with the new classifier Contained in this message
- nClassifierIndex = SearchClsid(Adapter,ulSFID,u16PacketClassificationRuleIndex);
- if(nClassifierIndex > MAX_CLASSIFIERS)
- {
- //Failed To search the classifier
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Error Search for Classifier To be deleted failed");
+ /* Get the Classifier Index From Classifier table for this SF and replace existing Classifier */
+ /* with the new classifier Contained in this message */
+ nClassifierIndex = SearchClsid(Adapter, ulSFID, u16PacketClassificationRuleIndex);
+ if (nClassifierIndex > MAX_CLASSIFIERS) {
+ /* Failed To search the classifier */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Error Search for Classifier To be deleted failed");
break;
}
- //Delete This classifier
- DeleteClassifierRuleFromSF(Adapter,uiSearchRuleIndex,nClassifierIndex);
+ /* Delete This classifier */
+ DeleteClassifierRuleFromSF(Adapter, uiSearchRuleIndex, nClassifierIndex);
}
break;
-
default:
{
- //Invalid Action for classifier
+ /* Invalid Action for classifier */
break;
}
}
}
- //Repeat parsing Classification Entries to process PHS Rules
- for(nIndex=0; nIndex < psfLocalSet->u8TotalClassifiers; nIndex++)
- {
- psfCSType = &psfLocalSet->cConvergenceSLTypes[nIndex];
-
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "psfCSType->u8PhsDSCAction : 0x%x\n",
- psfCSType->u8PhsDSCAction );
+ /* Repeat parsing Classification Entries to process PHS Rules */
+ for (i = 0; i < psfLocalSet->u8TotalClassifiers; i++) {
+ psfCSType = &psfLocalSet->cConvergenceSLTypes[i];
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "psfCSType->u8PhsDSCAction : 0x%x\n", psfCSType->u8PhsDSCAction);
- switch (psfCSType->u8PhsDSCAction)
- {
+ switch (psfCSType->u8PhsDSCAction) {
case eDeleteAllPHSRules:
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Deleting All PHS Rules For VCID: 0x%X\n",uVCID);
-
- //Delete All the PHS rules for this Service flow
-
- PhsDeleteSFRules(
- &Adapter->stBCMPhsContext,
- uVCID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Deleting All PHS Rules For VCID: 0x%X\n", uVCID);
+ /* Delete All the PHS rules for this Service flow */
+ PhsDeleteSFRules(&Adapter->stBCMPhsContext, uVCID);
break;
}
case eDeletePHSRule:
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"PHS DSC Action = Delete PHS Rule \n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "PHS DSC Action = Delete PHS Rule\n");
+
+ if (psfCSType->cPhsRule.u8PHSI)
+ PhsDeletePHSRule(&Adapter->stBCMPhsContext, uVCID, psfCSType->cCPacketClassificationRule.u8AssociatedPHSI);
- if(psfCSType->cPhsRule.u8PHSI)
- {
- PhsDeletePHSRule(
- &Adapter->stBCMPhsContext,
- uVCID,
- psfCSType->cCPacketClassificationRule.u8AssociatedPHSI);
- }
- else
- {
- //BCM_DEBUG_PRINT(CONN_MSG,("Error CPHSRule.PHSI is ZERO \n"));
- }
break;
}
- default :
+ default:
{
- if(ucDsxType == DSC_ACK)
- {
- //BCM_DEBUG_PRINT(CONN_MSG,("Invalid PHS DSC Action For DSC \n",psfCSType->cPhsRule.u8PHSI));
- break; //FOr DSC ACK Case PHS DSC Action must be in valid set
+ if (ucDsxType == DSC_ACK) {
+ /* BCM_DEBUG_PRINT(CONN_MSG,("Invalid PHS DSC Action For DSC\n",psfCSType->cPhsRule.u8PHSI)); */
+ break; /* FOr DSC ACK Case PHS DSC Action must be in valid set */
}
}
- //Proceed To Add PHS rule for DSA_ACK case even if PHS DSC action is unspecified
- //No Break Here . Intentionally!
+ /* Proceed To Add PHS rule for DSA_ACK case even if PHS DSC action is unspecified */
+ /* No Break Here . Intentionally! */
case eAddPHSRule:
case eSetPHSRule:
{
- if(psfCSType->cPhsRule.u8PHSI)
- {
- //Apply This PHS Rule to all classifiers whose Associated PHSI Match
+ if (psfCSType->cPhsRule.u8PHSI) {
+ /* Apply This PHS Rule to all classifiers whose Associated PHSI Match */
unsigned int uiClassifierIndex = 0;
- if(pstAddIndication->u8Direction == UPLINK_DIR )
- {
- for(uiClassifierIndex=0;uiClassifierIndex<MAX_CLASSIFIERS;uiClassifierIndex++)
- {
- if((Adapter->astClassifierTable[uiClassifierIndex].bUsed) &&
+ if (pstAddIndication->u8Direction == UPLINK_DIR) {
+ for (uiClassifierIndex = 0; uiClassifierIndex < MAX_CLASSIFIERS; uiClassifierIndex++) {
+ if ((Adapter->astClassifierTable[uiClassifierIndex].bUsed) &&
(Adapter->astClassifierTable[uiClassifierIndex].ulSFID == Adapter->PackInfo[uiSearchRuleIndex].ulSFID) &&
- (Adapter->astClassifierTable[uiClassifierIndex].u8AssociatedPHSI == psfCSType->cPhsRule.u8PHSI))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Adding PHS Rule For Classifier : 0x%x cPhsRule.u8PHSI : 0x%x\n",
- Adapter->astClassifierTable[uiClassifierIndex].uiClassifierRuleIndex,
- psfCSType->cPhsRule.u8PHSI);
- //Update The PHS Rule for this classifier as Associated PHSI id defined
-
- //Copy the PHS Rule
- sPhsRule.u8PHSI = psfCSType->cPhsRule.u8PHSI;
- sPhsRule.u8PHSFLength = psfCSType->cPhsRule.u8PHSFLength;
+ (Adapter->astClassifierTable[uiClassifierIndex].u8AssociatedPHSI == psfCSType->cPhsRule.u8PHSI)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
+ "Adding PHS Rule For Classifier: 0x%x cPhsRule.u8PHSI: 0x%x\n",
+ Adapter->astClassifierTable[uiClassifierIndex].uiClassifierRuleIndex,
+ psfCSType->cPhsRule.u8PHSI);
+ /* Update The PHS Rule for this classifier as Associated PHSI id defined */
+
+ /* Copy the PHS Rule */
+ sPhsRule.u8PHSI = psfCSType->cPhsRule.u8PHSI;
+ sPhsRule.u8PHSFLength = psfCSType->cPhsRule.u8PHSFLength;
sPhsRule.u8PHSMLength = psfCSType->cPhsRule.u8PHSMLength;
sPhsRule.u8PHSS = psfCSType->cPhsRule.u8PHSS;
sPhsRule.u8PHSV = psfCSType->cPhsRule.u8PHSV;
- memcpy(sPhsRule.u8PHSF,psfCSType->cPhsRule.u8PHSF,MAX_PHS_LENGTHS);
- memcpy(sPhsRule.u8PHSM,psfCSType->cPhsRule.u8PHSM,MAX_PHS_LENGTHS);
+ memcpy(sPhsRule.u8PHSF, psfCSType->cPhsRule.u8PHSF, MAX_PHS_LENGTHS);
+ memcpy(sPhsRule.u8PHSM, psfCSType->cPhsRule.u8PHSM, MAX_PHS_LENGTHS);
sPhsRule.u8RefCnt = 0;
sPhsRule.bUnclassifiedPHSRule = FALSE;
sPhsRule.PHSModifiedBytes = 0;
sPhsRule.PHSModifiedNumPackets = 0;
sPhsRule.PHSErrorNumPackets = 0;
- //bPHSRuleAssociated = TRUE;
- //Store The PHS Rule for this classifier
+ /* bPHSRuleAssociated = TRUE; */
+ /* Store The PHS Rule for this classifier */
PhsUpdateClassifierRule(
&Adapter->stBCMPhsContext,
@@ -848,184 +701,157 @@ static VOID CopyToAdapter( register PMINI_ADAPTER Adapter, /**<Pointer to the A
&sPhsRule,
Adapter->astClassifierTable[uiClassifierIndex].u8AssociatedPHSI);
- //Update PHS Rule For the Classifier
- if(sPhsRule.u8PHSI)
- {
+ /* Update PHS Rule For the Classifier */
+ if (sPhsRule.u8PHSI) {
Adapter->astClassifierTable[uiClassifierIndex].u32PHSRuleID = sPhsRule.u8PHSI;
- memcpy(&Adapter->astClassifierTable[uiClassifierIndex].sPhsRule,&sPhsRule,sizeof(S_PHS_RULE));
+ memcpy(&Adapter->astClassifierTable[uiClassifierIndex].sPhsRule, &sPhsRule, sizeof(S_PHS_RULE));
}
-
}
}
+ } else {
+ /* Error PHS Rule specified in signaling could not be applied to any classifier */
+
+ /* Copy the PHS Rule */
+ sPhsRule.u8PHSI = psfCSType->cPhsRule.u8PHSI;
+ sPhsRule.u8PHSFLength = psfCSType->cPhsRule.u8PHSFLength;
+ sPhsRule.u8PHSMLength = psfCSType->cPhsRule.u8PHSMLength;
+ sPhsRule.u8PHSS = psfCSType->cPhsRule.u8PHSS;
+ sPhsRule.u8PHSV = psfCSType->cPhsRule.u8PHSV;
+ memcpy(sPhsRule.u8PHSF, psfCSType->cPhsRule.u8PHSF, MAX_PHS_LENGTHS);
+ memcpy(sPhsRule.u8PHSM, psfCSType->cPhsRule.u8PHSM, MAX_PHS_LENGTHS);
+ sPhsRule.u8RefCnt = 0;
+ sPhsRule.bUnclassifiedPHSRule = TRUE;
+ sPhsRule.PHSModifiedBytes = 0;
+ sPhsRule.PHSModifiedNumPackets = 0;
+ sPhsRule.PHSErrorNumPackets = 0;
+ /* Store The PHS Rule for this classifier */
+
+ /*
+ * Passing the argument u8PHSI instead of clsid. Because for DL with no classifier rule,
+ * clsid will be zero hence we can't have multiple PHS rules for the same SF.
+ * To support multiple PHS rule, passing u8PHSI.
+ */
+ PhsUpdateClassifierRule(
+ &Adapter->stBCMPhsContext,
+ uVCID,
+ sPhsRule.u8PHSI,
+ &sPhsRule,
+ sPhsRule.u8PHSI);
}
- else
- {
- //Error PHS Rule specified in signaling could not be applied to any classifier
-
- //Copy the PHS Rule
- sPhsRule.u8PHSI = psfCSType->cPhsRule.u8PHSI;
- sPhsRule.u8PHSFLength = psfCSType->cPhsRule.u8PHSFLength;
- sPhsRule.u8PHSMLength = psfCSType->cPhsRule.u8PHSMLength;
- sPhsRule.u8PHSS = psfCSType->cPhsRule.u8PHSS;
- sPhsRule.u8PHSV = psfCSType->cPhsRule.u8PHSV;
- memcpy(sPhsRule.u8PHSF,psfCSType->cPhsRule.u8PHSF,MAX_PHS_LENGTHS);
- memcpy(sPhsRule.u8PHSM,psfCSType->cPhsRule.u8PHSM,MAX_PHS_LENGTHS);
- sPhsRule.u8RefCnt = 0;
- sPhsRule.bUnclassifiedPHSRule = TRUE;
- sPhsRule.PHSModifiedBytes = 0;
- sPhsRule.PHSModifiedNumPackets = 0;
- sPhsRule.PHSErrorNumPackets = 0;
- //Store The PHS Rule for this classifier
-
- /*
- Passing the argument u8PHSI instead of clsid. Because for DL with no classifier rule,
- clsid will be zero hence we can't have multiple PHS rules for the same SF.
- To support multiple PHS rule, passing u8PHSI.
- */
-
- PhsUpdateClassifierRule(
- &Adapter->stBCMPhsContext,
- uVCID,
- sPhsRule.u8PHSI,
- &sPhsRule,
- sPhsRule.u8PHSI);
-
- }
-
}
}
break;
}
}
- if(psfLocalSet->u32MaxSustainedTrafficRate == 0 )
- {
- //No Rate Limit . Set Max Sustained Traffic Rate to Maximum
- Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate =
- WIMAX_MAX_ALLOWED_RATE;
-
- }
- else if (ntohl(psfLocalSet->u32MaxSustainedTrafficRate) >
- WIMAX_MAX_ALLOWED_RATE)
- {
- //Too large Allowed Rate specified. Limiting to Wi Max Allowed rate
- Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate =
- WIMAX_MAX_ALLOWED_RATE;
- }
- else
- {
- Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate =
- ntohl(psfLocalSet->u32MaxSustainedTrafficRate);
+ if (psfLocalSet->u32MaxSustainedTrafficRate == 0) {
+ /* No Rate Limit . Set Max Sustained Traffic Rate to Maximum */
+ Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate = WIMAX_MAX_ALLOWED_RATE;
+ } else if (ntohl(psfLocalSet->u32MaxSustainedTrafficRate) > WIMAX_MAX_ALLOWED_RATE) {
+ /* Too large Allowed Rate specified. Limiting to Wi Max Allowed rate */
+ Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate = WIMAX_MAX_ALLOWED_RATE;
+ } else {
+ Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate = ntohl(psfLocalSet->u32MaxSustainedTrafficRate);
}
Adapter->PackInfo[uiSearchRuleIndex].uiMaxLatency = ntohl(psfLocalSet->u32MaximumLatency);
-
- if(Adapter->PackInfo[uiSearchRuleIndex].uiMaxLatency == 0) /* 0 should be treated as infinite */
+ if (Adapter->PackInfo[uiSearchRuleIndex].uiMaxLatency == 0) /* 0 should be treated as infinite */
Adapter->PackInfo[uiSearchRuleIndex].uiMaxLatency = MAX_LATENCY_ALLOWED;
+	if (Adapter->PackInfo[uiSearchRuleIndex].u8QueueType == ERTPS ||
+	    Adapter->PackInfo[uiSearchRuleIndex].u8QueueType == UGS)
+		UGIValue = ntohs(psfLocalSet->u16UnsolicitedGrantInterval);
- if(( Adapter->PackInfo[uiSearchRuleIndex].u8QueueType == ERTPS ||
- Adapter->PackInfo[uiSearchRuleIndex].u8QueueType == UGS ) )
- UGIValue = ntohs(psfLocalSet->u16UnsolicitedGrantInterval);
-
- if(UGIValue == 0)
+ if (UGIValue == 0)
UGIValue = DEFAULT_UG_INTERVAL;
/*
- For UGI based connections...
- DEFAULT_UGI_FACTOR*UGIInterval worth of data is the max token count at host...
- The extra amount of token is to ensure that a large amount of jitter won't have loss in throughput...
- In case of non-UGI based connection, 200 frames worth of data is the max token count at host...
- */
-
+	 * For UGI based connections,
+	 * DEFAULT_UGI_FACTOR*UGIInterval worth of data is the max token count at the host.
+	 * The extra tokens ensure that a large amount of jitter does not cause a loss in throughput.
+	 * For non-UGI based connections, 200 frames worth of data is the max token count at the host.
+	 */
Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize =
- (DEFAULT_UGI_FACTOR*Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate*UGIValue)/1000;
+ (DEFAULT_UGI_FACTOR*Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate*UGIValue)/1000;
- if(Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize < WIMAX_MAX_MTU*8)
- {
+ if (Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize < WIMAX_MAX_MTU*8) {
UINT UGIFactor = 0;
/* Special Handling to ensure the biggest size of packet can go out from host to FW as follows:
- 1. Any packet from Host to FW can go out in different packet size.
- 2. So in case the Bucket count is smaller than MTU, the packets of size (Size > TokenCount), will get dropped.
- 3. We can allow packets of MaxSize from Host->FW that can go out from FW in multiple SDUs by fragmentation at Wimax Layer
- */
+		 * 1. Any packet from Host to FW can go out in a different packet size.
+		 * 2. So if the bucket count is smaller than the MTU, packets with Size > TokenCount will get dropped.
+		 * 3. We can allow packets of MaxSize from Host->FW that can go out from FW in multiple SDUs by fragmentation at the WiMAX layer.
+ */
UGIFactor = (Adapter->PackInfo[uiSearchRuleIndex].uiMaxLatency/UGIValue + 1);
- if(UGIFactor > DEFAULT_UGI_FACTOR)
- Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize =
- (UGIFactor*Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate*UGIValue)/1000;
+ if (UGIFactor > DEFAULT_UGI_FACTOR)
+ Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize =
+ (UGIFactor*Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate*UGIValue)/1000;
- if(Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize > WIMAX_MAX_MTU*8)
+ if (Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize > WIMAX_MAX_MTU*8)
Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize = WIMAX_MAX_MTU*8;
}
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "LAT: %d, UGI: %d\n", Adapter->PackInfo[uiSearchRuleIndex].uiMaxLatency, UGIValue);
+	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "uiMaxAllowedRate: 0x%x, u32MaxSustainedTrafficRate: 0x%x, uiMaxBucketSize: 0x%x",
+ Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate,
+ ntohl(psfLocalSet->u32MaxSustainedTrafficRate),
+ Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"LAT: %d, UGI: %d \n", Adapter->PackInfo[uiSearchRuleIndex].uiMaxLatency, UGIValue);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"uiMaxAllowedRate: 0x%x, u32MaxSustainedTrafficRate: 0x%x ,uiMaxBucketSize: 0x%x",
- Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate,
- ntohl(psfLocalSet->u32MaxSustainedTrafficRate),
- Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize);
-
- //copy the extended SF Parameters to Support MIBS
- CopyMIBSExtendedSFParameters(Adapter,psfLocalSet,uiSearchRuleIndex);
+	/* Copy the extended SF parameters to support MIBS */
+ CopyMIBSExtendedSFParameters(Adapter, psfLocalSet, uiSearchRuleIndex);
- //store header suppression enabled flag per SF
+ /* store header suppression enabled flag per SF */
Adapter->PackInfo[uiSearchRuleIndex].bHeaderSuppressionEnabled =
- !(psfLocalSet->u8RequesttransmissionPolicy &
- MASK_DISABLE_HEADER_SUPPRESSION);
+ !(psfLocalSet->u8RequesttransmissionPolicy &
+ MASK_DISABLE_HEADER_SUPPRESSION);
kfree(Adapter->PackInfo[uiSearchRuleIndex].pstSFIndication);
Adapter->PackInfo[uiSearchRuleIndex].pstSFIndication = pstAddIndication;
- //Re Sort the SF list in PackInfo according to Traffic Priority
+	/* Re-sort the SF list in PackInfo according to Traffic Priority */
SortPackInfo(Adapter);
/* Re Sort the Classifier Rules table and re - arrange
- according to Classifier Rule Priority */
+ * according to Classifier Rule Priority
+ */
SortClassifiers(Adapter);
-
DumpPhsRules(&Adapter->stBCMPhsContext);
-
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"%s <=====", __FUNCTION__);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "%s <=====", __func__);
}
-
/***********************************************************************
-* Function - DumpCmControlPacket
-*
-* Description - This routinue Dumps the Contents of the AddIndication
-* Structure in the Connection Management Control Packet
-*
-* Parameter - pvBuffer: Pointer to the buffer containing the
-* AddIndication data.
-*
-* Returns - None
-*************************************************************************/
+ * Function - DumpCmControlPacket
+ *
+ * Description - This routine dumps the contents of the AddIndication
+ * Structure in the Connection Management Control Packet
+ *
+ * Parameter - pvBuffer: Pointer to the buffer containing the
+ * AddIndication data.
+ *
+ * Returns - None
+ *************************************************************************/
static VOID DumpCmControlPacket(PVOID pvBuffer)
{
- UINT uiLoopIndex;
- UINT nIndex;
- stLocalSFAddIndicationAlt *pstAddIndication;
- UINT nCurClassifierCnt;
- PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
+ int uiLoopIndex;
+ int nIndex;
+ stLocalSFAddIndicationAlt *pstAddIndication;
+ UINT nCurClassifierCnt;
+ PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
pstAddIndication = (stLocalSFAddIndicationAlt *)pvBuffer;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "======>");
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Type : 0x%X",pstAddIndication->u8Type);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Direction : 0x%X",pstAddIndication->u8Direction);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TID: 0x%X", ntohs(pstAddIndication->u16TID));
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID : 0x%X",ntohs(pstAddIndication->u16CID));
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16VCID : 0x%X",ntohs(pstAddIndication->u16VCID));
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " AuthorizedSet--->");
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32SFID : 0x%X",htonl(pstAddIndication->sfAuthorizedSet.u32SFID));
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID : 0x%X",htons(pstAddIndication->sfAuthorizedSet.u16CID));
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassNameLength : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8ServiceClassNameLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassName : 0x%X ,0x%X , 0x%X, 0x%X, 0x%X, 0x%X",
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "======>");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Type: 0x%X", pstAddIndication->u8Type);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Direction: 0x%X", pstAddIndication->u8Direction);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TID: 0x%X", ntohs(pstAddIndication->u16TID));
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID: 0x%X", ntohs(pstAddIndication->u16CID));
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16VCID: 0x%X", ntohs(pstAddIndication->u16VCID));
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " AuthorizedSet--->");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32SFID: 0x%X", htonl(pstAddIndication->sfAuthorizedSet.u32SFID));
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID: 0x%X", htons(pstAddIndication->sfAuthorizedSet.u16CID));
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassNameLength: 0x%X",
+ pstAddIndication->sfAuthorizedSet.u8ServiceClassNameLength);
+
+	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassName: 0x%X, 0x%X, 0x%X, 0x%X, 0x%X, 0x%X",
pstAddIndication->sfAuthorizedSet.u8ServiceClassName[0],
pstAddIndication->sfAuthorizedSet.u8ServiceClassName[1],
pstAddIndication->sfAuthorizedSet.u8ServiceClassName[2],
@@ -1033,207 +859,170 @@ static VOID DumpCmControlPacket(PVOID pvBuffer)
pstAddIndication->sfAuthorizedSet.u8ServiceClassName[4],
pstAddIndication->sfAuthorizedSet.u8ServiceClassName[5]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8MBSService : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8MBSService);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8QosParamSet : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8QosParamSet);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficPriority : 0x%X, %p",
- pstAddIndication->sfAuthorizedSet.u8TrafficPriority, &pstAddIndication->sfAuthorizedSet.u8TrafficPriority);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxSustainedTrafficRate : 0x%X 0x%p",
- pstAddIndication->sfAuthorizedSet.u32MaxSustainedTrafficRate,
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8MBSService: 0x%X", pstAddIndication->sfAuthorizedSet.u8MBSService);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8QosParamSet: 0x%X", pstAddIndication->sfAuthorizedSet.u8QosParamSet);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficPriority: 0x%X, %p",
+ pstAddIndication->sfAuthorizedSet.u8TrafficPriority, &pstAddIndication->sfAuthorizedSet.u8TrafficPriority);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxSustainedTrafficRate: 0x%X 0x%p",
+ pstAddIndication->sfAuthorizedSet.u32MaxSustainedTrafficRate,
&pstAddIndication->sfAuthorizedSet.u32MaxSustainedTrafficRate);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxTrafficBurst : 0x%X",
- pstAddIndication->sfAuthorizedSet.u32MaxTrafficBurst);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinReservedTrafficRate : 0x%X",
- pstAddIndication->sfAuthorizedSet.u32MinReservedTrafficRate);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParamLength : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8VendorSpecificQoSParamLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParam : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8VendorSpecificQoSParam[0]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceFlowSchedulingType : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8ServiceFlowSchedulingType);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32ToleratedJitter : 0x%X",
- pstAddIndication->sfAuthorizedSet.u32ToleratedJitter);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaximumLatency : 0x%X",
- pstAddIndication->sfAuthorizedSet.u32MaximumLatency);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8FixedLengthVSVariableLengthSDUIndicator: 0x%X",
- pstAddIndication->sfAuthorizedSet.u8FixedLengthVSVariableLengthSDUIndicator);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8SDUSize : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8SDUSize);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TargetSAID : 0x%X",
- pstAddIndication->sfAuthorizedSet.u16TargetSAID);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQEnable : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8ARQEnable);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQWindowSize : 0x%X",
- pstAddIndication->sfAuthorizedSet.u16ARQWindowSize);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryTxTimeOut : 0x%X",
- pstAddIndication->sfAuthorizedSet.u16ARQRetryTxTimeOut);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryRxTimeOut : 0x%X",
- pstAddIndication->sfAuthorizedSet.u16ARQRetryRxTimeOut);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockLifeTime : 0x%X",
- pstAddIndication->sfAuthorizedSet.u16ARQBlockLifeTime);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQSyncLossTimeOut : 0x%X",
- pstAddIndication->sfAuthorizedSet.u16ARQSyncLossTimeOut);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQDeliverInOrder : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8ARQDeliverInOrder);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRxPurgeTimeOut : 0x%X",
- pstAddIndication->sfAuthorizedSet.u16ARQRxPurgeTimeOut);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockSize : 0x%X",
- pstAddIndication->sfAuthorizedSet.u16ARQBlockSize);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8CSSpecification : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8CSSpecification);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TypeOfDataDeliveryService : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8TypeOfDataDeliveryService);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16SDUInterArrivalTime : 0x%X",
- pstAddIndication->sfAuthorizedSet.u16SDUInterArrivalTime);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TimeBase : 0x%X",
- pstAddIndication->sfAuthorizedSet.u16TimeBase);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8PagingPreference : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8PagingPreference);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16UnsolicitedPollingInterval : 0x%X",
- pstAddIndication->sfAuthorizedSet.u16UnsolicitedPollingInterval);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "sfAuthorizedSet.u8HARQChannelMapping %x %x %x ",
- *(unsigned int*)pstAddIndication->sfAuthorizedSet.u8HARQChannelMapping,
- *(unsigned int*)&pstAddIndication->sfAuthorizedSet.u8HARQChannelMapping[4],
- *(USHORT*) &pstAddIndication->sfAuthorizedSet.u8HARQChannelMapping[8]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficIndicationPreference : 0x%X",
- pstAddIndication->sfAuthorizedSet.u8TrafficIndicationPreference);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " Total Classifiers Received : 0x%X",pstAddIndication->sfAuthorizedSet.u8TotalClassifiers);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxTrafficBurst: 0x%X", pstAddIndication->sfAuthorizedSet.u32MaxTrafficBurst);
+	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinReservedTrafficRate: 0x%X",
+ pstAddIndication->sfAuthorizedSet.u32MinReservedTrafficRate);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParamLength: 0x%X",
+ pstAddIndication->sfAuthorizedSet.u8VendorSpecificQoSParamLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParam: 0x%X",
+ pstAddIndication->sfAuthorizedSet.u8VendorSpecificQoSParam[0]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceFlowSchedulingType: 0x%X",
+ pstAddIndication->sfAuthorizedSet.u8ServiceFlowSchedulingType);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32ToleratedJitter: 0x%X", pstAddIndication->sfAuthorizedSet.u32ToleratedJitter);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaximumLatency: 0x%X", pstAddIndication->sfAuthorizedSet.u32MaximumLatency);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8FixedLengthVSVariableLengthSDUIndicator: 0x%X",
+ pstAddIndication->sfAuthorizedSet.u8FixedLengthVSVariableLengthSDUIndicator);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8SDUSize: 0x%X", pstAddIndication->sfAuthorizedSet.u8SDUSize);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TargetSAID: 0x%X", pstAddIndication->sfAuthorizedSet.u16TargetSAID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQEnable: 0x%X", pstAddIndication->sfAuthorizedSet.u8ARQEnable);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQWindowSize: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQWindowSize);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryTxTimeOut: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQRetryTxTimeOut);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryRxTimeOut: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQRetryRxTimeOut);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockLifeTime: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQBlockLifeTime);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQSyncLossTimeOut: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQSyncLossTimeOut);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQDeliverInOrder: 0x%X", pstAddIndication->sfAuthorizedSet.u8ARQDeliverInOrder);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRxPurgeTimeOut: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQRxPurgeTimeOut);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockSize: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQBlockSize);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8CSSpecification: 0x%X", pstAddIndication->sfAuthorizedSet.u8CSSpecification);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TypeOfDataDeliveryService: 0x%X",
+ pstAddIndication->sfAuthorizedSet.u8TypeOfDataDeliveryService);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16SDUInterArrivalTime: 0x%X", pstAddIndication->sfAuthorizedSet.u16SDUInterArrivalTime);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TimeBase: 0x%X", pstAddIndication->sfAuthorizedSet.u16TimeBase);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8PagingPreference: 0x%X", pstAddIndication->sfAuthorizedSet.u8PagingPreference);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16UnsolicitedPollingInterval: 0x%X",
+ pstAddIndication->sfAuthorizedSet.u16UnsolicitedPollingInterval);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "sfAuthorizedSet.u8HARQChannelMapping %x %x %x ",
+ *(unsigned int *)pstAddIndication->sfAuthorizedSet.u8HARQChannelMapping,
+ *(unsigned int *)&pstAddIndication->sfAuthorizedSet.u8HARQChannelMapping[4],
+ *(USHORT *)&pstAddIndication->sfAuthorizedSet.u8HARQChannelMapping[8]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficIndicationPreference: 0x%X",
+ pstAddIndication->sfAuthorizedSet.u8TrafficIndicationPreference);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " Total Classifiers Received: 0x%X", pstAddIndication->sfAuthorizedSet.u8TotalClassifiers);
nCurClassifierCnt = pstAddIndication->sfAuthorizedSet.u8TotalClassifiers;
-
- if(nCurClassifierCnt > MAX_CLASSIFIERS_IN_SF)
- {
+ if (nCurClassifierCnt > MAX_CLASSIFIERS_IN_SF)
nCurClassifierCnt = MAX_CLASSIFIERS_IN_SF;
- }
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "pstAddIndication->sfAuthorizedSet.bValid %d", pstAddIndication->sfAuthorizedSet.bValid);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "pstAddIndication->sfAuthorizedSet.u16MacOverhead %x", pstAddIndication->sfAuthorizedSet.u16MacOverhead);
- if(!pstAddIndication->sfAuthorizedSet.bValid)
- pstAddIndication->sfAuthorizedSet.bValid=1;
- for(nIndex = 0 ; nIndex < nCurClassifierCnt ; nIndex++)
- {
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "pstAddIndication->sfAuthorizedSet.bValid %d", pstAddIndication->sfAuthorizedSet.bValid);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "pstAddIndication->sfAuthorizedSet.u16MacOverhead %x", pstAddIndication->sfAuthorizedSet.u16MacOverhead);
+ if (!pstAddIndication->sfAuthorizedSet.bValid)
+ pstAddIndication->sfAuthorizedSet.bValid = 1;
+ for (nIndex = 0; nIndex < nCurClassifierCnt; nIndex++) {
stConvergenceSLTypes *psfCSType = NULL;
psfCSType = &pstAddIndication->sfAuthorizedSet.cConvergenceSLTypes[nIndex];
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "psfCSType = %p", psfCSType);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "CCPacketClassificationRuleSI====>");
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ClassifierRulePriority :0x%X ",
- psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfServiceLength :0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfService[3] :0x%X ,0x%X ,0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0],
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1],
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2]);
-
- for(uiLoopIndex=0; uiLoopIndex < 1; uiLoopIndex++)
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Protocol : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8Protocol);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddressLength :0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength);
-
- for(uiLoopIndex=0; uiLoopIndex < 32; uiLoopIndex++)
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddress[32] : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress[uiLoopIndex]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddressLength : 0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength);
-
- for(uiLoopIndex=0; uiLoopIndex < 32; uiLoopIndex++)
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddress[32] : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8IPDestinationAddress[uiLoopIndex]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRangeLength:0x%X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRange[4]: 0x%02X ,0x%02X ,0x%02X ,0x%02X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[0],
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[1],
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[2],
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[3]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRangeLength : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRange[4]: 0x%02X ,0x%02X ,0x%02X ,0x%02X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[0],
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[1],
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[2],
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[3]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddressLength : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddress[6] : 0x %02X %02X %02X %02X %02X %02X",
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[0],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[1],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[2],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[3],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[4],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[5]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddressLength : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddress[6] : 0x %02X %02X %02X %02X %02X %02X",
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[0],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[1],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[2],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[3],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[4],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[5]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthertypeLength : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8EthertypeLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Ethertype[3] : 0x%02X ,0x%02X ,0x%02X ",
- psfCSType->cCPacketClassificationRule.u8Ethertype[0],
- psfCSType->cCPacketClassificationRule.u8Ethertype[1],
- psfCSType->cCPacketClassificationRule.u8Ethertype[2]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16UserPriority : 0x%X ",
- psfCSType->cCPacketClassificationRule.u16UserPriority);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16VLANID : 0x%X ",
- psfCSType->cCPacketClassificationRule.u16VLANID);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8AssociatedPHSI : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8AssociatedPHSI);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16PacketClassificationRuleIndex : 0x%X ",
- psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParamLength : 0x%X ",
- psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParamLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParam[1] : 0x%X ",
- psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParam[0]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "psfCSType = %p", psfCSType);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "CCPacketClassificationRuleSI====>");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ClassifierRulePriority: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfServiceLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength);
+		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfService[3]: 0x%X, 0x%X, 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0],
+ psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1],
+ psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2]);
+
+ for (uiLoopIndex = 0; uiLoopIndex < 1; uiLoopIndex++)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Protocol: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8Protocol);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddressLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength);
+
+ for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddress[32]: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress[uiLoopIndex]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddressLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength);
+
+ for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddress[32]: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8IPDestinationAddress[uiLoopIndex]);
+
+		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRangeLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
+		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRange[4]: 0x%02X, 0x%02X, 0x%02X, 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[0],
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[1],
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[2],
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[3]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRangeLength: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength);
+		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRange[4]: 0x%02X, 0x%02X, 0x%02X, 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[0],
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[1],
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[2],
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[3]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddressLength: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddress[6]: 0x %02X %02X %02X %02X %02X %02X",
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[0],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[1],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[2],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[3],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[4],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[5]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddressLength: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddress[6]: 0x %02X %02X %02X %02X %02X %02X",
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[0],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[1],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[2],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[3],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[4],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[5]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthertypeLength: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8EthertypeLength);
+		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Ethertype[3]: 0x%02X, 0x%02X, 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8Ethertype[0],
+ psfCSType->cCPacketClassificationRule.u8Ethertype[1],
+ psfCSType->cCPacketClassificationRule.u8Ethertype[2]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16UserPriority: 0x%X ", psfCSType->cCPacketClassificationRule.u16UserPriority);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16VLANID: 0x%X ", psfCSType->cCPacketClassificationRule.u16VLANID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8AssociatedPHSI: 0x%02X ", psfCSType->cCPacketClassificationRule.u8AssociatedPHSI);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16PacketClassificationRuleIndex: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParamLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParamLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParam[1]: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParam[0]);
#ifdef VERSION_D5
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLableLength :0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLableLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLable[6] : 0x %02X %02X %02X %02X %02X %02X ",
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[0],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[1],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[2],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[3],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[4],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[5]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLableLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLableLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLable[6]: 0x %02X %02X %02X %02X %02X %02X ",
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[0],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[1],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[2],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[3],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[4],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[5]);
#endif
}
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "bValid : 0x%02X",pstAddIndication->sfAuthorizedSet.bValid);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "AdmittedSet--->");
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32SFID : 0x%X",pstAddIndication->sfAdmittedSet.u32SFID);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID : 0x%X",pstAddIndication->sfAdmittedSet.u16CID);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassNameLength : 0x%X",
- pstAddIndication->sfAdmittedSet.u8ServiceClassNameLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassName : 0x %02X %02X %02X %02X %02X %02X",
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "bValid: 0x%02X", pstAddIndication->sfAuthorizedSet.bValid);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "AdmittedSet--->");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32SFID: 0x%X", pstAddIndication->sfAdmittedSet.u32SFID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID: 0x%X", pstAddIndication->sfAdmittedSet.u16CID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassNameLength: 0x%X",
+ pstAddIndication->sfAdmittedSet.u8ServiceClassNameLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassName: 0x %02X %02X %02X %02X %02X %02X",
pstAddIndication->sfAdmittedSet.u8ServiceClassName[0],
pstAddIndication->sfAdmittedSet.u8ServiceClassName[1],
pstAddIndication->sfAdmittedSet.u8ServiceClassName[2],
@@ -1241,429 +1030,338 @@ static VOID DumpCmControlPacket(PVOID pvBuffer)
pstAddIndication->sfAdmittedSet.u8ServiceClassName[4],
pstAddIndication->sfAdmittedSet.u8ServiceClassName[5]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8MBSService : 0x%02X",
- pstAddIndication->sfAdmittedSet.u8MBSService);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8QosParamSet : 0x%02X",
- pstAddIndication->sfAdmittedSet.u8QosParamSet);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficPriority : 0x%02X",
- pstAddIndication->sfAdmittedSet.u8TrafficPriority);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxTrafficBurst : 0x%X",
- pstAddIndication->sfAdmittedSet.u32MaxTrafficBurst);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinReservedTrafficRate : 0x%X",
- pstAddIndication->sfAdmittedSet.u32MinReservedTrafficRate);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParamLength : 0x%02X",
- pstAddIndication->sfAdmittedSet.u8VendorSpecificQoSParamLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParam : 0x%02X",
- pstAddIndication->sfAdmittedSet.u8VendorSpecificQoSParam[0]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceFlowSchedulingType : 0x%02X",
- pstAddIndication->sfAdmittedSet.u8ServiceFlowSchedulingType);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32ToleratedJitter : 0x%X",
- pstAddIndication->sfAdmittedSet.u32ToleratedJitter);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaximumLatency : 0x%X",
- pstAddIndication->sfAdmittedSet.u32MaximumLatency);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8FixedLengthVSVariableLengthSDUIndicator: 0x%02X",
- pstAddIndication->sfAdmittedSet.u8FixedLengthVSVariableLengthSDUIndicator);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8SDUSize : 0x%02X",
- pstAddIndication->sfAdmittedSet.u8SDUSize);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TargetSAID : 0x%02X",
- pstAddIndication->sfAdmittedSet.u16TargetSAID);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQEnable : 0x%02X",
- pstAddIndication->sfAdmittedSet.u8ARQEnable);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQWindowSize : 0x%X",
- pstAddIndication->sfAdmittedSet.u16ARQWindowSize);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryTxTimeOut : 0x%X",
- pstAddIndication->sfAdmittedSet.u16ARQRetryTxTimeOut);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryRxTimeOut : 0x%X",
- pstAddIndication->sfAdmittedSet.u16ARQRetryRxTimeOut);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockLifeTime : 0x%X",
- pstAddIndication->sfAdmittedSet.u16ARQBlockLifeTime);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQSyncLossTimeOut : 0x%X",
- pstAddIndication->sfAdmittedSet.u16ARQSyncLossTimeOut);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQDeliverInOrder : 0x%02X",
- pstAddIndication->sfAdmittedSet.u8ARQDeliverInOrder);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRxPurgeTimeOut : 0x%X",
- pstAddIndication->sfAdmittedSet.u16ARQRxPurgeTimeOut);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockSize : 0x%X",
- pstAddIndication->sfAdmittedSet.u16ARQBlockSize);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8CSSpecification : 0x%02X",
- pstAddIndication->sfAdmittedSet.u8CSSpecification);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TypeOfDataDeliveryService : 0x%02X",
- pstAddIndication->sfAdmittedSet.u8TypeOfDataDeliveryService);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16SDUInterArrivalTime : 0x%X",
- pstAddIndication->sfAdmittedSet.u16SDUInterArrivalTime);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TimeBase : 0x%X",
- pstAddIndication->sfAdmittedSet.u16TimeBase);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8PagingPreference : 0x%X",
- pstAddIndication->sfAdmittedSet.u8PagingPreference);
-
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficIndicationPreference : 0x%02X",
- pstAddIndication->sfAdmittedSet.u8TrafficIndicationPreference);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " Total Classifiers Received : 0x%X",pstAddIndication->sfAdmittedSet.u8TotalClassifiers);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8MBSService: 0x%02X", pstAddIndication->sfAdmittedSet.u8MBSService);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8QosParamSet: 0x%02X", pstAddIndication->sfAdmittedSet.u8QosParamSet);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficPriority: 0x%02X", pstAddIndication->sfAdmittedSet.u8TrafficPriority);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxTrafficBurst: 0x%X", pstAddIndication->sfAdmittedSet.u32MaxTrafficBurst);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinReservedTrafficRate: 0x%X",
+ pstAddIndication->sfAdmittedSet.u32MinReservedTrafficRate);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParamLength: 0x%02X",
+ pstAddIndication->sfAdmittedSet.u8VendorSpecificQoSParamLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParam: 0x%02X",
+ pstAddIndication->sfAdmittedSet.u8VendorSpecificQoSParam[0]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceFlowSchedulingType: 0x%02X",
+ pstAddIndication->sfAdmittedSet.u8ServiceFlowSchedulingType);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32ToleratedJitter: 0x%X", pstAddIndication->sfAdmittedSet.u32ToleratedJitter);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaximumLatency: 0x%X", pstAddIndication->sfAdmittedSet.u32MaximumLatency);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8FixedLengthVSVariableLengthSDUIndicator: 0x%02X",
+ pstAddIndication->sfAdmittedSet.u8FixedLengthVSVariableLengthSDUIndicator);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8SDUSize: 0x%02X", pstAddIndication->sfAdmittedSet.u8SDUSize);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TargetSAID: 0x%02X", pstAddIndication->sfAdmittedSet.u16TargetSAID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQEnable: 0x%02X", pstAddIndication->sfAdmittedSet.u8ARQEnable);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQWindowSize: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQWindowSize);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryTxTimeOut: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQRetryTxTimeOut);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryRxTimeOut: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQRetryRxTimeOut);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockLifeTime: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQBlockLifeTime);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQSyncLossTimeOut: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQSyncLossTimeOut);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQDeliverInOrder: 0x%02X", pstAddIndication->sfAdmittedSet.u8ARQDeliverInOrder);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRxPurgeTimeOut: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQRxPurgeTimeOut);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockSize: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQBlockSize);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8CSSpecification: 0x%02X", pstAddIndication->sfAdmittedSet.u8CSSpecification);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TypeOfDataDeliveryService: 0x%02X",
+ pstAddIndication->sfAdmittedSet.u8TypeOfDataDeliveryService);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16SDUInterArrivalTime: 0x%X", pstAddIndication->sfAdmittedSet.u16SDUInterArrivalTime);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TimeBase: 0x%X", pstAddIndication->sfAdmittedSet.u16TimeBase);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8PagingPreference: 0x%X", pstAddIndication->sfAdmittedSet.u8PagingPreference);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficIndicationPreference: 0x%02X",
+ pstAddIndication->sfAdmittedSet.u8TrafficIndicationPreference);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " Total Classifiers Received: 0x%X", pstAddIndication->sfAdmittedSet.u8TotalClassifiers);
nCurClassifierCnt = pstAddIndication->sfAdmittedSet.u8TotalClassifiers;
-
- if(nCurClassifierCnt > MAX_CLASSIFIERS_IN_SF)
- {
+ if (nCurClassifierCnt > MAX_CLASSIFIERS_IN_SF)
nCurClassifierCnt = MAX_CLASSIFIERS_IN_SF;
- }
-
-
- for(nIndex = 0 ; nIndex < nCurClassifierCnt ; nIndex++)
- {
+ for (nIndex = 0; nIndex < nCurClassifierCnt; nIndex++) {
stConvergenceSLTypes *psfCSType = NULL;
- psfCSType = &pstAddIndication->sfAdmittedSet.cConvergenceSLTypes[nIndex];
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " CCPacketClassificationRuleSI====>");
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ClassifierRulePriority :0x%02X ",
- psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfServiceLength :0x%02X",
- psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfService[3] :0x%02X %02X %02X",
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0],
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1],
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2]);
- for(uiLoopIndex=0; uiLoopIndex < 1; uiLoopIndex++)
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Protocol: 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8Protocol);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddressLength :0x%02X ",
- psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength);
-
- for(uiLoopIndex=0; uiLoopIndex < 32; uiLoopIndex++)
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddress[32] : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress[uiLoopIndex]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddressLength : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength);
-
- for(uiLoopIndex=0; uiLoopIndex < 32; uiLoopIndex++)
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddress[32] : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8IPDestinationAddress[uiLoopIndex]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRangeLength : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRange[4] : 0x %02X %02X %02X %02X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[0],
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[1],
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[2],
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[3]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRangeLength : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRange[4] : 0x %02X %02X %02X %02X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[0],
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[1],
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[2],
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[3]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddressLength : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddress[6] : 0x %02X %02X %02X %02X %02X %02X",
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[0],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[1],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[2],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[3],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[4],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[5]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddressLength : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddress[6] : 0x %02X %02X %02X %02X %02X %02X",
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[0],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[1],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[2],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[3],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[4],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[5]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthertypeLength : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8EthertypeLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Ethertype[3] : 0x%02X %02X %02X",
- psfCSType->cCPacketClassificationRule.u8Ethertype[0],
- psfCSType->cCPacketClassificationRule.u8Ethertype[1],
- psfCSType->cCPacketClassificationRule.u8Ethertype[2]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16UserPriority : 0x%X ",
- psfCSType->cCPacketClassificationRule.u16UserPriority);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16VLANID : 0x%X ",
- psfCSType->cCPacketClassificationRule.u16VLANID);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8AssociatedPHSI : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8AssociatedPHSI);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16PacketClassificationRuleIndex : 0x%X ",
- psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParamLength : 0x%02X",
- psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParamLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParam[1] : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParam[0]);
+ psfCSType = &pstAddIndication->sfAdmittedSet.cConvergenceSLTypes[nIndex];
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " CCPacketClassificationRuleSI====>");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ClassifierRulePriority: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfServiceLength: 0x%02X",
+ psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfService[3]: 0x%02X %02X %02X",
+ psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0],
+ psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1],
+ psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2]);
+ for (uiLoopIndex = 0; uiLoopIndex < 1; uiLoopIndex++)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Protocol: 0x%02X ", psfCSType->cCPacketClassificationRule.u8Protocol);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddressLength: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength);
+
+ for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddress[32]: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress[uiLoopIndex]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddressLength: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength);
+
+ for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddress[32]: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8IPDestinationAddress[uiLoopIndex]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRangeLength: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRange[4]: 0x %02X %02X %02X %02X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[0],
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[1],
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[2],
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[3]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRangeLength: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRange[4]: 0x %02X %02X %02X %02X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[0],
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[1],
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[2],
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[3]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddressLength: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddress[6]: 0x %02X %02X %02X %02X %02X %02X",
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[0],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[1],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[2],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[3],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[4],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[5]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddressLength: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddress[6]: 0x %02X %02X %02X %02X %02X %02X",
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[0],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[1],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[2],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[3],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[4],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[5]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthertypeLength: 0x%02X ", psfCSType->cCPacketClassificationRule.u8EthertypeLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Ethertype[3]: 0x%02X %02X %02X",
+ psfCSType->cCPacketClassificationRule.u8Ethertype[0],
+ psfCSType->cCPacketClassificationRule.u8Ethertype[1],
+ psfCSType->cCPacketClassificationRule.u8Ethertype[2]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16UserPriority: 0x%X ", psfCSType->cCPacketClassificationRule.u16UserPriority);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16VLANID: 0x%X ", psfCSType->cCPacketClassificationRule.u16VLANID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8AssociatedPHSI: 0x%02X ", psfCSType->cCPacketClassificationRule.u8AssociatedPHSI);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16PacketClassificationRuleIndex: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParamLength: 0x%02X",
+ psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParamLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParam[1]: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParam[0]);
#ifdef VERSION_D5
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLableLength : 0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLableLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLable[6] : 0x %02X %02X %02X %02X %02X %02X ",
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[0],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[1],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[2],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[3],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[4],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[5]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLableLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLableLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLable[6]: 0x %02X %02X %02X %02X %02X %02X ",
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[0],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[1],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[2],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[3],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[4],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[5]);
#endif
}
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "bValid : 0x%X",pstAddIndication->sfAdmittedSet.bValid);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " ActiveSet--->");
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32SFID : 0x%X",pstAddIndication->sfActiveSet.u32SFID);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID : 0x%X",pstAddIndication->sfActiveSet.u16CID);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassNameLength : 0x%X",
- pstAddIndication->sfActiveSet.u8ServiceClassNameLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassName : 0x %02X %02X %02X %02X %02X %02X",
- pstAddIndication->sfActiveSet.u8ServiceClassName[0],
- pstAddIndication->sfActiveSet.u8ServiceClassName[1],
- pstAddIndication->sfActiveSet.u8ServiceClassName[2],
- pstAddIndication->sfActiveSet.u8ServiceClassName[3],
- pstAddIndication->sfActiveSet.u8ServiceClassName[4],
- pstAddIndication->sfActiveSet.u8ServiceClassName[5]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8MBSService : 0x%02X",
- pstAddIndication->sfActiveSet.u8MBSService);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8QosParamSet : 0x%02X",
- pstAddIndication->sfActiveSet.u8QosParamSet);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficPriority : 0x%02X",
- pstAddIndication->sfActiveSet.u8TrafficPriority);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxTrafficBurst : 0x%X",
- pstAddIndication->sfActiveSet.u32MaxTrafficBurst);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinReservedTrafficRate : 0x%X",
- pstAddIndication->sfActiveSet.u32MinReservedTrafficRate);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParamLength : 0x%02X",
- pstAddIndication->sfActiveSet.u8VendorSpecificQoSParamLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParam : 0x%02X",
- pstAddIndication->sfActiveSet.u8VendorSpecificQoSParam[0]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceFlowSchedulingType : 0x%02X",
- pstAddIndication->sfActiveSet.u8ServiceFlowSchedulingType);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32ToleratedJitter : 0x%X",
- pstAddIndication->sfActiveSet.u32ToleratedJitter);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaximumLatency : 0x%X",
- pstAddIndication->sfActiveSet.u32MaximumLatency);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8FixedLengthVSVariableLengthSDUIndicator: 0x%02X",
- pstAddIndication->sfActiveSet.u8FixedLengthVSVariableLengthSDUIndicator);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8SDUSize : 0x%X",
- pstAddIndication->sfActiveSet.u8SDUSize);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16TargetSAID : 0x%X",
- pstAddIndication->sfActiveSet.u16TargetSAID);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ARQEnable : 0x%X",
- pstAddIndication->sfActiveSet.u8ARQEnable);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQWindowSize : 0x%X",
- pstAddIndication->sfActiveSet.u16ARQWindowSize);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQRetryTxTimeOut : 0x%X",
- pstAddIndication->sfActiveSet.u16ARQRetryTxTimeOut);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQRetryRxTimeOut : 0x%X",
- pstAddIndication->sfActiveSet.u16ARQRetryRxTimeOut);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQBlockLifeTime : 0x%X",
- pstAddIndication->sfActiveSet.u16ARQBlockLifeTime);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQSyncLossTimeOut : 0x%X",
- pstAddIndication->sfActiveSet.u16ARQSyncLossTimeOut);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ARQDeliverInOrder : 0x%X",
- pstAddIndication->sfActiveSet.u8ARQDeliverInOrder);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQRxPurgeTimeOut : 0x%X",
- pstAddIndication->sfActiveSet.u16ARQRxPurgeTimeOut);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQBlockSize : 0x%X",
- pstAddIndication->sfActiveSet.u16ARQBlockSize);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8CSSpecification : 0x%X",
- pstAddIndication->sfActiveSet.u8CSSpecification);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8TypeOfDataDeliveryService : 0x%X",
- pstAddIndication->sfActiveSet.u8TypeOfDataDeliveryService);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16SDUInterArrivalTime : 0x%X",
- pstAddIndication->sfActiveSet.u16SDUInterArrivalTime);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16TimeBase : 0x%X",
- pstAddIndication->sfActiveSet.u16TimeBase);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8PagingPreference : 0x%X",
- pstAddIndication->sfActiveSet.u8PagingPreference);
-
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8TrafficIndicationPreference : 0x%X",
- pstAddIndication->sfActiveSet.u8TrafficIndicationPreference);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " Total Classifiers Received : 0x%X",pstAddIndication->sfActiveSet.u8TotalClassifiers);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "bValid: 0x%X", pstAddIndication->sfAdmittedSet.bValid);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " ActiveSet--->");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32SFID: 0x%X", pstAddIndication->sfActiveSet.u32SFID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID: 0x%X", pstAddIndication->sfActiveSet.u16CID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassNameLength: 0x%X", pstAddIndication->sfActiveSet.u8ServiceClassNameLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassName: 0x %02X %02X %02X %02X %02X %02X",
+ pstAddIndication->sfActiveSet.u8ServiceClassName[0],
+ pstAddIndication->sfActiveSet.u8ServiceClassName[1],
+ pstAddIndication->sfActiveSet.u8ServiceClassName[2],
+ pstAddIndication->sfActiveSet.u8ServiceClassName[3],
+ pstAddIndication->sfActiveSet.u8ServiceClassName[4],
+ pstAddIndication->sfActiveSet.u8ServiceClassName[5]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8MBSService: 0x%02X", pstAddIndication->sfActiveSet.u8MBSService);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8QosParamSet: 0x%02X", pstAddIndication->sfActiveSet.u8QosParamSet);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficPriority: 0x%02X", pstAddIndication->sfActiveSet.u8TrafficPriority);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxTrafficBurst: 0x%X", pstAddIndication->sfActiveSet.u32MaxTrafficBurst);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinReservedTrafficRate: 0x%X",
+ pstAddIndication->sfActiveSet.u32MinReservedTrafficRate);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParamLength: 0x%02X",
+ pstAddIndication->sfActiveSet.u8VendorSpecificQoSParamLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParam: 0x%02X",
+ pstAddIndication->sfActiveSet.u8VendorSpecificQoSParam[0]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceFlowSchedulingType: 0x%02X",
+ pstAddIndication->sfActiveSet.u8ServiceFlowSchedulingType);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32ToleratedJitter: 0x%X", pstAddIndication->sfActiveSet.u32ToleratedJitter);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaximumLatency: 0x%X", pstAddIndication->sfActiveSet.u32MaximumLatency);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8FixedLengthVSVariableLengthSDUIndicator: 0x%02X",
+ pstAddIndication->sfActiveSet.u8FixedLengthVSVariableLengthSDUIndicator);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8SDUSize: 0x%X", pstAddIndication->sfActiveSet.u8SDUSize);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16TargetSAID: 0x%X", pstAddIndication->sfActiveSet.u16TargetSAID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ARQEnable: 0x%X", pstAddIndication->sfActiveSet.u8ARQEnable);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQWindowSize: 0x%X", pstAddIndication->sfActiveSet.u16ARQWindowSize);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQRetryTxTimeOut: 0x%X", pstAddIndication->sfActiveSet.u16ARQRetryTxTimeOut);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQRetryRxTimeOut: 0x%X", pstAddIndication->sfActiveSet.u16ARQRetryRxTimeOut);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQBlockLifeTime: 0x%X", pstAddIndication->sfActiveSet.u16ARQBlockLifeTime);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQSyncLossTimeOut: 0x%X", pstAddIndication->sfActiveSet.u16ARQSyncLossTimeOut);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ARQDeliverInOrder: 0x%X", pstAddIndication->sfActiveSet.u8ARQDeliverInOrder);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQRxPurgeTimeOut: 0x%X", pstAddIndication->sfActiveSet.u16ARQRxPurgeTimeOut);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQBlockSize: 0x%X", pstAddIndication->sfActiveSet.u16ARQBlockSize);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8CSSpecification: 0x%X", pstAddIndication->sfActiveSet.u8CSSpecification);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8TypeOfDataDeliveryService: 0x%X",
+ pstAddIndication->sfActiveSet.u8TypeOfDataDeliveryService);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16SDUInterArrivalTime: 0x%X", pstAddIndication->sfActiveSet.u16SDUInterArrivalTime);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16TimeBase: 0x%X", pstAddIndication->sfActiveSet.u16TimeBase);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8PagingPreference: 0x%X", pstAddIndication->sfActiveSet.u8PagingPreference);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8TrafficIndicationPreference: 0x%X",
+ pstAddIndication->sfActiveSet.u8TrafficIndicationPreference);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " Total Classifiers Received: 0x%X", pstAddIndication->sfActiveSet.u8TotalClassifiers);
nCurClassifierCnt = pstAddIndication->sfActiveSet.u8TotalClassifiers;
-
- if(nCurClassifierCnt > MAX_CLASSIFIERS_IN_SF)
- {
+ if (nCurClassifierCnt > MAX_CLASSIFIERS_IN_SF)
nCurClassifierCnt = MAX_CLASSIFIERS_IN_SF;
- }
-
- for(nIndex = 0 ; nIndex < nCurClassifierCnt ; nIndex++)
- {
+ for (nIndex = 0; nIndex < nCurClassifierCnt; nIndex++) {
stConvergenceSLTypes *psfCSType = NULL;
- psfCSType = &pstAddIndication->sfActiveSet.cConvergenceSLTypes[nIndex];
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " CCPacketClassificationRuleSI====>");
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ClassifierRulePriority :0x%X ",
- psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPTypeOfServiceLength :0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPTypeOfService[3] :0x%X ,0x%X ,0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0],
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1],
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2]);
- for(uiLoopIndex=0; uiLoopIndex < 1; uiLoopIndex++)
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8Protocol : 0x%X ",
- psfCSType->cCPacketClassificationRule.u8Protocol);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddressLength :0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength);
-
- for(uiLoopIndex=0; uiLoopIndex < 32; uiLoopIndex++)
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddress[32]:0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress[uiLoopIndex]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddressLength : 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength);
-
- for(uiLoopIndex=0;uiLoopIndex<32;uiLoopIndex++)
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPDestinationAddress[32]:0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPDestinationAddress[uiLoopIndex]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ProtocolSourcePortRangeLength:0x%X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ProtocolSourcePortRange[4]:0x%X ,0x%X ,0x%X ,0x%X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[0],
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[1],
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[2],
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[3]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ProtocolDestPortRangeLength:0x%X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ProtocolDestPortRange[4]:0x%X ,0x%X ,0x%X ,0x%X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[0],
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[1],
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[2],
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[3]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8EthernetDestMacAddressLength:0x%X ",
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8EthernetDestMacAddress[6]:0x%X ,0x%X ,0x%X ,0x%X ,0x%X ,0x%X",
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[0],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[1],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[2],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[3],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[4],
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[5]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8EthernetSourceMACAddressLength:0x%X ",
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddress[6]:0x%X ,0x%X ,0x%X ,0x%X ,0x%X ,0x%X",
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[0],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[1],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[2],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[3],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[4],
- psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[5]);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8EthertypeLength :0x%X ",
- psfCSType->cCPacketClassificationRule.u8EthertypeLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8Ethertype[3] :0x%X ,0x%X ,0x%X ",
- psfCSType->cCPacketClassificationRule.u8Ethertype[0],
- psfCSType->cCPacketClassificationRule.u8Ethertype[1],
- psfCSType->cCPacketClassificationRule.u8Ethertype[2]);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16UserPriority :0x%X ",
- psfCSType->cCPacketClassificationRule.u16UserPriority);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16VLANID :0x%X ",
- psfCSType->cCPacketClassificationRule.u16VLANID);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8AssociatedPHSI :0x%X ",
- psfCSType->cCPacketClassificationRule.u8AssociatedPHSI);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16PacketClassificationRuleIndex:0x%X ",
- psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8VendorSpecificClassifierParamLength:0x%X ",
- psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParamLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8VendorSpecificClassifierParam[1]:0x%X ",
- psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParam[0]);
+ psfCSType = &pstAddIndication->sfActiveSet.cConvergenceSLTypes[nIndex];
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " CCPacketClassificationRuleSI====>");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ClassifierRulePriority: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPTypeOfServiceLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPTypeOfService[3]: 0x%X ,0x%X ,0x%X ",
+ psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0],
+ psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1],
+ psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2]);
+
+ for (uiLoopIndex = 0; uiLoopIndex < 1; uiLoopIndex++)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8Protocol: 0x%X ", psfCSType->cCPacketClassificationRule.u8Protocol);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddressLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength);
+
+ for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddress[32]: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress[uiLoopIndex]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddressLength: 0x%02X ",
+ psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength);
+
+ for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPDestinationAddress[32]:0x%X ",
+ psfCSType->cCPacketClassificationRule.u8IPDestinationAddress[uiLoopIndex]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ProtocolSourcePortRangeLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ProtocolSourcePortRange[4]: 0x%X ,0x%X ,0x%X ,0x%X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[0],
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[1],
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[2],
+ psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[3]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ProtocolDestPortRangeLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ProtocolDestPortRange[4]: 0x%X ,0x%X ,0x%X ,0x%X ",
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[0],
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[1],
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[2],
+ psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[3]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8EthernetDestMacAddressLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8EthernetDestMacAddress[6]: 0x%X ,0x%X ,0x%X ,0x%X ,0x%X ,0x%X",
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[0],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[1],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[2],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[3],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[4],
+ psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[5]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8EthernetSourceMACAddressLength: 0x%X ",
+					psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddressLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddress[6]: 0x%X ,0x%X ,0x%X ,0x%X ,0x%X ,0x%X",
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[0],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[1],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[2],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[3],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[4],
+ psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[5]);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8EthertypeLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8EthertypeLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8Ethertype[3]: 0x%X ,0x%X ,0x%X ",
+ psfCSType->cCPacketClassificationRule.u8Ethertype[0],
+ psfCSType->cCPacketClassificationRule.u8Ethertype[1],
+ psfCSType->cCPacketClassificationRule.u8Ethertype[2]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16UserPriority: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u16UserPriority);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16VLANID: 0x%X ", psfCSType->cCPacketClassificationRule.u16VLANID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8AssociatedPHSI: 0x%X ", psfCSType->cCPacketClassificationRule.u8AssociatedPHSI);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16PacketClassificationRuleIndex:0x%X ",
+ psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8VendorSpecificClassifierParamLength:0x%X ",
+ psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParamLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8VendorSpecificClassifierParam[1]:0x%X ",
+ psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParam[0]);
#ifdef VERSION_D5
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPv6FlowLableLength :0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLableLength);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPv6FlowLable[6] :0x%X ,0x%X ,0x%X ,0x%X ,0x%X ,0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[0],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[1],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[2],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[3],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[4],
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[5]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPv6FlowLableLength: 0x%X ",
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLableLength);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPv6FlowLable[6]: 0x%X ,0x%X ,0x%X ,0x%X ,0x%X ,0x%X ",
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[0],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[1],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[2],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[3],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[4],
+ psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[5]);
#endif
}
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " bValid : 0x%X",pstAddIndication->sfActiveSet.bValid);
-
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " bValid: 0x%X", pstAddIndication->sfActiveSet.bValid);
}
-static inline ULONG RestoreSFParam(PMINI_ADAPTER Adapter, ULONG ulAddrSFParamSet,PUCHAR pucDestBuffer)
+static inline ULONG RestoreSFParam(PMINI_ADAPTER Adapter, ULONG ulAddrSFParamSet, PUCHAR pucDestBuffer)
{
UINT nBytesToRead = sizeof(stServiceFlowParamSI);
- if(ulAddrSFParamSet == 0 || NULL == pucDestBuffer)
- {
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Got Param address as 0!!");
+ if (ulAddrSFParamSet == 0 || NULL == pucDestBuffer) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Got Param address as 0!!");
return 0;
}
ulAddrSFParamSet = ntohl(ulAddrSFParamSet);
- //Read out the SF Param Set At the indicated Location
- if(rdm(Adapter, ulAddrSFParamSet, (PUCHAR)pucDestBuffer, nBytesToRead) < 0)
+ /* Read out the SF Param Set At the indicated Location */
+ if (rdm(Adapter, ulAddrSFParamSet, (PUCHAR)pucDestBuffer, nBytesToRead) < 0)
return STATUS_FAILURE;
return 1;
}
-
-static ULONG StoreSFParam(PMINI_ADAPTER Adapter,PUCHAR pucSrcBuffer,ULONG ulAddrSFParamSet)
+static ULONG StoreSFParam(PMINI_ADAPTER Adapter, PUCHAR pucSrcBuffer, ULONG ulAddrSFParamSet)
{
- UINT nBytesToWrite = sizeof(stServiceFlowParamSI);
+ UINT nBytesToWrite = sizeof(stServiceFlowParamSI);
int ret = 0;
- if(ulAddrSFParamSet == 0 || NULL == pucSrcBuffer)
- {
+ if (ulAddrSFParamSet == 0 || NULL == pucSrcBuffer)
return 0;
- }
ret = wrm(Adapter, ulAddrSFParamSet, (u8 *)pucSrcBuffer, nBytesToWrite);
if (ret < 0) {
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "%s:%d WRM failed",__FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "%s:%d WRM failed", __func__, __LINE__);
return ret;
}
return 1;
}
-ULONG StoreCmControlResponseMessage(PMINI_ADAPTER Adapter,PVOID pvBuffer,UINT *puBufferLength)
+ULONG StoreCmControlResponseMessage(PMINI_ADAPTER Adapter, PVOID pvBuffer, UINT *puBufferLength)
{
stLocalSFAddIndicationAlt *pstAddIndicationAlt = NULL;
- stLocalSFAddIndication * pstAddIndication = NULL;
+ stLocalSFAddIndication *pstAddIndication = NULL;
stLocalSFDeleteRequest *pstDeletionRequest;
UINT uiSearchRuleIndex;
ULONG ulSFID;
@@ -1671,52 +1369,47 @@ ULONG StoreCmControlResponseMessage(PMINI_ADAPTER Adapter,PVOID pvBuffer,UINT *p
pstAddIndicationAlt = (stLocalSFAddIndicationAlt *)(pvBuffer);
/*
- * In case of DSD Req By MS, we should immediately delete this SF so that
- * we can stop the further classifying the pkt for this SF.
- */
- if(pstAddIndicationAlt->u8Type == DSD_REQ)
- {
+	 * In case of a DSD request from the MS, delete this SF immediately so
+	 * that we stop any further classification of packets for this SF.
+ */
+ if (pstAddIndicationAlt->u8Type == DSD_REQ) {
pstDeletionRequest = (stLocalSFDeleteRequest *)pvBuffer;
ulSFID = ntohl(pstDeletionRequest->u32SFID);
- uiSearchRuleIndex=SearchSfid(Adapter,ulSFID);
+ uiSearchRuleIndex = SearchSfid(Adapter, ulSFID);
- if(uiSearchRuleIndex < NO_OF_QUEUES)
- {
- deleteSFBySfid(Adapter,uiSearchRuleIndex);
+ if (uiSearchRuleIndex < NO_OF_QUEUES) {
+ deleteSFBySfid(Adapter, uiSearchRuleIndex);
Adapter->u32TotalDSD++;
}
return 1;
}
-
- if( (pstAddIndicationAlt->u8Type == DSD_RSP) ||
- (pstAddIndicationAlt->u8Type == DSD_ACK))
- {
- //No Special handling send the message as it is
+ if ((pstAddIndicationAlt->u8Type == DSD_RSP) ||
+ (pstAddIndicationAlt->u8Type == DSD_ACK)) {
+		/* No special handling; send the message as it is */
return 1;
}
- // For DSA_REQ, only up to "psfAuthorizedSet" parameter should be accessed by driver!
+ /* For DSA_REQ, only up to "psfAuthorizedSet" parameter should be accessed by driver! */
- pstAddIndication=kmalloc(sizeof(*pstAddIndication), GFP_KERNEL);
- if(NULL==pstAddIndication)
+ pstAddIndication = kmalloc(sizeof(*pstAddIndication), GFP_KERNEL);
+ if (pstAddIndication == NULL)
return 0;
/* AUTHORIZED SET */
pstAddIndication->psfAuthorizedSet = (stServiceFlowParamSI *)
GetNextTargetBufferLocation(Adapter, pstAddIndicationAlt->u16TID);
- if(!pstAddIndication->psfAuthorizedSet)
+ if (!pstAddIndication->psfAuthorizedSet)
return 0;
- if(StoreSFParam(Adapter,(PUCHAR)&pstAddIndicationAlt->sfAuthorizedSet,
- (ULONG)pstAddIndication->psfAuthorizedSet)!= 1)
+ if (StoreSFParam(Adapter, (PUCHAR)&pstAddIndicationAlt->sfAuthorizedSet,
+ (ULONG)pstAddIndication->psfAuthorizedSet) != 1)
return 0;
/* this can't possibly be right */
pstAddIndication->psfAuthorizedSet = (stServiceFlowParamSI *)ntohl((ULONG)pstAddIndication->psfAuthorizedSet);
- if(pstAddIndicationAlt->u8Type == DSA_REQ)
- {
+ if (pstAddIndicationAlt->u8Type == DSA_REQ) {
stLocalSFAddRequest AddRequest;
AddRequest.u8Type = pstAddIndicationAlt->u8Type;
@@ -1724,18 +1417,17 @@ ULONG StoreCmControlResponseMessage(PMINI_ADAPTER Adapter,PVOID pvBuffer,UINT *p
AddRequest.u16TID = pstAddIndicationAlt->u16TID;
AddRequest.u16CID = pstAddIndicationAlt->u16CID;
AddRequest.u16VCID = pstAddIndicationAlt->u16VCID;
- AddRequest.psfParameterSet =pstAddIndication->psfAuthorizedSet ;
+ AddRequest.psfParameterSet = pstAddIndication->psfAuthorizedSet;
(*puBufferLength) = sizeof(stLocalSFAddRequest);
- memcpy(pvBuffer,&AddRequest,sizeof(stLocalSFAddRequest));
+ memcpy(pvBuffer, &AddRequest, sizeof(stLocalSFAddRequest));
return 1;
}
- // Since it's not DSA_REQ, we can access all field in pstAddIndicationAlt
-
- //We need to extract the structure from the buffer and pack it differently
+	/* Since it's not DSA_REQ, we can access all fields in pstAddIndicationAlt */
+ /* We need to extract the structure from the buffer and pack it differently */
pstAddIndication->u8Type = pstAddIndicationAlt->u8Type;
- pstAddIndication->eConnectionDir= pstAddIndicationAlt->u8Direction ;
+ pstAddIndication->eConnectionDir = pstAddIndicationAlt->u8Direction;
pstAddIndication->u16TID = pstAddIndicationAlt->u16TID;
pstAddIndication->u16CID = pstAddIndicationAlt->u16CID;
pstAddIndication->u16VCID = pstAddIndicationAlt->u16VCID;
@@ -1744,20 +1436,19 @@ ULONG StoreCmControlResponseMessage(PMINI_ADAPTER Adapter,PVOID pvBuffer,UINT *p
/* ADMITTED SET */
pstAddIndication->psfAdmittedSet = (stServiceFlowParamSI *)
GetNextTargetBufferLocation(Adapter, pstAddIndicationAlt->u16TID);
- if(!pstAddIndication->psfAdmittedSet)
+ if (!pstAddIndication->psfAdmittedSet)
return 0;
- if(StoreSFParam(Adapter,(PUCHAR)&pstAddIndicationAlt->sfAdmittedSet,(ULONG)pstAddIndication->psfAdmittedSet) != 1)
+ if (StoreSFParam(Adapter, (PUCHAR)&pstAddIndicationAlt->sfAdmittedSet, (ULONG)pstAddIndication->psfAdmittedSet) != 1)
return 0;
pstAddIndication->psfAdmittedSet = (stServiceFlowParamSI *)ntohl((ULONG)pstAddIndication->psfAdmittedSet);
-
/* ACTIVE SET */
pstAddIndication->psfActiveSet = (stServiceFlowParamSI *)
GetNextTargetBufferLocation(Adapter, pstAddIndicationAlt->u16TID);
- if(!pstAddIndication->psfActiveSet)
+ if (!pstAddIndication->psfActiveSet)
return 0;
- if(StoreSFParam(Adapter,(PUCHAR)&pstAddIndicationAlt->sfActiveSet,(ULONG)pstAddIndication->psfActiveSet) != 1)
+ if (StoreSFParam(Adapter, (PUCHAR)&pstAddIndicationAlt->sfActiveSet, (ULONG)pstAddIndication->psfActiveSet) != 1)
return 0;
pstAddIndication->psfActiveSet = (stServiceFlowParamSI *)ntohl((ULONG)pstAddIndication->psfActiveSet);
@@ -1768,47 +1459,41 @@ ULONG StoreCmControlResponseMessage(PMINI_ADAPTER Adapter,PVOID pvBuffer,UINT *p
return 1;
}
-
static inline stLocalSFAddIndicationAlt
-*RestoreCmControlResponseMessage(register PMINI_ADAPTER Adapter,register PVOID pvBuffer)
+*RestoreCmControlResponseMessage(register PMINI_ADAPTER Adapter, register PVOID pvBuffer)
{
- ULONG ulStatus=0;
+ ULONG ulStatus = 0;
stLocalSFAddIndication *pstAddIndication = NULL;
stLocalSFAddIndicationAlt *pstAddIndicationDest = NULL;
- pstAddIndication = (stLocalSFAddIndication *)(pvBuffer);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "=====>" );
+ pstAddIndication = (stLocalSFAddIndication *)(pvBuffer);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "=====>");
if ((pstAddIndication->u8Type == DSD_REQ) ||
(pstAddIndication->u8Type == DSD_RSP) ||
(pstAddIndication->u8Type == DSD_ACK))
- {
return (stLocalSFAddIndicationAlt *)pvBuffer;
- }
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Inside RestoreCmControlResponseMessage ");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Inside RestoreCmControlResponseMessage ");
/*
- //Need to Allocate memory to contain the SUPER Large structures
- //Our driver can't create these structures on Stack :(
- */
- pstAddIndicationDest=kmalloc(sizeof(stLocalSFAddIndicationAlt), GFP_KERNEL);
-
- if(pstAddIndicationDest)
- {
- memset(pstAddIndicationDest,0,sizeof(stLocalSFAddIndicationAlt));
- }
- else
- {
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Failed to allocate memory for SF Add Indication Structure ");
+ * Need to Allocate memory to contain the SUPER Large structures
+ * Our driver can't create these structures on Stack :(
+ */
+ pstAddIndicationDest = kmalloc(sizeof(stLocalSFAddIndicationAlt), GFP_KERNEL);
+
+ if (pstAddIndicationDest) {
+ memset(pstAddIndicationDest, 0, sizeof(stLocalSFAddIndicationAlt));
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Failed to allocate memory for SF Add Indication Structure ");
return NULL;
}
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u8Type : 0x%X",pstAddIndication->u8Type);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u8Direction : 0x%X",pstAddIndication->eConnectionDir);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u8TID : 0x%X",ntohs(pstAddIndication->u16TID));
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u8CID : 0x%X",ntohs(pstAddIndication->u16CID));
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u16VCID : 0x%X",ntohs(pstAddIndication->u16VCID));
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-autorized set loc : %p",pstAddIndication->psfAuthorizedSet);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-admitted set loc : %p",pstAddIndication->psfAdmittedSet);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-Active set loc : %p",pstAddIndication->psfActiveSet);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u8Type : 0x%X", pstAddIndication->u8Type);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u8Direction : 0x%X", pstAddIndication->eConnectionDir);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u8TID : 0x%X", ntohs(pstAddIndication->u16TID));
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u8CID : 0x%X", ntohs(pstAddIndication->u16CID));
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u16VCID : 0x%X", ntohs(pstAddIndication->u16VCID));
+	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-authorized set loc : %p", pstAddIndication->psfAuthorizedSet);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-admitted set loc : %p", pstAddIndication->psfAdmittedSet);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-Active set loc : %p", pstAddIndication->psfActiveSet);
pstAddIndicationDest->u8Type = pstAddIndication->u8Type;
pstAddIndicationDest->u8Direction = pstAddIndication->eConnectionDir;
@@ -1817,42 +1502,39 @@ static inline stLocalSFAddIndicationAlt
pstAddIndicationDest->u16VCID = pstAddIndication->u16VCID;
pstAddIndicationDest->u8CC = pstAddIndication->u8CC;
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Restoring Active Set ");
- ulStatus=RestoreSFParam(Adapter,(ULONG)pstAddIndication->psfActiveSet, (PUCHAR)&pstAddIndicationDest->sfActiveSet);
- if(ulStatus != 1)
- {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Restoring Active Set ");
+ ulStatus = RestoreSFParam(Adapter, (ULONG)pstAddIndication->psfActiveSet, (PUCHAR)&pstAddIndicationDest->sfActiveSet);
+ if (ulStatus != 1)
goto failed_restore_sf_param;
- }
- if(pstAddIndicationDest->sfActiveSet.u8TotalClassifiers > MAX_CLASSIFIERS_IN_SF)
+
+ if (pstAddIndicationDest->sfActiveSet.u8TotalClassifiers > MAX_CLASSIFIERS_IN_SF)
pstAddIndicationDest->sfActiveSet.u8TotalClassifiers = MAX_CLASSIFIERS_IN_SF;
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Restoring Admitted Set ");
- ulStatus=RestoreSFParam(Adapter,(ULONG)pstAddIndication->psfAdmittedSet,(PUCHAR)&pstAddIndicationDest->sfAdmittedSet);
- if(ulStatus != 1)
- {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Restoring Admitted Set ");
+ ulStatus = RestoreSFParam(Adapter, (ULONG)pstAddIndication->psfAdmittedSet, (PUCHAR)&pstAddIndicationDest->sfAdmittedSet);
+ if (ulStatus != 1)
goto failed_restore_sf_param;
- }
- if(pstAddIndicationDest->sfAdmittedSet.u8TotalClassifiers > MAX_CLASSIFIERS_IN_SF)
+
+ if (pstAddIndicationDest->sfAdmittedSet.u8TotalClassifiers > MAX_CLASSIFIERS_IN_SF)
pstAddIndicationDest->sfAdmittedSet.u8TotalClassifiers = MAX_CLASSIFIERS_IN_SF;
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Restoring Authorized Set ");
- ulStatus=RestoreSFParam(Adapter,(ULONG)pstAddIndication->psfAuthorizedSet,(PUCHAR)&pstAddIndicationDest->sfAuthorizedSet);
- if(ulStatus != 1)
- {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Restoring Authorized Set ");
+ ulStatus = RestoreSFParam(Adapter, (ULONG)pstAddIndication->psfAuthorizedSet, (PUCHAR)&pstAddIndicationDest->sfAuthorizedSet);
+ if (ulStatus != 1)
goto failed_restore_sf_param;
- }
- if(pstAddIndicationDest->sfAuthorizedSet.u8TotalClassifiers > MAX_CLASSIFIERS_IN_SF)
+
+ if (pstAddIndicationDest->sfAuthorizedSet.u8TotalClassifiers > MAX_CLASSIFIERS_IN_SF)
pstAddIndicationDest->sfAuthorizedSet.u8TotalClassifiers = MAX_CLASSIFIERS_IN_SF;
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Dumping the whole raw packet");
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "============================================================");
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " pstAddIndicationDest->sfActiveSet size %zx %p", sizeof(*pstAddIndicationDest), pstAddIndicationDest);
- //BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, (unsigned char *)pstAddIndicationDest, sizeof(*pstAddIndicationDest));
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "============================================================");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Dumping the whole raw packet");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "============================================================");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " pstAddIndicationDest->sfActiveSet size %zx %p", sizeof(*pstAddIndicationDest), pstAddIndicationDest);
+ /* BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, (unsigned char *)pstAddIndicationDest, sizeof(*pstAddIndicationDest)); */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "============================================================");
return pstAddIndicationDest;
failed_restore_sf_param:
kfree(pstAddIndicationDest);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "<=====" );
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "<=====");
return NULL;
}
@@ -1860,7 +1542,7 @@ ULONG SetUpTargetDsxBuffers(PMINI_ADAPTER Adapter)
{
ULONG ulTargetDsxBuffersBase = 0;
ULONG ulCntTargetBuffers;
- ULONG ulIndex=0;
+ ULONG i;
int Status;
if (!Adapter) {
@@ -1868,411 +1550,348 @@ ULONG SetUpTargetDsxBuffers(PMINI_ADAPTER Adapter)
return 0;
}
- if(Adapter->astTargetDsxBuffer[0].ulTargetDsxBuffer)
+ if (Adapter->astTargetDsxBuffer[0].ulTargetDsxBuffer)
return 1;
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Size of Each DSX Buffer(Also size of ServiceFlowParamSI): %zx ",sizeof(stServiceFlowParamSI));
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Reading DSX buffer From Target location %x ",DSX_MESSAGE_EXCHANGE_BUFFER);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Size of Each DSX Buffer(Also size of ServiceFlowParamSI): %zx ", sizeof(stServiceFlowParamSI));
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Reading DSX buffer From Target location %x ", DSX_MESSAGE_EXCHANGE_BUFFER);
- Status = rdmalt(Adapter, DSX_MESSAGE_EXCHANGE_BUFFER,
- (PUINT)&ulTargetDsxBuffersBase, sizeof(UINT));
- if(Status < 0)
- {
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "RDM failed!!");
+ Status = rdmalt(Adapter, DSX_MESSAGE_EXCHANGE_BUFFER, (PUINT)&ulTargetDsxBuffersBase, sizeof(UINT));
+ if (Status < 0) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "RDM failed!!");
return 0;
}
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Base Address Of DSX Target Buffer : 0x%lx",ulTargetDsxBuffersBase);
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Tgt Buffer is Now %lx :",ulTargetDsxBuffersBase);
-
- ulCntTargetBuffers = DSX_MESSAGE_EXCHANGE_BUFFER_SIZE/sizeof(stServiceFlowParamSI);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Base Address Of DSX Target Buffer : 0x%lx", ulTargetDsxBuffersBase);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Tgt Buffer is Now %lx :", ulTargetDsxBuffersBase);
+ ulCntTargetBuffers = DSX_MESSAGE_EXCHANGE_BUFFER_SIZE / sizeof(stServiceFlowParamSI);
Adapter->ulTotalTargetBuffersAvailable =
ulCntTargetBuffers > MAX_TARGET_DSX_BUFFERS ?
MAX_TARGET_DSX_BUFFERS : ulCntTargetBuffers;
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " Total Target DSX Buffer setup %lx ",Adapter->ulTotalTargetBuffersAvailable);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " Total Target DSX Buffer setup %lx ", Adapter->ulTotalTargetBuffersAvailable);
- for(ulIndex=0; ulIndex < Adapter->ulTotalTargetBuffersAvailable ; ulIndex++)
- {
- Adapter->astTargetDsxBuffer[ulIndex].ulTargetDsxBuffer = ulTargetDsxBuffersBase;
- Adapter->astTargetDsxBuffer[ulIndex].valid=1;
- Adapter->astTargetDsxBuffer[ulIndex].tid=0;
- ulTargetDsxBuffersBase+=sizeof(stServiceFlowParamSI);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " Target DSX Buffer %lx setup at 0x%lx",
- ulIndex, Adapter->astTargetDsxBuffer[ulIndex].ulTargetDsxBuffer);
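+	/*
+	 * Carve the target-side DSX region into sizeof(stServiceFlowParamSI)-sized
+	 * slots, one per buffer; each slot starts out free (valid) with no TID.
+	 */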
+ for (i = 0; i < Adapter->ulTotalTargetBuffersAvailable; i++) {
+ Adapter->astTargetDsxBuffer[i].ulTargetDsxBuffer = ulTargetDsxBuffersBase;
+ Adapter->astTargetDsxBuffer[i].valid = 1;
+ Adapter->astTargetDsxBuffer[i].tid = 0;
+ ulTargetDsxBuffersBase += sizeof(stServiceFlowParamSI);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " Target DSX Buffer %lx setup at 0x%lx",
+ i, Adapter->astTargetDsxBuffer[i].ulTargetDsxBuffer);
}
Adapter->ulCurrentTargetBuffer = 0;
Adapter->ulFreeTargetBufferCnt = Adapter->ulTotalTargetBuffersAvailable;
return 1;
}
-static ULONG GetNextTargetBufferLocation(PMINI_ADAPTER Adapter,B_UINT16 tid)
+static ULONG GetNextTargetBufferLocation(PMINI_ADAPTER Adapter, B_UINT16 tid)
{
- ULONG ulTargetDSXBufferAddress;
- ULONG ulTargetDsxBufferIndexToUse,ulMaxTry;
+ ULONG ulTargetDSXBufferAddress;
+ ULONG ulTargetDsxBufferIndexToUse, ulMaxTry;
- if((Adapter->ulTotalTargetBuffersAvailable == 0)||
- (Adapter->ulFreeTargetBufferCnt == 0))
- {
- ClearTargetDSXBuffer(Adapter,tid,FALSE);
+ if ((Adapter->ulTotalTargetBuffersAvailable == 0) || (Adapter->ulFreeTargetBufferCnt == 0)) {
+ ClearTargetDSXBuffer(Adapter, tid, FALSE);
return 0;
}
- ulTargetDsxBufferIndexToUse = Adapter->ulCurrentTargetBuffer;
- ulMaxTry = Adapter->ulTotalTargetBuffersAvailable;
- while((ulMaxTry)&&(Adapter->astTargetDsxBuffer[ulTargetDsxBufferIndexToUse].valid != 1))
- {
- ulTargetDsxBufferIndexToUse = (ulTargetDsxBufferIndexToUse+1)%
- Adapter->ulTotalTargetBuffersAvailable;
- ulMaxTry--;
+ ulTargetDsxBufferIndexToUse = Adapter->ulCurrentTargetBuffer;
+ ulMaxTry = Adapter->ulTotalTargetBuffersAvailable;
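+	/*
+	 * Round-robin scan from the current cursor for a slot that is still
+	 * marked valid (free); give up after one full pass over all buffers.
+	 */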
+ while ((ulMaxTry) && (Adapter->astTargetDsxBuffer[ulTargetDsxBufferIndexToUse].valid != 1)) {
+ ulTargetDsxBufferIndexToUse = (ulTargetDsxBufferIndexToUse+1) % Adapter->ulTotalTargetBuffersAvailable;
+ ulMaxTry--;
}
- if(ulMaxTry==0)
- {
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "\n GetNextTargetBufferLocation : Error No Free Target DSX Buffers FreeCnt : %lx ",Adapter->ulFreeTargetBufferCnt);
- ClearTargetDSXBuffer(Adapter,tid,FALSE);
+ if (ulMaxTry == 0) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "\n GetNextTargetBufferLocation : Error No Free Target DSX Buffers FreeCnt : %lx ", Adapter->ulFreeTargetBufferCnt);
+ ClearTargetDSXBuffer(Adapter, tid, FALSE);
return 0;
}
-
- ulTargetDSXBufferAddress =
- Adapter->astTargetDsxBuffer[ulTargetDsxBufferIndexToUse].ulTargetDsxBuffer;
- Adapter->astTargetDsxBuffer[ulTargetDsxBufferIndexToUse].valid=0;
- Adapter->astTargetDsxBuffer[ulTargetDsxBufferIndexToUse].tid=tid;
+ ulTargetDSXBufferAddress = Adapter->astTargetDsxBuffer[ulTargetDsxBufferIndexToUse].ulTargetDsxBuffer;
+ Adapter->astTargetDsxBuffer[ulTargetDsxBufferIndexToUse].valid = 0;
+ Adapter->astTargetDsxBuffer[ulTargetDsxBufferIndexToUse].tid = tid;
Adapter->ulFreeTargetBufferCnt--;
-
-
- ulTargetDsxBufferIndexToUse =
- (ulTargetDsxBufferIndexToUse+1)%Adapter->ulTotalTargetBuffersAvailable;
+ ulTargetDsxBufferIndexToUse = (ulTargetDsxBufferIndexToUse+1)%Adapter->ulTotalTargetBuffersAvailable;
Adapter->ulCurrentTargetBuffer = ulTargetDsxBufferIndexToUse;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "GetNextTargetBufferLocation :Returning address %lx tid %d\n",
- ulTargetDSXBufferAddress,tid);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "GetNextTargetBufferLocation :Returning address %lx tid %d\n", ulTargetDSXBufferAddress, tid);
+
return ulTargetDSXBufferAddress;
}
-
-INT AllocAdapterDsxBuffer(PMINI_ADAPTER Adapter)
+int AllocAdapterDsxBuffer(PMINI_ADAPTER Adapter)
{
/*
- //Need to Allocate memory to contain the SUPER Large structures
- //Our driver can't create these structures on Stack
- */
- Adapter->caDsxReqResp=kmalloc(sizeof(stLocalSFAddIndicationAlt)+LEADER_SIZE, GFP_KERNEL);
- if(!Adapter->caDsxReqResp)
+ * Need to Allocate memory to contain the SUPER Large structures
+ * Our driver can't create these structures on Stack
+ */
+ Adapter->caDsxReqResp = kmalloc(sizeof(stLocalSFAddIndicationAlt)+LEADER_SIZE, GFP_KERNEL);
+ if (!Adapter->caDsxReqResp)
return -ENOMEM;
+
return 0;
}
-INT FreeAdapterDsxBuffer(PMINI_ADAPTER Adapter)
+int FreeAdapterDsxBuffer(PMINI_ADAPTER Adapter)
{
kfree(Adapter->caDsxReqResp);
return 0;
-
}
-/**
-@ingroup ctrl_pkt_functions
-This routinue would process the Control responses
-for the Connection Management.
-@return - Queue index for the free SFID else returns Invalid Index.
-*/
-BOOLEAN CmControlResponseMessage(PMINI_ADAPTER Adapter, /**<Pointer to the Adapter structure*/
- PVOID pvBuffer /**Starting Address of the Buffer, that contains the AddIndication Data*/
- )
+
+/*
+ * @ingroup ctrl_pkt_functions
+ * This routine processes the control responses
+ * for Connection Management.
+ * @return - Queue index for the free SFID else returns Invalid Index.
+ */
+BOOLEAN CmControlResponseMessage(PMINI_ADAPTER Adapter, /* Pointer to the Adapter structure */
+	PVOID pvBuffer /* Starting address of the buffer that contains the AddIndication data */)
{
- stServiceFlowParamSI *psfLocalSet=NULL;
- stLocalSFAddIndicationAlt *pstAddIndication = NULL;
- stLocalSFChangeIndicationAlt *pstChangeIndication = NULL;
- PLEADER pLeader=NULL;
+ stServiceFlowParamSI *psfLocalSet = NULL;
+ stLocalSFAddIndicationAlt *pstAddIndication = NULL;
+ stLocalSFChangeIndicationAlt *pstChangeIndication = NULL;
+ PLEADER pLeader = NULL;
+
/*
- //Otherwise the message contains a target address from where we need to
- //read out the rest of the service flow param structure
- */
- if((pstAddIndication = RestoreCmControlResponseMessage(Adapter,pvBuffer))
- == NULL)
- {
- ClearTargetDSXBuffer(Adapter,((stLocalSFAddIndication *)pvBuffer)->u16TID, FALSE);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_PRINTK, 0, 0, "Error in restoring Service Flow param structure from DSx message");
+ * Otherwise the message contains a target address from where we need to
+ * read out the rest of the service flow param structure
+ */
+ pstAddIndication = RestoreCmControlResponseMessage(Adapter, pvBuffer);
+ if (pstAddIndication == NULL) {
+ ClearTargetDSXBuffer(Adapter, ((stLocalSFAddIndication *)pvBuffer)->u16TID, FALSE);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Error in restoring Service Flow param structure from DSx message");
return FALSE;
}
DumpCmControlPacket(pstAddIndication);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "====>");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "====>");
pLeader = (PLEADER)Adapter->caDsxReqResp;
- pLeader->Status =CM_CONTROL_NEWDSX_MULTICLASSIFIER_REQ;
+ pLeader->Status = CM_CONTROL_NEWDSX_MULTICLASSIFIER_REQ;
pLeader->Vcid = 0;
- ClearTargetDSXBuffer(Adapter,pstAddIndication->u16TID,FALSE);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "### TID RECEIVED %d\n",pstAddIndication->u16TID);
- switch(pstAddIndication->u8Type)
+ ClearTargetDSXBuffer(Adapter, pstAddIndication->u16TID, FALSE);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "### TID RECEIVED %d\n", pstAddIndication->u16TID);
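+	/*
+	 * A DSA_REQ from the target is echoed back as a DSA_RSP; a DSA_RSP is
+	 * converted into a DSA_ACK and falls through to the DSA_ACK handling
+	 * below, which sets up the local service flow state.
+	 */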
+ switch (pstAddIndication->u8Type) {
+ case DSA_REQ:
{
- case DSA_REQ:
- {
- pLeader->PLength = sizeof(stLocalSFAddIndicationAlt);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Sending DSA Response....\n");
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSA RESPONSE TO MAC %d", pLeader->PLength );
- *((stLocalSFAddIndicationAlt*)&(Adapter->caDsxReqResp[LEADER_SIZE]))
- = *pstAddIndication;
- ((stLocalSFAddIndicationAlt*)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSA_RSP;
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " VCID = %x", ntohs(pstAddIndication->u16VCID));
- CopyBufferToControlPacket(Adapter,(PVOID)Adapter->caDsxReqResp);
- kfree(pstAddIndication);
- }
- break;
- case DSA_RSP:
- {
- pLeader->PLength = sizeof(stLocalSFAddIndicationAlt);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSA ACK TO MAC %d",
+ pLeader->PLength = sizeof(stLocalSFAddIndicationAlt);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Sending DSA Response....\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSA RESPONSE TO MAC %d", pLeader->PLength);
+ *((stLocalSFAddIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))
+ = *pstAddIndication;
+ ((stLocalSFAddIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSA_RSP;
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " VCID = %x", ntohs(pstAddIndication->u16VCID));
+ CopyBufferToControlPacket(Adapter, (PVOID)Adapter->caDsxReqResp);
+ kfree(pstAddIndication);
+ }
+ break;
+ case DSA_RSP:
+ {
+ pLeader->PLength = sizeof(stLocalSFAddIndicationAlt);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSA ACK TO MAC %d",
pLeader->PLength);
- *((stLocalSFAddIndicationAlt*)&(Adapter->caDsxReqResp[LEADER_SIZE]))
- = *pstAddIndication;
- ((stLocalSFAddIndicationAlt*)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSA_ACK;
+ *((stLocalSFAddIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))
+ = *pstAddIndication;
+ ((stLocalSFAddIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSA_ACK;
- }//no break here..we should go down.
- case DSA_ACK:
- {
- UINT uiSearchRuleIndex=0;
+	} /* no break here; fall through to DSA_ACK */
+ case DSA_ACK:
+ {
+ UINT uiSearchRuleIndex = 0;
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "VCID:0x%X",
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "VCID:0x%X",
ntohs(pstAddIndication->u16VCID));
- uiSearchRuleIndex=SearchFreeSfid(Adapter);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"uiSearchRuleIndex:0x%X ",
+ uiSearchRuleIndex = SearchFreeSfid(Adapter);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "uiSearchRuleIndex:0x%X ",
uiSearchRuleIndex);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Direction:0x%X ",
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Direction:0x%X ",
pstAddIndication->u8Direction);
- if((uiSearchRuleIndex< NO_OF_QUEUES) )
- {
- Adapter->PackInfo[uiSearchRuleIndex].ucDirection =
- pstAddIndication->u8Direction;
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "bValid:0x%X ",
+ if ((uiSearchRuleIndex < NO_OF_QUEUES)) {
+ Adapter->PackInfo[uiSearchRuleIndex].ucDirection =
+ pstAddIndication->u8Direction;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "bValid:0x%X ",
pstAddIndication->sfActiveSet.bValid);
- if(pstAddIndication->sfActiveSet.bValid==TRUE)
- {
- Adapter->PackInfo[uiSearchRuleIndex].bActiveSet=TRUE;
- }
- if(pstAddIndication->sfAuthorizedSet.bValid==TRUE)
- {
- Adapter->PackInfo[uiSearchRuleIndex].bAuthorizedSet=TRUE;
- }
- if(pstAddIndication->sfAdmittedSet.bValid==TRUE)
- {
- Adapter->PackInfo[uiSearchRuleIndex].bAdmittedSet=TRUE;
- }
- if(FALSE == pstAddIndication->sfActiveSet.bValid)
- {
- Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
- Adapter->PackInfo[uiSearchRuleIndex].bActivateRequestSent = FALSE;
- if(pstAddIndication->sfAdmittedSet.bValid)
- {
- psfLocalSet = &pstAddIndication->sfAdmittedSet;
- }
- else if(pstAddIndication->sfAuthorizedSet.bValid)
- {
- psfLocalSet = &pstAddIndication->sfAuthorizedSet;
- }
- }
- else
- {
- psfLocalSet = &pstAddIndication->sfActiveSet;
- Adapter->PackInfo[uiSearchRuleIndex].bActive=TRUE;
- }
-
- if(!psfLocalSet)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "No set is valid\n");
- Adapter->PackInfo[uiSearchRuleIndex].bActive=FALSE;
- Adapter->PackInfo[uiSearchRuleIndex].bValid=FALSE;
- Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value=0;
- kfree(pstAddIndication);
- }
+ if (pstAddIndication->sfActiveSet.bValid == TRUE)
+ Adapter->PackInfo[uiSearchRuleIndex].bActiveSet = TRUE;
+
+ if (pstAddIndication->sfAuthorizedSet.bValid == TRUE)
+ Adapter->PackInfo[uiSearchRuleIndex].bAuthorizedSet = TRUE;
+
+ if (pstAddIndication->sfAdmittedSet.bValid == TRUE)
+ Adapter->PackInfo[uiSearchRuleIndex].bAdmittedSet = TRUE;
+
+ if (pstAddIndication->sfActiveSet.bValid == FALSE) {
+ Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
+ Adapter->PackInfo[uiSearchRuleIndex].bActivateRequestSent = FALSE;
+ if (pstAddIndication->sfAdmittedSet.bValid)
+ psfLocalSet = &pstAddIndication->sfAdmittedSet;
+ else if (pstAddIndication->sfAuthorizedSet.bValid)
+ psfLocalSet = &pstAddIndication->sfAuthorizedSet;
+ } else {
+ psfLocalSet = &pstAddIndication->sfActiveSet;
+ Adapter->PackInfo[uiSearchRuleIndex].bActive = TRUE;
+ }
- else if(psfLocalSet->bValid && (pstAddIndication->u8CC == 0))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSA ACK");
- Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value =
- ntohs(pstAddIndication->u16VCID);
- Adapter->PackInfo[uiSearchRuleIndex].usCID =
- ntohs(pstAddIndication->u16CID);
-
- if(UPLINK_DIR == pstAddIndication->u8Direction)
- atomic_set(&Adapter->PackInfo[uiSearchRuleIndex].uiPerSFTxResourceCount, DEFAULT_PERSFCOUNT);
- CopyToAdapter(Adapter,psfLocalSet,uiSearchRuleIndex,
- DSA_ACK, pstAddIndication);
- // don't free pstAddIndication
-
- /* Inside CopyToAdapter, Sorting of all the SFs take place.
- Hence any access to the newly added SF through uiSearchRuleIndex is invalid.
- SHOULD BE STRICTLY AVOIDED.
- */
-// *(PULONG)(((PUCHAR)pvBuffer)+1)=psfLocalSet->u32SFID;
- memcpy((((PUCHAR)pvBuffer)+1), &psfLocalSet->u32SFID, 4);
-
- if(pstAddIndication->sfActiveSet.bValid == TRUE)
- {
- if(UPLINK_DIR == pstAddIndication->u8Direction)
- {
- if(!Adapter->LinkUpStatus)
- {
- netif_carrier_on(Adapter->dev);
- netif_start_queue(Adapter->dev);
- Adapter->LinkUpStatus = 1;
- if (netif_msg_link(Adapter))
- pr_info(PFX "%s: link up\n", Adapter->dev->name);
- atomic_set(&Adapter->TxPktAvail, 1);
- wake_up(&Adapter->tx_packet_wait_queue);
- Adapter->liTimeSinceLastNetEntry = get_seconds();
- }
+ if (!psfLocalSet) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "No set is valid\n");
+ Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
+ Adapter->PackInfo[uiSearchRuleIndex].bValid = FALSE;
+ Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = 0;
+ kfree(pstAddIndication);
+ } else if (psfLocalSet->bValid && (pstAddIndication->u8CC == 0)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSA ACK");
+ Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = ntohs(pstAddIndication->u16VCID);
+ Adapter->PackInfo[uiSearchRuleIndex].usCID = ntohs(pstAddIndication->u16CID);
+
+ if (UPLINK_DIR == pstAddIndication->u8Direction)
+ atomic_set(&Adapter->PackInfo[uiSearchRuleIndex].uiPerSFTxResourceCount, DEFAULT_PERSFCOUNT);
+
+ CopyToAdapter(Adapter, psfLocalSet, uiSearchRuleIndex, DSA_ACK, pstAddIndication);
+ /* don't free pstAddIndication */
+
+				/* Inside CopyToAdapter, sorting of all the SFs takes place.
+ * Hence any access to the newly added SF through uiSearchRuleIndex is invalid.
+ * SHOULD BE STRICTLY AVOIDED.
+ */
+ /* *(PULONG)(((PUCHAR)pvBuffer)+1)=psfLocalSet->u32SFID; */
+ memcpy((((PUCHAR)pvBuffer)+1), &psfLocalSet->u32SFID, 4);
+
+ if (pstAddIndication->sfActiveSet.bValid == TRUE) {
+ if (UPLINK_DIR == pstAddIndication->u8Direction) {
+ if (!Adapter->LinkUpStatus) {
+ netif_carrier_on(Adapter->dev);
+ netif_start_queue(Adapter->dev);
+ Adapter->LinkUpStatus = 1;
+ if (netif_msg_link(Adapter))
+ pr_info(PFX "%s: link up\n", Adapter->dev->name);
+ atomic_set(&Adapter->TxPktAvail, 1);
+ wake_up(&Adapter->tx_packet_wait_queue);
+ Adapter->liTimeSinceLastNetEntry = get_seconds();
}
}
}
-
- else
- {
- Adapter->PackInfo[uiSearchRuleIndex].bActive=FALSE;
- Adapter->PackInfo[uiSearchRuleIndex].bValid=FALSE;
- Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value=0;
- kfree(pstAddIndication);
- }
- }
- else
- {
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_PRINTK, 0, 0, "DSA ACK did not get valid SFID");
+ } else {
+ Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
+ Adapter->PackInfo[uiSearchRuleIndex].bValid = FALSE;
+ Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = 0;
kfree(pstAddIndication);
- return FALSE;
}
- }
- break;
- case DSC_REQ:
- {
- pLeader->PLength = sizeof(stLocalSFChangeIndicationAlt);
- pstChangeIndication = (stLocalSFChangeIndicationAlt*)pstAddIndication;
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSC RESPONSE TO MAC %d", pLeader->PLength);
-
- *((stLocalSFChangeIndicationAlt*)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *pstChangeIndication;
- ((stLocalSFChangeIndicationAlt*)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSC_RSP;
-
- CopyBufferToControlPacket(Adapter,(PVOID)Adapter->caDsxReqResp);
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "DSA ACK did not get valid SFID");
kfree(pstAddIndication);
+ return FALSE;
}
- break;
- case DSC_RSP:
- {
- pLeader->PLength = sizeof(stLocalSFChangeIndicationAlt);
- pstChangeIndication = (stLocalSFChangeIndicationAlt*)pstAddIndication;
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSC ACK TO MAC %d", pLeader->PLength);
- *((stLocalSFChangeIndicationAlt*)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *pstChangeIndication;
- ((stLocalSFChangeIndicationAlt*)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSC_ACK;
- }
- case DSC_ACK:
- {
- UINT uiSearchRuleIndex=0;
-
- pstChangeIndication = (stLocalSFChangeIndicationAlt *)pstAddIndication;
- uiSearchRuleIndex=SearchSfid(Adapter,ntohl(pstChangeIndication->sfActiveSet.u32SFID));
- if(uiSearchRuleIndex > NO_OF_QUEUES-1)
- {
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_PRINTK, 0, 0, "SF doesn't exist for which DSC_ACK is received");
- }
- if((uiSearchRuleIndex < NO_OF_QUEUES))
- {
- Adapter->PackInfo[uiSearchRuleIndex].ucDirection = pstChangeIndication->u8Direction;
- if(pstChangeIndication->sfActiveSet.bValid==TRUE)
- {
- Adapter->PackInfo[uiSearchRuleIndex].bActiveSet=TRUE;
- }
- if(pstChangeIndication->sfAuthorizedSet.bValid==TRUE)
- {
- Adapter->PackInfo[uiSearchRuleIndex].bAuthorizedSet=TRUE;
- }
- if(pstChangeIndication->sfAdmittedSet.bValid==TRUE)
- {
- Adapter->PackInfo[uiSearchRuleIndex].bAdmittedSet=TRUE;
- }
+ }
+ break;
+ case DSC_REQ:
+ {
+ pLeader->PLength = sizeof(stLocalSFChangeIndicationAlt);
+ pstChangeIndication = (stLocalSFChangeIndicationAlt *)pstAddIndication;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSC RESPONSE TO MAC %d", pLeader->PLength);
- if(FALSE==pstChangeIndication->sfActiveSet.bValid)
- {
- Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
- Adapter->PackInfo[uiSearchRuleIndex].bActivateRequestSent = FALSE;
- if(pstChangeIndication->sfAdmittedSet.bValid)
- {
- psfLocalSet = &pstChangeIndication->sfAdmittedSet;
- }
- else if(pstChangeIndication->sfAuthorizedSet.bValid)
- {
- psfLocalSet = &pstChangeIndication->sfAuthorizedSet;
- }
- }
+ *((stLocalSFChangeIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *pstChangeIndication;
+ ((stLocalSFChangeIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSC_RSP;
- else
- {
- psfLocalSet = &pstChangeIndication->sfActiveSet;
- Adapter->PackInfo[uiSearchRuleIndex].bActive=TRUE;
- }
- if(psfLocalSet->bValid && (pstChangeIndication->u8CC == 0))
- {
- Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value =
- ntohs(pstChangeIndication->u16VCID);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "CC field is %d bvalid = %d\n",
- pstChangeIndication->u8CC, psfLocalSet->bValid);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "VCID= %d\n", ntohs(pstChangeIndication->u16VCID));
- Adapter->PackInfo[uiSearchRuleIndex].usCID =
- ntohs(pstChangeIndication->u16CID);
- CopyToAdapter(Adapter,psfLocalSet,uiSearchRuleIndex,
- DSC_ACK, pstAddIndication);
-
- *(PULONG)(((PUCHAR)pvBuffer)+1)=psfLocalSet->u32SFID;
- }
- else if(pstChangeIndication->u8CC == 6)
- {
- deleteSFBySfid(Adapter,uiSearchRuleIndex);
- kfree(pstAddIndication);
- }
+ CopyBufferToControlPacket(Adapter, (PVOID)Adapter->caDsxReqResp);
+ kfree(pstAddIndication);
+ }
+ break;
+ case DSC_RSP:
+ {
+ pLeader->PLength = sizeof(stLocalSFChangeIndicationAlt);
+ pstChangeIndication = (stLocalSFChangeIndicationAlt *)pstAddIndication;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSC ACK TO MAC %d", pLeader->PLength);
+ *((stLocalSFChangeIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *pstChangeIndication;
+ ((stLocalSFChangeIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSC_ACK;
+ }
+ case DSC_ACK:
+ {
+ UINT uiSearchRuleIndex = 0;
+
+ pstChangeIndication = (stLocalSFChangeIndicationAlt *)pstAddIndication;
+ uiSearchRuleIndex = SearchSfid(Adapter, ntohl(pstChangeIndication->sfActiveSet.u32SFID));
+ if (uiSearchRuleIndex > NO_OF_QUEUES-1)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "SF doesn't exist for which DSC_ACK is received");
+
+ if ((uiSearchRuleIndex < NO_OF_QUEUES)) {
+ Adapter->PackInfo[uiSearchRuleIndex].ucDirection = pstChangeIndication->u8Direction;
+ if (pstChangeIndication->sfActiveSet.bValid == TRUE)
+ Adapter->PackInfo[uiSearchRuleIndex].bActiveSet = TRUE;
+
+ if (pstChangeIndication->sfAuthorizedSet.bValid == TRUE)
+ Adapter->PackInfo[uiSearchRuleIndex].bAuthorizedSet = TRUE;
+
+ if (pstChangeIndication->sfAdmittedSet.bValid == TRUE)
+ Adapter->PackInfo[uiSearchRuleIndex].bAdmittedSet = TRUE;
+
+ if (pstChangeIndication->sfActiveSet.bValid == FALSE) {
+ Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
+ Adapter->PackInfo[uiSearchRuleIndex].bActivateRequestSent = FALSE;
+
+ if (pstChangeIndication->sfAdmittedSet.bValid)
+ psfLocalSet = &pstChangeIndication->sfAdmittedSet;
+ else if (pstChangeIndication->sfAuthorizedSet.bValid)
+ psfLocalSet = &pstChangeIndication->sfAuthorizedSet;
+ } else {
+ psfLocalSet = &pstChangeIndication->sfActiveSet;
+ Adapter->PackInfo[uiSearchRuleIndex].bActive = TRUE;
}
- else
- {
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_PRINTK, 0, 0, "DSC ACK did not get valid SFID");
+
+ if (psfLocalSet->bValid && (pstChangeIndication->u8CC == 0)) {
+ Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = ntohs(pstChangeIndication->u16VCID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "CC field is %d bvalid = %d\n",
+ pstChangeIndication->u8CC, psfLocalSet->bValid);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "VCID= %d\n", ntohs(pstChangeIndication->u16VCID));
+ Adapter->PackInfo[uiSearchRuleIndex].usCID = ntohs(pstChangeIndication->u16CID);
+ CopyToAdapter(Adapter, psfLocalSet, uiSearchRuleIndex, DSC_ACK, pstAddIndication);
+
+ *(PULONG)(((PUCHAR)pvBuffer)+1) = psfLocalSet->u32SFID;
+ } else if (pstChangeIndication->u8CC == 6) {
+ deleteSFBySfid(Adapter, uiSearchRuleIndex);
kfree(pstAddIndication);
- return FALSE;
}
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "DSC ACK did not get valid SFID");
+ kfree(pstAddIndication);
+ return FALSE;
}
- break;
- case DSD_REQ:
- {
- UINT uiSearchRuleIndex;
- ULONG ulSFID;
-
- pLeader->PLength = sizeof(stLocalSFDeleteIndication);
- *((stLocalSFDeleteIndication*)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *((stLocalSFDeleteIndication*)pstAddIndication);
+ }
+ break;
+ case DSD_REQ:
+ {
+ UINT uiSearchRuleIndex;
+ ULONG ulSFID;
- ulSFID = ntohl(((stLocalSFDeleteIndication*)pstAddIndication)->u32SFID);
- uiSearchRuleIndex=SearchSfid(Adapter,ulSFID);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSD - Removing connection %x",uiSearchRuleIndex);
+ pLeader->PLength = sizeof(stLocalSFDeleteIndication);
+ *((stLocalSFDeleteIndication *)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *((stLocalSFDeleteIndication *)pstAddIndication);
- if(uiSearchRuleIndex < NO_OF_QUEUES)
- {
- //Delete All Classifiers Associated with this SFID
- deleteSFBySfid(Adapter,uiSearchRuleIndex);
- Adapter->u32TotalDSD++;
- }
+ ulSFID = ntohl(((stLocalSFDeleteIndication *)pstAddIndication)->u32SFID);
+ uiSearchRuleIndex = SearchSfid(Adapter, ulSFID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSD - Removing connection %x", uiSearchRuleIndex);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSD RESPONSE TO MAC");
- ((stLocalSFDeleteIndication*)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSD_RSP;
- CopyBufferToControlPacket(Adapter,(PVOID)Adapter->caDsxReqResp);
- }
- case DSD_RSP:
- {
- //Do nothing as SF has already got Deleted
+ if (uiSearchRuleIndex < NO_OF_QUEUES) {
+ /* Delete All Classifiers Associated with this SFID */
+ deleteSFBySfid(Adapter, uiSearchRuleIndex);
+ Adapter->u32TotalDSD++;
}
- break;
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSD RESPONSE TO MAC");
+ ((stLocalSFDeleteIndication *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSD_RSP;
+ CopyBufferToControlPacket(Adapter, (PVOID)Adapter->caDsxReqResp);
+ }
+ case DSD_RSP:
+ {
+		/* Do nothing as the SF has already been deleted */
+ }
+ break;
case DSD_ACK:
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSD ACK Rcd, let App handle it\n");
- break;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSD ACK Rcd, let App handle it\n");
+ break;
default:
kfree(pstAddIndication);
- return FALSE ;
+ return FALSE;
}
return TRUE;
}
@@ -2280,78 +1899,67 @@ BOOLEAN CmControlResponseMessage(PMINI_ADAPTER Adapter, /**<Pointer to the Adap
int get_dsx_sf_data_to_application(PMINI_ADAPTER Adapter, UINT uiSFId, void __user *user_buffer)
{
int status = 0;
- struct _packet_info *psSfInfo=NULL;
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "status =%d",status);
+ struct _packet_info *psSfInfo = NULL;
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "status =%d", status);
status = SearchSfid(Adapter, uiSFId);
if (status >= NO_OF_QUEUES) {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SFID %d not present in queue !!!", uiSFId );
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SFID %d not present in queue !!!", uiSFId);
return -EINVAL;
}
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "status =%d",status);
- psSfInfo=&Adapter->PackInfo[status];
- if(psSfInfo->pstSFIndication && copy_to_user(user_buffer,
- psSfInfo->pstSFIndication, sizeof(stLocalSFAddIndicationAlt)))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "copy to user failed SFID %d, present in queue !!!", uiSFId );
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "status =%d", status);
+ psSfInfo = &Adapter->PackInfo[status];
+ if (psSfInfo->pstSFIndication && copy_to_user(user_buffer,
+ psSfInfo->pstSFIndication, sizeof(stLocalSFAddIndicationAlt))) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "copy to user failed SFID %d, present in queue !!!", uiSFId);
status = -EFAULT;
return status;
}
return STATUS_SUCCESS;
}
-VOID OverrideServiceFlowParams(PMINI_ADAPTER Adapter,PUINT puiBuffer)
+VOID OverrideServiceFlowParams(PMINI_ADAPTER Adapter, PUINT puiBuffer)
{
- B_UINT32 u32NumofSFsinMsg = ntohl(*(puiBuffer + 1));
+ B_UINT32 u32NumofSFsinMsg = ntohl(*(puiBuffer + 1));
stIM_SFHostNotify *pHostInfo = NULL;
- UINT uiSearchRuleIndex = 0;
- ULONG ulSFID = 0;
+ UINT uiSearchRuleIndex = 0;
+ ULONG ulSFID = 0;
- puiBuffer+=2;
+ puiBuffer += 2;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "u32NumofSFsinMsg: 0x%x\n", u32NumofSFsinMsg);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "u32NumofSFsinMsg: 0x%x\n",u32NumofSFsinMsg);
-
- while(u32NumofSFsinMsg != 0 && u32NumofSFsinMsg < NO_OF_QUEUES)
- {
+ while (u32NumofSFsinMsg != 0 && u32NumofSFsinMsg < NO_OF_QUEUES) {
u32NumofSFsinMsg--;
pHostInfo = (stIM_SFHostNotify *)puiBuffer;
puiBuffer = (PUINT)(pHostInfo + 1);
ulSFID = ntohl(pHostInfo->SFID);
- uiSearchRuleIndex=SearchSfid(Adapter,ulSFID);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"SFID: 0x%lx\n",ulSFID);
+ uiSearchRuleIndex = SearchSfid(Adapter, ulSFID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SFID: 0x%lx\n", ulSFID);
- if(uiSearchRuleIndex >= NO_OF_QUEUES || uiSearchRuleIndex == HiPriority)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"The SFID <%lx> doesn't exist in host entry or is Invalid\n", ulSFID);
+ if (uiSearchRuleIndex >= NO_OF_QUEUES || uiSearchRuleIndex == HiPriority) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "The SFID <%lx> doesn't exist in host entry or is Invalid\n", ulSFID);
continue;
}
- if(pHostInfo->RetainSF == FALSE)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"Going to Delete SF");
- deleteSFBySfid(Adapter,uiSearchRuleIndex);
- }
- else
- {
-
+ if (pHostInfo->RetainSF == FALSE) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Going to Delete SF");
+ deleteSFBySfid(Adapter, uiSearchRuleIndex);
+ } else {
Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = ntohs(pHostInfo->VCID);
Adapter->PackInfo[uiSearchRuleIndex].usCID = ntohs(pHostInfo->newCID);
- Adapter->PackInfo[uiSearchRuleIndex].bActive=FALSE;
+ Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,"pHostInfo->QoSParamSet: 0x%x\n",pHostInfo->QoSParamSet);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "pHostInfo->QoSParamSet: 0x%x\n", pHostInfo->QoSParamSet);
- if(pHostInfo->QoSParamSet & 0x1)
- Adapter->PackInfo[uiSearchRuleIndex].bAuthorizedSet =TRUE;
- if(pHostInfo->QoSParamSet & 0x2)
- Adapter->PackInfo[uiSearchRuleIndex].bAdmittedSet =TRUE;
- if(pHostInfo->QoSParamSet & 0x4)
- {
- Adapter->PackInfo[uiSearchRuleIndex].bActiveSet =TRUE;
- Adapter->PackInfo[uiSearchRuleIndex].bActive=TRUE;
+ if (pHostInfo->QoSParamSet & 0x1)
+ Adapter->PackInfo[uiSearchRuleIndex].bAuthorizedSet = TRUE;
+ if (pHostInfo->QoSParamSet & 0x2)
+ Adapter->PackInfo[uiSearchRuleIndex].bAdmittedSet = TRUE;
+ if (pHostInfo->QoSParamSet & 0x4) {
+ Adapter->PackInfo[uiSearchRuleIndex].bActiveSet = TRUE;
+ Adapter->PackInfo[uiSearchRuleIndex].bActive = TRUE;
}
}
}
}
-
-
-
diff --git a/drivers/staging/bcm/led_control.h b/drivers/staging/bcm/led_control.h
index 0711ac20f6fc..84d1a028e1e2 100644
--- a/drivers/staging/bcm/led_control.h
+++ b/drivers/staging/bcm/led_control.h
@@ -4,11 +4,11 @@
/*************************TYPE DEF**********************/
#define NUM_OF_LEDS 4
-#define DSD_START_OFFSET 0x0200
-#define EEPROM_VERSION_OFFSET 0x020E
-#define EEPROM_HW_PARAM_POINTER_ADDRESS 0x0218
-#define EEPROM_HW_PARAM_POINTER_ADDRRES_MAP5 0x0220
-#define GPIO_SECTION_START_OFFSET 0x03
+#define DSD_START_OFFSET 0x0200
+#define EEPROM_VERSION_OFFSET 0x020E
+#define EEPROM_HW_PARAM_POINTER_ADDRESS 0x0218
+#define EEPROM_HW_PARAM_POINTER_ADDRRES_MAP5 0x0220
+#define GPIO_SECTION_START_OFFSET 0x03
#define COMPATIBILITY_SECTION_LENGTH 42
#define COMPATIBILITY_SECTION_LENGTH_MAP5 84
@@ -18,27 +18,27 @@
#define EEPROM_MAP5_MINORVERSION 0
-#define MAX_NUM_OF_BLINKS 10
-#define NUM_OF_GPIO_PINS 16
+#define MAX_NUM_OF_BLINKS 10
+#define NUM_OF_GPIO_PINS 16
-#define DISABLE_GPIO_NUM 0xFF
-#define EVENT_SIGNALED 1
+#define DISABLE_GPIO_NUM 0xFF
+#define EVENT_SIGNALED 1
-#define MAX_FILE_NAME_BUFFER_SIZE 100
+#define MAX_FILE_NAME_BUFFER_SIZE 100
-#define TURN_ON_LED(GPIO, index) do{ \
+#define TURN_ON_LED(GPIO, index) do { \
UINT gpio_val = GPIO; \
(Adapter->LEDInfo.LEDState[index].BitPolarity == 1) ? \
- wrmaltWithLock(Adapter,BCM_GPIO_OUTPUT_SET_REG, &gpio_val ,sizeof(gpio_val)) : \
- wrmaltWithLock(Adapter,BCM_GPIO_OUTPUT_CLR_REG, &gpio_val, sizeof(gpio_val)); \
- }while(0);
+ wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_SET_REG, &gpio_val, sizeof(gpio_val)) : \
+ wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_CLR_REG, &gpio_val, sizeof(gpio_val)); \
+ } while (0);
#define TURN_OFF_LED(GPIO, index) do { \
UINT gpio_val = GPIO; \
(Adapter->LEDInfo.LEDState[index].BitPolarity == 1) ? \
- wrmaltWithLock(Adapter,BCM_GPIO_OUTPUT_CLR_REG,&gpio_val ,sizeof(gpio_val)) : \
- wrmaltWithLock(Adapter,BCM_GPIO_OUTPUT_SET_REG,&gpio_val ,sizeof(gpio_val)); \
- }while(0);
+ wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_CLR_REG, &gpio_val, sizeof(gpio_val)) : \
+ wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_SET_REG, &gpio_val, sizeof(gpio_val)); \
+ } while (0);
#define B_ULONG32 unsigned long
@@ -50,7 +50,7 @@ typedef enum _LEDColors{
BLUE_LED = 2,
YELLOW_LED = 3,
GREEN_LED = 4
-} LEDColors; /*Enumerated values of different LED types*/
+} LEDColors; /* Enumerated values of different LED types */
typedef enum LedEvents {
SHUTDOWN_EXIT = 0x00,
@@ -62,43 +62,41 @@ typedef enum LedEvents {
LOWPOWER_MODE_ENTER = 0x20,
IDLEMODE_CONTINUE = 0x40,
IDLEMODE_EXIT = 0x80,
- LED_THREAD_INACTIVE = 0x100, //Makes the LED thread Inactivce. It wil be equivallent to putting the thread on hold.
- LED_THREAD_ACTIVE = 0x200 //Makes the LED Thread Active back.
-} LedEventInfo_t; /*Enumerated values of different driver states*/
+	LED_THREAD_INACTIVE = 0x100, /* Makes the LED thread inactive. It will be equivalent to putting the thread on hold. */
+	LED_THREAD_ACTIVE = 0x200 /* Makes the LED thread active again. */
+} LedEventInfo_t; /* Enumerated values of different driver states */
#define DRIVER_HALT 0xff
-/*Structure which stores the information of different LED types
- * and corresponding LED state information of driver states*/
-typedef struct LedStateInfo_t
-{
+/*
+ * Structure which stores the information of different LED types
+ * and corresponding LED state information of driver states
+ */
+typedef struct LedStateInfo_t {
UCHAR LED_Type; /* specify GPIO number - use 0xFF if not used */
UCHAR LED_On_State; /* Bits set or reset for different states */
UCHAR LED_Blink_State; /* Bits set or reset for blinking LEDs for different states */
UCHAR GPIO_Num;
- UCHAR BitPolarity; /*To represent whether H/W is normal polarity or reverse
- polarity*/
-}LEDStateInfo, *pLEDStateInfo;
+ UCHAR BitPolarity; /* To represent whether H/W is normal polarity or reverse polarity */
+} LEDStateInfo, *pLEDStateInfo;
-typedef struct _LED_INFO_STRUCT
-{
+typedef struct _LED_INFO_STRUCT {
LEDStateInfo LEDState[NUM_OF_LEDS];
- BOOLEAN bIdleMode_tx_from_host; /*Variable to notify whether driver came out
- from idlemode due to Host or target*/
+	BOOLEAN bIdleMode_tx_from_host; /* Variable to notify whether driver came out from idlemode due to Host or target */
BOOLEAN bIdle_led_off;
wait_queue_head_t notify_led_event;
wait_queue_head_t idleModeSyncEvent;
- struct task_struct *led_cntrl_threadid;
- int led_thread_running;
+ struct task_struct *led_cntrl_threadid;
+ int led_thread_running;
BOOLEAN bLedInitDone;
} LED_INFO_STRUCT, *PLED_INFO_STRUCT;
-//LED Thread state.
-#define BCM_LED_THREAD_DISABLED 0 //LED Thread is not running.
-#define BCM_LED_THREAD_RUNNING_ACTIVELY 1 //LED thread is running.
-#define BCM_LED_THREAD_RUNNING_INACTIVELY 2 //LED thread has been put on hold
+/* LED Thread state. */
+#define BCM_LED_THREAD_DISABLED 0 /* LED Thread is not running. */
+#define BCM_LED_THREAD_RUNNING_ACTIVELY 1 /* LED thread is running. */
+#define BCM_LED_THREAD_RUNNING_INACTIVELY 2 /* LED thread has been put on hold */
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index 4c77e508066b..12c691d90900 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -765,8 +765,9 @@ config COMEDI_ADV_PCI_DIO
default N
---help---
Enable support for Advantech PCI DIO cards
- PCI-1730, PCI-1733, PCI-1734, PCI-1736UP, PCI-1750, PCI-1751,
- PCI-1752, PCI-1753/E, PCI-1754, PCI-1756 and PCI-1762
+ PCI-1730, PCI-1733, PCI-1734, PCI-1735U, PCI-1736UP, PCI-1739U,
+ PCI-1750, PCI-1751, PCI-1752, PCI-1753/E, PCI-1754, PCI-1756,
+ PCI-1760 and PCI-1762
To compile this driver as a module, choose M here: the module will be
called adv_pci_dio.
diff --git a/drivers/staging/comedi/drivers/adv_pci_dio.c b/drivers/staging/comedi/drivers/adv_pci_dio.c
index 537e58534275..7af068f4a749 100644
--- a/drivers/staging/comedi/drivers/adv_pci_dio.c
+++ b/drivers/staging/comedi/drivers/adv_pci_dio.c
@@ -8,16 +8,16 @@
/*
Driver: adv_pci_dio
Description: Advantech PCI-1730, PCI-1733, PCI-1734, PCI-1735U,
- PCI-1736UP, PCI-1750, PCI-1751, PCI-1752, PCI-1753/E,
- PCI-1754, PCI-1756, PCI-1762
+ PCI-1736UP, PCI-1739U, PCI-1750, PCI-1751, PCI-1752,
+ PCI-1753/E, PCI-1754, PCI-1756, PCI-1760, PCI-1762
Author: Michal Dobes <dobes@tesnet.cz>
Devices: [Advantech] PCI-1730 (adv_pci_dio), PCI-1733,
- PCI-1734, PCI-1735U, PCI-1736UP, PCI-1750,
+ PCI-1734, PCI-1735U, PCI-1736UP, PCI-1739U, PCI-1750,
PCI-1751, PCI-1752, PCI-1753,
PCI-1753+PCI-1753E, PCI-1754, PCI-1756,
PCI-1760, PCI-1762
Status: untested
-Updated: Tue, 04 May 2010 13:00:00 +0000
+Updated: Mon, 09 Jan 2012 12:40:46 +0000
This driver supports now only insn interface for DI/DO/DIO.
@@ -51,6 +51,7 @@ Configuration options:
/* hardware types of the cards */
enum hw_cards_id {
TYPE_PCI1730, TYPE_PCI1733, TYPE_PCI1734, TYPE_PCI1735, TYPE_PCI1736,
+ TYPE_PCI1739,
TYPE_PCI1750,
TYPE_PCI1751,
TYPE_PCI1752,
@@ -109,6 +110,12 @@ enum hw_io_access {
#define PCI1736_BOARDID 4 /* R: Board I/D switch for 1736UP */
#define PCI1736_MAINREG 0 /* Normal register (2) doesn't work */
+/* Advantech PCI-1739U */
+#define PCI1739_DIO 0 /* R/W: begin of 8255 registers block */
+#define PCI1739_ICR 32 /* W: Interrupt control register */
+#define PCI1739_ISR 32 /* R: Interrupt status register */
+#define PCI1739_BOARDID 8 /* R: Board I/D switch for 1739U */
+
/* Advantech PCI-1750 */
#define PCI1750_IDI 0 /* R: Isolated digital input 0-15 */
#define PCI1750_IDO 0 /* W: Isolated digital output 0-15 */
@@ -262,6 +269,7 @@ static DEFINE_PCI_DEVICE_TABLE(pci_dio_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1734) },
{ PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1735) },
{ PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1736) },
+ { PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1739) },
{ PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1750) },
{ PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1751) },
{ PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1752) },
@@ -316,6 +324,14 @@ static const struct dio_boardtype boardtypes[] = {
{4, PCI1736_BOARDID, 1, SDF_INTERNAL},
{ {0, 0, 0, 0} },
IO_8b},
+ {"pci1739", PCI_VENDOR_ID_ADVANTECH, 0x1739, PCIDIO_MAINREG,
+ TYPE_PCI1739,
+ { {0, 0, 0, 0}, {0, 0, 0, 0} },
+ { {0, 0, 0, 0}, {0, 0, 0, 0} },
+ { {48, PCI1739_DIO, 2, 0}, {0, 0, 0, 0} },
+ {0, 0, 0, 0},
+ { {0, 0, 0, 0} },
+ IO_8b},
{"pci1750", PCI_VENDOR_ID_ADVANTECH, 0x1750, PCIDIO_MAINREG,
TYPE_PCI1750,
{ {0, 0, 0, 0}, {16, PCI1750_IDI, 2, 0} },
@@ -883,6 +899,11 @@ static int pci_dio_reset(struct comedi_device *dev)
outb(0, dev->iobase + PCI1736_3_INT_RF);
break;
+ case TYPE_PCI1739:
+ /* disable & clear interrupts */
+ outb(0x88, dev->iobase + PCI1739_ICR);
+ break;
+
case TYPE_PCI1750:
case TYPE_PCI1751:
/* disable & clear interrupts */
diff --git a/drivers/staging/comedi/drivers/dt2801.c b/drivers/staging/comedi/drivers/dt2801.c
index 5cce1b5f4484..b85c8366a396 100644
--- a/drivers/staging/comedi/drivers/dt2801.c
+++ b/drivers/staging/comedi/drivers/dt2801.c
@@ -720,12 +720,20 @@ static int dt2801_dio_insn_config(struct comedi_device *dev,
which = 1;
/* configure */
- if (data[0]) {
+ switch (data[0]) {
+ case INSN_CONFIG_DIO_OUTPUT:
s->io_bits = 0xff;
dt2801_writecmd(dev, DT_C_SET_DIGOUT);
- } else {
+ break;
+ case INSN_CONFIG_DIO_INPUT:
s->io_bits = 0;
dt2801_writecmd(dev, DT_C_SET_DIGIN);
+ break;
+ case INSN_CONFIG_DIO_QUERY:
+ data[1] = s->io_bits ? COMEDI_OUTPUT : COMEDI_INPUT;
+ return insn->n;
+ default:
+ return -EINVAL;
}
dt2801_writedata(dev, which);
diff --git a/drivers/staging/comedi/drivers/dt9812.c b/drivers/staging/comedi/drivers/dt9812.c
index 32d9c42e9659..e86ab5862895 100644
--- a/drivers/staging/comedi/drivers/dt9812.c
+++ b/drivers/staging/comedi/drivers/dt9812.c
@@ -527,7 +527,7 @@ static void dt9812_configure_gain(struct usb_dt9812 *dev,
* 11x -> Gain = 0.5
*/
case DT9812_GAIN_0PT5:
- rmw->or_value = F020_MASK_ADC0CF_AMP0GN2 ||
+ rmw->or_value = F020_MASK_ADC0CF_AMP0GN2 |
F020_MASK_ADC0CF_AMP0GN1;
break;
case DT9812_GAIN_1:
@@ -540,7 +540,7 @@ static void dt9812_configure_gain(struct usb_dt9812 *dev,
rmw->or_value = F020_MASK_ADC0CF_AMP0GN1;
break;
case DT9812_GAIN_8:
- rmw->or_value = F020_MASK_ADC0CF_AMP0GN1 ||
+ rmw->or_value = F020_MASK_ADC0CF_AMP0GN1 |
F020_MASK_ADC0CF_AMP0GN0;
break;
case DT9812_GAIN_16:
diff --git a/drivers/staging/comedi/drivers/me4000.c b/drivers/staging/comedi/drivers/me4000.c
index b692fea0d2b0..b0bc6bb877ab 100644
--- a/drivers/staging/comedi/drivers/me4000.c
+++ b/drivers/staging/comedi/drivers/me4000.c
@@ -2098,23 +2098,29 @@ static int me4000_dio_insn_config(struct comedi_device *dev,
CALL_PDEBUG("In me4000_dio_insn_config()\n");
- if (data[0] == INSN_CONFIG_DIO_QUERY) {
+ switch (data[0]) {
+ default:
+ return -EINVAL;
+ case INSN_CONFIG_DIO_QUERY:
data[1] =
(s->io_bits & (1 << chan)) ? COMEDI_OUTPUT : COMEDI_INPUT;
return insn->n;
+ case INSN_CONFIG_DIO_INPUT:
+ case INSN_CONFIG_DIO_OUTPUT:
+ break;
}
/*
* The input or output configuration of each digital line is
* configured by a special insn_config instruction. chanspec
* contains the channel to be changed, and data[0] contains the
- * value COMEDI_INPUT or COMEDI_OUTPUT.
+ * value INSN_CONFIG_DIO_INPUT or INSN_CONFIG_DIO_OUTPUT.
* On the ME-4000 it is only possible to switch port wise (8 bit)
*/
tmp = me4000_inl(dev, info->dio_context.ctrl_reg);
- if (data[0] == COMEDI_OUTPUT) {
+ if (data[0] == INSN_CONFIG_DIO_OUTPUT) {
if (chan < 8) {
s->io_bits |= 0xFF;
tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_0 |
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
index 045a4c00f346..1df8fcbcd108 100644
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ b/drivers/staging/comedi/drivers/ni_pcidio.c
@@ -30,7 +30,7 @@ Status: works
Devices: [National Instruments] PCI-DIO-32HS (ni_pcidio), PXI-6533,
PCI-DIO-96, PCI-DIO-96B, PXI-6508, PCI-6503, PCI-6503B, PCI-6503X,
PXI-6503, PCI-6533, PCI-6534
-Updated: Sun, 21 Apr 2002 21:03:38 -0700
+Updated: Mon, 09 Jan 2012 14:27:23 +0000
The DIO-96 appears as four 8255 subdevices. See the 8255
driver notes for details.
@@ -42,6 +42,11 @@ supports simple digital I/O; no handshaking is supported.
DMA mostly works for the PCI-DIO32HS, but only in timed input mode.
+The PCI-DIO-32HS/PCI-6533 has a configurable external trigger. Setting
+scan_begin_arg to 0 or CR_EDGE triggers on the leading edge. Setting
+scan_begin_arg to CR_INVERT or (CR_EDGE | CR_INVERT) triggers on the
+trailing edge.
+
This driver could be easily modified to support AT-MIO32HS and
AT-MIO96.
@@ -436,6 +441,7 @@ static int ni_pcidio_request_di_mite_channel(struct comedi_device *dev)
comedi_error(dev, "failed to reserve mite dma channel.");
return -EBUSY;
}
+ devpriv->di_mite_chan->dir = COMEDI_INPUT;
writeb(primary_DMAChannel_bits(devpriv->di_mite_chan->channel) |
secondary_DMAChannel_bits(devpriv->di_mite_chan->channel),
devpriv->mite->daq_io_addr + DMA_Line_Control_Group1);
@@ -482,6 +488,21 @@ void ni_pcidio_event(struct comedi_device *dev, struct comedi_subdevice *s)
comedi_event(dev, s);
}
+static int ni_pcidio_poll(struct comedi_device *dev, struct comedi_subdevice *s)
+{
+ unsigned long irq_flags;
+ int count;
+
+ spin_lock_irqsave(&dev->spinlock, irq_flags);
+ spin_lock(&devpriv->mite_channel_lock);
+ if (devpriv->di_mite_chan)
+ mite_sync_input_dma(devpriv->di_mite_chan, s->async);
+ spin_unlock(&devpriv->mite_channel_lock);
+ count = s->async->buf_write_count - s->async->buf_read_count;
+ spin_unlock_irqrestore(&dev->spinlock, irq_flags);
+ return count;
+}
+
static irqreturn_t nidio_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
@@ -497,7 +518,6 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
int status;
int work = 0;
unsigned int m_status = 0;
- unsigned long irq_flags;
/* interrupcions parasites */
if (dev->attached == 0) {
@@ -505,6 +525,9 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
return IRQ_NONE;
}
+ /* Lock to avoid race with comedi_poll */
+ spin_lock(&dev->spinlock);
+
status = readb(devpriv->mite->daq_io_addr +
Interrupt_And_Window_Status);
flags = readb(devpriv->mite->daq_io_addr + Group_1_Flags);
@@ -518,7 +541,7 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
/* printk("buf[4096]=%08x\n",
*(unsigned int *)(async->prealloc_buf+4096)); */
- spin_lock_irqsave(&devpriv->mite_channel_lock, irq_flags);
+ spin_lock(&devpriv->mite_channel_lock);
if (devpriv->di_mite_chan)
m_status = mite_get_status(devpriv->di_mite_chan);
#ifdef MITE_DEBUG
@@ -543,7 +566,7 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
disable_irq(dev->irq);
}
}
- spin_unlock_irqrestore(&devpriv->mite_channel_lock, irq_flags);
+ spin_unlock(&devpriv->mite_channel_lock);
while (status & DataLeft) {
work++;
@@ -645,6 +668,8 @@ out:
Master_DMA_And_Interrupt_Control);
}
#endif
+
+ spin_unlock(&dev->spinlock);
return IRQ_HANDLED;
}
@@ -825,8 +850,8 @@ static int ni_pcidio_cmdtest(struct comedi_device *dev,
} else {
/* TRIG_EXT */
/* should be level/edge, hi/lo specification here */
- if (cmd->scan_begin_arg != 0) {
- cmd->scan_begin_arg = 0;
+ if ((cmd->scan_begin_arg & ~(CR_EDGE | CR_INVERT)) != 0) {
+ cmd->scan_begin_arg &= (CR_EDGE | CR_INVERT);
err++;
}
}
@@ -941,7 +966,13 @@ static int ni_pcidio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
writeb(0, devpriv->mite->daq_io_addr + Sequence);
writeb(0x00, devpriv->mite->daq_io_addr + ReqReg);
writeb(4, devpriv->mite->daq_io_addr + BlockMode);
- writeb(0, devpriv->mite->daq_io_addr + LinePolarities);
+ if (!(cmd->scan_begin_arg & CR_INVERT)) {
+ /* Leading Edge pulse mode */
+ writeb(0, devpriv->mite->daq_io_addr + LinePolarities);
+ } else {
+ /* Trailing Edge pulse mode */
+ writeb(2, devpriv->mite->daq_io_addr + LinePolarities);
+ }
writeb(0x00, devpriv->mite->daq_io_addr + AckSer);
writel(1, devpriv->mite->daq_io_addr + StartDelay);
writeb(1, devpriv->mite->daq_io_addr + ReqDelay);
@@ -1005,17 +1036,24 @@ static int ni_pcidio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
static int setup_mite_dma(struct comedi_device *dev, struct comedi_subdevice *s)
{
int retval;
+ unsigned long flags;
retval = ni_pcidio_request_di_mite_channel(dev);
if (retval)
return retval;
- devpriv->di_mite_chan->dir = COMEDI_INPUT;
+ /* write alloc the entire buffer */
+ comedi_buf_write_alloc(s->async, s->async->prealloc_bufsz);
- mite_prep_dma(devpriv->di_mite_chan, 32, 32);
+ spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
+ if (devpriv->di_mite_chan) {
+ mite_prep_dma(devpriv->di_mite_chan, 32, 32);
+ mite_dma_arm(devpriv->di_mite_chan);
+ } else
+ retval = -EIO;
+ spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
- mite_dma_arm(devpriv->di_mite_chan);
- return 0;
+ return retval;
}
static int ni_pcidio_inttrig(struct comedi_device *dev,
@@ -1244,6 +1282,7 @@ static int nidio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->len_chanlist = 32; /* XXX */
s->buf_change = &ni_pcidio_change;
s->async_dma_dir = DMA_BIDIRECTIONAL;
+ s->poll = &ni_pcidio_poll;
writel(0, devpriv->mite->daq_io_addr + Port_IO(0));
writel(0, devpriv->mite->daq_io_addr + Port_Pin_Directions(0));
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
index 0f0d995f137c..27baefa32b17 100644
--- a/drivers/staging/comedi/drivers/ni_pcimio.c
+++ b/drivers/staging/comedi/drivers/ni_pcimio.c
@@ -29,14 +29,15 @@ Devices: [National Instruments] PCI-MIO-16XE-50 (ni_pcimio),
PCI-MIO-16XE-10, PXI-6030E, PCI-MIO-16E-1, PCI-MIO-16E-4, PCI-6014, PCI-6040E,
PXI-6040E, PCI-6030E, PCI-6031E, PCI-6032E, PCI-6033E, PCI-6071E, PCI-6023E,
PCI-6024E, PCI-6025E, PXI-6025E, PCI-6034E, PCI-6035E, PCI-6052E,
- PCI-6110, PCI-6111, PCI-6220, PCI-6221, PCI-6224, PXI-6224, PCI-6225, PXI-6225,
- PCI-6229, PCI-6250, PCI-6251, PCIe-6251, PCI-6254, PCI-6259, PCIe-6259,
+ PCI-6110, PCI-6111, PCI-6220, PCI-6221, PCI-6224, PXI-6224,
+ PCI-6225, PXI-6225, PCI-6229, PCI-6250, PCI-6251, PCIe-6251, PXIe-6251,
+ PCI-6254, PCI-6259, PCIe-6259,
PCI-6280, PCI-6281, PXI-6281, PCI-6284, PCI-6289,
PCI-6711, PXI-6711, PCI-6713, PXI-6713,
PXI-6071E, PCI-6070E, PXI-6070E,
PXI-6052E, PCI-6036E, PCI-6731, PCI-6733, PXI-6733,
PCI-6143, PXI-6143
-Updated: Wed, 03 Dec 2008 10:51:47 +0000
+Updated: Mon, 09 Jan 2012 14:52:48 +0000
These boards are almost identical to the AT-MIO E series, except that
they use the PCI bus instead of ISA (i.e., AT). See the notes for
@@ -182,6 +183,7 @@ static DEFINE_PCI_DEVICE_TABLE(ni_pci_table) = {
{PCI_DEVICE(PCI_VENDOR_ID_NI, 0x717f)},
{PCI_DEVICE(PCI_VENDOR_ID_NI, 0x71bc)},
{PCI_DEVICE(PCI_VENDOR_ID_NI, 0x717d)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x72e8)},
{0}
};
@@ -1046,6 +1048,25 @@ static const struct ni_board_struct ni_boards[] = {
.has_8255 = 0,
},
{
+ .device_id = 0x72e8,
+ .name = "pxie-6251",
+ .n_adchan = 16,
+ .adbits = 16,
+ .ai_fifo_depth = 4095,
+ .gainlkup = ai_gain_628x,
+ .ai_speed = 800,
+ .n_aochan = 2,
+ .aobits = 16,
+ .ao_fifo_depth = 8191,
+ .ao_range_table = &range_ni_M_625x_ao,
+ .reg_type = ni_reg_625x,
+ .ao_unipolar = 0,
+ .ao_speed = 357,
+ .num_p0_dio_channels = 8,
+ .caldac = {caldac_none},
+ .has_8255 = 0,
+ },
+ {
.device_id = 0x70b7,
.name = "pci-6254",
.n_adchan = 32,
diff --git a/drivers/staging/crystalhd/crystalhd_hw.c b/drivers/staging/crystalhd/crystalhd_hw.c
index 5acf39e7cdef..eeddee9a6050 100644
--- a/drivers/staging/crystalhd/crystalhd_hw.c
+++ b/drivers/staging/crystalhd/crystalhd_hw.c
@@ -868,8 +868,7 @@ static enum BC_STATUS crystalhd_stop_tx_dma_engine(struct crystalhd_hw *hw)
BCMLOG(BCMLOG_DBG, "Stopping TX DMA Engine..\n");
- /* FIXME: jarod: invert dma_ctrl and check bit? or are there missing parens? */
- if (!dma_cntrl & DMA_START_BIT) {
+ if (!(dma_cntrl & DMA_START_BIT)) {
BCMLOG(BCMLOG_DBG, "Already Stopped\n");
return BC_STS_SUCCESS;
}
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
index 7faeadad1fff..71aaad31270b 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
@@ -29,10 +29,10 @@
#define FT1000_PROC "ft1000"
#define MAX_FILE_LEN 255
-#define PUTM_TO_PAGE(len,page,args...) \
+#define PUTM_TO_PAGE(len, page, args...) \
len += snprintf(page+len, PAGE_SIZE - len, args)
-#define PUTX_TO_PAGE(len,page,message,size,var) \
+#define PUTX_TO_PAGE(len, page, message, size, var) \
len += snprintf(page+len, PAGE_SIZE - len, message); \
for(i = 0; i < (size - 1); i++) \
{ \
@@ -40,7 +40,7 @@
} \
len += snprintf(page+len, PAGE_SIZE - len, "%02x\n", var[i])
-#define PUTD_TO_PAGE(len,page,message,size,var) \
+#define PUTD_TO_PAGE(len, page, message, size, var) \
len += snprintf(page+len, PAGE_SIZE - len, message); \
for(i = 0; i < (size - 1); i++) \
{ \
diff --git a/drivers/staging/hv/storvsc_drv.c b/drivers/staging/hv/storvsc_drv.c
index eb853f71089a..695ffc36e02d 100644
--- a/drivers/staging/hv/storvsc_drv.c
+++ b/drivers/staging/hv/storvsc_drv.c
@@ -42,56 +42,23 @@
#include <scsi/scsi_devinfo.h>
#include <scsi/scsi_dbg.h>
+/*
+ * All wire protocol details (storage protocol between the guest and the host)
+ * are consolidated here.
+ *
+ * Begin protocol definitions.
+ */
-#define STORVSC_MIN_BUF_NR 64
-#define STORVSC_RING_BUFFER_SIZE (20*PAGE_SIZE)
-static int storvsc_ringbuffer_size = STORVSC_RING_BUFFER_SIZE;
-
-module_param(storvsc_ringbuffer_size, int, S_IRUGO);
-MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");
-
-/* to alert the user that structure sizes may be mismatched even though the */
-/* protocol versions match. */
-
-
-#define REVISION_STRING(REVISION_) #REVISION_
-#define FILL_VMSTOR_REVISION(RESULT_LVALUE_) \
- do { \
- char *revision_string \
- = REVISION_STRING($Rev : 6 $) + 6; \
- RESULT_LVALUE_ = 0; \
- while (*revision_string >= '0' \
- && *revision_string <= '9') { \
- RESULT_LVALUE_ *= 10; \
- RESULT_LVALUE_ += *revision_string - '0'; \
- revision_string++; \
- } \
- } while (0)
-
-/* Major/minor macros. Minor version is in LSB, meaning that earlier flat */
-/* version numbers will be interpreted as "0.x" (i.e., 1 becomes 0.1). */
-#define VMSTOR_PROTOCOL_MAJOR(VERSION_) (((VERSION_) >> 8) & 0xff)
-#define VMSTOR_PROTOCOL_MINOR(VERSION_) (((VERSION_)) & 0xff)
-#define VMSTOR_PROTOCOL_VERSION(MAJOR_, MINOR_) ((((MAJOR_) & 0xff) << 8) | \
- (((MINOR_) & 0xff)))
-#define VMSTOR_INVALID_PROTOCOL_VERSION (-1)
-
-/* Version history: */
-/* V1 Beta 0.1 */
-/* V1 RC < 2008/1/31 1.0 */
-/* V1 RC > 2008/1/31 2.0 */
-#define VMSTOR_PROTOCOL_VERSION_CURRENT VMSTOR_PROTOCOL_VERSION(4, 2)
-
-
-
+/*
+ * Version history:
+ * V1 Beta: 0.1
+ * V1 RC < 2008/1/31: 1.0
+ * V1 RC > 2008/1/31: 2.0
+ * Win7: 4.2
+ */
-/* This will get replaced with the max transfer length that is possible on */
-/* the host adapter. */
-/* The max transfer length will be published when we offer a vmbus channel. */
-#define MAX_TRANSFER_LENGTH 0x40000
-#define DEFAULT_PACKET_SIZE (sizeof(struct vmdata_gpa_direct) + \
- sizeof(struct vstor_packet) + \
- sizesizeof(u64) * (MAX_TRANSFER_LENGTH / PAGE_SIZE)))
+#define VMSTOR_CURRENT_MAJOR 4
+#define VMSTOR_CURRENT_MINOR 2
/* Packet structure describing virtual storage requests. */
@@ -115,35 +82,31 @@ enum vstor_packet_operation {
* this remains the same across the write regardless of 32/64 bit
* note: it's patterned off the SCSI_PASS_THROUGH structure
*/
-#define CDB16GENERIC_LENGTH 0x10
-
-#ifndef SENSE_BUFFER_SIZE
-#define SENSE_BUFFER_SIZE 0x12
-#endif
-
-#define MAX_DATA_BUF_LEN_WITH_PADDING 0x14
+#define STORVSC_MAX_CMD_LEN 0x10
+#define STORVSC_SENSE_BUFFER_SIZE 0x12
+#define STORVSC_MAX_BUF_LEN_WITH_PADDING 0x14
struct vmscsi_request {
- unsigned short length;
- unsigned char srb_status;
- unsigned char scsi_status;
+ u16 length;
+ u8 srb_status;
+ u8 scsi_status;
- unsigned char port_number;
- unsigned char path_id;
- unsigned char target_id;
- unsigned char lun;
+ u8 port_number;
+ u8 path_id;
+ u8 target_id;
+ u8 lun;
- unsigned char cdb_length;
- unsigned char sense_info_length;
- unsigned char data_in;
- unsigned char reserved;
+ u8 cdb_length;
+ u8 sense_info_length;
+ u8 data_in;
+ u8 reserved;
- unsigned int data_transfer_length;
+ u32 data_transfer_length;
union {
- unsigned char cdb[CDB16GENERIC_LENGTH];
- unsigned char sense_data[SENSE_BUFFER_SIZE];
- unsigned char reserved_array[MAX_DATA_BUF_LEN_WITH_PADDING];
+ u8 cdb[STORVSC_MAX_CMD_LEN];
+ u8 sense_data[STORVSC_SENSE_BUFFER_SIZE];
+ u8 reserved_array[STORVSC_MAX_BUF_LEN_WITH_PADDING];
};
} __attribute((packed));
@@ -153,32 +116,36 @@ struct vmscsi_request {
* properties of the channel.
*/
struct vmstorage_channel_properties {
- unsigned short protocol_version;
- unsigned char path_id;
- unsigned char target_id;
+ u16 protocol_version;
+ u8 path_id;
+ u8 target_id;
/* Note: port number is only really known on the client side */
- unsigned int port_number;
- unsigned int flags;
- unsigned int max_transfer_bytes;
+ u32 port_number;
+ u32 flags;
+ u32 max_transfer_bytes;
- /* This id is unique for each channel and will correspond with */
- /* vendor specific data in the inquirydata */
- unsigned long long unique_id;
+ /*
+ * This id is unique for each channel and will correspond with
+ * vendor specific data in the inquiry data.
+ */
+
+ u64 unique_id;
} __packed;
/* This structure is sent during the storage protocol negotiations. */
struct vmstorage_protocol_version {
/* Major (MSW) and minor (LSW) version numbers. */
- unsigned short major_minor;
+ u16 major_minor;
/*
* Revision number is auto-incremented whenever this file is changed
* (See FILL_VMSTOR_REVISION macro above). Mismatch does not
* definitely indicate incompatibility--but it does indicate mismatched
* builds.
+	 * This is only used on the Windows side. Just set it to 0.
*/
- unsigned short revision;
+ u16 revision;
} __packed;
/* Channel Property Flags */
@@ -190,10 +157,10 @@ struct vstor_packet {
enum vstor_packet_operation operation;
/* Flags - see below for values */
- unsigned int flags;
+ u32 flags;
/* Status of the request returned from the server side. */
- unsigned int status;
+ u32 status;
/* Data payload area */
union {
@@ -211,18 +178,47 @@ struct vstor_packet {
};
} __packed;
-/* Packet flags */
/*
+ * Packet Flags:
+ *
* This flag indicates that the server should send back a completion for this
* packet.
*/
+
#define REQUEST_COMPLETION_FLAG 0x1
-/* This is the set of flags that the vsc can set in any packets it sends */
-#define VSC_LEGAL_FLAGS (REQUEST_COMPLETION_FLAG)
+/* Matches Windows-end */
+enum storvsc_request_type {
+ WRITE_TYPE = 0,
+ READ_TYPE,
+ UNKNOWN_TYPE,
+};
+/*
+ * SRB status codes and masks; a subset of the codes used here.
+ */
-/* Defines */
+#define SRB_STATUS_AUTOSENSE_VALID 0x80
+#define SRB_STATUS_INVALID_LUN 0x20
+#define SRB_STATUS_SUCCESS 0x01
+#define SRB_STATUS_ERROR 0x04
+
+/*
+ * This is the end of Protocol specific defines.
+ */
+
+
+/*
+ * We set up a mempool to allocate request structures for this driver
+ * on a per-lun basis. The following define specifies the number of
+ * elements in the pool.
+ */
+
+#define STORVSC_MIN_BUF_NR 64
+static int storvsc_ringbuffer_size = (20 * PAGE_SIZE);
+
+module_param(storvsc_ringbuffer_size, int, S_IRUGO);
+MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");
#define STORVSC_MAX_IO_REQUESTS 128
@@ -235,27 +231,23 @@ struct vstor_packet {
#define STORVSC_MAX_LUNS_PER_TARGET 64
#define STORVSC_MAX_TARGETS 1
#define STORVSC_MAX_CHANNELS 1
-#define STORVSC_MAX_CMD_LEN 16
-/* Matches Windows-end */
-enum storvsc_request_type {
- WRITE_TYPE,
- READ_TYPE,
- UNKNOWN_TYPE,
-};
-struct hv_storvsc_request {
+struct storvsc_cmd_request {
+ struct list_head entry;
+ struct scsi_cmnd *cmd;
+
+ unsigned int bounce_sgl_count;
+ struct scatterlist *bounce_sgl;
+
struct hv_device *device;
/* Synchronize the request/response if needed */
struct completion wait_event;
unsigned char *sense_buffer;
- void *context;
- void (*on_io_completion)(struct hv_storvsc_request *request);
struct hv_multipage_buffer data_buffer;
-
struct vstor_packet vstor_packet;
};
@@ -281,8 +273,8 @@ struct storvsc_device {
unsigned char target_id;
/* Used for vsc/vsp channel reset process */
- struct hv_storvsc_request init_request;
- struct hv_storvsc_request reset_request;
+ struct storvsc_cmd_request init_request;
+ struct storvsc_cmd_request reset_request;
};
struct stor_mem_pools {
@@ -297,16 +289,6 @@ struct hv_host_device {
unsigned char target;
};
-struct storvsc_cmd_request {
- struct list_head entry;
- struct scsi_cmnd *cmd;
-
- unsigned int bounce_sgl_count;
- struct scatterlist *bounce_sgl;
-
- struct hv_storvsc_request request;
-};
-
struct storvsc_scan_work {
struct work_struct work;
struct Scsi_Host *host;
@@ -352,6 +334,34 @@ done:
kfree(wrk);
}
+/*
+ * Major/minor macros. Minor version is in LSB, meaning that earlier flat
+ * version numbers will be interpreted as "0.x" (i.e., 1 becomes 0.1).
+ */
+
+static inline u16 storvsc_get_version(u8 major, u8 minor)
+{
+ u16 version;
+
+ version = ((major << 8) | minor);
+ return version;
+}
+
+/*
+ * We can get incoming messages from the host that are not in response to
+ * messages that we have sent out. An example of this would be messages
+ * received by the guest to notify dynamic addition/removal of LUNs. To
+ * deal with potential race conditions where the driver may be in the
+ * midst of being unloaded when we might receive an unsolicited message
+ * from the host, we have implemented a mechanism to guarantee sequential
+ * consistency:
+ *
+ * 1) Once the device is marked as being destroyed, we will fail all
+ * outgoing messages.
+ * 2) We permit incoming messages when the device is being destroyed,
+ * only to properly account for messages already sent out.
+ */
+
static inline struct storvsc_device *get_out_stor_device(
struct hv_device *device)
{
@@ -398,10 +408,231 @@ get_in_err:
}
+static void destroy_bounce_buffer(struct scatterlist *sgl,
+ unsigned int sg_count)
+{
+ int i;
+ struct page *page_buf;
+
+ for (i = 0; i < sg_count; i++) {
+ page_buf = sg_page((&sgl[i]));
+ if (page_buf != NULL)
+ __free_page(page_buf);
+ }
+
+ kfree(sgl);
+}
+
+static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
+{
+ int i;
+
+ /* No need to check */
+ if (sg_count < 2)
+ return -1;
+
+ /* We have at least 2 sg entries */
+ for (i = 0; i < sg_count; i++) {
+ if (i == 0) {
+ /* make sure 1st one does not have hole */
+ if (sgl[i].offset + sgl[i].length != PAGE_SIZE)
+ return i;
+ } else if (i == sg_count - 1) {
+ /* make sure last one does not have hole */
+ if (sgl[i].offset != 0)
+ return i;
+ } else {
+ /* make sure no hole in the middle */
+ if (sgl[i].length != PAGE_SIZE || sgl[i].offset != 0)
+ return i;
+ }
+ }
+ return -1;
+}
+
+static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
+ unsigned int sg_count,
+ unsigned int len,
+ int write)
+{
+ int i;
+ int num_pages;
+ struct scatterlist *bounce_sgl;
+ struct page *page_buf;
+ unsigned int buf_len = ((write == WRITE_TYPE) ? 0 : PAGE_SIZE);
+
+ num_pages = ALIGN(len, PAGE_SIZE) >> PAGE_SHIFT;
+
+ bounce_sgl = kcalloc(num_pages, sizeof(struct scatterlist), GFP_ATOMIC);
+ if (!bounce_sgl)
+ return NULL;
+
+ for (i = 0; i < num_pages; i++) {
+ page_buf = alloc_page(GFP_ATOMIC);
+ if (!page_buf)
+ goto cleanup;
+ sg_set_page(&bounce_sgl[i], page_buf, buf_len, 0);
+ }
+
+ return bounce_sgl;
+
+cleanup:
+ destroy_bounce_buffer(bounce_sgl, num_pages);
+ return NULL;
+}
+
+/* Assume the original sgl has enough room */
+static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
+ struct scatterlist *bounce_sgl,
+ unsigned int orig_sgl_count,
+ unsigned int bounce_sgl_count)
+{
+ int i;
+ int j = 0;
+ unsigned long src, dest;
+ unsigned int srclen, destlen, copylen;
+ unsigned int total_copied = 0;
+ unsigned long bounce_addr = 0;
+ unsigned long dest_addr = 0;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ for (i = 0; i < orig_sgl_count; i++) {
+ dest_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
+ KM_IRQ0) + orig_sgl[i].offset;
+ dest = dest_addr;
+ destlen = orig_sgl[i].length;
+
+ if (bounce_addr == 0)
+ bounce_addr =
+ (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
+ KM_IRQ0);
+
+ while (destlen) {
+ src = bounce_addr + bounce_sgl[j].offset;
+ srclen = bounce_sgl[j].length - bounce_sgl[j].offset;
+
+ copylen = min(srclen, destlen);
+ memcpy((void *)dest, (void *)src, copylen);
+
+ total_copied += copylen;
+ bounce_sgl[j].offset += copylen;
+ destlen -= copylen;
+ dest += copylen;
+
+ if (bounce_sgl[j].offset == bounce_sgl[j].length) {
+ /* full */
+ kunmap_atomic((void *)bounce_addr, KM_IRQ0);
+ j++;
+
+ /*
+ * It is possible that the number of elements
+ * in the bounce buffer may not be equal to
+ * the number of elements in the original
+ * scatter list. Handle this correctly.
+ */
+
+ if (j == bounce_sgl_count) {
+ /*
+ * We are done; cleanup and return.
+ */
+ kunmap_atomic((void *)(dest_addr -
+ orig_sgl[i].offset),
+ KM_IRQ0);
+ local_irq_restore(flags);
+ return total_copied;
+ }
+
+ /* if we need to use another bounce buffer */
+ if (destlen || i != orig_sgl_count - 1)
+ bounce_addr =
+ (unsigned long)kmap_atomic(
+ sg_page((&bounce_sgl[j])), KM_IRQ0);
+ } else if (destlen == 0 && i == orig_sgl_count - 1) {
+ /* unmap the last bounce that is < PAGE_SIZE */
+ kunmap_atomic((void *)bounce_addr, KM_IRQ0);
+ }
+ }
+
+ kunmap_atomic((void *)(dest_addr - orig_sgl[i].offset),
+ KM_IRQ0);
+ }
+
+ local_irq_restore(flags);
+
+ return total_copied;
+}
+
+/* Assume the bounce_sgl has enough room, i.e. it was created using create_bounce_buffer() */
+static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
+ struct scatterlist *bounce_sgl,
+ unsigned int orig_sgl_count)
+{
+ int i;
+ int j = 0;
+ unsigned long src, dest;
+ unsigned int srclen, destlen, copylen;
+ unsigned int total_copied = 0;
+ unsigned long bounce_addr = 0;
+ unsigned long src_addr = 0;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ for (i = 0; i < orig_sgl_count; i++) {
+ src_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
+ KM_IRQ0) + orig_sgl[i].offset;
+ src = src_addr;
+ srclen = orig_sgl[i].length;
+
+ if (bounce_addr == 0)
+ bounce_addr =
+ (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
+ KM_IRQ0);
+
+ while (srclen) {
+ /* assume bounce offset always == 0 */
+ dest = bounce_addr + bounce_sgl[j].length;
+ destlen = PAGE_SIZE - bounce_sgl[j].length;
+
+ copylen = min(srclen, destlen);
+ memcpy((void *)dest, (void *)src, copylen);
+
+ total_copied += copylen;
+ bounce_sgl[j].length += copylen;
+ srclen -= copylen;
+ src += copylen;
+
+ if (bounce_sgl[j].length == PAGE_SIZE) {
+ /* full..move to next entry */
+ kunmap_atomic((void *)bounce_addr, KM_IRQ0);
+ j++;
+
+ /* if we need to use another bounce buffer */
+ if (srclen || i != orig_sgl_count - 1)
+ bounce_addr =
+ (unsigned long)kmap_atomic(
+ sg_page((&bounce_sgl[j])), KM_IRQ0);
+
+ } else if (srclen == 0 && i == orig_sgl_count - 1) {
+ /* unmap the last bounce that is < PAGE_SIZE */
+ kunmap_atomic((void *)bounce_addr, KM_IRQ0);
+ }
+ }
+
+ kunmap_atomic((void *)(src_addr - orig_sgl[i].offset), KM_IRQ0);
+ }
+
+ local_irq_restore(flags);
+
+ return total_copied;
+}
+
static int storvsc_channel_init(struct hv_device *device)
{
struct storvsc_device *stor_device;
- struct hv_storvsc_request *request;
+ struct storvsc_cmd_request *request;
struct vstor_packet *vstor_packet;
int ret, t;
@@ -416,7 +647,7 @@ static int storvsc_channel_init(struct hv_device *device)
* Now, initiate the vsc/vsp initialization protocol on the open
* channel
*/
- memset(request, 0, sizeof(struct hv_storvsc_request));
+ memset(request, 0, sizeof(struct storvsc_cmd_request));
init_completion(&request->wait_event);
vstor_packet->operation = VSTOR_OPERATION_BEGIN_INITIALIZATION;
vstor_packet->flags = REQUEST_COMPLETION_FLAG;
@@ -445,8 +676,13 @@ static int storvsc_channel_init(struct hv_device *device)
vstor_packet->operation = VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;
vstor_packet->flags = REQUEST_COMPLETION_FLAG;
- vstor_packet->version.major_minor = VMSTOR_PROTOCOL_VERSION_CURRENT;
- FILL_VMSTOR_REVISION(vstor_packet->version.revision);
+ vstor_packet->version.major_minor =
+ storvsc_get_version(VMSTOR_CURRENT_MAJOR, VMSTOR_CURRENT_MINOR);
+
+ /*
+ * The revision number is only used in Windows; set it to 0.
+ */
+ vstor_packet->version.revision = 0;
ret = vmbus_sendpacket(device->channel, vstor_packet,
sizeof(struct vstor_packet),
@@ -524,9 +760,84 @@ cleanup:
return ret;
}
+
+static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
+{
+ struct scsi_cmnd *scmnd = cmd_request->cmd;
+ struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
+ void (*scsi_done_fn)(struct scsi_cmnd *);
+ struct scsi_sense_hdr sense_hdr;
+ struct vmscsi_request *vm_srb;
+ struct storvsc_scan_work *wrk;
+ struct stor_mem_pools *memp = scmnd->device->hostdata;
+
+ vm_srb = &cmd_request->vstor_packet.vm_srb;
+ if (cmd_request->bounce_sgl_count) {
+ if (vm_srb->data_in == READ_TYPE)
+ copy_from_bounce_buffer(scsi_sglist(scmnd),
+ cmd_request->bounce_sgl,
+ scsi_sg_count(scmnd),
+ cmd_request->bounce_sgl_count);
+ destroy_bounce_buffer(cmd_request->bounce_sgl,
+ cmd_request->bounce_sgl_count);
+ }
+
+ /*
+	 * If there is an error, offline the device, since all
+ * error recovery strategies would have already been
+ * deployed on the host side.
+ */
+ if (vm_srb->srb_status == SRB_STATUS_ERROR)
+ scmnd->result = DID_TARGET_FAILURE << 16;
+ else
+ scmnd->result = vm_srb->scsi_status;
+
+ /*
+	 * If the LUN is invalid, remove the device.
+ */
+ if (vm_srb->srb_status == SRB_STATUS_INVALID_LUN) {
+ struct storvsc_device *stor_dev;
+ struct hv_device *dev = host_dev->dev;
+ struct Scsi_Host *host;
+
+ stor_dev = get_in_stor_device(dev);
+ host = stor_dev->host;
+
+ wrk = kmalloc(sizeof(struct storvsc_scan_work),
+ GFP_ATOMIC);
+ if (!wrk) {
+ scmnd->result = DID_TARGET_FAILURE << 16;
+ } else {
+ wrk->host = host;
+ wrk->lun = vm_srb->lun;
+ INIT_WORK(&wrk->work, storvsc_remove_lun);
+ schedule_work(&wrk->work);
+ }
+ }
+
+ if (scmnd->result) {
+ if (scsi_normalize_sense(scmnd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE, &sense_hdr))
+ scsi_print_sense_hdr("storvsc", &sense_hdr);
+ }
+
+ scsi_set_resid(scmnd,
+ cmd_request->data_buffer.len -
+ vm_srb->data_transfer_length);
+
+ scsi_done_fn = scmnd->scsi_done;
+
+ scmnd->host_scribble = NULL;
+ scmnd->scsi_done = NULL;
+
+ scsi_done_fn(scmnd);
+
+ mempool_free(cmd_request, memp->request_mempool);
+}
+
static void storvsc_on_io_completion(struct hv_device *device,
struct vstor_packet *vstor_packet,
- struct hv_storvsc_request *request)
+ struct storvsc_cmd_request *request)
{
struct storvsc_device *stor_device;
struct vstor_packet *stor_pkt;
@@ -546,9 +857,9 @@ static void storvsc_on_io_completion(struct hv_device *device,
*/
if ((stor_pkt->vm_srb.cdb[0] == INQUIRY) ||
- (stor_pkt->vm_srb.cdb[0] == MODE_SENSE)) {
+ (stor_pkt->vm_srb.cdb[0] == MODE_SENSE)) {
vstor_packet->vm_srb.scsi_status = 0;
- vstor_packet->vm_srb.srb_status = 0x1;
+ vstor_packet->vm_srb.srb_status = SRB_STATUS_SUCCESS;
}
@@ -559,7 +870,7 @@ static void storvsc_on_io_completion(struct hv_device *device,
vstor_packet->vm_srb.sense_info_length;
if (vstor_packet->vm_srb.scsi_status != 0 ||
- vstor_packet->vm_srb.srb_status != 1){
+ vstor_packet->vm_srb.srb_status != SRB_STATUS_SUCCESS){
dev_warn(&device->device,
"cmd 0x%x scsi status 0x%x srb status 0x%x\n",
stor_pkt->vm_srb.cdb[0],
@@ -569,7 +880,8 @@ static void storvsc_on_io_completion(struct hv_device *device,
if ((vstor_packet->vm_srb.scsi_status & 0xFF) == 0x02) {
/* CHECK_CONDITION */
- if (vstor_packet->vm_srb.srb_status & 0x80) {
+ if (vstor_packet->vm_srb.srb_status &
+ SRB_STATUS_AUTOSENSE_VALID) {
/* autosense data available */
dev_warn(&device->device,
"stor pkt %p autosense data valid - len %d\n",
@@ -586,7 +898,7 @@ static void storvsc_on_io_completion(struct hv_device *device,
stor_pkt->vm_srb.data_transfer_length =
vstor_packet->vm_srb.data_transfer_length;
- request->on_io_completion(request);
+ storvsc_command_completion(request);
if (atomic_dec_and_test(&stor_device->num_outstanding_req) &&
stor_device->drain_notify)
@@ -597,7 +909,7 @@ static void storvsc_on_io_completion(struct hv_device *device,
static void storvsc_on_receive(struct hv_device *device,
struct vstor_packet *vstor_packet,
- struct hv_storvsc_request *request)
+ struct storvsc_cmd_request *request)
{
struct storvsc_scan_work *work;
struct storvsc_device *stor_device;
@@ -631,7 +943,7 @@ static void storvsc_on_channel_callback(void *context)
u32 bytes_recvd;
u64 request_id;
unsigned char packet[ALIGN(sizeof(struct vstor_packet), 8)];
- struct hv_storvsc_request *request;
+ struct storvsc_cmd_request *request;
int ret;
@@ -645,7 +957,7 @@ static void storvsc_on_channel_callback(void *context)
&bytes_recvd, &request_id);
if (ret == 0 && bytes_recvd > 0) {
- request = (struct hv_storvsc_request *)
+ request = (struct storvsc_cmd_request *)
(unsigned long)request_id;
if ((request == &stor_device->init_request) ||
@@ -674,7 +986,6 @@ static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size)
memset(&props, 0, sizeof(struct vmstorage_channel_properties));
- /* Open the channel */
ret = vmbus_open(device->channel,
ring_size,
ring_size,
@@ -728,7 +1039,7 @@ static int storvsc_dev_remove(struct hv_device *device)
}
static int storvsc_do_io(struct hv_device *device,
- struct hv_storvsc_request *request)
+ struct storvsc_cmd_request *request)
{
struct storvsc_device *stor_device;
struct vstor_packet *vstor_packet;
@@ -749,7 +1060,7 @@ static int storvsc_do_io(struct hv_device *device,
vstor_packet->vm_srb.length = sizeof(struct vmscsi_request);
- vstor_packet->vm_srb.sense_info_length = SENSE_BUFFER_SIZE;
+ vstor_packet->vm_srb.sense_info_length = STORVSC_SENSE_BUFFER_SIZE;
vstor_packet->vm_srb.data_transfer_length =
@@ -779,18 +1090,6 @@ static int storvsc_do_io(struct hv_device *device,
return ret;
}
-static void storvsc_get_ide_info(struct hv_device *dev, int *target, int *path)
-{
- *target =
- dev->dev_instance.b[5] << 8 | dev->dev_instance.b[4];
-
- *path =
- dev->dev_instance.b[3] << 24 |
- dev->dev_instance.b[2] << 16 |
- dev->dev_instance.b[1] << 8 | dev->dev_instance.b[0];
-}
-
-
static int storvsc_device_alloc(struct scsi_device *sdevice)
{
struct stor_mem_pools *memp;
@@ -849,245 +1148,6 @@ static int storvsc_device_configure(struct scsi_device *sdevice)
return 0;
}
-static void destroy_bounce_buffer(struct scatterlist *sgl,
- unsigned int sg_count)
-{
- int i;
- struct page *page_buf;
-
- for (i = 0; i < sg_count; i++) {
- page_buf = sg_page((&sgl[i]));
- if (page_buf != NULL)
- __free_page(page_buf);
- }
-
- kfree(sgl);
-}
-
-static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
-{
- int i;
-
- /* No need to check */
- if (sg_count < 2)
- return -1;
-
- /* We have at least 2 sg entries */
- for (i = 0; i < sg_count; i++) {
- if (i == 0) {
- /* make sure 1st one does not have hole */
- if (sgl[i].offset + sgl[i].length != PAGE_SIZE)
- return i;
- } else if (i == sg_count - 1) {
- /* make sure last one does not have hole */
- if (sgl[i].offset != 0)
- return i;
- } else {
- /* make sure no hole in the middle */
- if (sgl[i].length != PAGE_SIZE || sgl[i].offset != 0)
- return i;
- }
- }
- return -1;
-}
-
-static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
- unsigned int sg_count,
- unsigned int len,
- int write)
-{
- int i;
- int num_pages;
- struct scatterlist *bounce_sgl;
- struct page *page_buf;
- unsigned int buf_len = ((write == WRITE_TYPE) ? 0 : PAGE_SIZE);
-
- num_pages = ALIGN(len, PAGE_SIZE) >> PAGE_SHIFT;
-
- bounce_sgl = kcalloc(num_pages, sizeof(struct scatterlist), GFP_ATOMIC);
- if (!bounce_sgl)
- return NULL;
-
- for (i = 0; i < num_pages; i++) {
- page_buf = alloc_page(GFP_ATOMIC);
- if (!page_buf)
- goto cleanup;
- sg_set_page(&bounce_sgl[i], page_buf, buf_len, 0);
- }
-
- return bounce_sgl;
-
-cleanup:
- destroy_bounce_buffer(bounce_sgl, num_pages);
- return NULL;
-}
-
-
-/* Assume the original sgl has enough room */
-static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
- struct scatterlist *bounce_sgl,
- unsigned int orig_sgl_count,
- unsigned int bounce_sgl_count)
-{
- int i;
- int j = 0;
- unsigned long src, dest;
- unsigned int srclen, destlen, copylen;
- unsigned int total_copied = 0;
- unsigned long bounce_addr = 0;
- unsigned long dest_addr = 0;
- unsigned long flags;
-
- local_irq_save(flags);
-
- for (i = 0; i < orig_sgl_count; i++) {
- dest_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
- KM_IRQ0) + orig_sgl[i].offset;
- dest = dest_addr;
- destlen = orig_sgl[i].length;
-
- if (bounce_addr == 0)
- bounce_addr =
- (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
- KM_IRQ0);
-
- while (destlen) {
- src = bounce_addr + bounce_sgl[j].offset;
- srclen = bounce_sgl[j].length - bounce_sgl[j].offset;
-
- copylen = min(srclen, destlen);
- memcpy((void *)dest, (void *)src, copylen);
-
- total_copied += copylen;
- bounce_sgl[j].offset += copylen;
- destlen -= copylen;
- dest += copylen;
-
- if (bounce_sgl[j].offset == bounce_sgl[j].length) {
- /* full */
- kunmap_atomic((void *)bounce_addr, KM_IRQ0);
- j++;
-
- /*
- * It is possible that the number of elements
- * in the bounce buffer may not be equal to
- * the number of elements in the original
- * scatter list. Handle this correctly.
- */
-
- if (j == bounce_sgl_count) {
- /*
- * We are done; cleanup and return.
- */
- kunmap_atomic((void *)(dest_addr -
- orig_sgl[i].offset),
- KM_IRQ0);
- local_irq_restore(flags);
- return total_copied;
- }
-
- /* if we need to use another bounce buffer */
- if (destlen || i != orig_sgl_count - 1)
- bounce_addr =
- (unsigned long)kmap_atomic(
- sg_page((&bounce_sgl[j])), KM_IRQ0);
- } else if (destlen == 0 && i == orig_sgl_count - 1) {
- /* unmap the last bounce that is < PAGE_SIZE */
- kunmap_atomic((void *)bounce_addr, KM_IRQ0);
- }
- }
-
- kunmap_atomic((void *)(dest_addr - orig_sgl[i].offset),
- KM_IRQ0);
- }
-
- local_irq_restore(flags);
-
- return total_copied;
-}
-
-
-/* Assume the bounce_sgl has enough room ie using the create_bounce_buffer() */
-static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
- struct scatterlist *bounce_sgl,
- unsigned int orig_sgl_count)
-{
- int i;
- int j = 0;
- unsigned long src, dest;
- unsigned int srclen, destlen, copylen;
- unsigned int total_copied = 0;
- unsigned long bounce_addr = 0;
- unsigned long src_addr = 0;
- unsigned long flags;
-
- local_irq_save(flags);
-
- for (i = 0; i < orig_sgl_count; i++) {
- src_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
- KM_IRQ0) + orig_sgl[i].offset;
- src = src_addr;
- srclen = orig_sgl[i].length;
-
- if (bounce_addr == 0)
- bounce_addr =
- (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
- KM_IRQ0);
-
- while (srclen) {
- /* assume bounce offset always == 0 */
- dest = bounce_addr + bounce_sgl[j].length;
- destlen = PAGE_SIZE - bounce_sgl[j].length;
-
- copylen = min(srclen, destlen);
- memcpy((void *)dest, (void *)src, copylen);
-
- total_copied += copylen;
- bounce_sgl[j].length += copylen;
- srclen -= copylen;
- src += copylen;
-
- if (bounce_sgl[j].length == PAGE_SIZE) {
- /* full..move to next entry */
- kunmap_atomic((void *)bounce_addr, KM_IRQ0);
- j++;
-
- /* if we need to use another bounce buffer */
- if (srclen || i != orig_sgl_count - 1)
- bounce_addr =
- (unsigned long)kmap_atomic(
- sg_page((&bounce_sgl[j])), KM_IRQ0);
-
- } else if (srclen == 0 && i == orig_sgl_count - 1) {
- /* unmap the last bounce that is < PAGE_SIZE */
- kunmap_atomic((void *)bounce_addr, KM_IRQ0);
- }
- }
-
- kunmap_atomic((void *)(src_addr - orig_sgl[i].offset), KM_IRQ0);
- }
-
- local_irq_restore(flags);
-
- return total_copied;
-}
-
-
-static int storvsc_remove(struct hv_device *dev)
-{
- struct storvsc_device *stor_device = hv_get_drvdata(dev);
- struct Scsi_Host *host = stor_device->host;
-
- scsi_remove_host(host);
-
- scsi_host_put(host);
-
- storvsc_dev_remove(dev);
-
- return 0;
-}
-
-
static int storvsc_get_chs(struct scsi_device *sdev, struct block_device * bdev,
sector_t capacity, int *info)
{
@@ -1111,10 +1171,13 @@ static int storvsc_get_chs(struct scsi_device *sdev, struct block_device * bdev,
return 0;
}
-static int storvsc_host_reset(struct hv_device *device)
+static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
{
+ struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
+ struct hv_device *device = host_dev->dev;
+
struct storvsc_device *stor_device;
- struct hv_storvsc_request *request;
+ struct storvsc_cmd_request *request;
struct vstor_packet *vstor_packet;
int ret, t;
@@ -1153,105 +1216,16 @@ static int storvsc_host_reset(struct hv_device *device)
return SUCCESS;
}
-
-/*
- * storvsc_host_reset_handler - Reset the scsi HBA
- */
-static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
-{
- struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
- struct hv_device *dev = host_dev->dev;
-
- return storvsc_host_reset(dev);
-}
-
-
-/*
- * storvsc_command_completion - Command completion processing
- */
-static void storvsc_command_completion(struct hv_storvsc_request *request)
-{
- struct storvsc_cmd_request *cmd_request =
- (struct storvsc_cmd_request *)request->context;
- struct scsi_cmnd *scmnd = cmd_request->cmd;
- struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
- void (*scsi_done_fn)(struct scsi_cmnd *);
- struct scsi_sense_hdr sense_hdr;
- struct vmscsi_request *vm_srb;
- struct storvsc_scan_work *wrk;
- struct stor_mem_pools *memp = scmnd->device->hostdata;
-
- vm_srb = &request->vstor_packet.vm_srb;
- if (cmd_request->bounce_sgl_count) {
- if (vm_srb->data_in == READ_TYPE)
- copy_from_bounce_buffer(scsi_sglist(scmnd),
- cmd_request->bounce_sgl,
- scsi_sg_count(scmnd),
- cmd_request->bounce_sgl_count);
- destroy_bounce_buffer(cmd_request->bounce_sgl,
- cmd_request->bounce_sgl_count);
- }
-
- /*
- * If there is an error; offline the device since all
- * error recovery strategies would have already been
- * deployed on the host side.
- */
- if (vm_srb->srb_status == 0x4)
- scmnd->result = DID_TARGET_FAILURE << 16;
- else
- scmnd->result = vm_srb->scsi_status;
-
- /*
- * If the LUN is invalid; remove the device.
- */
- if (vm_srb->srb_status == 0x20) {
- struct storvsc_device *stor_dev;
- struct hv_device *dev = host_dev->dev;
- struct Scsi_Host *host;
-
- stor_dev = get_in_stor_device(dev);
- host = stor_dev->host;
-
- wrk = kmalloc(sizeof(struct storvsc_scan_work),
- GFP_ATOMIC);
- if (!wrk) {
- scmnd->result = DID_TARGET_FAILURE << 16;
- } else {
- wrk->host = host;
- wrk->lun = vm_srb->lun;
- INIT_WORK(&wrk->work, storvsc_remove_lun);
- schedule_work(&wrk->work);
- }
- }
-
- if (scmnd->result) {
- if (scsi_normalize_sense(scmnd->sense_buffer,
- SCSI_SENSE_BUFFERSIZE, &sense_hdr))
- scsi_print_sense_hdr("storvsc", &sense_hdr);
- }
-
- scsi_set_resid(scmnd,
- request->data_buffer.len -
- vm_srb->data_transfer_length);
-
- scsi_done_fn = scmnd->scsi_done;
-
- scmnd->host_scribble = NULL;
- scmnd->scsi_done = NULL;
-
- scsi_done_fn(scmnd);
-
- mempool_free(cmd_request, memp->request_mempool);
-}
-
-static bool storvsc_check_scsi_cmd(struct scsi_cmnd *scmnd)
+static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
{
bool allowed = true;
u8 scsi_op = scmnd->cmnd[0];
switch (scsi_op) {
- /* smartd sends this command, which will offline the device */
+ /*
+	 * smartd sends this command, but the host does not handle
+	 * it, so don't send it.
+ */
case SET_WINDOW:
scmnd->result = ILLEGAL_REQUEST << 16;
allowed = false;
@@ -1262,15 +1236,11 @@ static bool storvsc_check_scsi_cmd(struct scsi_cmnd *scmnd)
return allowed;
}
-/*
- * storvsc_queuecommand - Initiate command processing
- */
static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
{
int ret;
struct hv_host_device *host_dev = shost_priv(host);
struct hv_device *dev = host_dev->dev;
- struct hv_storvsc_request *request;
struct storvsc_cmd_request *cmd_request;
unsigned int request_size = 0;
int i;
@@ -1279,38 +1249,31 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
struct vmscsi_request *vm_srb;
struct stor_mem_pools *memp = scmnd->device->hostdata;
- if (storvsc_check_scsi_cmd(scmnd) == false) {
+ if (!storvsc_scsi_cmd_ok(scmnd)) {
scmnd->scsi_done(scmnd);
return 0;
}
- /* If retrying, no need to prep the cmd */
- if (scmnd->host_scribble) {
-
- cmd_request =
- (struct storvsc_cmd_request *)scmnd->host_scribble;
-
- goto retry_request;
- }
-
request_size = sizeof(struct storvsc_cmd_request);
cmd_request = mempool_alloc(memp->request_mempool,
GFP_ATOMIC);
+
+ /*
+ * We might be invoked in an interrupt context; hence
+ * mempool_alloc() can fail.
+ */
if (!cmd_request)
return SCSI_MLQUEUE_DEVICE_BUSY;
memset(cmd_request, 0, sizeof(struct storvsc_cmd_request));
/* Setup the cmd request */
- cmd_request->bounce_sgl_count = 0;
- cmd_request->bounce_sgl = NULL;
cmd_request->cmd = scmnd;
scmnd->host_scribble = (unsigned char *)cmd_request;
- request = &cmd_request->request;
- vm_srb = &request->vstor_packet.vm_srb;
+ vm_srb = &cmd_request->vstor_packet.vm_srb;
/* Build the SRB */
@@ -1326,8 +1289,6 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
break;
}
- request->on_io_completion = storvsc_command_completion;
- request->context = cmd_request;/* scmnd; */
vm_srb->port_number = host_dev->port;
vm_srb->path_id = scmnd->device->channel;
@@ -1338,10 +1299,10 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length);
- request->sense_buffer = scmnd->sense_buffer;
+ cmd_request->sense_buffer = scmnd->sense_buffer;
- request->data_buffer.len = scsi_bufflen(scmnd);
+ cmd_request->data_buffer.len = scsi_bufflen(scmnd);
if (scsi_sg_count(scmnd)) {
sgl = (struct scatterlist *)scsi_sglist(scmnd);
sg_count = scsi_sg_count(scmnd);
@@ -1353,11 +1314,8 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
scsi_bufflen(scmnd),
vm_srb->data_in);
if (!cmd_request->bounce_sgl) {
- scmnd->host_scribble = NULL;
- mempool_free(cmd_request,
- memp->request_mempool);
-
- return SCSI_MLQUEUE_HOST_BUSY;
+ ret = SCSI_MLQUEUE_HOST_BUSY;
+ goto queue_error;
}
cmd_request->bounce_sgl_count =
@@ -1373,41 +1331,42 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
sg_count = cmd_request->bounce_sgl_count;
}
- request->data_buffer.offset = sgl[0].offset;
+ cmd_request->data_buffer.offset = sgl[0].offset;
for (i = 0; i < sg_count; i++)
- request->data_buffer.pfn_array[i] =
+ cmd_request->data_buffer.pfn_array[i] =
page_to_pfn(sg_page((&sgl[i])));
} else if (scsi_sglist(scmnd)) {
- request->data_buffer.offset =
+ cmd_request->data_buffer.offset =
virt_to_phys(scsi_sglist(scmnd)) & (PAGE_SIZE-1);
- request->data_buffer.pfn_array[0] =
+ cmd_request->data_buffer.pfn_array[0] =
virt_to_phys(scsi_sglist(scmnd)) >> PAGE_SHIFT;
}
-retry_request:
/* Invokes the vsc to start an IO */
- ret = storvsc_do_io(dev, &cmd_request->request);
+ ret = storvsc_do_io(dev, cmd_request);
if (ret == -EAGAIN) {
/* no more space */
- if (cmd_request->bounce_sgl_count)
+ if (cmd_request->bounce_sgl_count) {
destroy_bounce_buffer(cmd_request->bounce_sgl,
cmd_request->bounce_sgl_count);
- mempool_free(cmd_request, memp->request_mempool);
-
- scmnd->host_scribble = NULL;
-
- ret = SCSI_MLQUEUE_DEVICE_BUSY;
+ ret = SCSI_MLQUEUE_DEVICE_BUSY;
+ goto queue_error;
+ }
}
+ return 0;
+
+queue_error:
+ mempool_free(cmd_request, memp->request_mempool);
+ scmnd->host_scribble = NULL;
return ret;
}
-/* Scsi driver */
static struct scsi_host_template scsi_driver = {
.module = THIS_MODULE,
.name = "storvsc_host_t",
@@ -1448,11 +1407,6 @@ static const struct hv_vmbus_device_id id_table[] = {
MODULE_DEVICE_TABLE(vmbus, id_table);
-
-/*
- * storvsc_probe - Add a new device for this driver
- */
-
static int storvsc_probe(struct hv_device *device,
const struct hv_vmbus_device_id *dev_id)
{
@@ -1460,7 +1414,6 @@ static int storvsc_probe(struct hv_device *device,
struct Scsi_Host *host;
struct hv_host_device *host_dev;
bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false);
- int path = 0;
int target = 0;
struct storvsc_device *stor_device;
@@ -1493,9 +1446,6 @@ static int storvsc_probe(struct hv_device *device,
if (ret)
goto err_out1;
- if (dev_is_ide)
- storvsc_get_ide_info(device, &target, &path);
-
host_dev->path = stor_device->path_id;
host_dev->target = stor_device->target_id;
@@ -1515,12 +1465,14 @@ static int storvsc_probe(struct hv_device *device,
if (!dev_is_ide) {
scsi_scan_host(host);
- return 0;
- }
- ret = scsi_add_device(host, 0, target, 0);
- if (ret) {
- scsi_remove_host(host);
- goto err_out2;
+ } else {
+ target = (device->dev_instance.b[5] << 8 |
+ device->dev_instance.b[4]);
+ ret = scsi_add_device(host, 0, target, 0);
+ if (ret) {
+ scsi_remove_host(host);
+ goto err_out2;
+ }
}
return 0;
@@ -1542,7 +1494,17 @@ err_out0:
return ret;
}
-/* The one and only one */
+static int storvsc_remove(struct hv_device *dev)
+{
+ struct storvsc_device *stor_device = hv_get_drvdata(dev);
+ struct Scsi_Host *host = stor_device->host;
+
+ scsi_remove_host(host);
+ storvsc_dev_remove(dev);
+ scsi_host_put(host);
+
+ return 0;
+}
static struct hv_driver storvsc_drv = {
.name = KBUILD_MODNAME,
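
In the reworked storvsc_probe() above, the IDE target id is no longer obtained through storvsc_get_ide_info(); it is read straight out of the device instance GUID, with byte 4 as the low byte and byte 5 as the high byte of a 16-bit value. A minimal standalone illustration of that byte packing follows; the sample byte values are made up.

#include <stdio.h>

int main(void)
{
	/* stand-ins for device->dev_instance.b[4] and b[5] */
	unsigned char b4 = 0x01, b5 = 0x00;
	int target = (b5 << 8) | b4;

	printf("target = %d\n", target);	/* prints: target = 1 */
	return 0;
}
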
diff --git a/drivers/staging/iio/Makefile b/drivers/staging/iio/Makefile
index 1340aead18b4..657710b266b0 100644
--- a/drivers/staging/iio/Makefile
+++ b/drivers/staging/iio/Makefile
@@ -3,7 +3,7 @@
#
obj-$(CONFIG_IIO) += industrialio.o
-industrialio-y := industrialio-core.o
+industrialio-y := industrialio-core.o industrialio-event.o
industrialio-$(CONFIG_IIO_BUFFER) += industrialio-buffer.o
industrialio-$(CONFIG_IIO_TRIGGER) += industrialio-trigger.o
diff --git a/drivers/staging/iio/accel/adis16201_ring.c b/drivers/staging/iio/accel/adis16201_ring.c
index 26c610faee3f..97f9e6b159d9 100644
--- a/drivers/staging/iio/accel/adis16201_ring.c
+++ b/drivers/staging/iio/accel/adis16201_ring.c
@@ -115,9 +115,7 @@ int adis16201_configure_ring(struct iio_dev *indio_dev)
return ret;
}
indio_dev->buffer = ring;
- /* Effectively select the ring buffer implementation */
ring->scan_timestamp = true;
- ring->access = &ring_sw_access_funcs;
indio_dev->setup_ops = &adis16201_ring_setup_ops;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
diff --git a/drivers/staging/iio/accel/adis16203_ring.c b/drivers/staging/iio/accel/adis16203_ring.c
index 064640d15e41..6a8963db4f60 100644
--- a/drivers/staging/iio/accel/adis16203_ring.c
+++ b/drivers/staging/iio/accel/adis16203_ring.c
@@ -117,9 +117,7 @@ int adis16203_configure_ring(struct iio_dev *indio_dev)
return ret;
}
indio_dev->buffer = ring;
- /* Effectively select the ring buffer implementation */
ring->scan_timestamp = true;
- ring->access = &ring_sw_access_funcs;
indio_dev->setup_ops = &adis16203_ring_setup_ops;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
diff --git a/drivers/staging/iio/accel/adis16204_ring.c b/drivers/staging/iio/accel/adis16204_ring.c
index 4081179dfa5c..5c8ab7338864 100644
--- a/drivers/staging/iio/accel/adis16204_ring.c
+++ b/drivers/staging/iio/accel/adis16204_ring.c
@@ -112,8 +112,6 @@ int adis16204_configure_ring(struct iio_dev *indio_dev)
return ret;
}
indio_dev->buffer = ring;
- /* Effectively select the ring buffer implementation */
- ring->access = &ring_sw_access_funcs;
ring->scan_timestamp = true;
indio_dev->setup_ops = &adis16204_ring_setup_ops;
diff --git a/drivers/staging/iio/accel/adis16209_ring.c b/drivers/staging/iio/accel/adis16209_ring.c
index 2a6fd334f5f1..57254b6b38b7 100644
--- a/drivers/staging/iio/accel/adis16209_ring.c
+++ b/drivers/staging/iio/accel/adis16209_ring.c
@@ -113,8 +113,6 @@ int adis16209_configure_ring(struct iio_dev *indio_dev)
return ret;
}
indio_dev->buffer = ring;
- /* Effectively select the ring buffer implementation */
- ring->access = &ring_sw_access_funcs;
ring->scan_timestamp = true;
indio_dev->setup_ops = &adis16209_ring_setup_ops;
diff --git a/drivers/staging/iio/accel/adis16240_ring.c b/drivers/staging/iio/accel/adis16240_ring.c
index e23622d96f9f..43ba84e993ad 100644
--- a/drivers/staging/iio/accel/adis16240_ring.c
+++ b/drivers/staging/iio/accel/adis16240_ring.c
@@ -110,8 +110,6 @@ int adis16240_configure_ring(struct iio_dev *indio_dev)
return ret;
}
indio_dev->buffer = ring;
- /* Effectively select the ring buffer implementation */
- ring->access = &ring_sw_access_funcs;
ring->scan_timestamp = true;
indio_dev->setup_ops = &adis16240_ring_setup_ops;
diff --git a/drivers/staging/iio/accel/lis3l02dq.h b/drivers/staging/iio/accel/lis3l02dq.h
index 2db383fc2743..ae5f225b4bb2 100644
--- a/drivers/staging/iio/accel/lis3l02dq.h
+++ b/drivers/staging/iio/accel/lis3l02dq.h
@@ -187,12 +187,10 @@ void lis3l02dq_unconfigure_buffer(struct iio_dev *indio_dev);
#ifdef CONFIG_LIS3L02DQ_BUF_RING_SW
#define lis3l02dq_free_buf iio_sw_rb_free
#define lis3l02dq_alloc_buf iio_sw_rb_allocate
-#define lis3l02dq_access_funcs ring_sw_access_funcs
#endif
#ifdef CONFIG_LIS3L02DQ_BUF_KFIFO
#define lis3l02dq_free_buf iio_kfifo_free
#define lis3l02dq_alloc_buf iio_kfifo_allocate
-#define lis3l02dq_access_funcs kfifo_access_funcs
#endif
irqreturn_t lis3l02dq_data_rdy_trig_poll(int irq, void *private);
#define lis3l02dq_th lis3l02dq_data_rdy_trig_poll
diff --git a/drivers/staging/iio/accel/lis3l02dq_ring.c b/drivers/staging/iio/accel/lis3l02dq_ring.c
index 98c5c92d3450..ca0a1fe6ff3f 100644
--- a/drivers/staging/iio/accel/lis3l02dq_ring.c
+++ b/drivers/staging/iio/accel/lis3l02dq_ring.c
@@ -406,8 +406,6 @@ int lis3l02dq_configure_buffer(struct iio_dev *indio_dev)
return -ENOMEM;
indio_dev->buffer = buffer;
- /* Effectively select the buffer implementation */
- indio_dev->buffer->access = &lis3l02dq_access_funcs;
buffer->scan_timestamp = true;
indio_dev->setup_ops = &lis3l02dq_buffer_setup_ops;
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index 45f4504ed927..9fd6d63d2999 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -561,8 +561,6 @@ static int ad7192_register_ring_funcs_and_init(struct iio_dev *indio_dev)
ret = -ENOMEM;
goto error_ret;
}
- /* Effectively select the ring buffer implementation */
- indio_dev->buffer->access = &ring_sw_access_funcs;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&ad7192_trigger_handler,
IRQF_ONESHOT,
@@ -824,25 +822,20 @@ static struct attribute *ad7192_attributes[] = {
NULL
};
-static umode_t ad7192_attr_is_visible(struct kobject *kobj,
- struct attribute *attr, int n)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad7192_state *st = iio_priv(indio_dev);
-
- umode_t mode = attr->mode;
-
- if ((st->devid != ID_AD7195) &&
- (attr == &iio_dev_attr_ac_excitation_en.dev_attr.attr))
- mode = 0;
-
- return mode;
-}
-
static const struct attribute_group ad7192_attribute_group = {
.attrs = ad7192_attributes,
- .is_visible = ad7192_attr_is_visible,
+};
+
+static struct attribute *ad7195_attributes[] = {
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ &iio_dev_attr_in_v_m_v_scale_available.dev_attr.attr,
+ &iio_dev_attr_in_voltage_scale_available.dev_attr.attr,
+ &iio_dev_attr_bridge_switch_en.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group ad7195_attribute_group = {
+ .attrs = ad7195_attributes,
};
static int ad7192_read_raw(struct iio_dev *indio_dev,
@@ -972,6 +965,15 @@ static const struct iio_info ad7192_info = {
.driver_module = THIS_MODULE,
};
+static const struct iio_info ad7195_info = {
+ .read_raw = &ad7192_read_raw,
+ .write_raw = &ad7192_write_raw,
+ .write_raw_get_fmt = &ad7192_write_raw_get_fmt,
+ .attrs = &ad7195_attribute_group,
+ .validate_trigger = ad7192_validate_trigger,
+ .driver_module = THIS_MODULE,
+};
+
#define AD7192_CHAN_DIFF(_chan, _chan2, _name, _address, _si) \
{ .type = IIO_VOLTAGE, \
.differential = 1, \
@@ -1064,7 +1066,10 @@ static int __devinit ad7192_probe(struct spi_device *spi)
indio_dev->channels = ad7192_channels;
indio_dev->num_channels = ARRAY_SIZE(ad7192_channels);
indio_dev->available_scan_masks = st->available_scan_masks;
- indio_dev->info = &ad7192_info;
+ if (st->devid == ID_AD7195)
+ indio_dev->info = &ad7195_info;
+ else
+ indio_dev->info = &ad7192_info;
for (i = 0; i < indio_dev->num_channels; i++)
st->available_scan_masks[i] = (1 << i) | (1 <<
diff --git a/drivers/staging/iio/adc/ad7298_ring.c b/drivers/staging/iio/adc/ad7298_ring.c
index d1a12dd015e2..feeb0eeba59a 100644
--- a/drivers/staging/iio/adc/ad7298_ring.c
+++ b/drivers/staging/iio/adc/ad7298_ring.c
@@ -131,9 +131,6 @@ int ad7298_register_ring_funcs_and_init(struct iio_dev *indio_dev)
ret = -ENOMEM;
goto error_ret;
}
- /* Effectively select the ring buffer implementation */
- indio_dev->buffer->access = &ring_sw_access_funcs;
-
indio_dev->pollfunc = iio_alloc_pollfunc(NULL,
&ad7298_trigger_handler,
IRQF_ONESHOT,
diff --git a/drivers/staging/iio/adc/ad7476_ring.c b/drivers/staging/iio/adc/ad7476_ring.c
index 4e298b2a05b2..35a8576a2271 100644
--- a/drivers/staging/iio/adc/ad7476_ring.c
+++ b/drivers/staging/iio/adc/ad7476_ring.c
@@ -98,8 +98,6 @@ int ad7476_register_ring_funcs_and_init(struct iio_dev *indio_dev)
ret = -ENOMEM;
goto error_ret;
}
- /* Effectively select the ring buffer implementation */
- indio_dev->buffer->access = &ring_sw_access_funcs;
indio_dev->pollfunc
= iio_alloc_pollfunc(NULL,
&ad7476_trigger_handler,
diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606_core.c
index ddb7ef92f5c1..97e8d3d4471e 100644
--- a/drivers/staging/iio/adc/ad7606_core.c
+++ b/drivers/staging/iio/adc/ad7606_core.c
@@ -197,7 +197,7 @@ static IIO_DEVICE_ATTR(oversampling_ratio, S_IRUGO | S_IWUSR,
ad7606_store_oversampling_ratio, 0);
static IIO_CONST_ATTR(oversampling_ratio_available, "0 2 4 8 16 32 64");
-static struct attribute *ad7606_attributes[] = {
+static struct attribute *ad7606_attributes_os_and_range[] = {
&iio_dev_attr_in_voltage_range.dev_attr.attr,
&iio_const_attr_in_voltage_range_available.dev_attr.attr,
&iio_dev_attr_oversampling_ratio.dev_attr.attr,
@@ -205,34 +205,28 @@ static struct attribute *ad7606_attributes[] = {
NULL,
};
-static umode_t ad7606_attr_is_visible(struct kobject *kobj,
- struct attribute *attr, int n)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad7606_state *st = iio_priv(indio_dev);
+static const struct attribute_group ad7606_attribute_group_os_and_range = {
+ .attrs = ad7606_attributes_os_and_range,
+};
- umode_t mode = attr->mode;
-
- if (!(gpio_is_valid(st->pdata->gpio_os0) &&
- gpio_is_valid(st->pdata->gpio_os1) &&
- gpio_is_valid(st->pdata->gpio_os2)) &&
- (attr == &iio_dev_attr_oversampling_ratio.dev_attr.attr ||
- attr ==
- &iio_const_attr_oversampling_ratio_available.dev_attr.attr))
- mode = 0;
- else if (!gpio_is_valid(st->pdata->gpio_range) &&
- (attr == &iio_dev_attr_in_voltage_range.dev_attr.attr ||
- attr ==
- &iio_const_attr_in_voltage_range_available.dev_attr.attr))
- mode = 0;
-
- return mode;
-}
+static struct attribute *ad7606_attributes_os[] = {
+ &iio_dev_attr_oversampling_ratio.dev_attr.attr,
+ &iio_const_attr_oversampling_ratio_available.dev_attr.attr,
+ NULL,
+};
-static const struct attribute_group ad7606_attribute_group = {
- .attrs = ad7606_attributes,
- .is_visible = ad7606_attr_is_visible,
+static const struct attribute_group ad7606_attribute_group_os = {
+ .attrs = ad7606_attributes_os,
+};
+
+static struct attribute *ad7606_attributes_range[] = {
+ &iio_dev_attr_in_voltage_range.dev_attr.attr,
+ &iio_const_attr_in_voltage_range_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ad7606_attribute_group_range = {
+ .attrs = ad7606_attributes_range,
};
#define AD7606_CHANNEL(num) \
@@ -435,10 +429,27 @@ static irqreturn_t ad7606_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
};
-static const struct iio_info ad7606_info = {
+static const struct iio_info ad7606_info_no_os_or_range = {
.driver_module = THIS_MODULE,
.read_raw = &ad7606_read_raw,
- .attrs = &ad7606_attribute_group,
+};
+
+static const struct iio_info ad7606_info_os_and_range = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &ad7606_read_raw,
+ .attrs = &ad7606_attribute_group_os_and_range,
+};
+
+static const struct iio_info ad7606_info_os = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &ad7606_read_raw,
+ .attrs = &ad7606_attribute_group_os,
+};
+
+static const struct iio_info ad7606_info_range = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &ad7606_read_raw,
+ .attrs = &ad7606_attribute_group_range,
};
struct iio_dev *ad7606_probe(struct device *dev, int irq,
@@ -483,7 +494,19 @@ struct iio_dev *ad7606_probe(struct device *dev, int irq,
st->chip_info = &ad7606_chip_info_tbl[id];
indio_dev->dev.parent = dev;
- indio_dev->info = &ad7606_info;
+ if (gpio_is_valid(st->pdata->gpio_os0) &&
+ gpio_is_valid(st->pdata->gpio_os1) &&
+ gpio_is_valid(st->pdata->gpio_os2)) {
+ if (gpio_is_valid(st->pdata->gpio_range))
+ indio_dev->info = &ad7606_info_os_and_range;
+ else
+ indio_dev->info = &ad7606_info_os;
+ } else {
+ if (gpio_is_valid(st->pdata->gpio_range))
+ indio_dev->info = &ad7606_info_range;
+ else
+ indio_dev->info = &ad7606_info_no_os_or_range;
+ }
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->name = st->chip_info->name;
indio_dev->channels = st->chip_info->channels;
diff --git a/drivers/staging/iio/adc/ad7606_ring.c b/drivers/staging/iio/adc/ad7606_ring.c
index e8f94a18a943..1ef9fbcaf2de 100644
--- a/drivers/staging/iio/adc/ad7606_ring.c
+++ b/drivers/staging/iio/adc/ad7606_ring.c
@@ -110,8 +110,6 @@ int ad7606_register_ring_funcs_and_init(struct iio_dev *indio_dev)
goto error_ret;
}
- /* Effectively select the ring buffer implementation */
- indio_dev->buffer->access = &ring_sw_access_funcs;
indio_dev->pollfunc = iio_alloc_pollfunc(&ad7606_trigger_handler_th_bh,
&ad7606_trigger_handler_th_bh,
0,
diff --git a/drivers/staging/iio/adc/ad7793.c b/drivers/staging/iio/adc/ad7793.c
index 6a058b19c49a..84ecde1ad042 100644
--- a/drivers/staging/iio/adc/ad7793.c
+++ b/drivers/staging/iio/adc/ad7793.c
@@ -427,8 +427,6 @@ static int ad7793_register_ring_funcs_and_init(struct iio_dev *indio_dev)
ret = -ENOMEM;
goto error_ret;
}
- /* Effectively select the ring buffer implementation */
- indio_dev->buffer->access = &ring_sw_access_funcs;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&ad7793_trigger_handler,
IRQF_ONESHOT,
diff --git a/drivers/staging/iio/adc/ad7887_ring.c b/drivers/staging/iio/adc/ad7887_ring.c
index 85076cd962e7..d1809079b63d 100644
--- a/drivers/staging/iio/adc/ad7887_ring.c
+++ b/drivers/staging/iio/adc/ad7887_ring.c
@@ -131,8 +131,6 @@ int ad7887_register_ring_funcs_and_init(struct iio_dev *indio_dev)
ret = -ENOMEM;
goto error_ret;
}
- /* Effectively select the ring buffer implementation */
- indio_dev->buffer->access = &ring_sw_access_funcs;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&ad7887_trigger_handler,
IRQF_ONESHOT,
diff --git a/drivers/staging/iio/adc/ad799x_ring.c b/drivers/staging/iio/adc/ad799x_ring.c
index 5dded9e7820a..28e9a4192910 100644
--- a/drivers/staging/iio/adc/ad799x_ring.c
+++ b/drivers/staging/iio/adc/ad799x_ring.c
@@ -141,8 +141,6 @@ int ad799x_register_ring_funcs_and_init(struct iio_dev *indio_dev)
ret = -ENOMEM;
goto error_ret;
}
- /* Effectively select the ring buffer implementation */
- indio_dev->buffer->access = &ring_sw_access_funcs;
indio_dev->pollfunc = iio_alloc_pollfunc(NULL,
&ad799x_trigger_handler,
IRQF_ONESHOT,
diff --git a/drivers/staging/iio/adc/adt7310.c b/drivers/staging/iio/adc/adt7310.c
index eec2f325d549..caf57c1169b1 100644
--- a/drivers/staging/iio/adc/adt7310.c
+++ b/drivers/staging/iio/adc/adt7310.c
@@ -725,32 +725,19 @@ static struct attribute *adt7310_event_int_attributes[] = {
&iio_dev_attr_fault_queue.dev_attr.attr,
&iio_dev_attr_t_alarm_high.dev_attr.attr,
&iio_dev_attr_t_alarm_low.dev_attr.attr,
- &iio_dev_attr_t_hyst.dev_attr.attr,
- NULL,
-};
-
-static struct attribute *adt7310_event_ct_attributes[] = {
- &iio_dev_attr_event_mode.dev_attr.attr,
- &iio_dev_attr_available_event_modes.dev_attr.attr,
- &iio_dev_attr_fault_queue.dev_attr.attr,
&iio_dev_attr_t_crit.dev_attr.attr,
&iio_dev_attr_t_hyst.dev_attr.attr,
NULL,
};
-static struct attribute_group adt7310_event_attribute_group[ADT7310_IRQS] = {
- {
- .attrs = adt7310_event_int_attributes,
- .name = "events",
- }, {
- .attrs = adt7310_event_ct_attributes,
- .name = "events",
- }
+static struct attribute_group adt7310_event_attribute_group = {
+ .attrs = adt7310_event_int_attributes,
+ .name = "events",
};
static const struct iio_info adt7310_info = {
.attrs = &adt7310_attribute_group,
- .event_attrs = adt7310_event_attribute_group,
+ .event_attrs = &adt7310_event_attribute_group,
.driver_module = THIS_MODULE,
};
diff --git a/drivers/staging/iio/adc/adt7410.c b/drivers/staging/iio/adc/adt7410.c
index c62248ceb37a..dff3e8ca2d78 100644
--- a/drivers/staging/iio/adc/adt7410.c
+++ b/drivers/staging/iio/adc/adt7410.c
@@ -693,32 +693,19 @@ static struct attribute *adt7410_event_int_attributes[] = {
&iio_dev_attr_fault_queue.dev_attr.attr,
&iio_dev_attr_t_alarm_high.dev_attr.attr,
&iio_dev_attr_t_alarm_low.dev_attr.attr,
- &iio_dev_attr_t_hyst.dev_attr.attr,
- NULL,
-};
-
-static struct attribute *adt7410_event_ct_attributes[] = {
- &iio_dev_attr_event_mode.dev_attr.attr,
- &iio_dev_attr_available_event_modes.dev_attr.attr,
- &iio_dev_attr_fault_queue.dev_attr.attr,
&iio_dev_attr_t_crit.dev_attr.attr,
&iio_dev_attr_t_hyst.dev_attr.attr,
NULL,
};
-static struct attribute_group adt7410_event_attribute_group[ADT7410_IRQS] = {
- {
- .attrs = adt7410_event_int_attributes,
- .name = "events",
- }, {
- .attrs = adt7410_event_ct_attributes,
- .name = "events",
- }
+static struct attribute_group adt7410_event_attribute_group = {
+ .attrs = adt7410_event_int_attributes,
+ .name = "events",
};
static const struct iio_info adt7410_info = {
.attrs = &adt7410_attribute_group,
- .event_attrs = adt7410_event_attribute_group,
+ .event_attrs = &adt7410_event_attribute_group,
.driver_module = THIS_MODULE,
};
diff --git a/drivers/staging/iio/adc/max1363_ring.c b/drivers/staging/iio/adc/max1363_ring.c
index f730b3fb971a..d0a60a382930 100644
--- a/drivers/staging/iio/adc/max1363_ring.c
+++ b/drivers/staging/iio/adc/max1363_ring.c
@@ -116,8 +116,6 @@ int max1363_register_ring_funcs_and_init(struct iio_dev *indio_dev)
ret = -ENOMEM;
goto error_deallocate_sw_rb;
}
- /* Effectively select the ring buffer implementation */
- indio_dev->buffer->access = &ring_sw_access_funcs;
/* Ring buffer functions - here trigger setup related */
indio_dev->setup_ops = &max1363_ring_setup_ops;
diff --git a/drivers/staging/iio/dac/ad5446.c b/drivers/staging/iio/dac/ad5446.c
index 693e7482524c..e439baea3332 100644
--- a/drivers/staging/iio/dac/ad5446.c
+++ b/drivers/staging/iio/dac/ad5446.c
@@ -149,30 +149,8 @@ static struct attribute *ad5446_attributes[] = {
NULL,
};
-static umode_t ad5446_attr_is_visible(struct kobject *kobj,
- struct attribute *attr, int n)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5446_state *st = iio_priv(indio_dev);
-
- umode_t mode = attr->mode;
-
- if (!st->chip_info->store_pwr_down &&
- (attr == &iio_dev_attr_out_voltage0_powerdown.dev_attr.attr ||
- attr == &iio_dev_attr_out_voltage_powerdown_mode.
- dev_attr.attr ||
- attr ==
- &iio_const_attr_out_voltage_powerdown_mode_available.
- dev_attr.attr))
- mode = 0;
-
- return mode;
-}
-
static const struct attribute_group ad5446_attribute_group = {
.attrs = ad5446_attributes,
- .is_visible = ad5446_attr_is_visible,
};
#define AD5446_CHANNEL(bits, storage, shift) { \
@@ -321,6 +299,12 @@ static const struct iio_info ad5446_info = {
.driver_module = THIS_MODULE,
};
+static const struct iio_info ad5446_info_no_pwr_down = {
+ .read_raw = ad5446_read_raw,
+ .write_raw = ad5446_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
static int __devinit ad5446_probe(struct spi_device *spi)
{
struct ad5446_state *st;
@@ -353,7 +337,10 @@ static int __devinit ad5446_probe(struct spi_device *spi)
/* Estabilish that the iio_dev is a child of the spi device */
indio_dev->dev.parent = &spi->dev;
indio_dev->name = spi_get_device_id(spi)->name;
- indio_dev->info = &ad5446_info;
+ if (st->chip_info->store_pwr_down)
+ indio_dev->info = &ad5446_info;
+ else
+ indio_dev->info = &ad5446_info_no_pwr_down;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = &st->chip_info->channel;
indio_dev->num_channels = 1;
diff --git a/drivers/staging/iio/dds/ad9834.c b/drivers/staging/iio/dds/ad9834.c
index 5e67104fea18..38a2de08626f 100644
--- a/drivers/staging/iio/dds/ad9834.c
+++ b/drivers/staging/iio/dds/ad9834.c
@@ -281,29 +281,27 @@ static struct attribute *ad9834_attributes[] = {
NULL,
};
-static umode_t ad9834_attr_is_visible(struct kobject *kobj,
- struct attribute *attr, int n)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad9834_state *st = iio_priv(indio_dev);
-
- umode_t mode = attr->mode;
-
- if (((st->devid == ID_AD9833) || (st->devid == ID_AD9837)) &&
- ((attr == &iio_dev_attr_dds0_out1_enable.dev_attr.attr) ||
- (attr == &iio_dev_attr_dds0_out1_wavetype.dev_attr.attr) ||
- (attr ==
- &iio_dev_attr_dds0_out1_wavetype_available.dev_attr.attr) ||
- (attr == &iio_dev_attr_dds0_pincontrol_en.dev_attr.attr)))
- mode = 0;
-
- return mode;
-}
+static struct attribute *ad9833_attributes[] = {
+ &iio_dev_attr_dds0_freq0.dev_attr.attr,
+ &iio_dev_attr_dds0_freq1.dev_attr.attr,
+ &iio_const_attr_dds0_freq_scale.dev_attr.attr,
+ &iio_dev_attr_dds0_phase0.dev_attr.attr,
+ &iio_dev_attr_dds0_phase1.dev_attr.attr,
+ &iio_const_attr_dds0_phase_scale.dev_attr.attr,
+ &iio_dev_attr_dds0_freqsymbol.dev_attr.attr,
+ &iio_dev_attr_dds0_phasesymbol.dev_attr.attr,
+ &iio_dev_attr_dds0_out_enable.dev_attr.attr,
+ &iio_dev_attr_dds0_out0_wavetype.dev_attr.attr,
+ &iio_dev_attr_dds0_out0_wavetype_available.dev_attr.attr,
+ NULL,
+};
static const struct attribute_group ad9834_attribute_group = {
.attrs = ad9834_attributes,
- .is_visible = ad9834_attr_is_visible,
+};
+
+static const struct attribute_group ad9833_attribute_group = {
+ .attrs = ad9833_attributes,
};
static const struct iio_info ad9834_info = {
@@ -311,6 +309,11 @@ static const struct iio_info ad9834_info = {
.driver_module = THIS_MODULE,
};
+static const struct iio_info ad9833_info = {
+ .attrs = &ad9833_attribute_group,
+ .driver_module = THIS_MODULE,
+};
+
static int __devinit ad9834_probe(struct spi_device *spi)
{
struct ad9834_platform_data *pdata = spi->dev.platform_data;
@@ -344,7 +347,15 @@ static int __devinit ad9834_probe(struct spi_device *spi)
st->reg = reg;
indio_dev->dev.parent = &spi->dev;
indio_dev->name = spi_get_device_id(spi)->name;
- indio_dev->info = &ad9834_info;
+ switch (st->devid) {
+ case ID_AD9833:
+ case ID_AD9837:
+ indio_dev->info = &ad9833_info;
+ break;
+ default:
+ indio_dev->info = &ad9834_info;
+ break;
+ }
indio_dev->modes = INDIO_DIRECT_MODE;
/* Setup default messages */
diff --git a/drivers/staging/iio/gyro/adis16260_ring.c b/drivers/staging/iio/gyro/adis16260_ring.c
index 699a6152c409..711f15122a08 100644
--- a/drivers/staging/iio/gyro/adis16260_ring.c
+++ b/drivers/staging/iio/gyro/adis16260_ring.c
@@ -115,8 +115,6 @@ int adis16260_configure_ring(struct iio_dev *indio_dev)
return ret;
}
indio_dev->buffer = ring;
- /* Effectively select the ring buffer implementation */
- ring->access = &ring_sw_access_funcs;
ring->scan_timestamp = true;
indio_dev->setup_ops = &adis16260_ring_setup_ops;
diff --git a/drivers/staging/iio/iio_core.h b/drivers/staging/iio/iio_core.h
index 107cfb1cbb01..c9dfcba0bac8 100644
--- a/drivers/staging/iio/iio_core.h
+++ b/drivers/staging/iio/iio_core.h
@@ -49,4 +49,8 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
#endif
+int iio_device_register_eventset(struct iio_dev *indio_dev);
+void iio_device_unregister_eventset(struct iio_dev *indio_dev);
+int iio_event_getfd(struct iio_dev *indio_dev);
+
#endif
diff --git a/drivers/staging/iio/iio_simple_dummy_buffer.c b/drivers/staging/iio/iio_simple_dummy_buffer.c
index d6a1c0e82a5b..bb4daf744362 100644
--- a/drivers/staging/iio/iio_simple_dummy_buffer.c
+++ b/drivers/staging/iio/iio_simple_dummy_buffer.c
@@ -142,8 +142,6 @@ int iio_simple_dummy_configure_buffer(struct iio_dev *indio_dev)
}
indio_dev->buffer = buffer;
- /* Tell the core how to access the buffer */
- buffer->access = &kfifo_access_funcs;
/* Enable timestamps by default */
buffer->scan_timestamp = true;
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 9a2ca55625f4..cd82b56d58af 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -607,9 +607,6 @@ static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev)
if (!indio_dev->buffer)
return -ENOMEM;
- /* Effectively select the ring buffer implementation */
- indio_dev->buffer->access = &ring_sw_access_funcs;
-
/* Ring buffer functions - here trigger setup related */
indio_dev->setup_ops = &ad5933_ring_setup_ops;
diff --git a/drivers/staging/iio/imu/adis16400_ring.c b/drivers/staging/iio/imu/adis16400_ring.c
index ac22de573f3e..8daa038b23e6 100644
--- a/drivers/staging/iio/imu/adis16400_ring.c
+++ b/drivers/staging/iio/imu/adis16400_ring.c
@@ -187,8 +187,6 @@ int adis16400_configure_ring(struct iio_dev *indio_dev)
return ret;
}
indio_dev->buffer = ring;
- /* Effectively select the ring buffer implementation */
- ring->access = &ring_sw_access_funcs;
ring->scan_timestamp = true;
indio_dev->setup_ops = &adis16400_ring_setup_ops;
diff --git a/drivers/staging/iio/industrialio-core.c b/drivers/staging/iio/industrialio-core.c
index 19f897f3c85e..e4824fe8b405 100644
--- a/drivers/staging/iio/industrialio-core.c
+++ b/drivers/staging/iio/industrialio-core.c
@@ -100,71 +100,6 @@ const struct iio_chan_spec
return NULL;
}
-/**
- * struct iio_detected_event_list - list element for events that have occurred
- * @list: linked list header
- * @ev: the event itself
- */
-struct iio_detected_event_list {
- struct list_head list;
- struct iio_event_data ev;
-};
-
-/**
- * struct iio_event_interface - chrdev interface for an event line
- * @dev: device assocated with event interface
- * @wait: wait queue to allow blocking reads of events
- * @event_list_lock: mutex to protect the list of detected events
- * @det_events: list of detected events
- * @max_events: maximum number of events before new ones are dropped
- * @current_events: number of events in detected list
- * @flags: file operations related flags including busy flag.
- */
-struct iio_event_interface {
- wait_queue_head_t wait;
- struct mutex event_list_lock;
- struct list_head det_events;
- int max_events;
- int current_events;
- struct list_head dev_attr_list;
- unsigned long flags;
- struct attribute_group group;
-};
-
-int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
-{
- struct iio_event_interface *ev_int = indio_dev->event_interface;
- struct iio_detected_event_list *ev;
- int ret = 0;
-
- /* Does anyone care? */
- mutex_lock(&ev_int->event_list_lock);
- if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
- if (ev_int->current_events == ev_int->max_events) {
- mutex_unlock(&ev_int->event_list_lock);
- return 0;
- }
- ev = kmalloc(sizeof(*ev), GFP_KERNEL);
- if (ev == NULL) {
- ret = -ENOMEM;
- mutex_unlock(&ev_int->event_list_lock);
- goto error_ret;
- }
- ev->ev.id = ev_code;
- ev->ev.timestamp = timestamp;
-
- list_add_tail(&ev->list, &ev_int->det_events);
- ev_int->current_events++;
- mutex_unlock(&ev_int->event_list_lock);
- wake_up_interruptible(&ev_int->wait);
- } else
- mutex_unlock(&ev_int->event_list_lock);
-
-error_ret:
- return ret;
-}
-EXPORT_SYMBOL(iio_push_event);
-
/* This turns up an awful lot */
ssize_t iio_read_const_attr(struct device *dev,
struct device_attribute *attr,
@@ -174,110 +109,6 @@ ssize_t iio_read_const_attr(struct device *dev,
}
EXPORT_SYMBOL(iio_read_const_attr);
-static ssize_t iio_event_chrdev_read(struct file *filep,
- char __user *buf,
- size_t count,
- loff_t *f_ps)
-{
- struct iio_event_interface *ev_int = filep->private_data;
- struct iio_detected_event_list *el;
- size_t len = sizeof(el->ev);
- int ret;
-
- if (count < len)
- return -EINVAL;
-
- mutex_lock(&ev_int->event_list_lock);
- if (list_empty(&ev_int->det_events)) {
- if (filep->f_flags & O_NONBLOCK) {
- ret = -EAGAIN;
- goto error_mutex_unlock;
- }
- mutex_unlock(&ev_int->event_list_lock);
- /* Blocking on device; waiting for something to be there */
- ret = wait_event_interruptible(ev_int->wait,
- !list_empty(&ev_int
- ->det_events));
- if (ret)
- goto error_ret;
- /* Single access device so no one else can get the data */
- mutex_lock(&ev_int->event_list_lock);
- }
-
- el = list_first_entry(&ev_int->det_events,
- struct iio_detected_event_list,
- list);
- if (copy_to_user(buf, &(el->ev), len)) {
- ret = -EFAULT;
- goto error_mutex_unlock;
- }
- list_del(&el->list);
- ev_int->current_events--;
- mutex_unlock(&ev_int->event_list_lock);
- kfree(el);
-
- return len;
-
-error_mutex_unlock:
- mutex_unlock(&ev_int->event_list_lock);
-error_ret:
-
- return ret;
-}
-
-static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
-{
- struct iio_event_interface *ev_int = filep->private_data;
- struct iio_detected_event_list *el, *t;
-
- mutex_lock(&ev_int->event_list_lock);
- clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
- /*
- * In order to maintain a clean state for reopening,
- * clear out any awaiting events. The mask will prevent
- * any new __iio_push_event calls running.
- */
- list_for_each_entry_safe(el, t, &ev_int->det_events, list) {
- list_del(&el->list);
- kfree(el);
- }
- ev_int->current_events = 0;
- mutex_unlock(&ev_int->event_list_lock);
-
- return 0;
-}
-
-static const struct file_operations iio_event_chrdev_fileops = {
- .read = iio_event_chrdev_read,
- .release = iio_event_chrdev_release,
- .owner = THIS_MODULE,
- .llseek = noop_llseek,
-};
-
-static int iio_event_getfd(struct iio_dev *indio_dev)
-{
- struct iio_event_interface *ev_int = indio_dev->event_interface;
- int fd;
-
- if (ev_int == NULL)
- return -ENODEV;
-
- mutex_lock(&ev_int->event_list_lock);
- if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
- mutex_unlock(&ev_int->event_list_lock);
- return -EBUSY;
- }
- mutex_unlock(&ev_int->event_list_lock);
- fd = anon_inode_getfd("iio:event",
- &iio_event_chrdev_fileops, ev_int, O_RDONLY);
- if (fd < 0) {
- mutex_lock(&ev_int->event_list_lock);
- clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
- mutex_unlock(&ev_int->event_list_lock);
- }
- return fd;
-}
-
static int __init iio_init(void)
{
int ret;
@@ -726,295 +557,6 @@ static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
kfree(indio_dev->chan_attr_group.attrs);
}
-static const char * const iio_ev_type_text[] = {
- [IIO_EV_TYPE_THRESH] = "thresh",
- [IIO_EV_TYPE_MAG] = "mag",
- [IIO_EV_TYPE_ROC] = "roc",
- [IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
- [IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
-};
-
-static const char * const iio_ev_dir_text[] = {
- [IIO_EV_DIR_EITHER] = "either",
- [IIO_EV_DIR_RISING] = "rising",
- [IIO_EV_DIR_FALLING] = "falling"
-};
-
-static ssize_t iio_ev_state_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int ret;
- bool val;
-
- ret = strtobool(buf, &val);
- if (ret < 0)
- return ret;
-
- ret = indio_dev->info->write_event_config(indio_dev,
- this_attr->address,
- val);
- return (ret < 0) ? ret : len;
-}
-
-static ssize_t iio_ev_state_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int val = indio_dev->info->read_event_config(indio_dev,
- this_attr->address);
-
- if (val < 0)
- return val;
- else
- return sprintf(buf, "%d\n", val);
-}
-
-static ssize_t iio_ev_value_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int val, ret;
-
- ret = indio_dev->info->read_event_value(indio_dev,
- this_attr->address, &val);
- if (ret < 0)
- return ret;
-
- return sprintf(buf, "%d\n", val);
-}
-
-static ssize_t iio_ev_value_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- unsigned long val;
- int ret;
-
- if (!indio_dev->info->write_event_value)
- return -EINVAL;
-
- ret = strict_strtoul(buf, 10, &val);
- if (ret)
- return ret;
-
- ret = indio_dev->info->write_event_value(indio_dev, this_attr->address,
- val);
- if (ret < 0)
- return ret;
-
- return len;
-}
-
-static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan)
-{
- int ret = 0, i, attrcount = 0;
- u64 mask = 0;
- char *postfix;
- if (!chan->event_mask)
- return 0;
-
- for_each_set_bit(i, &chan->event_mask, sizeof(chan->event_mask)*8) {
- postfix = kasprintf(GFP_KERNEL, "%s_%s_en",
- iio_ev_type_text[i/IIO_EV_DIR_MAX],
- iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
- if (postfix == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- if (chan->modified)
- mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel,
- i/IIO_EV_DIR_MAX,
- i%IIO_EV_DIR_MAX);
- else if (chan->differential)
- mask = IIO_EVENT_CODE(chan->type,
- 0, 0,
- i%IIO_EV_DIR_MAX,
- i/IIO_EV_DIR_MAX,
- 0,
- chan->channel,
- chan->channel2);
- else
- mask = IIO_UNMOD_EVENT_CODE(chan->type,
- chan->channel,
- i/IIO_EV_DIR_MAX,
- i%IIO_EV_DIR_MAX);
-
- ret = __iio_add_chan_devattr(postfix,
- chan,
- &iio_ev_state_show,
- iio_ev_state_store,
- mask,
- 0,
- &indio_dev->dev,
- &indio_dev->event_interface->
- dev_attr_list);
- kfree(postfix);
- if (ret)
- goto error_ret;
- attrcount++;
- postfix = kasprintf(GFP_KERNEL, "%s_%s_value",
- iio_ev_type_text[i/IIO_EV_DIR_MAX],
- iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
- if (postfix == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- ret = __iio_add_chan_devattr(postfix, chan,
- iio_ev_value_show,
- iio_ev_value_store,
- mask,
- 0,
- &indio_dev->dev,
- &indio_dev->event_interface->
- dev_attr_list);
- kfree(postfix);
- if (ret)
- goto error_ret;
- attrcount++;
- }
- ret = attrcount;
-error_ret:
- return ret;
-}
-
-static inline void __iio_remove_event_config_attrs(struct iio_dev *indio_dev)
-{
- struct iio_dev_attr *p, *n;
- list_for_each_entry_safe(p, n,
- &indio_dev->event_interface->
- dev_attr_list, l) {
- kfree(p->dev_attr.attr.name);
- kfree(p);
- }
-}
-
-static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
-{
- int j, ret, attrcount = 0;
-
- INIT_LIST_HEAD(&indio_dev->event_interface->dev_attr_list);
- /* Dynically created from the channels array */
- for (j = 0; j < indio_dev->num_channels; j++) {
- ret = iio_device_add_event_sysfs(indio_dev,
- &indio_dev->channels[j]);
- if (ret < 0)
- goto error_clear_attrs;
- attrcount += ret;
- }
- return attrcount;
-
-error_clear_attrs:
- __iio_remove_event_config_attrs(indio_dev);
-
- return ret;
-}
-
-static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
-{
- int j;
-
- for (j = 0; j < indio_dev->num_channels; j++)
- if (indio_dev->channels[j].event_mask != 0)
- return true;
- return false;
-}
-
-static void iio_setup_ev_int(struct iio_event_interface *ev_int)
-{
- mutex_init(&ev_int->event_list_lock);
- /* discussion point - make this variable? */
- ev_int->max_events = 10;
- ev_int->current_events = 0;
- INIT_LIST_HEAD(&ev_int->det_events);
- init_waitqueue_head(&ev_int->wait);
-}
-
-static const char *iio_event_group_name = "events";
-static int iio_device_register_eventset(struct iio_dev *indio_dev)
-{
- struct iio_dev_attr *p;
- int ret = 0, attrcount_orig = 0, attrcount, attrn;
- struct attribute **attr;
-
- if (!(indio_dev->info->event_attrs ||
- iio_check_for_dynamic_events(indio_dev)))
- return 0;
-
- indio_dev->event_interface =
- kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
- if (indio_dev->event_interface == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
-
- iio_setup_ev_int(indio_dev->event_interface);
- if (indio_dev->info->event_attrs != NULL) {
- attr = indio_dev->info->event_attrs->attrs;
- while (*attr++ != NULL)
- attrcount_orig++;
- }
- attrcount = attrcount_orig;
- if (indio_dev->channels) {
- ret = __iio_add_event_config_attrs(indio_dev);
- if (ret < 0)
- goto error_free_setup_event_lines;
- attrcount += ret;
- }
-
- indio_dev->event_interface->group.name = iio_event_group_name;
- indio_dev->event_interface->group.attrs = kcalloc(attrcount + 1,
- sizeof(indio_dev->event_interface->group.attrs[0]),
- GFP_KERNEL);
- if (indio_dev->event_interface->group.attrs == NULL) {
- ret = -ENOMEM;
- goto error_free_setup_event_lines;
- }
- if (indio_dev->info->event_attrs)
- memcpy(indio_dev->event_interface->group.attrs,
- indio_dev->info->event_attrs->attrs,
- sizeof(indio_dev->event_interface->group.attrs[0])
- *attrcount_orig);
- attrn = attrcount_orig;
- /* Add all elements from the list. */
- list_for_each_entry(p,
- &indio_dev->event_interface->dev_attr_list,
- l)
- indio_dev->event_interface->group.attrs[attrn++] =
- &p->dev_attr.attr;
- indio_dev->groups[indio_dev->groupcounter++] =
- &indio_dev->event_interface->group;
-
- return 0;
-
-error_free_setup_event_lines:
- __iio_remove_event_config_attrs(indio_dev);
- kfree(indio_dev->event_interface);
-error_ret:
-
- return ret;
-}
-
-static void iio_device_unregister_eventset(struct iio_dev *indio_dev)
-{
- if (indio_dev->event_interface == NULL)
- return;
- __iio_remove_event_config_attrs(indio_dev);
- kfree(indio_dev->event_interface->group.attrs);
- kfree(indio_dev->event_interface);
-}
-
static void iio_dev_release(struct device *device)
{
struct iio_dev *indio_dev = container_of(device, struct iio_dev, dev);
diff --git a/drivers/staging/iio/industrialio-event.c b/drivers/staging/iio/industrialio-event.c
new file mode 100644
index 000000000000..66d320bf3020
--- /dev/null
+++ b/drivers/staging/iio/industrialio-event.c
@@ -0,0 +1,454 @@
+/* Industrial I/O event handling
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Based on elements of hwmon and input subsystems.
+ */
+
+#include <linux/anon_inodes.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/kfifo.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include "iio.h"
+#include "iio_core.h"
+#include "sysfs.h"
+#include "events.h"
+
+/**
+ * struct iio_event_interface - chrdev interface for an event line
+ * @wait: wait queue to allow blocking reads of events
+ * @det_events: kfifo holding the detected events
+ * @dev_attr_list: list of event interface sysfs attributes
+ * @flags: file operations related flags including busy flag.
+ * @group: event interface sysfs attribute group
+ */
+struct iio_event_interface {
+ wait_queue_head_t wait;
+ DECLARE_KFIFO(det_events, struct iio_event_data, 16);
+
+ struct list_head dev_attr_list;
+ unsigned long flags;
+ struct attribute_group group;
+};
+
+int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
+{
+ struct iio_event_interface *ev_int = indio_dev->event_interface;
+ struct iio_event_data ev;
+ int copied;
+
+ /* Does anyone care, i.e. is the event chrdev currently held open? */
+ spin_lock(&ev_int->wait.lock);
+ if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
+
+ ev.id = ev_code;
+ ev.timestamp = timestamp;
+
+ copied = kfifo_put(&ev_int->det_events, &ev);
+ if (copied != 0)
+ wake_up_locked_poll(&ev_int->wait, POLLIN);
+ }
+ spin_unlock(&ev_int->wait.lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(iio_push_event);
+
+/**
+ * iio_event_poll() - poll the event queue to find out if it has data
+ */
+static unsigned int iio_event_poll(struct file *filep,
+ struct poll_table_struct *wait)
+{
+ struct iio_event_interface *ev_int = filep->private_data;
+ unsigned int events = 0;
+
+ poll_wait(filep, &ev_int->wait, wait);
+
+ spin_lock(&ev_int->wait.lock);
+ if (!kfifo_is_empty(&ev_int->det_events))
+ events = POLLIN | POLLRDNORM;
+ spin_unlock(&ev_int->wait.lock);
+
+ return events;
+}
+
+static ssize_t iio_event_chrdev_read(struct file *filep,
+ char __user *buf,
+ size_t count,
+ loff_t *f_ps)
+{
+ struct iio_event_interface *ev_int = filep->private_data;
+ unsigned int copied;
+ int ret;
+
+ if (count < sizeof(struct iio_event_data))
+ return -EINVAL;
+
+ spin_lock(&ev_int->wait.lock);
+ if (kfifo_is_empty(&ev_int->det_events)) {
+ if (filep->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ goto error_unlock;
+ }
+ /* Blocking on device; waiting for something to be there */
+ ret = wait_event_interruptible_locked(ev_int->wait,
+ !kfifo_is_empty(&ev_int->det_events));
+ if (ret)
+ goto error_unlock;
+ /* Single access device so no one else can get the data */
+ }
+
+ ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);
+
+error_unlock:
+ spin_unlock(&ev_int->wait.lock);
+
+ return ret ? ret : copied;
+}
+
+static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
+{
+ struct iio_event_interface *ev_int = filep->private_data;
+
+ spin_lock(&ev_int->wait.lock);
+ __clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
+ /*
+ * In order to maintain a clean state for reopening,
+ * clear out any pending events. The busy flag cleared above
+ * prevents new iio_push_event() calls from queueing more.
+ */
+ kfifo_reset_out(&ev_int->det_events);
+ spin_unlock(&ev_int->wait.lock);
+
+ return 0;
+}
+
+static const struct file_operations iio_event_chrdev_fileops = {
+ .read = iio_event_chrdev_read,
+ .poll = iio_event_poll,
+ .release = iio_event_chrdev_release,
+ .owner = THIS_MODULE,
+ .llseek = noop_llseek,
+};
+
+int iio_event_getfd(struct iio_dev *indio_dev)
+{
+ struct iio_event_interface *ev_int = indio_dev->event_interface;
+ int fd;
+
+ if (ev_int == NULL)
+ return -ENODEV;
+
+ spin_lock(&ev_int->wait.lock);
+ if (__test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
+ spin_unlock(&ev_int->wait.lock);
+ return -EBUSY;
+ }
+ spin_unlock(&ev_int->wait.lock);
+ fd = anon_inode_getfd("iio:event",
+ &iio_event_chrdev_fileops, ev_int, O_RDONLY);
+ if (fd < 0) {
+ spin_lock(&ev_int->wait.lock);
+ __clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
+ spin_unlock(&ev_int->wait.lock);
+ }
+ return fd;
+}
+
+static const char * const iio_ev_type_text[] = {
+ [IIO_EV_TYPE_THRESH] = "thresh",
+ [IIO_EV_TYPE_MAG] = "mag",
+ [IIO_EV_TYPE_ROC] = "roc",
+ [IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
+ [IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
+};
+
+static const char * const iio_ev_dir_text[] = {
+ [IIO_EV_DIR_EITHER] = "either",
+ [IIO_EV_DIR_RISING] = "rising",
+ [IIO_EV_DIR_FALLING] = "falling"
+};
+
+static ssize_t iio_ev_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int ret;
+ bool val;
+
+ ret = strtobool(buf, &val);
+ if (ret < 0)
+ return ret;
+
+ ret = indio_dev->info->write_event_config(indio_dev,
+ this_attr->address,
+ val);
+ return (ret < 0) ? ret : len;
+}
+
+static ssize_t iio_ev_state_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int val = indio_dev->info->read_event_config(indio_dev,
+ this_attr->address);
+
+ if (val < 0)
+ return val;
+ else
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t iio_ev_value_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int val, ret;
+
+ ret = indio_dev->info->read_event_value(indio_dev,
+ this_attr->address, &val);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t iio_ev_value_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ unsigned long val;
+ int ret;
+
+ if (!indio_dev->info->write_event_value)
+ return -EINVAL;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ ret = indio_dev->info->write_event_value(indio_dev, this_attr->address,
+ val);
+ if (ret < 0)
+ return ret;
+
+ return len;
+}
+
+static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan)
+{
+ int ret = 0, i, attrcount = 0;
+ u64 mask = 0;
+ char *postfix;
+ if (!chan->event_mask)
+ return 0;
+
+ for_each_set_bit(i, &chan->event_mask, sizeof(chan->event_mask)*8) {
+ postfix = kasprintf(GFP_KERNEL, "%s_%s_en",
+ iio_ev_type_text[i/IIO_EV_DIR_MAX],
+ iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
+ if (postfix == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ if (chan->modified)
+ mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel,
+ i/IIO_EV_DIR_MAX,
+ i%IIO_EV_DIR_MAX);
+ else if (chan->differential)
+ mask = IIO_EVENT_CODE(chan->type,
+ 0, 0,
+ i%IIO_EV_DIR_MAX,
+ i/IIO_EV_DIR_MAX,
+ 0,
+ chan->channel,
+ chan->channel2);
+ else
+ mask = IIO_UNMOD_EVENT_CODE(chan->type,
+ chan->channel,
+ i/IIO_EV_DIR_MAX,
+ i%IIO_EV_DIR_MAX);
+
+ ret = __iio_add_chan_devattr(postfix,
+ chan,
+ &iio_ev_state_show,
+ iio_ev_state_store,
+ mask,
+ 0,
+ &indio_dev->dev,
+ &indio_dev->event_interface->
+ dev_attr_list);
+ kfree(postfix);
+ if (ret)
+ goto error_ret;
+ attrcount++;
+ postfix = kasprintf(GFP_KERNEL, "%s_%s_value",
+ iio_ev_type_text[i/IIO_EV_DIR_MAX],
+ iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
+ if (postfix == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ ret = __iio_add_chan_devattr(postfix, chan,
+ iio_ev_value_show,
+ iio_ev_value_store,
+ mask,
+ 0,
+ &indio_dev->dev,
+ &indio_dev->event_interface->
+ dev_attr_list);
+ kfree(postfix);
+ if (ret)
+ goto error_ret;
+ attrcount++;
+ }
+ ret = attrcount;
+error_ret:
+ return ret;
+}
+
+static inline void __iio_remove_event_config_attrs(struct iio_dev *indio_dev)
+{
+ struct iio_dev_attr *p, *n;
+ list_for_each_entry_safe(p, n,
+ &indio_dev->event_interface->
+ dev_attr_list, l) {
+ kfree(p->dev_attr.attr.name);
+ kfree(p);
+ }
+}
+
+static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
+{
+ int j, ret, attrcount = 0;
+
+ INIT_LIST_HEAD(&indio_dev->event_interface->dev_attr_list);
+ /* Dynamically created from the channels array */
+ for (j = 0; j < indio_dev->num_channels; j++) {
+ ret = iio_device_add_event_sysfs(indio_dev,
+ &indio_dev->channels[j]);
+ if (ret < 0)
+ goto error_clear_attrs;
+ attrcount += ret;
+ }
+ return attrcount;
+
+error_clear_attrs:
+ __iio_remove_event_config_attrs(indio_dev);
+
+ return ret;
+}
+
+static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
+{
+ int j;
+
+ for (j = 0; j < indio_dev->num_channels; j++)
+ if (indio_dev->channels[j].event_mask != 0)
+ return true;
+ return false;
+}
+
+static void iio_setup_ev_int(struct iio_event_interface *ev_int)
+{
+ INIT_KFIFO(ev_int->det_events);
+ init_waitqueue_head(&ev_int->wait);
+}
+
+static const char *iio_event_group_name = "events";
+int iio_device_register_eventset(struct iio_dev *indio_dev)
+{
+ struct iio_dev_attr *p;
+ int ret = 0, attrcount_orig = 0, attrcount, attrn;
+ struct attribute **attr;
+
+ if (!(indio_dev->info->event_attrs ||
+ iio_check_for_dynamic_events(indio_dev)))
+ return 0;
+
+ indio_dev->event_interface =
+ kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
+ if (indio_dev->event_interface == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+
+ iio_setup_ev_int(indio_dev->event_interface);
+ if (indio_dev->info->event_attrs != NULL) {
+ attr = indio_dev->info->event_attrs->attrs;
+ while (*attr++ != NULL)
+ attrcount_orig++;
+ }
+ attrcount = attrcount_orig;
+ if (indio_dev->channels) {
+ ret = __iio_add_event_config_attrs(indio_dev);
+ if (ret < 0)
+ goto error_free_setup_event_lines;
+ attrcount += ret;
+ }
+
+ indio_dev->event_interface->group.name = iio_event_group_name;
+ indio_dev->event_interface->group.attrs = kcalloc(attrcount + 1,
+ sizeof(indio_dev->event_interface->group.attrs[0]),
+ GFP_KERNEL);
+ if (indio_dev->event_interface->group.attrs == NULL) {
+ ret = -ENOMEM;
+ goto error_free_setup_event_lines;
+ }
+ if (indio_dev->info->event_attrs)
+ memcpy(indio_dev->event_interface->group.attrs,
+ indio_dev->info->event_attrs->attrs,
+ sizeof(indio_dev->event_interface->group.attrs[0])
+ *attrcount_orig);
+ attrn = attrcount_orig;
+ /* Add all elements from the list. */
+ list_for_each_entry(p,
+ &indio_dev->event_interface->dev_attr_list,
+ l)
+ indio_dev->event_interface->group.attrs[attrn++] =
+ &p->dev_attr.attr;
+ indio_dev->groups[indio_dev->groupcounter++] =
+ &indio_dev->event_interface->group;
+
+ return 0;
+
+error_free_setup_event_lines:
+ __iio_remove_event_config_attrs(indio_dev);
+ kfree(indio_dev->event_interface);
+error_ret:
+
+ return ret;
+}
+
+void iio_device_unregister_eventset(struct iio_dev *indio_dev)
+{
+ if (indio_dev->event_interface == NULL)
+ return;
+ __iio_remove_event_config_attrs(indio_dev);
+ kfree(indio_dev->event_interface->group.attrs);
+ kfree(indio_dev->event_interface);
+}
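
The new industrialio-event.c keeps the chrdev plumbing (the kfifo, the poll/read file operations and iio_event_getfd()) private, so a driver only interacts with it through iio_push_event(). The sketch below shows how a driver might report a rising threshold event from a threaded interrupt handler; the IIO_VOLTAGE channel, the handler name and the use of iio_get_time_ns() are illustrative assumptions, not part of this patch.

    #include <linux/interrupt.h>
    #include "iio.h"
    #include "events.h"

    /* Hypothetical example: report a rising threshold event on ADC channel 0. */
    static irqreturn_t example_adc_event_handler(int irq, void *private)
    {
    	struct iio_dev *indio_dev = private;

    	iio_push_event(indio_dev,
    		       IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 0,
    					    IIO_EV_TYPE_THRESH,
    					    IIO_EV_DIR_RISING),
    		       iio_get_time_ns());
    	return IRQ_HANDLED;
    }

Userspace fetches the anonymous event file descriptor via the event ioctl defined in events.h and then reads struct iio_event_data records (id plus timestamp) from it; if nobody holds that descriptor open, the IIO_BUSY_BIT_POS test in iio_push_event() above simply drops the event. The per-channel attributes created by iio_device_add_event_sysfs() get names built from iio_ev_type_text[] and iio_ev_dir_text[], e.g. a "..._thresh_rising_en" / "..._thresh_rising_value" pair.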
diff --git a/drivers/staging/iio/kfifo_buf.c b/drivers/staging/iio/kfifo_buf.c
index e1e9c06cde4a..9f3bd59c0e72 100644
--- a/drivers/staging/iio/kfifo_buf.c
+++ b/drivers/staging/iio/kfifo_buf.c
@@ -59,21 +59,6 @@ static struct attribute_group iio_kfifo_attribute_group = {
.name = "buffer",
};
-struct iio_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev)
-{
- struct iio_kfifo *kf;
-
- kf = kzalloc(sizeof *kf, GFP_KERNEL);
- if (!kf)
- return NULL;
- kf->update_needed = true;
- iio_buffer_init(&kf->buffer);
- kf->buffer.attrs = &iio_kfifo_attribute_group;
-
- return &kf->buffer;
-}
-EXPORT_SYMBOL(iio_kfifo_allocate);
-
static int iio_get_bytes_per_datum_kfifo(struct iio_buffer *r)
{
return r->bytes_per_datum;
@@ -104,12 +89,6 @@ static int iio_set_length_kfifo(struct iio_buffer *r, int length)
return 0;
}
-void iio_kfifo_free(struct iio_buffer *r)
-{
- kfree(iio_to_kfifo(r));
-}
-EXPORT_SYMBOL(iio_kfifo_free);
-
static int iio_store_to_kfifo(struct iio_buffer *r,
u8 *data,
s64 timestamp)
@@ -137,7 +116,7 @@ static int iio_read_first_n_kfifo(struct iio_buffer *r,
return copied;
}
-const struct iio_buffer_access_funcs kfifo_access_funcs = {
+static const struct iio_buffer_access_funcs kfifo_access_funcs = {
.store_to = &iio_store_to_kfifo,
.read_first_n = &iio_read_first_n_kfifo,
.request_update = &iio_request_update_kfifo,
@@ -146,6 +125,27 @@ const struct iio_buffer_access_funcs kfifo_access_funcs = {
.get_length = &iio_get_length_kfifo,
.set_length = &iio_set_length_kfifo,
};
-EXPORT_SYMBOL(kfifo_access_funcs);
+
+struct iio_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev)
+{
+ struct iio_kfifo *kf;
+
+ kf = kzalloc(sizeof *kf, GFP_KERNEL);
+ if (!kf)
+ return NULL;
+ kf->update_needed = true;
+ iio_buffer_init(&kf->buffer);
+ kf->buffer.attrs = &iio_kfifo_attribute_group;
+ kf->buffer.access = &kfifo_access_funcs;
+
+ return &kf->buffer;
+}
+EXPORT_SYMBOL(iio_kfifo_allocate);
+
+void iio_kfifo_free(struct iio_buffer *r)
+{
+ kfree(iio_to_kfifo(r));
+}
+EXPORT_SYMBOL(iio_kfifo_free);
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/iio/kfifo_buf.h b/drivers/staging/iio/kfifo_buf.h
index cc2bd9a1ccfe..9f7da016af04 100644
--- a/drivers/staging/iio/kfifo_buf.h
+++ b/drivers/staging/iio/kfifo_buf.h
@@ -3,8 +3,6 @@
#include "iio.h"
#include "buffer.h"
-extern const struct iio_buffer_access_funcs kfifo_access_funcs;
-
struct iio_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev);
void iio_kfifo_free(struct iio_buffer *r);
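
With kfifo_access_funcs made static and wired up inside iio_kfifo_allocate(), the allocate/free pair is all a buffer user touches; the explicit access-funcs assignment drivers used to carry disappears (the software ring buffer below receives the same treatment, as the ade7758 hunk further down shows). A minimal sketch, with hypothetical example_* helpers standing in for a driver's probe/remove paths:

    #include "iio.h"
    #include "kfifo_buf.h"

    /* Hypothetical probe-time helper: the allocator now sets buffer->access itself. */
    static int example_setup_buffer(struct iio_dev *indio_dev)
    {
    	indio_dev->buffer = iio_kfifo_allocate(indio_dev);
    	if (!indio_dev->buffer)
    		return -ENOMEM;
    	return 0;
    }

    /* Hypothetical remove-time helper. */
    static void example_teardown_buffer(struct iio_dev *indio_dev)
    {
    	iio_kfifo_free(indio_dev->buffer);
    	indio_dev->buffer = NULL;
    }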
diff --git a/drivers/staging/iio/light/isl29018.c b/drivers/staging/iio/light/isl29018.c
index 849d6a564afa..38ec52b65dfa 100644
--- a/drivers/staging/iio/light/isl29018.c
+++ b/drivers/staging/iio/light/isl29018.c
@@ -592,11 +592,18 @@ static const struct i2c_device_id isl29018_id[] = {
MODULE_DEVICE_TABLE(i2c, isl29018_id);
+static const struct of_device_id isl29018_of_match[] = {
+ { .compatible = "invn,isl29018", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, isl29018_of_match);
+
static struct i2c_driver isl29018_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "isl29018",
.owner = THIS_MODULE,
+ .of_match_table = isl29018_of_match,
},
.probe = isl29018_probe,
.remove = __devexit_p(isl29018_remove),
diff --git a/drivers/staging/iio/magnetometer/ak8975.c b/drivers/staging/iio/magnetometer/ak8975.c
index 3158f12cb051..d5ddac3d8831 100644
--- a/drivers/staging/iio/magnetometer/ak8975.c
+++ b/drivers/staging/iio/magnetometer/ak8975.c
@@ -564,9 +564,17 @@ static const struct i2c_device_id ak8975_id[] = {
MODULE_DEVICE_TABLE(i2c, ak8975_id);
+static const struct of_device_id ak8975_of_match[] = {
+ { .compatible = "asahi-kasei,ak8975", },
+ { .compatible = "ak8975", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ak8975_of_match);
+
static struct i2c_driver ak8975_driver = {
.driver = {
.name = "ak8975",
+ .of_match_table = ak8975_of_match,
},
.probe = ak8975_probe,
.remove = __devexit_p(ak8975_remove),
diff --git a/drivers/staging/iio/meter/ade7758_ring.c b/drivers/staging/iio/meter/ade7758_ring.c
index f29f2b278fe4..c5c522bb69ab 100644
--- a/drivers/staging/iio/meter/ade7758_ring.c
+++ b/drivers/staging/iio/meter/ade7758_ring.c
@@ -144,8 +144,6 @@ int ade7758_configure_ring(struct iio_dev *indio_dev)
return ret;
}
- /* Effectively select the ring buffer implementation */
- indio_dev->buffer->access = &ring_sw_access_funcs;
indio_dev->setup_ops = &ade7758_ring_setup_ops;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
diff --git a/drivers/staging/iio/ring_sw.c b/drivers/staging/iio/ring_sw.c
index 3e24ec455854..eeac0daf47bd 100644
--- a/drivers/staging/iio/ring_sw.c
+++ b/drivers/staging/iio/ring_sw.c
@@ -329,6 +329,16 @@ static struct attribute_group iio_ring_attribute_group = {
.name = "buffer",
};
+static const struct iio_buffer_access_funcs ring_sw_access_funcs = {
+ .store_to = &iio_store_to_sw_rb,
+ .read_first_n = &iio_read_first_n_sw_rb,
+ .request_update = &iio_request_update_sw_rb,
+ .get_bytes_per_datum = &iio_get_bytes_per_datum_sw_rb,
+ .set_bytes_per_datum = &iio_set_bytes_per_datum_sw_rb,
+ .get_length = &iio_get_length_sw_rb,
+ .set_length = &iio_set_length_sw_rb,
+};
+
struct iio_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
{
struct iio_buffer *buf;
@@ -341,6 +351,7 @@ struct iio_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
buf = &ring->buf;
iio_buffer_init(buf);
buf->attrs = &iio_ring_attribute_group;
+ buf->access = &ring_sw_access_funcs;
return buf;
}
@@ -352,16 +363,5 @@ void iio_sw_rb_free(struct iio_buffer *r)
}
EXPORT_SYMBOL(iio_sw_rb_free);
-const struct iio_buffer_access_funcs ring_sw_access_funcs = {
- .store_to = &iio_store_to_sw_rb,
- .read_first_n = &iio_read_first_n_sw_rb,
- .request_update = &iio_request_update_sw_rb,
- .get_bytes_per_datum = &iio_get_bytes_per_datum_sw_rb,
- .set_bytes_per_datum = &iio_set_bytes_per_datum_sw_rb,
- .get_length = &iio_get_length_sw_rb,
- .set_length = &iio_set_length_sw_rb,
-};
-EXPORT_SYMBOL(ring_sw_access_funcs);
-
MODULE_DESCRIPTION("Industrialio I/O software ring buffer");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/iio/ring_sw.h b/drivers/staging/iio/ring_sw.h
index e6a6e2c40960..7556e2122367 100644
--- a/drivers/staging/iio/ring_sw.h
+++ b/drivers/staging/iio/ring_sw.h
@@ -25,11 +25,6 @@
#define _IIO_RING_SW_H_
#include "buffer.h"
-/**
- * ring_sw_access_funcs - access functions for a software ring buffer
- **/
-extern const struct iio_buffer_access_funcs ring_sw_access_funcs;
-
struct iio_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev);
void iio_sw_rb_free(struct iio_buffer *ring);
#endif /* _IIO_RING_SW_H_ */
diff --git a/drivers/staging/line6/capture.c b/drivers/staging/line6/capture.c
index 127f95247749..c85c5b6bffb7 100644
--- a/drivers/staging/line6/capture.c
+++ b/drivers/staging/line6/capture.c
@@ -107,7 +107,7 @@ void line6_unlink_audio_in_urbs(struct snd_line6_pcm *line6pcm)
Wait until unlinking of all currently active capture URBs has been
finished.
*/
-static void wait_clear_audio_in_urbs(struct snd_line6_pcm *line6pcm)
+void line6_wait_clear_audio_in_urbs(struct snd_line6_pcm *line6pcm)
{
int timeout = HZ;
unsigned int i;
@@ -134,7 +134,7 @@ static void wait_clear_audio_in_urbs(struct snd_line6_pcm *line6pcm)
void line6_unlink_wait_clear_audio_in_urbs(struct snd_line6_pcm *line6pcm)
{
line6_unlink_audio_in_urbs(line6pcm);
- wait_clear_audio_in_urbs(line6pcm);
+ line6_wait_clear_audio_in_urbs(line6pcm);
}
/*
@@ -193,25 +193,6 @@ void line6_capture_check_period(struct snd_line6_pcm *line6pcm, int length)
}
}
-int line6_alloc_capture_buffer(struct snd_line6_pcm *line6pcm)
-{
- /* We may be invoked multiple times in a row so allocate once only */
- if (line6pcm->buffer_in)
- return 0;
-
- line6pcm->buffer_in =
- kmalloc(LINE6_ISO_BUFFERS * LINE6_ISO_PACKETS *
- line6pcm->max_packet_size, GFP_KERNEL);
-
- if (!line6pcm->buffer_in) {
- dev_err(line6pcm->line6->ifcdev,
- "cannot malloc capture buffer\n");
- return -ENOMEM;
- }
-
- return 0;
-}
-
void line6_free_capture_buffer(struct snd_line6_pcm *line6pcm)
{
kfree(line6pcm->buffer_in);
@@ -273,9 +254,9 @@ static void audio_in_callback(struct urb *urb)
line6pcm->prev_fsize = fsize;
#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
- if (!(line6pcm->flags & MASK_PCM_IMPULSE))
+ if (!(line6pcm->flags & LINE6_BITS_PCM_IMPULSE))
#endif
- if (test_bit(BIT_PCM_ALSA_CAPTURE, &line6pcm->flags)
+ if (test_bit(LINE6_INDEX_PCM_ALSA_CAPTURE_STREAM, &line6pcm->flags)
&& (fsize > 0))
line6_capture_copy(line6pcm, fbuf, fsize);
}
@@ -291,9 +272,9 @@ static void audio_in_callback(struct urb *urb)
submit_audio_in_urb(line6pcm);
#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
- if (!(line6pcm->flags & MASK_PCM_IMPULSE))
+ if (!(line6pcm->flags & LINE6_BITS_PCM_IMPULSE))
#endif
- if (test_bit(BIT_PCM_ALSA_CAPTURE, &line6pcm->flags))
+ if (test_bit(LINE6_INDEX_PCM_ALSA_CAPTURE_STREAM, &line6pcm->flags))
line6_capture_check_period(line6pcm, length);
}
}
@@ -341,17 +322,17 @@ static int snd_line6_capture_hw_params(struct snd_pcm_substream *substream,
}
/* -- [FD] end */
- if ((line6pcm->flags & MASK_CAPTURE) == 0) {
- ret = line6_alloc_capture_buffer(line6pcm);
+ ret = line6_pcm_acquire(line6pcm, LINE6_BIT_PCM_ALSA_CAPTURE_BUFFER);
- if (ret < 0)
- return ret;
- }
+ if (ret < 0)
+ return ret;
ret = snd_pcm_lib_malloc_pages(substream,
params_buffer_bytes(hw_params));
- if (ret < 0)
+ if (ret < 0) {
+ line6_pcm_release(line6pcm, LINE6_BIT_PCM_ALSA_CAPTURE_BUFFER);
return ret;
+ }
line6pcm->period_in = params_period_bytes(hw_params);
return 0;
@@ -361,12 +342,7 @@ static int snd_line6_capture_hw_params(struct snd_pcm_substream *substream,
static int snd_line6_capture_hw_free(struct snd_pcm_substream *substream)
{
struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);
-
- if ((line6pcm->flags & MASK_CAPTURE) == 0) {
- line6_unlink_wait_clear_audio_in_urbs(line6pcm);
- line6_free_capture_buffer(line6pcm);
- }
-
+ line6_pcm_release(line6pcm, LINE6_BIT_PCM_ALSA_CAPTURE_BUFFER);
return snd_pcm_lib_free_pages(substream);
}
@@ -380,7 +356,7 @@ int snd_line6_capture_trigger(struct snd_line6_pcm *line6pcm, int cmd)
#ifdef CONFIG_PM
case SNDRV_PCM_TRIGGER_RESUME:
#endif
- err = line6_pcm_start(line6pcm, MASK_PCM_ALSA_CAPTURE);
+ err = line6_pcm_acquire(line6pcm, LINE6_BIT_PCM_ALSA_CAPTURE_STREAM);
if (err < 0)
return err;
@@ -391,7 +367,7 @@ int snd_line6_capture_trigger(struct snd_line6_pcm *line6pcm, int cmd)
#ifdef CONFIG_PM
case SNDRV_PCM_TRIGGER_SUSPEND:
#endif
- err = line6_pcm_stop(line6pcm, MASK_PCM_ALSA_CAPTURE);
+ err = line6_pcm_release(line6pcm, LINE6_BIT_PCM_ALSA_CAPTURE_STREAM);
if (err < 0)
return err;
diff --git a/drivers/staging/line6/capture.h b/drivers/staging/line6/capture.h
index 366cbaa7c88d..4157bcb598a9 100644
--- a/drivers/staging/line6/capture.h
+++ b/drivers/staging/line6/capture.h
@@ -19,7 +19,6 @@
extern struct snd_pcm_ops snd_line6_capture_ops;
-extern int line6_alloc_capture_buffer(struct snd_line6_pcm *line6pcm);
extern void line6_capture_copy(struct snd_line6_pcm *line6pcm, char *fbuf,
int fsize);
extern void line6_capture_check_period(struct snd_line6_pcm *line6pcm,
@@ -30,6 +29,7 @@ extern int line6_submit_audio_in_all_urbs(struct snd_line6_pcm *line6pcm);
extern void line6_unlink_audio_in_urbs(struct snd_line6_pcm *line6pcm);
extern void line6_unlink_wait_clear_audio_in_urbs(struct snd_line6_pcm
*line6pcm);
+extern void line6_wait_clear_audio_in_urbs(struct snd_line6_pcm *line6pcm);
extern int snd_line6_capture_trigger(struct snd_line6_pcm *line6pcm, int cmd);
#endif
diff --git a/drivers/staging/line6/driver.c b/drivers/staging/line6/driver.c
index 6a1959e16e00..e8023afd3656 100644
--- a/drivers/staging/line6/driver.c
+++ b/drivers/staging/line6/driver.c
@@ -1346,7 +1346,7 @@ static void __exit line6_exit(void)
if (line6pcm == NULL)
continue;
- line6_pcm_stop(line6pcm, ~0);
+ line6_pcm_release(line6pcm, ~0);
}
usb_deregister(&line6_driver);
diff --git a/drivers/staging/line6/pcm.c b/drivers/staging/line6/pcm.c
index 37675e66da81..90d2d4475cb4 100644
--- a/drivers/staging/line6/pcm.c
+++ b/drivers/staging/line6/pcm.c
@@ -52,9 +52,9 @@ static ssize_t pcm_set_impulse_volume(struct device *dev,
line6pcm->impulse_volume = value;
if (value > 0)
- line6_pcm_start(line6pcm, MASK_PCM_IMPULSE);
+ line6_pcm_acquire(line6pcm, LINE6_BITS_PCM_IMPULSE);
else
- line6_pcm_stop(line6pcm, MASK_PCM_IMPULSE);
+ line6_pcm_release(line6pcm, LINE6_BITS_PCM_IMPULSE);
return count;
}
@@ -92,29 +92,43 @@ static bool test_flags(unsigned long flags0, unsigned long flags1,
return ((flags0 & mask) == 0) && ((flags1 & mask) != 0);
}
-int line6_pcm_start(struct snd_line6_pcm *line6pcm, int channels)
+int line6_pcm_acquire(struct snd_line6_pcm *line6pcm, int channels)
{
unsigned long flags_old =
__sync_fetch_and_or(&line6pcm->flags, channels);
unsigned long flags_new = flags_old | channels;
+ unsigned long flags_final = flags_old;
int err = 0;
line6pcm->prev_fbuf = NULL;
- if (test_flags(flags_old, flags_new, MASK_CAPTURE)) {
+ if (test_flags(flags_old, flags_new, LINE6_BITS_CAPTURE_BUFFER)) {
+ /* We may be invoked multiple times in a row so allocate once only */
+ if (!line6pcm->buffer_in) {
+ line6pcm->buffer_in =
+ kmalloc(LINE6_ISO_BUFFERS * LINE6_ISO_PACKETS *
+ line6pcm->max_packet_size, GFP_KERNEL);
+
+ if (!line6pcm->buffer_in) {
+ dev_err(line6pcm->line6->ifcdev,
+ "cannot malloc capture buffer\n");
+ err = -ENOMEM;
+ goto pcm_acquire_error;
+ }
+
+ flags_final |= channels & LINE6_BITS_CAPTURE_BUFFER;
+ }
+ }
+
+ if (test_flags(flags_old, flags_new, LINE6_BITS_CAPTURE_STREAM)) {
/*
Waiting for completion of active URBs in the stop handler is
a bug, we therefore report an error if capturing is restarted
too soon.
*/
- if (line6pcm->active_urb_in | line6pcm->unlink_urb_in)
+ if (line6pcm->active_urb_in | line6pcm->unlink_urb_in) {
+ dev_err(line6pcm->line6->ifcdev, "Device not yet ready\n");
return -EBUSY;
-
- if (!(flags_new & MASK_PCM_ALSA_CAPTURE)) {
- err = line6_alloc_capture_buffer(line6pcm);
-
- if (err < 0)
- goto pcm_start_error;
}
line6pcm->count_in = 0;
@@ -122,55 +136,78 @@ int line6_pcm_start(struct snd_line6_pcm *line6pcm, int channels)
err = line6_submit_audio_in_all_urbs(line6pcm);
if (err < 0)
- goto pcm_start_error;
+ goto pcm_acquire_error;
+
+ flags_final |= channels & LINE6_BITS_CAPTURE_STREAM;
}
- if (test_flags(flags_old, flags_new, MASK_PLAYBACK)) {
- /*
- See comment above regarding PCM restart.
- */
- if (line6pcm->active_urb_out | line6pcm->unlink_urb_out)
- return -EBUSY;
+ if (test_flags(flags_old, flags_new, LINE6_BITS_PLAYBACK_BUFFER)) {
+ /* We may be invoked multiple times in a row so allocate once only */
+ if (!line6pcm->buffer_out) {
+ line6pcm->buffer_out =
+ kmalloc(LINE6_ISO_BUFFERS * LINE6_ISO_PACKETS *
+ line6pcm->max_packet_size, GFP_KERNEL);
+
+ if (!line6pcm->buffer_out) {
+ dev_err(line6pcm->line6->ifcdev,
+ "cannot malloc playback buffer\n");
+ err = -ENOMEM;
+ goto pcm_acquire_error;
+ }
- if (!(flags_new & MASK_PCM_ALSA_PLAYBACK)) {
- err = line6_alloc_playback_buffer(line6pcm);
+ flags_final |= channels & LINE6_BITS_PLAYBACK_BUFFER;
+ }
+ }
- if (err < 0)
- goto pcm_start_error;
+ if (test_flags(flags_old, flags_new, LINE6_BITS_PLAYBACK_STREAM)) {
+ /*
+ See comment above regarding PCM restart.
+ */
+ if (line6pcm->active_urb_out | line6pcm->unlink_urb_out) {
+ dev_err(line6pcm->line6->ifcdev, "Device not yet ready\n");
+ return -EBUSY;
}
line6pcm->count_out = 0;
err = line6_submit_audio_out_all_urbs(line6pcm);
if (err < 0)
- goto pcm_start_error;
+ goto pcm_acquire_error;
+
+ flags_final |= channels & LINE6_BITS_PLAYBACK_STREAM;
}
return 0;
-pcm_start_error:
- __sync_fetch_and_and(&line6pcm->flags, ~channels);
+pcm_acquire_error:
+ /*
+ If not all requested resources/streams could be obtained, release
+ those which were successfully obtained (if any).
+ */
+ line6_pcm_release(line6pcm, flags_final & channels);
return err;
}
-int line6_pcm_stop(struct snd_line6_pcm *line6pcm, int channels)
+int line6_pcm_release(struct snd_line6_pcm *line6pcm, int channels)
{
unsigned long flags_old =
__sync_fetch_and_and(&line6pcm->flags, ~channels);
unsigned long flags_new = flags_old & ~channels;
- if (test_flags(flags_new, flags_old, MASK_CAPTURE)) {
+ if (test_flags(flags_new, flags_old, LINE6_BITS_CAPTURE_STREAM))
line6_unlink_audio_in_urbs(line6pcm);
- if (!(flags_old & MASK_PCM_ALSA_CAPTURE))
- line6_free_capture_buffer(line6pcm);
+ if (test_flags(flags_new, flags_old, LINE6_BITS_CAPTURE_BUFFER)) {
+ line6_wait_clear_audio_in_urbs(line6pcm);
+ line6_free_capture_buffer(line6pcm);
}
- if (test_flags(flags_new, flags_old, MASK_PLAYBACK)) {
+ if (test_flags(flags_new, flags_old, LINE6_BITS_PLAYBACK_STREAM))
line6_unlink_audio_out_urbs(line6pcm);
- if (!(flags_old & MASK_PCM_ALSA_PLAYBACK))
- line6_free_playback_buffer(line6pcm);
+ if (test_flags(flags_new, flags_old, LINE6_BITS_PLAYBACK_BUFFER)) {
+ line6_wait_clear_audio_out_urbs(line6pcm);
+ line6_free_playback_buffer(line6pcm);
}
return 0;
@@ -185,7 +222,7 @@ int snd_line6_trigger(struct snd_pcm_substream *substream, int cmd)
unsigned long flags;
spin_lock_irqsave(&line6pcm->lock_trigger, flags);
- clear_bit(BIT_PREPARED, &line6pcm->flags);
+ clear_bit(LINE6_INDEX_PREPARED, &line6pcm->flags);
snd_pcm_group_for_each_entry(s, substream) {
switch (s->stream) {
@@ -498,13 +535,13 @@ int snd_line6_prepare(struct snd_pcm_substream *substream)
switch (substream->stream) {
case SNDRV_PCM_STREAM_PLAYBACK:
- if ((line6pcm->flags & MASK_PLAYBACK) == 0)
+ if ((line6pcm->flags & LINE6_BITS_PLAYBACK_STREAM) == 0)
line6_unlink_wait_clear_audio_out_urbs(line6pcm);
break;
case SNDRV_PCM_STREAM_CAPTURE:
- if ((line6pcm->flags & MASK_CAPTURE) == 0)
+ if ((line6pcm->flags & LINE6_BITS_CAPTURE_STREAM) == 0)
line6_unlink_wait_clear_audio_in_urbs(line6pcm);
break;
@@ -513,7 +550,7 @@ int snd_line6_prepare(struct snd_pcm_substream *substream)
MISSING_CASE;
}
- if (!test_and_set_bit(BIT_PREPARED, &line6pcm->flags)) {
+ if (!test_and_set_bit(LINE6_INDEX_PREPARED, &line6pcm->flags)) {
line6pcm->count_out = 0;
line6pcm->pos_out = 0;
line6pcm->pos_out_done = 0;
diff --git a/drivers/staging/line6/pcm.h b/drivers/staging/line6/pcm.h
index 55d8297dd3d9..5210ec8dbe16 100644
--- a/drivers/staging/line6/pcm.h
+++ b/drivers/staging/line6/pcm.h
@@ -46,57 +46,131 @@
(line6pcm->pcm->streams[stream].substream)
/*
- PCM mode bits and masks.
- "ALSA": operations triggered by applications via ALSA
- "MONITOR": software monitoring
- "IMPULSE": optional impulse response operation
+ PCM mode bits.
+
+ There are several features of the Line6 USB driver which require PCM
+ data to be exchanged with the device:
+ *) PCM playback and capture via ALSA
+ *) software monitoring (for devices without hardware monitoring)
+ *) optional impulse response measurement
+ However, from the device's point of view, there is just a single
+ capture and playback stream, which must be shared between these
+ subsystems. It is therefore necessary to maintain the state of the
+ subsystems with respect to PCM usage. We define several constants of
+ the form LINE6_BIT_PCM_<subsystem>_<direction>_<resource> with the
+ following meanings:
+ *) <subsystem> is one of
+ -) ALSA: PCM playback and capture via ALSA
+ -) MONITOR: software monitoring
+ -) IMPULSE: optional impulse response measurement
+ *) <direction> is one of
+ -) PLAYBACK: audio output (from host to device)
+ -) CAPTURE: audio input (from device to host)
+ *) <resource> is one of
+ -) BUFFER: buffer required by PCM data stream
+ -) STREAM: actual PCM data stream
+
+ The subsystems call line6_pcm_acquire() to acquire the (shared)
+ resources needed for a particular operation (e.g., allocate the buffer
+ for ALSA playback or start the capture stream for software monitoring).
+ When a resource is no longer needed, it is released by calling
+ line6_pcm_release(). Buffer allocation and stream startup are handled
+ separately to allow the ALSA kernel driver to perform them at
+ appropriate places (since the callback which starts a PCM stream is not
+ allowed to sleep).
*/
enum {
- /* individual bits: */
- BIT_PCM_ALSA_PLAYBACK,
- BIT_PCM_ALSA_CAPTURE,
- BIT_PCM_MONITOR_PLAYBACK,
- BIT_PCM_MONITOR_CAPTURE,
+ /* individual bit indices: */
+ LINE6_INDEX_PCM_ALSA_PLAYBACK_BUFFER,
+ LINE6_INDEX_PCM_ALSA_PLAYBACK_STREAM,
+ LINE6_INDEX_PCM_ALSA_CAPTURE_BUFFER,
+ LINE6_INDEX_PCM_ALSA_CAPTURE_STREAM,
+ LINE6_INDEX_PCM_MONITOR_PLAYBACK_BUFFER,
+ LINE6_INDEX_PCM_MONITOR_PLAYBACK_STREAM,
+ LINE6_INDEX_PCM_MONITOR_CAPTURE_BUFFER,
+ LINE6_INDEX_PCM_MONITOR_CAPTURE_STREAM,
#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
- BIT_PCM_IMPULSE_PLAYBACK,
- BIT_PCM_IMPULSE_CAPTURE,
+ LINE6_INDEX_PCM_IMPULSE_PLAYBACK_BUFFER,
+ LINE6_INDEX_PCM_IMPULSE_PLAYBACK_STREAM,
+ LINE6_INDEX_PCM_IMPULSE_CAPTURE_BUFFER,
+ LINE6_INDEX_PCM_IMPULSE_CAPTURE_STREAM,
#endif
- BIT_PAUSE_PLAYBACK,
- BIT_PREPARED,
-
- /* individual masks: */
-/* *INDENT-OFF* */
- MASK_PCM_ALSA_PLAYBACK = 1 << BIT_PCM_ALSA_PLAYBACK,
- MASK_PCM_ALSA_CAPTURE = 1 << BIT_PCM_ALSA_CAPTURE,
- MASK_PCM_MONITOR_PLAYBACK = 1 << BIT_PCM_MONITOR_PLAYBACK,
- MASK_PCM_MONITOR_CAPTURE = 1 << BIT_PCM_MONITOR_CAPTURE,
+ LINE6_INDEX_PAUSE_PLAYBACK,
+ LINE6_INDEX_PREPARED,
+
+ /* individual bit masks: */
+ LINE6_BIT(PCM_ALSA_PLAYBACK_BUFFER),
+ LINE6_BIT(PCM_ALSA_PLAYBACK_STREAM),
+ LINE6_BIT(PCM_ALSA_CAPTURE_BUFFER),
+ LINE6_BIT(PCM_ALSA_CAPTURE_STREAM),
+ LINE6_BIT(PCM_MONITOR_PLAYBACK_BUFFER),
+ LINE6_BIT(PCM_MONITOR_PLAYBACK_STREAM),
+ LINE6_BIT(PCM_MONITOR_CAPTURE_BUFFER),
+ LINE6_BIT(PCM_MONITOR_CAPTURE_STREAM),
#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
- MASK_PCM_IMPULSE_PLAYBACK = 1 << BIT_PCM_IMPULSE_PLAYBACK,
- MASK_PCM_IMPULSE_CAPTURE = 1 << BIT_PCM_IMPULSE_CAPTURE,
+ LINE6_BIT(PCM_IMPULSE_PLAYBACK_BUFFER),
+ LINE6_BIT(PCM_IMPULSE_PLAYBACK_STREAM),
+ LINE6_BIT(PCM_IMPULSE_CAPTURE_BUFFER),
+ LINE6_BIT(PCM_IMPULSE_CAPTURE_STREAM),
#endif
- MASK_PAUSE_PLAYBACK = 1 << BIT_PAUSE_PLAYBACK,
- MASK_PREPARED = 1 << BIT_PREPARED,
-/* *INDENT-ON* */
+ LINE6_BIT(PAUSE_PLAYBACK),
+ LINE6_BIT(PREPARED),
- /* combined masks (by operation): */
- MASK_PCM_ALSA = MASK_PCM_ALSA_PLAYBACK | MASK_PCM_ALSA_CAPTURE,
- MASK_PCM_MONITOR = MASK_PCM_MONITOR_PLAYBACK | MASK_PCM_MONITOR_CAPTURE,
+ /* combined bit masks (by operation): */
+ LINE6_BITS_PCM_ALSA_BUFFER =
+ LINE6_BIT_PCM_ALSA_PLAYBACK_BUFFER |
+ LINE6_BIT_PCM_ALSA_CAPTURE_BUFFER,
+
+ LINE6_BITS_PCM_ALSA_STREAM =
+ LINE6_BIT_PCM_ALSA_PLAYBACK_STREAM |
+ LINE6_BIT_PCM_ALSA_CAPTURE_STREAM,
+
+ LINE6_BITS_PCM_MONITOR =
+ LINE6_BIT_PCM_MONITOR_PLAYBACK_BUFFER |
+ LINE6_BIT_PCM_MONITOR_PLAYBACK_STREAM |
+ LINE6_BIT_PCM_MONITOR_CAPTURE_BUFFER |
+ LINE6_BIT_PCM_MONITOR_CAPTURE_STREAM,
+
+#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
+ LINE6_BITS_PCM_IMPULSE =
+ LINE6_BIT_PCM_IMPULSE_PLAYBACK_BUFFER |
+ LINE6_BIT_PCM_IMPULSE_PLAYBACK_STREAM |
+ LINE6_BIT_PCM_IMPULSE_CAPTURE_BUFFER |
+ LINE6_BIT_PCM_IMPULSE_CAPTURE_STREAM,
+#endif
+
+ /* combined bit masks (by direction): */
+ LINE6_BITS_PLAYBACK_BUFFER =
+#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
+ LINE6_BIT_PCM_IMPULSE_PLAYBACK_BUFFER |
+#endif
+ LINE6_BIT_PCM_ALSA_PLAYBACK_BUFFER |
+ LINE6_BIT_PCM_MONITOR_PLAYBACK_BUFFER,
+
+ LINE6_BITS_PLAYBACK_STREAM =
+#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
+ LINE6_BIT_PCM_IMPULSE_PLAYBACK_STREAM |
+#endif
+ LINE6_BIT_PCM_ALSA_PLAYBACK_STREAM |
+ LINE6_BIT_PCM_MONITOR_PLAYBACK_STREAM,
+
+ LINE6_BITS_CAPTURE_BUFFER =
#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
- MASK_PCM_IMPULSE = MASK_PCM_IMPULSE_PLAYBACK | MASK_PCM_IMPULSE_CAPTURE,
+ LINE6_BIT_PCM_IMPULSE_CAPTURE_BUFFER |
#endif
+ LINE6_BIT_PCM_ALSA_CAPTURE_BUFFER |
+ LINE6_BIT_PCM_MONITOR_CAPTURE_BUFFER,
- /* combined masks (by direction): */
+ LINE6_BITS_CAPTURE_STREAM =
#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
- MASK_PLAYBACK =
- MASK_PCM_ALSA_PLAYBACK | MASK_PCM_MONITOR_PLAYBACK |
- MASK_PCM_IMPULSE_PLAYBACK,
- MASK_CAPTURE =
- MASK_PCM_ALSA_CAPTURE | MASK_PCM_MONITOR_CAPTURE |
- MASK_PCM_IMPULSE_CAPTURE
-#else
- MASK_PLAYBACK = MASK_PCM_ALSA_PLAYBACK | MASK_PCM_MONITOR_PLAYBACK,
- MASK_CAPTURE = MASK_PCM_ALSA_CAPTURE | MASK_PCM_MONITOR_CAPTURE
+ LINE6_BIT_PCM_IMPULSE_CAPTURE_STREAM |
#endif
+ LINE6_BIT_PCM_ALSA_CAPTURE_STREAM |
+ LINE6_BIT_PCM_MONITOR_CAPTURE_STREAM,
+
+ LINE6_BITS_STREAM =
+ LINE6_BITS_PLAYBACK_STREAM |
+ LINE6_BITS_CAPTURE_STREAM
};
struct line6_pcm_properties {
@@ -290,7 +364,7 @@ struct snd_line6_pcm {
#endif
/**
- Several status bits (see BIT_*).
+ Several status bits (see LINE6_BIT_*).
*/
unsigned long flags;
@@ -302,16 +376,7 @@ extern int line6_init_pcm(struct usb_line6 *line6,
extern int snd_line6_trigger(struct snd_pcm_substream *substream, int cmd);
extern int snd_line6_prepare(struct snd_pcm_substream *substream);
extern void line6_pcm_disconnect(struct snd_line6_pcm *line6pcm);
-extern int line6_pcm_start(struct snd_line6_pcm *line6pcm, int channels);
-extern int line6_pcm_stop(struct snd_line6_pcm *line6pcm, int channels);
-
-#define PRINT_FRAME_DIFF(op) { \
- static int diff_prev = 1000; \
- int diff = line6pcm->last_frame_out - line6pcm->last_frame_in; \
- if ((diff != diff_prev) && (abs(diff) < 100)) { \
- printk(KERN_INFO "%s frame diff = %d\n", op, diff); \
- diff_prev = diff; \
- } \
-}
+extern int line6_pcm_acquire(struct snd_line6_pcm *line6pcm, int channels);
+extern int line6_pcm_release(struct snd_line6_pcm *line6pcm, int channels);
#endif
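
The comment block above splits every PCM user into a BUFFER and a STREAM resource so that buffer allocation (which may sleep) and stream start-up (which runs in atomic context) can be requested independently. Purely as an illustration of the flag scheme, this is the call sequence the ALSA capture callbacks in this patch follow; the playback side is symmetrical:

    /* hw_params (may sleep): take the shared capture buffer */
    err = line6_pcm_acquire(line6pcm, LINE6_BIT_PCM_ALSA_CAPTURE_BUFFER);

    /* trigger(START) (atomic): only start the URB stream */
    err = line6_pcm_acquire(line6pcm, LINE6_BIT_PCM_ALSA_CAPTURE_STREAM);

    /* trigger(STOP): stop the stream, keep the buffer */
    err = line6_pcm_release(line6pcm, LINE6_BIT_PCM_ALSA_CAPTURE_STREAM);

    /* hw_free: drop the buffer; it is only freed once no other
     * subsystem (monitor / impulse response) still holds its bit */
    err = line6_pcm_release(line6pcm, LINE6_BIT_PCM_ALSA_CAPTURE_BUFFER);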
diff --git a/drivers/staging/line6/playback.c b/drivers/staging/line6/playback.c
index 4152db2328b7..a0ab9d0493fa 100644
--- a/drivers/staging/line6/playback.c
+++ b/drivers/staging/line6/playback.c
@@ -166,7 +166,7 @@ static int submit_audio_out_urb(struct snd_line6_pcm *line6pcm)
struct usb_iso_packet_descriptor *fout =
&urb_out->iso_frame_desc[i];
- if (line6pcm->flags & MASK_CAPTURE)
+ if (line6pcm->flags & LINE6_BITS_CAPTURE_STREAM)
fsize = line6pcm->prev_fsize;
if (fsize == 0) {
@@ -196,8 +196,8 @@ static int submit_audio_out_urb(struct snd_line6_pcm *line6pcm)
urb_out->transfer_buffer_length = urb_size;
urb_out->context = line6pcm;
- if (test_bit(BIT_PCM_ALSA_PLAYBACK, &line6pcm->flags) &&
- !test_bit(BIT_PAUSE_PLAYBACK, &line6pcm->flags)) {
+ if (test_bit(LINE6_INDEX_PCM_ALSA_PLAYBACK_STREAM, &line6pcm->flags) &&
+ !test_bit(LINE6_INDEX_PAUSE_PLAYBACK, &line6pcm->flags)) {
struct snd_pcm_runtime *runtime =
get_substream(line6pcm, SNDRV_PCM_STREAM_PLAYBACK)->runtime;
@@ -238,10 +238,10 @@ static int submit_audio_out_urb(struct snd_line6_pcm *line6pcm)
if (line6pcm->prev_fbuf != NULL) {
#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
- if (line6pcm->flags & MASK_PCM_IMPULSE) {
+ if (line6pcm->flags & LINE6_BITS_PCM_IMPULSE) {
create_impulse_test_signal(line6pcm, urb_out,
bytes_per_frame);
- if (line6pcm->flags & MASK_PCM_ALSA_CAPTURE) {
+ if (line6pcm->flags & LINE6_BIT_PCM_ALSA_CAPTURE_STREAM) {
line6_capture_copy(line6pcm,
urb_out->transfer_buffer,
urb_out->
@@ -254,8 +254,8 @@ static int submit_audio_out_urb(struct snd_line6_pcm *line6pcm)
if (!
(line6pcm->line6->
properties->capabilities & LINE6_BIT_HWMON)
-&& (line6pcm->flags & MASK_PLAYBACK)
-&& (line6pcm->flags & MASK_CAPTURE))
+ && (line6pcm->flags & LINE6_BITS_PLAYBACK_STREAM)
+ && (line6pcm->flags & LINE6_BITS_CAPTURE_STREAM))
add_monitor_signal(urb_out, line6pcm->prev_fbuf,
line6pcm->volume_monitor,
bytes_per_frame);
@@ -321,7 +321,7 @@ void line6_unlink_audio_out_urbs(struct snd_line6_pcm *line6pcm)
/*
Wait until unlinking of all currently active playback URBs has been finished.
*/
-static void wait_clear_audio_out_urbs(struct snd_line6_pcm *line6pcm)
+void line6_wait_clear_audio_out_urbs(struct snd_line6_pcm *line6pcm)
{
int timeout = HZ;
unsigned int i;
@@ -348,26 +348,7 @@ static void wait_clear_audio_out_urbs(struct snd_line6_pcm *line6pcm)
void line6_unlink_wait_clear_audio_out_urbs(struct snd_line6_pcm *line6pcm)
{
line6_unlink_audio_out_urbs(line6pcm);
- wait_clear_audio_out_urbs(line6pcm);
-}
-
-int line6_alloc_playback_buffer(struct snd_line6_pcm *line6pcm)
-{
- /* We may be invoked multiple times in a row so allocate once only */
- if (line6pcm->buffer_out)
- return 0;
-
- line6pcm->buffer_out =
- kmalloc(LINE6_ISO_BUFFERS * LINE6_ISO_PACKETS *
- line6pcm->max_packet_size, GFP_KERNEL);
-
- if (!line6pcm->buffer_out) {
- dev_err(line6pcm->line6->ifcdev,
- "cannot malloc playback buffer\n");
- return -ENOMEM;
- }
-
- return 0;
+ line6_wait_clear_audio_out_urbs(line6pcm);
}
void line6_free_playback_buffer(struct snd_line6_pcm *line6pcm)
@@ -407,7 +388,7 @@ static void audio_out_callback(struct urb *urb)
spin_lock_irqsave(&line6pcm->lock_audio_out, flags);
- if (test_bit(BIT_PCM_ALSA_PLAYBACK, &line6pcm->flags)) {
+ if (test_bit(LINE6_INDEX_PCM_ALSA_PLAYBACK_STREAM, &line6pcm->flags)) {
struct snd_pcm_runtime *runtime = substream->runtime;
line6pcm->pos_out_done +=
length / line6pcm->properties->bytes_per_frame;
@@ -432,7 +413,7 @@ static void audio_out_callback(struct urb *urb)
if (!shutdown) {
submit_audio_out_urb(line6pcm);
- if (test_bit(BIT_PCM_ALSA_PLAYBACK, &line6pcm->flags)) {
+ if (test_bit(LINE6_INDEX_PCM_ALSA_PLAYBACK_STREAM, &line6pcm->flags)) {
line6pcm->bytes_out += length;
if (line6pcm->bytes_out >= line6pcm->period_out) {
line6pcm->bytes_out %= line6pcm->period_out;
@@ -484,17 +465,17 @@ static int snd_line6_playback_hw_params(struct snd_pcm_substream *substream,
}
/* -- [FD] end */
- if ((line6pcm->flags & MASK_PLAYBACK) == 0) {
- ret = line6_alloc_playback_buffer(line6pcm);
+ ret = line6_pcm_acquire(line6pcm, LINE6_BIT_PCM_ALSA_PLAYBACK_BUFFER);
- if (ret < 0)
- return ret;
- }
+ if (ret < 0)
+ return ret;
ret = snd_pcm_lib_malloc_pages(substream,
params_buffer_bytes(hw_params));
- if (ret < 0)
+ if (ret < 0) {
+ line6_pcm_release(line6pcm, LINE6_BIT_PCM_ALSA_PLAYBACK_BUFFER);
return ret;
+ }
line6pcm->period_out = params_period_bytes(hw_params);
return 0;
@@ -504,12 +485,7 @@ static int snd_line6_playback_hw_params(struct snd_pcm_substream *substream,
static int snd_line6_playback_hw_free(struct snd_pcm_substream *substream)
{
struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);
-
- if ((line6pcm->flags & MASK_PLAYBACK) == 0) {
- line6_unlink_wait_clear_audio_out_urbs(line6pcm);
- line6_free_playback_buffer(line6pcm);
- }
-
+ line6_pcm_release(line6pcm, LINE6_BIT_PCM_ALSA_PLAYBACK_BUFFER);
return snd_pcm_lib_free_pages(substream);
}
@@ -523,7 +499,7 @@ int snd_line6_playback_trigger(struct snd_line6_pcm *line6pcm, int cmd)
#ifdef CONFIG_PM
case SNDRV_PCM_TRIGGER_RESUME:
#endif
- err = line6_pcm_start(line6pcm, MASK_PCM_ALSA_PLAYBACK);
+ err = line6_pcm_acquire(line6pcm, LINE6_BIT_PCM_ALSA_PLAYBACK_STREAM);
if (err < 0)
return err;
@@ -534,7 +510,7 @@ int snd_line6_playback_trigger(struct snd_line6_pcm *line6pcm, int cmd)
#ifdef CONFIG_PM
case SNDRV_PCM_TRIGGER_SUSPEND:
#endif
- err = line6_pcm_stop(line6pcm, MASK_PCM_ALSA_PLAYBACK);
+ err = line6_pcm_release(line6pcm, LINE6_BIT_PCM_ALSA_PLAYBACK_STREAM);
if (err < 0)
return err;
@@ -542,11 +518,11 @@ int snd_line6_playback_trigger(struct snd_line6_pcm *line6pcm, int cmd)
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- set_bit(BIT_PAUSE_PLAYBACK, &line6pcm->flags);
+ set_bit(LINE6_INDEX_PAUSE_PLAYBACK, &line6pcm->flags);
break;
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- clear_bit(BIT_PAUSE_PLAYBACK, &line6pcm->flags);
+ clear_bit(LINE6_INDEX_PAUSE_PLAYBACK, &line6pcm->flags);
break;
default:
diff --git a/drivers/staging/line6/playback.h b/drivers/staging/line6/playback.h
index 02487ff24538..743bd6f74c57 100644
--- a/drivers/staging/line6/playback.h
+++ b/drivers/staging/line6/playback.h
@@ -29,13 +29,13 @@
extern struct snd_pcm_ops snd_line6_playback_ops;
-extern int line6_alloc_playback_buffer(struct snd_line6_pcm *line6pcm);
extern int line6_create_audio_out_urbs(struct snd_line6_pcm *line6pcm);
extern void line6_free_playback_buffer(struct snd_line6_pcm *line6pcm);
extern int line6_submit_audio_out_all_urbs(struct snd_line6_pcm *line6pcm);
extern void line6_unlink_audio_out_urbs(struct snd_line6_pcm *line6pcm);
extern void line6_unlink_wait_clear_audio_out_urbs(struct snd_line6_pcm
*line6pcm);
+extern void line6_wait_clear_audio_out_urbs(struct snd_line6_pcm *line6pcm);
extern int snd_line6_playback_trigger(struct snd_line6_pcm *line6pcm, int cmd);
#endif
diff --git a/drivers/staging/line6/toneport.c b/drivers/staging/line6/toneport.c
index f31057830dbc..b754f69a29c4 100644
--- a/drivers/staging/line6/toneport.c
+++ b/drivers/staging/line6/toneport.c
@@ -207,9 +207,9 @@ static int snd_toneport_monitor_put(struct snd_kcontrol *kcontrol,
line6pcm->volume_monitor = ucontrol->value.integer.value[0];
if (line6pcm->volume_monitor > 0)
- line6_pcm_start(line6pcm, MASK_PCM_MONITOR);
+ line6_pcm_acquire(line6pcm, LINE6_BITS_PCM_MONITOR);
else
- line6_pcm_stop(line6pcm, MASK_PCM_MONITOR);
+ line6_pcm_release(line6pcm, LINE6_BITS_PCM_MONITOR);
return 1;
}
@@ -264,7 +264,7 @@ static void toneport_start_pcm(unsigned long arg)
{
struct usb_line6_toneport *toneport = (struct usb_line6_toneport *)arg;
struct usb_line6 *line6 = &toneport->line6;
- line6_pcm_start(line6->line6pcm, MASK_PCM_MONITOR);
+ line6_pcm_acquire(line6->line6pcm, LINE6_BITS_PCM_MONITOR);
}
/* control definition */
@@ -320,7 +320,9 @@ static void toneport_setup(struct usb_line6_toneport *toneport)
/* initialize source select: */
switch (usbdev->descriptor.idProduct) {
case LINE6_DEVID_TONEPORT_UX1:
+ case LINE6_DEVID_TONEPORT_UX2:
case LINE6_DEVID_PODSTUDIO_UX1:
+ case LINE6_DEVID_PODSTUDIO_UX2:
toneport_send_cmd(usbdev,
toneport_source_info[toneport->source].code,
0x0000);
@@ -363,7 +365,9 @@ static int toneport_try_init(struct usb_interface *interface,
/* register source select control: */
switch (usbdev->descriptor.idProduct) {
case LINE6_DEVID_TONEPORT_UX1:
+ case LINE6_DEVID_TONEPORT_UX2:
case LINE6_DEVID_PODSTUDIO_UX1:
+ case LINE6_DEVID_PODSTUDIO_UX2:
err =
snd_ctl_add(line6->card,
snd_ctl_new1(&toneport_control_source,
@@ -442,7 +446,7 @@ void line6_toneport_disconnect(struct usb_interface *interface)
struct snd_line6_pcm *line6pcm = toneport->line6.line6pcm;
if (line6pcm != NULL) {
- line6_pcm_stop(line6pcm, MASK_PCM_MONITOR);
+ line6_pcm_release(line6pcm, LINE6_BITS_PCM_MONITOR);
line6_pcm_disconnect(line6pcm);
}
}
diff --git a/drivers/staging/line6/usbdefs.h b/drivers/staging/line6/usbdefs.h
index aff9e5caea46..353d59d77b04 100644
--- a/drivers/staging/line6/usbdefs.h
+++ b/drivers/staging/line6/usbdefs.h
@@ -39,31 +39,29 @@
#define LINE6_DEVID_TONEPORT_UX2 0x4142
#define LINE6_DEVID_VARIAX 0x534d
-enum {
- LINE6_ID_BASSPODXT,
- LINE6_ID_BASSPODXTLIVE,
- LINE6_ID_BASSPODXTPRO,
- LINE6_ID_GUITARPORT,
- LINE6_ID_POCKETPOD,
- LINE6_ID_PODHD300,
- LINE6_ID_PODHD500,
- LINE6_ID_PODSTUDIO_GX,
- LINE6_ID_PODSTUDIO_UX1,
- LINE6_ID_PODSTUDIO_UX2,
- LINE6_ID_PODX3,
- LINE6_ID_PODX3LIVE,
- LINE6_ID_PODXT,
- LINE6_ID_PODXTLIVE,
- LINE6_ID_PODXTPRO,
- LINE6_ID_TONEPORT_GX,
- LINE6_ID_TONEPORT_UX1,
- LINE6_ID_TONEPORT_UX2,
- LINE6_ID_VARIAX
-};
-
-#define LINE6_BIT(x) LINE6_BIT_ ## x = 1 << LINE6_ID_ ## x
+#define LINE6_BIT(x) LINE6_BIT_ ## x = 1 << LINE6_INDEX_ ## x
enum {
+ LINE6_INDEX_BASSPODXT,
+ LINE6_INDEX_BASSPODXTLIVE,
+ LINE6_INDEX_BASSPODXTPRO,
+ LINE6_INDEX_GUITARPORT,
+ LINE6_INDEX_POCKETPOD,
+ LINE6_INDEX_PODHD300,
+ LINE6_INDEX_PODHD500,
+ LINE6_INDEX_PODSTUDIO_GX,
+ LINE6_INDEX_PODSTUDIO_UX1,
+ LINE6_INDEX_PODSTUDIO_UX2,
+ LINE6_INDEX_PODX3,
+ LINE6_INDEX_PODX3LIVE,
+ LINE6_INDEX_PODXT,
+ LINE6_INDEX_PODXTLIVE,
+ LINE6_INDEX_PODXTPRO,
+ LINE6_INDEX_TONEPORT_GX,
+ LINE6_INDEX_TONEPORT_UX1,
+ LINE6_INDEX_TONEPORT_UX2,
+ LINE6_INDEX_VARIAX,
+
LINE6_BIT(BASSPODXT),
LINE6_BIT(BASSPODXTLIVE),
LINE6_BIT(BASSPODXTPRO),
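
The reworked LINE6_BIT() macro keeps each mask next to its _INDEX_ enumerator, and pcm.h reuses the same macro for the PCM flag bits shown earlier. For reference, the expansion (names taken directly from these headers) is:

    /* LINE6_BIT(PODXT) inside the enum expands to: */
    LINE6_BIT_PODXT = 1 << LINE6_INDEX_PODXT,

    /* and in pcm.h, LINE6_BIT(PCM_ALSA_CAPTURE_STREAM) expands to: */
    LINE6_BIT_PCM_ALSA_CAPTURE_STREAM = 1 << LINE6_INDEX_PCM_ALSA_CAPTURE_STREAM,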
diff --git a/drivers/staging/media/easycap/easycap_main.c b/drivers/staging/media/easycap/easycap_main.c
index 8ff5f38ea196..3d439b790cc6 100644
--- a/drivers/staging/media/easycap/easycap_main.c
+++ b/drivers/staging/media/easycap/easycap_main.c
@@ -3825,6 +3825,7 @@ static int easycap_usb_probe(struct usb_interface *intf,
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
pdata_urb = kzalloc(sizeof(struct data_urb), GFP_KERNEL);
if (!pdata_urb) {
+ usb_free_urb(purb);
SAM("ERROR: Could not allocate struct data_urb.\n");
return -ENOMEM;
}
diff --git a/drivers/staging/mei/TODO b/drivers/staging/mei/TODO
index 7d9a13b0f2dd..fc266018355e 100644
--- a/drivers/staging/mei/TODO
+++ b/drivers/staging/mei/TODO
@@ -3,5 +3,8 @@ TODO:
Upon Unstaging:
- move mei.h to include/linux/mei.h
- Documentation/ioctl/ioctl-number.txt
+ - move mei.txt under Documentation/mei/
+ - move mei-amt-version.c under Documentation/mei
+ - add hostprogs-y for mei-amt-version.c
- drop mei_version.h
- Updated MAINTAINERS
diff --git a/drivers/staging/mei/hw.h b/drivers/staging/mei/hw.h
index 9b9008cb6938..b08c90193f73 100644
--- a/drivers/staging/mei/hw.h
+++ b/drivers/staging/mei/hw.h
@@ -1,7 +1,7 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2011, Intel Corporation.
+ * Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/staging/mei/init.c b/drivers/staging/mei/init.c
index 4ac3696883cb..393da4bb2022 100644
--- a/drivers/staging/mei/init.c
+++ b/drivers/staging/mei/init.c
@@ -1,7 +1,7 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2011, Intel Corporation.
+ * Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/staging/mei/interface.c b/drivers/staging/mei/interface.c
index eb5df7fc2269..484be56a6594 100644
--- a/drivers/staging/mei/interface.c
+++ b/drivers/staging/mei/interface.c
@@ -1,7 +1,7 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2011, Intel Corporation.
+ * Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -215,26 +215,17 @@ int mei_count_full_read_slots(struct mei_device *dev)
* @buffer: message buffer will be written
* @buffer_length: message size will be read
*/
-void mei_read_slots(struct mei_device *dev,
- unsigned char *buffer, unsigned long buffer_length)
+void mei_read_slots(struct mei_device *dev, unsigned char *buffer,
+ unsigned long buffer_length)
{
- u32 i = 0;
- unsigned char temp_buf[sizeof(u32)];
+ u32 *reg_buf = (u32 *)buffer;
- while (buffer_length >= sizeof(u32)) {
- ((u32 *) buffer)[i] = mei_mecbrw_read(dev);
-
- dev_dbg(&dev->pdev->dev,
- "buffer[%d]= %d\n",
- i, ((u32 *) buffer)[i]);
-
- i++;
- buffer_length -= sizeof(u32);
- }
+ for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
+ *reg_buf++ = mei_mecbrw_read(dev);
if (buffer_length > 0) {
- *((u32 *) &temp_buf) = mei_mecbrw_read(dev);
- memcpy(&buffer[i * 4], temp_buf, buffer_length);
+ u32 reg = mei_mecbrw_read(dev);
+ memcpy(reg_buf, &reg, buffer_length);
}
dev->host_hw_state |= H_IG;
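
The rewritten mei_read_slots() drains the circular read register one 32-bit word at a time and only uses a bounce word plus memcpy() for a trailing partial word. A self-contained user-space sketch of the same copy pattern, with read_reg() as a hypothetical stand-in for mei_mecbrw_read():

    #include <stdint.h>
    #include <string.h>

    /* hypothetical stand-in for mei_mecbrw_read(); returns dummy data here */
    static uint32_t read_reg(void)
    {
    	return 0xdeadbeef;
    }

    static void read_slots(unsigned char *buffer, unsigned long length)
    {
    	uint32_t *reg_buf = (uint32_t *)buffer;

    	/* whole 32-bit words go straight into the caller's buffer */
    	for (; length >= sizeof(uint32_t); length -= sizeof(uint32_t))
    		*reg_buf++ = read_reg();

    	/* a trailing 1-3 bytes are taken from one more word read */
    	if (length > 0) {
    		uint32_t reg = read_reg();
    		memcpy(reg_buf, &reg, length);
    	}
    }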
diff --git a/drivers/staging/mei/interface.h b/drivers/staging/mei/interface.h
index aeae511419c7..e5bfb18865fc 100644
--- a/drivers/staging/mei/interface.h
+++ b/drivers/staging/mei/interface.h
@@ -1,7 +1,7 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2011, Intel Corporation.
+ * Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -33,7 +33,8 @@
void mei_read_slots(struct mei_device *dev,
- unsigned char *buffer, unsigned long buffer_length);
+ unsigned char *buffer,
+ unsigned long buffer_length);
int mei_write_message(struct mei_device *dev,
struct mei_msg_hdr *header,
diff --git a/drivers/staging/mei/interrupt.c b/drivers/staging/mei/interrupt.c
index 3544fee34e48..3c21c93af8ea 100644
--- a/drivers/staging/mei/interrupt.c
+++ b/drivers/staging/mei/interrupt.c
@@ -1,7 +1,7 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2011, Intel Corporation.
+ * Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -123,8 +123,7 @@ static int mei_irq_thread_read_amthi_message(struct mei_io_list *complete_list,
BUG_ON(mei_hdr->me_addr != dev->iamthif_cl.me_client_id);
BUG_ON(dev->iamthif_state != MEI_IAMTHIF_READING);
- buffer = (unsigned char *) (dev->iamthif_msg_buf +
- dev->iamthif_msg_buf_index);
+ buffer = dev->iamthif_msg_buf + dev->iamthif_msg_buf_index;
BUG_ON(dev->iamthif_mtu < dev->iamthif_msg_buf_index + mei_hdr->length);
mei_read_slots(dev, buffer, mei_hdr->length);
@@ -206,9 +205,7 @@ static int mei_irq_thread_read_client_message(struct mei_io_list *complete_list,
cl = (struct mei_cl *)cb_pos->file_private;
if (cl && _mei_irq_thread_state_ok(cl, mei_hdr)) {
cl->reading_state = MEI_READING;
- buffer = (unsigned char *)
- (cb_pos->response_buffer.data +
- cb_pos->information);
+ buffer = cb_pos->response_buffer.data + cb_pos->information;
if (cb_pos->response_buffer.size <
mei_hdr->length + cb_pos->information) {
@@ -247,8 +244,7 @@ static int mei_irq_thread_read_client_message(struct mei_io_list *complete_list,
quit:
dev_dbg(&dev->pdev->dev, "message read\n");
if (!buffer) {
- mei_read_slots(dev, (unsigned char *) dev->rd_msg_buf,
- mei_hdr->length);
+ mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length);
dev_dbg(&dev->pdev->dev, "discarding message, header =%08x.\n",
*(u32 *) dev->rd_msg_buf);
}
@@ -632,13 +628,11 @@ static void mei_irq_thread_read_bus_message(struct mei_device *dev,
struct hbm_host_stop_request *host_stop_req;
int res;
- unsigned char *buffer;
/* read the message to our buffer */
- buffer = (unsigned char *) dev->rd_msg_buf;
BUG_ON(mei_hdr->length >= sizeof(dev->rd_msg_buf));
- mei_read_slots(dev, buffer, mei_hdr->length);
- mei_msg = (struct mei_bus_message *) buffer;
+ mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length);
+ mei_msg = (struct mei_bus_message *)dev->rd_msg_buf;
switch (*(u8 *) mei_msg) {
case HOST_START_RES_CMD:
@@ -1423,7 +1417,7 @@ void mei_timer(struct work_struct *work)
if (dev->iamthif_stall_timer) {
if (--dev->iamthif_stall_timer == 0) {
- dev_dbg(&dev->pdev->dev, "reseting because of hang to amthi.\n");
+ dev_dbg(&dev->pdev->dev, "resetting because of hang to amthi.\n");
mei_reset(dev, 1);
dev->iamthif_msg_buf_size = 0;
dev->iamthif_msg_buf_index = 0;
diff --git a/drivers/staging/mei/iorw.c b/drivers/staging/mei/iorw.c
index 0752ead4269a..a3fbac9546f1 100644
--- a/drivers/staging/mei/iorw.c
+++ b/drivers/staging/mei/iorw.c
@@ -1,7 +1,7 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2011, Intel Corporation.
+ * Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/staging/mei/main.c b/drivers/staging/mei/main.c
index 1e1a9f996e7c..22afc92319b3 100644
--- a/drivers/staging/mei/main.c
+++ b/drivers/staging/mei/main.c
@@ -1,7 +1,7 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2011, Intel Corporation.
+ * Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/staging/mei/mei-amt-version.c b/drivers/staging/mei/mei-amt-version.c
new file mode 100644
index 000000000000..be04934cd329
--- /dev/null
+++ b/drivers/staging/mei/mei-amt-version.c
@@ -0,0 +1,479 @@
+/******************************************************************************
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Intel MEI Interface Header
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation.
+ * linux-mei@linux.intel.com
+ * http://www.intel.com
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <bits/wordsize.h>
+#include "mei.h"
+
+/*****************************************************************************
+ * Intel Management Engine Interface
+ *****************************************************************************/
+
+#define mei_msg(_me, fmt, ARGS...) do { \
+ if (_me->verbose) \
+ fprintf(stderr, fmt, ##ARGS); \
+} while (0)
+
+#define mei_err(_me, fmt, ARGS...) do { \
+ fprintf(stderr, "Error: " fmt, ##ARGS); \
+} while (0)
+
+struct mei {
+ uuid_le guid;
+ bool initialized;
+ bool verbose;
+ unsigned int buf_size;
+ unsigned char prot_ver;
+ int fd;
+};
+
+static void mei_deinit(struct mei *cl)
+{
+ if (cl->fd != -1)
+ close(cl->fd);
+ cl->fd = -1;
+ cl->buf_size = 0;
+ cl->prot_ver = 0;
+ cl->initialized = false;
+}
+
+static bool mei_init(struct mei *me, const uuid_le *guid,
+ unsigned char req_protocol_version, bool verbose)
+{
+ int result;
+ struct mei_client *cl;
+ struct mei_connect_client_data data;
+
+ mei_deinit(me);
+
+ me->verbose = verbose;
+
+ me->fd = open("/dev/mei", O_RDWR);
+ if (me->fd == -1) {
+ mei_err(me, "Cannot establish a handle to the Intel MEI driver\n");
+ goto err;
+ }
+ memcpy(&me->guid, guid, sizeof(*guid));
+ memset(&data, 0, sizeof(data));
+ me->initialized = true;
+
+ memcpy(&data.in_client_uuid, &me->guid, sizeof(me->guid));
+ result = ioctl(me->fd, IOCTL_MEI_CONNECT_CLIENT, &data);
+ if (result) {
+ mei_err(me, "IOCTL_MEI_CONNECT_CLIENT receive message. err=%d\n", result);
+ goto err;
+ }
+ cl = &data.out_client_properties;
+ mei_msg(me, "max_message_length %d\n", cl->max_msg_length);
+ mei_msg(me, "protocol_version %d\n", cl->protocol_version);
+
+ if ((req_protocol_version > 0) &&
+ (cl->protocol_version != req_protocol_version)) {
+ mei_err(me, "Intel MEI protocol version not supported\n");
+ goto err;
+ }
+
+ me->buf_size = cl->max_msg_length;
+ me->prot_ver = cl->protocol_version;
+
+ return true;
+err:
+ mei_deinit(me);
+ return false;
+}
+
+static ssize_t mei_recv_msg(struct mei *me, unsigned char *buffer,
+ ssize_t len, unsigned long timeout)
+{
+ ssize_t rc;
+
+ mei_msg(me, "call read length = %zd\n", len);
+
+ rc = read(me->fd, buffer, len);
+ if (rc < 0) {
+ mei_err(me, "read failed with status %zd %s\n",
+ rc, strerror(errno));
+ mei_deinit(me);
+ } else {
+ mei_msg(me, "read succeeded with result %zd\n", rc);
+ }
+ return rc;
+}
+
+static ssize_t mei_send_msg(struct mei *me, const unsigned char *buffer,
+ ssize_t len, unsigned long timeout)
+{
+ struct timeval tv;
+ ssize_t written;
+ ssize_t rc;
+ fd_set set;
+
+ tv.tv_sec = timeout / 1000;
+ tv.tv_usec = (timeout % 1000) * 1000;
+
+ mei_msg(me, "call write length = %zd\n", len);
+
+ written = write(me->fd, buffer, len);
+ if (written < 0) {
+ rc = -errno;
+ mei_err(me, "write failed with status %zd %s\n",
+ written, strerror(errno));
+ goto out;
+ }
+
+ FD_ZERO(&set);
+ FD_SET(me->fd, &set);
+ rc = select(me->fd + 1, &set, NULL, NULL, &tv);
+ if (rc > 0 && FD_ISSET(me->fd, &set)) {
+ mei_msg(me, "write success\n");
+ } else if (rc == 0) {
+ mei_err(me, "write failed on timeout with status\n");
+ goto out;
+ } else { /* rc < 0 */
+ mei_err(me, "write failed on select with status %zd\n", rc);
+ goto out;
+ }
+
+ rc = written;
+out:
+ if (rc < 0)
+ mei_deinit(me);
+
+ return rc;
+}
+
+/***************************************************************************
+ * Intel Advanced Management Technology ME Client
+ ***************************************************************************/
+
+#define AMT_MAJOR_VERSION 1
+#define AMT_MINOR_VERSION 1
+
+#define AMT_STATUS_SUCCESS 0x0
+#define AMT_STATUS_INTERNAL_ERROR 0x1
+#define AMT_STATUS_NOT_READY 0x2
+#define AMT_STATUS_INVALID_AMT_MODE 0x3
+#define AMT_STATUS_INVALID_MESSAGE_LENGTH 0x4
+
+#define AMT_STATUS_HOST_IF_EMPTY_RESPONSE 0x4000
+#define AMT_STATUS_SDK_RESOURCES 0x1004
+
+
+#define AMT_BIOS_VERSION_LEN 65
+#define AMT_VERSIONS_NUMBER 50
+#define AMT_UNICODE_STRING_LEN 20
+
+struct amt_unicode_string {
+ uint16_t length;
+ char string[AMT_UNICODE_STRING_LEN];
+} __attribute__((packed));
+
+struct amt_version_type {
+ struct amt_unicode_string description;
+ struct amt_unicode_string version;
+} __attribute__((packed));
+
+struct amt_version {
+ uint8_t major;
+ uint8_t minor;
+} __attribute__((packed));
+
+struct amt_code_versions {
+ uint8_t bios[AMT_BIOS_VERSION_LEN];
+ uint32_t count;
+ struct amt_version_type versions[AMT_VERSIONS_NUMBER];
+} __attribute__((packed));
+
+/***************************************************************************
+ * Intel Advanced Management Technology Host Interface
+ ***************************************************************************/
+
+struct amt_host_if_msg_header {
+ struct amt_version version;
+ uint16_t _reserved;
+ uint32_t command;
+ uint32_t length;
+} __attribute__((packed));
+
+struct amt_host_if_resp_header {
+ struct amt_host_if_msg_header header;
+ uint32_t status;
+ unsigned char data[0];
+} __attribute__((packed));
+
+const uuid_le MEI_IAMTHIF = UUID_LE(0x12f80028, 0xb4b7, 0x4b2d, \
+ 0xac, 0xa8, 0x46, 0xe0, 0xff, 0x65, 0x81, 0x4c);
+
+#define AMT_HOST_IF_CODE_VERSIONS_REQUEST 0x0400001A
+#define AMT_HOST_IF_CODE_VERSIONS_RESPONSE 0x0480001A
+
+const struct amt_host_if_msg_header CODE_VERSION_REQ = {
+ .version = {AMT_MAJOR_VERSION, AMT_MINOR_VERSION},
+ ._reserved = 0,
+ .command = AMT_HOST_IF_CODE_VERSIONS_REQUEST,
+ .length = 0
+};
+
+
+struct amt_host_if {
+ struct mei mei_cl;
+ unsigned long send_timeout;
+ bool initialized;
+};
+
+
+bool amt_host_if_init(struct amt_host_if *acmd,
+ unsigned long send_timeout, bool verbose)
+{
+ acmd->send_timeout = (send_timeout) ? send_timeout : 20000;
+ acmd->initialized = mei_init(&acmd->mei_cl, &MEI_IAMTHIF, 0, verbose);
+ return acmd->initialized;
+}
+
+void amt_host_if_deinit(struct amt_host_if *acmd)
+{
+ mei_deinit(&acmd->mei_cl);
+ acmd->initialized = false;
+}
+
+uint32_t amt_verify_code_versions(const struct amt_host_if_resp_header *resp)
+{
+ uint32_t status = AMT_STATUS_SUCCESS;
+ struct amt_code_versions *code_ver;
+ size_t code_ver_len;
+ uint32_t ver_type_cnt;
+ uint32_t len;
+ uint32_t i;
+
+ code_ver = (struct amt_code_versions *)resp->data;
+ /* length - sizeof(status) */
+ code_ver_len = resp->header.length - sizeof(uint32_t);
+ ver_type_cnt = code_ver_len -
+ sizeof(code_ver->bios) -
+ sizeof(code_ver->count);
+ if (code_ver->count != ver_type_cnt / sizeof(struct amt_version_type)) {
+ status = AMT_STATUS_INTERNAL_ERROR;
+ goto out;
+ }
+
+ for (i = 0; i < code_ver->count; i++) {
+ len = code_ver->versions[i].description.length;
+
+ if (len > AMT_UNICODE_STRING_LEN) {
+ status = AMT_STATUS_INTERNAL_ERROR;
+ goto out;
+ }
+
+ len = code_ver->versions[i].version.length;
+ if (code_ver->versions[i].version.string[len] != '\0' ||
+ len != strlen(code_ver->versions[i].version.string)) {
+ status = AMT_STATUS_INTERNAL_ERROR;
+ goto out;
+ }
+ }
+out:
+ return status;
+}
+
+uint32_t amt_verify_response_header(uint32_t command,
+ const struct amt_host_if_msg_header *resp_hdr,
+ uint32_t response_size)
+{
+ if (response_size < sizeof(struct amt_host_if_resp_header)) {
+ return AMT_STATUS_INTERNAL_ERROR;
+ } else if (response_size != (resp_hdr->length +
+ sizeof(struct amt_host_if_msg_header))) {
+ return AMT_STATUS_INTERNAL_ERROR;
+ } else if (resp_hdr->command != command) {
+ return AMT_STATUS_INTERNAL_ERROR;
+ } else if (resp_hdr->_reserved != 0) {
+ return AMT_STATUS_INTERNAL_ERROR;
+ } else if (resp_hdr->version.major != AMT_MAJOR_VERSION ||
+ resp_hdr->version.minor < AMT_MINOR_VERSION) {
+ return AMT_STATUS_INTERNAL_ERROR;
+ }
+ return AMT_STATUS_SUCCESS;
+}
+
+static uint32_t amt_host_if_call(struct amt_host_if *acmd,
+ const unsigned char *command, ssize_t command_sz,
+ uint8_t **read_buf, uint32_t rcmd,
+ unsigned int expected_sz)
+{
+ uint32_t in_buf_sz;
+ ssize_t out_buf_sz;
+ ssize_t written;
+ uint32_t status;
+ struct amt_host_if_resp_header *msg_hdr;
+
+ in_buf_sz = acmd->mei_cl.buf_size;
+ *read_buf = (uint8_t *)malloc(sizeof(uint8_t) * in_buf_sz);
+ if (*read_buf == NULL)
+ return AMT_STATUS_SDK_RESOURCES;
+ memset(*read_buf, 0, in_buf_sz);
+ msg_hdr = (struct amt_host_if_resp_header *)*read_buf;
+
+ written = mei_send_msg(&acmd->mei_cl,
+ command, command_sz, acmd->send_timeout);
+ if (written != command_sz)
+ return AMT_STATUS_INTERNAL_ERROR;
+
+ out_buf_sz = mei_recv_msg(&acmd->mei_cl, *read_buf, in_buf_sz, 2000);
+ if (out_buf_sz <= 0)
+ return AMT_STATUS_HOST_IF_EMPTY_RESPONSE;
+
+ status = msg_hdr->status;
+ if (status != AMT_STATUS_SUCCESS)
+ return status;
+
+ status = amt_verify_response_header(rcmd,
+ &msg_hdr->header, out_buf_sz);
+ if (status != AMT_STATUS_SUCCESS)
+ return status;
+
+ if (expected_sz && expected_sz != out_buf_sz)
+ return AMT_STATUS_INTERNAL_ERROR;
+
+ return AMT_STATUS_SUCCESS;
+}
+
+
+uint32_t amt_get_code_versions(struct amt_host_if *cmd,
+ struct amt_code_versions *versions)
+{
+ struct amt_host_if_resp_header *response = NULL;
+ uint32_t status;
+
+ status = amt_host_if_call(cmd,
+ (const unsigned char *)&CODE_VERSION_REQ,
+ sizeof(CODE_VERSION_REQ),
+ (uint8_t **)&response,
+ AMT_HOST_IF_CODE_VERSIONS_RESPONSE, 0);
+
+ if (status != AMT_STATUS_SUCCESS)
+ goto out;
+
+ status = amt_verify_code_versions(response);
+ if (status != AMT_STATUS_SUCCESS)
+ goto out;
+
+ memcpy(versions, response->data, sizeof(struct amt_code_versions));
+out:
+ if (response != NULL)
+ free(response);
+
+ return status;
+}
+
+/************************** end of amt_host_if_command ***********************/
+int main(int argc, char **argv)
+{
+ struct amt_code_versions ver;
+ struct amt_host_if acmd;
+ unsigned int i;
+ uint32_t status;
+ int ret;
+ bool verbose;
+
+ verbose = (argc > 1 && strcmp(argv[1], "-v") == 0);
+
+ if (!amt_host_if_init(&acmd, 5000, verbose)) {
+ ret = 1;
+ goto out;
+ }
+
+ status = amt_get_code_versions(&acmd, &ver);
+
+ switch (status) {
+ case AMT_STATUS_HOST_IF_EMPTY_RESPONSE:
+ printf("Intel AMT: DISABLED\n");
+ ret = 0;
+ break;
+ case AMT_STATUS_SUCCESS:
+ printf("Intel AMT: ENABLED\n");
+ for (i = 0; i < ver.count; i++) {
+ printf("%s:\t%s\n", ver.versions[i].description.string,
+ ver.versions[i].version.string);
+ }
+ ret = 0;
+ break;
+ default:
+ printf("An error has occurred\n");
+ ret = 1;
+ break;
+ }
+
+out:
+ return ret;
+}
diff --git a/drivers/staging/mei/mei.h b/drivers/staging/mei/mei.h
index 6da7c4f33f91..b09b11cbe30d 100644
--- a/drivers/staging/mei/mei.h
+++ b/drivers/staging/mei/mei.h
@@ -1,63 +1,68 @@
-/*
-
- Intel Management Engine Interface (Intel MEI) Linux driver
- Intel MEI Interface Header
-
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
-
- Copyright(c) 2003-2011 Intel Corporation. All rights reserved.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- Intel Corporation.
- linux-mei@linux.intel.com
- http://www.intel.com
-
-
- BSD LICENSE
-
- Copyright(c) 2003-2011 Intel Corporation. All rights reserved.
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-*/
-
+/******************************************************************************
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Intel MEI Interface Header
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation.
+ * linux-mei@linux.intel.com
+ * http://www.intel.com
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
#ifndef _LINUX_MEI_H
#define _LINUX_MEI_H
diff --git a/drivers/staging/mei/mei_dev.h b/drivers/staging/mei/mei_dev.h
index 82bacfc624c5..0d937b075322 100644
--- a/drivers/staging/mei/mei_dev.h
+++ b/drivers/staging/mei/mei_dev.h
@@ -1,7 +1,7 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2011, Intel Corporation.
+ * Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -30,6 +30,8 @@
#define MEI_WD_PARAMS_SIZE 4
#define MEI_WD_STATE_INDEPENDENCE_MSG_SENT (1 << 0)
+#define MEI_RD_MSG_BUF_SIZE (128 * sizeof(u32))
+
/*
* MEI PCI Device object
*/
@@ -125,7 +127,7 @@ enum mei_cb_major_types {
*/
struct mei_message_data {
u32 size;
- char *data;
+ unsigned char *data;
} __packed;
@@ -219,7 +221,7 @@ struct mei_device {
bool need_reset;
u32 extra_write_index;
- u32 rd_msg_buf[128]; /* used for control messages */
+ unsigned char rd_msg_buf[MEI_RD_MSG_BUF_SIZE]; /* control messages */
u32 wr_msg_buf[128]; /* used for control messages */
u32 ext_msg_buf[8]; /* for control responses */
u32 rd_msg_hdr;
diff --git a/drivers/staging/mei/mei_version.h b/drivers/staging/mei/mei_version.h
index 075bad8f0bf5..21955e7921c3 100644
--- a/drivers/staging/mei/mei_version.h
+++ b/drivers/staging/mei/mei_version.h
@@ -1,7 +1,7 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2011, Intel Corporation.
+ * Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/staging/mei/wd.c b/drivers/staging/mei/wd.c
index 8094941a98f1..f397835d7be9 100644
--- a/drivers/staging/mei/wd.c
+++ b/drivers/staging/mei/wd.c
@@ -1,7 +1,7 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2011, Intel Corporation.
+ * Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/staging/nvec/Kconfig b/drivers/staging/nvec/Kconfig
index 86a8b8c418c0..731301f524a6 100644
--- a/drivers/staging/nvec/Kconfig
+++ b/drivers/staging/nvec/Kconfig
@@ -7,21 +7,21 @@ config MFD_NVEC
config KEYBOARD_NVEC
bool "Keyboard on nVidia compliant EC"
- depends on MFD_NVEC && INPUT=y
+ depends on MFD_NVEC && INPUT
help
Say Y here to enable support for a keyboard connected to
a nVidia compliant embedded controller.
config SERIO_NVEC_PS2
bool "PS2 on nVidia EC"
- depends on MFD_NVEC && MOUSE_PS2
+ depends on MFD_NVEC && SERIO
help
Say Y here to enable support for a Touchpad / Mouse connected
to a nVidia compliant embedded controller.
config NVEC_POWER
bool "NVEC charger and battery"
- depends on MFD_NVEC && POWER_SUPPLY=y
+ depends on MFD_NVEC && POWER_SUPPLY
help
Say Y to enable support for battery and charger interface for
nVidia compliant embedded controllers.
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index fafdfa25e139..3c60088871e0 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -49,7 +49,7 @@
#define I2C_CNFG_DEBOUNCE_CNT_SHIFT 12
#define I2C_SL_CNFG 0x20
-#define I2C_SL_NEWL (1<<2)
+#define I2C_SL_NEWSL (1<<2)
#define I2C_SL_NACK (1<<1)
#define I2C_SL_RESP (1<<0)
#define I2C_SL_IRQ (1<<3)
@@ -687,7 +687,7 @@ static void tegra_init_i2c_slave(struct nvec_chip *nvec)
clk_set_rate(nvec->i2c_clk, 8 * 80000);
- writel(I2C_SL_NEWL, nvec->base + I2C_SL_CNFG);
+ writel(I2C_SL_NEWSL, nvec->base + I2C_SL_CNFG);
writel(0x1E, nvec->base + I2C_SL_DELAY_COUNT);
writel(nvec->i2c_addr>>1, nvec->base + I2C_SL_ADDR1);
@@ -701,7 +701,7 @@ static void tegra_init_i2c_slave(struct nvec_chip *nvec)
static void nvec_disable_i2c_slave(struct nvec_chip *nvec)
{
disable_irq(nvec->irq);
- writel(I2C_SL_NEWL | I2C_SL_NACK, nvec->base + I2C_SL_CNFG);
+ writel(I2C_SL_NEWSL | I2C_SL_NACK, nvec->base + I2C_SL_CNFG);
clk_disable(nvec->i2c_clk);
}
@@ -784,11 +784,6 @@ static int __devinit tegra_nvec_probe(struct platform_device *pdev)
nvec->i2c_clk = i2c_clk;
nvec->rx = &nvec->msg_pool[0];
- /* Set the gpio to low when we've got something to say */
- err = gpio_request(nvec->gpio, "nvec gpio");
- if (err < 0)
- dev_err(nvec->dev, "couldn't request gpio\n");
-
ATOMIC_INIT_NOTIFIER_HEAD(&nvec->notifier_list);
init_completion(&nvec->sync_write);
@@ -802,6 +797,12 @@ static int __devinit tegra_nvec_probe(struct platform_device *pdev)
INIT_WORK(&nvec->tx_work, nvec_request_master);
nvec->wq = alloc_workqueue("nvec", WQ_NON_REENTRANT, 2);
+ err = gpio_request_one(nvec->gpio, GPIOF_OUT_INIT_HIGH, "nvec gpio");
+ if (err < 0) {
+ dev_err(nvec->dev, "couldn't request gpio\n");
+ goto failed;
+ }
+
err = request_irq(nvec->irq, nvec_interrupt, 0, "nvec", nvec);
if (err) {
dev_err(nvec->dev, "couldn't request irq\n");
@@ -813,8 +814,6 @@ static int __devinit tegra_nvec_probe(struct platform_device *pdev)
clk_enable(i2c_clk);
- gpio_direction_output(nvec->gpio, 1);
- gpio_set_value(nvec->gpio, 1);
/* enable event reporting */
nvec_write_async(nvec, EC_ENABLE_EVENT_REPORTING,
diff --git a/drivers/staging/nvec/nvec_ps2.c b/drivers/staging/nvec/nvec_ps2.c
index 742f5ccfe763..14a6f687cf75 100644
--- a/drivers/staging/nvec/nvec_ps2.c
+++ b/drivers/staging/nvec/nvec_ps2.c
@@ -21,10 +21,18 @@
#include "nvec.h"
-#define START_STREAMING {'\x06', '\x03', '\x04'}
+#define START_STREAMING {'\x06', '\x03', '\x06'}
#define STOP_STREAMING {'\x06', '\x04'}
#define SEND_COMMAND {'\x06', '\x01', '\xf4', '\x01'}
+#ifdef NVEC_PS2_DEBUG
+#define NVEC_PHD(str, buf, len) \
+ print_hex_dump(KERN_DEBUG, str, DUMP_PREFIX_NONE, \
+ 16, 1, buf, len, false)
+#else
+#define NVEC_PHD(str, buf, len)
+#endif
+
static const unsigned char MOUSE_RESET[] = {'\x06', '\x01', '\xff', '\x03'};
struct nvec_ps2 {
@@ -67,18 +75,18 @@ static int nvec_ps2_notifier(struct notifier_block *nb,
case NVEC_PS2_EVT:
for (i = 0; i < msg[1]; i++)
serio_interrupt(ps2_dev.ser_dev, msg[2 + i], 0);
+ NVEC_PHD("ps/2 mouse event: ", &msg[2], msg[1]);
return NOTIFY_STOP;
case NVEC_PS2:
- if (msg[2] == 1)
+ if (msg[2] == 1) {
for (i = 0; i < (msg[1] - 2); i++)
serio_interrupt(ps2_dev.ser_dev, msg[i + 4], 0);
- else if (msg[1] != 2) { /* !ack */
- print_hex_dump(KERN_WARNING, "unhandled mouse event: ",
- DUMP_PREFIX_NONE, 16, 1,
- msg, msg[1] + 2, true);
+ NVEC_PHD("ps/2 mouse reply: ", &msg[4], msg[1] - 2);
}
+ else if (msg[1] != 2) /* !ack */
+ NVEC_PHD("unhandled mouse event: ", msg, msg[1] + 2);
return NOTIFY_STOP;
}
@@ -90,10 +98,10 @@ static int __devinit nvec_mouse_probe(struct platform_device *pdev)
struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
struct serio *ser_dev = kzalloc(sizeof(struct serio), GFP_KERNEL);
- ser_dev->id.type = SERIO_8042;
+ ser_dev->id.type = SERIO_PS_PSTHRU;
ser_dev->write = ps2_sendcommand;
- ser_dev->open = ps2_startstreaming;
- ser_dev->close = ps2_stopstreaming;
+ ser_dev->start = ps2_startstreaming;
+ ser_dev->stop = ps2_stopstreaming;
strlcpy(ser_dev->name, "nvec mouse", sizeof(ser_dev->name));
strlcpy(ser_dev->phys, "nvec", sizeof(ser_dev->phys));
@@ -111,8 +119,35 @@ static int __devinit nvec_mouse_probe(struct platform_device *pdev)
return 0;
}
+static int nvec_mouse_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
+
+ /* disable mouse */
+ nvec_write_async(nvec, "\x06\xf4", 2);
+
+ /* send cancel autoreceive */
+ nvec_write_async(nvec, "\x06\x04", 2);
+
+ return 0;
+}
+
+static int nvec_mouse_resume(struct platform_device *pdev)
+{
+ struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
+
+ ps2_startstreaming(ps2_dev.ser_dev);
+
+ /* enable mouse */
+ nvec_write_async(nvec, "\x06\xf5", 2);
+
+ return 0;
+}
+
static struct platform_driver nvec_mouse_driver = {
.probe = nvec_mouse_probe,
+ .suspend = nvec_mouse_suspend,
+ .resume = nvec_mouse_resume,
.driver = {
.name = "nvec-mouse",
.owner = THIS_MODULE,
diff --git a/drivers/staging/omapdrm/omap_gem_helpers.c b/drivers/staging/omapdrm/omap_gem_helpers.c
index 29275c7209e9..f895363a5e54 100644
--- a/drivers/staging/omapdrm/omap_gem_helpers.c
+++ b/drivers/staging/omapdrm/omap_gem_helpers.c
@@ -84,7 +84,7 @@ fail:
page_cache_release(pages[i]);
}
drm_free_large(pages);
- return ERR_PTR(PTR_ERR(p));
+ return ERR_CAST(p);
}
/**
diff --git a/drivers/staging/quickstart/quickstart.c b/drivers/staging/quickstart/quickstart.c
index c60911c6ab3f..88f8ff38662d 100644
--- a/drivers/staging/quickstart/quickstart.c
+++ b/drivers/staging/quickstart/quickstart.c
@@ -4,7 +4,7 @@
*
* Copyright (C) 2007-2010 Angelo Arrifano <miknix@gmail.com>
*
- * Information gathered from disassebled dsdt and from here:
+ * Information gathered from disassembled dsdt and from here:
* <http://www.microsoft.com/whdc/system/platform/firmware/DirAppLaunch.mspx>
*
* This program is free software; you can redistribute it and/or modify
@@ -25,6 +25,8 @@
#define QUICKSTART_VERSION "1.03"
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -37,118 +39,73 @@ MODULE_AUTHOR("Angelo Arrifano");
MODULE_DESCRIPTION("ACPI Direct App Launch driver");
MODULE_LICENSE("GPL");
-#define QUICKSTART_ACPI_DEVICE_NAME "quickstart"
-#define QUICKSTART_ACPI_CLASS "quickstart"
-#define QUICKSTART_ACPI_HID "PNP0C32"
-
-#define QUICKSTART_PF_DRIVER_NAME "quickstart"
-#define QUICKSTART_PF_DEVICE_NAME "quickstart"
-#define QUICKSTART_PF_DEVATTR_NAME "pressed_button"
+#define QUICKSTART_ACPI_DEVICE_NAME "quickstart"
+#define QUICKSTART_ACPI_CLASS "quickstart"
+#define QUICKSTART_ACPI_HID "PNP0C32"
-#define QUICKSTART_MAX_BTN_NAME_LEN 16
+#define QUICKSTART_PF_DRIVER_NAME "quickstart"
+#define QUICKSTART_PF_DEVICE_NAME "quickstart"
-/* There will be two events:
- * 0x02 - A hot button was pressed while device was off/sleeping.
- * 0x80 - A hot button was pressed while device was up. */
-#define QUICKSTART_EVENT_WAKE 0x02
-#define QUICKSTART_EVENT_RUNTIME 0x80
+/*
+ * There will be two events:
+ * 0x02 - A hot button was pressed while device was off/sleeping.
+ * 0x80 - A hot button was pressed while device was up.
+ */
+#define QUICKSTART_EVENT_WAKE 0x02
+#define QUICKSTART_EVENT_RUNTIME 0x80
-struct quickstart_btn {
+struct quickstart_button {
char *name;
unsigned int id;
- struct quickstart_btn *next;
+ struct list_head list;
};
-static struct quickstart_driver_data {
- struct quickstart_btn *btn_lst;
- struct quickstart_btn *pressed;
-} quickstart_data;
-
-/* ACPI driver Structs */
struct quickstart_acpi {
struct acpi_device *device;
- struct quickstart_btn *btn;
-};
-static int quickstart_acpi_add(struct acpi_device *device);
-static int quickstart_acpi_remove(struct acpi_device *device, int type);
-static const struct acpi_device_id quickstart_device_ids[] = {
- {QUICKSTART_ACPI_HID, 0},
- {"", 0},
+ struct quickstart_button *button;
};
-static struct acpi_driver quickstart_acpi_driver = {
- .name = "quickstart",
- .class = QUICKSTART_ACPI_CLASS,
- .ids = quickstart_device_ids,
- .ops = {
- .add = quickstart_acpi_add,
- .remove = quickstart_acpi_remove,
- },
-};
+static LIST_HEAD(buttons);
+static struct quickstart_button *pressed;
-/* Input device structs */
-struct input_dev *quickstart_input;
+static struct input_dev *quickstart_input;
-/* Platform driver structs */
-static ssize_t buttons_show(struct device *dev,
- struct device_attribute *attr,
- char *buf);
-static ssize_t pressed_button_show(struct device *dev,
+/* Platform driver functions */
+static ssize_t quickstart_buttons_show(struct device *dev,
struct device_attribute *attr,
- char *buf);
-static ssize_t pressed_button_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count);
-static DEVICE_ATTR(pressed_button, 0666, pressed_button_show,
- pressed_button_store);
-static DEVICE_ATTR(buttons, 0444, buttons_show, NULL);
-static struct platform_device *pf_device;
-static struct platform_driver pf_driver = {
- .driver = {
- .name = QUICKSTART_PF_DRIVER_NAME,
- .owner = THIS_MODULE,
- }
-};
-
-/*
- * Platform driver functions
- */
-static ssize_t buttons_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ char *buf)
{
int count = 0;
- struct quickstart_btn *ptr = quickstart_data.btn_lst;
+ struct quickstart_button *b;
- if (!ptr)
+ if (list_empty(&buttons))
return snprintf(buf, PAGE_SIZE, "none");
- while (ptr && (count < PAGE_SIZE)) {
- if (ptr->name) {
- count += snprintf(buf + count,
- PAGE_SIZE - count,
- "%d\t%s\n", ptr->id, ptr->name);
+ list_for_each_entry(b, &buttons, list) {
+ count += snprintf(buf + count, PAGE_SIZE - count, "%u\t%s\n",
+ b->id, b->name);
+
+ if (count >= PAGE_SIZE) {
+ count = PAGE_SIZE;
+ break;
}
- ptr = ptr->next;
}
return count;
}
-static ssize_t pressed_button_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t quickstart_pressed_button_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
return snprintf(buf, PAGE_SIZE, "%s\n",
- (quickstart_data.pressed ?
- quickstart_data.pressed->name : "none"));
+ (pressed ? pressed->name : "none"));
}
-static ssize_t pressed_button_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t quickstart_pressed_button_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
if (count < 2)
return -EINVAL;
@@ -156,60 +113,40 @@ static ssize_t pressed_button_store(struct device *dev,
if (strncasecmp(buf, "none", 4) != 0)
return -EINVAL;
- quickstart_data.pressed = NULL;
+ pressed = NULL;
return count;
}
-/* Hotstart Helper functions */
-static int quickstart_btnlst_add(struct quickstart_btn **data)
+/* Helper functions */
+static struct quickstart_button *quickstart_buttons_add(void)
{
- struct quickstart_btn **ptr = &quickstart_data.btn_lst;
+ struct quickstart_button *b;
- while (*ptr)
- ptr = &((*ptr)->next);
+ b = kzalloc(sizeof(*b), GFP_KERNEL);
+ if (!b)
+ return NULL;
- *ptr = kzalloc(sizeof(struct quickstart_btn), GFP_KERNEL);
- if (!*ptr) {
- *data = NULL;
- return -ENOMEM;
- }
- *data = *ptr;
+ list_add_tail(&b->list, &buttons);
- return 0;
+ return b;
}
-static void quickstart_btnlst_del(struct quickstart_btn *data)
+static void quickstart_button_del(struct quickstart_button *data)
{
- struct quickstart_btn **ptr = &quickstart_data.btn_lst;
-
if (!data)
return;
- while (*ptr) {
- if (*ptr == data) {
- *ptr = (*ptr)->next;
- kfree(data);
- return;
- }
- ptr = &((*ptr)->next);
- }
-
- return;
+ list_del(&data->list);
+ kfree(data->name);
+ kfree(data);
}
-static void quickstart_btnlst_free(void)
+static void quickstart_buttons_free(void)
{
- struct quickstart_btn *ptr = quickstart_data.btn_lst;
- struct quickstart_btn *lptr = NULL;
-
- while (ptr) {
- lptr = ptr;
- ptr = ptr->next;
- kfree(lptr->name);
- kfree(lptr);
- }
+ struct quickstart_button *b, *n;
- return;
+ list_for_each_entry_safe(b, n, &buttons, list)
+ quickstart_button_del(b);
}
/* ACPI Driver functions */
@@ -220,107 +157,136 @@ static void quickstart_acpi_notify(acpi_handle handle, u32 event, void *data)
if (!quickstart)
return;
- if (event == QUICKSTART_EVENT_WAKE)
- quickstart_data.pressed = quickstart->btn;
- else if (event == QUICKSTART_EVENT_RUNTIME) {
- input_report_key(quickstart_input, quickstart->btn->id, 1);
+ switch (event) {
+ case QUICKSTART_EVENT_WAKE:
+ pressed = quickstart->button;
+ break;
+ case QUICKSTART_EVENT_RUNTIME:
+ input_report_key(quickstart_input, quickstart->button->id, 1);
input_sync(quickstart_input);
- input_report_key(quickstart_input, quickstart->btn->id, 0);
+ input_report_key(quickstart_input, quickstart->button->id, 0);
input_sync(quickstart_input);
+ break;
+ default:
+ pr_err("Unexpected ACPI event notify (%u)\n", event);
+ break;
}
- return;
}
-static void quickstart_acpi_ghid(struct quickstart_acpi *quickstart)
+static int quickstart_acpi_ghid(struct quickstart_acpi *quickstart)
{
acpi_status status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- uint32_t usageid = 0;
-
- if (!quickstart)
- return;
+ int ret = 0;
- /* This returns a buffer telling the button usage ID,
- * and triggers pending notify events (The ones before booting). */
- status = acpi_evaluate_object(quickstart->device->handle,
- "GHID", NULL, &buffer);
- if (ACPI_FAILURE(status) || !buffer.pointer) {
- printk(KERN_ERR "quickstart: %s GHID method failed.\n",
- quickstart->btn->name);
- return;
+ /*
+ * This returns a buffer telling the button usage ID,
+ * and triggers pending notify events (The ones before booting).
+ */
+ status = acpi_evaluate_object(quickstart->device->handle, "GHID", NULL,
+ &buffer);
+ if (ACPI_FAILURE(status)) {
+ pr_err("%s GHID method failed\n", quickstart->button->name);
+ return -EINVAL;
}
- if (buffer.length < 8)
- return;
-
- /* <<The GHID method can return a BYTE, WORD, or DWORD.
+ /*
+ * <<The GHID method can return a BYTE, WORD, or DWORD.
* The value must be encoded in little-endian byte
- * order (least significant byte first).>> */
- usageid = *((uint32_t *)(buffer.pointer + (buffer.length - 8)));
- quickstart->btn->id = usageid;
+ * order (least significant byte first).>>
+ */
+ switch (buffer.length) {
+ case 1:
+ quickstart->button->id = *(uint8_t *)buffer.pointer;
+ break;
+ case 2:
+ quickstart->button->id = *(uint16_t *)buffer.pointer;
+ break;
+ case 4:
+ quickstart->button->id = *(uint32_t *)buffer.pointer;
+ break;
+ case 8:
+ quickstart->button->id = *(uint64_t *)buffer.pointer;
+ break;
+ default:
+ pr_err("%s GHID method returned buffer of unexpected length %u\n",
+ quickstart->button->name, buffer.length);
+ ret = -EINVAL;
+ break;
+ }
kfree(buffer.pointer);
+
+ return ret;
}
-static int quickstart_acpi_config(struct quickstart_acpi *quickstart, char *bid)
+static int quickstart_acpi_config(struct quickstart_acpi *quickstart)
{
- int len = strlen(bid);
- int ret;
+ char *bid = acpi_device_bid(quickstart->device);
+ char *name;
- /* Add button to list */
- ret = quickstart_btnlst_add(&quickstart->btn);
- if (ret)
- return ret;
+ name = kmalloc(strlen(bid) + 1, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
- quickstart->btn->name = kzalloc(len + 1, GFP_KERNEL);
- if (!quickstart->btn->name) {
- quickstart_btnlst_free();
+ /* Add new button to list */
+ quickstart->button = quickstart_buttons_add();
+ if (!quickstart->button) {
+ kfree(name);
return -ENOMEM;
}
- strcpy(quickstart->btn->name, bid);
+
+ quickstart->button->name = name;
+ strcpy(quickstart->button->name, bid);
return 0;
}
static int quickstart_acpi_add(struct acpi_device *device)
{
- int ret = 0;
- acpi_status status = AE_OK;
- struct quickstart_acpi *quickstart = NULL;
+ int ret;
+ acpi_status status;
+ struct quickstart_acpi *quickstart;
if (!device)
return -EINVAL;
- quickstart = kzalloc(sizeof(struct quickstart_acpi), GFP_KERNEL);
+ quickstart = kzalloc(sizeof(*quickstart), GFP_KERNEL);
if (!quickstart)
return -ENOMEM;
quickstart->device = device;
+
strcpy(acpi_device_name(device), QUICKSTART_ACPI_DEVICE_NAME);
strcpy(acpi_device_class(device), QUICKSTART_ACPI_CLASS);
device->driver_data = quickstart;
/* Add button to list and initialize some stuff */
- ret = quickstart_acpi_config(quickstart, acpi_device_bid(device));
- if (ret)
+ ret = quickstart_acpi_config(quickstart);
+ if (ret < 0)
goto fail_config;
- status = acpi_install_notify_handler(device->handle,
- ACPI_ALL_NOTIFY,
+ status = acpi_install_notify_handler(device->handle, ACPI_ALL_NOTIFY,
quickstart_acpi_notify,
quickstart);
if (ACPI_FAILURE(status)) {
- printk(KERN_ERR "quickstart: Notify handler install error\n");
+ pr_err("Notify handler install error\n");
ret = -ENODEV;
goto fail_installnotify;
}
- quickstart_acpi_ghid(quickstart);
+ ret = quickstart_acpi_ghid(quickstart);
+ if (ret < 0)
+ goto fail_ghid;
return 0;
+fail_ghid:
+ acpi_remove_notify_handler(device->handle, ACPI_ALL_NOTIFY,
+ quickstart_acpi_notify);
+
fail_installnotify:
- quickstart_btnlst_del(quickstart->btn);
+ quickstart_button_del(quickstart->button);
fail_config:
@@ -331,28 +297,54 @@ fail_config:
static int quickstart_acpi_remove(struct acpi_device *device, int type)
{
- acpi_status status = 0;
- struct quickstart_acpi *quickstart = NULL;
+ acpi_status status;
+ struct quickstart_acpi *quickstart;
- if (!device || !acpi_driver_data(device))
+ if (!device)
return -EINVAL;
quickstart = acpi_driver_data(device);
+ if (!quickstart)
+ return -EINVAL;
- status = acpi_remove_notify_handler(device->handle,
- ACPI_ALL_NOTIFY,
- quickstart_acpi_notify);
+ status = acpi_remove_notify_handler(device->handle, ACPI_ALL_NOTIFY,
+ quickstart_acpi_notify);
if (ACPI_FAILURE(status))
- printk(KERN_ERR "quickstart: Error removing notify handler\n");
-
+ pr_err("Error removing notify handler\n");
kfree(quickstart);
return 0;
}
-/* Module functions */
+/* Platform driver structs */
+static DEVICE_ATTR(pressed_button, 0666, quickstart_pressed_button_show,
+ quickstart_pressed_button_store);
+static DEVICE_ATTR(buttons, 0444, quickstart_buttons_show, NULL);
+static struct platform_device *pf_device;
+static struct platform_driver pf_driver = {
+ .driver = {
+ .name = QUICKSTART_PF_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ }
+};
+
+static const struct acpi_device_id quickstart_device_ids[] = {
+ {QUICKSTART_ACPI_HID, 0},
+ {"", 0},
+};
+
+static struct acpi_driver quickstart_acpi_driver = {
+ .name = "quickstart",
+ .class = QUICKSTART_ACPI_CLASS,
+ .ids = quickstart_device_ids,
+ .ops = {
+ .add = quickstart_acpi_add,
+ .remove = quickstart_acpi_remove,
+ },
+};
+/* Module functions */
static void quickstart_exit(void)
{
input_unregister_device(quickstart_input);
@@ -366,15 +358,12 @@ static void quickstart_exit(void)
acpi_bus_unregister_driver(&quickstart_acpi_driver);
- quickstart_btnlst_free();
-
- return;
+ quickstart_buttons_free();
}
static int __init quickstart_init_input(void)
{
- struct quickstart_btn **ptr = &quickstart_data.btn_lst;
- int count;
+ struct quickstart_button *b;
int ret;
quickstart_input = input_allocate_device();
@@ -385,11 +374,9 @@ static int __init quickstart_init_input(void)
quickstart_input->name = "Quickstart ACPI Buttons";
quickstart_input->id.bustype = BUS_HOST;
- while (*ptr) {
- count++;
+ list_for_each_entry(b, &buttons, list) {
set_bit(EV_KEY, quickstart_input->evbit);
- set_bit((*ptr)->id, quickstart_input->keybit);
- ptr = &((*ptr)->next);
+ set_bit(b->id, quickstart_input->keybit);
}
ret = input_register_device(quickstart_input);
@@ -415,7 +402,7 @@ static int __init quickstart_init(void)
return ret;
/* If existing bus with no devices */
- if (!quickstart_data.btn_lst) {
+ if (list_empty(&buttons)) {
ret = -ENODEV;
goto fail_pfdrv_reg;
}
@@ -444,14 +431,12 @@ static int __init quickstart_init(void)
if (ret)
goto fail_dev_file2;
-
/* Input device */
ret = quickstart_init_input();
if (ret)
goto fail_input;
- printk(KERN_INFO "quickstart: ACPI Direct App Launch ver %s\n",
- QUICKSTART_VERSION);
+ pr_info("ACPI Direct App Launch ver %s\n", QUICKSTART_VERSION);
return 0;
fail_input:
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index 6c5061f12bad..13979b5ea32a 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -2453,7 +2453,7 @@ static inline void update_network(struct rtllib_network *dst,
if (src->wmm_param[0].ac_aci_acm_aifsn ||
src->wmm_param[1].ac_aci_acm_aifsn ||
src->wmm_param[2].ac_aci_acm_aifsn ||
- src->wmm_param[1].ac_aci_acm_aifsn)
+ src->wmm_param[3].ac_aci_acm_aifsn)
memcpy(dst->wmm_param, src->wmm_param, WME_AC_PRAM_LEN);
dst->SignalStrength = src->SignalStrength;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index c9bdc7f6bdce..be2a28cf8edd 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -237,7 +237,7 @@ ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb,
#ifdef NOT_YET
if (ieee->iw_mode == IW_MODE_MASTER) {
- printk(KERN_DEBUG "%s: Master mode not yet suppported.\n",
+ printk(KERN_DEBUG "%s: Master mode not yet supported.\n",
ieee->dev->name);
return 0;
/*
diff --git a/drivers/staging/rtl8712/Kconfig b/drivers/staging/rtl8712/Kconfig
index ea37473f71e5..6a43312380e0 100644
--- a/drivers/staging/rtl8712/Kconfig
+++ b/drivers/staging/rtl8712/Kconfig
@@ -9,13 +9,6 @@ config R8712U
This option adds the Realtek RTL8712 USB device such as the D-Link DWA-130.
If built as a module, it will be called r8712u.
-config R8712_AP
- bool "Realtek RTL8712U AP code"
- depends on R8712U
- default N
- ---help---
- This option allows the Realtek RTL8712 USB device to be an Access Point.
-
config R8712_TX_AGGR
bool "Realtek RTL8712U Transmit Aggregation code"
depends on R8712U && BROKEN
diff --git a/drivers/staging/rtl8712/drv_types.h b/drivers/staging/rtl8712/drv_types.h
index ed85b4415207..e83665d06020 100644
--- a/drivers/staging/rtl8712/drv_types.h
+++ b/drivers/staging/rtl8712/drv_types.h
@@ -140,7 +140,6 @@ struct dvobj_priv {
u8 ishighspeed;
uint(*inirp_init)(struct _adapter *adapter);
uint(*inirp_deinit)(struct _adapter *adapter);
- struct semaphore usb_suspend_sema;
struct usb_device *pusbdev;
};
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
index 98a3d684f9b2..b22fea53a746 100644
--- a/drivers/staging/rtl8712/os_intfs.c
+++ b/drivers/staging/rtl8712/os_intfs.c
@@ -330,7 +330,6 @@ u8 r8712_init_drv_sw(struct _adapter *padapter)
padapter->stapriv.padapter = padapter;
r8712_init_bcmc_stainfo(padapter);
r8712_init_pwrctrl_priv(padapter);
- sema_init(&(padapter->pwrctrlpriv.pnp_pwr_mgnt_sema), 0);
mp871xinit(padapter);
if (init_default_value(padapter) != _SUCCESS)
return _FAIL;
diff --git a/drivers/staging/rtl8712/osdep_service.h b/drivers/staging/rtl8712/osdep_service.h
index 1ee943a58c4c..9ba603310fdc 100644
--- a/drivers/staging/rtl8712/osdep_service.h
+++ b/drivers/staging/rtl8712/osdep_service.h
@@ -72,18 +72,6 @@ static inline struct list_head *get_list_head(struct __queue *queue)
#define LIST_CONTAINOR(ptr, type, member) \
((type *)((char *)(ptr)-(SIZE_T)(&((type *)0)->member)))
-static inline void _enter_hwio_critical(struct semaphore *prwlock,
- unsigned long *pirqL)
-{
- down(prwlock);
-}
-
-static inline void _exit_hwio_critical(struct semaphore *prwlock,
- unsigned long *pirqL)
-{
- up(prwlock);
-}
-
static inline void list_delete(struct list_head *plist)
{
list_del_init(plist);
@@ -152,11 +140,6 @@ static inline u32 _down_sema(struct semaphore *sema)
return _SUCCESS;
}
-static inline void _rtl_rwlock_init(struct semaphore *prwlock)
-{
- sema_init(prwlock, 1);
-}
-
static inline void _init_listhead(struct list_head *list)
{
INIT_LIST_HEAD(list);
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index 6d692657e784..fa6dc9c09b3f 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -55,8 +55,6 @@ int r8712_init_recv_priv(struct recv_priv *precvpriv, struct _adapter *padapter)
int alignment = 0;
struct sk_buff *pskb = NULL;
- sema_init(&precvpriv->recv_sema, 0);
- sema_init(&precvpriv->terminate_recvthread_sema, 0);
/*init recv_buf*/
_init_queue(&precvpriv->free_recv_buf_queue);
precvpriv->pallocated_recv_buf = _malloc(NR_RECVBUFF *
diff --git a/drivers/staging/rtl8712/rtl871x_io.c b/drivers/staging/rtl8712/rtl871x_io.c
index ca84ee02eacc..abc1c97378f7 100644
--- a/drivers/staging/rtl8712/rtl871x_io.c
+++ b/drivers/staging/rtl8712/rtl871x_io.c
@@ -131,7 +131,6 @@ uint r8712_alloc_io_queue(struct _adapter *adapter)
pio_req = (struct io_req *)(pio_queue->free_ioreqs_buf);
for (i = 0; i < NUM_IOREQ; i++) {
_init_listhead(&pio_req->list);
- sema_init(&pio_req->sema, 0);
list_insert_tail(&pio_req->list, &pio_queue->free_ioreqs);
pio_req++;
}
diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
index 86308a0093ed..d3d8727c2ec5 100644
--- a/drivers/staging/rtl8712/rtl871x_io.h
+++ b/drivers/staging/rtl8712/rtl871x_io.h
@@ -117,7 +117,6 @@ struct io_req {
u32 command;
u32 status;
u8 *pbuf;
- struct semaphore sema;
void (*_async_io_callback)(struct _adapter *padater,
struct io_req *pio_req, u8 *cnxt);
u8 *cnxt;
diff --git a/drivers/staging/rtl8712/rtl871x_pwrctrl.c b/drivers/staging/rtl8712/rtl871x_pwrctrl.c
index 23e72a0401a8..9fd2ec7596cc 100644
--- a/drivers/staging/rtl8712/rtl871x_pwrctrl.c
+++ b/drivers/staging/rtl8712/rtl871x_pwrctrl.c
@@ -100,7 +100,6 @@ void r8712_cpwm_int_hdl(struct _adapter *padapter,
{
struct pwrctrl_priv *pwrpriv = &(padapter->pwrctrlpriv);
struct cmd_priv *pcmdpriv = &(padapter->cmdpriv);
- struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
if (pwrpriv->cpwm_tog == ((preportpwrstate->state) & 0x80))
return;
@@ -110,8 +109,6 @@ void r8712_cpwm_int_hdl(struct _adapter *padapter,
if (pwrpriv->cpwm >= PS_STATE_S2) {
if (pwrpriv->alives & CMD_ALIVE)
up(&(pcmdpriv->cmd_queue_sema));
- if (pwrpriv->alives & XMIT_ALIVE)
- up(&(pxmitpriv->xmit_sema));
}
pwrpriv->cpwm_tog = (preportpwrstate->state) & 0x80;
up(&pwrpriv->lock);
@@ -145,12 +142,12 @@ static void SetPSModeWorkItemCallback(struct work_struct *work)
struct pwrctrl_priv, SetPSModeWorkItem);
struct _adapter *padapter = container_of(pwrpriv,
struct _adapter, pwrctrlpriv);
- _enter_pwrlock(&pwrpriv->lock);
if (!pwrpriv->bSleep) {
+ _enter_pwrlock(&pwrpriv->lock);
if (pwrpriv->pwr_mode == PS_MODE_ACTIVE)
r8712_set_rpwm(padapter, PS_STATE_S4);
+ up(&pwrpriv->lock);
}
- up(&pwrpriv->lock);
}
static void rpwm_workitem_callback(struct work_struct *work)
@@ -160,13 +157,13 @@ static void rpwm_workitem_callback(struct work_struct *work)
struct _adapter *padapter = container_of(pwrpriv,
struct _adapter, pwrctrlpriv);
u8 cpwm = pwrpriv->cpwm;
- _enter_pwrlock(&pwrpriv->lock);
if (pwrpriv->cpwm != pwrpriv->rpwm) {
+ _enter_pwrlock(&pwrpriv->lock);
cpwm = r8712_read8(padapter, SDIO_HCPWM);
pwrpriv->rpwm_retry = 1;
r8712_set_rpwm(padapter, pwrpriv->rpwm);
+ up(&pwrpriv->lock);
}
- up(&pwrpriv->lock);
}
static void rpwm_check_handler (void *FunctionContext)
diff --git a/drivers/staging/rtl8712/rtl871x_pwrctrl.h b/drivers/staging/rtl8712/rtl871x_pwrctrl.h
index b41ca2892be5..6024c4f63d5b 100644
--- a/drivers/staging/rtl8712/rtl871x_pwrctrl.h
+++ b/drivers/staging/rtl8712/rtl871x_pwrctrl.h
@@ -133,7 +133,6 @@ struct pwrctrl_priv {
u8 rpwm_retry;
uint bSetPSModeWorkItemInProgress;
- struct semaphore pnp_pwr_mgnt_sema;
spinlock_t pnp_pwr_mgnt_lock;
s32 pnp_current_pwr_state;
u8 pnp_bstop_trx;
diff --git a/drivers/staging/rtl8712/rtl871x_recv.c b/drivers/staging/rtl8712/rtl871x_recv.c
index 7069f06d9b5d..5b03b405883e 100644
--- a/drivers/staging/rtl8712/rtl871x_recv.c
+++ b/drivers/staging/rtl8712/rtl871x_recv.c
@@ -93,7 +93,6 @@ sint _r8712_init_recv_priv(struct recv_priv *precvpriv,
precvframe++;
}
precvpriv->rx_pending_cnt = 1;
- sema_init(&precvpriv->allrxreturnevt, 0);
return r8712_init_recv_priv(precvpriv, padapter);
}
diff --git a/drivers/staging/rtl8712/rtl871x_recv.h b/drivers/staging/rtl8712/rtl871x_recv.h
index cc7a72fee1c2..e42e6f0a15e6 100644
--- a/drivers/staging/rtl8712/rtl871x_recv.h
+++ b/drivers/staging/rtl8712/rtl871x_recv.h
@@ -85,8 +85,6 @@ using enter_critical section to protect
*/
struct recv_priv {
spinlock_t lock;
- struct semaphore recv_sema;
- struct semaphore terminate_recvthread_sema;
struct __queue free_recv_queue;
struct __queue recv_pending_queue;
u8 *pallocated_frame_buf;
@@ -100,7 +98,6 @@ struct recv_priv {
uint rx_largepacket_crcerr;
uint rx_smallpacket_crcerr;
uint rx_middlepacket_crcerr;
- struct semaphore allrxreturnevt;
u8 rx_pending_cnt;
uint ff_hwaddr;
struct tasklet_struct recv_tasklet;
diff --git a/drivers/staging/rtl8712/rtl871x_sta_mgt.c b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
index 81bde803c59f..1247b3d9719d 100644
--- a/drivers/staging/rtl8712/rtl871x_sta_mgt.c
+++ b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
@@ -42,10 +42,8 @@ static void _init_stainfo(struct sta_info *psta)
_init_listhead(&psta->hash_list);
_r8712_init_sta_xmit_priv(&psta->sta_xmitpriv);
_r8712_init_sta_recv_priv(&psta->sta_recvpriv);
-#ifdef CONFIG_R8712_AP
_init_listhead(&psta->asoc_list);
_init_listhead(&psta->auth_list);
-#endif
}
u32 _r8712_init_sta_priv(struct sta_priv *pstapriv)
@@ -72,10 +70,8 @@ u32 _r8712_init_sta_priv(struct sta_priv *pstapriv)
get_list_head(&pstapriv->free_sta_queue));
psta++;
}
-#ifdef CONFIG_R8712_AP
_init_listhead(&pstapriv->asoc_list);
_init_listhead(&pstapriv->auth_list);
-#endif
return _SUCCESS;
}
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
index 8bbdee70f867..aa57e7754f04 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.c
+++ b/drivers/staging/rtl8712/rtl871x_xmit.c
@@ -71,8 +71,6 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
memset((unsigned char *)pxmitpriv, 0, sizeof(struct xmit_priv));
spin_lock_init(&pxmitpriv->lock);
- sema_init(&pxmitpriv->xmit_sema, 0);
- sema_init(&pxmitpriv->terminate_xmitthread_sema, 0);
/*
Please insert all the queue initialization using _init_queue below
*/
@@ -121,7 +119,6 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
_r8712_init_hw_txqueue(&pxmitpriv->bmc_txqueue, BMC_QUEUE_INX);
pxmitpriv->frag_len = MAX_FRAG_THRESHOLD;
pxmitpriv->txirp_cnt = 1;
- sema_init(&(pxmitpriv->tx_retevt), 0);
/*per AC pending irp*/
pxmitpriv->beq_cnt = 0;
pxmitpriv->bkq_cnt = 0;
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.h b/drivers/staging/rtl8712/rtl871x_xmit.h
index a034c0fec718..638b79b4c5a8 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.h
+++ b/drivers/staging/rtl8712/rtl871x_xmit.h
@@ -202,8 +202,6 @@ struct hw_txqueue {
struct xmit_priv {
spinlock_t lock;
- struct semaphore xmit_sema;
- struct semaphore terminate_xmitthread_sema;
struct __queue be_pending;
struct __queue bk_pending;
struct __queue vi_pending;
@@ -233,7 +231,6 @@ struct xmit_priv {
uint tx_drop;
struct hw_xmit *hwxmits;
u8 hwxmit_entry;
- struct semaphore tx_retevt;/*all tx return event;*/
u8 txirp_cnt;
struct tasklet_struct xmit_tasklet;
_workitem xmit_pipe4_reset_wi;
diff --git a/drivers/staging/rtl8712/sta_info.h b/drivers/staging/rtl8712/sta_info.h
index 48d6a14c8f5f..f8016e9abffd 100644
--- a/drivers/staging/rtl8712/sta_info.h
+++ b/drivers/staging/rtl8712/sta_info.h
@@ -90,7 +90,6 @@ struct sta_info {
* curr_network(mlme_priv/security_priv/qos/ht) : AP CAP/INFO
* sta_info: (AP & STA) CAP/INFO
*/
-#ifdef CONFIG_R8712_AP
struct list_head asoc_list;
struct list_head auth_list;
unsigned int expire_to;
@@ -98,7 +97,6 @@ struct sta_info {
unsigned int authalg;
unsigned char chg_txt[128];
unsigned int tx_ra_bitmap;
-#endif
};
struct sta_priv {
@@ -111,13 +109,11 @@ struct sta_priv {
struct __queue sleep_q;
struct __queue wakeup_q;
struct _adapter *padapter;
-#ifdef CONFIG_R8712_AP
struct list_head asoc_list;
struct list_head auth_list;
unsigned int auth_to; /* sec, time to expire in authenticating. */
unsigned int assoc_to; /* sec, time to expire before associating. */
unsigned int expire_to; /* sec , time to expire after associated. */
-#endif
};
static inline u32 wifi_mac_hash(u8 *mac)
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index 9bade184883b..fe0e2ffc5250 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -281,7 +281,6 @@ static uint r8712_usb_dvobj_init(struct _adapter *padapter)
}
if ((r8712_alloc_io_queue(padapter)) == _FAIL)
status = _FAIL;
- sema_init(&(padapter->dvobjpriv.usb_suspend_sema), 0);
return status;
}
diff --git a/drivers/staging/rts5139/ms.h b/drivers/staging/rts5139/ms.h
index f9d46d210f23..3ce1dc90f19d 100644
--- a/drivers/staging/rts5139/ms.h
+++ b/drivers/staging/rts5139/ms.h
@@ -249,9 +249,9 @@ int ms_delay_write(struct rts51x_chip *chip);
#ifdef SUPPORT_MAGIC_GATE
int ms_switch_clock(struct rts51x_chip *chip);
-int ms_write_bytes(struct rts51x_chip *chip, u8 tpc, u8 cnt, u8 cfg, u8 * data,
+int ms_write_bytes(struct rts51x_chip *chip, u8 tpc, u8 cnt, u8 cfg, u8 *data,
int data_len);
-int ms_read_bytes(struct rts51x_chip *chip, u8 tpc, u8 cnt, u8 cfg, u8 * data,
+int ms_read_bytes(struct rts51x_chip *chip, u8 tpc, u8 cnt, u8 cfg, u8 *data,
int data_len);
int ms_set_rw_reg_addr(struct rts51x_chip *chip, u8 read_start, u8 read_cnt,
u8 write_start, u8 write_cnt);
diff --git a/drivers/staging/rts5139/rts51x_chip.c b/drivers/staging/rts5139/rts51x_chip.c
index adc0d0005735..b3e0bb22b0ff 100644
--- a/drivers/staging/rts5139/rts51x_chip.c
+++ b/drivers/staging/rts5139/rts51x_chip.c
@@ -541,7 +541,7 @@ int rts51x_get_rsp(struct rts51x_chip *chip, int rsp_len, int timeout)
return STATUS_SUCCESS;
}
-int rts51x_get_card_status(struct rts51x_chip *chip, u16 * status)
+int rts51x_get_card_status(struct rts51x_chip *chip, u16 *status)
{
int retval;
u16 val;
@@ -577,7 +577,7 @@ int rts51x_write_register(struct rts51x_chip *chip, u16 addr, u8 mask, u8 data)
return STATUS_SUCCESS;
}
-int rts51x_read_register(struct rts51x_chip *chip, u16 addr, u8 * data)
+int rts51x_read_register(struct rts51x_chip *chip, u16 addr, u8 *data)
{
int retval;
@@ -620,7 +620,7 @@ int rts51x_ep0_write_register(struct rts51x_chip *chip, u16 addr, u8 mask,
return STATUS_SUCCESS;
}
-int rts51x_ep0_read_register(struct rts51x_chip *chip, u16 addr, u8 * data)
+int rts51x_ep0_read_register(struct rts51x_chip *chip, u16 addr, u8 *data)
{
int retval;
u16 value = 0;
@@ -720,7 +720,7 @@ int rts51x_seq_read_register(struct rts51x_chip *chip, u16 addr, u16 len,
return STATUS_SUCCESS;
}
-int rts51x_read_ppbuf(struct rts51x_chip *chip, u8 * buf, int buf_len)
+int rts51x_read_ppbuf(struct rts51x_chip *chip, u8 *buf, int buf_len)
{
int retval;
@@ -735,7 +735,7 @@ int rts51x_read_ppbuf(struct rts51x_chip *chip, u8 * buf, int buf_len)
return STATUS_SUCCESS;
}
-int rts51x_write_ppbuf(struct rts51x_chip *chip, u8 * buf, int buf_len)
+int rts51x_write_ppbuf(struct rts51x_chip *chip, u8 *buf, int buf_len)
{
int retval;
@@ -776,7 +776,7 @@ int rts51x_write_phy_register(struct rts51x_chip *chip, u8 addr, u8 val)
return STATUS_SUCCESS;
}
-int rts51x_read_phy_register(struct rts51x_chip *chip, u8 addr, u8 * val)
+int rts51x_read_phy_register(struct rts51x_chip *chip, u8 addr, u8 *val)
{
int retval;
@@ -921,7 +921,7 @@ void rts51x_trace_msg(struct rts51x_chip *chip, unsigned char *buf, int clear)
}
#endif
-void rts51x_pp_status(struct rts51x_chip *chip, unsigned int lun, u8 * status,
+void rts51x_pp_status(struct rts51x_chip *chip, unsigned int lun, u8 *status,
u8 status_len)
{
struct sd_info *sd_card = &(chip->sd_card);
diff --git a/drivers/staging/rts5139/rts51x_chip.h b/drivers/staging/rts5139/rts51x_chip.h
index 321ece750ede..13fc2a410d90 100644
--- a/drivers/staging/rts5139/rts51x_chip.h
+++ b/drivers/staging/rts5139/rts51x_chip.h
@@ -857,12 +857,12 @@ static inline u8 *rts51x_get_rsp_data(struct rts51x_chip *chip)
return chip->rsp_buf;
}
-int rts51x_get_card_status(struct rts51x_chip *chip, u16 * status);
+int rts51x_get_card_status(struct rts51x_chip *chip, u16 *status);
int rts51x_write_register(struct rts51x_chip *chip, u16 addr, u8 mask, u8 data);
-int rts51x_read_register(struct rts51x_chip *chip, u16 addr, u8 * data);
+int rts51x_read_register(struct rts51x_chip *chip, u16 addr, u8 *data);
int rts51x_ep0_write_register(struct rts51x_chip *chip, u16 addr, u8 mask,
u8 data);
-int rts51x_ep0_read_register(struct rts51x_chip *chip, u16 addr, u8 * data);
+int rts51x_ep0_read_register(struct rts51x_chip *chip, u16 addr, u8 *data);
int rts51x_seq_write_register(struct rts51x_chip *chip, u16 addr, u16 len,
u8 *data);
int rts51x_seq_read_register(struct rts51x_chip *chip, u16 addr, u16 len,
diff --git a/drivers/staging/rts5139/rts51x_fop.h b/drivers/staging/rts5139/rts51x_fop.h
index 0453f57d1a84..94d75f08d255 100644
--- a/drivers/staging/rts5139/rts51x_fop.h
+++ b/drivers/staging/rts5139/rts51x_fop.h
@@ -48,7 +48,7 @@ int rts51x_open(struct inode *inode, struct file *filp);
int rts51x_release(struct inode *inode, struct file *filp);
ssize_t rts51x_read(struct file *filp, char __user *buf, size_t count,
loff_t *f_pos);
-ssize_t rts51x_write(struct file *filp, const char __user * buf, size_t count,
+ssize_t rts51x_write(struct file *filp, const char __user *buf, size_t count,
loff_t *f_pos);
#if 0 /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36) */
int rts51x_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
diff --git a/drivers/staging/rts5139/rts51x_transport.c b/drivers/staging/rts5139/rts51x_transport.c
index e11467acc57b..da9c83b49426 100644
--- a/drivers/staging/rts5139/rts51x_transport.c
+++ b/drivers/staging/rts5139/rts51x_transport.c
@@ -883,7 +883,7 @@ int rts51x_transfer_data_partial(struct rts51x_chip *chip, unsigned int pipe,
return result;
}
-int rts51x_get_epc_status(struct rts51x_chip *chip, u16 * status)
+int rts51x_get_epc_status(struct rts51x_chip *chip, u16 *status)
{
unsigned int pipe = RCV_INTR_PIPE(chip);
struct usb_host_endpoint *ep;
diff --git a/drivers/staging/rts5139/rts51x_transport.h b/drivers/staging/rts5139/rts51x_transport.h
index 8464c4836d5b..9dd556ea9c08 100644
--- a/drivers/staging/rts5139/rts51x_transport.h
+++ b/drivers/staging/rts5139/rts51x_transport.h
@@ -73,7 +73,7 @@ int rts51x_start_epc_transfer(struct rts51x_chip *chip);
void rts51x_cancel_epc_transfer(struct rts51x_chip *chip);
#endif
-int rts51x_get_epc_status(struct rts51x_chip *chip, u16 * status);
+int rts51x_get_epc_status(struct rts51x_chip *chip, u16 *status);
void rts51x_invoke_transport(struct scsi_cmnd *srb, struct rts51x_chip *chip);
#endif /* __RTS51X_TRANSPORT_H */
diff --git a/drivers/staging/rts5139/sd_cprm.c b/drivers/staging/rts5139/sd_cprm.c
index 407cd43ad3b1..d5969d992d84 100644
--- a/drivers/staging/rts5139/sd_cprm.c
+++ b/drivers/staging/rts5139/sd_cprm.c
@@ -233,7 +233,7 @@ RTY_SEND_CMD:
return STATUS_SUCCESS;
}
-int ext_sd_get_rsp(struct rts51x_chip *chip, int len, u8 * rsp, u8 rsp_type)
+int ext_sd_get_rsp(struct rts51x_chip *chip, int len, u8 *rsp, u8 rsp_type)
{
int retval, rsp_len;
u16 reg_addr;
diff --git a/drivers/staging/sbe-2t3e3/intr.c b/drivers/staging/sbe-2t3e3/intr.c
index 7ad1a8382037..1336aab11bdd 100644
--- a/drivers/staging/sbe-2t3e3/intr.c
+++ b/drivers/staging/sbe-2t3e3/intr.c
@@ -188,7 +188,7 @@ void dc_intr_rx(struct channel *sc)
}
if (sc->s.LOS) {
- error_mask &= ~(SBE_2T3E3_RX_DESC_DRIBBLING_BIT ||
+ error_mask &= ~(SBE_2T3E3_RX_DESC_DRIBBLING_BIT |
SBE_2T3E3_RX_DESC_MII_ERROR);
}
diff --git a/drivers/staging/sep/Kconfig b/drivers/staging/sep/Kconfig
index 92bf16667d04..185b676d858a 100644
--- a/drivers/staging/sep/Kconfig
+++ b/drivers/staging/sep/Kconfig
@@ -3,7 +3,8 @@ config DX_SEP
depends on PCI
help
Discretix SEP driver; used for the security processor subsystem
- on bard the Intel Mobile Internet Device.
+ on board the Intel Mobile Internet Device and adds SEP availability
+ to the kernel crypto infrastructure
The driver's name is sep_driver.
diff --git a/drivers/staging/sep/Makefile b/drivers/staging/sep/Makefile
index 628d5f919414..e48a7959289e 100644
--- a/drivers/staging/sep/Makefile
+++ b/drivers/staging/sep/Makefile
@@ -1,2 +1,3 @@
-obj-$(CONFIG_DX_SEP) := sep_driver.o
-
+ccflags-y += -I$(srctree)/$(src)
+obj-$(CONFIG_DX_SEP) += sep_driver.o
+sep_driver-objs := sep_crypto.o sep_main.o
diff --git a/drivers/staging/sep/TODO b/drivers/staging/sep/TODO
index 8f3b878ad8ae..3524d0cf84ba 100644
--- a/drivers/staging/sep/TODO
+++ b/drivers/staging/sep/TODO
@@ -1,4 +1,3 @@
Todo's so far (from Alan Cox)
-- Check whether it can be plugged into any of the kernel crypto API
- interfaces - Crypto API 'glue' is still not ready to submit
-- Clean up un-needed debug prints - Started to work on this
+- Clean up unused ioctls
+- Clean up unused fields in ioctl structures
diff --git a/drivers/staging/sep/sep_crypto.c b/drivers/staging/sep/sep_crypto.c
new file mode 100644
index 000000000000..89b6814c70af
--- /dev/null
+++ b/drivers/staging/sep/sep_crypto.c
@@ -0,0 +1,4054 @@
+/*
+ *
+ * sep_crypto.c - Crypto interface structures
+ *
+ * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
+ * Contributions(c) 2009-2010 Discretix. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * CONTACTS:
+ *
+ * Mark Allyn mark.a.allyn@intel.com
+ * Jayant Mangalampalli jayant.mangalampalli@intel.com
+ *
+ * CHANGES:
+ *
+ * 2009.06.26 Initial publish
+ * 2010.09.14 Upgrade to Medfield
+ * 2011.02.22 Enable Kernel Crypto
+ *
+ */
+
+/* #define DEBUG */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/kdev_t.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/poll.h>
+#include <linux/wait.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/list.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/crypto.h>
+#include <crypto/internal/hash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha.h>
+#include <crypto/md5.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/hash.h>
+#include "sep_driver_hw_defs.h"
+#include "sep_driver_config.h"
+#include "sep_driver_api.h"
+#include "sep_dev.h"
+#include "sep_crypto.h"
+
+/* Globals for queuing */
+static spinlock_t queue_lock;
+static struct crypto_queue sep_queue;
+
+/* Forward declaration of the dequeuer */
+static void sep_dequeuer(void *data);
+
+/**
+ * crypto_sep_dump_message - dump the message that is pending
+ * @sep: SEP device
+ * This will only print a dump if DEBUG is set; it follows
+ * the kernel debug print enabling
+ */
+static void crypto_sep_dump_message(struct sep_device *sep, void *msg)
+{
+#if 0
+ u32 *p;
+ u32 *i;
+ int count;
+
+ p = sep->shared_addr;
+ i = (u32 *)msg;
+ for (count = 0; count < 10 * 4; count += 4)
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] Word %d of the message is %x (local)%x\n",
+ current->pid, count/4, *p++, *i++);
+#endif
+}
+
+/**
+ * sep_do_callback
+ * @work: pointer to work_struct
+ * This is what is called by the queue; it is generic so that it
+ * can be used by any type of operation as each different callback
+ * function can use the data parameter in its own way
+ */
+static void sep_do_callback(struct work_struct *work)
+{
+ struct sep_work_struct *sep_work = container_of(work,
+ struct sep_work_struct, work);
+ if (sep_work != NULL) {
+ (sep_work->callback)(sep_work->data);
+ kfree(sep_work);
+ } else {
+ pr_debug("sep crypto: do callback - NULL container\n");
+ }
+}
+
+/**
+ * sep_submit_work
+ * @work_queue: pointer to struct_workqueue
+ * @funct: pointer to function to execute
+ * @data: pointer to data; function will know
+ * how to use it
+ * This is a generic API to submit something to
+ * the queue. The callback function will depend
+ * on what operation is to be done
+ */
+static int sep_submit_work(struct workqueue_struct *work_queue,
+ void(*funct)(void *),
+ void *data)
+{
+ struct sep_work_struct *sep_work;
+ int result;
+
+ sep_work = kmalloc(sizeof(struct sep_work_struct), GFP_ATOMIC);
+
+ if (sep_work == NULL) {
+		pr_debug("sep crypto: cannot allocate work structure\n");
+ return -ENOMEM;
+ }
+
+ sep_work->callback = funct;
+ sep_work->data = data;
+ INIT_WORK(&sep_work->work, sep_do_callback);
+ result = queue_work(work_queue, &sep_work->work);
+ if (!result) {
+ pr_debug("sep_crypto: queue_work failed\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * sep_alloc_sg_buf -
+ * @sep: pointer to struct sep_device
+ * @size: total size of area
+ * @block_size: minimum size of chunks
+ *	each page is sized to a multiple of this size (the last may be shorter)
+ * @returns: pointer to struct scatterlist for new
+ * buffer
+ **/
+static struct scatterlist *sep_alloc_sg_buf(
+ struct sep_device *sep,
+ size_t size,
+ size_t block_size)
+{
+ u32 nbr_pages;
+ u32 ct1;
+ void *buf;
+ size_t current_size;
+ size_t real_page_size;
+
+ struct scatterlist *sg, *sg_temp;
+
+ if (size == 0)
+ return NULL;
+
+ dev_dbg(&sep->pdev->dev, "sep alloc sg buf\n");
+
+ current_size = 0;
+ nbr_pages = 0;
+ real_page_size = PAGE_SIZE - (PAGE_SIZE % block_size);
+	/**
+	 * The size of each page must be a multiple of the operation
+	 * block size; increment by the adjusted page size until
+	 * the total size is reached, which gives the number of
+	 * pages
+	 */
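+	/*
+	 * Illustrative example (values assumed, not from vendor docs):
+	 * with PAGE_SIZE = 4096 and block_size = 24, real_page_size becomes
+	 * 4096 - (4096 % 24) = 4080, so a 10000-byte request is carved into
+	 * three pages of 4080, 4080 and 1840 bytes.
+	 */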
+ while (current_size < size) {
+ current_size += real_page_size;
+ nbr_pages += 1;
+ }
+
+ sg = kmalloc((sizeof(struct scatterlist) * nbr_pages), GFP_ATOMIC);
+ if (!sg) {
+ dev_warn(&sep->pdev->dev, "Cannot allocate page for new sg\n");
+ return NULL;
+ }
+
+ sg_init_table(sg, nbr_pages);
+
+ current_size = 0;
+ sg_temp = sg;
+ for (ct1 = 0; ct1 < nbr_pages; ct1 += 1) {
+ buf = (void *)get_zeroed_page(GFP_ATOMIC);
+ if (!buf) {
+ dev_warn(&sep->pdev->dev,
+ "Cannot allocate page for new buffer\n");
+ kfree(sg);
+ return NULL;
+ }
+
+ sg_set_buf(sg_temp, buf, real_page_size);
+ if ((size - current_size) > real_page_size) {
+ sg_temp->length = real_page_size;
+ current_size += real_page_size;
+ } else {
+ sg_temp->length = (size - current_size);
+ current_size = size;
+ }
+ sg_temp = sg_next(sg);
+ }
+ return sg;
+}
+
+/**
+ * sep_free_sg_buf -
+ * @sg: pointer to struct scatterlist; points to area to free
+ */
+static void sep_free_sg_buf(struct scatterlist *sg)
+{
+ struct scatterlist *sg_temp = sg;
+ while (sg_temp) {
+ free_page((unsigned long)sg_virt(sg_temp));
+ sg_temp = sg_next(sg_temp);
+ }
+ kfree(sg);
+}
+
+/**
+ * sep_copy_sg -
+ * @sep: pointer to struct sep_device
+ * @sg_src: pointer to struct scatterlist for source
+ * @sg_dst: pointer to struct scatterlist for destination
+ * @size: size (in bytes) of data to copy
+ *
+ * Copy data from one scatterlist to another; both must
+ * be the same size
+ */
+static void sep_copy_sg(
+ struct sep_device *sep,
+ struct scatterlist *sg_src,
+ struct scatterlist *sg_dst,
+ size_t size)
+{
+ u32 seg_size;
+ u32 in_offset, out_offset;
+
+ u32 count = 0;
+ struct scatterlist *sg_src_tmp = sg_src;
+ struct scatterlist *sg_dst_tmp = sg_dst;
+ in_offset = 0;
+ out_offset = 0;
+
+ dev_dbg(&sep->pdev->dev, "sep copy sg\n");
+
+ if ((sg_src == NULL) || (sg_dst == NULL) || (size == 0))
+ return;
+
+ dev_dbg(&sep->pdev->dev, "sep copy sg not null\n");
+
+ while (count < size) {
+ if ((sg_src_tmp->length - in_offset) >
+ (sg_dst_tmp->length - out_offset))
+ seg_size = sg_dst_tmp->length - out_offset;
+ else
+ seg_size = sg_src_tmp->length - in_offset;
+
+ if (seg_size > (size - count))
+			seg_size = (size - count);
+
+ memcpy(sg_virt(sg_dst_tmp) + out_offset,
+ sg_virt(sg_src_tmp) + in_offset,
+ seg_size);
+
+ in_offset += seg_size;
+ out_offset += seg_size;
+ count += seg_size;
+
+ if (in_offset >= sg_src_tmp->length) {
+ sg_src_tmp = sg_next(sg_src_tmp);
+ in_offset = 0;
+ }
+
+ if (out_offset >= sg_dst_tmp->length) {
+ sg_dst_tmp = sg_next(sg_dst_tmp);
+ out_offset = 0;
+ }
+ }
+}
+
+/**
+ * sep_oddball_pages -
+ * @sep: pointer to struct sep_device
+ * @sg: pointer to struct scatterlist - buffer to check
+ * @data_size: total data size
+ * @block_size: minimum block size; each page must be a multiple of this size
+ * @do_copy: 1 means do copy, 0 means do not copy
+ * @new_sg: pointer to location to put pointer to new sg area
+ * @returns: 1 if new scatterlist is needed; 0 if not needed;
+ * error value if operation failed
+ *
+ * The SEP device requires all pages to be multiples of the
+ * minimum block size appropriate for the operation
+ * This function checks all pages; if any are oddball sizes
+ * (not multiple of block sizes), it creates a new scatterlist.
+ * If the do_copy parameter is set to 1, then a scatter list
+ * copy is performed. The pointer to the new scatterlist is
+ * put into the address supplied by the new_sg parameter; if
+ * no new scatterlist is needed, then a NULL is put into
+ * the location at new_sg.
+ *
+ */
+static int sep_oddball_pages(
+ struct sep_device *sep,
+ struct scatterlist *sg,
+ size_t data_size,
+ u32 block_size,
+ struct scatterlist **new_sg,
+ u32 do_copy)
+{
+ struct scatterlist *sg_temp;
+ u32 flag;
+ u32 nbr_pages, page_count;
+
+ dev_dbg(&sep->pdev->dev, "sep oddball\n");
+ if ((sg == NULL) || (data_size == 0) || (data_size < block_size))
+ return 0;
+
+ dev_dbg(&sep->pdev->dev, "sep oddball not null\n");
+ flag = 0;
+ nbr_pages = 0;
+ page_count = 0;
+ sg_temp = sg;
+
+ while (sg_temp) {
+ nbr_pages += 1;
+ sg_temp = sg_next(sg_temp);
+ }
+
+ sg_temp = sg;
+ while ((sg_temp) && (flag == 0)) {
+ page_count += 1;
+ if (sg_temp->length % block_size)
+ flag = 1;
+ else
+ sg_temp = sg_next(sg_temp);
+ }
+
+ /* Do not process if last (or only) page is oddball */
+ if (nbr_pages == page_count)
+ flag = 0;
+
+ if (flag) {
+ dev_dbg(&sep->pdev->dev, "sep oddball processing\n");
+ *new_sg = sep_alloc_sg_buf(sep, data_size, block_size);
+ if (*new_sg == NULL) {
+ dev_warn(&sep->pdev->dev, "cannot allocate new sg\n");
+ return -ENOMEM;
+ }
+
+ if (do_copy)
+ sep_copy_sg(sep, sg, *new_sg, data_size);
+
+ return 1;
+ } else {
+ return 0;
+ }
+}
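+/*
+ * Worked example for sep_oddball_pages() (lengths assumed): a source
+ * scatterlist of 4096, 100 and 4096 bytes with block_size 16 triggers a
+ * rebuild because the middle entry is not a multiple of 16; if only the
+ * final entry were odd-sized, the list would be left untouched.
+ */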
+
+/**
+ * sep_copy_offset_sg -
+ * @sep: pointer to struct sep_device;
+ * @sg: pointer to struct scatterlist
+ * @offset: offset into scatterlist memory
+ * @dst: place to put data
+ * @len: length of data
+ * @returns: number of bytes copied
+ *
+ * This copies data from the scatterlist buffer,
+ * starting at the given offset - it is needed for
+ * handling tail data in hash
+ */
+static size_t sep_copy_offset_sg(
+ struct sep_device *sep,
+ struct scatterlist *sg,
+ u32 offset,
+ void *dst,
+ u32 len)
+{
+ size_t page_start;
+ size_t page_end;
+ size_t offset_within_page;
+ size_t length_within_page;
+ size_t length_remaining;
+ size_t current_offset;
+
+ /* Find which page is beginning of segment */
+ page_start = 0;
+ page_end = sg->length;
+ while ((sg) && (offset > page_end)) {
+ page_start += sg->length;
+ sg = sg_next(sg);
+ if (sg)
+ page_end += sg->length;
+ }
+
+ if (sg == NULL)
+ return -ENOMEM;
+
+ offset_within_page = offset - page_start;
+ if ((sg->length - offset_within_page) >= len) {
+ /* All within this page */
+ memcpy(dst, sg_virt(sg) + offset_within_page, len);
+ return len;
+ } else {
+ /* Scattered multiple pages */
+ current_offset = 0;
+ length_remaining = len;
+ while ((sg) && (current_offset < len)) {
+ length_within_page = sg->length - offset_within_page;
+ if (length_within_page >= length_remaining) {
+ memcpy(dst+current_offset,
+ sg_virt(sg) + offset_within_page,
+ length_remaining);
+ length_remaining = 0;
+ current_offset = len;
+ } else {
+ memcpy(dst+current_offset,
+ sg_virt(sg) + offset_within_page,
+ length_within_page);
+ length_remaining -= length_within_page;
+ current_offset += length_within_page;
+ offset_within_page = 0;
+ sg = sg_next(sg);
+ }
+ }
+
+ if (sg == NULL)
+ return -ENOMEM;
+ }
+ return len;
+}
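+/*
+ * Example for sep_copy_offset_sg() (lengths assumed): with 64-byte
+ * scatterlist entries, a call with offset 100 and len 40 starts 36 bytes
+ * into the second entry, takes 28 bytes from it and the remaining 12
+ * bytes from the third entry.
+ */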
+
+/**
+ * partial_overlap -
+ * @src_ptr: source pointer
+ * @dst_ptr: destination pointer
+ * @nbytes: number of bytes
+ * @returns: 0 for success; -EINVAL for partial overlap
+ * We cannot have any partial overlap. Total overlap
+ * where src is the same as dst is okay
+ */
+static int partial_overlap(void *src_ptr, void *dst_ptr, u32 nbytes)
+{
+ /* Check for partial overlap */
+ if (src_ptr != dst_ptr) {
+ if (src_ptr < dst_ptr) {
+ if ((src_ptr + nbytes) > dst_ptr)
+ return -EINVAL;
+ } else {
+ if ((dst_ptr + nbytes) > src_ptr)
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
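+/*
+ * For example, src_ptr == dst_ptr (fully in-place) is accepted by
+ * partial_overlap(), while dst_ptr == src_ptr + 1 with nbytes > 1 is
+ * rejected as a partial overlap.
+ */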
+
+/* Debug - prints only if DEBUG is defined; follows kernel debug model */
+static void sep_dump(struct sep_device *sep, char *stg, void *start, int len)
+{
+#if 0
+ int ct1;
+ u8 *ptt;
+
+ dev_dbg(&sep->pdev->dev,
+ "Dump of %s starting at %08lx for %08x bytes\n",
+ stg, (unsigned long)start, len);
+ for (ct1 = 0; ct1 < len; ct1 += 1) {
+ ptt = (u8 *)(start + ct1);
+ dev_dbg(&sep->pdev->dev, "%02x ", *ptt);
+ if (ct1 % 16 == 15)
+ dev_dbg(&sep->pdev->dev, "\n");
+ }
+ dev_dbg(&sep->pdev->dev, "\n");
+#endif
+}
+
+/* Debug - prints only if DEBUG is defined; follows kernel debug model */
+static void sep_dump_sg(struct sep_device *sep, char *stg,
+ struct scatterlist *sg)
+{
+#if 0
+ int ct1, ct2;
+ u8 *ptt;
+
+ dev_dbg(&sep->pdev->dev, "Dump of scatterlist %s\n", stg);
+
+ ct1 = 0;
+ while (sg) {
+ dev_dbg(&sep->pdev->dev, "page %x\n size %x", ct1,
+ sg->length);
+ dev_dbg(&sep->pdev->dev, "phys addr is %lx",
+ (unsigned long)sg_phys(sg));
+ ptt = sg_virt(sg);
+ for (ct2 = 0; ct2 < sg->length; ct2 += 1) {
+ dev_dbg(&sep->pdev->dev, "byte %x is %02x\n",
+ ct2, (unsigned char)*(ptt + ct2));
+ }
+
+ ct1 += 1;
+ sg = sg_next(sg);
+ }
+ dev_dbg(&sep->pdev->dev, "\n");
+#endif
+}
+
+/* Debug - prints only if DEBUG is defined */
+static void sep_dump_ivs(struct ablkcipher_request *req, char *reason)
+{
+ unsigned char *cptr;
+ struct sep_aes_internal_context *aes_internal;
+ struct sep_des_internal_context *des_internal;
+ int ct1;
+
+ struct this_task_ctx *ta_ctx;
+ struct crypto_ablkcipher *tfm;
+ struct sep_system_ctx *sctx;
+
+ ta_ctx = ablkcipher_request_ctx(req);
+ tfm = crypto_ablkcipher_reqtfm(req);
+ sctx = crypto_ablkcipher_ctx(tfm);
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "IV DUMP - %s\n", reason);
+ if ((ta_ctx->current_request == DES_CBC) &&
+ (ta_ctx->des_opmode == SEP_DES_CBC)) {
+
+ des_internal = (struct sep_des_internal_context *)
+ sctx->des_private_ctx.ctx_buf;
+ /* print vendor */
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "sep - vendor iv for DES\n");
+ cptr = (unsigned char *)des_internal->iv_context;
+ for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "%02x\n", *(cptr + ct1));
+
+ /* print walk */
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "sep - walk from kernel crypto iv for DES\n");
+ cptr = (unsigned char *)ta_ctx->walk.iv;
+ for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "%02x\n", *(cptr + ct1));
+ } else if ((ta_ctx->current_request == AES_CBC) &&
+ (ta_ctx->aes_opmode == SEP_AES_CBC)) {
+
+ aes_internal = (struct sep_aes_internal_context *)
+ sctx->aes_private_ctx.cbuff;
+ /* print vendor */
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "sep - vendor iv for AES\n");
+ cptr = (unsigned char *)aes_internal->aes_ctx_iv;
+ for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "%02x\n", *(cptr + ct1));
+
+ /* print walk */
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "sep - walk from kernel crypto iv for AES\n");
+ cptr = (unsigned char *)ta_ctx->walk.iv;
+ for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "%02x\n", *(cptr + ct1));
+ }
+}
+
+/**
+ * RFC2451: Weak key check
+ * Returns: 1 (weak), 0 (not weak)
+ */
+static int sep_weak_key(const u8 *key, unsigned int keylen)
+{
+ static const u8 parity[] = {
+ 8, 1, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 2, 8,
+ 0, 8, 8, 0, 8, 0, 0, 8, 8,
+ 0, 0, 8, 0, 8, 8, 3,
+ 0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
+ 8, 0, 0, 8, 0, 8, 8, 0, 0,
+ 8, 8, 0, 8, 0, 0, 8,
+ 0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
+ 8, 0, 0, 8, 0, 8, 8, 0, 0,
+ 8, 8, 0, 8, 0, 0, 8,
+ 8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
+ 0, 8, 8, 0, 8, 0, 0, 8, 8,
+ 0, 0, 8, 0, 8, 8, 0,
+ 0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
+ 8, 0, 0, 8, 0, 8, 8, 0, 0,
+ 8, 8, 0, 8, 0, 0, 8,
+ 8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
+ 0, 8, 8, 0, 8, 0, 0, 8, 8,
+ 0, 0, 8, 0, 8, 8, 0,
+ 8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
+ 0, 8, 8, 0, 8, 0, 0, 8, 8,
+ 0, 0, 8, 0, 8, 8, 0,
+ 4, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
+ 8, 5, 0, 8, 0, 8, 8, 0, 0,
+ 8, 8, 0, 8, 0, 6, 8,
+ };
+
+ u32 n, w;
+
+ n = parity[key[0]]; n <<= 4;
+ n |= parity[key[1]]; n <<= 4;
+ n |= parity[key[2]]; n <<= 4;
+ n |= parity[key[3]]; n <<= 4;
+ n |= parity[key[4]]; n <<= 4;
+ n |= parity[key[5]]; n <<= 4;
+ n |= parity[key[6]]; n <<= 4;
+ n |= parity[key[7]];
+ w = 0x88888888L;
+
+ /* 1 in 10^10 keys passes this test */
+ if (!((n - (w >> 3)) & w)) {
+ if (n < 0x41415151) {
+ if (n < 0x31312121) {
+ if (n < 0x14141515) {
+ /* 01 01 01 01 01 01 01 01 */
+ if (n == 0x11111111)
+ goto weak;
+ /* 01 1F 01 1F 01 0E 01 0E */
+ if (n == 0x13131212)
+ goto weak;
+ } else {
+ /* 01 E0 01 E0 01 F1 01 F1 */
+ if (n == 0x14141515)
+ goto weak;
+ /* 01 FE 01 FE 01 FE 01 FE */
+ if (n == 0x16161616)
+ goto weak;
+ }
+ } else {
+ if (n < 0x34342525) {
+ /* 1F 01 1F 01 0E 01 0E 01 */
+ if (n == 0x31312121)
+ goto weak;
+ /* 1F 1F 1F 1F 0E 0E 0E 0E (?) */
+ if (n == 0x33332222)
+ goto weak;
+ } else {
+ /* 1F E0 1F E0 0E F1 0E F1 */
+ if (n == 0x34342525)
+ goto weak;
+ /* 1F FE 1F FE 0E FE 0E FE */
+ if (n == 0x36362626)
+ goto weak;
+ }
+ }
+ } else {
+ if (n < 0x61616161) {
+ if (n < 0x44445555) {
+ /* E0 01 E0 01 F1 01 F1 01 */
+ if (n == 0x41415151)
+ goto weak;
+ /* E0 1F E0 1F F1 0E F1 0E */
+ if (n == 0x43435252)
+ goto weak;
+ } else {
+ /* E0 E0 E0 E0 F1 F1 F1 F1 (?) */
+ if (n == 0x44445555)
+ goto weak;
+ /* E0 FE E0 FE F1 FE F1 FE */
+ if (n == 0x46465656)
+ goto weak;
+ }
+ } else {
+ if (n < 0x64646565) {
+ /* FE 01 FE 01 FE 01 FE 01 */
+ if (n == 0x61616161)
+ goto weak;
+ /* FE 1F FE 1F FE 0E FE 0E */
+ if (n == 0x63636262)
+ goto weak;
+ } else {
+ /* FE E0 FE E0 FE F1 FE F1 */
+ if (n == 0x64646565)
+ goto weak;
+ /* FE FE FE FE FE FE FE FE */
+ if (n == 0x66666666)
+ goto weak;
+ }
+ }
+ }
+ }
+ return 0;
+weak:
+ return 1;
+}
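+/*
+ * Example (follows from the parity table above): the all-ones DES key
+ * 01 01 01 01 01 01 01 01 packs to n = 0x11111111 and is reported weak.
+ */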
+/**
+ * sep_sg_nents
+ */
+static u32 sep_sg_nents(struct scatterlist *sg)
+{
+ u32 ct1 = 0;
+ while (sg) {
+ ct1 += 1;
+ sg = sg_next(sg);
+ }
+
+ return ct1;
+}
+
+/**
+ * sep_start_msg -
+ * @ta_ctx: pointer to struct this_task_ctx
+ * @returns: offset to place for the next word in the message
+ * Set up pointer in message pool for new message
+ */
+static u32 sep_start_msg(struct this_task_ctx *ta_ctx)
+{
+ u32 *word_ptr;
+ ta_ctx->msg_len_words = 2;
+ ta_ctx->msgptr = ta_ctx->msg;
+ memset(ta_ctx->msg, 0, SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
+ ta_ctx->msgptr += sizeof(u32) * 2;
+ word_ptr = (u32 *)ta_ctx->msgptr;
+ *word_ptr = SEP_START_MSG_TOKEN;
+ return sizeof(u32) * 2;
+}
+
+/**
+ * sep_end_msg -
+ * @ta_ctx: pointer to struct this_task_ctx
+ * @msg_offset: current message offset
+ * End the message; set the length and the
+ * (currently zero) CRC
+ */
+static void sep_end_msg(struct this_task_ctx *ta_ctx, u32 msg_offset)
+{
+ u32 *word_ptr;
+ /* Msg size goes into msg after token */
+ ta_ctx->msg_len_words = msg_offset / sizeof(u32) + 1;
+ word_ptr = (u32 *)ta_ctx->msgptr;
+ word_ptr += 1;
+ *word_ptr = ta_ctx->msg_len_words;
+
+ /* CRC (currently 0) goes at end of msg */
+ word_ptr = (u32 *)(ta_ctx->msgptr + msg_offset);
+ *word_ptr = 0;
+}
+
+/**
+ * sep_start_inbound_msg -
+ * @ta_ctx: pointer to struct this_task_ctx
+ * @msg_offset: offset to place for the next word in the message
+ * @returns: 0 for success; error value for failure
+ * Set up pointer in message pool for inbound message
+ */
+static u32 sep_start_inbound_msg(struct this_task_ctx *ta_ctx, u32 *msg_offset)
+{
+ u32 *word_ptr;
+ u32 token;
+ u32 error = SEP_OK;
+
+ *msg_offset = sizeof(u32) * 2;
+ word_ptr = (u32 *)ta_ctx->msgptr;
+ token = *word_ptr;
+ ta_ctx->msg_len_words = *(word_ptr + 1);
+
+ if (token != SEP_START_MSG_TOKEN) {
+ error = SEP_INVALID_START;
+ goto end_function;
+ }
+
+end_function:
+
+ return error;
+}
+
+/**
+ * sep_write_msg -
+ * @ta_ctx: pointer to struct this_task_ctx
+ * @in_addr: pointer to start of parameter
+ * @size: size of parameter to copy (in bytes)
+ * @max_size: amount to advance the offset; SEP messages are in word sizes
+ * @msg_offset: pointer to current offset (is updated)
+ * @byte_array: flag to indicate whether endianness must be changed
+ * Copies data into the message area from caller
+ */
+static void sep_write_msg(struct this_task_ctx *ta_ctx, void *in_addr,
+ u32 size, u32 max_size, u32 *msg_offset, u32 byte_array)
+{
+ u32 *word_ptr;
+ void *void_ptr;
+ void_ptr = ta_ctx->msgptr + *msg_offset;
+ word_ptr = (u32 *)void_ptr;
+ memcpy(void_ptr, in_addr, size);
+ *msg_offset += max_size;
+
+ /* Do we need to manipulate endian? */
+ if (byte_array) {
+ u32 i;
+ for (i = 0; i < ((size + 3) / 4); i += 1)
+ *(word_ptr + i) = CHG_ENDIAN(*(word_ptr + i));
+ }
+}
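+/*
+ * Note on byte_array above: when set, every 32-bit word of the copied
+ * parameter is run through CHG_ENDIAN (assumed here to be a word
+ * byte-swap macro from the sep headers), so byte-oriented data such as
+ * keys and IVs reaches the SEP in the word order it expects.
+ */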
+
+/**
+ * sep_make_header
+ * @ta_ctx: pointer to struct this_task_ctx
+ * @msg_offset: pointer to current offset (is updated)
+ * @op_code: op code to put into message
+ * Puts op code into message and updates offset
+ */
+static void sep_make_header(struct this_task_ctx *ta_ctx, u32 *msg_offset,
+ u32 op_code)
+{
+ u32 *word_ptr;
+
+ *msg_offset = sep_start_msg(ta_ctx);
+ word_ptr = (u32 *)(ta_ctx->msgptr + *msg_offset);
+ *word_ptr = op_code;
+ *msg_offset += sizeof(u32);
+}
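+/*
+ * Message layout produced by the helpers above, as word offsets from
+ * ta_ctx->msgptr (itself two words into the shared message area); this
+ * is a reading of the code, not vendor documentation:
+ *
+ *   word 0:  SEP_START_MSG_TOKEN              (sep_start_msg)
+ *   word 1:  total message length in words    (sep_end_msg)
+ *   word 2:  op code                          (sep_make_header)
+ *   word 3+: parameters and contexts          (sep_write_msg and friends)
+ *   last:    CRC word, currently always zero  (sep_end_msg)
+ */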
+
+
+
+/**
+ * sep_read_msg -
+ * @ta_ctx: pointer to struct this_task_ctx
+ * @in_addr: pointer to start of parameter
+ * @size: size of parameter to copy (in bytes)
+ * @max_size: amount to advance the offset; SEP messages are in word sizes
+ * @msg_offset: pointer to current offset (is updated)
+ * @byte_array: flag to indicate whether endianness must be changed
+ * Copies data out of the message area to caller
+ */
+static void sep_read_msg(struct this_task_ctx *ta_ctx, void *in_addr,
+ u32 size, u32 max_size, u32 *msg_offset, u32 byte_array)
+{
+ u32 *word_ptr;
+ void *void_ptr;
+ void_ptr = ta_ctx->msgptr + *msg_offset;
+ word_ptr = (u32 *)void_ptr;
+
+ /* Do we need to manipulate endian? */
+ if (byte_array) {
+ u32 i;
+ for (i = 0; i < ((size + 3) / 4); i += 1)
+ *(word_ptr + i) = CHG_ENDIAN(*(word_ptr + i));
+ }
+
+ memcpy(in_addr, void_ptr, size);
+ *msg_offset += max_size;
+}
+
+/**
+ * sep_verify_op -
+ * @ta_ctx: pointer to struct this_task_ctx
+ * @op_code: expected op_code
+ * @msg_offset: pointer to current offset (is updated)
+ * @returns: 0 for success; error for failure
+ */
+static u32 sep_verify_op(struct this_task_ctx *ta_ctx, u32 op_code,
+ u32 *msg_offset)
+{
+ u32 error;
+ u32 in_ary[2];
+
+ struct sep_device *sep = ta_ctx->sep_used;
+
+ dev_dbg(&sep->pdev->dev, "dumping return message\n");
+ error = sep_start_inbound_msg(ta_ctx, msg_offset);
+ if (error) {
+ dev_warn(&sep->pdev->dev,
+ "sep_start_inbound_msg error\n");
+ return error;
+ }
+
+ sep_read_msg(ta_ctx, in_ary, sizeof(u32) * 2, sizeof(u32) * 2,
+ msg_offset, 0);
+
+ if (in_ary[0] != op_code) {
+ dev_warn(&sep->pdev->dev,
+ "sep got back wrong opcode\n");
+ dev_warn(&sep->pdev->dev,
+ "got back %x; expected %x\n",
+ in_ary[0], op_code);
+ return SEP_WRONG_OPCODE;
+ }
+
+ if (in_ary[1] != SEP_OK) {
+ dev_warn(&sep->pdev->dev,
+ "sep execution error\n");
+ dev_warn(&sep->pdev->dev,
+ "got back %x; expected %x\n",
+ in_ary[1], SEP_OK);
+		return in_ary[1];
+ }
+
+	return 0;
+}
+
+/**
+ * sep_read_context -
+ * @ta_ctx: pointer to struct this_task_ctx
+ * @msg_offset: point to current place in SEP msg; is updated
+ * @dst: pointer to place to put the context
+ * @len: size of the context structure (differs for crypto/hash)
+ * This function reads the context from the msg area
+ * There is a special way the vendor needs to have the maximum
+ * length calculated so that the msg_offset is updated properly;
+ * it skips over some words in the msg area depending on the size
+ * of the context
+ */
+static void sep_read_context(struct this_task_ctx *ta_ctx, u32 *msg_offset,
+ void *dst, u32 len)
+{
+ u32 max_length = ((len + 3) / sizeof(u32)) * sizeof(u32);
+ sep_read_msg(ta_ctx, dst, len, max_length, msg_offset, 0);
+}
+
+/**
+ * sep_write_context -
+ * @ta_ctx: pointer to struct this_task_ctx
+ * @msg_offset: point to current place in SEP msg; is updated
+ * @src: pointer to the current context
+ * @len: size of the context structure (differs for crypto/hash)
+ * This function writes the context to the msg area
+ * There is a special way the vendor needs to have the maximum
+ * length calculated so that the msg_offset is updated properly;
+ * it skips over some words in the msg area depending on the size
+ * of the context
+ */
+static void sep_write_context(struct this_task_ctx *ta_ctx, u32 *msg_offset,
+ void *src, u32 len)
+{
+ u32 max_length = ((len + 3) / sizeof(u32)) * sizeof(u32);
+ sep_write_msg(ta_ctx, src, len, max_length, msg_offset, 0);
+}
+
+/**
+ * sep_clear_out -
+ * @ta_ctx: pointer to struct this_task_ctx
+ * Clear out crypto related values in sep device structure
+ * to enable device to be used by anyone; either kernel
+ * crypto or userspace app via middleware
+ */
+static void sep_clear_out(struct this_task_ctx *ta_ctx)
+{
+ if (ta_ctx->src_sg_hold) {
+ sep_free_sg_buf(ta_ctx->src_sg_hold);
+ ta_ctx->src_sg_hold = NULL;
+ }
+
+ if (ta_ctx->dst_sg_hold) {
+ sep_free_sg_buf(ta_ctx->dst_sg_hold);
+ ta_ctx->dst_sg_hold = NULL;
+ }
+
+ ta_ctx->src_sg = NULL;
+ ta_ctx->dst_sg = NULL;
+
+ sep_free_dma_table_data_handler(ta_ctx->sep_used, &ta_ctx->dma_ctx);
+
+ if (ta_ctx->i_own_sep) {
+ /**
+ * The following unlocks the sep and makes it available
+ * to any other application
+		 * First, null out crypto entries in sep before releasing it
+ */
+ ta_ctx->sep_used->current_hash_req = NULL;
+ ta_ctx->sep_used->current_cypher_req = NULL;
+ ta_ctx->sep_used->current_request = 0;
+ ta_ctx->sep_used->current_hash_stage = 0;
+ ta_ctx->sep_used->ta_ctx = NULL;
+ ta_ctx->sep_used->in_kernel = 0;
+
+ ta_ctx->call_status.status = 0;
+
+		/* Remove anything confidential */
+ memset(ta_ctx->sep_used->shared_addr, 0,
+ SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
+
+ sep_queue_status_remove(ta_ctx->sep_used, &ta_ctx->queue_elem);
+
+#ifdef SEP_ENABLE_RUNTIME_PM
+ ta_ctx->sep_used->in_use = 0;
+ pm_runtime_mark_last_busy(&ta_ctx->sep_used->pdev->dev);
+ pm_runtime_put_autosuspend(&ta_ctx->sep_used->pdev->dev);
+#endif
+
+ clear_bit(SEP_WORKING_LOCK_BIT,
+ &ta_ctx->sep_used->in_use_flags);
+ ta_ctx->sep_used->pid_doing_transaction = 0;
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "[PID%d] waking up next transaction\n",
+ current->pid);
+
+ clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
+ &ta_ctx->sep_used->in_use_flags);
+ wake_up(&ta_ctx->sep_used->event_transactions);
+
+ ta_ctx->i_own_sep = 0;
+ }
+}
+
+/**
+ * Release crypto infrastructure from EINPROGRESS and
+ * clear sep_dev so that SEP is available to anyone
+ */
+static void sep_crypto_release(struct sep_system_ctx *sctx,
+ struct this_task_ctx *ta_ctx, u32 error)
+{
+ struct ahash_request *hash_req = ta_ctx->current_hash_req;
+ struct ablkcipher_request *cypher_req =
+ ta_ctx->current_cypher_req;
+ struct sep_device *sep = ta_ctx->sep_used;
+
+ sep_clear_out(ta_ctx);
+
+ /**
+	 * This may not yet exist depending on when we
+	 * chose to bail out. If it does exist, set
+ * it to 1
+ */
+ if (ta_ctx->are_we_done_yet != NULL)
+ *ta_ctx->are_we_done_yet = 1;
+
+ if (cypher_req != NULL) {
+ if ((sctx->key_sent == 1) ||
+ ((error != 0) && (error != -EINPROGRESS))) {
+ if (cypher_req->base.complete == NULL) {
+ dev_dbg(&sep->pdev->dev,
+ "release is null for cypher!");
+ } else {
+ cypher_req->base.complete(
+ &cypher_req->base, error);
+ }
+ }
+ }
+
+ if (hash_req != NULL) {
+ if (hash_req->base.complete == NULL) {
+ dev_dbg(&sep->pdev->dev,
+ "release is null for hash!");
+ } else {
+ hash_req->base.complete(
+ &hash_req->base, error);
+ }
+ }
+}
+
+/**
+ * This is where we grab the sep itself and tell it to do something.
+ * It will sleep if the sep is currently busy
+ * and it will return 0 if sep is now ours; error value if there
+ * were problems
+ */
+static int sep_crypto_take_sep(struct this_task_ctx *ta_ctx)
+{
+ struct sep_device *sep = ta_ctx->sep_used;
+ int result;
+ struct sep_msgarea_hdr *my_msg_header;
+
+ my_msg_header = (struct sep_msgarea_hdr *)ta_ctx->msg;
+
+ /* add to status queue */
+ ta_ctx->queue_elem = sep_queue_status_add(sep, my_msg_header->opcode,
+ ta_ctx->nbytes, current->pid,
+ current->comm, sizeof(current->comm));
+
+ if (!ta_ctx->queue_elem) {
+ dev_dbg(&sep->pdev->dev, "[PID%d] updating queue"
+ " status error\n", current->pid);
+ return -EINVAL;
+ }
+
+ /* get the device; this can sleep */
+ result = sep_wait_transaction(sep);
+ if (result)
+ return result;
+
+ if (sep_dev->power_save_setup == 1)
+ pm_runtime_get_sync(&sep_dev->pdev->dev);
+
+ /* Copy in the message */
+ memcpy(sep->shared_addr, ta_ctx->msg,
+ SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
+
+ /* Copy in the dcb information if there is any */
+ if (ta_ctx->dcb_region) {
+ result = sep_activate_dcb_dmatables_context(sep,
+ &ta_ctx->dcb_region, &ta_ctx->dmatables_region,
+ ta_ctx->dma_ctx);
+ if (result)
+ return result;
+ }
+
+ /* Mark the device so we know how to finish the job in the tasklet */
+ if (ta_ctx->current_hash_req)
+ sep->current_hash_req = ta_ctx->current_hash_req;
+ else
+ sep->current_cypher_req = ta_ctx->current_cypher_req;
+
+ sep->current_request = ta_ctx->current_request;
+ sep->current_hash_stage = ta_ctx->current_hash_stage;
+ sep->ta_ctx = ta_ctx;
+ sep->in_kernel = 1;
+ ta_ctx->i_own_sep = 1;
+
+ /* need to set bit first to avoid race condition with interrupt */
+ set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET, &ta_ctx->call_status.status);
+
+ result = sep_send_command_handler(sep);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d]: sending command to the sep\n",
+ current->pid);
+
+ if (!result)
+ dev_dbg(&sep->pdev->dev, "[PID%d]: command sent okay\n",
+ current->pid);
+ else {
+		dev_dbg(&sep->pdev->dev, "[PID%d]: cannot send command\n",
+ current->pid);
+ clear_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
+ &ta_ctx->call_status.status);
+ }
+
+ return result;
+}
+
+/**
+ * This function sets things up for a crypto data block process
+ * This does all preparation, but does not try to grab the
+ * sep
+ * @req: pointer to struct ablkcipher_request
+ * returns: 0 if all went well, non zero if error
+ */
+static int sep_crypto_block_data(struct ablkcipher_request *req)
+{
+
+ int int_error;
+ u32 msg_offset;
+ static u32 msg[10];
+ void *src_ptr;
+ void *dst_ptr;
+
+ static char small_buf[100];
+ ssize_t copy_result;
+ int result;
+
+ struct scatterlist *new_sg;
+ struct this_task_ctx *ta_ctx;
+ struct crypto_ablkcipher *tfm;
+ struct sep_system_ctx *sctx;
+
+ struct sep_des_internal_context *des_internal;
+ struct sep_aes_internal_context *aes_internal;
+
+ ta_ctx = ablkcipher_request_ctx(req);
+ tfm = crypto_ablkcipher_reqtfm(req);
+ sctx = crypto_ablkcipher_ctx(tfm);
+
+ /* start the walk on scatterlists */
+ ablkcipher_walk_init(&ta_ctx->walk, req->src, req->dst, req->nbytes);
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "sep crypto block data size of %x\n",
+ req->nbytes);
+
+ int_error = ablkcipher_walk_phys(req, &ta_ctx->walk);
+ if (int_error) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
+ int_error);
+ return -ENOMEM;
+ }
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "crypto block: src is %lx dst is %lx\n",
+ (unsigned long)req->src, (unsigned long)req->dst);
+
+	/* Make sure all pages are a multiple of the block size */
+ int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
+ req->nbytes, ta_ctx->walk.blocksize, &new_sg, 1);
+
+ if (int_error < 0) {
+		dev_warn(&ta_ctx->sep_used->pdev->dev, "oddball page error\n");
+ return -ENOMEM;
+ } else if (int_error == 1) {
+ ta_ctx->src_sg = new_sg;
+ ta_ctx->src_sg_hold = new_sg;
+ } else {
+ ta_ctx->src_sg = req->src;
+ ta_ctx->src_sg_hold = NULL;
+ }
+
+ int_error = sep_oddball_pages(ta_ctx->sep_used, req->dst,
+ req->nbytes, ta_ctx->walk.blocksize, &new_sg, 0);
+
+ if (int_error < 0) {
+		dev_warn(&ta_ctx->sep_used->pdev->dev,
+			 "oddball page error %x\n", int_error);
+ return -ENOMEM;
+ } else if (int_error == 1) {
+ ta_ctx->dst_sg = new_sg;
+ ta_ctx->dst_sg_hold = new_sg;
+ } else {
+ ta_ctx->dst_sg = req->dst;
+ ta_ctx->dst_sg_hold = NULL;
+ }
+
+ /* set nbytes for queue status */
+ ta_ctx->nbytes = req->nbytes;
+
+ /* Key already done; this is for data */
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "sending data\n");
+
+ sep_dump_sg(ta_ctx->sep_used,
+ "block sg in", ta_ctx->src_sg);
+
+ /* check for valid data and proper spacing */
+ src_ptr = sg_virt(ta_ctx->src_sg);
+ dst_ptr = sg_virt(ta_ctx->dst_sg);
+
+ if (!src_ptr || !dst_ptr ||
+ (ta_ctx->current_cypher_req->nbytes %
+ crypto_ablkcipher_blocksize(tfm))) {
+
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "cipher block size odd\n");
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "cipher block size is %x\n",
+ crypto_ablkcipher_blocksize(tfm));
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "cipher data size is %x\n",
+ ta_ctx->current_cypher_req->nbytes);
+ return -EINVAL;
+ }
+
+ if (partial_overlap(src_ptr, dst_ptr,
+ ta_ctx->current_cypher_req->nbytes)) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "block partial overlap\n");
+ return -EINVAL;
+ }
+
+ /* Put together the message */
+ sep_make_header(ta_ctx, &msg_offset, ta_ctx->block_opcode);
+
+ /* If des, and size is 1 block, put directly in msg */
+ if ((ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) &&
+ (req->nbytes == crypto_ablkcipher_blocksize(tfm))) {
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "writing out one block des\n");
+
+ copy_result = sg_copy_to_buffer(
+ ta_ctx->src_sg, sep_sg_nents(ta_ctx->src_sg),
+ small_buf, crypto_ablkcipher_blocksize(tfm));
+
+ if (copy_result != crypto_ablkcipher_blocksize(tfm)) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+				"des block copy failed\n");
+ return -ENOMEM;
+ }
+
+ /* Put data into message */
+ sep_write_msg(ta_ctx, small_buf,
+ crypto_ablkcipher_blocksize(tfm),
+ crypto_ablkcipher_blocksize(tfm) * 2,
+ &msg_offset, 1);
+
+ /* Put size into message */
+ sep_write_msg(ta_ctx, &req->nbytes,
+ sizeof(u32), sizeof(u32), &msg_offset, 0);
+ } else {
+ /* Otherwise, fill out dma tables */
+ ta_ctx->dcb_input_data.app_in_address = src_ptr;
+ ta_ctx->dcb_input_data.data_in_size = req->nbytes;
+ ta_ctx->dcb_input_data.app_out_address = dst_ptr;
+ ta_ctx->dcb_input_data.block_size =
+ crypto_ablkcipher_blocksize(tfm);
+ ta_ctx->dcb_input_data.tail_block_size = 0;
+ ta_ctx->dcb_input_data.is_applet = 0;
+ ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
+ ta_ctx->dcb_input_data.dst_sg = ta_ctx->dst_sg;
+
+ result = sep_create_dcb_dmatables_context_kernel(
+ ta_ctx->sep_used,
+ &ta_ctx->dcb_region,
+ &ta_ctx->dmatables_region,
+ &ta_ctx->dma_ctx,
+ &ta_ctx->dcb_input_data,
+ 1);
+ if (result) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "crypto dma table create failed\n");
+ return -EINVAL;
+ }
+
+ /* Portion of msg is nulled (no data) */
+ msg[0] = (u32)0;
+ msg[1] = (u32)0;
+ msg[2] = (u32)0;
+ msg[3] = (u32)0;
+ msg[4] = (u32)0;
+ sep_write_msg(ta_ctx, (void *)msg, sizeof(u32) * 5,
+ sizeof(u32) * 5, &msg_offset, 0);
+ }
+
+ /**
+ * Before we write the message, we need to overwrite the
+ * vendor's IV with the one from our own ablkcipher walk
+ * iv because this is needed for dm-crypt
+ */
+ sep_dump_ivs(req, "sending data block to sep\n");
+ if ((ta_ctx->current_request == DES_CBC) &&
+ (ta_ctx->des_opmode == SEP_DES_CBC)) {
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "overwrite vendor iv on DES\n");
+ des_internal = (struct sep_des_internal_context *)
+ sctx->des_private_ctx.ctx_buf;
+ memcpy((void *)des_internal->iv_context,
+ ta_ctx->walk.iv, crypto_ablkcipher_ivsize(tfm));
+ } else if ((ta_ctx->current_request == AES_CBC) &&
+ (ta_ctx->aes_opmode == SEP_AES_CBC)) {
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "overwrite vendor iv on AES\n");
+ aes_internal = (struct sep_aes_internal_context *)
+ sctx->aes_private_ctx.cbuff;
+ memcpy((void *)aes_internal->aes_ctx_iv,
+ ta_ctx->walk.iv, crypto_ablkcipher_ivsize(tfm));
+ }
+
+ /* Write context into message */
+ if (ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) {
+ sep_write_context(ta_ctx, &msg_offset,
+ &sctx->des_private_ctx,
+ sizeof(struct sep_des_private_context));
+ sep_dump(ta_ctx->sep_used, "ctx to block des",
+ &sctx->des_private_ctx, 40);
+ } else {
+ sep_write_context(ta_ctx, &msg_offset,
+ &sctx->aes_private_ctx,
+ sizeof(struct sep_aes_private_context));
+ sep_dump(ta_ctx->sep_used, "ctx to block aes",
+ &sctx->aes_private_ctx, 20);
+ }
+
+ /* conclude message */
+ sep_end_msg(ta_ctx, msg_offset);
+
+	/* Parent (caller) is now ready to tell the sep to go ahead */
+ return 0;
+}
+
+
+/**
+ * This function sets things up for a crypto key submit process
+ * This does all preparation, but does not try to grab the
+ * sep
+ * @req: pointer to struct ablkcipher_request
+ * returns: 0 if all went well, non zero if error
+ */
+static int sep_crypto_send_key(struct ablkcipher_request *req)
+{
+
+ int int_error;
+ u32 msg_offset;
+ static u32 msg[10];
+
+ u32 max_length;
+ struct this_task_ctx *ta_ctx;
+ struct crypto_ablkcipher *tfm;
+ struct sep_system_ctx *sctx;
+
+ ta_ctx = ablkcipher_request_ctx(req);
+ tfm = crypto_ablkcipher_reqtfm(req);
+ sctx = crypto_ablkcipher_ctx(tfm);
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "sending key\n");
+
+ /* start the walk on scatterlists */
+ ablkcipher_walk_init(&ta_ctx->walk, req->src, req->dst, req->nbytes);
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "sep crypto block data size of %x\n", req->nbytes);
+
+ int_error = ablkcipher_walk_phys(req, &ta_ctx->walk);
+ if (int_error) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
+ int_error);
+ return -ENOMEM;
+ }
+
+ /* check iv */
+ if ((ta_ctx->current_request == DES_CBC) &&
+ (ta_ctx->des_opmode == SEP_DES_CBC)) {
+ if (!ta_ctx->walk.iv) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev, "no iv found\n");
+ return -EINVAL;
+ }
+
+ memcpy(ta_ctx->iv, ta_ctx->walk.iv, SEP_DES_IV_SIZE_BYTES);
+ sep_dump(ta_ctx->sep_used, "iv",
+ ta_ctx->iv, SEP_DES_IV_SIZE_BYTES);
+ }
+
+ if ((ta_ctx->current_request == AES_CBC) &&
+ (ta_ctx->aes_opmode == SEP_AES_CBC)) {
+ if (!ta_ctx->walk.iv) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev, "no iv found\n");
+ return -EINVAL;
+ }
+
+ memcpy(ta_ctx->iv, ta_ctx->walk.iv, SEP_AES_IV_SIZE_BYTES);
+ sep_dump(ta_ctx->sep_used, "iv",
+ ta_ctx->iv, SEP_AES_IV_SIZE_BYTES);
+ }
+
+ /* put together message to SEP */
+ /* Start with op code */
+ sep_make_header(ta_ctx, &msg_offset, ta_ctx->init_opcode);
+
+ /* now deal with IV */
+ if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
+ if (ta_ctx->des_opmode == SEP_DES_CBC) {
+ sep_write_msg(ta_ctx, ta_ctx->iv,
+ SEP_DES_IV_SIZE_BYTES, sizeof(u32) * 4,
+ &msg_offset, 1);
+ sep_dump(ta_ctx->sep_used, "initial IV",
+ ta_ctx->walk.iv, SEP_DES_IV_SIZE_BYTES);
+ } else {
+ /* Skip if ECB */
+ msg_offset += 4 * sizeof(u32);
+ }
+ } else {
+ max_length = ((SEP_AES_IV_SIZE_BYTES + 3) /
+ sizeof(u32)) * sizeof(u32);
+ if (ta_ctx->aes_opmode == SEP_AES_CBC) {
+ sep_write_msg(ta_ctx, ta_ctx->iv,
+ SEP_AES_IV_SIZE_BYTES, max_length,
+ &msg_offset, 1);
+ sep_dump(ta_ctx->sep_used, "initial IV",
+ ta_ctx->walk.iv, SEP_AES_IV_SIZE_BYTES);
+ } else {
+ /* Skip if ECB */
+ msg_offset += max_length;
+ }
+ }
+
+ /* load the key */
+ if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
+ sep_write_msg(ta_ctx, (void *)&sctx->key.des.key1,
+ sizeof(u32) * 8, sizeof(u32) * 8,
+ &msg_offset, 1);
+
+ msg[0] = (u32)sctx->des_nbr_keys;
+ msg[1] = (u32)ta_ctx->des_encmode;
+ msg[2] = (u32)ta_ctx->des_opmode;
+
+ sep_write_msg(ta_ctx, (void *)msg,
+ sizeof(u32) * 3, sizeof(u32) * 3,
+ &msg_offset, 0);
+ } else {
+ sep_write_msg(ta_ctx, (void *)&sctx->key.aes,
+ sctx->keylen,
+ SEP_AES_MAX_KEY_SIZE_BYTES,
+ &msg_offset, 1);
+
+ msg[0] = (u32)sctx->aes_key_size;
+ msg[1] = (u32)ta_ctx->aes_encmode;
+ msg[2] = (u32)ta_ctx->aes_opmode;
+ msg[3] = (u32)0; /* Secret key is not used */
+ sep_write_msg(ta_ctx, (void *)msg,
+ sizeof(u32) * 4, sizeof(u32) * 4,
+ &msg_offset, 0);
+ }
+
+ /* conclude message */
+ sep_end_msg(ta_ctx, msg_offset);
+
+	/* Parent (caller) is now ready to tell the sep to go ahead */
+ return 0;
+}
+
+
+/* This needs to be run from a work queue as it can sleep */
+static void sep_crypto_block(void *data)
+{
+ unsigned long end_time;
+
+ int result;
+
+ struct ablkcipher_request *req;
+ struct this_task_ctx *ta_ctx;
+ struct crypto_ablkcipher *tfm;
+ struct sep_system_ctx *sctx;
+ int are_we_done_yet;
+
+ req = (struct ablkcipher_request *)data;
+ ta_ctx = ablkcipher_request_ctx(req);
+ tfm = crypto_ablkcipher_reqtfm(req);
+ sctx = crypto_ablkcipher_ctx(tfm);
+
+ ta_ctx->are_we_done_yet = &are_we_done_yet;
+
+ pr_debug("sep_crypto_block\n");
+ pr_debug("tfm is %p sctx is %p ta_ctx is %p\n",
+ tfm, sctx, ta_ctx);
+ pr_debug("key_sent is %d\n", sctx->key_sent);
+
+ /* do we need to send the key */
+ if (sctx->key_sent == 0) {
+ are_we_done_yet = 0;
+ result = sep_crypto_send_key(req); /* prep to send key */
+ if (result != 0) {
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "could not prep key %x\n", result);
+ sep_crypto_release(sctx, ta_ctx, result);
+ return;
+ }
+
+ result = sep_crypto_take_sep(ta_ctx);
+ if (result) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "sep_crypto_take_sep for key send failed\n");
+ sep_crypto_release(sctx, ta_ctx, result);
+ return;
+ }
+
+ /* now we sit and wait up to a fixed time for completion */
+ end_time = jiffies + (WAIT_TIME * HZ);
+ while ((time_before(jiffies, end_time)) &&
+ (are_we_done_yet == 0))
+ schedule();
+
+ /* Done waiting; still not done yet? */
+ if (are_we_done_yet == 0) {
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "Send key job never got done\n");
+ sep_crypto_release(sctx, ta_ctx, -EINVAL);
+ return;
+ }
+
+ /* Set the key sent variable so this can be skipped later */
+ sctx->key_sent = 1;
+ }
+
+ /* Key sent (or maybe not if we did not have to), now send block */
+ are_we_done_yet = 0;
+
+ result = sep_crypto_block_data(req);
+
+ if (result != 0) {
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+			"could not prep block %x\n", result);
+ sep_crypto_release(sctx, ta_ctx, result);
+ return;
+ }
+
+ result = sep_crypto_take_sep(ta_ctx);
+ if (result) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "sep_crypto_take_sep for block send failed\n");
+ sep_crypto_release(sctx, ta_ctx, result);
+ return;
+ }
+
+ /* now we sit and wait up to a fixed time for completion */
+ end_time = jiffies + (WAIT_TIME * HZ);
+ while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
+ schedule();
+
+ /* Done waiting; still not done yet? */
+ if (are_we_done_yet == 0) {
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "Send block job never got done\n");
+ sep_crypto_release(sctx, ta_ctx, -EINVAL);
+ return;
+ }
+
+ /* That's it; entire thing done, get out of queue */
+
+ pr_debug("crypto_block leaving\n");
+ pr_debug("tfm is %p sctx is %p ta_ctx is %p\n", tfm, sctx, ta_ctx);
+}
+
+/**
+ * Post operation (after interrupt) for crypto block
+ */
+static u32 crypto_post_op(struct sep_device *sep)
+{
+ u32 u32_error;
+ u32 msg_offset;
+
+ ssize_t copy_result;
+ static char small_buf[100];
+
+ struct ablkcipher_request *req;
+ struct this_task_ctx *ta_ctx;
+ struct sep_system_ctx *sctx;
+ struct crypto_ablkcipher *tfm;
+
+ struct sep_des_internal_context *des_internal;
+ struct sep_aes_internal_context *aes_internal;
+
+ if (!sep->current_cypher_req)
+ return -EINVAL;
+
+ /* hold req since we need to submit work after clearing sep */
+ req = sep->current_cypher_req;
+
+ ta_ctx = ablkcipher_request_ctx(sep->current_cypher_req);
+ tfm = crypto_ablkcipher_reqtfm(sep->current_cypher_req);
+ sctx = crypto_ablkcipher_ctx(tfm);
+
+ pr_debug("crypto_post op\n");
+ pr_debug("key_sent is %d tfm is %p sctx is %p ta_ctx is %p\n",
+ sctx->key_sent, tfm, sctx, ta_ctx);
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "crypto post_op\n");
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "crypto post_op message dump\n");
+ crypto_sep_dump_message(ta_ctx->sep_used, ta_ctx->msg);
+
+ /* first bring msg from shared area to local area */
+ memcpy(ta_ctx->msg, sep->shared_addr,
+ SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
+
+	/* Is this the result of performing init (sending key to SEP)? */
+ if (sctx->key_sent == 0) {
+
+ /* Did SEP do it okay */
+ u32_error = sep_verify_op(ta_ctx, ta_ctx->init_opcode,
+ &msg_offset);
+ if (u32_error) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "aes init error %x\n", u32_error);
+ sep_crypto_release(sctx, ta_ctx, u32_error);
+ return u32_error;
+ }
+
+ /* Read Context */
+ if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
+ sep_read_context(ta_ctx, &msg_offset,
+ &sctx->des_private_ctx,
+ sizeof(struct sep_des_private_context));
+
+ sep_dump(ta_ctx->sep_used, "ctx init des",
+ &sctx->des_private_ctx, 40);
+ } else {
+ sep_read_context(ta_ctx, &msg_offset,
+ &sctx->aes_private_ctx,
+ sizeof(struct sep_aes_private_context));
+
+ sep_dump(ta_ctx->sep_used, "ctx init aes",
+ &sctx->aes_private_ctx, 20);
+ }
+
+ sep_dump_ivs(req, "after sending key to sep\n");
+
+		/* key send went okay; release sep, and set are_we_done_yet */
+ sctx->key_sent = 1;
+ sep_crypto_release(sctx, ta_ctx, -EINPROGRESS);
+
+ } else {
+
+ /**
+ * This is the result of a block request
+ */
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "crypto_post_op block response\n");
+
+ u32_error = sep_verify_op(ta_ctx, ta_ctx->block_opcode,
+ &msg_offset);
+
+ if (u32_error) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "sep block error %x\n", u32_error);
+ sep_crypto_release(sctx, ta_ctx, u32_error);
+ return -EINVAL;
+ }
+
+ if (ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) {
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "post op for DES\n");
+
+ /* special case for 1 block des */
+ if (sep->current_cypher_req->nbytes ==
+ crypto_ablkcipher_blocksize(tfm)) {
+
+ sep_read_msg(ta_ctx, small_buf,
+ crypto_ablkcipher_blocksize(tfm),
+ crypto_ablkcipher_blocksize(tfm) * 2,
+ &msg_offset, 1);
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "reading in block des\n");
+
+ copy_result = sg_copy_from_buffer(
+ ta_ctx->dst_sg,
+ sep_sg_nents(ta_ctx->dst_sg),
+ small_buf,
+ crypto_ablkcipher_blocksize(tfm));
+
+ if (copy_result !=
+ crypto_ablkcipher_blocksize(tfm)) {
+
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+						"des block copy failed\n");
+ sep_crypto_release(sctx, ta_ctx,
+ -ENOMEM);
+ return -ENOMEM;
+ }
+ }
+
+ /* Read Context */
+ sep_read_context(ta_ctx, &msg_offset,
+ &sctx->des_private_ctx,
+ sizeof(struct sep_des_private_context));
+ } else {
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "post op for AES\n");
+
+ /* Skip the MAC Output */
+ msg_offset += (sizeof(u32) * 4);
+
+ /* Read Context */
+ sep_read_context(ta_ctx, &msg_offset,
+ &sctx->aes_private_ctx,
+ sizeof(struct sep_aes_private_context));
+ }
+
+ sep_dump_sg(ta_ctx->sep_used,
+ "block sg out", ta_ctx->dst_sg);
+
+ /* Copy to correct sg if this block had oddball pages */
+ if (ta_ctx->dst_sg_hold)
+ sep_copy_sg(ta_ctx->sep_used,
+ ta_ctx->dst_sg,
+ ta_ctx->current_cypher_req->dst,
+ ta_ctx->current_cypher_req->nbytes);
+
+ /**
+ * Copy the iv's back to the walk.iv
+ * This is required for dm_crypt
+ */
+ sep_dump_ivs(req, "got data block from sep\n");
+ if ((ta_ctx->current_request == DES_CBC) &&
+ (ta_ctx->des_opmode == SEP_DES_CBC)) {
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "returning result iv to walk on DES\n");
+ des_internal = (struct sep_des_internal_context *)
+ sctx->des_private_ctx.ctx_buf;
+ memcpy(ta_ctx->walk.iv,
+ (void *)des_internal->iv_context,
+ crypto_ablkcipher_ivsize(tfm));
+ } else if ((ta_ctx->current_request == AES_CBC) &&
+ (ta_ctx->aes_opmode == SEP_AES_CBC)) {
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "returning result iv to walk on AES\n");
+ aes_internal = (struct sep_aes_internal_context *)
+ sctx->aes_private_ctx.cbuff;
+ memcpy(ta_ctx->walk.iv,
+ (void *)aes_internal->aes_ctx_iv,
+ crypto_ablkcipher_ivsize(tfm));
+ }
+
+ /* finished, release everything */
+ sep_crypto_release(sctx, ta_ctx, 0);
+ }
+ pr_debug("crypto_post_op done\n");
+ pr_debug("key_sent is %d tfm is %p sctx is %p ta_ctx is %p\n",
+ sctx->key_sent, tfm, sctx, ta_ctx);
+
+ return 0;
+}
+
+static u32 hash_init_post_op(struct sep_device *sep)
+{
+ u32 u32_error;
+ u32 msg_offset;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
+ struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "hash init post op\n");
+
+ /* first bring msg from shared area to local area */
+ memcpy(ta_ctx->msg, sep->shared_addr,
+ SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
+
+ u32_error = sep_verify_op(ta_ctx, SEP_HASH_INIT_OPCODE,
+ &msg_offset);
+
+ if (u32_error) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev, "hash init error %x\n",
+ u32_error);
+ sep_crypto_release(sctx, ta_ctx, u32_error);
+ return u32_error;
+ }
+
+ /* Read Context */
+ sep_read_context(ta_ctx, &msg_offset,
+ &sctx->hash_private_ctx,
+ sizeof(struct sep_hash_private_context));
+
+ /* Signal to crypto infrastructure and clear out */
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash init post op done\n");
+ sep_crypto_release(sctx, ta_ctx, 0);
+ return 0;
+}
+
+static u32 hash_update_post_op(struct sep_device *sep)
+{
+ u32 u32_error;
+ u32 msg_offset;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
+ struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "hash update post op\n");
+
+ /* first bring msg from shared area to local area */
+ memcpy(ta_ctx->msg, sep->shared_addr,
+ SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
+
+ u32_error = sep_verify_op(ta_ctx, SEP_HASH_UPDATE_OPCODE,
+ &msg_offset);
+
+ if (u32_error) {
+		dev_warn(&ta_ctx->sep_used->pdev->dev, "hash update error %x\n",
+ u32_error);
+ sep_crypto_release(sctx, ta_ctx, u32_error);
+ return u32_error;
+ }
+
+ /* Read Context */
+ sep_read_context(ta_ctx, &msg_offset,
+ &sctx->hash_private_ctx,
+ sizeof(struct sep_hash_private_context));
+
+ /**
+	 * The following is only for finup; if we just completed the
+ * data portion of finup, we now need to kick off the
+ * finish portion of finup.
+ */
+
+ if (ta_ctx->sep_used->current_hash_stage == HASH_FINUP_DATA) {
+
+ /* first reset stage to HASH_FINUP_FINISH */
+ ta_ctx->sep_used->current_hash_stage = HASH_FINUP_FINISH;
+
+ /* now enqueue the finish operation */
+ spin_lock_irq(&queue_lock);
+ u32_error = crypto_enqueue_request(&sep_queue,
+ &ta_ctx->sep_used->current_hash_req->base);
+ spin_unlock_irq(&queue_lock);
+
+ if ((u32_error != 0) && (u32_error != -EINPROGRESS)) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "hash update post op can't queue\n");
+ sep_crypto_release(sctx, ta_ctx, u32_error);
+ return u32_error;
+ }
+
+ /* schedule the data send */
+ u32_error = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+
+ if (u32_error) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "can't submit work for finup finish\n");
+ sep_crypto_release(sctx, ta_ctx, -EINVAL);
+ return -EINVAL;
+ }
+ }
+
+ /* Signal to crypto infrastructure and clear out */
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash update post op done\n");
+ sep_crypto_release(sctx, ta_ctx, 0);
+ return 0;
+}
+
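+/* Completion handler for HASH_FINISH; copies the digest to req->result */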
+static u32 hash_final_post_op(struct sep_device *sep)
+{
+ int max_length;
+ u32 u32_error;
+ u32 msg_offset;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
+ struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "hash final post op\n");
+
+ /* first bring msg from shared area to local area */
+ memcpy(ta_ctx->msg, sep->shared_addr,
+ SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
+
+ u32_error = sep_verify_op(ta_ctx, SEP_HASH_FINISH_OPCODE,
+ &msg_offset);
+
+ if (u32_error) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev, "hash finish error %x\n",
+ u32_error);
+ sep_crypto_release(sctx, ta_ctx, u32_error);
+ return u32_error;
+ }
+
+ /* Grab the result */
+ if (ta_ctx->current_hash_req->result == NULL) {
+ /* Oops, null buffer; error out here */
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "hash finish null buffer\n");
+ sep_crypto_release(sctx, ta_ctx, (u32)-ENOMEM);
+ return -ENOMEM;
+ }
+
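+ /* round the digest buffer size up to a whole number of 32-bit words */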
+ max_length = (((SEP_HASH_RESULT_SIZE_WORDS * sizeof(u32)) + 3) /
+ sizeof(u32)) * sizeof(u32);
+
+ sep_read_msg(ta_ctx,
+ ta_ctx->current_hash_req->result,
+ crypto_ahash_digestsize(tfm), max_length,
+ &msg_offset, 0);
+
+ /* Signal to crypto infrastructure and clear out */
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash finish post op done\n");
+ sep_crypto_release(sctx, ta_ctx, 0);
+ return 0;
+}
+
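+/* Completion handler for a one-shot HASH_DIGEST; copies the digest out */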
+static u32 hash_digest_post_op(struct sep_device *sep)
+{
+ int max_length;
+ u32 u32_error;
+ u32 msg_offset;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
+ struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "hash digest post op\n");
+
+ /* first bring msg from shared area to local area */
+ memcpy(ta_ctx->msg, sep->shared_addr,
+ SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
+
+ u32_error = sep_verify_op(ta_ctx, SEP_HASH_SINGLE_OPCODE,
+ &msg_offset);
+
+ if (u32_error) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "hash digest finish error %x\n", u32_error);
+
+ sep_crypto_release(sctx, ta_ctx, u32_error);
+ return u32_error;
+ }
+
+ /* Grab the result */
+ if (ta_ctx->current_hash_req->result == NULL) {
+ /* Oops, null buffer; error out here */
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "hash digest finish null buffer\n");
+ sep_crypto_release(sctx, ta_ctx, (u32)-ENOMEM);
+ return -ENOMEM;
+ }
+
+ max_length = (((SEP_HASH_RESULT_SIZE_WORDS * sizeof(u32)) + 3) /
+ sizeof(u32)) * sizeof(u32);
+
+ sep_read_msg(ta_ctx,
+ ta_ctx->current_hash_req->result,
+ crypto_ahash_digestsize(tfm), max_length,
+ &msg_offset, 0);
+
+ /* Signal to crypto infrastructure and clear out */
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "hash digest finish post op done\n");
+
+ sep_crypto_release(sctx, ta_ctx, 0);
+ return 0;
+}
+
+/**
+ * The sep_finish function is scheduled (via tasklet) by the
+ * interrupt service routine when the SEP sends an interrupt.
+ * It is only called by the interrupt handler as a tasklet.
+ */
+static void sep_finish(unsigned long data)
+{
+ struct sep_device *sep_dev;
+ int res;
+
+ res = 0;
+
+ if (data == 0) {
+ pr_debug("sep_finish called with null data\n");
+ return;
+ }
+
+ sep_dev = (struct sep_device *)data;
+ if (sep_dev == NULL) {
+ pr_debug("sep_finish; sep_dev is NULL\n");
+ return;
+ }
+
+ if (sep_dev->in_kernel == (u32)0) {
+ dev_warn(&sep_dev->pdev->dev,
+ "sep_finish; not in kernel operation\n");
+ return;
+ }
+
+ /* Did we really do a sep command prior to this? */
+ if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
+ &sep_dev->ta_ctx->call_status.status)) {
+
+ dev_warn(&sep_dev->pdev->dev, "[PID%d] sendmsg not called\n",
+ current->pid);
+ return;
+ }
+
+ if (sep_dev->send_ct != sep_dev->reply_ct) {
+ dev_warn(&sep_dev->pdev->dev,
+ "[PID%d] poll; no message came back\n",
+ current->pid);
+ return;
+ }
+
+ /* Check for error (In case time ran out) */
+ if ((res != 0x0) && (res != 0x8)) {
+ dev_warn(&sep_dev->pdev->dev,
+ "[PID%d] poll; poll error GPR3 is %x\n",
+ current->pid, res);
+ return;
+ }
+
+ /* What kind of interrupt from sep was this? */
+ res = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
+
+ dev_dbg(&sep_dev->pdev->dev, "[PID%d] GPR2 at crypto finish is %x\n",
+ current->pid, res);
+
+ /* Print request? */
+ if ((res >> 30) & 0x1) {
+ dev_dbg(&sep_dev->pdev->dev, "[PID%d] sep print req\n",
+ current->pid);
+ dev_dbg(&sep_dev->pdev->dev, "[PID%d] contents: %s\n",
+ current->pid,
+ (char *)(sep_dev->shared_addr +
+ SEP_DRIVER_PRINTF_OFFSET_IN_BYTES));
+ return;
+ }
+
+ /* Request for daemon (not currently in POR)? */
+ if (res >> 31) {
+ dev_dbg(&sep_dev->pdev->dev,
+ "[PID%d] sep request; ignoring\n",
+ current->pid);
+ return;
+ }
+
+ /* If we got here, then we have a reply to a sep command */
+
+ dev_dbg(&sep_dev->pdev->dev,
+ "[PID%d] sep reply to command; processing request: %x\n",
+ current->pid, sep_dev->current_request);
+
+ switch (sep_dev->current_request) {
+ case AES_CBC:
+ case AES_ECB:
+ case DES_CBC:
+ case DES_ECB:
+ res = crypto_post_op(sep_dev);
+ break;
+ case SHA1:
+ case MD5:
+ case SHA224:
+ case SHA256:
+ switch (sep_dev->current_hash_stage) {
+ case HASH_INIT:
+ res = hash_init_post_op(sep_dev);
+ break;
+ case HASH_UPDATE:
+ case HASH_FINUP_DATA:
+ res = hash_update_post_op(sep_dev);
+ break;
+ case HASH_FINUP_FINISH:
+ case HASH_FINISH:
+ res = hash_final_post_op(sep_dev);
+ break;
+ case HASH_DIGEST:
+ res = hash_digest_post_op(sep_dev);
+ break;
+ default:
+ pr_debug("sep - invalid stage for hash finish\n");
+ }
+ break;
+ default:
+ pr_debug("sep - invalid request for finish\n");
+ }
+
+ if (res)
+ pr_debug("sep - finish returned error %x\n", res);
+}
+
+static int sep_hash_cra_init(struct crypto_tfm *tfm)
+{
+ const char *alg_name = crypto_tfm_alg_name(tfm);
+
+ pr_debug("sep_hash_cra_init name is %s\n", alg_name);
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct this_task_ctx));
+ return 0;
+}
+
+static void sep_hash_cra_exit(struct crypto_tfm *tfm)
+{
+ pr_debug("sep_hash_cra_exit\n");
+}
+
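+/*
+ * sep_hash_init through sep_hash_digest below run on the driver
+ * workqueue: each builds the corresponding SEP message, hands the
+ * device to the request, and then polls the are_we_done_yet flag
+ * until the completion path sets it or a WAIT_TIME timeout expires.
+ */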
+static void sep_hash_init(void *data)
+{
+ u32 msg_offset;
+ int result;
+ struct ahash_request *req;
+ struct crypto_ahash *tfm;
+ struct this_task_ctx *ta_ctx;
+ struct sep_system_ctx *sctx;
+ unsigned long end_time;
+ int are_we_done_yet;
+
+ req = (struct ahash_request *)data;
+ tfm = crypto_ahash_reqtfm(req);
+ sctx = crypto_ahash_ctx(tfm);
+ ta_ctx = ahash_request_ctx(req);
+ ta_ctx->sep_used = sep_dev;
+
+ ta_ctx->are_we_done_yet = &are_we_done_yet;
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "sep_hash_init\n");
+ ta_ctx->current_hash_stage = HASH_INIT;
+ /* opcode and mode */
+ sep_make_header(ta_ctx, &msg_offset, SEP_HASH_INIT_OPCODE);
+ sep_write_msg(ta_ctx, &ta_ctx->hash_opmode,
+ sizeof(u32), sizeof(u32), &msg_offset, 0);
+ sep_end_msg(ta_ctx, msg_offset);
+
+ are_we_done_yet = 0;
+ result = sep_crypto_take_sep(ta_ctx);
+ if (result) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "sep_hash_init take sep failed\n");
+ sep_crypto_release(sctx, ta_ctx, -EINVAL);
+ return;
+ }
+
+ /* now we sit and wait up to a fixed time for completion */
+ end_time = jiffies + (WAIT_TIME * HZ);
+ while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
+ schedule();
+
+ /* Done waiting; still not done yet? */
+ if (are_we_done_yet == 0) {
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "hash init never got done\n");
+ sep_crypto_release(sctx, ta_ctx, -EINVAL);
+ return;
+ }
+
+}
+
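+/*
+ * Send an update: whole blocks go through the DMA tables while any
+ * partial head/tail remainders are copied inline into the message.
+ */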
+static void sep_hash_update(void *data)
+{
+ int int_error;
+ u32 msg_offset;
+ u32 len;
+ struct sep_hash_internal_context *int_ctx;
+ u32 block_size;
+ u32 head_len;
+ u32 tail_len;
+ int are_we_done_yet;
+
+ static u32 msg[10];
+ static char small_buf[100];
+ void *src_ptr;
+ struct scatterlist *new_sg;
+ ssize_t copy_result;
+ struct ahash_request *req;
+ struct crypto_ahash *tfm;
+ struct this_task_ctx *ta_ctx;
+ struct sep_system_ctx *sctx;
+ unsigned long end_time;
+
+ req = (struct ahash_request *)data;
+ tfm = crypto_ahash_reqtfm(req);
+ sctx = crypto_ahash_ctx(tfm);
+ ta_ctx = ahash_request_ctx(req);
+ ta_ctx->sep_used = sep_dev;
+
+ ta_ctx->are_we_done_yet = &are_we_done_yet;
+
+ /* length for queue status */
+ ta_ctx->nbytes = req->nbytes;
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "sep_hash_update\n");
+ ta_ctx->current_hash_stage = HASH_UPDATE;
+ len = req->nbytes;
+
+ block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+ tail_len = req->nbytes % block_size;
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "length is %x\n", len);
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "block_size is %x\n", block_size);
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "tail len is %x\n", tail_len);
+
+ /* Compute header/tail sizes */
+ int_ctx = (struct sep_hash_internal_context *)&sctx->
+ hash_private_ctx.internal_context;
+ head_len = (block_size - int_ctx->prev_update_bytes) % block_size;
+ tail_len = (req->nbytes - head_len) % block_size;
+
+ /* Make sure all pages are even block */
+ int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
+ req->nbytes,
+ block_size, &new_sg, 1);
+
+ if (int_error < 0) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "oddball pages error in hash update\n");
+ sep_crypto_release(sctx, ta_ctx, -ENOMEM);
+ return;
+ } else if (int_error == 1) {
+ ta_ctx->src_sg = new_sg;
+ ta_ctx->src_sg_hold = new_sg;
+ } else {
+ ta_ctx->src_sg = req->src;
+ ta_ctx->src_sg_hold = NULL;
+ }
+
+ src_ptr = sg_virt(ta_ctx->src_sg);
+
+ if ((!req->nbytes) || (!ta_ctx->src_sg)) {
+ /* null data */
+ src_ptr = NULL;
+ }
+
+ sep_dump_sg(ta_ctx->sep_used, "hash block sg in", ta_ctx->src_sg);
+
+ ta_ctx->dcb_input_data.app_in_address = src_ptr;
+ ta_ctx->dcb_input_data.data_in_size =
+ req->nbytes - (head_len + tail_len);
+ ta_ctx->dcb_input_data.app_out_address = NULL;
+ ta_ctx->dcb_input_data.block_size = block_size;
+ ta_ctx->dcb_input_data.tail_block_size = 0;
+ ta_ctx->dcb_input_data.is_applet = 0;
+ ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
+ ta_ctx->dcb_input_data.dst_sg = NULL;
+
+ int_error = sep_create_dcb_dmatables_context_kernel(
+ ta_ctx->sep_used,
+ &ta_ctx->dcb_region,
+ &ta_ctx->dmatables_region,
+ &ta_ctx->dma_ctx,
+ &ta_ctx->dcb_input_data,
+ 1);
+ if (int_error) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "hash update dma table create failed\n");
+ sep_crypto_release(sctx, ta_ctx, -EINVAL);
+ return;
+ }
+
+ /* Construct message to SEP */
+ sep_make_header(ta_ctx, &msg_offset, SEP_HASH_UPDATE_OPCODE);
+
+ msg[0] = (u32)0;
+ msg[1] = (u32)0;
+ msg[2] = (u32)0;
+
+ sep_write_msg(ta_ctx, msg, sizeof(u32) * 3, sizeof(u32) * 3,
+ &msg_offset, 0);
+
+ /* Handle remainders */
+
+ /* Head */
+ sep_write_msg(ta_ctx, &head_len, sizeof(u32),
+ sizeof(u32), &msg_offset, 0);
+
+ if (head_len) {
+ copy_result = sg_copy_to_buffer(
+ req->src,
+ sep_sg_nents(ta_ctx->src_sg),
+ small_buf, head_len);
+
+ if (copy_result != head_len) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "sg head copy failure in hash block\n");
+ sep_crypto_release(sctx, ta_ctx, -ENOMEM);
+ return;
+ }
+
+ sep_write_msg(ta_ctx, small_buf, head_len,
+ sizeof(u32) * 32, &msg_offset, 1);
+ } else {
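+ /* no head remainder; skip the 32-word slot reserved for it */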
+ msg_offset += sizeof(u32) * 32;
+ }
+
+ /* Tail */
+ sep_write_msg(ta_ctx, &tail_len, sizeof(u32),
+ sizeof(u32), &msg_offset, 0);
+
+ if (tail_len) {
+ copy_result = sep_copy_offset_sg(
+ ta_ctx->sep_used,
+ ta_ctx->src_sg,
+ req->nbytes - tail_len,
+ small_buf, tail_len);
+
+ if (copy_result != tail_len) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "sg tail copy failure in hash block\n");
+ sep_crypto_release(sctx, ta_ctx, -ENOMEM);
+ return;
+ }
+
+ sep_write_msg(ta_ctx, small_buf, tail_len,
+ sizeof(u32) * 32, &msg_offset, 1);
+ } else {
+ msg_offset += sizeof(u32) * 32;
+ }
+
+ /* Context */
+ sep_write_context(ta_ctx, &msg_offset, &sctx->hash_private_ctx,
+ sizeof(struct sep_hash_private_context));
+
+ sep_end_msg(ta_ctx, msg_offset);
+ are_we_done_yet = 0;
+ int_error = sep_crypto_take_sep(ta_ctx);
+ if (int_error) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "sep_hash_update take sep failed\n");
+ sep_crypto_release(sctx, ta_ctx, -EINVAL);
+ return;
+ }
+
+ /* now we sit and wait up to a fixed time for completion */
+ end_time = jiffies + (WAIT_TIME * HZ);
+ while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
+ schedule();
+
+ /* Done waiting; still not done yet? */
+ if (are_we_done_yet == 0) {
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "hash update never got done\n");
+ sep_crypto_release(sctx, ta_ctx, -EINVAL);
+ return;
+ }
+
+}
+
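+/* Send the FINISH message: just the saved hash context, no data */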
+static void sep_hash_final(void *data)
+{
+ u32 msg_offset;
+ struct ahash_request *req;
+ struct crypto_ahash *tfm;
+ struct this_task_ctx *ta_ctx;
+ struct sep_system_ctx *sctx;
+ int result;
+ unsigned long end_time;
+ int are_we_done_yet;
+
+ req = (struct ahash_request *)data;
+ tfm = crypto_ahash_reqtfm(req);
+ sctx = crypto_ahash_ctx(tfm);
+ ta_ctx = ahash_request_ctx(req);
+ ta_ctx->sep_used = sep_dev;
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "sep_hash_final\n");
+ ta_ctx->current_hash_stage = HASH_FINISH;
+
+ ta_ctx->are_we_done_yet = &are_we_done_yet;
+
+ /* opcode and mode */
+ sep_make_header(ta_ctx, &msg_offset, SEP_HASH_FINISH_OPCODE);
+
+ /* Context */
+ sep_write_context(ta_ctx, &msg_offset, &sctx->hash_private_ctx,
+ sizeof(struct sep_hash_private_context));
+
+ sep_end_msg(ta_ctx, msg_offset);
+ are_we_done_yet = 0;
+ result = sep_crypto_take_sep(ta_ctx);
+ if (result) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "sep_hash_final take sep failed\n");
+ sep_crypto_release(sctx, ta_ctx, -EINVAL);
+ return;
+ }
+
+ /* now we sit and wait up to a fixed time for completion */
+ end_time = jiffies + (WAIT_TIME * HZ);
+ while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
+ schedule();
+
+ /* Done waiting; still not done yet? */
+ if (are_we_done_yet == 0) {
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "hash final job never got done\n");
+ sep_crypto_release(sctx, ta_ctx, -EINVAL);
+ return;
+ }
+
+}
+
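+/*
+ * One-shot digest: like update, whole blocks are described by DMA
+ * tables and any tail remainder is copied inline into the message.
+ */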
+static void sep_hash_digest(void *data)
+{
+ int int_error;
+ u32 msg_offset;
+ u32 block_size;
+ u32 msg[10];
+ size_t copy_result;
+ int result;
+ int are_we_done_yet;
+ u32 tail_len;
+ static char small_buf[100];
+ struct scatterlist *new_sg;
+ void *src_ptr;
+
+ struct ahash_request *req;
+ struct crypto_ahash *tfm;
+ struct this_task_ctx *ta_ctx;
+ struct sep_system_ctx *sctx;
+ unsigned long end_time;
+
+ req = (struct ahash_request *)data;
+ tfm = crypto_ahash_reqtfm(req);
+ sctx = crypto_ahash_ctx(tfm);
+ ta_ctx = ahash_request_ctx(req);
+ ta_ctx->sep_used = sep_dev;
+
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "sep_hash_digest\n");
+ ta_ctx->current_hash_stage = HASH_DIGEST;
+
+ ta_ctx->are_we_done_yet = &are_we_done_yet;
+
+ /* length for queue status */
+ ta_ctx->nbytes = req->nbytes;
+
+ block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+ tail_len = req->nbytes % block_size;
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "length is %x\n", req->nbytes);
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "block_size is %x\n", block_size);
+ dev_dbg(&ta_ctx->sep_used->pdev->dev, "tail len is %x\n", tail_len);
+
+ /* Make sure all pages are even block */
+ int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
+ req->nbytes,
+ block_size, &new_sg, 1);
+
+ if (int_error < 0) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "oddball pages error in hash digest\n");
+ sep_crypto_release(sctx, ta_ctx, -ENOMEM);
+ return;
+ } else if (int_error == 1) {
+ ta_ctx->src_sg = new_sg;
+ ta_ctx->src_sg_hold = new_sg;
+ } else {
+ ta_ctx->src_sg = req->src;
+ ta_ctx->src_sg_hold = NULL;
+ }
+
+ src_ptr = sg_virt(ta_ctx->src_sg);
+
+ if ((!req->nbytes) || (!ta_ctx->src_sg)) {
+ /* null data */
+ src_ptr = NULL;
+ }
+
+ sep_dump_sg(ta_ctx->sep_used, "hash block sg in", ta_ctx->src_sg);
+
+ ta_ctx->dcb_input_data.app_in_address = src_ptr;
+ ta_ctx->dcb_input_data.data_in_size = req->nbytes - tail_len;
+ ta_ctx->dcb_input_data.app_out_address = NULL;
+ ta_ctx->dcb_input_data.block_size = block_size;
+ ta_ctx->dcb_input_data.tail_block_size = 0;
+ ta_ctx->dcb_input_data.is_applet = 0;
+ ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
+ ta_ctx->dcb_input_data.dst_sg = NULL;
+
+ int_error = sep_create_dcb_dmatables_context_kernel(
+ ta_ctx->sep_used,
+ &ta_ctx->dcb_region,
+ &ta_ctx->dmatables_region,
+ &ta_ctx->dma_ctx,
+ &ta_ctx->dcb_input_data,
+ 1);
+ if (int_error) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "hash digest dma table create failed\n");
+ sep_crypto_release(sctx, ta_ctx, -EINVAL);
+ return;
+ }
+
+ /* Construct message to SEP */
+ sep_make_header(ta_ctx, &msg_offset, SEP_HASH_SINGLE_OPCODE);
+ sep_write_msg(ta_ctx, &ta_ctx->hash_opmode,
+ sizeof(u32), sizeof(u32), &msg_offset, 0);
+
+ msg[0] = (u32)0;
+ msg[1] = (u32)0;
+ msg[2] = (u32)0;
+
+ sep_write_msg(ta_ctx, msg, sizeof(u32) * 3, sizeof(u32) * 3,
+ &msg_offset, 0);
+
+ /* Tail */
+ sep_write_msg(ta_ctx, &tail_len, sizeof(u32),
+ sizeof(u32), &msg_offset, 0);
+
+ if (tail_len) {
+ copy_result = sep_copy_offset_sg(
+ ta_ctx->sep_used,
+ ta_ctx->src_sg,
+ req->nbytes - tail_len,
+ small_buf, tail_len);
+
+ if (copy_result != tail_len) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "sg tail copy failure in hash block\n");
+ sep_crypto_release(sctx, ta_ctx, -ENOMEM);
+ return;
+ }
+
+ sep_write_msg(ta_ctx, small_buf, tail_len,
+ sizeof(u32) * 32, &msg_offset, 1);
+ } else {
+ msg_offset += sizeof(u32) * 32;
+ }
+
+ sep_end_msg(ta_ctx, msg_offset);
+
+ are_we_done_yet = 0;
+ result = sep_crypto_take_sep(ta_ctx);
+ if (result) {
+ dev_warn(&ta_ctx->sep_used->pdev->dev,
+ "sep_hash_digest take sep failed\n");
+ sep_crypto_release(sctx, ta_ctx, -EINVAL);
+ return;
+ }
+
+ /* now we sit and wait up to a fixed time for completion */
+ end_time = jiffies + (WAIT_TIME * HZ);
+ while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
+ schedule();
+
+ /* Done waiting; still not done yet? */
+ if (are_we_done_yet == 0) {
+ dev_dbg(&ta_ctx->sep_used->pdev->dev,
+ "hash digest job never got done\n");
+ sep_crypto_release(sctx, ta_ctx, -EINVAL);
+ return;
+ }
+
+}
+
+/**
+ * This is what is called by each of the APIs provided
+ * in the kernel crypto descriptors. It is run in a process
+ * context using the kernel workqueues. Therefore it can
+ * be put to sleep.
+ */
+static void sep_dequeuer(void *data)
+{
+ struct crypto_queue *this_queue;
+ struct crypto_async_request *async_req;
+ struct crypto_async_request *backlog;
+ struct ablkcipher_request *cypher_req;
+ struct ahash_request *hash_req;
+ struct sep_system_ctx *sctx;
+ struct crypto_ahash *hash_tfm;
+ struct this_task_ctx *ta_ctx;
+
+
+ this_queue = (struct crypto_queue *)data;
+
+ spin_lock_irq(&queue_lock);
+ backlog = crypto_get_backlog(this_queue);
+ async_req = crypto_dequeue_request(this_queue);
+ spin_unlock_irq(&queue_lock);
+
+ if (!async_req) {
+ pr_debug("sep crypto queue is empty\n");
+ return;
+ }
+
+ if (backlog) {
+ pr_debug("sep crypto backlog set\n");
+ if (backlog->complete)
+ backlog->complete(backlog, -EINPROGRESS);
+ backlog = NULL;
+ }
+
+ if (!async_req->tfm) {
+ pr_debug("sep crypto queue null tfm\n");
+ return;
+ }
+
+ if (!async_req->tfm->__crt_alg) {
+ pr_debug("sep crypto queue null __crt_alg\n");
+ return;
+ }
+
+ if (!async_req->tfm->__crt_alg->cra_type) {
+ pr_debug("sep crypto queue null cra_type\n");
+ return;
+ }
+
+ /* we have stuff in the queue */
+ if (async_req->tfm->__crt_alg->cra_type !=
+ &crypto_ahash_type) {
+ /* This is for a cypher */
+ pr_debug("sep crypto queue doing cipher\n");
+ cypher_req = container_of(async_req,
+ struct ablkcipher_request,
+ base);
+ if (!cypher_req) {
+ pr_debug("sep crypto queue null cypher_req\n");
+ return;
+ }
+
+ sep_crypto_block((void *)cypher_req);
+ return;
+ } else {
+ /* This is a hash */
+ pr_debug("sep crypto queue doing hash\n");
+ /*
+ * This is a bit more complex than cipher; we need to
+ * figure out what type of hash operation is being requested.
+ */
+ hash_req = ahash_request_cast(async_req);
+ if (!hash_req) {
+ pr_debug("sep crypto queue null hash_req\n");
+ return;
+ }
+
+ hash_tfm = crypto_ahash_reqtfm(hash_req);
+ if (!hash_tfm) {
+ pr_debug("sep crypto queue null hash_tfm\n");
+ return;
+ }
+
+
+ sctx = crypto_ahash_ctx(hash_tfm);
+ if (!sctx) {
+ pr_debug("sep crypto queue null sctx\n");
+ return;
+ }
+
+ ta_ctx = ahash_request_ctx(hash_req);
+
+ if (ta_ctx->current_hash_stage == HASH_INIT) {
+ pr_debug("sep crypto queue hash init\n");
+ sep_hash_init((void *)hash_req);
+ return;
+ } else if (ta_ctx->current_hash_stage == HASH_UPDATE) {
+ pr_debug("sep crypto queue hash update\n");
+ sep_hash_update((void *)hash_req);
+ return;
+ } else if (ta_ctx->current_hash_stage == HASH_FINISH) {
+ pr_debug("sep crypto queue hash final\n");
+ sep_hash_final((void *)hash_req);
+ return;
+ } else if (ta_ctx->current_hash_stage == HASH_DIGEST) {
+ pr_debug("sep crypto queue hash digest\n");
+ sep_hash_digest((void *)hash_req);
+ return;
+ } else if (ta_ctx->current_hash_stage == HASH_FINUP_DATA) {
+ pr_debug("sep crypto queue hash finup data\n");
+ sep_hash_update((void *)hash_req);
+ return;
+ } else if (ta_ctx->current_hash_stage == HASH_FINUP_FINISH) {
+ pr_debug("sep crypto queue hash finup finish\n");
+ sep_hash_final((void *)hash_req);
+ return;
+ } else {
+ pr_debug("sep crypto queue hash oops nothing\n");
+ return;
+ }
+ }
+}
+
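+/*
+ * The ahash/ablkcipher entry points below all follow the same
+ * pattern: record the operation in the per-request task context,
+ * enqueue the request under queue_lock, then kick the workqueue so
+ * sep_dequeuer picks it up. The crypto_enqueue_request() status is
+ * what gets returned to the caller.
+ */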
+static int sep_sha1_init(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+
+ pr_debug("sep - doing sha1 init\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA1;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA1;
+ ta_ctx->current_hash_stage = HASH_INIT;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha1_update(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+
+ pr_debug("sep - doing sha1 update\n");
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA1;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA1;
+ ta_ctx->current_hash_stage = HASH_UPDATE;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha1_final(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+ pr_debug("sep - doing sha1 final\n");
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA1;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA1;
+ ta_ctx->current_hash_stage = HASH_FINISH;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha1_digest(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+ pr_debug("sep - doing sha1 digest\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA1;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA1;
+ ta_ctx->current_hash_stage = HASH_DIGEST;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha1_finup(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+ pr_debug("sep - doing sha1 finup\n");
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA1;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA1;
+ ta_ctx->current_hash_stage = HASH_FINUP_DATA;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_md5_init(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+ pr_debug("sep - doing md5 init\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = MD5;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_MD5;
+ ta_ctx->current_hash_stage = HASH_INIT;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_md5_update(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+ pr_debug("sep - doing md5 update\n");
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = MD5;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_MD5;
+ ta_ctx->current_hash_stage = HASH_UPDATE;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_md5_final(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+ pr_debug("sep - doing md5 final\n");
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = MD5;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_MD5;
+ ta_ctx->current_hash_stage = HASH_FINISH;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_md5_digest(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+
+ pr_debug("sep - doing md5 digest\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = MD5;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_MD5;
+ ta_ctx->current_hash_stage = HASH_DIGEST;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_md5_finup(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+
+ pr_debug("sep - doing md5 finup\n");
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = MD5;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_MD5;
+ ta_ctx->current_hash_stage = HASH_FINUP_DATA;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha224_init(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+ pr_debug("sep - doing sha224 init\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA224;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA224;
+ ta_ctx->current_hash_stage = HASH_INIT;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha224_update(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+ pr_debug("sep - doing sha224 update\n");
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA224;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA224;
+ ta_ctx->current_hash_stage = HASH_UPDATE;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha224_final(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+ pr_debug("sep - doing sha224 final\n");
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA224;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA224;
+ ta_ctx->current_hash_stage = HASH_FINISH;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha224_digest(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+
+ pr_debug("sep - doing sha224 digest\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA224;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA224;
+ ta_ctx->current_hash_stage = HASH_DIGEST;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha224_finup(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+
+ pr_debug("sep - doing sha224 finup\n");
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA224;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA224;
+ ta_ctx->current_hash_stage = HASH_FINUP_DATA;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha256_init(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+ pr_debug("sep - doing sha256 init\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA256;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA256;
+ ta_ctx->current_hash_stage = HASH_INIT;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha256_update(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+ pr_debug("sep - doing sha256 update\n");
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA256;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA256;
+ ta_ctx->current_hash_stage = HASH_UPDATE;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha256_final(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+ pr_debug("sep - doing sha256 final\n");
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA256;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA256;
+ ta_ctx->current_hash_stage = HASH_FINISH;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha256_digest(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+
+ pr_debug("sep - doing sha256 digest\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA256;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA256;
+ ta_ctx->current_hash_stage = HASH_DIGEST;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_sha256_finup(struct ahash_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
+
+ pr_debug("sep - doing sha256 finup\n");
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = SHA256;
+ ta_ctx->current_hash_req = req;
+ ta_ctx->current_cypher_req = NULL;
+ ta_ctx->hash_opmode = SEP_HASH_SHA256;
+ ta_ctx->current_hash_stage = HASH_FINUP_DATA;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
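+/* tfm init for the ablkcipher algorithms; sets the request context size */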
+static int sep_crypto_init(struct crypto_tfm *tfm)
+{
+ const char *alg_name = crypto_tfm_alg_name(tfm);
+
+ if (alg_name == NULL)
+ pr_debug("sep_crypto_init alg is NULL\n");
+ else
+ pr_debug("sep_crypto_init alg is %s\n", alg_name);
+
+ tfm->crt_ablkcipher.reqsize = sizeof(struct this_task_ctx);
+ return 0;
+}
+
+static void sep_crypto_exit(struct crypto_tfm *tfm)
+{
+ pr_debug("sep_crypto_exit\n");
+}
+
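+/*
+ * Stash the AES key in the system context; key_sent = 0 tells the
+ * next encrypt/decrypt to send the key down to the SEP first.
+ */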
+static int sep_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(tfm);
+
+ pr_debug("sep aes setkey\n");
+
+ pr_debug("tfm is %p sctx is %p\n", tfm, sctx);
+ switch (keylen) {
+ case SEP_AES_KEY_128_SIZE:
+ sctx->aes_key_size = AES_128;
+ break;
+ case SEP_AES_KEY_192_SIZE:
+ sctx->aes_key_size = AES_192;
+ break;
+ case SEP_AES_KEY_256_SIZE:
+ sctx->aes_key_size = AES_256;
+ break;
+ case SEP_AES_KEY_512_SIZE:
+ sctx->aes_key_size = AES_512;
+ break;
+ default:
+ pr_debug("invalid sep aes key size %x\n",
+ keylen);
+ return -EINVAL;
+ }
+
+ memset(&sctx->key.aes, 0, sizeof(u32) *
+ SEP_AES_MAX_KEY_SIZE_WORDS);
+ memcpy(&sctx->key.aes, key, keylen);
+ sctx->keylen = keylen;
+ /* Indicate to encrypt/decrypt function to send key to SEP */
+ sctx->key_sent = 0;
+
+ return 0;
+}
+
+static int sep_aes_ecb_encrypt(struct ablkcipher_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
+
+ pr_debug("sep - doing aes ecb encrypt\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = AES_ECB;
+ ta_ctx->current_hash_req = NULL;
+ ta_ctx->current_cypher_req = req;
+ ta_ctx->aes_encmode = SEP_AES_ENCRYPT;
+ ta_ctx->aes_opmode = SEP_AES_ECB;
+ ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
+ ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_aes_ecb_decrypt(struct ablkcipher_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
+
+ pr_debug("sep - doing aes ecb decrypt\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = AES_ECB;
+ ta_ctx->current_hash_req = NULL;
+ ta_ctx->current_cypher_req = req;
+ ta_ctx->aes_encmode = SEP_AES_DECRYPT;
+ ta_ctx->aes_opmode = SEP_AES_ECB;
+ ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
+ ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_aes_cbc_encrypt(struct ablkcipher_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
+ struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+
+ pr_debug("sep - doing aes cbc encrypt\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ pr_debug("tfm is %p sctx is %p and ta_ctx is %p\n",
+ crypto_ablkcipher_reqtfm(req), sctx, ta_ctx);
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = AES_CBC;
+ ta_ctx->current_hash_req = NULL;
+ ta_ctx->current_cypher_req = req;
+ ta_ctx->aes_encmode = SEP_AES_ENCRYPT;
+ ta_ctx->aes_opmode = SEP_AES_CBC;
+ ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
+ ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_aes_cbc_decrypt(struct ablkcipher_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
+ struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+
+ pr_debug("sep - doing aes cbc decrypt\n");
+
+ pr_debug("tfm is %p sctx is %p and ta_ctx is %p\n",
+ crypto_ablkcipher_reqtfm(req), sctx, ta_ctx);
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = AES_CBC;
+ ta_ctx->current_hash_req = NULL;
+ ta_ctx->current_cypher_req = req;
+ ta_ctx->aes_encmode = SEP_AES_DECRYPT;
+ ta_ctx->aes_opmode = SEP_AES_CBC;
+ ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
+ ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
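+/*
+ * Stash the DES/3DES key, rejecting weak keys when the tfm asks for
+ * that check; key_sent = 0 defers sending the key until first use.
+ */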
+static int sep_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(tfm);
+ struct crypto_tfm *ctfm = crypto_ablkcipher_tfm(tfm);
+ u32 *flags = &ctfm->crt_flags;
+
+ pr_debug("sep des setkey\n");
+
+ switch (keylen) {
+ case DES_KEY_SIZE:
+ sctx->des_nbr_keys = DES_KEY_1;
+ break;
+ case DES_KEY_SIZE * 2:
+ sctx->des_nbr_keys = DES_KEY_2;
+ break;
+ case DES_KEY_SIZE * 3:
+ sctx->des_nbr_keys = DES_KEY_3;
+ break;
+ default:
+ pr_debug("invalid key size %x\n",
+ keylen);
+ return -EINVAL;
+ }
+
+ if ((*flags & CRYPTO_TFM_REQ_WEAK_KEY) &&
+ (sep_weak_key(key, keylen))) {
+
+ *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ pr_debug("weak key\n");
+ return -EINVAL;
+ }
+
+ memset(&sctx->key.des, 0, sizeof(struct sep_des_key));
+ memcpy(&sctx->key.des.key1, key, keylen);
+ sctx->keylen = keylen;
+ /* Indicate to encrypt/decrypt function to send key to SEP */
+ sctx->key_sent = 0;
+
+ return 0;
+}
+
+static int sep_des_ebc_encrypt(struct ablkcipher_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
+
+ pr_debug("sep - doing des ecb encrypt\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = DES_ECB;
+ ta_ctx->current_hash_req = NULL;
+ ta_ctx->current_cypher_req = req;
+ ta_ctx->des_encmode = SEP_DES_ENCRYPT;
+ ta_ctx->des_opmode = SEP_DES_ECB;
+ ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
+ ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_des_ebc_decrypt(struct ablkcipher_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
+
+ pr_debug("sep - doing des ecb decrypt\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = DES_ECB;
+ ta_ctx->current_hash_req = NULL;
+ ta_ctx->current_cypher_req = req;
+ ta_ctx->des_encmode = SEP_DES_DECRYPT;
+ ta_ctx->des_opmode = SEP_DES_ECB;
+ ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
+ ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_des_cbc_encrypt(struct ablkcipher_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
+
+ pr_debug("sep - doing des cbc encrypt\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = DES_CBC;
+ ta_ctx->current_hash_req = NULL;
+ ta_ctx->current_cypher_req = req;
+ ta_ctx->des_encmode = SEP_DES_ENCRYPT;
+ ta_ctx->des_opmode = SEP_DES_CBC;
+ ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
+ ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
+static int sep_des_cbc_decrypt(struct ablkcipher_request *req)
+{
+ int error;
+ int error1;
+ struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
+
+ pr_debug("sep - doing des cbc decrypt\n");
+
+ /* Clear out task context */
+ memset(ta_ctx, 0, sizeof(struct this_task_ctx));
+
+ ta_ctx->sep_used = sep_dev;
+ ta_ctx->current_request = DES_CBC;
+ ta_ctx->current_hash_req = NULL;
+ ta_ctx->current_cypher_req = req;
+ ta_ctx->des_encmode = SEP_DES_DECRYPT;
+ ta_ctx->des_opmode = SEP_DES_CBC;
+ ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
+ ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
+
+ /* lock necessary so that only one entity touches the queues */
+ spin_lock_irq(&queue_lock);
+ error = crypto_enqueue_request(&sep_queue, &req->base);
+
+ if ((error != 0) && (error != -EINPROGRESS))
+ pr_debug(" sep - crypto enqueue failed: %x\n",
+ error);
+ error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
+ sep_dequeuer, (void *)&sep_queue);
+ if (error1)
+ pr_debug(" sep - workqueue submit failed: %x\n",
+ error1);
+ spin_unlock_irq(&queue_lock);
+ /* We return result of crypto enqueue */
+ return error;
+}
+
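+/* Async hash algorithms exported to the crypto API by this driver */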
+static struct ahash_alg hash_algs[] = {
+{
+ .init = sep_sha1_init,
+ .update = sep_sha1_update,
+ .final = sep_sha1_final,
+ .digest = sep_sha1_digest,
+ .finup = sep_sha1_finup,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-sep",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sep_system_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = sep_hash_cra_init,
+ .cra_exit = sep_hash_cra_exit,
+ }
+ }
+},
+{
+ .init = sep_md5_init,
+ .update = sep_md5_update,
+ .final = sep_md5_final,
+ .digest = sep_md5_digest,
+ .finup = sep_md5_finup,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .base = {
+ .cra_name = "md5",
+ .cra_driver_name = "md5-sep",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
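+ /* MD5 uses the same 64-byte block size as SHA-1 */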
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sep_system_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = sep_hash_cra_init,
+ .cra_exit = sep_hash_cra_exit,
+ }
+ }
+},
+{
+ .init = sep_sha224_init,
+ .update = sep_sha224_update,
+ .final = sep_sha224_final,
+ .digest = sep_sha224_digest,
+ .finup = sep_sha224_finup,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "sha224-sep",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sep_system_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = sep_hash_cra_init,
+ .cra_exit = sep_hash_cra_exit,
+ }
+ }
+},
+{
+ .init = sep_sha256_init,
+ .update = sep_sha256_update,
+ .final = sep_sha256_final,
+ .digest = sep_sha256_digest,
+ .finup = sep_sha256_finup,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-sep",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sep_system_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = sep_hash_cra_init,
+ .cra_exit = sep_hash_cra_exit,
+ }
+ }
+}
+};
+
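+/* Block cipher (ablkcipher) algorithms exported to the crypto API */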
+static struct crypto_alg crypto_algs[] = {
+{
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-sep",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sep_system_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = sep_crypto_init,
+ .cra_exit = sep_crypto_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = sep_aes_setkey,
+ .encrypt = sep_aes_ecb_encrypt,
+ .decrypt = sep_aes_ecb_decrypt,
+ }
+},
+{
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-sep",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sep_system_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = sep_crypto_init,
+ .cra_exit = sep_crypto_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = sep_aes_setkey,
+ .encrypt = sep_aes_cbc_encrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .decrypt = sep_aes_cbc_decrypt,
+ }
+},
+{
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "ecb-des-sep",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sep_system_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = sep_crypto_init,
+ .cra_exit = sep_crypto_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = sep_des_setkey,
+ .encrypt = sep_des_ebc_encrypt,
+ .decrypt = sep_des_ebc_decrypt,
+ }
+},
+{
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "cbc-des-sep",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sep_system_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = sep_crypto_init,
+ .cra_exit = sep_crypto_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = sep_des_setkey,
+ .encrypt = sep_des_cbc_encrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .decrypt = sep_des_cbc_decrypt,
+ }
+},
+{
+	.cra_name = "ecb(des3-ede)",
+	.cra_driver_name = "ecb-des3-ede-sep",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sep_system_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = sep_crypto_init,
+ .cra_exit = sep_crypto_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = sep_des_setkey,
+ .encrypt = sep_des_ebc_encrypt,
+ .decrypt = sep_des_ebc_decrypt,
+ }
+},
+{
+ .cra_name = "cbc(des3-ede)",
+	.cra_driver_name = "cbc-des3-ede-sep",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sep_system_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = sep_crypto_init,
+ .cra_exit = sep_crypto_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = sep_des_setkey,
+		.encrypt = sep_des_cbc_encrypt,
+		.ivsize = DES3_EDE_BLOCK_SIZE,
+ .decrypt = sep_des_cbc_decrypt,
+ }
+}
+};
+
+int sep_crypto_setup(void)
+{
+	int err, i, j, k;
+
+	tasklet_init(&sep_dev->finish_tasklet, sep_finish,
+ (unsigned long)sep_dev);
+
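+	/* Request queue for the SEP; its depth is bounded by SEP_QUEUE_LENGTH */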
+ crypto_init_queue(&sep_queue, SEP_QUEUE_LENGTH);
+
+ sep_dev->workqueue = create_singlethread_workqueue(
+ "sep_crypto_workqueue");
+ if (!sep_dev->workqueue) {
+		dev_warn(&sep_dev->pdev->dev, "can't create workqueue\n");
+ return -ENOMEM;
+ }
+
+	spin_lock_init(&queue_lock);
+
+ for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
+ err = crypto_register_ahash(&hash_algs[i]);
+ if (err)
+ goto err_algs;
+ }
+
+ for (j = 0; j < ARRAY_SIZE(crypto_algs); j++) {
+ err = crypto_register_alg(&crypto_algs[j]);
+ if (err)
+ goto err_crypto_algs;
+ }
+
+ return err;
+
+err_algs:
+ for (k = 0; k < i; k++)
+ crypto_unregister_ahash(&hash_algs[k]);
+ return err;
+
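+/*
+ * A cipher registration failed: unregister the ciphers registered so
+ * far, then fall through (via the goto below) to err_algs to unregister
+ * the hashes, all of which were registered successfully at this point.
+ */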
+err_crypto_algs:
+ for (k = 0; k < j; k++)
+ crypto_unregister_alg(&crypto_algs[k]);
+ goto err_algs;
+}
+
+void sep_crypto_takedown(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
+ crypto_unregister_ahash(&hash_algs[i]);
+ for (i = 0; i < ARRAY_SIZE(crypto_algs); i++)
+ crypto_unregister_alg(&crypto_algs[i]);
+
+ tasklet_kill(&sep_dev->finish_tasklet);
+}
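+
+/*
+ * Illustrative only (not part of the driver): once sep_crypto_setup()
+ * has registered the tables above, a kernel client reaches the SEP
+ * through the generic crypto API rather than by calling sep_* routines
+ * directly. A minimal sketch, assuming a caller-supplied buffer "buf"
+ * of "len" bytes, with error handling and async completion trimmed:
+ *
+ *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha1", 0, 0);
+ *	struct ahash_request *req;
+ *	struct scatterlist sg;
+ *	u8 digest[SHA1_DIGEST_SIZE];
+ *	int err;
+ *
+ *	if (IS_ERR(tfm))
+ *		return PTR_ERR(tfm);
+ *	req = ahash_request_alloc(tfm, GFP_KERNEL);
+ *	sg_init_one(&sg, buf, len);
+ *	ahash_request_set_crypt(req, &sg, digest, len);
+ *	err = crypto_ahash_digest(req);	(may return -EINPROGRESS)
+ *	ahash_request_free(req);
+ *	crypto_free_ahash(tfm);
+ */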
diff --git a/drivers/staging/sep/sep_crypto.h b/drivers/staging/sep/sep_crypto.h
new file mode 100644
index 000000000000..155c3c9b87c2
--- /dev/null
+++ b/drivers/staging/sep/sep_crypto.h
@@ -0,0 +1,359 @@
+/*
+ *
+ * sep_crypto.h - Crypto interface structures
+ *
+ * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
+ * Contributions(c) 2009-2010 Discretix. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * CONTACTS:
+ *
+ * Mark Allyn mark.a.allyn@intel.com
+ * Jayant Mangalampalli jayant.mangalampalli@intel.com
+ *
+ * CHANGES:
+ *
+ * 2009.06.26 Initial publish
+ * 2011.02.22 Enable Kernel Crypto
+ *
+ */
+
+/* Constants for SEP (from vendor) */
+#define SEP_START_MSG_TOKEN 0x02558808
+
+#define SEP_DES_IV_SIZE_WORDS 2
+#define SEP_DES_IV_SIZE_BYTES (SEP_DES_IV_SIZE_WORDS * \
+ sizeof(u32))
+#define SEP_DES_KEY_SIZE_WORDS 2
+#define SEP_DES_KEY_SIZE_BYTES (SEP_DES_KEY_SIZE_WORDS * \
+ sizeof(u32))
+#define SEP_DES_BLOCK_SIZE 8
+#define SEP_DES_DUMMY_SIZE 16
+
+#define SEP_DES_INIT_OPCODE 0x10
+#define SEP_DES_BLOCK_OPCODE 0x11
+
+#define SEP_AES_BLOCK_SIZE_WORDS 4
+#define SEP_AES_BLOCK_SIZE_BYTES \
+ (SEP_AES_BLOCK_SIZE_WORDS * sizeof(u32))
+
+#define SEP_AES_DUMMY_BLOCK_SIZE 16
+#define SEP_AES_IV_SIZE_WORDS SEP_AES_BLOCK_SIZE_WORDS
+#define SEP_AES_IV_SIZE_BYTES \
+ (SEP_AES_IV_SIZE_WORDS * sizeof(u32))
+
+#define SEP_AES_KEY_128_SIZE 16
+#define SEP_AES_KEY_192_SIZE 24
+#define SEP_AES_KEY_256_SIZE 32
+#define SEP_AES_KEY_512_SIZE 64
+#define SEP_AES_MAX_KEY_SIZE_WORDS 16
+#define SEP_AES_MAX_KEY_SIZE_BYTES \
+ (SEP_AES_MAX_KEY_SIZE_WORDS * sizeof(u32))
+
+#define SEP_AES_WRAP_MIN_SIZE 8
+#define SEP_AES_WRAP_MAX_SIZE 0x10000000
+
+#define SEP_AES_WRAP_BLOCK_SIZE_WORDS 2
+#define SEP_AES_WRAP_BLOCK_SIZE_BYTES \
+ (SEP_AES_WRAP_BLOCK_SIZE_WORDS * sizeof(u32))
+
+#define SEP_AES_SECRET_RKEK1 0x1
+#define SEP_AES_SECRET_RKEK2 0x2
+
+#define SEP_AES_INIT_OPCODE 0x2
+#define SEP_AES_BLOCK_OPCODE 0x3
+#define SEP_AES_FINISH_OPCODE 0x4
+#define SEP_AES_WRAP_OPCODE 0x6
+#define SEP_AES_UNWRAP_OPCODE 0x7
+#define SEP_AES_XTS_FINISH_OPCODE 0x8
+
+#define SEP_HASH_RESULT_SIZE_WORDS 16
+#define SEP_MD5_DIGEST_SIZE_WORDS 4
+#define SEP_MD5_DIGEST_SIZE_BYTES \
+ (SEP_MD5_DIGEST_SIZE_WORDS * sizeof(u32))
+#define SEP_SHA1_DIGEST_SIZE_WORDS 5
+#define SEP_SHA1_DIGEST_SIZE_BYTES \
+ (SEP_SHA1_DIGEST_SIZE_WORDS * sizeof(u32))
+#define SEP_SHA224_DIGEST_SIZE_WORDS 7
+#define SEP_SHA224_DIGEST_SIZE_BYTES \
+ (SEP_SHA224_DIGEST_SIZE_WORDS * sizeof(u32))
+#define SEP_SHA256_DIGEST_SIZE_WORDS 8
+#define SEP_SHA256_DIGEST_SIZE_BYTES \
+ (SEP_SHA256_DIGEST_SIZE_WORDS * sizeof(u32))
+#define SEP_SHA384_DIGEST_SIZE_WORDS 12
+#define SEP_SHA384_DIGEST_SIZE_BYTES \
+ (SEP_SHA384_DIGEST_SIZE_WORDS * sizeof(u32))
+#define SEP_SHA512_DIGEST_SIZE_WORDS 16
+#define SEP_SHA512_DIGEST_SIZE_BYTES \
+ (SEP_SHA512_DIGEST_SIZE_WORDS * sizeof(u32))
+#define SEP_HASH_BLOCK_SIZE_WORDS 16
+#define SEP_HASH_BLOCK_SIZE_BYTES \
+ (SEP_HASH_BLOCK_SIZE_WORDS * sizeof(u32))
+#define SEP_SHA2_BLOCK_SIZE_WORDS 32
+#define SEP_SHA2_BLOCK_SIZE_BYTES \
+ (SEP_SHA2_BLOCK_SIZE_WORDS * sizeof(u32))
+
+#define SEP_HASH_INIT_OPCODE 0x20
+#define SEP_HASH_UPDATE_OPCODE 0x21
+#define SEP_HASH_FINISH_OPCODE 0x22
+#define SEP_HASH_SINGLE_OPCODE 0x23
+
+#define SEP_HOST_ERROR 0x0b000000
+#define SEP_OK 0x0
+#define SEP_INVALID_START (SEP_HOST_ERROR + 0x3)
+#define SEP_WRONG_OPCODE (SEP_HOST_ERROR + 0x1)
+
+#define SEP_TRANSACTION_WAIT_TIME 5
+
+#define SEP_QUEUE_LENGTH 2
+/* Macros */
+#ifndef __LITTLE_ENDIAN
+#define CHG_ENDIAN(val) \
+ (((val) >> 24) | \
+ (((val) & 0x00FF0000) >> 8) | \
+ (((val) & 0x0000FF00) << 8) | \
+ (((val) & 0x000000FF) << 24))
+#else
+#define CHG_ENDIAN(val) val
+#endif
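+/*
+ * For example, CHG_ENDIAN(0x11223344) evaluates to 0x44332211 on a
+ * big-endian host (the same byte swap as swab32()) and is a no-op on
+ * little-endian hosts, so values exchanged with the SEP always end up
+ * in little-endian byte order.
+ */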
+/* Enums for SEP (from vendor) */
+enum des_numkey {
+ DES_KEY_1 = 1,
+ DES_KEY_2 = 2,
+ DES_KEY_3 = 3,
+ SEP_NUMKEY_OPTIONS,
+ SEP_NUMKEY_LAST = 0x7fffffff,
+};
+
+enum des_enc_mode {
+ SEP_DES_ENCRYPT = 0,
+ SEP_DES_DECRYPT = 1,
+ SEP_DES_ENC_OPTIONS,
+ SEP_DES_ENC_LAST = 0x7fffffff,
+};
+
+enum des_op_mode {
+ SEP_DES_ECB = 0,
+ SEP_DES_CBC = 1,
+ SEP_OP_OPTIONS,
+ SEP_OP_LAST = 0x7fffffff,
+};
+
+enum aes_keysize {
+ AES_128 = 0,
+ AES_192 = 1,
+ AES_256 = 2,
+ AES_512 = 3,
+ AES_SIZE_OPTIONS,
+ AEA_SIZE_LAST = 0x7FFFFFFF,
+};
+
+enum aes_enc_mode {
+ SEP_AES_ENCRYPT = 0,
+ SEP_AES_DECRYPT = 1,
+ SEP_AES_ENC_OPTIONS,
+ SEP_AES_ENC_LAST = 0x7FFFFFFF,
+};
+
+enum aes_op_mode {
+ SEP_AES_ECB = 0,
+ SEP_AES_CBC = 1,
+ SEP_AES_MAC = 2,
+ SEP_AES_CTR = 3,
+ SEP_AES_XCBC = 4,
+ SEP_AES_CMAC = 5,
+ SEP_AES_XTS = 6,
+ SEP_AES_OP_OPTIONS,
+ SEP_AES_OP_LAST = 0x7FFFFFFF,
+};
+
+enum hash_op_mode {
+ SEP_HASH_SHA1 = 0,
+ SEP_HASH_SHA224 = 1,
+ SEP_HASH_SHA256 = 2,
+ SEP_HASH_SHA384 = 3,
+ SEP_HASH_SHA512 = 4,
+ SEP_HASH_MD5 = 5,
+ SEP_HASH_OPTIONS,
+ SEP_HASH_LAST_MODE = 0x7FFFFFFF,
+};
+
+/* Structures for SEP (from vendor) */
+struct sep_des_internal_key {
+ u32 key1[SEP_DES_KEY_SIZE_WORDS];
+ u32 key2[SEP_DES_KEY_SIZE_WORDS];
+ u32 key3[SEP_DES_KEY_SIZE_WORDS];
+};
+
+struct sep_des_internal_context {
+ u32 iv_context[SEP_DES_IV_SIZE_WORDS];
+ struct sep_des_internal_key context_key;
+ enum des_numkey nbr_keys;
+ enum des_enc_mode encryption;
+ enum des_op_mode operation;
+ u8 dummy_block[SEP_DES_DUMMY_SIZE];
+};
+
+struct sep_des_private_context {
+ u32 valid_tag;
+ u32 iv;
+ u8 ctx_buf[sizeof(struct sep_des_internal_context)];
+};
+
+/* This is the structure passed to SEP via msg area */
+struct sep_des_key {
+ u32 key1[SEP_DES_KEY_SIZE_WORDS];
+ u32 key2[SEP_DES_KEY_SIZE_WORDS];
+ u32 key3[SEP_DES_KEY_SIZE_WORDS];
+ u32 pad[SEP_DES_KEY_SIZE_WORDS];
+};
+
+struct sep_aes_internal_context {
+ u32 aes_ctx_iv[SEP_AES_IV_SIZE_WORDS];
+ u32 aes_ctx_key[SEP_AES_MAX_KEY_SIZE_WORDS / 2];
+ enum aes_keysize keysize;
+ enum aes_enc_mode encmode;
+ enum aes_op_mode opmode;
+ u8 secret_key;
+ u32 no_add_blocks;
+ u32 last_block_size;
+ u32 last_block[SEP_AES_BLOCK_SIZE_WORDS];
+ u32 prev_iv[SEP_AES_BLOCK_SIZE_WORDS];
+ u32 remaining_size;
+ union {
+ struct {
+ u32 dkey1[SEP_AES_BLOCK_SIZE_WORDS];
+ u32 dkey2[SEP_AES_BLOCK_SIZE_WORDS];
+ u32 dkey3[SEP_AES_BLOCK_SIZE_WORDS];
+ } cmac_data;
+ struct {
+ u32 xts_key[SEP_AES_MAX_KEY_SIZE_WORDS / 2];
+ u32 temp1[SEP_AES_BLOCK_SIZE_WORDS];
+ u32 temp2[SEP_AES_BLOCK_SIZE_WORDS];
+ } xtx_data;
+ } s_data;
+ u8 dummy_block[SEP_AES_DUMMY_BLOCK_SIZE];
+};
+
+struct sep_aes_private_context {
+ u32 valid_tag;
+ u32 aes_iv;
+ u32 op_mode;
+ u8 cbuff[sizeof(struct sep_aes_internal_context)];
+};
+
+struct sep_hash_internal_context {
+ u32 hash_result[SEP_HASH_RESULT_SIZE_WORDS];
+ enum hash_op_mode hash_opmode;
+ u32 previous_data[SEP_SHA2_BLOCK_SIZE_WORDS];
+ u16 prev_update_bytes;
+ u32 total_proc_128bit[4];
+ u16 op_mode_block_size;
+ u8 dummy_aes_block[SEP_AES_DUMMY_BLOCK_SIZE];
+};
+
+struct sep_hash_private_context {
+ u32 valid_tag;
+ u32 iv;
+ u8 internal_context[sizeof(struct sep_hash_internal_context)];
+};
+
+union key_t {
+ struct sep_des_key des;
+ u32 aes[SEP_AES_MAX_KEY_SIZE_WORDS];
+};
+
+/* Context structures for crypto API */
+/**
+ * Context for the current task.
+ *
+ * The same structure is used for both hash and cipher operations in
+ * order to avoid duplicating code that is common to both. We cannot
+ * trust that the system context will not be pulled out from under us
+ * between operations, so all critical state, such as data pointers,
+ * must live in a context that is exclusive to the task at hand.
+ */
+struct this_task_ctx {
+ struct sep_device *sep_used;
+ u32 done;
+ unsigned char iv[100];
+ enum des_enc_mode des_encmode;
+ enum des_op_mode des_opmode;
+ enum aes_enc_mode aes_encmode;
+ enum aes_op_mode aes_opmode;
+ u32 init_opcode;
+ u32 block_opcode;
+ size_t data_length;
+ size_t ivlen;
+ struct ablkcipher_walk walk;
+ int i_own_sep; /* Do I have custody of the sep? */
+ struct sep_call_status call_status;
+ struct build_dcb_struct_kernel dcb_input_data;
+ struct sep_dma_context *dma_ctx;
+ void *dmatables_region;
+ size_t nbytes;
+ struct sep_dcblock *dcb_region;
+ struct sep_queue_info *queue_elem;
+ int msg_len_words;
+ unsigned char msg[SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES];
+ void *msgptr;
+ struct scatterlist *src_sg;
+ struct scatterlist *dst_sg;
+ struct scatterlist *src_sg_hold;
+ struct scatterlist *dst_sg_hold;
+ struct ahash_request *current_hash_req;
+ struct ablkcipher_request *current_cypher_req;
+ enum type_of_request current_request;
+ int digest_size_words;
+ int digest_size_bytes;
+ int block_size_words;
+ int block_size_bytes;
+ enum hash_op_mode hash_opmode;
+ enum hash_stage current_hash_stage;
+ /**
+	 * Note that this is a pointer. The are_we_done_yet variable is
+ * allocated by the task function. This way, even if the kernel
+ * crypto infrastructure has grabbed the task structure out from
+ * under us, the task function can still see this variable.
+ */
+ int *are_we_done_yet;
+ unsigned long end_time;
+ };
+
+struct sep_system_ctx {
+ union key_t key;
+ size_t keylen;
+ int key_sent;
+ enum des_numkey des_nbr_keys;
+ enum aes_keysize aes_key_size;
+ unsigned long end_time;
+ struct sep_des_private_context des_private_ctx;
+ struct sep_aes_private_context aes_private_ctx;
+ struct sep_hash_private_context hash_private_ctx;
+ };
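+
+/*
+ * Note: sizeof(struct sep_system_ctx) is what the .cra_ctxsize fields in
+ * the registration tables in sep_crypto.c reserve, so crypto_tfm_ctx()
+ * on one of this driver's transforms returns a per-tfm sep_system_ctx.
+ */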
+
+/* work queue structures */
+struct sep_work_struct {
+ struct work_struct work;
+ void (*callback)(void *);
+ void *data;
+ };
+
+/* Functions */
+int sep_crypto_setup(void);
+void sep_crypto_takedown(void);
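+
+/*
+ * sep_crypto_setup() registers the algorithm tables defined in
+ * sep_crypto.c and is meant to be called only after sep_dev has been
+ * initialised (i.e. from the device probe path); sep_crypto_takedown()
+ * unregisters them again on the corresponding teardown path.
+ */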
diff --git a/drivers/staging/sep/sep_dev.h b/drivers/staging/sep/sep_dev.h
index 696ab0dd2b79..5f6a07f59dd7 100644
--- a/drivers/staging/sep/sep_dev.h
+++ b/drivers/staging/sep/sep_dev.h
@@ -5,8 +5,8 @@
*
* sep_dev.h - Security Processor Device Structures
*
- * Copyright(c) 2009,2010 Intel Corporation. All rights reserved.
- * Contributions(c) 2009,2010 Discretix. All rights reserved.
+ * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
+ * Contributions(c) 2009-2011 Discretix. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -28,6 +28,7 @@
*
* CHANGES
* 2010.09.14 upgrade to Medfield
+ * 2011.02.22 enable kernel crypto
*/
struct sep_device {
@@ -36,33 +37,21 @@ struct sep_device {
/* character device file */
struct cdev sep_cdev;
- struct cdev sep_daemon_cdev;
- struct cdev sep_singleton_cdev;
/* devices (using misc dev) */
struct miscdevice miscdev_sep;
- struct miscdevice miscdev_singleton;
- struct miscdevice miscdev_daemon;
/* major / minor numbers of device */
dev_t sep_devno;
- dev_t sep_daemon_devno;
- dev_t sep_singleton_devno;
-
- struct mutex sep_mutex;
- struct mutex ioctl_mutex;
+ /* guards command sent counter */
spinlock_t snd_rply_lck;
+	/* guards driver memory usage in the fastcall interface */
+ struct semaphore sep_doublebuf;
/* flags to indicate use and lock status of sep */
u32 pid_doing_transaction;
unsigned long in_use_flags;
- /* request daemon alread open */
- unsigned long request_daemon_open;
-
- /* 1 = Moorestown; 0 = Medfield */
- int mrst;
-
/* address of the shared memory allocated during init for SEP driver
(coherent alloc) */
dma_addr_t shared_bus;
@@ -74,36 +63,77 @@ struct sep_device {
dma_addr_t reg_physical_end;
void __iomem *reg_addr;
- /* wait queue head (event) of the driver */
- wait_queue_head_t event;
- wait_queue_head_t event_request_daemon;
- wait_queue_head_t event_mmap;
+ /* wait queue heads of the driver */
+ wait_queue_head_t event_interrupt;
+ wait_queue_head_t event_transactions;
- struct sep_caller_id_entry
- caller_id_table[SEP_CALLER_ID_TABLE_NUM_ENTRIES];
+ struct list_head sep_queue_status;
+ u32 sep_queue_num;
+ spinlock_t sep_queue_lock;
- /* access flag for singleton device */
- unsigned long singleton_access_flag;
+ /* Is this in use? */
+ u32 in_use;
+
+ /* indicates whether power save is set up */
+ u32 power_save_setup;
+
+ /* Power state */
+ u32 power_state;
/* transaction counter that coordinates the
transactions between SEP and HOST */
unsigned long send_ct;
/* counter for the messages from sep */
unsigned long reply_ct;
- /* counter for the number of bytes allocated in the pool for the
- current transaction */
- long data_pool_bytes_allocated;
- u32 num_of_data_allocations;
+ /* The following are used for kernel crypto client requests */
+ u32 in_kernel; /* Set for kernel client request */
+ struct tasklet_struct finish_tasklet;
+ enum type_of_request current_request;
+ enum hash_stage current_hash_stage;
+ struct ahash_request *current_hash_req;
+ struct ablkcipher_request *current_cypher_req;
+ struct this_task_ctx *ta_ctx;
+ struct workqueue_struct *workqueue;
+};
- /* number of the lli tables created in the current transaction */
- u32 num_lli_tables_created;
+extern struct sep_device *sep_dev;
- /* number of data control blocks */
- u32 nr_dcb_creat;
+/**
+ * struct sep_msgarea_hdr - SEP message header for a transaction
+ * @reserved: reserved memory (two words)
+ * @token: SEP message token
+ * @msg_len: message length
+ * @opcode: message opcode
+ */
+struct sep_msgarea_hdr {
+ u32 reserved[2];
+ u32 token;
+ u32 msg_len;
+ u32 opcode;
+};
- struct sep_dma_resource dma_res_arr[SEP_MAX_NUM_SYNC_DMA_OPS];
+/**
+ * sep_queue_data - data to be maintained in status queue for a transaction
+ * @opcode : transaction opcode
+ * @size : message size
+ * @pid: owner process
+ * @name: owner process name
+ */
+struct sep_queue_data {
+ u32 opcode;
+ u32 size;
+ s32 pid;
+ u8 name[TASK_COMM_LEN];
+};
+/**
+ * struct sep_queue_info - maintains status info of all transactions
+ * @list: list entry
+ * @data: transaction data
+ */
+struct sep_queue_info {
+ struct list_head list;
+ struct sep_queue_data data;
};
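+
+/*
+ * Illustrative sketch (not a declaration in this header): queueing
+ * status for a new transaction on the list above would look roughly
+ * like this, with "sep", "opcode" and "size" supplied by the caller:
+ *
+ *	struct sep_queue_info *info;
+ *	unsigned long flags;
+ *
+ *	info = kzalloc(sizeof(*info), GFP_KERNEL);
+ *	if (!info)
+ *		return NULL;
+ *	info->data.opcode = opcode;
+ *	info->data.size = size;
+ *	info->data.pid = current->pid;
+ *	memcpy(info->data.name, current->comm, TASK_COMM_LEN);
+ *	spin_lock_irqsave(&sep->sep_queue_lock, flags);
+ *	list_add_tail(&info->list, &sep->sep_queue_status);
+ *	sep->sep_queue_num++;
+ *	spin_unlock_irqrestore(&sep->sep_queue_lock, flags);
+ */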
static inline void sep_write_reg(struct sep_device *dev, int reg, u32 value)
diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
deleted file mode 100644
index 6b3d156d4140..000000000000
--- a/drivers/staging/sep/sep_driver.c
+++ /dev/null
@@ -1,2932 +0,0 @@
-/*
- *
- * sep_driver.c - Security Processor Driver main group of functions
- *
- * Copyright(c) 2009,2010 Intel Corporation. All rights reserved.
- * Contributions(c) 2009,2010 Discretix. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * CONTACTS:
- *
- * Mark Allyn mark.a.allyn@intel.com
- * Jayant Mangalampalli jayant.mangalampalli@intel.com
- *
- * CHANGES:
- *
- * 2009.06.26 Initial publish
- * 2010.09.14 Upgrade to Medfield
- *
- */
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/miscdevice.h>
-#include <linux/fs.h>
-#include <linux/cdev.h>
-#include <linux/kdev_t.h>
-#include <linux/mutex.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/poll.h>
-#include <linux/wait.h>
-#include <linux/pci.h>
-#include <linux/firmware.h>
-#include <linux/slab.h>
-#include <linux/ioctl.h>
-#include <asm/current.h>
-#include <linux/ioport.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/pagemap.h>
-#include <asm/cacheflush.h>
-#include <linux/delay.h>
-#include <linux/jiffies.h>
-#include <linux/rar_register.h>
-
-#include "sep_driver_hw_defs.h"
-#include "sep_driver_config.h"
-#include "sep_driver_api.h"
-#include "sep_dev.h"
-
-/*----------------------------------------
- DEFINES
------------------------------------------*/
-
-#define SEP_RAR_IO_MEM_REGION_SIZE 0x40000
-
-/*--------------------------------------------
- GLOBAL variables
---------------------------------------------*/
-
-/* Keep this a single static object for now to keep the conversion easy */
-
-static struct sep_device *sep_dev;
-
-/**
- * sep_dump_message - dump the message that is pending
- * @sep: SEP device
- */
-static void sep_dump_message(struct sep_device *sep)
-{
- int count;
- u32 *p = sep->shared_addr;
- for (count = 0; count < 12 * 4; count += 4)
- dev_dbg(&sep->pdev->dev, "Word %d of the message is %x\n",
- count, *p++);
-}
-
-/**
- * sep_map_and_alloc_shared_area - allocate shared block
- * @sep: security processor
- * @size: size of shared area
- */
-static int sep_map_and_alloc_shared_area(struct sep_device *sep)
-{
- sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev,
- sep->shared_size,
- &sep->shared_bus, GFP_KERNEL);
-
- if (!sep->shared_addr) {
- dev_warn(&sep->pdev->dev,
- "shared memory dma_alloc_coherent failed\n");
- return -ENOMEM;
- }
- dev_dbg(&sep->pdev->dev,
- "shared_addr %zx bytes @%p (bus %llx)\n",
- sep->shared_size, sep->shared_addr,
- (unsigned long long)sep->shared_bus);
- return 0;
-}
-
-/**
- * sep_unmap_and_free_shared_area - free shared block
- * @sep: security processor
- */
-static void sep_unmap_and_free_shared_area(struct sep_device *sep)
-{
- dma_free_coherent(&sep->pdev->dev, sep->shared_size,
- sep->shared_addr, sep->shared_bus);
-}
-
-/**
- * sep_shared_bus_to_virt - convert bus/virt addresses
- * @sep: pointer to struct sep_device
- * @bus_address: address to convert
- *
- * Returns virtual address inside the shared area according
- * to the bus address.
- */
-static void *sep_shared_bus_to_virt(struct sep_device *sep,
- dma_addr_t bus_address)
-{
- return sep->shared_addr + (bus_address - sep->shared_bus);
-}
-
-/**
- * open function for the singleton driver
- * @inode_ptr struct inode *
- * @file_ptr struct file *
- *
- * Called when the user opens the singleton device interface
- */
-static int sep_singleton_open(struct inode *inode_ptr, struct file *file_ptr)
-{
- struct sep_device *sep;
-
- /*
- * Get the SEP device structure and use it for the
- * private_data field in filp for other methods
- */
- sep = sep_dev;
-
- file_ptr->private_data = sep;
-
- if (test_and_set_bit(0, &sep->singleton_access_flag))
- return -EBUSY;
- return 0;
-}
-
-/**
- * sep_open - device open method
- * @inode: inode of SEP device
- * @filp: file handle to SEP device
- *
- * Open method for the SEP device. Called when userspace opens
- * the SEP device node.
- *
- * Returns zero on success otherwise an error code.
- */
-static int sep_open(struct inode *inode, struct file *filp)
-{
- struct sep_device *sep;
-
- /*
- * Get the SEP device structure and use it for the
- * private_data field in filp for other methods
- */
- sep = sep_dev;
- filp->private_data = sep;
-
- /* Anyone can open; locking takes place at transaction level */
- return 0;
-}
-
-/**
- * sep_singleton_release - close a SEP singleton device
- * @inode: inode of SEP device
- * @filp: file handle being closed
- *
- * Called on the final close of a SEP device. As the open protects against
- * multiple simultaenous opens that means this method is called when the
- * final reference to the open handle is dropped.
- */
-static int sep_singleton_release(struct inode *inode, struct file *filp)
-{
- struct sep_device *sep = filp->private_data;
-
- clear_bit(0, &sep->singleton_access_flag);
- return 0;
-}
-
-/**
- * sep_request_daemon_open - request daemon open method
- * @inode: inode of SEP device
- * @filp: file handle to SEP device
- *
- * Open method for the SEP request daemon. Called when
- * request daemon in userspace opens the SEP device node.
- *
- * Returns zero on success otherwise an error code.
- */
-static int sep_request_daemon_open(struct inode *inode, struct file *filp)
-{
- struct sep_device *sep = sep_dev;
- int error = 0;
-
- filp->private_data = sep;
-
- /* There is supposed to be only one request daemon */
- if (test_and_set_bit(0, &sep->request_daemon_open))
- error = -EBUSY;
- return error;
-}
-
-/**
- * sep_request_daemon_release - close a SEP daemon
- * @inode: inode of SEP device
- * @filp: file handle being closed
- *
- * Called on the final close of a SEP daemon.
- */
-static int sep_request_daemon_release(struct inode *inode, struct file *filp)
-{
- struct sep_device *sep = filp->private_data;
-
- dev_dbg(&sep->pdev->dev, "Request daemon release for pid %d\n",
- current->pid);
-
- /* Clear the request_daemon_open flag */
- clear_bit(0, &sep->request_daemon_open);
- return 0;
-}
-
-/**
- * sep_req_daemon_send_reply_command_handler - poke the SEP
- * @sep: struct sep_device *
- *
- * This function raises interrupt to SEPm that signals that is has a
- * new command from HOST
- */
-static int sep_req_daemon_send_reply_command_handler(struct sep_device *sep)
-{
- unsigned long lck_flags;
-
- sep_dump_message(sep);
-
- /* Counters are lockable region */
- spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
- sep->send_ct++;
- sep->reply_ct++;
-
- /* Send the interrupt to SEP */
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep->send_ct);
- sep->send_ct++;
-
- spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
-
- dev_dbg(&sep->pdev->dev,
- "sep_req_daemon_send_reply send_ct %lx reply_ct %lx\n",
- sep->send_ct, sep->reply_ct);
-
- return 0;
-}
-
-
-/**
- * sep_free_dma_table_data_handler - free DMA table
- * @sep: pointere to struct sep_device
- *
- * Handles the request to free DMA table for synchronic actions
- */
-static int sep_free_dma_table_data_handler(struct sep_device *sep)
-{
- int count;
- int dcb_counter;
- /* Pointer to the current dma_resource struct */
- struct sep_dma_resource *dma;
-
- for (dcb_counter = 0; dcb_counter < sep->nr_dcb_creat; dcb_counter++) {
- dma = &sep->dma_res_arr[dcb_counter];
-
- /* Unmap and free input map array */
- if (dma->in_map_array) {
- for (count = 0; count < dma->in_num_pages; count++) {
- dma_unmap_page(&sep->pdev->dev,
- dma->in_map_array[count].dma_addr,
- dma->in_map_array[count].size,
- DMA_TO_DEVICE);
- }
- kfree(dma->in_map_array);
- }
-
- /* Unmap output map array, DON'T free it yet */
- if (dma->out_map_array) {
- for (count = 0; count < dma->out_num_pages; count++) {
- dma_unmap_page(&sep->pdev->dev,
- dma->out_map_array[count].dma_addr,
- dma->out_map_array[count].size,
- DMA_FROM_DEVICE);
- }
- kfree(dma->out_map_array);
- }
-
- /* Free page cache for output */
- if (dma->in_page_array) {
- for (count = 0; count < dma->in_num_pages; count++) {
- flush_dcache_page(dma->in_page_array[count]);
- page_cache_release(dma->in_page_array[count]);
- }
- kfree(dma->in_page_array);
- }
-
- if (dma->out_page_array) {
- for (count = 0; count < dma->out_num_pages; count++) {
- if (!PageReserved(dma->out_page_array[count]))
- SetPageDirty(dma->out_page_array[count]);
- flush_dcache_page(dma->out_page_array[count]);
- page_cache_release(dma->out_page_array[count]);
- }
- kfree(dma->out_page_array);
- }
-
- /* Reset all the values */
- dma->in_page_array = NULL;
- dma->out_page_array = NULL;
- dma->in_num_pages = 0;
- dma->out_num_pages = 0;
- dma->in_map_array = NULL;
- dma->out_map_array = NULL;
- dma->in_map_num_entries = 0;
- dma->out_map_num_entries = 0;
- }
-
- sep->nr_dcb_creat = 0;
- sep->num_lli_tables_created = 0;
-
- return 0;
-}
-
-/**
- * sep_request_daemon_mmap - maps the shared area to user space
- * @filp: pointer to struct file
- * @vma: pointer to vm_area_struct
- *
- * Called by the kernel when the daemon attempts an mmap() syscall
- * using our handle.
- */
-static int sep_request_daemon_mmap(struct file *filp,
- struct vm_area_struct *vma)
-{
- struct sep_device *sep = filp->private_data;
- dma_addr_t bus_address;
- int error = 0;
-
- if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
- error = -EINVAL;
- goto end_function;
- }
-
- /* Get physical address */
- bus_address = sep->shared_bus;
-
- if (remap_pfn_range(vma, vma->vm_start, bus_address >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
-
- dev_warn(&sep->pdev->dev, "remap_page_range failed\n");
- error = -EAGAIN;
- goto end_function;
- }
-
-end_function:
- return error;
-}
-
-/**
- * sep_request_daemon_poll - poll implementation
- * @sep: struct sep_device * for current SEP device
- * @filp: struct file * for open file
- * @wait: poll_table * for poll
- *
- * Called when our device is part of a poll() or select() syscall
- */
-static unsigned int sep_request_daemon_poll(struct file *filp,
- poll_table *wait)
-{
- u32 mask = 0;
- /* GPR2 register */
- u32 retval2;
- unsigned long lck_flags;
- struct sep_device *sep = filp->private_data;
-
- poll_wait(filp, &sep->event_request_daemon, wait);
-
- dev_dbg(&sep->pdev->dev, "daemon poll: send_ct is %lx reply ct is %lx\n",
- sep->send_ct, sep->reply_ct);
-
- spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
- /* Check if the data is ready */
- if (sep->send_ct == sep->reply_ct) {
- spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
-
- retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
- dev_dbg(&sep->pdev->dev,
- "daemon poll: data check (GPR2) is %x\n", retval2);
-
- /* Check if PRINT request */
- if ((retval2 >> 30) & 0x1) {
- dev_dbg(&sep->pdev->dev, "daemon poll: PRINTF request in\n");
- mask |= POLLIN;
- goto end_function;
- }
- /* Check if NVS request */
- if (retval2 >> 31) {
- dev_dbg(&sep->pdev->dev, "daemon poll: NVS request in\n");
- mask |= POLLPRI | POLLWRNORM;
- }
- } else {
- spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
- dev_dbg(&sep->pdev->dev,
- "daemon poll: no reply received; returning 0\n");
- mask = 0;
- }
-end_function:
- return mask;
-}
-
-/**
- * sep_release - close a SEP device
- * @inode: inode of SEP device
- * @filp: file handle being closed
- *
- * Called on the final close of a SEP device.
- */
-static int sep_release(struct inode *inode, struct file *filp)
-{
- struct sep_device *sep = filp->private_data;
-
- dev_dbg(&sep->pdev->dev, "Release for pid %d\n", current->pid);
-
- mutex_lock(&sep->sep_mutex);
- /* Is this the process that has a transaction open?
- * If so, lets reset pid_doing_transaction to 0 and
- * clear the in use flags, and then wake up sep_event
- * so that other processes can do transactions
- */
- if (sep->pid_doing_transaction == current->pid) {
- clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
- clear_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags);
- sep_free_dma_table_data_handler(sep);
- wake_up(&sep->event);
- sep->pid_doing_transaction = 0;
- }
-
- mutex_unlock(&sep->sep_mutex);
- return 0;
-}
-
-/**
- * sep_mmap - maps the shared area to user space
- * @filp: pointer to struct file
- * @vma: pointer to vm_area_struct
- *
- * Called on an mmap of our space via the normal SEP device
- */
-static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- dma_addr_t bus_addr;
- struct sep_device *sep = filp->private_data;
- unsigned long error = 0;
-
- /* Set the transaction busy (own the device) */
- wait_event_interruptible(sep->event,
- test_and_set_bit(SEP_MMAP_LOCK_BIT,
- &sep->in_use_flags) == 0);
-
- if (signal_pending(current)) {
- error = -EINTR;
- goto end_function_with_error;
- }
- /*
- * The pid_doing_transaction indicates that this process
- * now owns the facilities to performa a transaction with
- * the SEP. While this process is performing a transaction,
- * no other process who has the SEP device open can perform
- * any transactions. This method allows more than one process
- * to have the device open at any given time, which provides
- * finer granularity for device utilization by multiple
- * processes.
- */
- mutex_lock(&sep->sep_mutex);
- sep->pid_doing_transaction = current->pid;
- mutex_unlock(&sep->sep_mutex);
-
- /* Zero the pools and the number of data pool alocation pointers */
- sep->data_pool_bytes_allocated = 0;
- sep->num_of_data_allocations = 0;
-
- /*
- * Check that the size of the mapped range is as the size of the message
- * shared area
- */
- if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
- error = -EINVAL;
- goto end_function_with_error;
- }
-
- dev_dbg(&sep->pdev->dev, "shared_addr is %p\n", sep->shared_addr);
-
- /* Get bus address */
- bus_addr = sep->shared_bus;
-
- if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
- dev_warn(&sep->pdev->dev, "remap_page_range failed\n");
- error = -EAGAIN;
- goto end_function_with_error;
- }
- goto end_function;
-
-end_function_with_error:
- /* Clear the bit */
- clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
- mutex_lock(&sep->sep_mutex);
- sep->pid_doing_transaction = 0;
- mutex_unlock(&sep->sep_mutex);
-
- /* Raise event for stuck contextes */
-
- wake_up(&sep->event);
-
-end_function:
- return error;
-}
-
-/**
- * sep_poll - poll handler
- * @filp: pointer to struct file
- * @wait: pointer to poll_table
- *
- * Called by the OS when the kernel is asked to do a poll on
- * a SEP file handle.
- */
-static unsigned int sep_poll(struct file *filp, poll_table *wait)
-{
- u32 mask = 0;
- u32 retval = 0;
- u32 retval2 = 0;
- unsigned long lck_flags;
-
- struct sep_device *sep = filp->private_data;
-
- /* Am I the process that owns the transaction? */
- mutex_lock(&sep->sep_mutex);
- if (current->pid != sep->pid_doing_transaction) {
- dev_dbg(&sep->pdev->dev, "poll; wrong pid\n");
- mask = POLLERR;
- mutex_unlock(&sep->sep_mutex);
- goto end_function;
- }
- mutex_unlock(&sep->sep_mutex);
-
- /* Check if send command or send_reply were activated previously */
- if (!test_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags)) {
- mask = POLLERR;
- goto end_function;
- }
-
- /* Add the event to the polling wait table */
- dev_dbg(&sep->pdev->dev, "poll: calling wait sep_event\n");
-
- poll_wait(filp, &sep->event, wait);
-
- dev_dbg(&sep->pdev->dev, "poll: send_ct is %lx reply ct is %lx\n",
- sep->send_ct, sep->reply_ct);
-
- /* Check if error occurred during poll */
- retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
- if (retval2 != 0x0) {
- dev_warn(&sep->pdev->dev, "poll; poll error %x\n", retval2);
- mask |= POLLERR;
- goto end_function;
- }
-
- spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
-
- if (sep->send_ct == sep->reply_ct) {
- spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
- retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
- dev_dbg(&sep->pdev->dev, "poll: data ready check (GPR2) %x\n",
- retval);
-
- /* Check if printf request */
- if ((retval >> 30) & 0x1) {
- dev_dbg(&sep->pdev->dev, "poll: SEP printf request\n");
- wake_up(&sep->event_request_daemon);
- goto end_function;
- }
-
- /* Check if the this is SEP reply or request */
- if (retval >> 31) {
- dev_dbg(&sep->pdev->dev, "poll: SEP request\n");
- wake_up(&sep->event_request_daemon);
- } else {
- dev_dbg(&sep->pdev->dev, "poll: normal return\n");
- /* In case it is again by send_reply_comand */
- clear_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags);
- sep_dump_message(sep);
- dev_dbg(&sep->pdev->dev,
- "poll; SEP reply POLLIN | POLLRDNORM\n");
- mask |= POLLIN | POLLRDNORM;
- }
- } else {
- spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
- dev_dbg(&sep->pdev->dev,
- "poll; no reply received; returning mask of 0\n");
- mask = 0;
- }
-
-end_function:
- return mask;
-}
-
-/**
- * sep_time_address - address in SEP memory of time
- * @sep: SEP device we want the address from
- *
- * Return the address of the two dwords in memory used for time
- * setting.
- */
-static u32 *sep_time_address(struct sep_device *sep)
-{
- return sep->shared_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
-}
-
-/**
- * sep_set_time - set the SEP time
- * @sep: the SEP we are setting the time for
- *
- * Calculates time and sets it at the predefined address.
- * Called with the SEP mutex held.
- */
-static unsigned long sep_set_time(struct sep_device *sep)
-{
- struct timeval time;
- u32 *time_addr; /* Address of time as seen by the kernel */
-
-
- do_gettimeofday(&time);
-
- /* Set value in the SYSTEM MEMORY offset */
- time_addr = sep_time_address(sep);
-
- time_addr[0] = SEP_TIME_VAL_TOKEN;
- time_addr[1] = time.tv_sec;
-
- dev_dbg(&sep->pdev->dev, "time.tv_sec is %lu\n", time.tv_sec);
- dev_dbg(&sep->pdev->dev, "time_addr is %p\n", time_addr);
- dev_dbg(&sep->pdev->dev, "sep->shared_addr is %p\n", sep->shared_addr);
-
- return time.tv_sec;
-}
-
-/**
- * sep_set_caller_id_handler - insert caller id entry
- * @sep: SEP device
- * @arg: pointer to struct caller_id_struct
- *
- * Inserts the data into the caller id table. Note that this function
- * falls under the ioctl lock
- */
-static int sep_set_caller_id_handler(struct sep_device *sep, unsigned long arg)
-{
- void __user *hash;
- int error = 0;
- int i;
- struct caller_id_struct command_args;
-
- for (i = 0; i < SEP_CALLER_ID_TABLE_NUM_ENTRIES; i++) {
- if (sep->caller_id_table[i].pid == 0)
- break;
- }
-
- if (i == SEP_CALLER_ID_TABLE_NUM_ENTRIES) {
- dev_dbg(&sep->pdev->dev, "no more caller id entries left\n");
- dev_dbg(&sep->pdev->dev, "maximum number is %d\n",
- SEP_CALLER_ID_TABLE_NUM_ENTRIES);
- error = -EUSERS;
- goto end_function;
- }
-
- /* Copy the data */
- if (copy_from_user(&command_args, (void __user *)arg,
- sizeof(command_args))) {
- error = -EFAULT;
- goto end_function;
- }
-
- hash = (void __user *)(unsigned long)command_args.callerIdAddress;
-
- if (!command_args.pid || !command_args.callerIdSizeInBytes) {
- error = -EINVAL;
- goto end_function;
- }
-
- dev_dbg(&sep->pdev->dev, "pid is %x\n", command_args.pid);
- dev_dbg(&sep->pdev->dev, "callerIdSizeInBytes is %x\n",
- command_args.callerIdSizeInBytes);
-
- if (command_args.callerIdSizeInBytes >
- SEP_CALLER_ID_HASH_SIZE_IN_BYTES) {
- error = -EMSGSIZE;
- goto end_function;
- }
-
- sep->caller_id_table[i].pid = command_args.pid;
-
- if (copy_from_user(sep->caller_id_table[i].callerIdHash,
- hash, command_args.callerIdSizeInBytes))
- error = -EFAULT;
-end_function:
- return error;
-}
-
-/**
- * sep_set_current_caller_id - set the caller id
- * @sep: pointer to struct_sep_device
- *
- * Set the caller ID (if it exists) to the SEP. Note that this
- * function falls under the ioctl lock
- */
-static int sep_set_current_caller_id(struct sep_device *sep)
-{
- int i;
- u32 *hash_buf_ptr;
-
- /* Zero the previous value */
- memset(sep->shared_addr + SEP_CALLER_ID_OFFSET_BYTES,
- 0, SEP_CALLER_ID_HASH_SIZE_IN_BYTES);
-
- for (i = 0; i < SEP_CALLER_ID_TABLE_NUM_ENTRIES; i++) {
- if (sep->caller_id_table[i].pid == current->pid) {
- dev_dbg(&sep->pdev->dev, "Caller Id found\n");
-
- memcpy(sep->shared_addr + SEP_CALLER_ID_OFFSET_BYTES,
- (void *)(sep->caller_id_table[i].callerIdHash),
- SEP_CALLER_ID_HASH_SIZE_IN_BYTES);
- break;
- }
- }
- /* Ensure data is in little endian */
- hash_buf_ptr = (u32 *)sep->shared_addr +
- SEP_CALLER_ID_OFFSET_BYTES;
-
- for (i = 0; i < SEP_CALLER_ID_HASH_SIZE_IN_WORDS; i++)
- hash_buf_ptr[i] = cpu_to_le32(hash_buf_ptr[i]);
-
- return 0;
-}
-
-/**
- * sep_send_command_handler - kick off a command
- * @sep: SEP being signalled
- *
- * This function raises interrupt to SEP that signals that is has a new
- * command from the host
- *
- * Note that this function does fall under the ioctl lock
- */
-static int sep_send_command_handler(struct sep_device *sep)
-{
- unsigned long lck_flags;
- int error = 0;
-
- if (test_and_set_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags)) {
- error = -EPROTO;
- goto end_function;
- }
- sep_set_time(sep);
-
- sep_set_current_caller_id(sep);
-
- sep_dump_message(sep);
-
- /* Update counter */
- spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
- sep->send_ct++;
- spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
-
- dev_dbg(&sep->pdev->dev,
- "sep_send_command_handler send_ct %lx reply_ct %lx\n",
- sep->send_ct, sep->reply_ct);
-
- /* Send interrupt to SEP */
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
-
-end_function:
- return error;
-}
-
-/**
- * sep_allocate_data_pool_memory_handler -allocate pool memory
- * @sep: pointer to struct sep_device
- * @arg: pointer to struct alloc_struct
- *
- * This function handles the allocate data pool memory request
- * This function returns calculates the bus address of the
- * allocated memory, and the offset of this area from the mapped address.
- * Therefore, the FVOs in user space can calculate the exact virtual
- * address of this allocated memory
- */
-static int sep_allocate_data_pool_memory_handler(struct sep_device *sep,
- unsigned long arg)
-{
- int error = 0;
- struct alloc_struct command_args;
-
- /* Holds the allocated buffer address in the system memory pool */
- u32 *token_addr;
-
- if (copy_from_user(&command_args, (void __user *)arg,
- sizeof(struct alloc_struct))) {
- error = -EFAULT;
- goto end_function;
- }
-
- /* Allocate memory */
- if ((sep->data_pool_bytes_allocated + command_args.num_bytes) >
- SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
- error = -ENOMEM;
- goto end_function;
- }
-
- dev_dbg(&sep->pdev->dev,
- "data pool bytes_allocated: %x\n", (int)sep->data_pool_bytes_allocated);
- dev_dbg(&sep->pdev->dev,
- "offset: %x\n", SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES);
- /* Set the virtual and bus address */
- command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES +
- sep->data_pool_bytes_allocated;
-
- /* Place in the shared area that is known by the SEP */
- token_addr = (u32 *)(sep->shared_addr +
- SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES +
- (sep->num_of_data_allocations)*2*sizeof(u32));
-
- token_addr[0] = SEP_DATA_POOL_POINTERS_VAL_TOKEN;
- token_addr[1] = (u32)sep->shared_bus +
- SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES +
- sep->data_pool_bytes_allocated;
-
- /* Write the memory back to the user space */
- error = copy_to_user((void *)arg, (void *)&command_args,
- sizeof(struct alloc_struct));
- if (error) {
- error = -EFAULT;
- goto end_function;
- }
-
- /* Update the allocation */
- sep->data_pool_bytes_allocated += command_args.num_bytes;
- sep->num_of_data_allocations += 1;
-
-end_function:
- return error;
-}
-
-/**
- * sep_lock_kernel_pages - map kernel pages for DMA
- * @sep: pointer to struct sep_device
- * @kernel_virt_addr: address of data buffer in kernel
- * @data_size: size of data
- * @lli_array_ptr: lli array
- * @in_out_flag: input into device or output from device
- *
- * This function locks all the physical pages of the kernel virtual buffer
- * and construct a basic lli array, where each entry holds the physical
- * page address and the size that application data holds in this page
- * This function is used only during kernel crypto mod calls from within
- * the kernel (when ioctl is not used)
- */
-static int sep_lock_kernel_pages(struct sep_device *sep,
- unsigned long kernel_virt_addr,
- u32 data_size,
- struct sep_lli_entry **lli_array_ptr,
- int in_out_flag)
-
-{
- int error = 0;
- /* Array of lli */
- struct sep_lli_entry *lli_array;
- /* Map array */
- struct sep_dma_map *map_array;
-
- dev_dbg(&sep->pdev->dev, "lock kernel pages kernel_virt_addr is %08lx\n",
- (unsigned long)kernel_virt_addr);
- dev_dbg(&sep->pdev->dev, "data_size is %x\n", data_size);
-
- lli_array = kmalloc(sizeof(struct sep_lli_entry), GFP_ATOMIC);
- if (!lli_array) {
- error = -ENOMEM;
- goto end_function;
- }
- map_array = kmalloc(sizeof(struct sep_dma_map), GFP_ATOMIC);
- if (!map_array) {
- error = -ENOMEM;
- goto end_function_with_error;
- }
-
- map_array[0].dma_addr =
- dma_map_single(&sep->pdev->dev, (void *)kernel_virt_addr,
- data_size, DMA_BIDIRECTIONAL);
- map_array[0].size = data_size;
-
-
- /*
- * Set the start address of the first page - app data may start not at
- * the beginning of the page
- */
- lli_array[0].bus_address = (u32)map_array[0].dma_addr;
- lli_array[0].block_size = map_array[0].size;
-
- dev_dbg(&sep->pdev->dev,
- "lli_array[0].bus_address is %08lx, lli_array[0].block_size is %x\n",
- (unsigned long)lli_array[0].bus_address,
- lli_array[0].block_size);
-
- /* Set the output parameters */
- if (in_out_flag == SEP_DRIVER_IN_FLAG) {
- *lli_array_ptr = lli_array;
- sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = 1;
- sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL;
- sep->dma_res_arr[sep->nr_dcb_creat].in_map_array = map_array;
- sep->dma_res_arr[sep->nr_dcb_creat].in_map_num_entries = 1;
- } else {
- *lli_array_ptr = lli_array;
- sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages = 1;
- sep->dma_res_arr[sep->nr_dcb_creat].out_page_array = NULL;
- sep->dma_res_arr[sep->nr_dcb_creat].out_map_array = map_array;
- sep->dma_res_arr[sep->nr_dcb_creat].out_map_num_entries = 1;
- }
- goto end_function;
-
-end_function_with_error:
- kfree(lli_array);
-
-end_function:
- return error;
-}
-
-/**
- * sep_lock_user_pages - lock and map user pages for DMA
- * @sep: pointer to struct sep_device
- * @app_virt_addr: user memory data buffer
- * @data_size: size of data buffer
- * @lli_array_ptr: lli array
- * @in_out_flag: input or output to device
- *
- * This function locks all the physical pages of the application
- * virtual buffer and construct a basic lli array, where each entry
- * holds the physical page address and the size that application
- * data holds in this physical pages
- */
-static int sep_lock_user_pages(struct sep_device *sep,
- u32 app_virt_addr,
- u32 data_size,
- struct sep_lli_entry **lli_array_ptr,
- int in_out_flag)
-
-{
- int error = 0;
- u32 count;
- int result;
- /* The the page of the end address of the user space buffer */
- u32 end_page;
- /* The page of the start address of the user space buffer */
- u32 start_page;
- /* The range in pages */
- u32 num_pages;
- /* Array of pointers to page */
- struct page **page_array;
- /* Array of lli */
- struct sep_lli_entry *lli_array;
- /* Map array */
- struct sep_dma_map *map_array;
- /* Direction of the DMA mapping for locked pages */
- enum dma_data_direction dir;
-
- /* Set start and end pages and num pages */
- end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
- start_page = app_virt_addr >> PAGE_SHIFT;
- num_pages = end_page - start_page + 1;
-
- dev_dbg(&sep->pdev->dev, "lock user pages app_virt_addr is %x\n", app_virt_addr);
- dev_dbg(&sep->pdev->dev, "data_size is %x\n", data_size);
- dev_dbg(&sep->pdev->dev, "start_page is %x\n", start_page);
- dev_dbg(&sep->pdev->dev, "end_page is %x\n", end_page);
- dev_dbg(&sep->pdev->dev, "num_pages is %x\n", num_pages);
-
- /* Allocate array of pages structure pointers */
- page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
- if (!page_array) {
- error = -ENOMEM;
- goto end_function;
- }
- map_array = kmalloc(sizeof(struct sep_dma_map) * num_pages, GFP_ATOMIC);
- if (!map_array) {
- dev_warn(&sep->pdev->dev, "kmalloc for map_array failed\n");
- error = -ENOMEM;
- goto end_function_with_error1;
- }
-
- lli_array = kmalloc(sizeof(struct sep_lli_entry) * num_pages,
- GFP_ATOMIC);
-
- if (!lli_array) {
- dev_warn(&sep->pdev->dev, "kmalloc for lli_array failed\n");
- error = -ENOMEM;
- goto end_function_with_error2;
- }
-
- /* Convert the application virtual address into a set of physical */
- down_read(&current->mm->mmap_sem);
- result = get_user_pages(current, current->mm, app_virt_addr,
- num_pages,
- ((in_out_flag == SEP_DRIVER_IN_FLAG) ? 0 : 1),
- 0, page_array, NULL);
-
- up_read(&current->mm->mmap_sem);
-
- /* Check the number of pages locked - if not all then exit with error */
- if (result != num_pages) {
- dev_warn(&sep->pdev->dev,
- "not all pages locked by get_user_pages\n");
- error = -ENOMEM;
- goto end_function_with_error3;
- }
-
- dev_dbg(&sep->pdev->dev, "get_user_pages succeeded\n");
-
- /* Set direction */
- if (in_out_flag == SEP_DRIVER_IN_FLAG)
- dir = DMA_TO_DEVICE;
- else
- dir = DMA_FROM_DEVICE;
-
- /*
- * Fill the array using page array data and
- * map the pages - this action will also flush the cache as needed
- */
- for (count = 0; count < num_pages; count++) {
- /* Fill the map array */
- map_array[count].dma_addr =
- dma_map_page(&sep->pdev->dev, page_array[count],
- 0, PAGE_SIZE, /*dir*/DMA_BIDIRECTIONAL);
-
- map_array[count].size = PAGE_SIZE;
-
- /* Fill the lli array entry */
- lli_array[count].bus_address = (u32)map_array[count].dma_addr;
- lli_array[count].block_size = PAGE_SIZE;
-
- dev_warn(&sep->pdev->dev, "lli_array[%x].bus_address is %08lx, lli_array[%x].block_size is %x\n",
- count, (unsigned long)lli_array[count].bus_address,
- count, lli_array[count].block_size);
- }
-
- /* Check the offset for the first page */
- lli_array[0].bus_address =
- lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));
-
- /* Check that not all the data is in the first page only */
- if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
- lli_array[0].block_size = data_size;
- else
- lli_array[0].block_size =
- PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
-
- dev_dbg(&sep->pdev->dev,
- "lli_array[0].bus_address is %08lx, lli_array[0].block_size is %x\n",
- (unsigned long)lli_array[count].bus_address,
- lli_array[count].block_size);
-
- /* Check the size of the last page */
- if (num_pages > 1) {
- lli_array[num_pages - 1].block_size =
- (app_virt_addr + data_size) & (~PAGE_MASK);
- if (lli_array[num_pages - 1].block_size == 0)
- lli_array[num_pages - 1].block_size = PAGE_SIZE;
-
- dev_warn(&sep->pdev->dev,
- "lli_array[%x].bus_address is "
- "%08lx, lli_array[%x].block_size is %x\n",
- num_pages - 1,
- (unsigned long)lli_array[num_pages - 1].bus_address,
- num_pages - 1,
- lli_array[num_pages - 1].block_size);
- }
-
- /* Set output params according to the in_out flag */
- if (in_out_flag == SEP_DRIVER_IN_FLAG) {
- *lli_array_ptr = lli_array;
- sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = num_pages;
- sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = page_array;
- sep->dma_res_arr[sep->nr_dcb_creat].in_map_array = map_array;
- sep->dma_res_arr[sep->nr_dcb_creat].in_map_num_entries =
- num_pages;
- } else {
- *lli_array_ptr = lli_array;
- sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages = num_pages;
- sep->dma_res_arr[sep->nr_dcb_creat].out_page_array =
- page_array;
- sep->dma_res_arr[sep->nr_dcb_creat].out_map_array = map_array;
- sep->dma_res_arr[sep->nr_dcb_creat].out_map_num_entries =
- num_pages;
- }
- goto end_function;
-
-end_function_with_error3:
- /* Free lli array */
- kfree(lli_array);
-
-end_function_with_error2:
- kfree(map_array);
-
-end_function_with_error1:
- /* Free page array */
- kfree(page_array);
-
-end_function:
- return error;
-}
-
-/**
- * u32 sep_calculate_lli_table_max_size - size the LLI table
- * @sep: pointer to struct sep_device
- * @lli_in_array_ptr
- * @num_array_entries
- * @last_table_flag
- *
- * This function calculates the size of data that can be inserted into
- * the lli table from this array, such that either the table is full
- * (all entries are entered), or there are no more entries in the
- * lli array
- */
-static u32 sep_calculate_lli_table_max_size(struct sep_device *sep,
- struct sep_lli_entry *lli_in_array_ptr,
- u32 num_array_entries,
- u32 *last_table_flag)
-{
- u32 counter;
- /* Table data size */
- u32 table_data_size = 0;
- /* Data size for the next table */
- u32 next_table_data_size;
-
- *last_table_flag = 0;
-
- /*
- * Calculate the data in the out lli table till we fill the whole
- * table or till the data has ended
- */
- for (counter = 0;
- (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) &&
- (counter < num_array_entries); counter++)
- table_data_size += lli_in_array_ptr[counter].block_size;
-
- /*
- * Check if we reached the last entry,
- * meaning this ia the last table to build,
- * and no need to check the block alignment
- */
- if (counter == num_array_entries) {
- /* Set the last table flag */
- *last_table_flag = 1;
- goto end_function;
- }
-
- /*
- * Calculate the data size of the next table.
- * Stop if no entries left or if data size is more the DMA restriction
- */
- next_table_data_size = 0;
- for (; counter < num_array_entries; counter++) {
- next_table_data_size += lli_in_array_ptr[counter].block_size;
- if (next_table_data_size >= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
- break;
- }
-
- /*
- * Check if the next table data size is less then DMA rstriction.
- * if it is - recalculate the current table size, so that the next
- * table data size will be adaquete for DMA
- */
- if (next_table_data_size &&
- next_table_data_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
-
- table_data_size -= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE -
- next_table_data_size);
-
-end_function:
- return table_data_size;
-}
-
-/**
- * sep_build_lli_table - build an lli array for the given table
- * @sep: pointer to struct sep_device
- * @lli_array_ptr: pointer to lli array
- * @lli_table_ptr: pointer to lli table
- * @num_processed_entries_ptr: pointer to number of entries
- * @num_table_entries_ptr: pointer to number of tables
- * @table_data_size: total data size
- *
- * Builds ant lli table from the lli_array according to
- * the given size of data
- */
-static void sep_build_lli_table(struct sep_device *sep,
- struct sep_lli_entry *lli_array_ptr,
- struct sep_lli_entry *lli_table_ptr,
- u32 *num_processed_entries_ptr,
- u32 *num_table_entries_ptr,
- u32 table_data_size)
-{
- /* Current table data size */
- u32 curr_table_data_size;
- /* Counter of lli array entry */
- u32 array_counter;
-
- /* Init current table data size and lli array entry counter */
- curr_table_data_size = 0;
- array_counter = 0;
- *num_table_entries_ptr = 1;
-
- dev_dbg(&sep->pdev->dev, "build lli table table_data_size is %x\n", table_data_size);
-
- /* Fill the table till table size reaches the needed amount */
- while (curr_table_data_size < table_data_size) {
- /* Update the number of entries in table */
- (*num_table_entries_ptr)++;
-
- lli_table_ptr->bus_address =
- cpu_to_le32(lli_array_ptr[array_counter].bus_address);
-
- lli_table_ptr->block_size =
- cpu_to_le32(lli_array_ptr[array_counter].block_size);
-
- curr_table_data_size += lli_array_ptr[array_counter].block_size;
-
- dev_dbg(&sep->pdev->dev, "lli_table_ptr is %p\n",
- lli_table_ptr);
- dev_dbg(&sep->pdev->dev, "lli_table_ptr->bus_address is %08lx\n",
- (unsigned long)lli_table_ptr->bus_address);
- dev_dbg(&sep->pdev->dev, "lli_table_ptr->block_size is %x\n",
- lli_table_ptr->block_size);
-
- /* Check for overflow of the table data */
- if (curr_table_data_size > table_data_size) {
- dev_dbg(&sep->pdev->dev,
- "curr_table_data_size too large\n");
-
- /* Update the size of block in the table */
- lli_table_ptr->block_size -=
- cpu_to_le32((curr_table_data_size - table_data_size));
-
- /* Update the physical address in the lli array */
- lli_array_ptr[array_counter].bus_address +=
- cpu_to_le32(lli_table_ptr->block_size);
-
- /* Update the block size left in the lli array */
- lli_array_ptr[array_counter].block_size =
- (curr_table_data_size - table_data_size);
- } else
- /* Advance to the next entry in the lli_array */
- array_counter++;
-
- dev_dbg(&sep->pdev->dev,
- "lli_table_ptr->bus_address is %08lx\n",
- (unsigned long)lli_table_ptr->bus_address);
- dev_dbg(&sep->pdev->dev,
- "lli_table_ptr->block_size is %x\n",
- lli_table_ptr->block_size);
-
- /* Move to the next entry in table */
- lli_table_ptr++;
- }
-
- /* Set the info entry to default */
- lli_table_ptr->bus_address = 0xffffffff;
- lli_table_ptr->block_size = 0;
-
- /* Set the output parameter */
- *num_processed_entries_ptr += array_counter;
-
-}
-
-/**
- * sep_shared_area_virt_to_bus - map shared area to bus address
- * @sep: pointer to struct sep_device
- * @virt_address: virtual address to convert
- *
- * This functions returns the physical address inside shared area according
- * to the virtual address. It can be either on the externa RAM device
- * (ioremapped), or on the system RAM
- * This implementation is for the external RAM
- */
-static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
- void *virt_address)
-{
- dev_dbg(&sep->pdev->dev, "sh virt to phys v %p\n", virt_address);
- dev_dbg(&sep->pdev->dev, "sh virt to phys p %08lx\n",
- (unsigned long)
- sep->shared_bus + (virt_address - sep->shared_addr));
-
- return sep->shared_bus + (size_t)(virt_address - sep->shared_addr);
-}
-
-/**
- * sep_shared_area_bus_to_virt - map shared area bus address to kernel
- * @sep: pointer to struct sep_device
- * @bus_address: bus address to convert
- *
- * This functions returns the virtual address inside shared area
- * according to the physical address. It can be either on the
- * externa RAM device (ioremapped), or on the system RAM
- * This implementation is for the external RAM
- */
-static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
- dma_addr_t bus_address)
-{
- dev_dbg(&sep->pdev->dev, "shared bus to virt b=%lx v=%lx\n",
- (unsigned long)bus_address, (unsigned long)(sep->shared_addr +
- (size_t)(bus_address - sep->shared_bus)));
-
- return sep->shared_addr + (size_t)(bus_address - sep->shared_bus);
-}
-
-/**
- * sep_debug_print_lli_tables - dump LLI table
- * @sep: pointer to struct sep_device
- * @lli_table_ptr: pointer to sep_lli_entry
- * @num_table_entries: number of entries
- * @table_data_size: total data size
- *
- * Walk the list of the created tables and print all the data
- */
-static void sep_debug_print_lli_tables(struct sep_device *sep,
- struct sep_lli_entry *lli_table_ptr,
- unsigned long num_table_entries,
- unsigned long table_data_size)
-{
- unsigned long table_count = 1;
- unsigned long entries_count = 0;
-
- dev_dbg(&sep->pdev->dev, "sep_debug_print_lli_tables start\n");
-
- while ((unsigned long) lli_table_ptr->bus_address != 0xffffffff) {
- dev_dbg(&sep->pdev->dev,
- "lli table %08lx, table_data_size is %lu\n",
- table_count, table_data_size);
- dev_dbg(&sep->pdev->dev, "num_table_entries is %lu\n",
- num_table_entries);
-
- /* Print entries of the table (without info entry) */
- for (entries_count = 0; entries_count < num_table_entries;
- entries_count++, lli_table_ptr++) {
-
- dev_dbg(&sep->pdev->dev,
- "lli_table_ptr address is %08lx\n",
- (unsigned long) lli_table_ptr);
-
- dev_dbg(&sep->pdev->dev,
- "phys address is %08lx block size is %x\n",
- (unsigned long)lli_table_ptr->bus_address,
- lli_table_ptr->block_size);
- }
- /* Point to the info entry */
- lli_table_ptr--;
-
- dev_dbg(&sep->pdev->dev,
- "phys lli_table_ptr->block_size is %x\n",
- lli_table_ptr->block_size);
-
- dev_dbg(&sep->pdev->dev,
- "phys lli_table_ptr->physical_address is %08lu\n",
- (unsigned long)lli_table_ptr->bus_address);
-
-
- table_data_size = lli_table_ptr->block_size & 0xffffff;
- num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
-
- dev_dbg(&sep->pdev->dev,
- "phys table_data_size is %lu num_table_entries is"
- " %lu bus_address is%lu\n", table_data_size,
- num_table_entries, (unsigned long)lli_table_ptr->bus_address);
-
- if ((unsigned long)lli_table_ptr->bus_address != 0xffffffff)
- lli_table_ptr = (struct sep_lli_entry *)
- sep_shared_bus_to_virt(sep,
- (unsigned long)lli_table_ptr->bus_address);
-
- table_count++;
- }
- dev_dbg(&sep->pdev->dev, "sep_debug_print_lli_tables end\n");
-}
-
-
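The walker treats the last entry of each table as a link: its bus_address points at the next table, its block_size word encodes that table's entry count and data size, and a bus_address of 0xffffffff ends the chain. A simplified user-space model of the traversal (an array index stands in for the bus address; all names are illustrative):

#include <stdint.h>
#include <stdio.h>

struct lli_entry {
	uint32_t bus_address;
	uint32_t block_size;
};

#define LLI_END 0xffffffffu

/* Two tables laid out in one flat array standing in for the shared area */
static struct lli_entry shared[8];

/* Map a fake "bus address" (array index) back to a table pointer */
static struct lli_entry *resolve(uint32_t bus)
{
	return &shared[bus];
}

static void walk_lli_chain(struct lli_entry *table, unsigned long entries)
{
	unsigned long table_no = 1;

	while (table->bus_address != LLI_END) {
		struct lli_entry *info = table + entries - 1;

		printf("table %lu: %lu entries, info size %u\n",
		       table_no, entries, info->block_size & 0xffffff);

		if (info->bus_address == LLI_END)
			break;

		/* The info entry links to the next table and gives its geometry */
		entries = (info->block_size >> 24) & 0xff;
		table = resolve(info->bus_address);
		table_no++;
	}
}

int main(void)
{
	/* First table: one data entry plus an info entry pointing at index 4 */
	shared[0] = (struct lli_entry){ .bus_address = 0x1000, .block_size = 4096 };
	shared[1] = (struct lli_entry){ .bus_address = 4, .block_size = (2u << 24) | 512 };
	/* Second table: one data entry plus a terminating info entry */
	shared[4] = (struct lli_entry){ .bus_address = 0x2000, .block_size = 512 };
	shared[5] = (struct lli_entry){ .bus_address = LLI_END, .block_size = 0 };

	walk_lli_chain(&shared[0], 2);
	return 0;
}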
-/**
- * sep_prepare_empty_lli_table - create a blank LLI table
- * @sep: pointer to struct sep_device
- * @lli_table_addr_ptr: pointer to lli table
- * @num_entries_ptr: pointer to number of entries
- * @table_data_size_ptr: point to table data size
- *
- * This function creates empty lli tables when there is no data
- */
-static void sep_prepare_empty_lli_table(struct sep_device *sep,
- dma_addr_t *lli_table_addr_ptr,
- u32 *num_entries_ptr,
- u32 *table_data_size_ptr)
-{
- struct sep_lli_entry *lli_table_ptr;
-
- /* Find the area for new table */
- lli_table_ptr =
- (struct sep_lli_entry *)(sep->shared_addr +
- SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
- sep->num_lli_tables_created * sizeof(struct sep_lli_entry) *
- SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
-
- lli_table_ptr->bus_address = 0;
- lli_table_ptr->block_size = 0;
-
- lli_table_ptr++;
- lli_table_ptr->bus_address = 0xFFFFFFFF;
- lli_table_ptr->block_size = 0;
-
- /* Set the output parameter value */
- *lli_table_addr_ptr = sep->shared_bus +
- SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
- sep->num_lli_tables_created *
- sizeof(struct sep_lli_entry) *
- SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
-
- /* Set the num of entries and table data size for empty table */
- *num_entries_ptr = 2;
- *table_data_size_ptr = 0;
-
- /* Update the number of created tables */
- sep->num_lli_tables_created++;
-}
-
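Even with no payload the SEP is handed a well-formed table: a single zeroed data entry followed by the usual 0xffffffff terminator, reported as two entries and zero data. A minimal sketch of that layout, leaving out the shared-area bookkeeping (names are illustrative):

#include <stdint.h>
#include <stdio.h>

struct lli_entry {
	uint32_t bus_address;
	uint32_t block_size;
};

/* Build the two-entry "no data" table the SEP expects */
static void fill_empty_lli_table(struct lli_entry table[2],
				 uint32_t *num_entries, uint32_t *data_size)
{
	table[0].bus_address = 0;		/* dummy data entry */
	table[0].block_size = 0;
	table[1].bus_address = 0xFFFFFFFF;	/* info/terminator entry */
	table[1].block_size = 0;

	*num_entries = 2;
	*data_size = 0;
}

int main(void)
{
	struct lli_entry t[2];
	uint32_t n, sz;

	fill_empty_lli_table(t, &n, &sz);
	printf("entries=%u size=%u terminator=%08x\n", n, sz, t[1].bus_address);
	return 0;
}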
-/**
- * sep_prepare_input_dma_table - prepare input DMA mappings
- * @sep: pointer to struct sep_device
- * @data_size:
- * @block_size:
- * @lli_table_ptr:
- * @num_entries_ptr:
- * @table_data_size_ptr:
- * @is_kva: set for kernel data (kernel cryptio call)
- *
- * This function prepares only the input DMA table for synchronous
- * symmetric operations (HASH)
- * Note that all bus addresses that are passed to the SEP
- * are in 32 bit format; the SEP is a 32 bit device
- */
-static int sep_prepare_input_dma_table(struct sep_device *sep,
- unsigned long app_virt_addr,
- u32 data_size,
- u32 block_size,
- dma_addr_t *lli_table_ptr,
- u32 *num_entries_ptr,
- u32 *table_data_size_ptr,
- bool is_kva)
-{
- int error = 0;
- /* Pointer to the info entry of the table - the last entry */
- struct sep_lli_entry *info_entry_ptr;
- /* Array of pointers to page */
- struct sep_lli_entry *lli_array_ptr;
- /* Points to the first entry to be processed in the lli_in_array */
- u32 current_entry = 0;
- /* Num entries in the virtual buffer */
- u32 sep_lli_entries = 0;
- /* Lli table pointer */
- struct sep_lli_entry *in_lli_table_ptr;
- /* The total data in one table */
- u32 table_data_size = 0;
- /* Flag for last table */
- u32 last_table_flag = 0;
- /* Number of entries in lli table */
- u32 num_entries_in_table = 0;
- /* Next table address */
- void *lli_table_alloc_addr = 0;
-
- dev_dbg(&sep->pdev->dev, "prepare input dma table data_size is %x\n", data_size);
- dev_dbg(&sep->pdev->dev, "block_size is %x\n", block_size);
-
- /* Initialize the pages pointers */
- sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL;
- sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = 0;
-
- /* Set the kernel address for first table to be allocated */
- lli_table_alloc_addr = (void *)(sep->shared_addr +
- SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
- sep->num_lli_tables_created * sizeof(struct sep_lli_entry) *
- SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
-
- if (data_size == 0) {
- /* Special case - create empty table - 2 entries, zero data */
- sep_prepare_empty_lli_table(sep, lli_table_ptr,
- num_entries_ptr, table_data_size_ptr);
- goto update_dcb_counter;
- }
-
- /* Check if the pages are in Kernel Virtual Address layout */
- if (is_kva == true)
- /* Lock the pages in the kernel */
- error = sep_lock_kernel_pages(sep, app_virt_addr,
- data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG);
- else
- /*
- * Lock the pages of the user buffer
- * and translate them to pages
- */
- error = sep_lock_user_pages(sep, app_virt_addr,
- data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG);
-
- if (error)
- goto end_function;
-
- dev_dbg(&sep->pdev->dev, "output sep_in_num_pages is %x\n",
- sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages);
-
- current_entry = 0;
- info_entry_ptr = NULL;
-
- sep_lli_entries = sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages;
-
- /* Loop until all the entries in the in array are processed */
- while (current_entry < sep_lli_entries) {
-
- /* Set the new input and output tables */
- in_lli_table_ptr =
- (struct sep_lli_entry *)lli_table_alloc_addr;
-
- lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
- SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
-
- if (lli_table_alloc_addr >
- ((void *)sep->shared_addr +
- SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
- SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
-
- error = -ENOMEM;
- goto end_function_error;
-
- }
-
- /* Update the number of created tables */
- sep->num_lli_tables_created++;
-
- /* Calculate the maximum size of data for input table */
- table_data_size = sep_calculate_lli_table_max_size(sep,
- &lli_array_ptr[current_entry],
- (sep_lli_entries - current_entry),
- &last_table_flag);
-
- /*
- * If this is not the last table -
- * then align it to the block size
- */
- if (!last_table_flag)
- table_data_size =
- (table_data_size / block_size) * block_size;
-
- dev_dbg(&sep->pdev->dev, "output table_data_size is %x\n",
- table_data_size);
-
- /* Construct input lli table */
- sep_build_lli_table(sep, &lli_array_ptr[current_entry],
- in_lli_table_ptr,
- &current_entry, &num_entries_in_table, table_data_size);
-
- if (info_entry_ptr == NULL) {
-
- /* Set the output parameters to physical addresses */
- *lli_table_ptr = sep_shared_area_virt_to_bus(sep,
- in_lli_table_ptr);
- *num_entries_ptr = num_entries_in_table;
- *table_data_size_ptr = table_data_size;
-
- dev_dbg(&sep->pdev->dev,
- "output lli_table_in_ptr is %08lx\n",
- (unsigned long)*lli_table_ptr);
-
- } else {
- /* Update the info entry of the previous in table */
- info_entry_ptr->bus_address =
- sep_shared_area_virt_to_bus(sep,
- in_lli_table_ptr);
- info_entry_ptr->block_size =
- ((num_entries_in_table) << 24) |
- (table_data_size);
- }
- /* Save the pointer to the info entry of the current tables */
- info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
- }
- /* Print input tables */
- sep_debug_print_lli_tables(sep, (struct sep_lli_entry *)
- sep_shared_area_bus_to_virt(sep, *lli_table_ptr),
- *num_entries_ptr, *table_data_size_ptr);
- /* The array of the pages */
- kfree(lli_array_ptr);
-
-update_dcb_counter:
- /* Update DCB counter */
- sep->nr_dcb_creat++;
- goto end_function;
-
-end_function_error:
- /* Free all the allocated resources */
- kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_map_array);
- kfree(lli_array_ptr);
- kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_page_array);
-
-end_function:
- return error;
-
-}
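The key sizing rule in the loop above is that every table except the last is trimmed down to a whole number of cipher blocks, so the SEP never sees a block split across tables; only the final table may carry the unaligned tail. A tiny illustration of that rounding (a sketch, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Round a candidate table size down to a multiple of the cipher block size */
static uint32_t align_table_size(uint32_t table_data_size, uint32_t block_size,
				 int is_last_table)
{
	if (is_last_table)
		return table_data_size;	/* the tail may be a partial block */
	return (table_data_size / block_size) * block_size;
}

int main(void)
{
	printf("%u\n", align_table_size(5000, 16, 0));	/* 4992 */
	printf("%u\n", align_table_size(5000, 16, 1));	/* 5000 */
	return 0;
}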
-/**
- * sep_construct_dma_tables_from_lli - prepare AES/DES mappings
- * @sep: pointer to struct sep_device
- * @lli_in_array:
- * @sep_in_lli_entries:
- * @lli_out_array:
- * @sep_out_lli_entries
- * @block_size
- * @lli_table_in_ptr
- * @lli_table_out_ptr
- * @in_num_entries_ptr
- * @out_num_entries_ptr
- * @table_data_size_ptr
- *
- * This function creates the input and output DMA tables for
- * symmetric operations (AES/DES) according to the block
- * size from LLI arrays
- * Note that all bus addresses that are passed to the SEP
- * are in 32 bit format; the SEP is a 32 bit device
- */
-static int sep_construct_dma_tables_from_lli(
- struct sep_device *sep,
- struct sep_lli_entry *lli_in_array,
- u32 sep_in_lli_entries,
- struct sep_lli_entry *lli_out_array,
- u32 sep_out_lli_entries,
- u32 block_size,
- dma_addr_t *lli_table_in_ptr,
- dma_addr_t *lli_table_out_ptr,
- u32 *in_num_entries_ptr,
- u32 *out_num_entries_ptr,
- u32 *table_data_size_ptr)
-{
- /* Points to the area where next lli table can be allocated */
- void *lli_table_alloc_addr = 0;
- /* Input lli table */
- struct sep_lli_entry *in_lli_table_ptr = NULL;
- /* Output lli table */
- struct sep_lli_entry *out_lli_table_ptr = NULL;
- /* Pointer to the info entry of the table - the last entry */
- struct sep_lli_entry *info_in_entry_ptr = NULL;
- /* Pointer to the info entry of the table - the last entry */
- struct sep_lli_entry *info_out_entry_ptr = NULL;
- /* Points to the first entry to be processed in the lli_in_array */
- u32 current_in_entry = 0;
- /* Points to the first entry to be processed in the lli_out_array */
- u32 current_out_entry = 0;
- /* Max size of the input table */
- u32 in_table_data_size = 0;
- /* Max size of the output table */
- u32 out_table_data_size = 0;
- /* Flag that signifies if this is the last table built */
- u32 last_table_flag = 0;
- /* The data size that should be in table */
- u32 table_data_size = 0;
- /* Number of entries in the input table */
- u32 num_entries_in_table = 0;
- /* Number of entries in the output table */
- u32 num_entries_out_table = 0;
-
- /* Initiate to point after the message area */
- lli_table_alloc_addr = (void *)(sep->shared_addr +
- SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
- (sep->num_lli_tables_created *
- (sizeof(struct sep_lli_entry) *
- SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP)));
-
- /* Loop until all the entries in the in array are processed */
- while (current_in_entry < sep_in_lli_entries) {
- /* Set the new input and output tables */
- in_lli_table_ptr =
- (struct sep_lli_entry *)lli_table_alloc_addr;
-
- lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
- SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
-
- /* Set the first output tables */
- out_lli_table_ptr =
- (struct sep_lli_entry *)lli_table_alloc_addr;
-
- /* Check if the DMA table area limit was overrun */
- if ((lli_table_alloc_addr + sizeof(struct sep_lli_entry) *
- SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP) >
- ((void *)sep->shared_addr +
- SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
- SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
-
- dev_warn(&sep->pdev->dev, "dma table limit overrun\n");
- return -ENOMEM;
- }
-
- /* Update the number of the lli tables created */
- sep->num_lli_tables_created += 2;
-
- lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
- SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
-
- /* Calculate the maximum size of data for input table */
- in_table_data_size =
- sep_calculate_lli_table_max_size(sep,
- &lli_in_array[current_in_entry],
- (sep_in_lli_entries - current_in_entry),
- &last_table_flag);
-
- /* Calculate the maximum size of data for output table */
- out_table_data_size =
- sep_calculate_lli_table_max_size(sep,
- &lli_out_array[current_out_entry],
- (sep_out_lli_entries - current_out_entry),
- &last_table_flag);
-
- dev_dbg(&sep->pdev->dev,
- "construct tables from lli in_table_data_size is %x\n",
- in_table_data_size);
-
- dev_dbg(&sep->pdev->dev,
- "construct tables from lli out_table_data_size is %x\n",
- out_table_data_size);
-
- table_data_size = in_table_data_size;
-
- if (!last_table_flag) {
- /*
- * If this is not the last table,
- * then we must take the smaller of the two data sizes
- * and align it to the block size
- */
- if (table_data_size > out_table_data_size)
- table_data_size = out_table_data_size;
-
- /*
- * Now calculate the table size so that
- * it will be a whole multiple of the block size
- */
- table_data_size = (table_data_size / block_size) *
- block_size;
- }
-
- /* Construct input lli table */
- sep_build_lli_table(sep, &lli_in_array[current_in_entry],
- in_lli_table_ptr,
- &current_in_entry,
- &num_entries_in_table,
- table_data_size);
-
- /* Construct output lli table */
- sep_build_lli_table(sep, &lli_out_array[current_out_entry],
- out_lli_table_ptr,
- &current_out_entry,
- &num_entries_out_table,
- table_data_size);
-
- /* If info entry is null - this is the first table built */
- if (info_in_entry_ptr == NULL) {
- /* Set the output parameters to physical addresses */
- *lli_table_in_ptr =
- sep_shared_area_virt_to_bus(sep, in_lli_table_ptr);
-
- *in_num_entries_ptr = num_entries_in_table;
-
- *lli_table_out_ptr =
- sep_shared_area_virt_to_bus(sep,
- out_lli_table_ptr);
-
- *out_num_entries_ptr = num_entries_out_table;
- *table_data_size_ptr = table_data_size;
-
- dev_dbg(&sep->pdev->dev,
- "output lli_table_in_ptr is %08lx\n",
- (unsigned long)*lli_table_in_ptr);
- dev_dbg(&sep->pdev->dev,
- "output lli_table_out_ptr is %08lx\n",
- (unsigned long)*lli_table_out_ptr);
- } else {
- /* Update the info entry of the previous in table */
- info_in_entry_ptr->bus_address =
- sep_shared_area_virt_to_bus(sep,
- in_lli_table_ptr);
-
- info_in_entry_ptr->block_size =
- ((num_entries_in_table) << 24) |
- (table_data_size);
-
- /* Update the info entry of the previous out table */
- info_out_entry_ptr->bus_address =
- sep_shared_area_virt_to_bus(sep,
- out_lli_table_ptr);
-
- info_out_entry_ptr->block_size =
- ((num_entries_out_table) << 24) |
- (table_data_size);
-
- dev_dbg(&sep->pdev->dev,
- "output lli_table_in_ptr:%08lx %08x\n",
- (unsigned long)info_in_entry_ptr->bus_address,
- info_in_entry_ptr->block_size);
-
- dev_dbg(&sep->pdev->dev,
- "output lli_table_out_ptr:%08lx %08x\n",
- (unsigned long)info_out_entry_ptr->bus_address,
- info_out_entry_ptr->block_size);
- }
-
- /* Save the pointer to the info entry of the current tables */
- info_in_entry_ptr = in_lli_table_ptr +
- num_entries_in_table - 1;
- info_out_entry_ptr = out_lli_table_ptr +
- num_entries_out_table - 1;
-
- dev_dbg(&sep->pdev->dev,
- "output num_entries_out_table is %x\n",
- (u32)num_entries_out_table);
- dev_dbg(&sep->pdev->dev,
- "output info_in_entry_ptr is %lx\n",
- (unsigned long)info_in_entry_ptr);
- dev_dbg(&sep->pdev->dev,
- "output info_out_entry_ptr is %lx\n",
- (unsigned long)info_out_entry_ptr);
- }
-
- /* Print input tables */
- sep_debug_print_lli_tables(sep,
- (struct sep_lli_entry *)
- sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr),
- *in_num_entries_ptr,
- *table_data_size_ptr);
-
- /* Print output tables */
- sep_debug_print_lli_tables(sep,
- (struct sep_lli_entry *)
- sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr),
- *out_num_entries_ptr,
- *table_data_size_ptr);
-
- return 0;
-}
-
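For AES/DES the input and output tables of a pair must describe exactly the same number of bytes, so a non-final pair is sized to the smaller of the two candidate sizes and then rounded down to the block size, mirroring the branch above. A compact sketch of that decision with invented names:

#include <stdint.h>
#include <stdio.h>

static uint32_t pair_table_size(uint32_t in_size, uint32_t out_size,
				uint32_t block_size, int is_last_table)
{
	uint32_t size = in_size;

	if (!is_last_table) {
		if (size > out_size)
			size = out_size;			/* take the smaller side */
		size = (size / block_size) * block_size;	/* whole blocks only */
	}
	return size;
}

int main(void)
{
	/* 8 KB available on input but only 5000 bytes on output, 16-byte blocks */
	printf("%u\n", pair_table_size(8192, 5000, 16, 0));	/* 4992 */
	return 0;
}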
-/**
- * sep_prepare_input_output_dma_table - prepare DMA I/O table
- * @app_virt_in_addr:
- * @app_virt_out_addr:
- * @data_size:
- * @block_size:
- * @lli_table_in_ptr:
- * @lli_table_out_ptr:
- * @in_num_entries_ptr:
- * @out_num_entries_ptr:
- * @table_data_size_ptr:
- * @is_kva: set for kernel data; used only for kernel crypto module
- *
- * This function builds input and output DMA tables for synchronous
- * symmetric operations (AES, DES, HASH). It also checks that each table
- * is a whole multiple of the block size
- * Note that all bus addresses that are passed to the SEP
- * are in 32 bit format; the SEP is a 32 bit device
- */
-static int sep_prepare_input_output_dma_table(struct sep_device *sep,
- unsigned long app_virt_in_addr,
- unsigned long app_virt_out_addr,
- u32 data_size,
- u32 block_size,
- dma_addr_t *lli_table_in_ptr,
- dma_addr_t *lli_table_out_ptr,
- u32 *in_num_entries_ptr,
- u32 *out_num_entries_ptr,
- u32 *table_data_size_ptr,
- bool is_kva)
-
-{
- int error = 0;
- /* Array of pointers of page */
- struct sep_lli_entry *lli_in_array;
- /* Array of pointers of page */
- struct sep_lli_entry *lli_out_array;
-
- if (data_size == 0) {
- /* Prepare empty table for input and output */
- sep_prepare_empty_lli_table(sep, lli_table_in_ptr,
- in_num_entries_ptr, table_data_size_ptr);
-
- sep_prepare_empty_lli_table(sep, lli_table_out_ptr,
- out_num_entries_ptr, table_data_size_ptr);
-
- goto update_dcb_counter;
- }
-
- /* Initialize the pages pointers */
- sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL;
- sep->dma_res_arr[sep->nr_dcb_creat].out_page_array = NULL;
-
- /* Lock the pages of the buffer and translate them to pages */
- if (is_kva == true) {
- error = sep_lock_kernel_pages(sep, app_virt_in_addr,
- data_size, &lli_in_array, SEP_DRIVER_IN_FLAG);
-
- if (error) {
- dev_warn(&sep->pdev->dev,
- "lock kernel for in failed\n");
- goto end_function;
- }
-
- error = sep_lock_kernel_pages(sep, app_virt_out_addr,
- data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG);
-
- if (error) {
- dev_warn(&sep->pdev->dev,
- "lock kernel for out failed\n");
- goto end_function;
- }
- }
-
- else {
- error = sep_lock_user_pages(sep, app_virt_in_addr,
- data_size, &lli_in_array, SEP_DRIVER_IN_FLAG);
- if (error) {
- dev_warn(&sep->pdev->dev,
- "sep_lock_user_pages for input virtual buffer failed\n");
- goto end_function;
- }
-
- error = sep_lock_user_pages(sep, app_virt_out_addr,
- data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG);
-
- if (error) {
- dev_warn(&sep->pdev->dev,
- "sep_lock_user_pages for output virtual buffer failed\n");
- goto end_function_free_lli_in;
- }
- }
-
- dev_dbg(&sep->pdev->dev, "prep input output dma table sep_in_num_pages is %x\n",
- sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages);
- dev_dbg(&sep->pdev->dev, "sep_out_num_pages is %x\n",
- sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages);
- dev_dbg(&sep->pdev->dev, "SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n",
- SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
-
- /* Call the function that creates table from the lli arrays */
- error = sep_construct_dma_tables_from_lli(sep, lli_in_array,
- sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages,
- lli_out_array,
- sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages,
- block_size, lli_table_in_ptr, lli_table_out_ptr,
- in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
-
- if (error) {
- dev_warn(&sep->pdev->dev,
- "sep_construct_dma_tables_from_lli failed\n");
- goto end_function_with_error;
- }
-
- kfree(lli_out_array);
- kfree(lli_in_array);
-
-update_dcb_counter:
- /* Update DCB counter */
- sep->nr_dcb_creat++;
-
- goto end_function;
-
-end_function_with_error:
- kfree(sep->dma_res_arr[sep->nr_dcb_creat].out_map_array);
- kfree(sep->dma_res_arr[sep->nr_dcb_creat].out_page_array);
- kfree(lli_out_array);
-
-
-end_function_free_lli_in:
- kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_map_array);
- kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_page_array);
- kfree(lli_in_array);
-
-end_function:
-
- return error;
-
-}
-
-/**
- * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
- * @app_in_address: unsigned long; for data buffer in (user space)
- * @app_out_address: unsigned long; for data buffer out (user space)
- * @data_in_size: u32; for size of data
- * @block_size: u32; for block size
- * @tail_block_size: u32; for size of tail block
- * @isapplet: bool; to indicate external app
- * @is_kva: bool; kernel buffer; only used for kernel crypto module
- *
- * This function prepares the linked DMA tables and puts the
- * address for the linked list of tables into a DCB (data control
- * block), the address of which is known by the SEP hardware
- * Note that all bus addresses that are passed to the SEP
- * are in 32 bit format; the SEP is a 32 bit device
- */
-static int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
- unsigned long app_in_address,
- unsigned long app_out_address,
- u32 data_in_size,
- u32 block_size,
- u32 tail_block_size,
- bool isapplet,
- bool is_kva)
-{
- int error = 0;
- /* Size of tail */
- u32 tail_size = 0;
- /* Address of the created DCB table */
- struct sep_dcblock *dcb_table_ptr = NULL;
- /* The physical address of the first input DMA table */
- dma_addr_t in_first_mlli_address = 0;
- /* Number of entries in the first input DMA table */
- u32 in_first_num_entries = 0;
- /* The physical address of the first output DMA table */
- dma_addr_t out_first_mlli_address = 0;
- /* Number of entries in the first output DMA table */
- u32 out_first_num_entries = 0;
- /* Data in the first input/output table */
- u32 first_data_size = 0;
-
- if (sep->nr_dcb_creat == SEP_MAX_NUM_SYNC_DMA_OPS) {
- /* No more DCBs to allocate */
- dev_warn(&sep->pdev->dev, "no more DCBs available\n");
- error = -ENOSPC;
- goto end_function;
- }
-
- /* Allocate new DCB */
- dcb_table_ptr = (struct sep_dcblock *)(sep->shared_addr +
- SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES +
- (sep->nr_dcb_creat * sizeof(struct sep_dcblock)));
-
- /* Set the default values in the DCB */
- dcb_table_ptr->input_mlli_address = 0;
- dcb_table_ptr->input_mlli_num_entries = 0;
- dcb_table_ptr->input_mlli_data_size = 0;
- dcb_table_ptr->output_mlli_address = 0;
- dcb_table_ptr->output_mlli_num_entries = 0;
- dcb_table_ptr->output_mlli_data_size = 0;
- dcb_table_ptr->tail_data_size = 0;
- dcb_table_ptr->out_vr_tail_pt = 0;
-
- if (isapplet == true) {
-
- /* Check if there is enough data for DMA operation */
- if (data_in_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) {
- if (is_kva == true) {
- memcpy(dcb_table_ptr->tail_data,
- (void *)app_in_address, data_in_size);
- } else {
- if (copy_from_user(dcb_table_ptr->tail_data,
- (void __user *)app_in_address,
- data_in_size)) {
- error = -EFAULT;
- goto end_function;
- }
- }
-
- dcb_table_ptr->tail_data_size = data_in_size;
-
- /* Set the output user-space address for mem2mem op */
- if (app_out_address)
- dcb_table_ptr->out_vr_tail_pt =
- (aligned_u64)app_out_address;
-
- /*
- * Update both data length parameters in order to avoid
- * second data copy and allow building of empty mlli
- * tables
- */
- tail_size = 0x0;
- data_in_size = 0x0;
-
- } else {
- if (!app_out_address) {
- tail_size = data_in_size % block_size;
- if (!tail_size) {
- if (tail_block_size == block_size)
- tail_size = block_size;
- }
- } else {
- tail_size = 0;
- }
- }
- if (tail_size) {
- if (tail_size > sizeof(dcb_table_ptr->tail_data))
- return -EINVAL;
- if (is_kva == true) {
- memcpy(dcb_table_ptr->tail_data,
- (void *)(app_in_address + data_in_size -
- tail_size), tail_size);
- } else {
- /* We have tail data - copy it to DCB */
- if (copy_from_user(dcb_table_ptr->tail_data,
- (void *)(app_in_address +
- data_in_size - tail_size), tail_size)) {
- error = -EFAULT;
- goto end_function;
- }
- }
- if (app_out_address)
- /*
- * Calculate the output address
- * according to tail data size
- */
- dcb_table_ptr->out_vr_tail_pt =
- (aligned_u64)app_out_address + data_in_size
- - tail_size;
-
- /* Save the real tail data size */
- dcb_table_ptr->tail_data_size = tail_size;
- /*
- * Update the data size without the tail
- * data size AKA data for the dma
- */
- data_in_size = (data_in_size - tail_size);
- }
- }
- /* Check if we need to build only input table or input/output */
- if (app_out_address) {
- /* Prepare input/output tables */
- error = sep_prepare_input_output_dma_table(sep,
- app_in_address,
- app_out_address,
- data_in_size,
- block_size,
- &in_first_mlli_address,
- &out_first_mlli_address,
- &in_first_num_entries,
- &out_first_num_entries,
- &first_data_size,
- is_kva);
- } else {
- /* Prepare input tables */
- error = sep_prepare_input_dma_table(sep,
- app_in_address,
- data_in_size,
- block_size,
- &in_first_mlli_address,
- &in_first_num_entries,
- &first_data_size,
- is_kva);
- }
-
- if (error) {
- dev_warn(&sep->pdev->dev, "prepare DMA table call failed from prepare DCB call\n");
- goto end_function;
- }
-
- /* Set the DCB values */
- dcb_table_ptr->input_mlli_address = in_first_mlli_address;
- dcb_table_ptr->input_mlli_num_entries = in_first_num_entries;
- dcb_table_ptr->input_mlli_data_size = first_data_size;
- dcb_table_ptr->output_mlli_address = out_first_mlli_address;
- dcb_table_ptr->output_mlli_num_entries = out_first_num_entries;
- dcb_table_ptr->output_mlli_data_size = first_data_size;
-
-end_function:
- return error;
-
-}
-
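The applet path above boils down to a small rule for how many trailing bytes are copied into the DCB rather than DMA-ed: everything if the buffer is below SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE, nothing for mem2mem operations, otherwise the remainder modulo the block size, with a perfectly aligned buffer still keeping one full block back when tail_block_size equals block_size. A user-space model of that rule (the 16-byte minimum mirrors the config header; the function name is invented):

#include <stdint.h>
#include <stdio.h>

#define MIN_DMA_SIZE 16	/* stands in for SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE */

/* Return how many trailing bytes are copied into the DCB instead of DMA-ed */
static uint32_t dcb_tail_size(uint32_t data_in_size, uint32_t block_size,
			      uint32_t tail_block_size, int has_output)
{
	if (data_in_size < MIN_DMA_SIZE)
		return data_in_size;		/* whole buffer goes in the DCB */
	if (has_output)
		return 0;			/* mem2mem: no tail copy */

	uint32_t tail = data_in_size % block_size;

	if (!tail && tail_block_size == block_size)
		tail = block_size;		/* aligned buffer still keeps one block back */
	return tail;
}

int main(void)
{
	printf("%u\n", dcb_tail_size(100, 16, 16, 0));	/* 4  */
	printf("%u\n", dcb_tail_size(96, 16, 16, 0));	/* 16 */
	printf("%u\n", dcb_tail_size(10, 16, 16, 0));	/* 10 */
	return 0;
}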
-/**
- * sep_free_dma_tables_and_dcb - free DMA tables and DCBs
- * @sep: pointer to struct sep_device
- * @isapplet: indicates external application (used for kernel access)
- * @is_kva: indicates kernel addresses (only used for kernel crypto)
- *
- * This function frees the DMA tables and DCB
- */
-static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet,
- bool is_kva)
-{
- int i = 0;
- int error = 0;
- int error_temp = 0;
- struct sep_dcblock *dcb_table_ptr;
- unsigned long pt_hold;
- void *tail_pt;
-
- if (isapplet == true) {
- /* Set pointer to first DCB table */
- dcb_table_ptr = (struct sep_dcblock *)
- (sep->shared_addr +
- SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES);
-
- /* Go over each DCB and see if tail pointer must be updated */
- for (i = 0; i < sep->nr_dcb_creat; i++, dcb_table_ptr++) {
- if (dcb_table_ptr->out_vr_tail_pt) {
- pt_hold = (unsigned long)dcb_table_ptr->out_vr_tail_pt;
- tail_pt = (void *)pt_hold;
- if (is_kva == true) {
- memcpy(tail_pt,
- dcb_table_ptr->tail_data,
- dcb_table_ptr->tail_data_size);
- } else {
- error_temp = copy_to_user(
- tail_pt,
- dcb_table_ptr->tail_data,
- dcb_table_ptr->tail_data_size);
- }
- if (error_temp) {
- /* Release the DMA resource */
- error = -EFAULT;
- break;
- }
- }
- }
- }
- /* Free the output pages, if any */
- sep_free_dma_table_data_handler(sep);
-
- return error;
-}
-
-/**
- * sep_get_static_pool_addr_handler - get static pool address
- * @sep: pointer to struct sep_device
- *
- * This function sets the bus and virtual addresses of the static pool
- */
-static int sep_get_static_pool_addr_handler(struct sep_device *sep)
-{
- u32 *static_pool_addr = NULL;
-
- static_pool_addr = (u32 *)(sep->shared_addr +
- SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES);
-
- static_pool_addr[0] = SEP_STATIC_POOL_VAL_TOKEN;
- static_pool_addr[1] = (u32)sep->shared_bus +
- SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
-
- dev_dbg(&sep->pdev->dev, "static pool segment: physical %x\n",
- (u32)static_pool_addr[1]);
-
- return 0;
-}
-
-/**
- * sep_end_transaction_handler - end transaction
- * @sep: pointer to struct sep_device
- *
- * This API handles the end transaction request
- */
-static int sep_end_transaction_handler(struct sep_device *sep)
-{
- /* Clear the data pool pointers Token */
- memset((void *)(sep->shared_addr +
- SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES),
- 0, sep->num_of_data_allocations*2*sizeof(u32));
-
- /* Check that all the DMA resources were freed */
- sep_free_dma_table_data_handler(sep);
-
- clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
-
- /*
- * We are now through with the transaction. Let's
- * allow other processes who have the device open
- * to perform transactions
- */
- mutex_lock(&sep->sep_mutex);
- sep->pid_doing_transaction = 0;
- mutex_unlock(&sep->sep_mutex);
- /* Raise event for stuck contexts */
- wake_up(&sep->event);
-
- return 0;
-}
-
-/**
- * sep_prepare_dcb_handler - prepare a control block
- * @sep: pointer to struct sep_device
- * @arg: pointer to user parameters
- *
- * This function will retrieve the RAR buffer physical addresses, type
- * & size corresponding to the RAR handles provided in the buffers vector.
- */
-static int sep_prepare_dcb_handler(struct sep_device *sep, unsigned long arg)
-{
- int error;
- /* Command arguments */
- struct build_dcb_struct command_args;
-
- /* Get the command arguments */
- if (copy_from_user(&command_args, (void __user *)arg,
- sizeof(struct build_dcb_struct))) {
- error = -EFAULT;
- goto end_function;
- }
-
- dev_dbg(&sep->pdev->dev, "prep dcb handler app_in_address is %08llx\n",
- command_args.app_in_address);
- dev_dbg(&sep->pdev->dev, "app_out_address is %08llx\n",
- command_args.app_out_address);
- dev_dbg(&sep->pdev->dev, "data_size is %x\n",
- command_args.data_in_size);
- dev_dbg(&sep->pdev->dev, "block_size is %x\n",
- command_args.block_size);
- dev_dbg(&sep->pdev->dev, "tail block_size is %x\n",
- command_args.tail_block_size);
-
- error = sep_prepare_input_output_dma_table_in_dcb(sep,
- (unsigned long)command_args.app_in_address,
- (unsigned long)command_args.app_out_address,
- command_args.data_in_size, command_args.block_size,
- command_args.tail_block_size, true, false);
-
-end_function:
- return error;
-
-}
-
-/**
- * sep_free_dcb_handler - free control block resources
- * @sep: pointer to struct sep_device
- *
- * This function frees the DCB resources and updates the needed
- * user-space buffers.
- */
-static int sep_free_dcb_handler(struct sep_device *sep)
-{
- return sep_free_dma_tables_and_dcb(sep, false, false);
-}
-
-/**
- * sep_rar_prepare_output_msg_handler - prepare an output message
- * @sep: pointer to struct sep_device
- * @arg: pointer to user parameters
- *
- * This function will retrieve the RAR buffer physical addresses, type
- * & size corresponding to the RAR handles provided in the buffers vector.
- */
-static int sep_rar_prepare_output_msg_handler(struct sep_device *sep,
- unsigned long arg)
-{
- int error = 0;
- /* Command args */
- struct rar_hndl_to_bus_struct command_args;
- /* Bus address */
- dma_addr_t rar_bus = 0;
- /* Holds the RAR address in the system memory offset */
- u32 *rar_addr;
-
- /* Copy the data */
- if (copy_from_user(&command_args, (void __user *)arg,
- sizeof(command_args))) {
- error = -EFAULT;
- goto end_function;
- }
-
- /* Call to translation function only if user handle is not NULL */
- if (command_args.rar_handle)
- return -EOPNOTSUPP;
- dev_dbg(&sep->pdev->dev, "rar msg; rar_addr_bus = %x\n", (u32)rar_bus);
-
- /* Set value in the SYSTEM MEMORY offset */
- rar_addr = (u32 *)(sep->shared_addr +
- SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES);
-
- /* Copy the physical address to the System Area for the SEP */
- rar_addr[0] = SEP_RAR_VAL_TOKEN;
- rar_addr[1] = rar_bus;
-
-end_function:
- return error;
-}
-
-/**
- * sep_ioctl - ioctl api
- * @filp: pointer to struct file
- * @cmd: command
- * @arg: pointer to argument structure
- *
- * Implement the ioctl methods available on the SEP device.
- */
-static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- int error = 0;
- struct sep_device *sep = filp->private_data;
-
- /* Make sure we own this device */
- mutex_lock(&sep->sep_mutex);
- if ((current->pid != sep->pid_doing_transaction) &&
- (sep->pid_doing_transaction != 0)) {
- dev_dbg(&sep->pdev->dev, "ioctl pid is not owner\n");
- error = -EACCES;
- }
- mutex_unlock(&sep->sep_mutex);
-
- if (error)
- return error;
-
- if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER)
- return -ENOTTY;
-
- /* Lock to prevent the daemon to interfere with operation */
- mutex_lock(&sep->ioctl_mutex);
-
- switch (cmd) {
- case SEP_IOCSENDSEPCOMMAND:
- /* Send command to SEP */
- error = sep_send_command_handler(sep);
- break;
- case SEP_IOCALLOCDATAPOLL:
- /* Allocate data pool */
- error = sep_allocate_data_pool_memory_handler(sep, arg);
- break;
- case SEP_IOCGETSTATICPOOLADDR:
- /* Inform the SEP the bus address of the static pool */
- error = sep_get_static_pool_addr_handler(sep);
- break;
- case SEP_IOCENDTRANSACTION:
- error = sep_end_transaction_handler(sep);
- break;
- case SEP_IOCRARPREPAREMESSAGE:
- error = sep_rar_prepare_output_msg_handler(sep, arg);
- break;
- case SEP_IOCPREPAREDCB:
- error = sep_prepare_dcb_handler(sep, arg);
- break;
- case SEP_IOCFREEDCB:
- error = sep_free_dcb_handler(sep);
- break;
- default:
- error = -ENOTTY;
- break;
- }
-
- mutex_unlock(&sep->ioctl_mutex);
- return error;
-}
-
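Seen from user space, a transaction against the main SEP node built on these handlers would prepare a DCB, signal the command, and finally free the DCB and end the transaction. The fragment below sketches only that ioctl ordering; it omits the message construction done through mmap() and the poll() wait for completion, and the device path, block size and error handling are assumptions for illustration rather than a documented ABI:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "sep_driver_api.h"	/* assumed to provide the ioctl numbers and structs */

int run_sep_transaction(const char *buf_in, char *buf_out, unsigned int len)
{
	struct build_dcb_struct dcb;
	int fd = open("/dev/sep_sec_driver", O_RDWR);	/* assumed misc node name */

	if (fd < 0)
		return -1;

	memset(&dcb, 0, sizeof(dcb));
	dcb.app_in_address = (unsigned long)buf_in;
	dcb.app_out_address = (unsigned long)buf_out;
	dcb.data_in_size = len;
	dcb.block_size = 16;		/* example: AES block size */
	dcb.tail_block_size = 16;

	if (ioctl(fd, SEP_IOCPREPAREDCB, &dcb) ||	/* build DMA tables + DCB */
	    ioctl(fd, SEP_IOCSENDSEPCOMMAND) ||		/* signal the SEP */
	    ioctl(fd, SEP_IOCFREEDCB) ||		/* release DMA resources */
	    ioctl(fd, SEP_IOCENDTRANSACTION)) {		/* let other processes in */
		close(fd);
		return -1;
	}

	close(fd);
	return 0;
}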
-/**
- * sep_singleton_ioctl - ioctl api for singleton interface
- * @filp: pointer to struct file
- * @cmd: command
- * @arg: pointer to argument structure
- *
- * Implement the additional ioctls for the singleton device
- */
-static long sep_singleton_ioctl(struct file *filp, u32 cmd, unsigned long arg)
-{
- long error = 0;
- struct sep_device *sep = filp->private_data;
-
- /* Check that the command is for the SEP device */
- if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER)
- return -ENOTTY;
-
- /* Make sure we own this device */
- mutex_lock(&sep->sep_mutex);
- if ((current->pid != sep->pid_doing_transaction) &&
- (sep->pid_doing_transaction != 0)) {
- dev_dbg(&sep->pdev->dev, "singleton ioctl pid is not owner\n");
- mutex_unlock(&sep->sep_mutex);
- return -EACCES;
- }
-
- mutex_unlock(&sep->sep_mutex);
-
- switch (cmd) {
- case SEP_IOCTLSETCALLERID:
- mutex_lock(&sep->ioctl_mutex);
- error = sep_set_caller_id_handler(sep, arg);
- mutex_unlock(&sep->ioctl_mutex);
- break;
- default:
- error = sep_ioctl(filp, cmd, arg);
- break;
- }
- return error;
-}
-
-/**
- * sep_request_daemon_ioctl - ioctl for daemon
- * @filp: pointer to struct file
- * @cmd: command
- * @arg: pointer to argument structure
- *
- * Called by the request daemon to perform ioctls on the daemon device
- */
-static long sep_request_daemon_ioctl(struct file *filp, u32 cmd,
- unsigned long arg)
-{
-
- long error;
- struct sep_device *sep = filp->private_data;
-
- /* Check that the command is for SEP device */
- if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER)
- return -ENOTTY;
-
- /* Only one process can access ioctl at any given time */
- mutex_lock(&sep->ioctl_mutex);
-
- switch (cmd) {
- case SEP_IOCSENDSEPRPLYCOMMAND:
- /* Send reply command to SEP */
- error = sep_req_daemon_send_reply_command_handler(sep);
- break;
- case SEP_IOCENDTRANSACTION:
- /*
- * End req daemon transaction; do nothing.
- * This will be removed upon an update of the middleware
- * API library
- */
- error = 0;
- break;
- default:
- error = -ENOTTY;
- }
- mutex_unlock(&sep->ioctl_mutex);
- return error;
-}
-
-/**
- * sep_inthandler - interrupt handler
- * @irq: interrupt
- * @dev_id: device id
- */
-static irqreturn_t sep_inthandler(int irq, void *dev_id)
-{
- irqreturn_t int_error = IRQ_HANDLED;
- unsigned long lck_flags;
- u32 reg_val, reg_val2 = 0;
- struct sep_device *sep = dev_id;
-
- /* Read the IRR register to check if this is SEP interrupt */
- reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
-
- if (reg_val & (0x1 << 13)) {
- /* Lock and update the counter of reply messages */
- spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
- sep->reply_ct++;
- spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
-
- dev_dbg(&sep->pdev->dev, "sep int: send_ct %lx reply_ct %lx\n",
- sep->send_ct, sep->reply_ct);
-
- /* Is this printf or daemon request? */
- reg_val2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
- dev_dbg(&sep->pdev->dev,
- "SEP Interrupt - reg2 is %08x\n", reg_val2);
-
- if ((reg_val2 >> 30) & 0x1) {
- dev_dbg(&sep->pdev->dev, "int: printf request\n");
- wake_up(&sep->event_request_daemon);
- } else if (reg_val2 >> 31) {
- dev_dbg(&sep->pdev->dev, "int: daemon request\n");
- wake_up(&sep->event_request_daemon);
- } else {
- dev_dbg(&sep->pdev->dev, "int: SEP reply\n");
- wake_up(&sep->event);
- }
- } else {
- dev_dbg(&sep->pdev->dev, "int: not SEP interrupt\n");
- int_error = IRQ_NONE;
- }
- if (int_error == IRQ_HANDLED)
- sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
-
- return int_error;
-}
-
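The handler demultiplexes GPR2 with its two top bits: bit 30 set means a SEP printf request, otherwise bit 31 set means a daemon request, and anything else is an ordinary reply to the host. A tiny stand-alone decode of that convention (enum and function names are made up):

#include <stdint.h>
#include <stdio.h>

enum sep_irq_source { SEP_IRQ_REPLY, SEP_IRQ_PRINTF, SEP_IRQ_DAEMON };

static enum sep_irq_source classify_gpr2(uint32_t reg_val2)
{
	if ((reg_val2 >> 30) & 0x1)
		return SEP_IRQ_PRINTF;	/* checked first, exactly as in the handler */
	if (reg_val2 >> 31)
		return SEP_IRQ_DAEMON;
	return SEP_IRQ_REPLY;
}

int main(void)
{
	printf("%d %d %d\n",
	       classify_gpr2(0x00000001),	/* reply  */
	       classify_gpr2(0x40000000),	/* printf */
	       classify_gpr2(0x80000000));	/* daemon */
	return 0;
}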
-/**
- * sep_reconfig_shared_area - reconfigure shared area
- * @sep: pointer to struct sep_device
- *
- * Reconfig the shared area between HOST and SEP - needed in case
- * the DX_CC_Init function was called before OS loading.
- */
-static int sep_reconfig_shared_area(struct sep_device *sep)
-{
- int ret_val;
-
- /* use to limit waiting for SEP */
- unsigned long end_time;
-
- /* Send the new SHARED MESSAGE AREA to the SEP */
- dev_dbg(&sep->pdev->dev, "reconfig shared; sending %08llx to sep\n",
- (unsigned long long)sep->shared_bus);
-
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
-
- /* Poll for SEP response */
- ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
-
- end_time = jiffies + (WAIT_TIME * HZ);
-
- while ((time_before(jiffies, end_time)) && (ret_val != 0xffffffff) &&
- (ret_val != sep->shared_bus))
- ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
-
- /* Check the return value (register) */
- if (ret_val != sep->shared_bus) {
- dev_warn(&sep->pdev->dev, "could not reconfig shared area\n");
- dev_warn(&sep->pdev->dev, "result was %x\n", ret_val);
- ret_val = -ENOMEM;
- } else
- ret_val = 0;
-
- dev_dbg(&sep->pdev->dev, "reconfig shared area end\n");
- return ret_val;
-}
-
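The reconfiguration handshake is a bounded poll: write the new shared-area bus address into a GPR, then re-read the SEP's answer register until it echoes the value, reads back as 0xffffffff, or WAIT_TIME expires. A generic user-space rendering of that pattern with a fake register (all names invented; the timeout is shortened so the demo finishes quickly):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define WAIT_TIME_SEC 1		/* the driver uses WAIT_TIME (10 s); shortened here */

static uint32_t fake_gpr1;	/* stands in for the SEP's GPR1 answer register */

static uint32_t read_answer_reg(void)
{
	return fake_gpr1;
}

/* Poll until the device echoes 'expected', reports 0xffffffff, or time runs out */
static int wait_for_echo(uint32_t expected)
{
	time_t deadline = time(NULL) + WAIT_TIME_SEC;
	uint32_t val = read_answer_reg();

	while (time(NULL) < deadline && val != 0xffffffff && val != expected)
		val = read_answer_reg();

	return val == expected ? 0 : -1;
}

int main(void)
{
	fake_gpr1 = 0x12345678;
	printf("%d\n", wait_for_echo(0x12345678));	/* 0: value echoed */
	printf("%d\n", wait_for_echo(0xdeadbeef));	/* -1 after the short timeout */
	return 0;
}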
-/* File operation for singleton SEP operations */
-static const struct file_operations singleton_file_operations = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = sep_singleton_ioctl,
- .poll = sep_poll,
- .open = sep_singleton_open,
- .release = sep_singleton_release,
- .mmap = sep_mmap,
-};
-
-/* File operation for daemon operations */
-static const struct file_operations daemon_file_operations = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = sep_request_daemon_ioctl,
- .poll = sep_request_daemon_poll,
- .open = sep_request_daemon_open,
- .release = sep_request_daemon_release,
- .mmap = sep_request_daemon_mmap,
-};
-
-/* The files operations structure of the driver */
-static const struct file_operations sep_file_operations = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = sep_ioctl,
- .poll = sep_poll,
- .open = sep_open,
- .release = sep_release,
- .mmap = sep_mmap,
-};
-
-/**
- * sep_register_driver_with_fs - register misc devices
- * @sep: pointer to struct sep_device
- *
- * This function registers the driver with the file system
- */
-static int sep_register_driver_with_fs(struct sep_device *sep)
-{
- int ret_val;
-
- sep->miscdev_sep.minor = MISC_DYNAMIC_MINOR;
- sep->miscdev_sep.name = SEP_DEV_NAME;
- sep->miscdev_sep.fops = &sep_file_operations;
-
- sep->miscdev_singleton.minor = MISC_DYNAMIC_MINOR;
- sep->miscdev_singleton.name = SEP_DEV_SINGLETON;
- sep->miscdev_singleton.fops = &singleton_file_operations;
-
- sep->miscdev_daemon.minor = MISC_DYNAMIC_MINOR;
- sep->miscdev_daemon.name = SEP_DEV_DAEMON;
- sep->miscdev_daemon.fops = &daemon_file_operations;
-
- ret_val = misc_register(&sep->miscdev_sep);
- if (ret_val) {
- dev_warn(&sep->pdev->dev, "misc reg fails for SEP %x\n",
- ret_val);
- return ret_val;
- }
-
- ret_val = misc_register(&sep->miscdev_singleton);
- if (ret_val) {
- dev_warn(&sep->pdev->dev, "misc reg fails for sing %x\n",
- ret_val);
- misc_deregister(&sep->miscdev_sep);
- return ret_val;
- }
-
- ret_val = misc_register(&sep->miscdev_daemon);
- if (ret_val) {
- dev_warn(&sep->pdev->dev, "misc reg fails for dmn %x\n",
- ret_val);
- misc_deregister(&sep->miscdev_sep);
- misc_deregister(&sep->miscdev_singleton);
-
- return ret_val;
- }
- return ret_val;
-}
-
-
-/**
- * sep_probe - probe a matching PCI device
- * @pdev: pci_device
- * @end: pci_device_id
- *
- * Attempt to set up and configure a SEP device that has been
- * discovered by the PCI layer.
- */
-static int __devinit sep_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- int error = 0;
- struct sep_device *sep;
-
- if (sep_dev != NULL) {
- dev_warn(&pdev->dev, "only one SEP supported.\n");
- return -EBUSY;
- }
-
- /* Enable the device */
- error = pci_enable_device(pdev);
- if (error) {
- dev_warn(&pdev->dev, "error enabling pci device\n");
- goto end_function;
- }
-
- /* Allocate the sep_device structure for this device */
- sep_dev = kzalloc(sizeof(struct sep_device), GFP_ATOMIC);
- if (sep_dev == NULL) {
- dev_warn(&pdev->dev,
- "can't kmalloc the sep_device structure\n");
- error = -ENOMEM;
- goto end_function_disable_device;
- }
-
- /*
- * We're going to use another variable for actually
- * working with the device; this way, if we have
- * multiple devices in the future, it would be easier
- * to make appropriate changes
- */
- sep = sep_dev;
-
- sep->pdev = pci_dev_get(pdev);
-
- init_waitqueue_head(&sep->event);
- init_waitqueue_head(&sep->event_request_daemon);
- spin_lock_init(&sep->snd_rply_lck);
- mutex_init(&sep->sep_mutex);
- mutex_init(&sep->ioctl_mutex);
-
- dev_dbg(&sep->pdev->dev, "sep probe: PCI obtained, device being prepared\n");
- dev_dbg(&sep->pdev->dev, "revision is %d\n", sep->pdev->revision);
-
- /* Set up our register area */
- sep->reg_physical_addr = pci_resource_start(sep->pdev, 0);
- if (!sep->reg_physical_addr) {
- dev_warn(&sep->pdev->dev, "Error getting register start\n");
- error = -ENODEV;
- goto end_function_free_sep_dev;
- }
-
- sep->reg_physical_end = pci_resource_end(sep->pdev, 0);
- if (!sep->reg_physical_end) {
- dev_warn(&sep->pdev->dev, "Error getting register end\n");
- error = -ENODEV;
- goto end_function_free_sep_dev;
- }
-
- sep->reg_addr = ioremap_nocache(sep->reg_physical_addr,
- (size_t)(sep->reg_physical_end - sep->reg_physical_addr + 1));
- if (!sep->reg_addr) {
- dev_warn(&sep->pdev->dev, "Error getting register virtual\n");
- error = -ENODEV;
- goto end_function_free_sep_dev;
- }
-
- dev_dbg(&sep->pdev->dev,
- "Register area start %llx end %llx virtual %p\n",
- (unsigned long long)sep->reg_physical_addr,
- (unsigned long long)sep->reg_physical_end,
- sep->reg_addr);
-
- /* Allocate the shared area */
- sep->shared_size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
- SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES +
- SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
- SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
- SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
-
- if (sep_map_and_alloc_shared_area(sep)) {
- error = -ENOMEM;
- /* Allocation failed */
- goto end_function_error;
- }
-
- /* Clear ICR register */
- sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
-
- /* Set the IMR register - open only GPR 2 */
- sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
-
- /* Read send/receive counters from SEP */
- sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
- sep->reply_ct &= 0x3FFFFFFF;
- sep->send_ct = sep->reply_ct;
-
- /* Get the interrupt line */
- error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED,
- "sep_driver", sep);
-
- if (error)
- goto end_function_deallocate_sep_shared_area;
-
- /* The new chip requires a shared area reconfigure */
- if (sep->pdev->revision == 4) { /* Only for new chip */
- error = sep_reconfig_shared_area(sep);
- if (error)
- goto end_function_free_irq;
- }
- /* Finally magic up the device nodes */
- /* Register driver with the fs */
- error = sep_register_driver_with_fs(sep);
- if (error == 0)
- /* Success */
- return 0;
-
-end_function_free_irq:
- free_irq(pdev->irq, sep);
-
-end_function_deallocate_sep_shared_area:
- /* De-allocate shared area */
- sep_unmap_and_free_shared_area(sep);
-
-end_function_error:
- iounmap(sep->reg_addr);
-
-end_function_free_sep_dev:
- pci_dev_put(sep_dev->pdev);
- kfree(sep_dev);
- sep_dev = NULL;
-
-end_function_disable_device:
- pci_disable_device(pdev);
-
-end_function:
- return error;
-}
-
-static void sep_remove(struct pci_dev *pdev)
-{
- struct sep_device *sep = sep_dev;
-
- /* Unregister from fs */
- misc_deregister(&sep->miscdev_sep);
- misc_deregister(&sep->miscdev_singleton);
- misc_deregister(&sep->miscdev_daemon);
-
- /* Free the irq */
- free_irq(sep->pdev->irq, sep);
-
- /* Free the shared area */
- sep_unmap_and_free_shared_area(sep_dev);
- iounmap((void *) sep_dev->reg_addr);
-}
-
-static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl) = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MFLD_PCI_DEVICE_ID)},
- {0}
-};
-
-MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
-
-/* Field for registering driver to PCI device */
-static struct pci_driver sep_pci_driver = {
- .name = "sep_sec_driver",
- .id_table = sep_pci_id_tbl,
- .probe = sep_probe,
- .remove = sep_remove
-};
-
-
-/**
- * sep_init - init function
- *
- * Module load time. Register the PCI device driver.
- */
-static int __init sep_init(void)
-{
- return pci_register_driver(&sep_pci_driver);
-}
-
-
-/**
- * sep_exit - called to unload driver
- *
- * Drop the misc devices then remove and unmap the various resources
- * that are not released by the driver remove method.
- */
-static void __exit sep_exit(void)
-{
- pci_unregister_driver(&sep_pci_driver);
-}
-
-
-module_init(sep_init);
-module_exit(sep_exit);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/sep/sep_driver_api.h b/drivers/staging/sep/sep_driver_api.h
index c3aacfcc8ac6..8b797d5388bb 100644
--- a/drivers/staging/sep/sep_driver_api.h
+++ b/drivers/staging/sep/sep_driver_api.h
@@ -2,8 +2,8 @@
*
* sep_driver_api.h - Security Processor Driver api definitions
*
- * Copyright(c) 2009,2010 Intel Corporation. All rights reserved.
- * Contributions(c) 2009,2010 Discretix. All rights reserved.
+ * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
+ * Contributions(c) 2009-2011 Discretix. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -26,6 +26,7 @@
* CHANGES:
*
* 2010.09.14 Upgrade to Medfield
+ * 2011.02.22 Enable kernel crypto
*
*/
@@ -37,26 +38,32 @@
#define SEP_DRIVER_SRC_REQ 2
#define SEP_DRIVER_SRC_PRINTF 3
-
-/*-------------------------------------------
- TYPEDEFS
-----------------------------------------------*/
-
-struct alloc_struct {
- /* offset from start of shared pool area */
- u32 offset;
- /* number of bytes to allocate */
- u32 num_bytes;
-};
-
-/* command struct for getting caller id value and address */
-struct caller_id_struct {
- /* pid of the process */
- u32 pid;
- /* virtual address of the caller id hash */
- aligned_u64 callerIdAddress;
- /* caller id hash size in bytes */
- u32 callerIdSizeInBytes;
+/* Power state */
+#define SEP_DRIVER_POWERON 1
+#define SEP_DRIVER_POWEROFF 2
+
+/* Following enums are used only for kernel crypto api */
+enum type_of_request {
+ NO_REQUEST,
+ AES_CBC,
+ AES_ECB,
+ DES_CBC,
+ DES_ECB,
+ DES3_ECB,
+ DES3_CBC,
+ SHA1,
+ MD5,
+ SHA224,
+ SHA256
+ };
+
+enum hash_stage {
+ HASH_INIT,
+ HASH_UPDATE,
+ HASH_FINISH,
+ HASH_DIGEST,
+ HASH_FINUP_DATA,
+ HASH_FINUP_FINISH
};
/*
@@ -83,11 +90,6 @@ struct sep_dcblock {
u8 tail_data[68];
};
-struct sep_caller_id_entry {
- int pid;
- unsigned char callerIdHash[SEP_CALLER_ID_HASH_SIZE_IN_BYTES];
-};
-
/*
command structure for building dcb block (currently for ext app only
*/
@@ -104,6 +106,33 @@ struct build_dcb_struct {
/* the size of the block of the operation - if needed,
every table will be modulo this parameter */
u32 tail_block_size;
+
+ /* which application calls the driver DX or applet */
+ u32 is_applet;
+};
+
+/*
+ command structure for building dcb block for kernel crypto
+*/
+struct build_dcb_struct_kernel {
+ /* address value of the data in */
+ void *app_in_address;
+ /* size of data in */
+ ssize_t data_in_size;
+ /* address of the data out */
+ void *app_out_address;
+ /* the size of the block of the operation - if needed,
+ every table will be modulo this parameter */
+ u32 block_size;
+ /* the size of the block of the operation - if needed,
+ every table will be modulo this parameter */
+ u32 tail_block_size;
+
+ /* which application calls the driver DX or applet */
+ u32 is_applet;
+
+ struct scatterlist *src_sg;
+ struct scatterlist *dst_sg;
};
/**
@@ -147,6 +176,10 @@ struct sep_dma_resource {
/* number of entries of the output mapp array */
u32 out_map_num_entries;
+
+ /* Scatter list for kernel operations */
+ struct scatterlist *src_sg;
+ struct scatterlist *dst_sg;
};
@@ -169,47 +202,201 @@ struct sep_lli_entry {
u32 block_size;
};
-/*----------------------------------------------------------------
- IOCTL command defines
- -----------------------------------------------------------------*/
+/*
+ * header format for each fastcall write operation
+ */
+struct sep_fastcall_hdr {
+ u32 magic;
+ u32 secure_dma;
+ u32 msg_len;
+ u32 num_dcbs;
+};
-/* magic number 1 of the sep IOCTL command */
-#define SEP_IOC_MAGIC_NUMBER 's'
+/*
+ * structure used in file pointer's private data field
+ * to track the status of the calls to the various
+ * driver interface
+ */
+struct sep_call_status {
+ unsigned long status;
+};
-/* sends interrupt to sep that message is ready */
-#define SEP_IOCSENDSEPCOMMAND \
- _IO(SEP_IOC_MAGIC_NUMBER, 0)
+/*
+ * format of dma context buffer used to store all DMA-related
+ * context information of a particular transaction
+ */
+struct sep_dma_context {
+ /* number of data control blocks */
+ u32 nr_dcb_creat;
+ /* number of the lli tables created in the current transaction */
+ u32 num_lli_tables_created;
+ /* size of currently allocated dma tables region */
+ u32 dmatables_len;
+ /* size of input data */
+ u32 input_data_len;
+ /* secure dma use (for imr memory restricted area in output) */
+ bool secure_dma;
+ struct sep_dma_resource dma_res_arr[SEP_MAX_NUM_SYNC_DMA_OPS];
+ /* Scatter gather for kernel crypto */
+ struct scatterlist *src_sg;
+ struct scatterlist *dst_sg;
+};
-/* sends interrupt to sep that message is ready */
-#define SEP_IOCSENDSEPRPLYCOMMAND \
- _IO(SEP_IOC_MAGIC_NUMBER, 1)
+/*
+ * format for file pointer's private_data field
+ */
+struct sep_private_data {
+ struct sep_queue_info *my_queue_elem;
+ struct sep_device *device;
+ struct sep_call_status call_status;
+ struct sep_dma_context *dma_ctx;
+};
-/* allocate memory in data pool */
-#define SEP_IOCALLOCDATAPOLL \
- _IOW(SEP_IOC_MAGIC_NUMBER, 2, struct alloc_struct)
-/* free dynamic data allocated during table creation */
-#define SEP_IOCFREEDMATABLEDATA \
- _IO(SEP_IOC_MAGIC_NUMBER, 7)
+/* Functions used by sep_crypto */
-/* get the static pool area addresses (physical and virtual) */
-#define SEP_IOCGETSTATICPOOLADDR \
- _IO(SEP_IOC_MAGIC_NUMBER, 8)
+/**
+ * sep_queue_status_remove - Removes transaction from status queue
+ * @sep: SEP device
+ * @sep_queue_info: pointer to status queue
+ *
+ * This function removes information about a transaction from the queue.
+ */
+void sep_queue_status_remove(struct sep_device *sep,
+ struct sep_queue_info **queue_elem);
+/**
+ * sep_queue_status_add - Adds transaction to status queue
+ * @sep: SEP device
+ * @opcode: transaction opcode
+ * @size: input data size
+ * @pid: pid of current process
+ * @name: current process name
+ * @name_len: length of name (current process)
+ *
+ * This function adds information about a started transaction to the status
+ * queue.
+ */
+struct sep_queue_info *sep_queue_status_add(
+ struct sep_device *sep,
+ u32 opcode,
+ u32 size,
+ u32 pid,
+ u8 *name, size_t name_len);
+
+/**
+ * sep_create_dcb_dmatables_context_kernel - Creates DCB & MLLI/DMA table context
+ * for kernel crypto
+ * @sep: SEP device
+ * @dcb_region: DCB region buf to create for current transaction
+ * @dmatables_region: MLLI/DMA tables buf to create for current transaction
+ * @dma_ctx: DMA context buf to create for current transaction
+ * @user_dcb_args: User arguments for DCB/MLLI creation
+ * @num_dcbs: Number of DCBs to create
+ */
+int sep_create_dcb_dmatables_context_kernel(struct sep_device *sep,
+ struct sep_dcblock **dcb_region,
+ void **dmatables_region,
+ struct sep_dma_context **dma_ctx,
+ const struct build_dcb_struct_kernel *dcb_data,
+ const u32 num_dcbs);
+
+/**
+ * sep_activate_dcb_dmatables_context - Takes DCB & DMA tables
+ * contexts into use
+ * @sep: SEP device
+ * @dcb_region: DCB region copy
+ * @dmatables_region: MLLI/DMA tables copy
+ * @dma_ctx: DMA context for current transaction
+ */
+ssize_t sep_activate_dcb_dmatables_context(struct sep_device *sep,
+ struct sep_dcblock **dcb_region,
+ void **dmatables_region,
+ struct sep_dma_context *dma_ctx);
+
+/**
+ * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
+ * @app_in_address: unsigned long; for data buffer in (user space)
+ * @app_out_address: unsigned long; for data buffer out (user space)
+ * @data_in_size: u32; for size of data
+ * @block_size: u32; for block size
+ * @tail_block_size: u32; for size of tail block
+ * @isapplet: bool; to indicate external app
+ * @is_kva: bool; kernel buffer; only used for kernel crypto module
+ * @secure_dma: indicates whether this is secure_dma using IMR
+ *
+ * This function prepares the linked DMA tables and puts the
+ * address for the linked list of tables into a DCB (data control
+ * block), the address of which is known by the SEP hardware
+ * Note that all bus addresses that are passed to the SEP
+ * are in 32 bit format; the SEP is a 32 bit device
+ */
+int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
+ unsigned long app_in_address,
+ unsigned long app_out_address,
+ u32 data_in_size,
+ u32 block_size,
+ u32 tail_block_size,
+ bool isapplet,
+ bool is_kva,
+ bool secure_dma,
+ struct sep_dcblock *dcb_region,
+ void **dmatables_region,
+ struct sep_dma_context **dma_ctx,
+ struct scatterlist *src_sg,
+ struct scatterlist *dst_sg);
+
+/**
+ * sep_free_dma_table_data_handler - free DMA table
+ * @sep: pointer to struct sep_device
+ * @dma_ctx: dma context
+ *
+ * Handles the request to free the DMA table for synchronous actions
+ */
+int sep_free_dma_table_data_handler(struct sep_device *sep,
+ struct sep_dma_context **dma_ctx);
+/**
+ * sep_send_command_handler - kick off a command
+ * @sep: SEP being signalled
+ *
+ * This function raises an interrupt to the SEP, signalling that there is a new
+ * command from the host
+ *
+ * Note that this function does fall under the ioctl lock
+ */
+int sep_send_command_handler(struct sep_device *sep);
+
+/**
+ * sep_wait_transaction - Used for synchronizing transactions
+ * @sep: SEP device
+ */
+int sep_wait_transaction(struct sep_device *sep);
+
+/**
+ * IOCTL command defines
+ */
+/* magic number 1 of the sep IOCTL command */
+#define SEP_IOC_MAGIC_NUMBER 's'
+
+/* sends interrupt to sep that message is ready */
+#define SEP_IOCSENDSEPCOMMAND \
+ _IO(SEP_IOC_MAGIC_NUMBER, 0)
/* end transaction command */
#define SEP_IOCENDTRANSACTION \
_IO(SEP_IOC_MAGIC_NUMBER, 15)
-#define SEP_IOCRARPREPAREMESSAGE \
- _IOW(SEP_IOC_MAGIC_NUMBER, 20, struct rar_hndl_to_bus_struct)
-
-#define SEP_IOCTLSETCALLERID \
- _IOW(SEP_IOC_MAGIC_NUMBER, 34, struct caller_id_struct)
-
#define SEP_IOCPREPAREDCB \
_IOW(SEP_IOC_MAGIC_NUMBER, 35, struct build_dcb_struct)
#define SEP_IOCFREEDCB \
_IO(SEP_IOC_MAGIC_NUMBER, 36)
+struct sep_device;
+
+#define SEP_IOCPREPAREDCB_SECURE_DMA \
+ _IOW(SEP_IOC_MAGIC_NUMBER, 38, struct build_dcb_struct)
+
+#define SEP_IOCFREEDCB_SECURE_DMA \
+ _IO(SEP_IOC_MAGIC_NUMBER, 39)
+
#endif
diff --git a/drivers/staging/sep/sep_driver_config.h b/drivers/staging/sep/sep_driver_config.h
index d6bfd2455222..fa7c0d09bfa5 100644
--- a/drivers/staging/sep/sep_driver_config.h
+++ b/drivers/staging/sep/sep_driver_config.h
@@ -2,8 +2,8 @@
*
* sep_driver_config.h - Security Processor Driver configuration
*
- * Copyright(c) 2009,2010 Intel Corporation. All rights reserved.
- * Contributions(c) 2009,2010 Discretix. All rights reserved.
+ * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
+ * Contributions(c) 2009-2011 Discretix. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -26,6 +26,7 @@
* CHANGES:
*
* 2010.06.26 Upgrade to Medfield
+ * 2011.02.22 Enable kernel crypto
*
*/
@@ -48,6 +49,8 @@
/* the mode for running on the ARM1172 Evaluation platform (flag is 1) */
#define SEP_DRIVER_ARM_DEBUG_MODE 0
+/* Critical message area contents for sanity checking */
+#define SEP_START_MSG_TOKEN 0x02558808
/*-------------------------------------------
INTERNAL DATA CONFIGURATION
-------------------------------------------*/
@@ -65,21 +68,17 @@
#define SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE 16
/* flag that signifies tah the lock is
-currently held by the process (struct file) */
+currently held by the proccess (struct file) */
#define SEP_DRIVER_OWN_LOCK_FLAG 1
/* flag that signifies tah the lock is currently NOT
-held by the process (struct file) */
+held by the proccess (struct file) */
#define SEP_DRIVER_DISOWN_LOCK_FLAG 0
/* indicates whether driver has mapped/unmapped shared area */
#define SEP_REQUEST_DAEMON_MAPPED 1
#define SEP_REQUEST_DAEMON_UNMAPPED 0
-#define SEP_DEV_NAME "sep_sec_driver"
-#define SEP_DEV_SINGLETON "sep_sec_singleton_driver"
-#define SEP_DEV_DAEMON "sep_req_daemon_driver"
-
/*--------------------------------------------------------
SHARED AREA memory total size is 36K
it is divided is following:
@@ -90,7 +89,7 @@ held by the process (struct file) */
}
DATA_POOL_AREA 12K }
- SYNCHRONIC_DMA_TABLES_AREA 5K
+ SYNCHRONIC_DMA_TABLES_AREA 29K
placeholder until drver changes
FLOW_DMA_TABLES_AREA 4K
@@ -109,6 +108,12 @@ held by the process (struct file) */
/*
+ the minimum length of the message - includes 2 reserved fields
+ at the start, then the token, message size and opcode fields; all are dwords
+*/
+#define SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES (5*sizeof(u32))
+
+/*
the maximum length of the message - the rest of the message shared
area will be dedicated to the dma lli tables
*/
@@ -124,7 +129,7 @@ held by the process (struct file) */
#define SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES (16 * 1024)
/* the size of the message shared area in pages */
-#define SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES (1024 * 5)
+#define SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES (1024 * 29)
/* Placeholder until driver changes */
#define SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES (1024 * 4)
@@ -132,6 +137,9 @@ held by the process (struct file) */
/* system data (time, caller id etc') pool */
#define SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES (1024 * 3)
+/* Offset of the sep printf buffer in the message area */
+#define SEP_DRIVER_PRINTF_OFFSET_IN_BYTES (5888)
+
/* the size in bytes of the time memory */
#define SEP_DRIVER_TIME_MEMORY_SIZE_IN_BYTES 8
@@ -223,10 +231,10 @@ held by the process (struct file) */
#define SEP_ALREADY_INITIALIZED_ERR 12
/* bit that locks access to the shared area */
-#define SEP_MMAP_LOCK_BIT 0
+#define SEP_TRANSACTION_STARTED_LOCK_BIT 0
/* bit that lock access to the poll - after send_command */
-#define SEP_SEND_MSG_LOCK_BIT 1
+#define SEP_WORKING_LOCK_BIT 1
/* the token that defines the static pool address address */
#define SEP_STATIC_POOL_VAL_TOKEN 0xABBAABBA
@@ -240,4 +248,51 @@ held by the process (struct file) */
/* Time limit for SEP to finish */
#define WAIT_TIME 10
+/* Delay for pm runtime suspend (reduces pm thrashing with bursty traffic) */
+#define SUSPEND_DELAY 10
+
+/* Number of delays to wait until scu boots after runtime resume */
+#define SCU_DELAY_MAX 50
+
+/* Delay for each iteration (usec) waiting for the SCU to boot after runtime resume */
+#define SCU_DELAY_ITERATION 10
+
+
+/*
+ * Bits used in struct sep_call_status to check that
+ * driver's APIs are called in valid order
+ */
+
+/* Bit offset which indicates status of sep_write() */
+#define SEP_FASTCALL_WRITE_DONE_OFFSET 0
+
+/* Bit offset which indicates status of sep_mmap() */
+#define SEP_LEGACY_MMAP_DONE_OFFSET 1
+
+/* Bit offset which indicates status of the SEP_IOCSENDSEPCOMMAND ioctl */
+#define SEP_LEGACY_SENDMSG_DONE_OFFSET 2
+
+/* Bit offset which indicates status of sep_poll() */
+#define SEP_LEGACY_POLL_DONE_OFFSET 3
+
+/* Bit offset which indicates status of the SEP_IOCENDTRANSACTION ioctl */
+#define SEP_LEGACY_ENDTRANSACTION_DONE_OFFSET 4
+
+/*
+ * Used to limit number of concurrent processes
+ * allowed to allocate dynamic buffers in fastcall
+ * interface.
+ */
+#define SEP_DOUBLEBUF_USERS_LIMIT 3
+
+/* Identifier for valid fastcall header */
+#define SEP_FC_MAGIC 0xFFAACCAA
+
+/*
+ * Used for enabling driver runtime power management.
+ * Useful for enabling/disabling it during performance
+ * testing
+ */
+#define SEP_ENABLE_RUNTIME_PM
+
#endif /* SEP DRIVER CONFIG */
diff --git a/drivers/staging/sep/sep_driver_hw_defs.h b/drivers/staging/sep/sep_driver_hw_defs.h
index 300f90963de3..a6a448170382 100644
--- a/drivers/staging/sep/sep_driver_hw_defs.h
+++ b/drivers/staging/sep/sep_driver_hw_defs.h
@@ -2,8 +2,8 @@
*
* sep_driver_hw_defs.h - Security Processor Driver hardware definitions
*
- * Copyright(c) 2009,2010 Intel Corporation. All rights reserved.
- * Contributions(c) 2009,2010 Discretix. All rights reserved.
+ * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
+ * Contributions(c) 2009-2011 Discretix. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -26,15 +26,13 @@
* CHANGES:
*
* 2010.09.20 Upgrade to Medfield
+ * 2011.02.22 Enable kernel crypto
*
*/
#ifndef SEP_DRIVER_HW_DEFS__H
#define SEP_DRIVER_HW_DEFS__H
-/* PCI ID's */
-#define MFLD_PCI_DEVICE_ID 0x0826
-
/*----------------------- */
/* HW Registers Defines. */
/* */
@@ -42,181 +40,9 @@
/* cf registers */
-#define HW_R0B_ADDR_0_REG_ADDR 0x0000UL
-#define HW_R0B_ADDR_1_REG_ADDR 0x0004UL
-#define HW_R0B_ADDR_2_REG_ADDR 0x0008UL
-#define HW_R0B_ADDR_3_REG_ADDR 0x000cUL
-#define HW_R0B_ADDR_4_REG_ADDR 0x0010UL
-#define HW_R0B_ADDR_5_REG_ADDR 0x0014UL
-#define HW_R0B_ADDR_6_REG_ADDR 0x0018UL
-#define HW_R0B_ADDR_7_REG_ADDR 0x001cUL
-#define HW_R0B_ADDR_8_REG_ADDR 0x0020UL
-#define HW_R2B_ADDR_0_REG_ADDR 0x0080UL
-#define HW_R2B_ADDR_1_REG_ADDR 0x0084UL
-#define HW_R2B_ADDR_2_REG_ADDR 0x0088UL
-#define HW_R2B_ADDR_3_REG_ADDR 0x008cUL
-#define HW_R2B_ADDR_4_REG_ADDR 0x0090UL
-#define HW_R2B_ADDR_5_REG_ADDR 0x0094UL
-#define HW_R2B_ADDR_6_REG_ADDR 0x0098UL
-#define HW_R2B_ADDR_7_REG_ADDR 0x009cUL
-#define HW_R2B_ADDR_8_REG_ADDR 0x00a0UL
-#define HW_R3B_REG_ADDR 0x00C0UL
-#define HW_R4B_REG_ADDR 0x0100UL
-#define HW_CSA_ADDR_0_REG_ADDR 0x0140UL
-#define HW_CSA_ADDR_1_REG_ADDR 0x0144UL
-#define HW_CSA_ADDR_2_REG_ADDR 0x0148UL
-#define HW_CSA_ADDR_3_REG_ADDR 0x014cUL
-#define HW_CSA_ADDR_4_REG_ADDR 0x0150UL
-#define HW_CSA_ADDR_5_REG_ADDR 0x0154UL
-#define HW_CSA_ADDR_6_REG_ADDR 0x0158UL
-#define HW_CSA_ADDR_7_REG_ADDR 0x015cUL
-#define HW_CSA_ADDR_8_REG_ADDR 0x0160UL
-#define HW_CSA_REG_ADDR 0x0140UL
-#define HW_SINB_REG_ADDR 0x0180UL
-#define HW_SOUTB_REG_ADDR 0x0184UL
-#define HW_PKI_CONTROL_REG_ADDR 0x01C0UL
-#define HW_PKI_STATUS_REG_ADDR 0x01C4UL
-#define HW_PKI_BUSY_REG_ADDR 0x01C8UL
-#define HW_PKI_A_1025_REG_ADDR 0x01CCUL
-#define HW_PKI_SDMA_CTL_REG_ADDR 0x01D0UL
-#define HW_PKI_SDMA_OFFSET_REG_ADDR 0x01D4UL
-#define HW_PKI_SDMA_POINTERS_REG_ADDR 0x01D8UL
-#define HW_PKI_SDMA_DLENG_REG_ADDR 0x01DCUL
-#define HW_PKI_SDMA_EXP_POINTERS_REG_ADDR 0x01E0UL
-#define HW_PKI_SDMA_RES_POINTERS_REG_ADDR 0x01E4UL
-#define HW_PKI_CLR_REG_ADDR 0x01E8UL
-#define HW_PKI_SDMA_BUSY_REG_ADDR 0x01E8UL
-#define HW_PKI_SDMA_FIRST_EXP_N_REG_ADDR 0x01ECUL
-#define HW_PKI_SDMA_MUL_BY1_REG_ADDR 0x01F0UL
-#define HW_PKI_SDMA_RMUL_SEL_REG_ADDR 0x01F4UL
-#define HW_DES_KEY_0_REG_ADDR 0x0208UL
-#define HW_DES_KEY_1_REG_ADDR 0x020CUL
-#define HW_DES_KEY_2_REG_ADDR 0x0210UL
-#define HW_DES_KEY_3_REG_ADDR 0x0214UL
-#define HW_DES_KEY_4_REG_ADDR 0x0218UL
-#define HW_DES_KEY_5_REG_ADDR 0x021CUL
-#define HW_DES_CONTROL_0_REG_ADDR 0x0220UL
-#define HW_DES_CONTROL_1_REG_ADDR 0x0224UL
-#define HW_DES_IV_0_REG_ADDR 0x0228UL
-#define HW_DES_IV_1_REG_ADDR 0x022CUL
-#define HW_AES_KEY_0_ADDR_0_REG_ADDR 0x0400UL
-#define HW_AES_KEY_0_ADDR_1_REG_ADDR 0x0404UL
-#define HW_AES_KEY_0_ADDR_2_REG_ADDR 0x0408UL
-#define HW_AES_KEY_0_ADDR_3_REG_ADDR 0x040cUL
-#define HW_AES_KEY_0_ADDR_4_REG_ADDR 0x0410UL
-#define HW_AES_KEY_0_ADDR_5_REG_ADDR 0x0414UL
-#define HW_AES_KEY_0_ADDR_6_REG_ADDR 0x0418UL
-#define HW_AES_KEY_0_ADDR_7_REG_ADDR 0x041cUL
-#define HW_AES_KEY_0_REG_ADDR 0x0400UL
-#define HW_AES_IV_0_ADDR_0_REG_ADDR 0x0440UL
-#define HW_AES_IV_0_ADDR_1_REG_ADDR 0x0444UL
-#define HW_AES_IV_0_ADDR_2_REG_ADDR 0x0448UL
-#define HW_AES_IV_0_ADDR_3_REG_ADDR 0x044cUL
-#define HW_AES_IV_0_REG_ADDR 0x0440UL
-#define HW_AES_CTR1_ADDR_0_REG_ADDR 0x0460UL
-#define HW_AES_CTR1_ADDR_1_REG_ADDR 0x0464UL
-#define HW_AES_CTR1_ADDR_2_REG_ADDR 0x0468UL
-#define HW_AES_CTR1_ADDR_3_REG_ADDR 0x046cUL
-#define HW_AES_CTR1_REG_ADDR 0x0460UL
-#define HW_AES_SK_REG_ADDR 0x0478UL
-#define HW_AES_MAC_OK_REG_ADDR 0x0480UL
-#define HW_AES_PREV_IV_0_ADDR_0_REG_ADDR 0x0490UL
-#define HW_AES_PREV_IV_0_ADDR_1_REG_ADDR 0x0494UL
-#define HW_AES_PREV_IV_0_ADDR_2_REG_ADDR 0x0498UL
-#define HW_AES_PREV_IV_0_ADDR_3_REG_ADDR 0x049cUL
-#define HW_AES_PREV_IV_0_REG_ADDR 0x0490UL
-#define HW_AES_CONTROL_REG_ADDR 0x04C0UL
-#define HW_HASH_H0_REG_ADDR 0x0640UL
-#define HW_HASH_H1_REG_ADDR 0x0644UL
-#define HW_HASH_H2_REG_ADDR 0x0648UL
-#define HW_HASH_H3_REG_ADDR 0x064CUL
-#define HW_HASH_H4_REG_ADDR 0x0650UL
-#define HW_HASH_H5_REG_ADDR 0x0654UL
-#define HW_HASH_H6_REG_ADDR 0x0658UL
-#define HW_HASH_H7_REG_ADDR 0x065CUL
-#define HW_HASH_H8_REG_ADDR 0x0660UL
-#define HW_HASH_H9_REG_ADDR 0x0664UL
-#define HW_HASH_H10_REG_ADDR 0x0668UL
-#define HW_HASH_H11_REG_ADDR 0x066CUL
-#define HW_HASH_H12_REG_ADDR 0x0670UL
-#define HW_HASH_H13_REG_ADDR 0x0674UL
-#define HW_HASH_H14_REG_ADDR 0x0678UL
-#define HW_HASH_H15_REG_ADDR 0x067CUL
-#define HW_HASH_CONTROL_REG_ADDR 0x07C0UL
-#define HW_HASH_PAD_EN_REG_ADDR 0x07C4UL
-#define HW_HASH_PAD_CFG_REG_ADDR 0x07C8UL
-#define HW_HASH_CUR_LEN_0_REG_ADDR 0x07CCUL
-#define HW_HASH_CUR_LEN_1_REG_ADDR 0x07D0UL
-#define HW_HASH_CUR_LEN_2_REG_ADDR 0x07D4UL
-#define HW_HASH_CUR_LEN_3_REG_ADDR 0x07D8UL
-#define HW_HASH_PARAM_REG_ADDR 0x07DCUL
-#define HW_HASH_INT_BUSY_REG_ADDR 0x07E0UL
-#define HW_HASH_SW_RESET_REG_ADDR 0x07E4UL
-#define HW_HASH_ENDIANESS_REG_ADDR 0x07E8UL
-#define HW_HASH_DATA_REG_ADDR 0x07ECUL
-#define HW_DRNG_CONTROL_REG_ADDR 0x0800UL
-#define HW_DRNG_VALID_REG_ADDR 0x0804UL
-#define HW_DRNG_DATA_REG_ADDR 0x0808UL
-#define HW_RND_SRC_EN_REG_ADDR 0x080CUL
-#define HW_AES_CLK_ENABLE_REG_ADDR 0x0810UL
-#define HW_DES_CLK_ENABLE_REG_ADDR 0x0814UL
-#define HW_HASH_CLK_ENABLE_REG_ADDR 0x0818UL
-#define HW_PKI_CLK_ENABLE_REG_ADDR 0x081CUL
-#define HW_CLK_STATUS_REG_ADDR 0x0824UL
-#define HW_CLK_ENABLE_REG_ADDR 0x0828UL
-#define HW_DRNG_SAMPLE_REG_ADDR 0x0850UL
-#define HW_RND_SRC_CTL_REG_ADDR 0x0858UL
-#define HW_CRYPTO_CTL_REG_ADDR 0x0900UL
-#define HW_CRYPTO_STATUS_REG_ADDR 0x090CUL
-#define HW_CRYPTO_BUSY_REG_ADDR 0x0910UL
-#define HW_AES_BUSY_REG_ADDR 0x0914UL
-#define HW_DES_BUSY_REG_ADDR 0x0918UL
-#define HW_HASH_BUSY_REG_ADDR 0x091CUL
-#define HW_CONTENT_REG_ADDR 0x0924UL
-#define HW_VERSION_REG_ADDR 0x0928UL
-#define HW_CONTEXT_ID_REG_ADDR 0x0930UL
-#define HW_DIN_BUFFER_REG_ADDR 0x0C00UL
-#define HW_DIN_MEM_DMA_BUSY_REG_ADDR 0x0c20UL
-#define HW_SRC_LLI_MEM_ADDR_REG_ADDR 0x0c24UL
-#define HW_SRC_LLI_WORD0_REG_ADDR 0x0C28UL
-#define HW_SRC_LLI_WORD1_REG_ADDR 0x0C2CUL
-#define HW_SRAM_SRC_ADDR_REG_ADDR 0x0c30UL
-#define HW_DIN_SRAM_BYTES_LEN_REG_ADDR 0x0c34UL
-#define HW_DIN_SRAM_DMA_BUSY_REG_ADDR 0x0C38UL
-#define HW_WRITE_ALIGN_REG_ADDR 0x0C3CUL
-#define HW_OLD_DATA_REG_ADDR 0x0C48UL
-#define HW_WRITE_ALIGN_LAST_REG_ADDR 0x0C4CUL
-#define HW_DOUT_BUFFER_REG_ADDR 0x0C00UL
-#define HW_DST_LLI_WORD0_REG_ADDR 0x0D28UL
-#define HW_DST_LLI_WORD1_REG_ADDR 0x0D2CUL
-#define HW_DST_LLI_MEM_ADDR_REG_ADDR 0x0D24UL
-#define HW_DOUT_MEM_DMA_BUSY_REG_ADDR 0x0D20UL
-#define HW_SRAM_DEST_ADDR_REG_ADDR 0x0D30UL
-#define HW_DOUT_SRAM_BYTES_LEN_REG_ADDR 0x0D34UL
-#define HW_DOUT_SRAM_DMA_BUSY_REG_ADDR 0x0D38UL
-#define HW_READ_ALIGN_REG_ADDR 0x0D3CUL
-#define HW_READ_LAST_DATA_REG_ADDR 0x0D44UL
-#define HW_RC4_THRU_CPU_REG_ADDR 0x0D4CUL
-#define HW_AHB_SINGLE_REG_ADDR 0x0E00UL
-#define HW_SRAM_DATA_REG_ADDR 0x0F00UL
-#define HW_SRAM_ADDR_REG_ADDR 0x0F04UL
-#define HW_SRAM_DATA_READY_REG_ADDR 0x0F08UL
#define HW_HOST_IRR_REG_ADDR 0x0A00UL
#define HW_HOST_IMR_REG_ADDR 0x0A04UL
#define HW_HOST_ICR_REG_ADDR 0x0A08UL
-#define HW_HOST_SEP_SRAM_THRESHOLD_REG_ADDR 0x0A10UL
-#define HW_HOST_SEP_BUSY_REG_ADDR 0x0A14UL
-#define HW_HOST_SEP_LCS_REG_ADDR 0x0A18UL
-#define HW_HOST_CC_SW_RST_REG_ADDR 0x0A40UL
-#define HW_HOST_SEP_SW_RST_REG_ADDR 0x0A44UL
-#define HW_HOST_FLOW_DMA_SW_INT0_REG_ADDR 0x0A80UL
-#define HW_HOST_FLOW_DMA_SW_INT1_REG_ADDR 0x0A84UL
-#define HW_HOST_FLOW_DMA_SW_INT2_REG_ADDR 0x0A88UL
-#define HW_HOST_FLOW_DMA_SW_INT3_REG_ADDR 0x0A8cUL
-#define HW_HOST_FLOW_DMA_SW_INT4_REG_ADDR 0x0A90UL
-#define HW_HOST_FLOW_DMA_SW_INT5_REG_ADDR 0x0A94UL
-#define HW_HOST_FLOW_DMA_SW_INT6_REG_ADDR 0x0A98UL
-#define HW_HOST_FLOW_DMA_SW_INT7_REG_ADDR 0x0A9cUL
#define HW_HOST_SEP_HOST_GPR0_REG_ADDR 0x0B00UL
#define HW_HOST_SEP_HOST_GPR1_REG_ADDR 0x0B04UL
#define HW_HOST_SEP_HOST_GPR2_REG_ADDR 0x0B08UL
@@ -225,9 +51,6 @@
#define HW_HOST_HOST_SEP_GPR1_REG_ADDR 0x0B84UL
#define HW_HOST_HOST_SEP_GPR2_REG_ADDR 0x0B88UL
#define HW_HOST_HOST_SEP_GPR3_REG_ADDR 0x0B8CUL
-#define HW_HOST_HOST_ENDIAN_REG_ADDR 0x0B90UL
-#define HW_HOST_HOST_COMM_CLK_EN_REG_ADDR 0x0B94UL
-#define HW_CLR_SRAM_BUSY_REG_REG_ADDR 0x0F0CUL
-#define HW_CC_SRAM_BASE_ADDRESS 0x5800UL
+#define HW_SRAM_DATA_READY_REG_ADDR 0x0F08UL
#endif /* ifndef HW_DEFS */
diff --git a/drivers/staging/sep/sep_main.c b/drivers/staging/sep/sep_main.c
new file mode 100644
index 000000000000..cf420f6bbe15
--- /dev/null
+++ b/drivers/staging/sep/sep_main.c
@@ -0,0 +1,4517 @@
+/*
+ *
+ * sep_main.c - Security Processor Driver main group of functions
+ *
+ * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
+ * Contributions(c) 2009-2011 Discretix. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * CONTACTS:
+ *
+ * Mark Allyn mark.a.allyn@intel.com
+ * Jayant Mangalampalli jayant.mangalampalli@intel.com
+ *
+ * CHANGES:
+ *
+ * 2009.06.26 Initial publish
+ * 2010.09.14 Upgrade to Medfield
+ * 2011.01.21 Move to sep_main.c to allow for sep_crypto.c
+ * 2011.02.22 Enable kernel crypto operation
+ *
+ * Please note that this driver is based on information in the Discretix
+ * CryptoCell 5.2 Driver Implementation Guide; the Discretix CryptoCell 5.2
+ * Integration Intel Medfield appendix; the Discretix CryptoCell 5.2
+ * Linux Driver Integration Guide; and the Discretix CryptoCell 5.2 System
+ * Overview and Integration Guide.
+ */
+/* #define DEBUG */
+/* #define SEP_PERF_DEBUG */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/kdev_t.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/poll.h>
+#include <linux/wait.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/ioctl.h>
+#include <asm/current.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/pagemap.h>
+#include <asm/cacheflush.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/async.h>
+#include <linux/crypto.h>
+#include <crypto/internal/hash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha.h>
+#include <crypto/md5.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/hash.h>
+
+#include "sep_driver_hw_defs.h"
+#include "sep_driver_config.h"
+#include "sep_driver_api.h"
+#include "sep_dev.h"
+#include "sep_crypto.h"
+
+#define CREATE_TRACE_POINTS
+#include "sep_trace_events.h"
+
+/*
+ * Let's not spend cycles iterating over message
+ * area contents if debugging not enabled
+ */
+#ifdef DEBUG
+#define sep_dump_message(sep) _sep_dump_message(sep)
+#else
+#define sep_dump_message(sep)
+#endif
+
+/**
+ * Currently, there is only one SEP device per platform;
+ * in the event that future platforms have more than one SEP
+ * device, this will become a linked list
+ */
+
+struct sep_device *sep_dev;
+
+/**
+ * sep_queue_status_remove - Removes transaction from status queue
+ * @sep: SEP device
+ * @sep_queue_info: pointer to status queue
+ *
+ * This function removes information about a transaction from the queue.
+ */
+void sep_queue_status_remove(struct sep_device *sep,
+ struct sep_queue_info **queue_elem)
+{
+ unsigned long lck_flags;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove\n",
+ current->pid);
+
+ if (!queue_elem || !(*queue_elem)) {
+ dev_dbg(&sep->pdev->dev, "PID%d %s null\n",
+ current->pid, __func__);
+ return;
+ }
+
+ spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
+ list_del(&(*queue_elem)->list);
+ sep->sep_queue_num--;
+ spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
+
+ kfree(*queue_elem);
+ *queue_elem = NULL;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove return\n",
+ current->pid);
+ return;
+}
+
+/**
+ * sep_queue_status_add - Adds transaction to status queue
+ * @sep: SEP device
+ * @opcode: transaction opcode
+ * @size: input data size
+ * @pid: pid of current process
+ * @name: current process name
+ * @name_len: length of name (current process)
+ *
+ * This function adds information about a started transaction to the
+ * status queue.
+ */
+struct sep_queue_info *sep_queue_status_add(
+ struct sep_device *sep,
+ u32 opcode,
+ u32 size,
+ u32 pid,
+ u8 *name, size_t name_len)
+{
+ unsigned long lck_flags;
+ struct sep_queue_info *my_elem = NULL;
+
+ my_elem = kzalloc(sizeof(struct sep_queue_info), GFP_KERNEL);
+
+ if (!my_elem)
+ return NULL;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] kzalloc ok\n", current->pid);
+
+ my_elem->data.opcode = opcode;
+ my_elem->data.size = size;
+ my_elem->data.pid = pid;
+
+ if (name_len > TASK_COMM_LEN)
+ name_len = TASK_COMM_LEN;
+
+ memcpy(&my_elem->data.name, name, name_len);
+
+ spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
+
+ list_add_tail(&my_elem->list, &sep->sep_queue_status);
+ sep->sep_queue_num++;
+
+ spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
+
+ return my_elem;
+}
+
+/**
+ * sep_allocate_dmatables_region - Allocates buf for the MLLI/DMA tables
+ * @sep: SEP device
+ * @dmatables_region: Destination pointer for the buffer
+ * @dma_ctx: DMA context for the transaction
+ * @table_count: Number of MLLI/DMA tables to create
+ * The buffer created will not work as-is for DMA operations;
+ * it needs to be copied over to the appropriate place in the
+ * shared area.
+ */
+static int sep_allocate_dmatables_region(struct sep_device *sep,
+ void **dmatables_region,
+ struct sep_dma_context *dma_ctx,
+ const u32 table_count)
+{
+ const size_t new_len =
+ SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;
+
+ void *tmp_region = NULL;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] dma_ctx = 0x%p\n",
+ current->pid, dma_ctx);
+ dev_dbg(&sep->pdev->dev, "[PID%d] dmatables_region = 0x%p\n",
+ current->pid, dmatables_region);
+
+ if (!dma_ctx || !dmatables_region) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] dma context/region uninitialized\n",
+ current->pid);
+ return -EINVAL;
+ }
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] newlen = 0x%08zX\n",
+ current->pid, new_len);
+ dev_dbg(&sep->pdev->dev, "[PID%d] oldlen = 0x%08X\n", current->pid,
+ dma_ctx->dmatables_len);
+ tmp_region = kzalloc(new_len + dma_ctx->dmatables_len, GFP_KERNEL);
+ if (!tmp_region) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] no mem for dma tables region\n",
+ current->pid);
+ return -ENOMEM;
+ }
+
+ /* Were there any previous tables that need to be preserved ? */
+ if (*dmatables_region) {
+ memcpy(tmp_region, *dmatables_region, dma_ctx->dmatables_len);
+ kfree(*dmatables_region);
+ *dmatables_region = NULL;
+ }
+
+ *dmatables_region = tmp_region;
+
+ dma_ctx->dmatables_len += new_len;
+
+ return 0;
+}
+
+/**
+ * sep_wait_transaction - Used for synchronizing transactions
+ * @sep: SEP device
+ */
+int sep_wait_transaction(struct sep_device *sep)
+{
+ int error = 0;
+ DEFINE_WAIT(wait);
+
+ if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
+ &sep->in_use_flags)) {
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] no transactions, returning\n",
+ current->pid);
+ goto end_function_setpid;
+ }
+
+ /*
+ * Looping needed even for exclusive waitq entries
+	 * due to process wakeup latencies; a previous process
+ * might have already created another transaction.
+ */
+ for (;;) {
+ /*
+ * Exclusive waitq entry, so that only one process is
+ * woken up from the queue at a time.
+ */
+ prepare_to_wait_exclusive(&sep->event_transactions,
+ &wait,
+ TASK_INTERRUPTIBLE);
+ if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
+ &sep->in_use_flags)) {
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] no transactions, breaking\n",
+ current->pid);
+ break;
+ }
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] transactions ongoing, sleeping\n",
+ current->pid);
+ schedule();
+ dev_dbg(&sep->pdev->dev, "[PID%d] woken up\n", current->pid);
+
+ if (signal_pending(current)) {
+ dev_dbg(&sep->pdev->dev, "[PID%d] received signal\n",
+ current->pid);
+ error = -EINTR;
+ goto end_function;
+ }
+ }
+end_function_setpid:
+ /*
+ * The pid_doing_transaction indicates that this process
+	 * now owns the facilities to perform a transaction with
+ * the SEP. While this process is performing a transaction,
+ * no other process who has the SEP device open can perform
+ * any transactions. This method allows more than one process
+ * to have the device open at any given time, which provides
+ * finer granularity for device utilization by multiple
+ * processes.
+ */
+ /* Only one process is able to progress here at a time */
+ sep->pid_doing_transaction = current->pid;
+
+end_function:
+ finish_wait(&sep->event_transactions, &wait);
+
+ return error;
+}
+
+/**
+ * sep_check_transaction_owner - Checks if current process owns transaction
+ * @sep: SEP device
+ */
+static inline int sep_check_transaction_owner(struct sep_device *sep)
+{
+ dev_dbg(&sep->pdev->dev, "[PID%d] transaction pid = %d\n",
+ current->pid,
+ sep->pid_doing_transaction);
+
+ if ((sep->pid_doing_transaction == 0) ||
+ (current->pid != sep->pid_doing_transaction)) {
+ return -EACCES;
+ }
+
+ /* We own the transaction */
+ return 0;
+}
+
+#ifdef DEBUG
+
+/**
+ * sep_dump_message - dump the message that is pending
+ * @sep: SEP device
+ * This will only print the dump if DEBUG is set; it still
+ * follows the kernel debug print enabling
+ */
+static void _sep_dump_message(struct sep_device *sep)
+{
+ int count;
+
+ u32 *p = sep->shared_addr;
+
+ for (count = 0; count < 10 * 4; count += 4)
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] Word %d of the message is %x\n",
+ current->pid, count/4, *p++);
+}
+
+#endif
+
+/**
+ * sep_map_and_alloc_shared_area - allocate shared block
+ * @sep: security processor
+ * @size: size of shared area
+ */
+static int sep_map_and_alloc_shared_area(struct sep_device *sep)
+{
+ sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev,
+ sep->shared_size,
+ &sep->shared_bus, GFP_KERNEL);
+
+ if (!sep->shared_addr) {
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] shared memory dma_alloc_coherent failed\n",
+ current->pid);
+ return -ENOMEM;
+ }
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] shared_addr %zx bytes @%p (bus %llx)\n",
+ current->pid,
+ sep->shared_size, sep->shared_addr,
+ (unsigned long long)sep->shared_bus);
+ return 0;
+}
+
+/**
+ * sep_unmap_and_free_shared_area - free shared block
+ * @sep: security processor
+ */
+static void sep_unmap_and_free_shared_area(struct sep_device *sep)
+{
+ dma_free_coherent(&sep->pdev->dev, sep->shared_size,
+ sep->shared_addr, sep->shared_bus);
+}
+
+#ifdef DEBUG
+
+/**
+ * sep_shared_bus_to_virt - convert bus/virt addresses
+ * @sep: pointer to struct sep_device
+ * @bus_address: address to convert
+ *
+ * Returns virtual address inside the shared area according
+ * to the bus address.
+ */
+static void *sep_shared_bus_to_virt(struct sep_device *sep,
+ dma_addr_t bus_address)
+{
+ return sep->shared_addr + (bus_address - sep->shared_bus);
+}
+
+#endif
+
+/**
+ * sep_open - device open method
+ * @inode: inode of SEP device
+ * @filp: file handle to SEP device
+ *
+ * Open method for the SEP device. Called when userspace opens
+ * the SEP device node.
+ *
+ * Returns zero on success, otherwise an error code.
+ */
+static int sep_open(struct inode *inode, struct file *filp)
+{
+ struct sep_device *sep;
+ struct sep_private_data *priv;
+
+ dev_dbg(&sep_dev->pdev->dev, "[PID%d] open\n", current->pid);
+
+ if (filp->f_flags & O_NONBLOCK)
+ return -ENOTSUPP;
+
+ /*
+ * Get the SEP device structure and use it for the
+ * private_data field in filp for other methods
+ */
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ sep = sep_dev;
+ priv->device = sep;
+ filp->private_data = priv;
+
+ dev_dbg(&sep_dev->pdev->dev, "[PID%d] priv is 0x%p\n",
+ current->pid, priv);
+
+ /* Anyone can open; locking takes place at transaction level */
+ return 0;
+}
+
+/**
+ * sep_free_dma_table_data_handler - free DMA table
+ * @sep: pointer to struct sep_device
+ * @dma_ctx: dma context
+ *
+ * Handles the request to free the DMA table for synchronous actions
+ */
+int sep_free_dma_table_data_handler(struct sep_device *sep,
+ struct sep_dma_context **dma_ctx)
+{
+ int count;
+ int dcb_counter;
+ /* Pointer to the current dma_resource struct */
+ struct sep_dma_resource *dma;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] sep_free_dma_table_data_handler\n",
+ current->pid);
+
+ if (!dma_ctx || !(*dma_ctx)) {
+ /* No context or context already freed */
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] no DMA context or context already freed\n",
+ current->pid);
+
+ return 0;
+ }
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] (*dma_ctx)->nr_dcb_creat 0x%x\n",
+ current->pid,
+ (*dma_ctx)->nr_dcb_creat);
+
+ for (dcb_counter = 0;
+ dcb_counter < (*dma_ctx)->nr_dcb_creat; dcb_counter++) {
+ dma = &(*dma_ctx)->dma_res_arr[dcb_counter];
+
+ /* Unmap and free input map array */
+ if (dma->in_map_array) {
+ for (count = 0; count < dma->in_num_pages; count++) {
+ dma_unmap_page(&sep->pdev->dev,
+ dma->in_map_array[count].dma_addr,
+ dma->in_map_array[count].size,
+ DMA_TO_DEVICE);
+ }
+ kfree(dma->in_map_array);
+ }
+
+ /**
+		 * Output is handled differently. If
+ * this was a secure dma into restricted memory,
+ * then we skip this step altogether as restricted
+ * memory is not available to the o/s at all.
+ */
+ if (((*dma_ctx)->secure_dma == false) &&
+ (dma->out_map_array)) {
+
+ for (count = 0; count < dma->out_num_pages; count++) {
+ dma_unmap_page(&sep->pdev->dev,
+ dma->out_map_array[count].dma_addr,
+ dma->out_map_array[count].size,
+ DMA_FROM_DEVICE);
+ }
+ kfree(dma->out_map_array);
+ }
+
+		/* Free page cache for input */
+ if (dma->in_page_array) {
+ for (count = 0; count < dma->in_num_pages; count++) {
+ flush_dcache_page(dma->in_page_array[count]);
+ page_cache_release(dma->in_page_array[count]);
+ }
+ kfree(dma->in_page_array);
+ }
+
+ /* Again, we do this only for non secure dma */
+ if (((*dma_ctx)->secure_dma == false) &&
+ (dma->out_page_array)) {
+
+ for (count = 0; count < dma->out_num_pages; count++) {
+ if (!PageReserved(dma->out_page_array[count]))
+
+ SetPageDirty(dma->
+ out_page_array[count]);
+
+ flush_dcache_page(dma->out_page_array[count]);
+ page_cache_release(dma->out_page_array[count]);
+ }
+ kfree(dma->out_page_array);
+ }
+
+ /**
+		 * Note that here we use in_map_num_entries because we
+		 * don't have a page array; the page array is generated
+		 * only in sep_lock_user_pages, which is not called
+		 * for kernel crypto, which is what the scatter-gather
+		 * list (sg) is used for exclusively
+ */
+ if (dma->src_sg) {
+ dma_unmap_sg(&sep->pdev->dev, dma->src_sg,
+ dma->in_map_num_entries, DMA_TO_DEVICE);
+ dma->src_sg = NULL;
+ }
+
+ if (dma->dst_sg) {
+ dma_unmap_sg(&sep->pdev->dev, dma->dst_sg,
+ dma->in_map_num_entries, DMA_FROM_DEVICE);
+ dma->dst_sg = NULL;
+ }
+
+ /* Reset all the values */
+ dma->in_page_array = NULL;
+ dma->out_page_array = NULL;
+ dma->in_num_pages = 0;
+ dma->out_num_pages = 0;
+ dma->in_map_array = NULL;
+ dma->out_map_array = NULL;
+ dma->in_map_num_entries = 0;
+ dma->out_map_num_entries = 0;
+ }
+
+ (*dma_ctx)->nr_dcb_creat = 0;
+ (*dma_ctx)->num_lli_tables_created = 0;
+
+ kfree(*dma_ctx);
+ *dma_ctx = NULL;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] sep_free_dma_table_data_handler end\n",
+ current->pid);
+
+ return 0;
+}
+
+/**
+ * sep_end_transaction_handler - end transaction
+ * @sep: pointer to struct sep_device
+ * @dma_ctx: DMA context
+ * @call_status: Call status
+ *
+ * This API handles the end transaction request.
+ */
+static int sep_end_transaction_handler(struct sep_device *sep,
+ struct sep_dma_context **dma_ctx,
+ struct sep_call_status *call_status,
+ struct sep_queue_info **my_queue_elem)
+{
+ dev_dbg(&sep->pdev->dev, "[PID%d] ending transaction\n", current->pid);
+
+ /*
+ * Extraneous transaction clearing would mess up PM
+ * device usage counters and SEP would get suspended
+ * just before we send a command to SEP in the next
+ * transaction
+	 */
+ if (sep_check_transaction_owner(sep)) {
+ dev_dbg(&sep->pdev->dev, "[PID%d] not transaction owner\n",
+ current->pid);
+ return 0;
+ }
+
+ /* Update queue status */
+ sep_queue_status_remove(sep, my_queue_elem);
+
+ /* Check that all the DMA resources were freed */
+ if (dma_ctx)
+ sep_free_dma_table_data_handler(sep, dma_ctx);
+
+ /* Reset call status for next transaction */
+ if (call_status)
+ call_status->status = 0;
+
+ /* Clear the message area to avoid next transaction reading
+ * sensitive results from previous transaction */
+ memset(sep->shared_addr, 0,
+ SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
+
+ /* start suspend delay */
+#ifdef SEP_ENABLE_RUNTIME_PM
+ if (sep->in_use) {
+ sep->in_use = 0;
+ pm_runtime_mark_last_busy(&sep->pdev->dev);
+ pm_runtime_put_autosuspend(&sep->pdev->dev);
+ }
+#endif
+
+ clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);
+ sep->pid_doing_transaction = 0;
+
+ /* Now it's safe for next process to proceed */
+ dev_dbg(&sep->pdev->dev, "[PID%d] waking up next transaction\n",
+ current->pid);
+ clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT, &sep->in_use_flags);
+ wake_up(&sep->event_transactions);
+
+ return 0;
+}
+
+
+/**
+ * sep_release - close a SEP device
+ * @inode: inode of SEP device
+ * @filp: file handle being closed
+ *
+ * Called on the final close of a SEP device.
+ */
+static int sep_release(struct inode *inode, struct file *filp)
+{
+ struct sep_private_data * const private_data = filp->private_data;
+ struct sep_call_status *call_status = &private_data->call_status;
+ struct sep_device *sep = private_data->device;
+ struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
+ struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] release\n", current->pid);
+
+ sep_end_transaction_handler(sep, dma_ctx, call_status,
+ my_queue_elem);
+
+ kfree(filp->private_data);
+
+ return 0;
+}
+
+/**
+ * sep_mmap - maps the shared area to user space
+ * @filp: pointer to struct file
+ * @vma: pointer to vm_area_struct
+ *
+ * Called on an mmap of our space via the normal SEP device
+ */
+static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct sep_private_data * const private_data = filp->private_data;
+ struct sep_call_status *call_status = &private_data->call_status;
+ struct sep_device *sep = private_data->device;
+ struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
+ dma_addr_t bus_addr;
+ unsigned long error = 0;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] sep_mmap\n", current->pid);
+
+ /* Set the transaction busy (own the device) */
+ /*
+ * Problem for multithreaded applications is that here we're
+ * possibly going to sleep while holding a write lock on
+ * current->mm->mmap_sem, which will cause deadlock for ongoing
+ * transaction trying to create DMA tables
+ */
+ error = sep_wait_transaction(sep);
+ if (error)
+ /* Interrupted by signal, don't clear transaction */
+ goto end_function;
+
+ /* Clear the message area to avoid next transaction reading
+ * sensitive results from previous transaction */
+ memset(sep->shared_addr, 0,
+ SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
+
+ /*
+	 * Check that the size of the mapped range does not exceed the size
+	 * of the message shared area
+ */
+ if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
+ error = -EINVAL;
+ goto end_function_with_error;
+ }
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] shared_addr is %p\n",
+ current->pid, sep->shared_addr);
+
+ /* Get bus address */
+ bus_addr = sep->shared_bus;
+
+ if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
+		dev_dbg(&sep->pdev->dev, "[PID%d] remap_pfn_range failed\n",
+ current->pid);
+ error = -EAGAIN;
+ goto end_function_with_error;
+ }
+
+ /* Update call status */
+ set_bit(SEP_LEGACY_MMAP_DONE_OFFSET, &call_status->status);
+
+ goto end_function;
+
+end_function_with_error:
+ /* Clear our transaction */
+ sep_end_transaction_handler(sep, NULL, call_status,
+ my_queue_elem);
+
+end_function:
+ return error;
+}
+
+/**
+ * sep_poll - poll handler
+ * @filp: pointer to struct file
+ * @wait: pointer to poll_table
+ *
+ * Called by the OS when the kernel is asked to do a poll on
+ * a SEP file handle.
+ */
+static unsigned int sep_poll(struct file *filp, poll_table *wait)
+{
+ struct sep_private_data * const private_data = filp->private_data;
+ struct sep_call_status *call_status = &private_data->call_status;
+ struct sep_device *sep = private_data->device;
+ u32 mask = 0;
+ u32 retval = 0;
+ u32 retval2 = 0;
+ unsigned long lock_irq_flag;
+
+ /* Am I the process that owns the transaction? */
+ if (sep_check_transaction_owner(sep)) {
+ dev_dbg(&sep->pdev->dev, "[PID%d] poll pid not owner\n",
+ current->pid);
+ mask = POLLERR;
+ goto end_function;
+ }
+
+ /* Check if send command or send_reply were activated previously */
+ if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
+ &call_status->status)) {
+ dev_warn(&sep->pdev->dev, "[PID%d] sendmsg not called\n",
+ current->pid);
+ mask = POLLERR;
+ goto end_function;
+ }
+
+
+ /* Add the event to the polling wait table */
+ dev_dbg(&sep->pdev->dev, "[PID%d] poll: calling wait sep_event\n",
+ current->pid);
+
+ poll_wait(filp, &sep->event_interrupt, wait);
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] poll: send_ct is %lx reply ct is %lx\n",
+ current->pid, sep->send_ct, sep->reply_ct);
+
+	/* Check if an error occurred during poll */
+ retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
+ if ((retval2 != 0x0) && (retval2 != 0x8)) {
+ dev_dbg(&sep->pdev->dev, "[PID%d] poll; poll error %x\n",
+ current->pid, retval2);
+ mask |= POLLERR;
+ goto end_function;
+ }
+
+ spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
+
+ if (sep->send_ct == sep->reply_ct) {
+ spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
+ retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] poll: data ready check (GPR2) %x\n",
+ current->pid, retval);
+
+ /* Check if printf request */
+ if ((retval >> 30) & 0x1) {
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] poll: SEP printf request\n",
+ current->pid);
+ goto end_function;
+ }
+
+		/* Check if this is a SEP reply or a request */
+ if (retval >> 31) {
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] poll: SEP request\n",
+ current->pid);
+ } else {
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] poll: normal return\n",
+ current->pid);
+ sep_dump_message(sep);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] poll; SEP reply POLLIN|POLLRDNORM\n",
+ current->pid);
+ mask |= POLLIN | POLLRDNORM;
+ }
+ set_bit(SEP_LEGACY_POLL_DONE_OFFSET, &call_status->status);
+ } else {
+ spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] poll; no reply; returning mask of 0\n",
+ current->pid);
+ mask = 0;
+ }
+
+end_function:
+ return mask;
+}
+
+/**
+ * sep_time_address - address in SEP memory of time
+ * @sep: SEP device we want the address from
+ *
+ * Return the address of the two dwords in memory used for time
+ * setting.
+ */
+static u32 *sep_time_address(struct sep_device *sep)
+{
+ return sep->shared_addr +
+ SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
+}
+
+/**
+ * sep_set_time - set the SEP time
+ * @sep: the SEP we are setting the time for
+ *
+ * Calculates time and sets it at the predefined address.
+ * Called with the SEP mutex held.
+ */
+static unsigned long sep_set_time(struct sep_device *sep)
+{
+ struct timeval time;
+ u32 *time_addr; /* Address of time as seen by the kernel */
+
+
+ do_gettimeofday(&time);
+
+ /* Set value in the SYSTEM MEMORY offset */
+ time_addr = sep_time_address(sep);
+
+ time_addr[0] = SEP_TIME_VAL_TOKEN;
+ time_addr[1] = time.tv_sec;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] time.tv_sec is %lu\n",
+ current->pid, time.tv_sec);
+ dev_dbg(&sep->pdev->dev, "[PID%d] time_addr is %p\n",
+ current->pid, time_addr);
+ dev_dbg(&sep->pdev->dev, "[PID%d] sep->shared_addr is %p\n",
+ current->pid, sep->shared_addr);
+
+ return time.tv_sec;
+}
+
+/**
+ * sep_send_command_handler - kick off a command
+ * @sep: SEP being signalled
+ *
+ * This function raises an interrupt to the SEP that signals that it has a new
+ * command from the host
+ *
+ * Note that this function does fall under the ioctl lock
+ */
+int sep_send_command_handler(struct sep_device *sep)
+{
+ unsigned long lock_irq_flag;
+ u32 *msg_pool;
+ int error = 0;
+
+ /* Basic sanity check; set msg pool to start of shared area */
+ msg_pool = (u32 *)sep->shared_addr;
+ msg_pool += 2;
+
+ /* Look for start msg token */
+ if (*msg_pool != SEP_START_MSG_TOKEN) {
+ dev_warn(&sep->pdev->dev, "start message token not present\n");
+ error = -EPROTO;
+ goto end_function;
+ }
+
+ /* Do we have a reasonable size? */
+ msg_pool += 1;
+ if ((*msg_pool < 2) ||
+ (*msg_pool > SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES)) {
+
+ dev_warn(&sep->pdev->dev, "invalid message size\n");
+ error = -EPROTO;
+ goto end_function;
+ }
+
+ /* Does the command look reasonable? */
+ msg_pool += 1;
+ if (*msg_pool < 2) {
+ dev_warn(&sep->pdev->dev, "invalid message opcode\n");
+ error = -EPROTO;
+ goto end_function;
+ }
+
+#if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
+ dev_dbg(&sep->pdev->dev, "[PID%d] before pm sync status 0x%X\n",
+ current->pid,
+ sep->pdev->dev.power.runtime_status);
+ sep->in_use = 1; /* device is about to be used */
+ pm_runtime_get_sync(&sep->pdev->dev);
+#endif
+
+ if (test_and_set_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags)) {
+ error = -EPROTO;
+ goto end_function;
+ }
+ sep->in_use = 1; /* device is about to be used */
+ sep_set_time(sep);
+
+ sep_dump_message(sep);
+
+ /* Update counter */
+ spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
+ sep->send_ct++;
+ spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] sep_send_command_handler send_ct %lx reply_ct %lx\n",
+ current->pid, sep->send_ct, sep->reply_ct);
+
+ /* Send interrupt to SEP */
+ sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
+
+end_function:
+ return error;
+}
+
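Based on the sanity checks in sep_send_command_handler above and on the SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES comment in sep_driver_config.h, the start of the shared message area appears to be laid out as sketched below; this is an illustrative reconstruction, not a structure defined by the driver.

	/* Illustrative layout of the first dwords of the message area
	 * (assumed, reconstructed from the checks above).
	 */
	struct sep_msg_header_sketch {
		u32 reserved[2];	/* two reserved dwords at the start */
		u32 token;		/* must equal SEP_START_MSG_TOKEN (0x02558808) */
		u32 size;		/* message size; must be >= 2 and
					 * <= SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES */
		u32 opcode;		/* command opcode; must be >= 2 */
	};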
+/**
+ * sep_crypto_dma -
+ * @sep: pointer to struct sep_device
+ * @sg: pointer to struct scatterlist
+ * @direction: DMA mapping direction (to or from the device)
+ * @dma_maps: pointer to place a pointer to array of dma maps
+ * This is filled in; anything previous there will be lost
+ * The structure for dma maps is sep_dma_map
+ * @returns number of dma maps on success; negative on error
+ *
+ * This creates the dma table from the scatterlist
+ * It is used only for kernel crypto, as it works with the scatterlist
+ * representation of data buffers
+ *
+ */
+static int sep_crypto_dma(
+ struct sep_device *sep,
+ struct scatterlist *sg,
+ struct sep_dma_map **dma_maps,
+ enum dma_data_direction direction)
+{
+ struct scatterlist *temp_sg;
+
+ u32 count_segment;
+ u32 count_mapped;
+ struct sep_dma_map *sep_dma;
+ int ct1;
+
+ if (sg->length == 0)
+ return 0;
+
+ /* Count the segments */
+ temp_sg = sg;
+ count_segment = 0;
+ while (temp_sg) {
+ count_segment += 1;
+ temp_sg = scatterwalk_sg_next(temp_sg);
+ }
+ dev_dbg(&sep->pdev->dev,
+ "There are (hex) %x segments in sg\n", count_segment);
+
+ /* DMA map segments */
+ count_mapped = dma_map_sg(&sep->pdev->dev, sg,
+ count_segment, direction);
+
+ dev_dbg(&sep->pdev->dev,
+ "There are (hex) %x maps in sg\n", count_mapped);
+
+ if (count_mapped == 0) {
+ dev_dbg(&sep->pdev->dev, "Cannot dma_map_sg\n");
+ return -ENOMEM;
+ }
+
+ sep_dma = kmalloc(sizeof(struct sep_dma_map) *
+ count_mapped, GFP_ATOMIC);
+
+ if (sep_dma == NULL) {
+ dev_dbg(&sep->pdev->dev, "Cannot allocate dma_maps\n");
+ return -ENOMEM;
+ }
+
+ for_each_sg(sg, temp_sg, count_mapped, ct1) {
+ sep_dma[ct1].dma_addr = sg_dma_address(temp_sg);
+ sep_dma[ct1].size = sg_dma_len(temp_sg);
+ dev_dbg(&sep->pdev->dev, "(all hex) map %x dma %lx len %lx\n",
+ ct1, (unsigned long)sep_dma[ct1].dma_addr,
+ (unsigned long)sep_dma[ct1].size);
+ }
+
+ *dma_maps = sep_dma;
+ return count_mapped;
+
+}
+
+/**
+ * sep_crypto_lli -
+ * @sep: pointer to struct sep_device
+ * @sg: pointer to struct scatterlist
+ * @data_size: total data size
+ * @direction: DMA mapping direction (to or from the device)
+ * @maps: pointer to place a pointer to array of dma maps
+ *	This is filled in; anything previous there will be lost
+ *	The structure for dma maps is sep_dma_map
+ * @llis: pointer to place a pointer to array of lli entries
+ *	This is filled in; anything previous there will be lost
+ *	The structure for lli entries is sep_lli_entry
+ * @returns number of dma maps on success; negative on error
+ *
+ * This creates the LLI table from the scatterlist
+ * It is only used for kernel crypto as it works exclusively
+ * with scatterlists (struct scatterlist) representation of
+ * data buffers
+ */
+static int sep_crypto_lli(
+ struct sep_device *sep,
+ struct scatterlist *sg,
+ struct sep_dma_map **maps,
+ struct sep_lli_entry **llis,
+ u32 data_size,
+ enum dma_data_direction direction)
+{
+
+ int ct1;
+ struct sep_lli_entry *sep_lli;
+ struct sep_dma_map *sep_map;
+
+ int nbr_ents;
+
+ nbr_ents = sep_crypto_dma(sep, sg, maps, direction);
+ if (nbr_ents <= 0) {
+ dev_dbg(&sep->pdev->dev, "crypto_dma failed %x\n",
+ nbr_ents);
+ return nbr_ents;
+ }
+
+ sep_map = *maps;
+
+ sep_lli = kmalloc(sizeof(struct sep_lli_entry) * nbr_ents, GFP_ATOMIC);
+
+ if (sep_lli == NULL) {
+ dev_dbg(&sep->pdev->dev, "Cannot allocate lli_maps\n");
+
+ kfree(*maps);
+ *maps = NULL;
+ return -ENOMEM;
+ }
+
+ for (ct1 = 0; ct1 < nbr_ents; ct1 += 1) {
+ sep_lli[ct1].bus_address = (u32)sep_map[ct1].dma_addr;
+
+ /* Maximum for page is total data size */
+ if (sep_map[ct1].size > data_size)
+ sep_map[ct1].size = data_size;
+
+ sep_lli[ct1].block_size = (u32)sep_map[ct1].size;
+ }
+
+ *llis = sep_lli;
+ return nbr_ents;
+}
+
+/**
+ * sep_lock_kernel_pages - map kernel pages for DMA
+ * @sep: pointer to struct sep_device
+ * @kernel_virt_addr: address of data buffer in kernel
+ * @data_size: size of data
+ * @lli_array_ptr: lli array
+ * @in_out_flag: input into device or output from device
+ *
+ * This function locks all the physical pages of the kernel virtual buffer
+ * and construct a basic lli array, where each entry holds the physical
+ * page address and the size that application data holds in this page
+ * This function is used only during kernel crypto module calls from within
+ * the kernel (when ioctl is not used)
+ *
+ * This is used only for kernel crypto. Kernel pages
+ * are handled differently as they are done via
+ * scatter gather lists (struct scatterlist)
+ */
+static int sep_lock_kernel_pages(struct sep_device *sep,
+ unsigned long kernel_virt_addr,
+ u32 data_size,
+ struct sep_lli_entry **lli_array_ptr,
+ int in_out_flag,
+ struct sep_dma_context *dma_ctx)
+
+{
+ u32 num_pages;
+ struct scatterlist *sg;
+
+ /* Array of lli */
+ struct sep_lli_entry *lli_array;
+ /* Map array */
+ struct sep_dma_map *map_array;
+
+ enum dma_data_direction direction;
+
+ lli_array = NULL;
+ map_array = NULL;
+
+ if (in_out_flag == SEP_DRIVER_IN_FLAG) {
+ direction = DMA_TO_DEVICE;
+ sg = dma_ctx->src_sg;
+ } else {
+ direction = DMA_FROM_DEVICE;
+ sg = dma_ctx->dst_sg;
+ }
+
+ num_pages = sep_crypto_lli(sep, sg, &map_array, &lli_array,
+ data_size, direction);
+
+ if (num_pages <= 0) {
+ dev_dbg(&sep->pdev->dev, "sep_crypto_lli returned error %x\n",
+ num_pages);
+ return -ENOMEM;
+ }
+
+ /* Put mapped kernel sg into kernel resource array */
+
+	/* Set output params according to the in_out flag */
+ if (in_out_flag == SEP_DRIVER_IN_FLAG) {
+ *lli_array_ptr = lli_array;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
+ num_pages;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
+ NULL;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
+ map_array;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
+ num_pages;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg =
+ dma_ctx->src_sg;
+ } else {
+ *lli_array_ptr = lli_array;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
+ num_pages;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
+ NULL;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
+ map_array;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
+ out_map_num_entries = num_pages;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg =
+ dma_ctx->dst_sg;
+ }
+
+ return 0;
+}
+
+/**
+ * sep_lock_user_pages - lock and map user pages for DMA
+ * @sep: pointer to struct sep_device
+ * @app_virt_addr: user memory data buffer
+ * @data_size: size of data buffer
+ * @lli_array_ptr: lli array
+ * @in_out_flag: input or output to device
+ *
+ * This function locks all the physical pages of the application
+ * virtual buffer and construct a basic lli array, where each entry
+ * holds the physical page address and the size that application
+ * data holds in this physical pages
+ */
+static int sep_lock_user_pages(struct sep_device *sep,
+ u32 app_virt_addr,
+ u32 data_size,
+ struct sep_lli_entry **lli_array_ptr,
+ int in_out_flag,
+ struct sep_dma_context *dma_ctx)
+
+{
+ int error = 0;
+ u32 count;
+ int result;
+	/* The page of the end address of the user space buffer */
+ u32 end_page;
+ /* The page of the start address of the user space buffer */
+ u32 start_page;
+ /* The range in pages */
+ u32 num_pages;
+ /* Array of pointers to page */
+ struct page **page_array;
+ /* Array of lli */
+ struct sep_lli_entry *lli_array;
+ /* Map array */
+ struct sep_dma_map *map_array;
+
+ /* Set start and end pages and num pages */
+ end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
+ start_page = app_virt_addr >> PAGE_SHIFT;
+ num_pages = end_page - start_page + 1;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] lock user pages app_virt_addr is %x\n",
+ current->pid, app_virt_addr);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
+ current->pid, data_size);
+ dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
+ current->pid, start_page);
+ dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
+ current->pid, end_page);
+ dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
+ current->pid, num_pages);
+
+ /* Allocate array of pages structure pointers */
+ page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
+ if (!page_array) {
+ error = -ENOMEM;
+ goto end_function;
+ }
+ map_array = kmalloc(sizeof(struct sep_dma_map) * num_pages, GFP_ATOMIC);
+ if (!map_array) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] kmalloc for map_array failed\n",
+ current->pid);
+ error = -ENOMEM;
+ goto end_function_with_error1;
+ }
+
+ lli_array = kmalloc(sizeof(struct sep_lli_entry) * num_pages,
+ GFP_ATOMIC);
+
+ if (!lli_array) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] kmalloc for lli_array failed\n",
+ current->pid);
+ error = -ENOMEM;
+ goto end_function_with_error2;
+ }
+
+	/* Convert the application virtual address into a set of physical pages */
+ down_read(&current->mm->mmap_sem);
+ result = get_user_pages(current, current->mm, app_virt_addr,
+ num_pages,
+ ((in_out_flag == SEP_DRIVER_IN_FLAG) ? 0 : 1),
+ 0, page_array, NULL);
+
+ up_read(&current->mm->mmap_sem);
+
+ /* Check the number of pages locked - if not all then exit with error */
+ if (result != num_pages) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] not all pages locked by get_user_pages, "
+ "result 0x%X, num_pages 0x%X\n",
+ current->pid, result, num_pages);
+ error = -ENOMEM;
+ goto end_function_with_error3;
+ }
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] get_user_pages succeeded\n",
+ current->pid);
+
+ /*
+ * Fill the array using page array data and
+ * map the pages - this action will also flush the cache as needed
+ */
+ for (count = 0; count < num_pages; count++) {
+ /* Fill the map array */
+ map_array[count].dma_addr =
+ dma_map_page(&sep->pdev->dev, page_array[count],
+ 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+ map_array[count].size = PAGE_SIZE;
+
+ /* Fill the lli array entry */
+ lli_array[count].bus_address = (u32)map_array[count].dma_addr;
+ lli_array[count].block_size = PAGE_SIZE;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] lli_array[%x].bus_address is %08lx, "
+ "lli_array[%x].block_size is (hex) %x\n", current->pid,
+ count, (unsigned long)lli_array[count].bus_address,
+ count, lli_array[count].block_size);
+ }
+
+ /* Check the offset for the first page */
+ lli_array[0].bus_address =
+ lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));
+
+ /* Check that not all the data is in the first page only */
+ if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
+ lli_array[0].block_size = data_size;
+ else
+ lli_array[0].block_size =
+ PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] After check if page 0 has all data\n",
+ current->pid);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] lli_array[0].bus_address is (hex) %08lx, "
+ "lli_array[0].block_size is (hex) %x\n",
+ current->pid,
+ (unsigned long)lli_array[0].bus_address,
+ lli_array[0].block_size);
+
+
+ /* Check the size of the last page */
+ if (num_pages > 1) {
+ lli_array[num_pages - 1].block_size =
+ (app_virt_addr + data_size) & (~PAGE_MASK);
+ if (lli_array[num_pages - 1].block_size == 0)
+ lli_array[num_pages - 1].block_size = PAGE_SIZE;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] After last page size adjustment\n",
+ current->pid);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] lli_array[%x].bus_address is (hex) %08lx, "
+ "lli_array[%x].block_size is (hex) %x\n",
+ current->pid,
+ num_pages - 1,
+ (unsigned long)lli_array[num_pages - 1].bus_address,
+ num_pages - 1,
+ lli_array[num_pages - 1].block_size);
+ }
+
+	/* Set output params according to the in_out flag */
+ if (in_out_flag == SEP_DRIVER_IN_FLAG) {
+ *lli_array_ptr = lli_array;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
+ num_pages;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
+ page_array;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
+ map_array;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
+ num_pages;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg = NULL;
+ } else {
+ *lli_array_ptr = lli_array;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
+ num_pages;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
+ page_array;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
+ map_array;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
+ out_map_num_entries = num_pages;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg = NULL;
+ }
+ goto end_function;
+
+end_function_with_error3:
+ /* Free lli array */
+ kfree(lli_array);
+
+end_function_with_error2:
+ kfree(map_array);
+
+end_function_with_error1:
+ /* Free page array */
+ kfree(page_array);
+
+end_function:
+ return error;
+}
+
+/**
+ * sep_lli_table_secure_dma - get lli array for IMR addresses
+ * @sep: pointer to struct sep_device
+ * @app_virt_addr: user memory data buffer
+ * @data_size: size of data buffer
+ * @lli_array_ptr: lli array
+ * @in_out_flag: not used
+ * @dma_ctx: pointer to struct sep_dma_context
+ *
+ * This function creates lli tables for outputting data to
+ * IMR memory, which is memory that cannot be accessed by
+ * the x86 processor.
+ */
+static int sep_lli_table_secure_dma(struct sep_device *sep,
+ u32 app_virt_addr,
+ u32 data_size,
+ struct sep_lli_entry **lli_array_ptr,
+ int in_out_flag,
+ struct sep_dma_context *dma_ctx)
+
+{
+ int error = 0;
+ u32 count;
+	/* The page of the end address of the user space buffer */
+ u32 end_page;
+ /* The page of the start address of the user space buffer */
+ u32 start_page;
+ /* The range in pages */
+ u32 num_pages;
+ /* Array of lli */
+ struct sep_lli_entry *lli_array;
+
+ /* Set start and end pages and num pages */
+ end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
+ start_page = app_virt_addr >> PAGE_SHIFT;
+ num_pages = end_page - start_page + 1;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] lock user pages"
+ " app_virt_addr is %x\n", current->pid, app_virt_addr);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
+ current->pid, data_size);
+ dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
+ current->pid, start_page);
+ dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
+ current->pid, end_page);
+ dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
+ current->pid, num_pages);
+
+ lli_array = kmalloc(sizeof(struct sep_lli_entry) * num_pages,
+ GFP_ATOMIC);
+
+ if (!lli_array) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] kmalloc for lli_array failed\n",
+ current->pid);
+ return -ENOMEM;
+ }
+
+ /*
+ * Fill the lli_array
+ */
+ start_page = start_page << PAGE_SHIFT;
+ for (count = 0; count < num_pages; count++) {
+ /* Fill the lli array entry */
+ lli_array[count].bus_address = start_page;
+ lli_array[count].block_size = PAGE_SIZE;
+
+ start_page += PAGE_SIZE;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] lli_array[%x].bus_address is %08lx, "
+ "lli_array[%x].block_size is (hex) %x\n",
+ current->pid,
+ count, (unsigned long)lli_array[count].bus_address,
+ count, lli_array[count].block_size);
+ }
+
+ /* Check the offset for the first page */
+ lli_array[0].bus_address =
+ lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));
+
+ /* Check that not all the data is in the first page only */
+ if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
+ lli_array[0].block_size = data_size;
+ else
+ lli_array[0].block_size =
+ PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] After check if page 0 has all data\n"
+ "lli_array[0].bus_address is (hex) %08lx, "
+ "lli_array[0].block_size is (hex) %x\n",
+ current->pid,
+ (unsigned long)lli_array[0].bus_address,
+ lli_array[0].block_size);
+
+ /* Check the size of the last page */
+ if (num_pages > 1) {
+ lli_array[num_pages - 1].block_size =
+ (app_virt_addr + data_size) & (~PAGE_MASK);
+ if (lli_array[num_pages - 1].block_size == 0)
+ lli_array[num_pages - 1].block_size = PAGE_SIZE;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] After last page size adjustment\n"
+ "lli_array[%x].bus_address is (hex) %08lx, "
+ "lli_array[%x].block_size is (hex) %x\n",
+ current->pid, num_pages - 1,
+ (unsigned long)lli_array[num_pages - 1].bus_address,
+ num_pages - 1,
+ lli_array[num_pages - 1].block_size);
+ }
+ *lli_array_ptr = lli_array;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages = num_pages;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_num_entries = 0;
+
+ return error;
+}
+
+/**
+ * sep_calculate_lli_table_max_size - size the LLI table
+ * @sep: pointer to struct sep_device
+ * @lli_in_array_ptr
+ * @num_array_entries
+ * @last_table_flag
+ *
+ * This function calculates the size of data that can be inserted into
+ * the lli table from this array, such that either the table is full
+ * (all entries are entered), or there are no more entries in the
+ * lli array
+ */
+static u32 sep_calculate_lli_table_max_size(struct sep_device *sep,
+ struct sep_lli_entry *lli_in_array_ptr,
+ u32 num_array_entries,
+ u32 *last_table_flag)
+{
+ u32 counter;
+ /* Table data size */
+ u32 table_data_size = 0;
+ /* Data size for the next table */
+ u32 next_table_data_size;
+
+ *last_table_flag = 0;
+
+ /*
+ * Calculate the data in the out lli table till we fill the whole
+ * table or till the data has ended
+ */
+ for (counter = 0;
+ (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) &&
+ (counter < num_array_entries); counter++)
+ table_data_size += lli_in_array_ptr[counter].block_size;
+
+ /*
+ * Check if we reached the last entry,
+ * meaning this is the last table to build,
+ * and no need to check the block alignment
+ */
+ if (counter == num_array_entries) {
+ /* Set the last table flag */
+ *last_table_flag = 1;
+ goto end_function;
+ }
+
+ /*
+ * Calculate the data size of the next table.
+ * Stop if no entries are left or if the data size reaches the DMA restriction
+ */
+ next_table_data_size = 0;
+ for (; counter < num_array_entries; counter++) {
+ next_table_data_size += lli_in_array_ptr[counter].block_size;
+ if (next_table_data_size >= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
+ break;
+ }
+
+ /*
+ * Check if the next table data size is less than the DMA restriction.
+ * If it is - recalculate the current table size, so that the next
+ * table data size will be adequate for DMA
+ */
+ if (next_table_data_size &&
+ next_table_data_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
+
+ table_data_size -= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE -
+ next_table_data_size);
+
+end_function:
+ return table_data_size;
+}
+
+/**
+ * sep_build_lli_table - build an lli array for the given table
+ * @sep: pointer to struct sep_device
+ * @lli_array_ptr: pointer to lli array
+ * @lli_table_ptr: pointer to lli table
+ * @num_processed_entries_ptr: pointer to number of lli array entries consumed
+ * @num_table_entries_ptr: pointer to number of entries written to the table
+ * @table_data_size: total data size
+ *
+ * Builds an lli table from the lli_array according to
+ * the given size of data
+ */
+static void sep_build_lli_table(struct sep_device *sep,
+ struct sep_lli_entry *lli_array_ptr,
+ struct sep_lli_entry *lli_table_ptr,
+ u32 *num_processed_entries_ptr,
+ u32 *num_table_entries_ptr,
+ u32 table_data_size)
+{
+ /* Current table data size */
+ u32 curr_table_data_size;
+ /* Counter of lli array entry */
+ u32 array_counter;
+
+ /* Init current table data size and lli array entry counter */
+ curr_table_data_size = 0;
+ array_counter = 0;
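+	/* Start at one entry to account for the trailing info entry */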
+ *num_table_entries_ptr = 1;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] build lli table table_data_size: (hex) %x\n",
+ current->pid, table_data_size);
+
+ /* Fill the table till table size reaches the needed amount */
+ while (curr_table_data_size < table_data_size) {
+ /* Update the number of entries in table */
+ (*num_table_entries_ptr)++;
+
+ lli_table_ptr->bus_address =
+ cpu_to_le32(lli_array_ptr[array_counter].bus_address);
+
+ lli_table_ptr->block_size =
+ cpu_to_le32(lli_array_ptr[array_counter].block_size);
+
+ curr_table_data_size += lli_array_ptr[array_counter].block_size;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] lli_table_ptr is %p\n",
+ current->pid, lli_table_ptr);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] lli_table_ptr->bus_address: %08lx\n",
+ current->pid,
+ (unsigned long)lli_table_ptr->bus_address);
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] lli_table_ptr->block_size is (hex) %x\n",
+ current->pid, lli_table_ptr->block_size);
+
+ /* Check for overflow of the table data */
+ if (curr_table_data_size > table_data_size) {
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] curr_table_data_size too large\n",
+ current->pid);
+
+ /* Update the size of block in the table */
+ lli_table_ptr->block_size =
+ cpu_to_le32(lli_table_ptr->block_size) -
+ (curr_table_data_size - table_data_size);
+
+ /* Update the physical address in the lli array */
+ lli_array_ptr[array_counter].bus_address +=
+ cpu_to_le32(lli_table_ptr->block_size);
+
+ /* Update the block size left in the lli array */
+ lli_array_ptr[array_counter].block_size =
+ (curr_table_data_size - table_data_size);
+ } else
+ /* Advance to the next entry in the lli_array */
+ array_counter++;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] lli_table_ptr->bus_address is %08lx\n",
+ current->pid,
+ (unsigned long)lli_table_ptr->bus_address);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] lli_table_ptr->block_size is (hex) %x\n",
+ current->pid,
+ lli_table_ptr->block_size);
+
+ /* Move to the next entry in table */
+ lli_table_ptr++;
+ }
+
+ /* Set the info entry to default */
+ lli_table_ptr->bus_address = 0xffffffff;
+ lli_table_ptr->block_size = 0;
+
+ /* Set the output parameter */
+ *num_processed_entries_ptr += array_counter;
+
+}
+
+/**
+ * sep_shared_area_virt_to_bus - map shared area to bus address
+ * @sep: pointer to struct sep_device
+ * @virt_address: virtual address to convert
+ *
+ * This function returns the bus address inside the shared area according
+ * to the virtual address. It can be either on the external RAM device
+ * (ioremapped), or on the system RAM
+ * This implementation is for the external RAM
+ */
+static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
+ void *virt_address)
+{
+ dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys v %p\n",
+ current->pid, virt_address);
+ dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys p %08lx\n",
+ current->pid,
+ (unsigned long)
+ sep->shared_bus + (virt_address - sep->shared_addr));
+
+ return sep->shared_bus + (size_t)(virt_address - sep->shared_addr);
+}
+
+/**
+ * sep_shared_area_bus_to_virt - map shared area bus address to kernel
+ * @sep: pointer to struct sep_device
+ * @bus_address: bus address to convert
+ *
+ * This function returns the virtual address inside the shared area
+ * according to the bus address. It can be either on the
+ * external RAM device (ioremapped), or on the system RAM
+ * This implementation is for the external RAM
+ */
+static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
+ dma_addr_t bus_address)
+{
+ dev_dbg(&sep->pdev->dev, "[PID%d] shared bus to virt b=%lx v=%lx\n",
+ current->pid,
+ (unsigned long)bus_address, (unsigned long)(sep->shared_addr +
+ (size_t)(bus_address - sep->shared_bus)));
+
+ return sep->shared_addr + (size_t)(bus_address - sep->shared_bus);
+}
+
+/**
+ * sep_debug_print_lli_tables - dump LLI table
+ * @sep: pointer to struct sep_device
+ * @lli_table_ptr: pointer to sep_lli_entry
+ * @num_table_entries: number of entries
+ * @table_data_size: total data size
+ *
+ * Walk the list of created tables and print all the data
+ */
+static void sep_debug_print_lli_tables(struct sep_device *sep,
+ struct sep_lli_entry *lli_table_ptr,
+ unsigned long num_table_entries,
+ unsigned long table_data_size)
+{
+#ifdef DEBUG
+ unsigned long table_count = 1;
+ unsigned long entries_count = 0;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables start\n",
+ current->pid);
+ if (num_table_entries == 0) {
+ dev_dbg(&sep->pdev->dev, "[PID%d] no table to print\n",
+ current->pid);
+ return;
+ }
+
+ while ((unsigned long) lli_table_ptr->bus_address != 0xffffffff) {
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] lli table %08lx, "
+ "table_data_size is (hex) %lx\n",
+ current->pid, table_count, table_data_size);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] num_table_entries is (hex) %lx\n",
+ current->pid, num_table_entries);
+
+ /* Print entries of the table (without info entry) */
+ for (entries_count = 0; entries_count < num_table_entries;
+ entries_count++, lli_table_ptr++) {
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] lli_table_ptr address is %08lx\n",
+ current->pid,
+ (unsigned long) lli_table_ptr);
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] phys address is %08lx "
+ "block size is (hex) %x\n", current->pid,
+ (unsigned long)lli_table_ptr->bus_address,
+ lli_table_ptr->block_size);
+ }
+
+ /* Point to the info entry */
+ lli_table_ptr--;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] phys lli_table_ptr->block_size "
+ "is (hex) %x\n",
+ current->pid,
+ lli_table_ptr->block_size);
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] phys lli_table_ptr->physical_address "
+ "is %08lx\n",
+ current->pid,
+ (unsigned long)lli_table_ptr->bus_address);
+
+
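+		/*
+		 * The info entry packs the next table's descriptor into
+		 * block_size: bits 31:24 hold the number of entries and
+		 * bits 23:0 hold that table's data size.
+		 */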
+ table_data_size = lli_table_ptr->block_size & 0xffffff;
+ num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] phys table_data_size is "
+ "(hex) %lx num_table_entries is"
+ " %lx bus_address is%lx\n",
+ current->pid,
+ table_data_size,
+ num_table_entries,
+ (unsigned long)lli_table_ptr->bus_address);
+
+ if ((unsigned long)lli_table_ptr->bus_address != 0xffffffff)
+ lli_table_ptr = (struct sep_lli_entry *)
+ sep_shared_bus_to_virt(sep,
+ (unsigned long)lli_table_ptr->bus_address);
+
+ table_count++;
+ }
+ dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables end\n",
+ current->pid);
+#endif
+}
+
+
+/**
+ * sep_prepare_empty_lli_table - create a blank LLI table
+ * @sep: pointer to struct sep_device
+ * @lli_table_addr_ptr: pointer to lli table
+ * @num_entries_ptr: pointer to number of entries
+ * @table_data_size_ptr: pointer to table data size
+ * @dmatables_region: Optional buffer for DMA tables
+ * @dma_ctx: DMA context
+ *
+ * This function creates empty lli tables when there is no data
+ */
+static void sep_prepare_empty_lli_table(struct sep_device *sep,
+ dma_addr_t *lli_table_addr_ptr,
+ u32 *num_entries_ptr,
+ u32 *table_data_size_ptr,
+ void **dmatables_region,
+ struct sep_dma_context *dma_ctx)
+{
+ struct sep_lli_entry *lli_table_ptr;
+
+ /* Find the area for new table */
+ lli_table_ptr =
+ (struct sep_lli_entry *)(sep->shared_addr +
+ SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
+ dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
+
+ if (dmatables_region && *dmatables_region)
+ lli_table_ptr = *dmatables_region;
+
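+	/* An empty table is a null data entry followed by the info/terminator entry */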
+ lli_table_ptr->bus_address = 0;
+ lli_table_ptr->block_size = 0;
+
+ lli_table_ptr++;
+ lli_table_ptr->bus_address = 0xFFFFFFFF;
+ lli_table_ptr->block_size = 0;
+
+ /* Set the output parameter value */
+ *lli_table_addr_ptr = sep->shared_bus +
+ SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
+ dma_ctx->num_lli_tables_created *
+ sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
+
+ /* Set the num of entries and table data size for empty table */
+ *num_entries_ptr = 2;
+ *table_data_size_ptr = 0;
+
+ /* Update the number of created tables */
+ dma_ctx->num_lli_tables_created++;
+}
+
+/**
+ * sep_prepare_input_dma_table - prepare input DMA mappings
+ * @sep: pointer to struct sep_device
+ * @app_virt_addr: virtual address of the input buffer
+ * @data_size: size of the data to DMA
+ * @block_size: block size of the operation, used for alignment
+ * @lli_table_ptr: returned bus address of the first input LLI table
+ * @num_entries_ptr: returned number of entries in the first table
+ * @table_data_size_ptr: returned data size of the first table
+ * @is_kva: set for kernel data (kernel crypto call)
+ * @dmatables_region: optional buffer for DMA tables
+ * @dma_ctx: DMA context for the transaction
+ *
+ * This function prepares only the input DMA table for synchronous symmetric
+ * operations (HASH)
+ * Note that all bus addresses that are passed to the SEP
+ * are in 32 bit format; the SEP is a 32 bit device
+ */
+static int sep_prepare_input_dma_table(struct sep_device *sep,
+ unsigned long app_virt_addr,
+ u32 data_size,
+ u32 block_size,
+ dma_addr_t *lli_table_ptr,
+ u32 *num_entries_ptr,
+ u32 *table_data_size_ptr,
+ bool is_kva,
+ void **dmatables_region,
+ struct sep_dma_context *dma_ctx
+)
+{
+ int error = 0;
+ /* Pointer to the info entry of the table - the last entry */
+ struct sep_lli_entry *info_entry_ptr;
+ /* Array of pointers to page */
+ struct sep_lli_entry *lli_array_ptr;
+ /* Points to the first entry to be processed in the lli_in_array */
+ u32 current_entry = 0;
+ /* Num entries in the virtual buffer */
+ u32 sep_lli_entries = 0;
+ /* Lli table pointer */
+ struct sep_lli_entry *in_lli_table_ptr;
+ /* The total data in one table */
+ u32 table_data_size = 0;
+ /* Flag for last table */
+ u32 last_table_flag = 0;
+ /* Number of entries in lli table */
+ u32 num_entries_in_table = 0;
+ /* Next table address */
+ void *lli_table_alloc_addr = NULL;
+ void *dma_lli_table_alloc_addr = NULL;
+ void *dma_in_lli_table_ptr = NULL;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] prepare intput dma "
+ "tbl data size: (hex) %x\n",
+ current->pid, data_size);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] block_size is (hex) %x\n",
+ current->pid, block_size);
+
+ /* Initialize the pages pointers */
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages = 0;
+
+ /* Set the kernel address for first table to be allocated */
+ lli_table_alloc_addr = (void *)(sep->shared_addr +
+ SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
+ dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
+
+ if (data_size == 0) {
+ if (dmatables_region) {
+ error = sep_allocate_dmatables_region(sep,
+ dmatables_region,
+ dma_ctx,
+ 1);
+ if (error)
+ return error;
+ }
+		/* Special case - create empty table - 2 entries, zero data */
+ sep_prepare_empty_lli_table(sep, lli_table_ptr,
+ num_entries_ptr, table_data_size_ptr,
+ dmatables_region, dma_ctx);
+ goto update_dcb_counter;
+ }
+
+ /* Check if the pages are in Kernel Virtual Address layout */
+ if (is_kva == true)
+ error = sep_lock_kernel_pages(sep, app_virt_addr,
+ data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
+ dma_ctx);
+ else
+ /*
+ * Lock the pages of the user buffer
+ * and translate them to pages
+ */
+ error = sep_lock_user_pages(sep, app_virt_addr,
+ data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
+ dma_ctx);
+
+ if (error)
+ goto end_function;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] output sep_in_num_pages is (hex) %x\n",
+ current->pid,
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);
+
+ current_entry = 0;
+ info_entry_ptr = NULL;
+
+ sep_lli_entries =
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages;
+
+ dma_lli_table_alloc_addr = lli_table_alloc_addr;
+ if (dmatables_region) {
+ error = sep_allocate_dmatables_region(sep,
+ dmatables_region,
+ dma_ctx,
+ sep_lli_entries);
+ if (error)
+ return error;
+ lli_table_alloc_addr = *dmatables_region;
+ }
+
+	/* Loop until all the entries in the input array are processed */
+ while (current_entry < sep_lli_entries) {
+
+ /* Set the new input and output tables */
+ in_lli_table_ptr =
+ (struct sep_lli_entry *)lli_table_alloc_addr;
+ dma_in_lli_table_ptr =
+ (struct sep_lli_entry *)dma_lli_table_alloc_addr;
+
+ lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
+ dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
+
+ if (dma_lli_table_alloc_addr >
+ ((void *)sep->shared_addr +
+ SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
+ SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
+
+ error = -ENOMEM;
+ goto end_function_error;
+
+ }
+
+ /* Update the number of created tables */
+ dma_ctx->num_lli_tables_created++;
+
+ /* Calculate the maximum size of data for input table */
+ table_data_size = sep_calculate_lli_table_max_size(sep,
+ &lli_array_ptr[current_entry],
+ (sep_lli_entries - current_entry),
+ &last_table_flag);
+
+ /*
+ * If this is not the last table -
+		 * then align it to the block size
+ */
+ if (!last_table_flag)
+ table_data_size =
+ (table_data_size / block_size) * block_size;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] output table_data_size is (hex) %x\n",
+ current->pid,
+ table_data_size);
+
+ /* Construct input lli table */
+ sep_build_lli_table(sep, &lli_array_ptr[current_entry],
+ in_lli_table_ptr,
+ &current_entry, &num_entries_in_table, table_data_size);
+
+ if (info_entry_ptr == NULL) {
+
+ /* Set the output parameters to physical addresses */
+ *lli_table_ptr = sep_shared_area_virt_to_bus(sep,
+ dma_in_lli_table_ptr);
+ *num_entries_ptr = num_entries_in_table;
+ *table_data_size_ptr = table_data_size;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] output lli_table_in_ptr is %08lx\n",
+ current->pid,
+ (unsigned long)*lli_table_ptr);
+
+ } else {
+ /* Update the info entry of the previous in table */
+ info_entry_ptr->bus_address =
+ sep_shared_area_virt_to_bus(sep,
+ dma_in_lli_table_ptr);
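+			/* Pack next table's entry count (bits 31:24) and data size (bits 23:0) */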
+ info_entry_ptr->block_size =
+ ((num_entries_in_table) << 24) |
+ (table_data_size);
+ }
+ /* Save the pointer to the info entry of the current tables */
+ info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
+ }
+ /* Print input tables */
+ if (!dmatables_region) {
+ sep_debug_print_lli_tables(sep, (struct sep_lli_entry *)
+ sep_shared_area_bus_to_virt(sep, *lli_table_ptr),
+ *num_entries_ptr, *table_data_size_ptr);
+ }
+
+ /* The array of the pages */
+ kfree(lli_array_ptr);
+
+update_dcb_counter:
+ /* Update DCB counter */
+ dma_ctx->nr_dcb_creat++;
+ goto end_function;
+
+end_function_error:
+ /* Free all the allocated resources */
+ kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
+ kfree(lli_array_ptr);
+ kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
+
+end_function:
+ return error;
+
+}
+
+/**
+ * sep_construct_dma_tables_from_lli - prepare AES/DES mappings
+ * @sep: pointer to struct sep_device
+ * @lli_in_array: array of input lli entries
+ * @sep_in_lli_entries: number of entries in the input lli array
+ * @lli_out_array: array of output lli entries
+ * @sep_out_lli_entries: number of entries in the output lli array
+ * @block_size: block size of the cipher, used for alignment
+ * @lli_table_in_ptr: returned bus address of the first input LLI table
+ * @lli_table_out_ptr: returned bus address of the first output LLI table
+ * @in_num_entries_ptr: returned number of entries in the first input table
+ * @out_num_entries_ptr: returned number of entries in the first output table
+ * @table_data_size_ptr: returned data size of the first table
+ * @dmatables_region: optional buffer for DMA tables
+ * @dma_ctx: DMA context for the transaction
+ *
+ * This function creates the input and output DMA tables for
+ * symmetric operations (AES/DES) according to the block
+ * size from LLI arrays
+ * Note that all bus addresses that are passed to the SEP
+ * are in 32 bit format; the SEP is a 32 bit device
+ */
+static int sep_construct_dma_tables_from_lli(
+ struct sep_device *sep,
+ struct sep_lli_entry *lli_in_array,
+ u32 sep_in_lli_entries,
+ struct sep_lli_entry *lli_out_array,
+ u32 sep_out_lli_entries,
+ u32 block_size,
+ dma_addr_t *lli_table_in_ptr,
+ dma_addr_t *lli_table_out_ptr,
+ u32 *in_num_entries_ptr,
+ u32 *out_num_entries_ptr,
+ u32 *table_data_size_ptr,
+ void **dmatables_region,
+ struct sep_dma_context *dma_ctx)
+{
+ /* Points to the area where next lli table can be allocated */
+ void *lli_table_alloc_addr = NULL;
+ /*
+ * Points to the area in shared region where next lli table
+ * can be allocated
+ */
+ void *dma_lli_table_alloc_addr = NULL;
+ /* Input lli table in dmatables_region or shared region */
+ struct sep_lli_entry *in_lli_table_ptr = NULL;
+ /* Input lli table location in the shared region */
+ struct sep_lli_entry *dma_in_lli_table_ptr = NULL;
+ /* Output lli table in dmatables_region or shared region */
+ struct sep_lli_entry *out_lli_table_ptr = NULL;
+ /* Output lli table location in the shared region */
+ struct sep_lli_entry *dma_out_lli_table_ptr = NULL;
+ /* Pointer to the info entry of the table - the last entry */
+ struct sep_lli_entry *info_in_entry_ptr = NULL;
+ /* Pointer to the info entry of the table - the last entry */
+ struct sep_lli_entry *info_out_entry_ptr = NULL;
+ /* Points to the first entry to be processed in the lli_in_array */
+ u32 current_in_entry = 0;
+ /* Points to the first entry to be processed in the lli_out_array */
+ u32 current_out_entry = 0;
+ /* Max size of the input table */
+ u32 in_table_data_size = 0;
+ /* Max size of the output table */
+ u32 out_table_data_size = 0;
+	/* Flag that signifies if this is the last table built */
+ u32 last_table_flag = 0;
+ /* The data size that should be in table */
+ u32 table_data_size = 0;
+	/* Number of entries in the input table */
+ u32 num_entries_in_table = 0;
+	/* Number of entries in the output table */
+ u32 num_entries_out_table = 0;
+
+ if (!dma_ctx) {
+ dev_warn(&sep->pdev->dev, "DMA context uninitialized\n");
+ return -EINVAL;
+ }
+
+	/* Initialize to point past the message area */
+ lli_table_alloc_addr = (void *)(sep->shared_addr +
+ SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
+ (dma_ctx->num_lli_tables_created *
+ (sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP)));
+ dma_lli_table_alloc_addr = lli_table_alloc_addr;
+
+ if (dmatables_region) {
+ /* 2 for both in+out table */
+ if (sep_allocate_dmatables_region(sep,
+ dmatables_region,
+ dma_ctx,
+ 2*sep_in_lli_entries))
+ return -ENOMEM;
+ lli_table_alloc_addr = *dmatables_region;
+ }
+
+	/* Loop until all the entries in the input array are processed */
+ while (current_in_entry < sep_in_lli_entries) {
+ /* Set the new input and output tables */
+ in_lli_table_ptr =
+ (struct sep_lli_entry *)lli_table_alloc_addr;
+ dma_in_lli_table_ptr =
+ (struct sep_lli_entry *)dma_lli_table_alloc_addr;
+
+ lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
+ dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
+
+ /* Set the first output tables */
+ out_lli_table_ptr =
+ (struct sep_lli_entry *)lli_table_alloc_addr;
+ dma_out_lli_table_ptr =
+ (struct sep_lli_entry *)dma_lli_table_alloc_addr;
+
+ /* Check if the DMA table area limit was overrun */
+ if ((dma_lli_table_alloc_addr + sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP) >
+ ((void *)sep->shared_addr +
+ SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
+ SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
+
+ dev_warn(&sep->pdev->dev, "dma table limit overrun\n");
+ return -ENOMEM;
+ }
+
+ /* Update the number of the lli tables created */
+ dma_ctx->num_lli_tables_created += 2;
+
+ lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
+ dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
+
+ /* Calculate the maximum size of data for input table */
+ in_table_data_size =
+ sep_calculate_lli_table_max_size(sep,
+ &lli_in_array[current_in_entry],
+ (sep_in_lli_entries - current_in_entry),
+ &last_table_flag);
+
+ /* Calculate the maximum size of data for output table */
+ out_table_data_size =
+ sep_calculate_lli_table_max_size(sep,
+ &lli_out_array[current_out_entry],
+ (sep_out_lli_entries - current_out_entry),
+ &last_table_flag);
+
+ if (!last_table_flag) {
+ in_table_data_size = (in_table_data_size /
+ block_size) * block_size;
+ out_table_data_size = (out_table_data_size /
+ block_size) * block_size;
+ }
+
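+		/*
+		 * Use the smaller of the two sizes so the input and output
+		 * tables describe the same amount of data.
+		 */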
+ table_data_size = in_table_data_size;
+ if (table_data_size > out_table_data_size)
+ table_data_size = out_table_data_size;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] construct tables from lli"
+ " in_table_data_size is (hex) %x\n", current->pid,
+ in_table_data_size);
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] construct tables from lli"
+ "out_table_data_size is (hex) %x\n", current->pid,
+ out_table_data_size);
+
+ /* Construct input lli table */
+ sep_build_lli_table(sep, &lli_in_array[current_in_entry],
+ in_lli_table_ptr,
+ &current_in_entry,
+ &num_entries_in_table,
+ table_data_size);
+
+ /* Construct output lli table */
+ sep_build_lli_table(sep, &lli_out_array[current_out_entry],
+ out_lli_table_ptr,
+ &current_out_entry,
+ &num_entries_out_table,
+ table_data_size);
+
+ /* If info entry is null - this is the first table built */
+ if (info_in_entry_ptr == NULL) {
+ /* Set the output parameters to physical addresses */
+ *lli_table_in_ptr =
+ sep_shared_area_virt_to_bus(sep, dma_in_lli_table_ptr);
+
+ *in_num_entries_ptr = num_entries_in_table;
+
+ *lli_table_out_ptr =
+ sep_shared_area_virt_to_bus(sep,
+ dma_out_lli_table_ptr);
+
+ *out_num_entries_ptr = num_entries_out_table;
+ *table_data_size_ptr = table_data_size;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] output lli_table_in_ptr is %08lx\n",
+ current->pid,
+ (unsigned long)*lli_table_in_ptr);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] output lli_table_out_ptr is %08lx\n",
+ current->pid,
+ (unsigned long)*lli_table_out_ptr);
+ } else {
+ /* Update the info entry of the previous in table */
+ info_in_entry_ptr->bus_address =
+ sep_shared_area_virt_to_bus(sep,
+ dma_in_lli_table_ptr);
+
+ info_in_entry_ptr->block_size =
+ ((num_entries_in_table) << 24) |
+ (table_data_size);
+
+			/* Update the info entry of the previous out table */
+ info_out_entry_ptr->bus_address =
+ sep_shared_area_virt_to_bus(sep,
+ dma_out_lli_table_ptr);
+
+ info_out_entry_ptr->block_size =
+ ((num_entries_out_table) << 24) |
+ (table_data_size);
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] output lli_table_in_ptr:%08lx %08x\n",
+ current->pid,
+ (unsigned long)info_in_entry_ptr->bus_address,
+ info_in_entry_ptr->block_size);
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] output lli_table_out_ptr:"
+ "%08lx %08x\n",
+ current->pid,
+ (unsigned long)info_out_entry_ptr->bus_address,
+ info_out_entry_ptr->block_size);
+ }
+
+ /* Save the pointer to the info entry of the current tables */
+ info_in_entry_ptr = in_lli_table_ptr +
+ num_entries_in_table - 1;
+ info_out_entry_ptr = out_lli_table_ptr +
+ num_entries_out_table - 1;
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] output num_entries_out_table is %x\n",
+ current->pid,
+ (u32)num_entries_out_table);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] output info_in_entry_ptr is %lx\n",
+ current->pid,
+ (unsigned long)info_in_entry_ptr);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] output info_out_entry_ptr is %lx\n",
+ current->pid,
+ (unsigned long)info_out_entry_ptr);
+ }
+
+ /* Print input tables */
+ if (!dmatables_region) {
+ sep_debug_print_lli_tables(
+ sep,
+ (struct sep_lli_entry *)
+ sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr),
+ *in_num_entries_ptr,
+ *table_data_size_ptr);
+ }
+
+ /* Print output tables */
+ if (!dmatables_region) {
+ sep_debug_print_lli_tables(
+ sep,
+ (struct sep_lli_entry *)
+ sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr),
+ *out_num_entries_ptr,
+ *table_data_size_ptr);
+ }
+
+ return 0;
+}
+
+/**
+ * sep_prepare_input_output_dma_table - prepare DMA I/O table
+ * @sep: pointer to struct sep_device
+ * @app_virt_in_addr: virtual address of the input buffer
+ * @app_virt_out_addr: virtual address of the output buffer
+ * @data_size: size of the data to DMA
+ * @block_size: block size of the cipher, used for alignment
+ * @lli_table_in_ptr: returned bus address of the first input LLI table
+ * @lli_table_out_ptr: returned bus address of the first output LLI table
+ * @in_num_entries_ptr: returned number of entries in the first input table
+ * @out_num_entries_ptr: returned number of entries in the first output table
+ * @table_data_size_ptr: returned data size of the first table
+ * @is_kva: set for kernel data; used only for kernel crypto module
+ * @dmatables_region: optional buffer for DMA tables
+ * @dma_ctx: DMA context for the transaction
+ *
+ * This function builds input and output DMA tables for synchronous
+ * symmetric operations (AES, DES, HASH). It also checks that each table
+ * is of the modular block size
+ * Note that all bus addresses that are passed to the SEP
+ * are in 32 bit format; the SEP is a 32 bit device
+ */
+static int sep_prepare_input_output_dma_table(struct sep_device *sep,
+ unsigned long app_virt_in_addr,
+ unsigned long app_virt_out_addr,
+ u32 data_size,
+ u32 block_size,
+ dma_addr_t *lli_table_in_ptr,
+ dma_addr_t *lli_table_out_ptr,
+ u32 *in_num_entries_ptr,
+ u32 *out_num_entries_ptr,
+ u32 *table_data_size_ptr,
+ bool is_kva,
+ void **dmatables_region,
+ struct sep_dma_context *dma_ctx)
+
+{
+ int error = 0;
+	/* Array of input page lli entries */
+	struct sep_lli_entry *lli_in_array;
+	/* Array of output page lli entries */
+ struct sep_lli_entry *lli_out_array;
+
+ if (!dma_ctx) {
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ if (data_size == 0) {
+ /* Prepare empty table for input and output */
+ if (dmatables_region) {
+ error = sep_allocate_dmatables_region(
+ sep,
+ dmatables_region,
+ dma_ctx,
+ 2);
+ if (error)
+ goto end_function;
+ }
+ sep_prepare_empty_lli_table(sep, lli_table_in_ptr,
+ in_num_entries_ptr, table_data_size_ptr,
+ dmatables_region, dma_ctx);
+
+ sep_prepare_empty_lli_table(sep, lli_table_out_ptr,
+ out_num_entries_ptr, table_data_size_ptr,
+ dmatables_region, dma_ctx);
+
+ goto update_dcb_counter;
+ }
+
+ /* Initialize the pages pointers */
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
+
+ /* Lock the pages of the buffer and translate them to pages */
+ if (is_kva == true) {
+ dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel input pages\n",
+ current->pid);
+ error = sep_lock_kernel_pages(sep, app_virt_in_addr,
+ data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
+ dma_ctx);
+ if (error) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] sep_lock_kernel_pages for input "
+ "virtual buffer failed\n", current->pid);
+
+ goto end_function;
+ }
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel output pages\n",
+ current->pid);
+ error = sep_lock_kernel_pages(sep, app_virt_out_addr,
+ data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
+ dma_ctx);
+
+ if (error) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] sep_lock_kernel_pages for output "
+ "virtual buffer failed\n", current->pid);
+
+ goto end_function_free_lli_in;
+ }
+
+ }
+
+ else {
+ dev_dbg(&sep->pdev->dev, "[PID%d] Locking user input pages\n",
+ current->pid);
+ error = sep_lock_user_pages(sep, app_virt_in_addr,
+ data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
+ dma_ctx);
+ if (error) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] sep_lock_user_pages for input "
+ "virtual buffer failed\n", current->pid);
+
+ goto end_function;
+ }
+
+ if (dma_ctx->secure_dma == true) {
+			/* secure_dma requires use of non-accessible memory */
+ dev_dbg(&sep->pdev->dev, "[PID%d] in secure_dma\n",
+ current->pid);
+ error = sep_lli_table_secure_dma(sep,
+ app_virt_out_addr, data_size, &lli_out_array,
+ SEP_DRIVER_OUT_FLAG, dma_ctx);
+ if (error) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] secure dma table setup "
+ " for output virtual buffer failed\n",
+ current->pid);
+
+ goto end_function_free_lli_in;
+ }
+ } else {
+ /* For normal, non-secure dma */
+ dev_dbg(&sep->pdev->dev, "[PID%d] not in secure_dma\n",
+ current->pid);
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] Locking user output pages\n",
+ current->pid);
+
+ error = sep_lock_user_pages(sep, app_virt_out_addr,
+ data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
+ dma_ctx);
+
+ if (error) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] sep_lock_user_pages"
+ " for output virtual buffer failed\n",
+ current->pid);
+
+ goto end_function_free_lli_in;
+ }
+ }
+ }
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] After lock; prep input output dma "
+ "table sep_in_num_pages is (hex) %x\n", current->pid,
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] sep_out_num_pages is (hex) %x\n",
+ current->pid,
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP"
+ " is (hex) %x\n", current->pid,
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
+
+	/* Call the function that creates the tables from the lli arrays */
+ dev_dbg(&sep->pdev->dev, "[PID%d] calling create table from lli\n",
+ current->pid);
+ error = sep_construct_dma_tables_from_lli(
+ sep, lli_in_array,
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
+ in_num_pages,
+ lli_out_array,
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
+ out_num_pages,
+ block_size, lli_table_in_ptr, lli_table_out_ptr,
+ in_num_entries_ptr, out_num_entries_ptr,
+ table_data_size_ptr, dmatables_region, dma_ctx);
+
+ if (error) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] sep_construct_dma_tables_from_lli failed\n",
+ current->pid);
+ goto end_function_with_error;
+ }
+
+ kfree(lli_out_array);
+ kfree(lli_in_array);
+
+update_dcb_counter:
+ /* Update DCB counter */
+ dma_ctx->nr_dcb_creat++;
+
+ goto end_function;
+
+end_function_with_error:
+ kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array);
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
+ kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array);
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
+ kfree(lli_out_array);
+
+
+end_function_free_lli_in:
+ kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
+ kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
+ dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
+ kfree(lli_in_array);
+
+end_function:
+
+ return error;
+
+}
+
+/**
+ * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
+ * @sep: pointer to struct sep_device
+ * @app_in_address: unsigned long; for data buffer in (user space)
+ * @app_out_address: unsigned long; for data buffer out (user space)
+ * @data_in_size: u32; for size of data
+ * @block_size: u32; for block size
+ * @tail_block_size: u32; for size of tail block
+ * @isapplet: bool; to indicate external app
+ * @is_kva: bool; kernel buffer; only used for kernel crypto module
+ * @secure_dma: indicates whether this is secure_dma using IMR
+ * @dcb_region: DCB region to use, or NULL to use the shared area DCB
+ * @dmatables_region: optional buffer for DMA tables
+ * @dma_ctx: DMA context for the transaction
+ * @src_sg: source scatterlist (kernel crypto only)
+ * @dst_sg: destination scatterlist (kernel crypto only)
+ *
+ * This function prepares the linked DMA tables and puts the
+ * address for the linked list of tables into a DCB (data control
+ * block) the address of which is known by the SEP hardware
+ * Note that all bus addresses that are passed to the SEP
+ * are in 32 bit format; the SEP is a 32 bit device
+ */
+int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
+ unsigned long app_in_address,
+ unsigned long app_out_address,
+ u32 data_in_size,
+ u32 block_size,
+ u32 tail_block_size,
+ bool isapplet,
+ bool is_kva,
+ bool secure_dma,
+ struct sep_dcblock *dcb_region,
+ void **dmatables_region,
+ struct sep_dma_context **dma_ctx,
+ struct scatterlist *src_sg,
+ struct scatterlist *dst_sg)
+{
+ int error = 0;
+ /* Size of tail */
+ u32 tail_size = 0;
+ /* Address of the created DCB table */
+ struct sep_dcblock *dcb_table_ptr = NULL;
+ /* The physical address of the first input DMA table */
+ dma_addr_t in_first_mlli_address = 0;
+ /* Number of entries in the first input DMA table */
+ u32 in_first_num_entries = 0;
+ /* The physical address of the first output DMA table */
+ dma_addr_t out_first_mlli_address = 0;
+ /* Number of entries in the first output DMA table */
+ u32 out_first_num_entries = 0;
+ /* Data in the first input/output table */
+ u32 first_data_size = 0;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] app_in_address %lx\n",
+ current->pid, app_in_address);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] app_out_address %lx\n",
+ current->pid, app_out_address);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] data_in_size %x\n",
+ current->pid, data_in_size);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] block_size %x\n",
+ current->pid, block_size);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] tail_block_size %x\n",
+ current->pid, tail_block_size);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] isapplet %x\n",
+ current->pid, isapplet);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] is_kva %x\n",
+ current->pid, is_kva);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] src_sg %p\n",
+ current->pid, src_sg);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] dst_sg %p\n",
+ current->pid, dst_sg);
+
+ if (!dma_ctx) {
+ dev_warn(&sep->pdev->dev, "[PID%d] no DMA context pointer\n",
+ current->pid);
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ if (*dma_ctx) {
+ /* In case there are multiple DCBs for this transaction */
+ dev_dbg(&sep->pdev->dev, "[PID%d] DMA context already set\n",
+ current->pid);
+ } else {
+ *dma_ctx = kzalloc(sizeof(**dma_ctx), GFP_KERNEL);
+ if (!(*dma_ctx)) {
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] Not enough memory for DMA context\n",
+ current->pid);
+ error = -ENOMEM;
+ goto end_function;
+ }
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] Created DMA context addr at 0x%p\n",
+ current->pid, *dma_ctx);
+ }
+
+ (*dma_ctx)->secure_dma = secure_dma;
+
+ /* these are for kernel crypto only */
+ (*dma_ctx)->src_sg = src_sg;
+ (*dma_ctx)->dst_sg = dst_sg;
+
+ if ((*dma_ctx)->nr_dcb_creat == SEP_MAX_NUM_SYNC_DMA_OPS) {
+ /* No more DCBs to allocate */
+ dev_dbg(&sep->pdev->dev, "[PID%d] no more DCBs available\n",
+ current->pid);
+ error = -ENOSPC;
+ goto end_function_error;
+ }
+
+ /* Allocate new DCB */
+ if (dcb_region) {
+ dcb_table_ptr = dcb_region;
+ } else {
+ dcb_table_ptr = (struct sep_dcblock *)(sep->shared_addr +
+ SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES +
+ ((*dma_ctx)->nr_dcb_creat *
+ sizeof(struct sep_dcblock)));
+ }
+
+ /* Set the default values in the DCB */
+ dcb_table_ptr->input_mlli_address = 0;
+ dcb_table_ptr->input_mlli_num_entries = 0;
+ dcb_table_ptr->input_mlli_data_size = 0;
+ dcb_table_ptr->output_mlli_address = 0;
+ dcb_table_ptr->output_mlli_num_entries = 0;
+ dcb_table_ptr->output_mlli_data_size = 0;
+ dcb_table_ptr->tail_data_size = 0;
+ dcb_table_ptr->out_vr_tail_pt = 0;
+
+ if (isapplet == true) {
+
+ /* Check if there is enough data for DMA operation */
+ if (data_in_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) {
+ if (is_kva == true) {
+ error = -ENODEV;
+ goto end_function_error;
+ } else {
+ if (copy_from_user(dcb_table_ptr->tail_data,
+ (void __user *)app_in_address,
+ data_in_size)) {
+ error = -EFAULT;
+ goto end_function_error;
+ }
+ }
+
+ dcb_table_ptr->tail_data_size = data_in_size;
+
+ /* Set the output user-space address for mem2mem op */
+ if (app_out_address)
+ dcb_table_ptr->out_vr_tail_pt =
+ (aligned_u64)app_out_address;
+
+ /*
+ * Update both data length parameters in order to avoid
+ * second data copy and allow building of empty mlli
+ * tables
+ */
+ tail_size = 0x0;
+ data_in_size = 0x0;
+
+ } else {
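+			/*
+			 * When there is no output buffer, any remainder that
+			 * does not fill a whole block is carried as tail data
+			 * inside the DCB rather than being part of the DMA
+			 * tables.
+			 */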
+ if (!app_out_address) {
+ tail_size = data_in_size % block_size;
+ if (!tail_size) {
+ if (tail_block_size == block_size)
+ tail_size = block_size;
+ }
+ } else {
+ tail_size = 0;
+ }
+ }
+ if (tail_size) {
+ if (tail_size > sizeof(dcb_table_ptr->tail_data))
+ return -EINVAL;
+ if (is_kva == true) {
+ error = -ENODEV;
+ goto end_function_error;
+ } else {
+ /* We have tail data - copy it to DCB */
+ if (copy_from_user(dcb_table_ptr->tail_data,
+ (void __user *)(app_in_address +
+ data_in_size - tail_size), tail_size)) {
+ error = -EFAULT;
+ goto end_function_error;
+ }
+ }
+ if (app_out_address)
+ /*
+ * Calculate the output address
+ * according to tail data size
+ */
+ dcb_table_ptr->out_vr_tail_pt =
+ (aligned_u64)app_out_address +
+ data_in_size - tail_size;
+
+ /* Save the real tail data size */
+ dcb_table_ptr->tail_data_size = tail_size;
+ /*
+ * Update the data size without the tail
+ * data size AKA data for the dma
+ */
+ data_in_size = (data_in_size - tail_size);
+ }
+ }
+ /* Check if we need to build only input table or input/output */
+ if (app_out_address) {
+ /* Prepare input/output tables */
+ error = sep_prepare_input_output_dma_table(sep,
+ app_in_address,
+ app_out_address,
+ data_in_size,
+ block_size,
+ &in_first_mlli_address,
+ &out_first_mlli_address,
+ &in_first_num_entries,
+ &out_first_num_entries,
+ &first_data_size,
+ is_kva,
+ dmatables_region,
+ *dma_ctx);
+ } else {
+ /* Prepare input tables */
+ error = sep_prepare_input_dma_table(sep,
+ app_in_address,
+ data_in_size,
+ block_size,
+ &in_first_mlli_address,
+ &in_first_num_entries,
+ &first_data_size,
+ is_kva,
+ dmatables_region,
+ *dma_ctx);
+ }
+
+ if (error) {
+ dev_warn(&sep->pdev->dev,
+ "prepare DMA table call failed "
+ "from prepare DCB call\n");
+ goto end_function_error;
+ }
+
+ /* Set the DCB values */
+ dcb_table_ptr->input_mlli_address = in_first_mlli_address;
+ dcb_table_ptr->input_mlli_num_entries = in_first_num_entries;
+ dcb_table_ptr->input_mlli_data_size = first_data_size;
+ dcb_table_ptr->output_mlli_address = out_first_mlli_address;
+ dcb_table_ptr->output_mlli_num_entries = out_first_num_entries;
+ dcb_table_ptr->output_mlli_data_size = first_data_size;
+
+ goto end_function;
+
+end_function_error:
+ kfree(*dma_ctx);
+ *dma_ctx = NULL;
+
+end_function:
+ return error;
+
+}
+
+
+/**
+ * sep_free_dma_tables_and_dcb - free DMA tables and DCBs
+ * @sep: pointer to struct sep_device
+ * @isapplet: indicates external application (used for kernel access)
+ * @is_kva: indicates kernel addresses (only used for kernel crypto)
+ *
+ * This function frees the DMA tables and DCB
+ */
+static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet,
+ bool is_kva, struct sep_dma_context **dma_ctx)
+{
+ struct sep_dcblock *dcb_table_ptr;
+ unsigned long pt_hold;
+ void *tail_pt;
+
+ int i = 0;
+ int error = 0;
+ int error_temp = 0;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb\n",
+ current->pid);
+
+ if (((*dma_ctx)->secure_dma == false) && (isapplet == true)) {
+ dev_dbg(&sep->pdev->dev, "[PID%d] handling applet\n",
+ current->pid);
+
+ /* Tail stuff is only for non secure_dma */
+ /* Set pointer to first DCB table */
+ dcb_table_ptr = (struct sep_dcblock *)
+ (sep->shared_addr +
+ SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES);
+
+		/*
+		 * Go over each DCB and see if
+		 * tail pointer must be updated
+		 */
+ for (i = 0; dma_ctx && *dma_ctx &&
+ i < (*dma_ctx)->nr_dcb_creat; i++, dcb_table_ptr++) {
+ if (dcb_table_ptr->out_vr_tail_pt) {
+ pt_hold = (unsigned long)dcb_table_ptr->
+ out_vr_tail_pt;
+ tail_pt = (void *)pt_hold;
+ if (is_kva == true) {
+ error = -ENODEV;
+ break;
+ } else {
+ error_temp = copy_to_user(
+ (void __user *)tail_pt,
+ dcb_table_ptr->tail_data,
+ dcb_table_ptr->tail_data_size);
+ }
+ if (error_temp) {
+ /* Release the DMA resource */
+ error = -EFAULT;
+ break;
+ }
+ }
+ }
+ }
+
+ /* Free the output pages, if any */
+ sep_free_dma_table_data_handler(sep, dma_ctx);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb end\n",
+ current->pid);
+
+ return error;
+}
+
+/**
+ * sep_prepare_dcb_handler - prepare a control block
+ * @sep: pointer to struct sep_device
+ * @arg: pointer to user parameters
+ * @secure_dma: indicate whether we are using secure_dma on IMR
+ *
+ * This function copies the DCB build arguments from user space and
+ * prepares the DCB and DMA (MLLI) tables for the transaction.
+ */
+static int sep_prepare_dcb_handler(struct sep_device *sep, unsigned long arg,
+ bool secure_dma,
+ struct sep_dma_context **dma_ctx)
+{
+ int error;
+ /* Command arguments */
+ static struct build_dcb_struct command_args;
+
+ /* Get the command arguments */
+ if (copy_from_user(&command_args, (void __user *)arg,
+ sizeof(struct build_dcb_struct))) {
+ error = -EFAULT;
+ goto end_function;
+ }
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] prep dcb handler app_in_address is %08llx\n",
+ current->pid, command_args.app_in_address);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] app_out_address is %08llx\n",
+ current->pid, command_args.app_out_address);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] data_size is %x\n",
+ current->pid, command_args.data_in_size);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] block_size is %x\n",
+ current->pid, command_args.block_size);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] tail block_size is %x\n",
+ current->pid, command_args.tail_block_size);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] is_applet is %x\n",
+ current->pid, command_args.is_applet);
+
+ if (!command_args.app_in_address) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] null app_in_address\n", current->pid);
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ error = sep_prepare_input_output_dma_table_in_dcb(sep,
+ (unsigned long)command_args.app_in_address,
+ (unsigned long)command_args.app_out_address,
+ command_args.data_in_size, command_args.block_size,
+ command_args.tail_block_size,
+ command_args.is_applet, false,
+ secure_dma, NULL, NULL, dma_ctx, NULL, NULL);
+
+end_function:
+ return error;
+
+}
+
+/**
+ * sep_free_dcb_handler - free control block resources
+ * @sep: pointer to struct sep_device
+ *
+ * This function frees the DCB resources and updates the needed
+ * user-space buffers.
+ */
+static int sep_free_dcb_handler(struct sep_device *sep,
+ struct sep_dma_context **dma_ctx)
+{
+ if (!dma_ctx || !(*dma_ctx)) {
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] no dma context defined, nothing to free\n",
+ current->pid);
+ return -EINVAL;
+ }
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] free dcbs num of DCBs %x\n",
+ current->pid,
+ (*dma_ctx)->nr_dcb_creat);
+
+ return sep_free_dma_tables_and_dcb(sep, false, false, dma_ctx);
+}
+
+/**
+ * sep_ioctl - ioctl handler for sep device
+ * @filp: pointer to struct file
+ * @cmd: command
+ * @arg: pointer to argument structure
+ *
+ * Implement the ioctl methods available on the SEP device.
+ */
+static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct sep_private_data * const private_data = filp->private_data;
+ struct sep_call_status *call_status = &private_data->call_status;
+ struct sep_device *sep = private_data->device;
+ struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
+ struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
+ int error = 0;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] ioctl cmd 0x%x\n",
+ current->pid, cmd);
+ dev_dbg(&sep->pdev->dev, "[PID%d] dma context addr 0x%p\n",
+ current->pid, *dma_ctx);
+
+ /* Make sure we own this device */
+ error = sep_check_transaction_owner(sep);
+ if (error) {
+ dev_dbg(&sep->pdev->dev, "[PID%d] ioctl pid is not owner\n",
+ current->pid);
+ goto end_function;
+ }
+
+ /* Check that sep_mmap has been called before */
+ if (0 == test_bit(SEP_LEGACY_MMAP_DONE_OFFSET,
+ &call_status->status)) {
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] mmap not called\n", current->pid);
+ error = -EPROTO;
+ goto end_function;
+ }
+
+ /* Check that the command is for SEP device */
+ if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
+ error = -ENOTTY;
+ goto end_function;
+ }
+
+ switch (cmd) {
+ case SEP_IOCSENDSEPCOMMAND:
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] SEP_IOCSENDSEPCOMMAND start\n",
+ current->pid);
+ if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
+ &call_status->status)) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] send msg already done\n",
+ current->pid);
+ error = -EPROTO;
+ goto end_function;
+ }
+ /* Send command to SEP */
+ error = sep_send_command_handler(sep);
+ if (!error)
+ set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
+ &call_status->status);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] SEP_IOCSENDSEPCOMMAND end\n",
+ current->pid);
+ break;
+ case SEP_IOCENDTRANSACTION:
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] SEP_IOCENDTRANSACTION start\n",
+ current->pid);
+ error = sep_end_transaction_handler(sep, dma_ctx, call_status,
+ my_queue_elem);
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] SEP_IOCENDTRANSACTION end\n",
+ current->pid);
+ break;
+ case SEP_IOCPREPAREDCB:
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] SEP_IOCPREPAREDCB start\n",
+ current->pid);
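+		/* Fall through to the secure-DMA variant; cmd is rechecked below */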
+ case SEP_IOCPREPAREDCB_SECURE_DMA:
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] SEP_IOCPREPAREDCB_SECURE_DMA start\n",
+ current->pid);
+ if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
+ &call_status->status)) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] dcb prep needed before send msg\n",
+ current->pid);
+ error = -EPROTO;
+ goto end_function;
+ }
+
+ if (!arg) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] dcb null arg\n", current->pid);
+			error = -EINVAL;
+ goto end_function;
+ }
+
+ if (cmd == SEP_IOCPREPAREDCB) {
+ /* No secure dma */
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] SEP_IOCPREPAREDCB (no secure_dma)\n",
+ current->pid);
+
+ error = sep_prepare_dcb_handler(sep, arg, false,
+ dma_ctx);
+ } else {
+ /* Secure dma */
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] SEP_IOC_POC (with secure_dma)\n",
+ current->pid);
+
+ error = sep_prepare_dcb_handler(sep, arg, true,
+ dma_ctx);
+ }
+ dev_dbg(&sep->pdev->dev, "[PID%d] dcb's end\n",
+ current->pid);
+ break;
+ case SEP_IOCFREEDCB:
+ dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB start\n",
+ current->pid);
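+		/* Fall through */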
+ case SEP_IOCFREEDCB_SECURE_DMA:
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] SEP_IOCFREEDCB_SECURE_DMA start\n",
+ current->pid);
+ error = sep_free_dcb_handler(sep, dma_ctx);
+ dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB end\n",
+ current->pid);
+ break;
+ default:
+ error = -ENOTTY;
+ dev_dbg(&sep->pdev->dev, "[PID%d] default end\n",
+ current->pid);
+ break;
+ }
+
+end_function:
+ dev_dbg(&sep->pdev->dev, "[PID%d] ioctl end\n", current->pid);
+
+ return error;
+}
+
+/**
+ * sep_inthandler - interrupt handler for sep device
+ * @irq: interrupt
+ * @dev_id: device id
+ */
+static irqreturn_t sep_inthandler(int irq, void *dev_id)
+{
+ unsigned long lock_irq_flag;
+ u32 reg_val, reg_val2 = 0;
+ struct sep_device *sep = dev_id;
+ irqreturn_t int_error = IRQ_HANDLED;
+
+ /* Are we in power save? */
+#if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
+ if (sep->pdev->dev.power.runtime_status != RPM_ACTIVE) {
+ dev_dbg(&sep->pdev->dev, "interrupt during pwr save\n");
+ return IRQ_NONE;
+ }
+#endif
+
+ if (test_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags) == 0) {
+ dev_dbg(&sep->pdev->dev, "interrupt while nobody using sep\n");
+ return IRQ_NONE;
+ }
+
+ /* Read the IRR register to check if this is SEP interrupt */
+ reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
+
+ dev_dbg(&sep->pdev->dev, "sep int: IRR REG val: %x\n", reg_val);
+
+ if (reg_val & (0x1 << 13)) {
+
+ /* Lock and update the counter of reply messages */
+ spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
+ sep->reply_ct++;
+ spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
+
+ dev_dbg(&sep->pdev->dev, "sep int: send_ct %lx reply_ct %lx\n",
+ sep->send_ct, sep->reply_ct);
+
+ /* Is this a kernel client request */
+ if (sep->in_kernel) {
+ tasklet_schedule(&sep->finish_tasklet);
+ goto finished_interrupt;
+ }
+
+ /* Is this printf or daemon request? */
+ reg_val2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
+ dev_dbg(&sep->pdev->dev,
+ "SEP Interrupt - GPR2 is %08x\n", reg_val2);
+
+ clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);
+
+ if ((reg_val2 >> 30) & 0x1) {
+ dev_dbg(&sep->pdev->dev, "int: printf request\n");
+ } else if (reg_val2 >> 31) {
+ dev_dbg(&sep->pdev->dev, "int: daemon request\n");
+ } else {
+ dev_dbg(&sep->pdev->dev, "int: SEP reply\n");
+ wake_up(&sep->event_interrupt);
+ }
+ } else {
+ dev_dbg(&sep->pdev->dev, "int: not SEP interrupt\n");
+ int_error = IRQ_NONE;
+ }
+
+finished_interrupt:
+
+ if (int_error == IRQ_HANDLED)
+ sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
+
+ return int_error;
+}
+
+/**
+ * sep_reconfig_shared_area - reconfigure shared area
+ * @sep: pointer to struct sep_device
+ *
+ * Reconfig the shared area between HOST and SEP - needed in case
+ * the DX_CC_Init function was called before OS loading.
+ */
+static int sep_reconfig_shared_area(struct sep_device *sep)
+{
+ int ret_val;
+
+ /* use to limit waiting for SEP */
+ unsigned long end_time;
+
+ /* Send the new SHARED MESSAGE AREA to the SEP */
+ dev_dbg(&sep->pdev->dev, "reconfig shared; sending %08llx to sep\n",
+ (unsigned long long)sep->shared_bus);
+
+ sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
+
+ /* Poll for SEP response */
+ ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
+
+ end_time = jiffies + (WAIT_TIME * HZ);
+
+ while ((time_before(jiffies, end_time)) && (ret_val != 0xffffffff) &&
+ (ret_val != sep->shared_bus))
+ ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
+
+ /* Check the return value (register) */
+ if (ret_val != sep->shared_bus) {
+ dev_warn(&sep->pdev->dev, "could not reconfig shared area\n");
+ dev_warn(&sep->pdev->dev, "result was %x\n", ret_val);
+ ret_val = -ENOMEM;
+ } else
+ ret_val = 0;
+
+ dev_dbg(&sep->pdev->dev, "reconfig shared area end\n");
+
+ return ret_val;
+}
+
+/**
+ * sep_activate_dcb_dmatables_context - Takes DCB & DMA tables
+ * contexts into use
+ * @sep: SEP device
+ * @dcb_region: DCB region copy
+ * @dmatables_region: MLLI/DMA tables copy
+ * @dma_ctx: DMA context for current transaction
+ */
+ssize_t sep_activate_dcb_dmatables_context(struct sep_device *sep,
+ struct sep_dcblock **dcb_region,
+ void **dmatables_region,
+ struct sep_dma_context *dma_ctx)
+{
+ void *dmaregion_free_start = NULL;
+ void *dmaregion_free_end = NULL;
+ void *dcbregion_free_start = NULL;
+ void *dcbregion_free_end = NULL;
+ ssize_t error = 0;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] activating dcb/dma region\n",
+ current->pid);
+
+ if (1 > dma_ctx->nr_dcb_creat) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] invalid number of dcbs to activate 0x%08X\n",
+ current->pid, dma_ctx->nr_dcb_creat);
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ dmaregion_free_start = sep->shared_addr
+ + SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES;
+ dmaregion_free_end = dmaregion_free_start
+ + SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;
+
+ if (dmaregion_free_start
+ + dma_ctx->dmatables_len > dmaregion_free_end) {
+ error = -ENOMEM;
+ goto end_function;
+ }
+ memcpy(dmaregion_free_start,
+ *dmatables_region,
+ dma_ctx->dmatables_len);
+ /* Free MLLI table copy */
+ kfree(*dmatables_region);
+ *dmatables_region = NULL;
+
+ /* Copy thread's DCB table copy to DCB table region */
+ dcbregion_free_start = sep->shared_addr +
+ SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES;
+ dcbregion_free_end = dcbregion_free_start +
+ (SEP_MAX_NUM_SYNC_DMA_OPS *
+ sizeof(struct sep_dcblock)) - 1;
+
+ if (dcbregion_free_start
+ + (dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock))
+ > dcbregion_free_end) {
+ error = -ENOMEM;
+ goto end_function;
+ }
+
+ memcpy(dcbregion_free_start,
+ *dcb_region,
+ dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock));
+
+ /* Print the tables */
+ dev_dbg(&sep->pdev->dev, "activate: input table\n");
+ sep_debug_print_lli_tables(sep,
+ (struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
+ (*dcb_region)->input_mlli_address),
+ (*dcb_region)->input_mlli_num_entries,
+ (*dcb_region)->input_mlli_data_size);
+
+ dev_dbg(&sep->pdev->dev, "activate: output table\n");
+ sep_debug_print_lli_tables(sep,
+ (struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
+ (*dcb_region)->output_mlli_address),
+ (*dcb_region)->output_mlli_num_entries,
+ (*dcb_region)->output_mlli_data_size);
+
+ dev_dbg(&sep->pdev->dev,
+ "[PID%d] printing activated tables\n", current->pid);
+
+end_function:
+ kfree(*dmatables_region);
+ *dmatables_region = NULL;
+
+ kfree(*dcb_region);
+ *dcb_region = NULL;
+
+ return error;
+}
+
+/**
+ * sep_create_dcb_dmatables_context - Creates DCB & MLLI/DMA table context
+ * @sep: SEP device
+ * @dcb_region: DCB region buf to create for current transaction
+ * @dmatables_region: MLLI/DMA tables buf to create for current transaction
+ * @dma_ctx: DMA context buf to create for current transaction
+ * @user_dcb_args: User arguments for DCB/MLLI creation
+ * @num_dcbs: Number of DCBs to create
+ * @secure_dma: Indicate use of IMR restricted memory secure dma
+ */
+static ssize_t sep_create_dcb_dmatables_context(struct sep_device *sep,
+ struct sep_dcblock **dcb_region,
+ void **dmatables_region,
+ struct sep_dma_context **dma_ctx,
+ const struct build_dcb_struct __user *user_dcb_args,
+ const u32 num_dcbs, bool secure_dma)
+{
+ int error = 0;
+ int i = 0;
+ struct build_dcb_struct *dcb_args = NULL;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
+ current->pid);
+
+ if (!dcb_region || !dma_ctx || !dmatables_region || !user_dcb_args) {
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] invalid number of dcbs 0x%08X\n",
+ current->pid, num_dcbs);
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ dcb_args = kzalloc(num_dcbs * sizeof(struct build_dcb_struct),
+ GFP_KERNEL);
+ if (!dcb_args) {
+ dev_warn(&sep->pdev->dev, "[PID%d] no memory for dcb args\n",
+ current->pid);
+ error = -ENOMEM;
+ goto end_function;
+ }
+
+ if (copy_from_user(dcb_args,
+ user_dcb_args,
+ num_dcbs * sizeof(struct build_dcb_struct))) {
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ /* Allocate thread-specific memory for DCB */
+ *dcb_region = kzalloc(num_dcbs * sizeof(struct sep_dcblock),
+ GFP_KERNEL);
+ if (!(*dcb_region)) {
+ error = -ENOMEM;
+ goto end_function;
+ }
+
+ /* Prepare DCB and MLLI table into the allocated regions */
+ for (i = 0; i < num_dcbs; i++) {
+ error = sep_prepare_input_output_dma_table_in_dcb(sep,
+ (unsigned long)dcb_args[i].app_in_address,
+ (unsigned long)dcb_args[i].app_out_address,
+ dcb_args[i].data_in_size,
+ dcb_args[i].block_size,
+ dcb_args[i].tail_block_size,
+ dcb_args[i].is_applet,
+ false, secure_dma,
+ *dcb_region, dmatables_region,
+ dma_ctx,
+ NULL,
+ NULL);
+ if (error) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] dma table creation failed\n",
+ current->pid);
+ goto end_function;
+ }
+
+ if (dcb_args[i].app_in_address != 0)
+ (*dma_ctx)->input_data_len += dcb_args[i].data_in_size;
+ }
+
+end_function:
+ kfree(dcb_args);
+ return error;
+
+}
+
+/**
+ * sep_create_dcb_dmatables_context_kernel - Creates DCB & MLLI/DMA table context
+ * for kernel crypto
+ * @sep: SEP device
+ * @dcb_region: DCB region buf to create for current transaction
+ * @dmatables_region: MLLI/DMA tables buf to create for current transaction
+ * @dma_ctx: DMA context buf to create for current transaction
+ * @dcb_data: DCB/MLLI creation arguments (struct build_dcb_struct_kernel)
+ * @num_dcbs: Number of DCBs to create
+ *
+ * This does the same thing as sep_create_dcb_dmatables_context
+ * except that it is used only for the kernel crypto operation. It is
+ * separate because there is no user data involved; the dcb data structure
+ * is specific for kernel crypto (build_dcb_struct_kernel)
+ */
+int sep_create_dcb_dmatables_context_kernel(struct sep_device *sep,
+ struct sep_dcblock **dcb_region,
+ void **dmatables_region,
+ struct sep_dma_context **dma_ctx,
+ const struct build_dcb_struct_kernel *dcb_data,
+ const u32 num_dcbs)
+{
+ int error = 0;
+ int i = 0;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
+ current->pid);
+
+ if (!dcb_region || !dma_ctx || !dmatables_region || !dcb_data) {
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] invalid number of dcbs 0x%08X\n",
+ current->pid, num_dcbs);
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] num_dcbs is %d\n",
+ current->pid, num_dcbs);
+
+ /* Allocate thread-specific memory for DCB */
+ *dcb_region = kzalloc(num_dcbs * sizeof(struct sep_dcblock),
+ GFP_KERNEL);
+ if (!(*dcb_region)) {
+ error = -ENOMEM;
+ goto end_function;
+ }
+
+ /* Prepare DCB and MLLI table into the allocated regions */
+ for (i = 0; i < num_dcbs; i++) {
+ error = sep_prepare_input_output_dma_table_in_dcb(sep,
+ (unsigned long)dcb_data->app_in_address,
+ (unsigned long)dcb_data->app_out_address,
+ dcb_data->data_in_size,
+ dcb_data->block_size,
+ dcb_data->tail_block_size,
+ dcb_data->is_applet,
+ true,
+ false,
+ *dcb_region, dmatables_region,
+ dma_ctx,
+ dcb_data->src_sg,
+ dcb_data->dst_sg);
+ if (error) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] dma table creation failed\n",
+ current->pid);
+ goto end_function;
+ }
+ }
+
+end_function:
+ return error;
+
+}
+
+/**
+ * sep_activate_msgarea_context - Takes the message area context into use
+ * @sep: SEP device
+ * @msg_region: Message area context buf
+ * @msg_len: Message area context buffer size
+ */
+static ssize_t sep_activate_msgarea_context(struct sep_device *sep,
+ void **msg_region,
+ const size_t msg_len)
+{
+ dev_dbg(&sep->pdev->dev, "[PID%d] activating msg region\n",
+ current->pid);
+
+ if (!msg_region || !(*msg_region) ||
+ SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES < msg_len) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] invalid act msgarea len 0x%08zX\n",
+ current->pid, msg_len);
+ return -EINVAL;
+ }
+
+ memcpy(sep->shared_addr, *msg_region, msg_len);
+
+ return 0;
+}
+
+/**
+ * sep_create_msgarea_context - Creates message area context
+ * @sep: SEP device
+ * @msg_region: Msg area region buf to create for current transaction
+ * @msg_user: Content for msg area region from user
+ * @msg_len: Message area size
+ */
+static ssize_t sep_create_msgarea_context(struct sep_device *sep,
+ void **msg_region,
+ const void __user *msg_user,
+ const size_t msg_len)
+{
+ int error = 0;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] creating msg region\n",
+ current->pid);
+
+ if (!msg_region ||
+ !msg_user ||
+ SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < msg_len ||
+ SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > msg_len) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] invalid creat msgarea len 0x%08zX\n",
+ current->pid, msg_len);
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ /* Allocate thread-specific memory for message buffer */
+ *msg_region = kzalloc(msg_len, GFP_KERNEL);
+ if (!(*msg_region)) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] no mem for msgarea context\n",
+ current->pid);
+ error = -ENOMEM;
+ goto end_function;
+ }
+
+ /* Copy input data to write() to allocated message buffer */
+ if (copy_from_user(*msg_region, msg_user, msg_len)) {
+ error = -EINVAL;
+ goto end_function;
+ }
+
+end_function:
+ if (error && msg_region) {
+ kfree(*msg_region);
+ *msg_region = NULL;
+ }
+
+ return error;
+}
+
+
+/**
+ * sep_read - Returns results of an operation for fastcall interface
+ * @filp: File pointer
+ * @buf_user: User buffer for storing results
+ * @count_user: User buffer size
+ * @offset: File offset, not supported
+ *
+ * The implementation does not support reading in chunks; all data must be
+ * consumed during a single read system call.
+ */
+static ssize_t sep_read(struct file *filp,
+ char __user *buf_user, size_t count_user,
+ loff_t *offset)
+{
+ struct sep_private_data * const private_data = filp->private_data;
+ struct sep_call_status *call_status = &private_data->call_status;
+ struct sep_device *sep = private_data->device;
+ struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
+ struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
+ ssize_t error = 0, error_tmp = 0;
+
+ /* Am I the process that owns the transaction? */
+ error = sep_check_transaction_owner(sep);
+ if (error) {
+ dev_dbg(&sep->pdev->dev, "[PID%d] read pid is not owner\n",
+ current->pid);
+ goto end_function;
+ }
+
+ /* Check that the user has called the necessary APIs */
+ if (0 == test_bit(SEP_FASTCALL_WRITE_DONE_OFFSET,
+ &call_status->status)) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] fastcall write not called\n",
+ current->pid);
+ error = -EPROTO;
+ goto end_function_error;
+ }
+
+ if (!buf_user) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] null user buffer\n",
+ current->pid);
+ error = -EINVAL;
+ goto end_function_error;
+ }
+
+
+ /* Wait for SEP to finish */
+ wait_event(sep->event_interrupt,
+ test_bit(SEP_WORKING_LOCK_BIT,
+ &sep->in_use_flags) == 0);
+
+ sep_dump_message(sep);
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] count_user = 0x%08zX\n",
+ current->pid, count_user);
+
+ /* In case the user has allocated a bigger buffer */
+ if (count_user > SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES)
+ count_user = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES;
+
+ if (copy_to_user(buf_user, sep->shared_addr, count_user)) {
+ error = -EFAULT;
+ goto end_function_error;
+ }
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] read succeeded\n", current->pid);
+ error = count_user;
+
+end_function_error:
+ /* Copy possible tail data to user and free DCB and MLLIs */
+ error_tmp = sep_free_dcb_handler(sep, dma_ctx);
+ if (error_tmp)
+ dev_warn(&sep->pdev->dev, "[PID%d] dcb free failed\n",
+ current->pid);
+
+ /* End the transaction, wakeup pending ones */
+ error_tmp = sep_end_transaction_handler(sep, dma_ctx, call_status,
+ my_queue_elem);
+ if (error_tmp)
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] ending transaction failed\n",
+ current->pid);
+
+end_function:
+ return error;
+}
+
+/**
+ * sep_fastcall_args_get - Gets fastcall params from user
+ * @sep: SEP device
+ * @args: Parameters buffer
+ * @buf_user: User buffer for operation parameters
+ * @count_user: User buffer size
+ */
+static inline ssize_t sep_fastcall_args_get(struct sep_device *sep,
+ struct sep_fastcall_hdr *args,
+ const char __user *buf_user,
+ const size_t count_user)
+{
+ ssize_t error = 0;
+ size_t actual_count = 0;
+
+ if (!buf_user) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] null user buffer\n",
+ current->pid);
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ if (count_user < sizeof(struct sep_fastcall_hdr)) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] too small message size 0x%08zX\n",
+ current->pid, count_user);
+ error = -EINVAL;
+ goto end_function;
+ }
+
+
+ if (copy_from_user(args, buf_user, sizeof(struct sep_fastcall_hdr))) {
+ error = -EFAULT;
+ goto end_function;
+ }
+
+ if (SEP_FC_MAGIC != args->magic) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] invalid fastcall magic 0x%08X\n",
+ current->pid, args->magic);
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr num of DCBs 0x%08X\n",
+ current->pid, args->num_dcbs);
+ dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr msg len 0x%08X\n",
+ current->pid, args->msg_len);
+
+ if (SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < args->msg_len ||
+ SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > args->msg_len) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] invalid message length\n",
+ current->pid);
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ actual_count = sizeof(struct sep_fastcall_hdr)
+ + args->msg_len
+ + (args->num_dcbs * sizeof(struct build_dcb_struct));
+
+ if (actual_count != count_user) {
+ dev_warn(&sep->pdev->dev,
+ "[PID%d] inconsistent message "
+ "sizes 0x%08zX vs 0x%08zX\n",
+ current->pid, actual_count, count_user);
+ error = -EMSGSIZE;
+ goto end_function;
+ }
+
+end_function:
+ return error;
+}
+
+/**
+ * sep_write - Starts an operation for fastcall interface
+ * @filp: File pointer
+ * @buf_user: User buffer for operation parameters
+ * @count_user: User buffer size
+ * @offset: File offset, not supported
+ *
+ * The implementation does not support writing in chunks;
+ * all data must be given during a single write system call.
+ */
+static ssize_t sep_write(struct file *filp,
+ const char __user *buf_user, size_t count_user,
+ loff_t *offset)
+{
+ struct sep_private_data * const private_data = filp->private_data;
+ struct sep_call_status *call_status = &private_data->call_status;
+ struct sep_device *sep = private_data->device;
+ struct sep_dma_context *dma_ctx = NULL;
+ struct sep_fastcall_hdr call_hdr = {0};
+ void *msg_region = NULL;
+ void *dmatables_region = NULL;
+ struct sep_dcblock *dcb_region = NULL;
+ ssize_t error = 0;
+ struct sep_queue_info *my_queue_elem = NULL;
+ bool my_secure_dma; /* are we using secure_dma (IMR)? */
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] sep dev is 0x%p\n",
+ current->pid, sep);
+ dev_dbg(&sep->pdev->dev, "[PID%d] private_data is 0x%p\n",
+ current->pid, private_data);
+
+ error = sep_fastcall_args_get(sep, &call_hdr, buf_user, count_user);
+ if (error)
+ goto end_function;
+
+ buf_user += sizeof(struct sep_fastcall_hdr);
+
+ if (call_hdr.secure_dma == 0)
+ my_secure_dma = false;
+ else
+ my_secure_dma = true;
+
+ /*
+ * Controlling driver memory usage by limiting amount of
+ * buffers created. Only SEP_DOUBLEBUF_USERS_LIMIT number
+ * of threads can progress further at a time
+ */
+ dev_dbg(&sep->pdev->dev, "[PID%d] waiting for double buffering "
+ "region access\n", current->pid);
+ error = down_interruptible(&sep->sep_doublebuf);
+ dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region start\n",
+ current->pid);
+ if (error) {
+ /* Signal received */
+ goto end_function_error;
+ }
+
+
+ /*
+ * Prepare contents of the shared area regions for
+ * the operation into temporary buffers
+ */
+ if (0 < call_hdr.num_dcbs) {
+ error = sep_create_dcb_dmatables_context(sep,
+ &dcb_region,
+ &dmatables_region,
+ &dma_ctx,
+ (const struct build_dcb_struct __user *)
+ buf_user,
+ call_hdr.num_dcbs, my_secure_dma);
+ if (error)
+ goto end_function_error_doublebuf;
+
+ buf_user += call_hdr.num_dcbs * sizeof(struct build_dcb_struct);
+ }
+
+ error = sep_create_msgarea_context(sep,
+ &msg_region,
+ buf_user,
+ call_hdr.msg_len);
+ if (error)
+ goto end_function_error_doublebuf;
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] updating queue status\n",
+ current->pid);
+ my_queue_elem = sep_queue_status_add(sep,
+ ((struct sep_msgarea_hdr *)msg_region)->opcode,
+ (dma_ctx) ? dma_ctx->input_data_len : 0,
+ current->pid,
+ current->comm, sizeof(current->comm));
+
+ if (!my_queue_elem) {
+ dev_dbg(&sep->pdev->dev, "[PID%d] updating queue"
+ "status error\n", current->pid);
+ error = -ENOMEM;
+ goto end_function_error_doublebuf;
+ }
+
+ /* Wait until current process gets the transaction */
+ error = sep_wait_transaction(sep);
+
+ if (error) {
+ /* Interrupted by signal, don't clear transaction */
+ dev_dbg(&sep->pdev->dev, "[PID%d] interrupted by signal\n",
+ current->pid);
+ sep_queue_status_remove(sep, &my_queue_elem);
+ goto end_function_error_doublebuf;
+ }
+
+ dev_dbg(&sep->pdev->dev, "[PID%d] saving queue element\n",
+ current->pid);
+ private_data->my_queue_elem = my_queue_elem;
+
+ /* Activate shared area regions for the transaction */
+ error = sep_activate_msgarea_context(sep, &msg_region,
+ call_hdr.msg_len);
+ if (error)
+ goto end_function_error_clear_transact;
+
+ sep_dump_message(sep);
+
+ if (0 < call_hdr.num_dcbs) {
+ error = sep_activate_dcb_dmatables_context(sep,
+ &dcb_region,
+ &dmatables_region,
+ dma_ctx);
+ if (error)
+ goto end_function_error_clear_transact;
+ }
+
+ /* Send command to SEP */
+ error = sep_send_command_handler(sep);
+ if (error)
+ goto end_function_error_clear_transact;
+
+ /* Store DMA context for the transaction */
+ private_data->dma_ctx = dma_ctx;
+ /* Update call status */
+ set_bit(SEP_FASTCALL_WRITE_DONE_OFFSET, &call_status->status);
+ error = count_user;
+
+ up(&sep->sep_doublebuf);
+ dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
+ current->pid);
+
+ goto end_function;
+
+end_function_error_clear_transact:
+ sep_end_transaction_handler(sep, &dma_ctx, call_status,
+ &private_data->my_queue_elem);
+
+end_function_error_doublebuf:
+ up(&sep->sep_doublebuf);
+ dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
+ current->pid);
+
+end_function_error:
+ if (dma_ctx)
+ sep_free_dma_table_data_handler(sep, &dma_ctx);
+
+end_function:
+ kfree(dcb_region);
+ kfree(dmatables_region);
+ kfree(msg_region);
+
+ return error;
+}
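+
+/*
+ * Illustrative user-space sketch (not part of the driver) of the fastcall
+ * flow implemented by sep_write()/sep_read() above.  The whole request --
+ * a struct sep_fastcall_hdr, followed by num_dcbs struct build_dcb_struct
+ * entries, followed by the message -- must be handed over in one write()
+ * call, and the results must be consumed in one read() call; chunking is
+ * not supported.  Variable names and buffer sizes below are hypothetical.
+ *
+ *	struct sep_fastcall_hdr hdr;
+ *	hdr.magic = SEP_FC_MAGIC;
+ *	hdr.secure_dma = 0;
+ *	hdr.num_dcbs = 1;
+ *	hdr.msg_len = msg_len;
+ *	memcpy(buf, &hdr, sizeof(hdr));
+ *	memcpy(buf + sizeof(hdr), &dcb, sizeof(dcb));
+ *	memcpy(buf + sizeof(hdr) + sizeof(dcb), msg, msg_len);
+ *	written = write(fd, buf, sizeof(hdr) + sizeof(dcb) + msg_len);
+ *	nread = read(fd, result, sizeof(result));
+ */
+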
+/**
+ * sep_seek - Handler for seek system call
+ * @filp: File pointer
+ * @offset: File offset
+ * @origin: Options for offset
+ *
+ * Fastcall interface does not support seeking, all reads
+ * and writes are from/to offset zero
+ */
+static loff_t sep_seek(struct file *filp, loff_t offset, int origin)
+{
+ return -ENOSYS;
+}
+
+
+
+/**
+ * sep_file_operations - file operation on sep device
+ * @sep_ioctl: ioctl handler from user space call
+ * @sep_poll: poll handler
+ * @sep_open: handles sep device open request
+ * @sep_release: handles sep device release request
+ * @sep_mmap: handles memory mapping requests
+ * @sep_read: handles read request on sep device
+ * @sep_write: handles write request on sep device
+ * @sep_seek: handles seek request on sep device
+ */
+static const struct file_operations sep_file_operations = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = sep_ioctl,
+ .poll = sep_poll,
+ .open = sep_open,
+ .release = sep_release,
+ .mmap = sep_mmap,
+ .read = sep_read,
+ .write = sep_write,
+ .llseek = sep_seek,
+};
+
+/**
+ * sep_sysfs_read - read sysfs entry per given arguments
+ * @filp: file pointer
+ * @kobj: kobject pointer
+ * @attr: binary file attributes
+ * @buf: read to this buffer
+ * @pos: offset to read
+ * @count: amount of data to read
+ *
+ * This function reads sysfs entries for the sep driver per the given arguments.
+ */
+static ssize_t
+sep_sysfs_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t pos, size_t count)
+{
+ unsigned long lck_flags;
+ size_t nleft = count;
+ struct sep_device *sep = sep_dev;
+ struct sep_queue_info *queue_elem = NULL;
+ u32 queue_num = 0;
+ u32 i = 1;
+
+ spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
+
+ queue_num = sep->sep_queue_num;
+ if (queue_num > SEP_DOUBLEBUF_USERS_LIMIT)
+ queue_num = SEP_DOUBLEBUF_USERS_LIMIT;
+
+
+ if (count < sizeof(queue_num)
+ + (queue_num * sizeof(struct sep_queue_data))) {
+ spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
+ return -EINVAL;
+ }
+
+ memcpy(buf, &queue_num, sizeof(queue_num));
+ buf += sizeof(queue_num);
+ nleft -= sizeof(queue_num);
+
+ list_for_each_entry(queue_elem, &sep->sep_queue_status, list) {
+ if (i++ > queue_num)
+ break;
+
+ memcpy(buf, &queue_elem->data, sizeof(queue_elem->data));
+ nleft -= sizeof(queue_elem->data);
+ buf += sizeof(queue_elem->data);
+ }
+ spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
+
+ return count - nleft;
+}
+
+/**
+ * queue_status - defines the binary sysfs attribute for queue status
+ * @attr: attributes (name & permissions)
+ * @read: function pointer to read this file
+ * @size: maximum size of binary attribute
+ */
+static const struct bin_attribute queue_status = {
+ .attr = {.name = "queue_status", .mode = 0444},
+ .read = sep_sysfs_read,
+ .size = sizeof(u32)
+ + (SEP_DOUBLEBUF_USERS_LIMIT * sizeof(struct sep_queue_data)),
+};
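+
+/*
+ * The queue_status attribute above exports a u32 element count followed by
+ * that many struct sep_queue_data records, as written by sep_sysfs_read().
+ * A hypothetical user-space reader (names and sizes are illustrative only)
+ * would parse the blob along these lines:
+ *
+ *	char buf[4096];
+ *	u32 n, i;
+ *	ssize_t len = read(fd, buf, sizeof(buf));
+ *	memcpy(&n, buf, sizeof(n));
+ *	struct sep_queue_data *q = (struct sep_queue_data *)(buf + sizeof(n));
+ *	for (i = 0; i < n; i++)
+ *		handle_entry(&q[i]);
+ */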
+
+/**
+ * sep_register_driver_with_fs - register misc devices
+ * @sep: pointer to struct sep_device
+ *
+ * This function registers the driver with the file system
+ */
+static int sep_register_driver_with_fs(struct sep_device *sep)
+{
+ int ret_val;
+
+ sep->miscdev_sep.minor = MISC_DYNAMIC_MINOR;
+ sep->miscdev_sep.name = SEP_DEV_NAME;
+ sep->miscdev_sep.fops = &sep_file_operations;
+
+ ret_val = misc_register(&sep->miscdev_sep);
+ if (ret_val) {
+ dev_warn(&sep->pdev->dev, "misc reg fails for SEP %x\n",
+ ret_val);
+ return ret_val;
+ }
+
+ ret_val = device_create_bin_file(sep->miscdev_sep.this_device,
+ &queue_status);
+ if (ret_val) {
+ dev_warn(&sep->pdev->dev, "sysfs attribute1 fails for SEP %x\n",
+ ret_val);
+ return ret_val;
+ }
+
+ return ret_val;
+}
+
+
+/**
+ * sep_probe - probe a matching PCI device
+ * @pdev: pci_device
+ * @ent: pci_device_id
+ *
+ * Attempt to set up and configure a SEP device that has been
+ * discovered by the PCI layer. Allocates all required resources.
+ */
+static int __devinit sep_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int error = 0;
+ struct sep_device *sep = NULL;
+
+ if (sep_dev != NULL) {
+ dev_dbg(&pdev->dev, "only one SEP supported.\n");
+ return -EBUSY;
+ }
+
+ /* Enable the device */
+ error = pci_enable_device(pdev);
+ if (error) {
+ dev_warn(&pdev->dev, "error enabling pci device\n");
+ goto end_function;
+ }
+
+ /* Allocate the sep_device structure for this device */
+ sep_dev = kzalloc(sizeof(struct sep_device), GFP_ATOMIC);
+ if (sep_dev == NULL) {
+ dev_warn(&pdev->dev,
+ "can't kmalloc the sep_device structure\n");
+ error = -ENOMEM;
+ goto end_function_disable_device;
+ }
+
+ /*
+ * We're going to use another variable for actually
+ * working with the device; this way, if we have
+ * multiple devices in the future, it would be easier
+ * to make appropriate changes
+ */
+ sep = sep_dev;
+
+ sep->pdev = pci_dev_get(pdev);
+
+ init_waitqueue_head(&sep->event_transactions);
+ init_waitqueue_head(&sep->event_interrupt);
+ spin_lock_init(&sep->snd_rply_lck);
+ spin_lock_init(&sep->sep_queue_lock);
+ sema_init(&sep->sep_doublebuf, SEP_DOUBLEBUF_USERS_LIMIT);
+
+ INIT_LIST_HEAD(&sep->sep_queue_status);
+
+ dev_dbg(&sep->pdev->dev, "sep probe: PCI obtained, "
+ "device being prepared\n");
+
+ /* Set up our register area */
+ sep->reg_physical_addr = pci_resource_start(sep->pdev, 0);
+ if (!sep->reg_physical_addr) {
+ dev_warn(&sep->pdev->dev, "Error getting register start\n");
+ error = -ENODEV;
+ goto end_function_free_sep_dev;
+ }
+
+ sep->reg_physical_end = pci_resource_end(sep->pdev, 0);
+ if (!sep->reg_physical_end) {
+ dev_warn(&sep->pdev->dev, "Error getting register end\n");
+ error = -ENODEV;
+ goto end_function_free_sep_dev;
+ }
+
+ sep->reg_addr = ioremap_nocache(sep->reg_physical_addr,
+ (size_t)(sep->reg_physical_end - sep->reg_physical_addr + 1));
+ if (!sep->reg_addr) {
+ dev_warn(&sep->pdev->dev, "Error getting register virtual\n");
+ error = -ENODEV;
+ goto end_function_free_sep_dev;
+ }
+
+ dev_dbg(&sep->pdev->dev,
+ "Register area start %llx end %llx virtual %p\n",
+ (unsigned long long)sep->reg_physical_addr,
+ (unsigned long long)sep->reg_physical_end,
+ sep->reg_addr);
+
+ /* Allocate the shared area */
+ sep->shared_size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
+ SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES +
+ SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
+ SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
+ SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
+
+ if (sep_map_and_alloc_shared_area(sep)) {
+ error = -ENOMEM;
+ /* Allocation failed */
+ goto end_function_error;
+ }
+
+ /* Clear ICR register */
+ sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
+
+ /* Set the IMR register - open only GPR 2 */
+ sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
+
+ /* Read send/receive counters from SEP */
+ sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
+ sep->reply_ct &= 0x3FFFFFFF;
+ sep->send_ct = sep->reply_ct;
+
+ /* Get the interrupt line */
+ error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED,
+ "sep_driver", sep);
+
+ if (error)
+ goto end_function_deallocate_sep_shared_area;
+
+ /* The new chip requires a shared area reconfigure */
+ error = sep_reconfig_shared_area(sep);
+ if (error)
+ goto end_function_free_irq;
+
+ sep->in_use = 1;
+
+ /* Finally magic up the device nodes */
+ /* Register driver with the fs */
+ error = sep_register_driver_with_fs(sep);
+
+ if (error) {
+ dev_err(&sep->pdev->dev, "error registering dev file\n");
+ goto end_function_free_irq;
+ }
+
+ sep->in_use = 0; /* we're done touching the device */
+#ifdef SEP_ENABLE_RUNTIME_PM
+ pm_runtime_put_noidle(&sep->pdev->dev);
+ pm_runtime_allow(&sep->pdev->dev);
+ pm_runtime_set_autosuspend_delay(&sep->pdev->dev,
+ SUSPEND_DELAY);
+ pm_runtime_use_autosuspend(&sep->pdev->dev);
+ pm_runtime_mark_last_busy(&sep->pdev->dev);
+ sep->power_save_setup = 1;
+#endif
+ /* register kernel crypto driver */
+#if defined(CONFIG_CRYPTO)
+ error = sep_crypto_setup();
+ if (error) {
+ dev_err(&sep->pdev->dev, "crypto setup failed\n");
+ goto end_function_free_irq;
+ }
+#endif
+ goto end_function;
+
+end_function_free_irq:
+ free_irq(pdev->irq, sep);
+
+end_function_deallocate_sep_shared_area:
+ /* De-allocate shared area */
+ sep_unmap_and_free_shared_area(sep);
+
+end_function_error:
+ iounmap(sep->reg_addr);
+
+end_function_free_sep_dev:
+ pci_dev_put(sep_dev->pdev);
+ kfree(sep_dev);
+ sep_dev = NULL;
+
+end_function_disable_device:
+ pci_disable_device(pdev);
+
+end_function:
+ return error;
+}
+
+/**
+ * sep_remove - handles removing device from pci subsystem
+ * @pdev: pointer to pci device
+ *
+ * This function will handle removing our sep device from pci subsystem on exit
+ * or unloading this module. It should free up all used resources, and unmap if
+ * any memory regions mapped.
+ */
+static void sep_remove(struct pci_dev *pdev)
+{
+ struct sep_device *sep = sep_dev;
+
+ /* Unregister from fs */
+ misc_deregister(&sep->miscdev_sep);
+
+ /* Unregister from kernel crypto */
+ sep_crypto_takedown();
+
+ /* Free the irq */
+ free_irq(sep->pdev->irq, sep);
+
+ /* Free the shared area */
+ sep_unmap_and_free_shared_area(sep_dev);
+ iounmap(sep_dev->reg_addr);
+
+#ifdef SEP_ENABLE_RUNTIME_PM
+ if (sep->in_use) {
+ sep->in_use = 0;
+ pm_runtime_forbid(&sep->pdev->dev);
+ pm_runtime_get_noresume(&sep->pdev->dev);
+ }
+#endif
+ pci_dev_put(sep_dev->pdev);
+ kfree(sep_dev);
+ sep_dev = NULL;
+}
+
+/* Initialize struct pci_device_id for our driver */
+static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl) = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0826)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08e9)},
+ {0}
+};
+
+/* Export our pci_device_id structure to user space */
+MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
+
+#ifdef SEP_ENABLE_RUNTIME_PM
+
+/**
+ * sep_pci_resume - resume routine while waking up from S3 state
+ * @dev: pointer to sep device
+ *
+ * This function is used to wake up the sep driver while the system wakes from
+ * S3 state, i.e. suspend to RAM. The RAM is intact.
+ * Notes - revisit with more understanding of pm, ICR/IMR & counters.
+ */
+static int sep_pci_resume(struct device *dev)
+{
+ struct sep_device *sep = sep_dev;
+
+ dev_dbg(&sep->pdev->dev, "pci resume called\n");
+
+ if (sep->power_state == SEP_DRIVER_POWERON)
+ return 0;
+
+ /* Clear ICR register */
+ sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
+
+ /* Set the IMR register - open only GPR 2 */
+ sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
+
+ /* Read send/receive counters from SEP */
+ sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
+ sep->reply_ct &= 0x3FFFFFFF;
+ sep->send_ct = sep->reply_ct;
+
+ sep->power_state = SEP_DRIVER_POWERON;
+
+ return 0;
+}
+
+/**
+ * sep_pci_suspend - suspend routine while going to S3 state
+ * @dev: pointer to sep device
+ *
+ * This function is used to suspend the sep driver while the system goes to S3
+ * state, i.e. suspend to RAM. The RAM is intact and powered on during this suspend.
+ * Notes - revisit with more understanding of pm, ICR/IMR
+ */
+static int sep_pci_suspend(struct device *dev)
+{
+ struct sep_device *sep = sep_dev;
+
+ dev_dbg(&sep->pdev->dev, "pci suspend called\n");
+ if (sep->in_use == 1)
+ return -EAGAIN;
+
+ sep->power_state = SEP_DRIVER_POWEROFF;
+
+ /* Clear ICR register */
+ sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
+
+ /* Set the IMR to block all */
+ sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0xFFFFFFFF);
+
+ return 0;
+}
+
+/**
+ * sep_pm_runtime_resume - runtime resume routine
+ * @dev: pointer to sep device
+ *
+ * Notes - revisit with more understanding of pm, ICR/IMR & counters
+ */
+static int sep_pm_runtime_resume(struct device *dev)
+{
+
+ u32 retval2;
+ u32 delay_count;
+ struct sep_device *sep = sep_dev;
+
+ dev_dbg(&sep->pdev->dev, "pm runtime resume called\n");
+
+ /*
+ * Wait until the SCU boot is ready.
+ * This is done by polling SCU_DELAY_ITERATION (10
+ * microseconds each) up to SCU_DELAY_MAX (50) times.
+ * The boot-ready bit can be set at a random time that is less
+ * than 500 microseconds after each power resume.
+ */
+ retval2 = 0;
+ delay_count = 0;
+ while ((!retval2) && (delay_count < SCU_DELAY_MAX)) {
+ retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
+ retval2 &= 0x00000008;
+ if (!retval2) {
+ udelay(SCU_DELAY_ITERATION);
+ delay_count += 1;
+ }
+ }
+
+ if (!retval2) {
+ dev_warn(&sep->pdev->dev, "scu boot bit not set at resume\n");
+ return -EINVAL;
+ }
+
+ /* Clear ICR register */
+ sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
+
+ /* Set the IMR register - open only GPR 2 */
+ sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
+
+ /* Read send/receive counters from SEP */
+ sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
+ sep->reply_ct &= 0x3FFFFFFF;
+ sep->send_ct = sep->reply_ct;
+
+ return 0;
+}
+
+/**
+ * sep_pm_runtime_suspend - runtime suspend routine
+ * @dev: pointer to sep device
+ *
+ * Notes - revisit with more understanding of pm
+ */
+static int sep_pm_runtime_suspend(struct device *dev)
+{
+ struct sep_device *sep = sep_dev;
+
+ dev_dbg(&sep->pdev->dev, "pm runtime suspend called\n");
+
+ /* Clear ICR register */
+ sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
+ return 0;
+}
+
+/**
+ * sep_pm - power management for sep driver
+ * @sep_pm_runtime_resume: resume - no communication with cpu & main memory
+ * @sep_pm_runtime_suspend: suspend - no communication with cpu & main memory
+ * @sep_pci_suspend: suspend - main memory is still ON
+ * @sep_pci_resume: resume - main memory is still ON
+ */
+static const struct dev_pm_ops sep_pm = {
+ .runtime_resume = sep_pm_runtime_resume,
+ .runtime_suspend = sep_pm_runtime_suspend,
+ .resume = sep_pci_resume,
+ .suspend = sep_pci_suspend,
+};
+#endif /* SEP_ENABLE_RUNTIME_PM */
+
+/**
+ * sep_pci_driver - registers this device with pci subsystem
+ * @name: name identifier for this driver
+ * @sep_pci_id_tbl: pointer to struct pci_device_id table
+ * @sep_probe: pointer to probe function in PCI driver
+ * @sep_remove: pointer to remove function in PCI driver
+ */
+static struct pci_driver sep_pci_driver = {
+#ifdef SEP_ENABLE_RUNTIME_PM
+ .driver = {
+ .pm = &sep_pm,
+ },
+#endif
+ .name = "sep_sec_driver",
+ .id_table = sep_pci_id_tbl,
+ .probe = sep_probe,
+ .remove = sep_remove
+};
+
+/**
+ * sep_init - init function
+ *
+ * Module load time. Register the PCI device driver.
+ */
+
+static int __init sep_init(void)
+{
+ return pci_register_driver(&sep_pci_driver);
+}
+
+
+/**
+ * sep_exit - called to unload driver
+ *
+ * Unregister the driver. The device will perform all the cleanup required.
+ */
+static void __exit sep_exit(void)
+{
+ pci_unregister_driver(&sep_pci_driver);
+}
+
+
+module_init(sep_init);
+module_exit(sep_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/sep/sep_trace_events.h b/drivers/staging/sep/sep_trace_events.h
new file mode 100644
index 000000000000..2b053a93afe6
--- /dev/null
+++ b/drivers/staging/sep/sep_trace_events.h
@@ -0,0 +1,188 @@
+/*
+ * If TRACE_SYSTEM is defined, that will be the directory created
+ * in the ftrace directory under /sys/kernel/debug/tracing/events/<system>
+ *
+ * The define_trace.h below will also look for a file name of
+ * TRACE_SYSTEM.h where TRACE_SYSTEM is what is defined here.
+ * In this case, it would look for sample.h
+ *
+ * If the header name will be different than the system name
+ * (as in this case), then you can override the header name that
+ * define_trace.h will look up by defining TRACE_INCLUDE_FILE
+ *
+ * This file is called trace-events-sample.h but we want the system
+ * to be called "sample". Therefore we must define the name of this
+ * file:
+ *
+ * #define TRACE_INCLUDE_FILE trace-events-sample
+ *
+ * As we do at the bottom of this file.
+ *
+ * Notice that TRACE_SYSTEM should be defined outside of #if
+ * protection, just like TRACE_INCLUDE_FILE.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sep
+
+/*
+ * Notice that this file is not protected like a normal header.
+ * We also must allow for rereading of this file. The
+ *
+ * || defined(TRACE_HEADER_MULTI_READ)
+ *
+ * serves this purpose.
+ */
+#if !defined(_TRACE_SEP_EVENTS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SEP_EVENTS_H
+
+#ifdef SEP_PERF_DEBUG
+#define SEP_TRACE_FUNC_IN() trace_sep_func_start(__func__, 0)
+#define SEP_TRACE_FUNC_OUT(branch) trace_sep_func_end(__func__, branch)
+#define SEP_TRACE_EVENT(branch) trace_sep_misc_event(__func__, branch)
+#else
+#define SEP_TRACE_FUNC_IN()
+#define SEP_TRACE_FUNC_OUT(branch)
+#define SEP_TRACE_EVENT(branch)
+#endif
+
+
+/*
+ * All trace headers should include tracepoint.h, until we finally
+ * make it into a standard header.
+ */
+#include <linux/tracepoint.h>
+
+/*
+ * The TRACE_EVENT macro is broken up into 5 parts.
+ *
+ * name: name of the trace point. This is also how to enable the tracepoint.
+ * A function called trace_foo_bar() will be created.
+ *
+ * proto: the prototype of the function trace_foo_bar()
+ * Here it is trace_foo_bar(char *foo, int bar).
+ *
+ * args: must match the arguments in the prototype.
+ * Here it is simply "foo, bar".
+ *
+ * struct: This defines the way the data will be stored in the ring buffer.
+ * There are currently two types of elements. __field and __array.
+ * a __field is broken up into (type, name). Where type can be any
+ * type but an array.
+ * For an array. there are three fields. (type, name, size). The
+ * type of elements in the array, the name of the field and the size
+ * of the array.
+ *
+ * __array( char, foo, 10) is the same as saying char foo[10].
+ *
+ * fast_assign: This is a C like function that is used to store the items
+ * into the ring buffer.
+ *
+ * printk: This is a way to print out the data in pretty print. This is
+ * useful if the system crashes and you are logging via a serial line,
+ * the data can be printed to the console using this "printk" method.
+ *
+ * Note, that for both the assign and the printk, __entry is the handler
+ * to the data structure in the ring buffer, and is defined by the
+ * TP_STRUCT__entry.
+ */
+TRACE_EVENT(sep_func_start,
+
+ TP_PROTO(const char *name, int branch),
+
+ TP_ARGS(name, branch),
+
+ TP_STRUCT__entry(
+ __array(char, name, 20)
+ __field(int, branch)
+ ),
+
+ TP_fast_assign(
+ strncpy(__entry->name, name, 20);
+ __entry->branch = branch;
+ ),
+
+ TP_printk("func_start %s %d", __entry->name, __entry->branch)
+);
+
+TRACE_EVENT(sep_func_end,
+
+ TP_PROTO(const char *name, int branch),
+
+ TP_ARGS(name, branch),
+
+ TP_STRUCT__entry(
+ __array(char, name, 20)
+ __field(int, branch)
+ ),
+
+ TP_fast_assign(
+ strncpy(__entry->name, name, 20);
+ __entry->branch = branch;
+ ),
+
+ TP_printk("func_end %s %d", __entry->name, __entry->branch)
+);
+
+TRACE_EVENT(sep_misc_event,
+
+ TP_PROTO(const char *name, int branch),
+
+ TP_ARGS(name, branch),
+
+ TP_STRUCT__entry(
+ __array(char, name, 20)
+ __field(int, branch)
+ ),
+
+ TP_fast_assign(
+ strncpy(__entry->name, name, 20);
+ __entry->branch = branch;
+ ),
+
+ TP_printk("misc_event %s %d", __entry->name, __entry->branch)
+);
+
+
+#endif
+
+/***** NOTICE! The #if protection ends here. *****/
+
+
+/*
+ * There are several ways I could have done this. If I left out the
+ * TRACE_INCLUDE_PATH, then it would default to the kernel source
+ * include/trace/events directory.
+ *
+ * I could specify a path from the define_trace.h file back to this
+ * file.
+ *
+ * #define TRACE_INCLUDE_PATH ../../samples/trace_events
+ *
+ * But the safest and easiest way to simply make it use the directory
+ * that the file is in is to add in the Makefile:
+ *
+ * CFLAGS_trace-events-sample.o := -I$(src)
+ *
+ * This will make sure the current path is part of the include
+ * structure for our file so that define_trace.h can find it.
+ *
+ * I could have made only the top level directory the include:
+ *
+ * CFLAGS_trace-events-sample.o := -I$(PWD)
+ *
+ * And then let the path to this directory be the TRACE_INCLUDE_PATH:
+ *
+ * #define TRACE_INCLUDE_PATH samples/trace_events
+ *
+ * But then if something defines "samples" or "trace_events" as a macro
+ * then we could risk that being converted too, and give us an unexpected
+ * result.
+ */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+/*
+ * TRACE_INCLUDE_FILE is not needed if the filename and TRACE_SYSTEM are equal
+ */
+#define TRACE_INCLUDE_FILE sep_trace_events
+#include <trace/define_trace.h>
diff --git a/drivers/staging/telephony/Kconfig b/drivers/staging/telephony/Kconfig
new file mode 100644
index 000000000000..b5f78b6ed2bd
--- /dev/null
+++ b/drivers/staging/telephony/Kconfig
@@ -0,0 +1,47 @@
+#
+# Telephony device configuration
+#
+
+menuconfig PHONE
+ tristate "Telephony support"
+ depends on HAS_IOMEM
+ ---help---
+ Say Y here if you have a telephony card, which for example allows
+ you to use a regular phone for voice-over-IP applications.
+
+ Note: this has nothing to do with modems. You do not need to say Y
+ here in order to be able to use a modem under Linux.
+
+ To compile this driver as a module, choose M here: the
+ module will be called phonedev.
+
+if PHONE
+
+config PHONE_IXJ
+ tristate "QuickNet Internet LineJack/PhoneJack support"
+ depends on ISA || PCI
+ ---help---
+ Say M if you have a telephony card manufactured by Quicknet
+ Technologies, Inc. These include the Internet PhoneJACK and
+ Internet LineJACK Telephony Cards. You will get a module called
+ ixj.
+
+ For the ISA versions of these products, you can configure the
+ cards using the isapnp tools (pnpdump/isapnp) or you can use the
+ isapnp support. Please read <file:Documentation/telephony/ixj.txt>.
+
+ For more information on these cards, see Quicknet's web site at:
+ <http://www.quicknet.net/>.
+
+ If you do not have any Quicknet telephony cards, you can safely
+ say N here.
+
+config PHONE_IXJ_PCMCIA
+ tristate "QuickNet Internet LineJack/PhoneJack PCMCIA support"
+ depends on PHONE_IXJ && PCMCIA
+ help
+ Say Y here to configure in PCMCIA service support for the Quicknet
+ cards manufactured by Quicknet Technologies, Inc. This changes the
+ card initialization code to work with the card manager daemon.
+
+endif # PHONE
diff --git a/drivers/staging/telephony/Makefile b/drivers/staging/telephony/Makefile
new file mode 100644
index 000000000000..1206615d69e4
--- /dev/null
+++ b/drivers/staging/telephony/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for drivers/telephony
+#
+
+obj-$(CONFIG_PHONE) += phonedev.o
+obj-$(CONFIG_PHONE_IXJ) += ixj.o
+obj-$(CONFIG_PHONE_IXJ_PCMCIA) += ixj_pcmcia.o
diff --git a/drivers/staging/telephony/TODO b/drivers/staging/telephony/TODO
new file mode 100644
index 000000000000..d47dec3508d7
--- /dev/null
+++ b/drivers/staging/telephony/TODO
@@ -0,0 +1,10 @@
+TODO
+. Determine if the boards are still in use
+ and move this module back to drivers/telephony if necessary
+. Coding style cleanups
+
+Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
+cc Joe Perches <joe@perches.com> if the module should be reactivated.
+
+If no module activity occurs before version 3.6 is released, this
+module should be removed.
diff --git a/drivers/staging/telephony/ixj-ver.h b/drivers/staging/telephony/ixj-ver.h
new file mode 100644
index 000000000000..2031ac6c888c
--- /dev/null
+++ b/drivers/staging/telephony/ixj-ver.h
@@ -0,0 +1,4 @@
+/* configuration management identifiers */
+#define IXJ_VER_MAJOR 1
+#define IXJ_VER_MINOR 0
+#define IXJ_BLD_VER 1
diff --git a/drivers/staging/telephony/ixj.c b/drivers/staging/telephony/ixj.c
new file mode 100644
index 000000000000..d5f923bcdffe
--- /dev/null
+++ b/drivers/staging/telephony/ixj.c
@@ -0,0 +1,10552 @@
+/****************************************************************************
+ * ixj.c
+ *
+ * Device Driver for Quicknet Technologies, Inc.'s Telephony cards
+ * including the Internet PhoneJACK, Internet PhoneJACK Lite,
+ * Internet PhoneJACK PCI, Internet LineJACK, Internet PhoneCARD and
+ * SmartCABLE
+ *
+ * (c) Copyright 1999-2001 Quicknet Technologies, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Author: Ed Okerson, <eokerson@quicknet.net>
+ *
+ * Contributors: Greg Herlein, <gherlein@quicknet.net>
+ * David W. Erhart, <derhart@quicknet.net>
+ * John Sellers, <jsellers@quicknet.net>
+ * Mike Preston, <mpreston@quicknet.net>
+ *
+ * Fixes: David Huggins-Daines, <dhd@cepstral.com>
+ * Fabio Ferrari, <fabio.ferrari@digitro.com.br>
+ * Artis Kugevics, <artis@mt.lv>
+ * Daniele Bellucci, <bellucda@tiscali.it>
+ *
+ * More information about the hardware related to this driver can be found
+ * at our website: http://www.quicknet.net
+ *
+ * IN NO EVENT SHALL QUICKNET TECHNOLOGIES, INC. BE LIABLE TO ANY PARTY FOR
+ * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
+ * OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF QUICKNET
+ * TECHNOLOGIES, INC. HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * QUICKNET TECHNOLOGIES, INC. SPECIFICALLY DISCLAIMS ANY WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
+ * ON AN "AS IS" BASIS, AND QUICKNET TECHNOLOGIES, INC. HAS NO OBLIGATION
+ * TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+ *
+ ***************************************************************************/
+
+/*
+ * Revision 4.8 2003/07/09 19:39:00 Daniele Bellucci
+ * Audit some copy_*_user and minor cleanup.
+ *
+ * Revision 4.7 2001/08/13 06:19:33 craigs
+ * Added additional changes from Alan Cox and John Anderson for
+ * 2.2 to 2.4 cleanup and bounds checking
+ *
+ * Revision 4.6 2001/08/13 01:05:05 craigs
+ * Really fixed PHONE_QUERY_CODEC problem this time
+ *
+ * Revision 4.5 2001/08/13 00:11:03 craigs
+ * Fixed problem in handling of PHONE_QUERY_CODEC, thanks to Shane Anderson
+ *
+ * Revision 4.4 2001/08/07 07:58:12 craigs
+ * Changed back to three digit version numbers
+ * Added tagbuild target to allow automatic and easy tagging of versions
+ *
+ * Revision 4.3 2001/08/07 07:24:47 craigs
+ * Added ixj-ver.h to allow easy configuration management of driver
+ * Added display of version number in /prox/ixj
+ *
+ * Revision 4.2 2001/08/06 07:07:19 craigs
+ * Reverted IXJCTL_DSP_TYPE and IXJCTL_DSP_VERSION files to original
+ * behaviour of returning int rather than short *
+ *
+ * Revision 4.1 2001/08/05 00:17:37 craigs
+ * More changes for correct PCMCIA installation
+ * Start of changes for backward Linux compatibility
+ *
+ * Revision 4.0 2001/08/04 12:33:12 craigs
+ * New version using GNU autoconf
+ *
+ * Revision 3.105 2001/07/20 23:14:32 eokerson
+ * More work on CallerID generation when using ring cadences.
+ *
+ * Revision 3.104 2001/07/06 01:33:55 eokerson
+ * Some bugfixes from Robert Vojta <vojta@ipex.cz> and a few mods to the Makefile.
+ *
+ * Revision 3.103 2001/07/05 19:20:16 eokerson
+ * Updated HOWTO
+ * Changed mic gain to 30dB on Internet LineJACK mic/speaker port.
+ *
+ * Revision 3.102 2001/07/03 23:51:21 eokerson
+ * Un-mute mic on Internet LineJACK when in speakerphone mode.
+ *
+ * Revision 3.101 2001/07/02 19:26:56 eokerson
+ * Removed initialiazation of ixjdebug and ixj_convert_loaded so they will go in the .bss instead of the .data
+ *
+ * Revision 3.100 2001/07/02 19:18:27 eokerson
+ * Changed driver to make dynamic allocation possible. We now pass IXJ * between functions instead of array indexes.
+ * Fixed the way the POTS and PSTN ports interact during a PSTN call to allow local answering.
+ * Fixed speaker mode on Internet LineJACK.
+ *
+ * Revision 3.99 2001/05/09 14:11:16 eokerson
+ * Fixed kmalloc error in ixj_build_filter_cadence. Thanks David Chan <cat@waulogy.stanford.edu>.
+ *
+ * Revision 3.98 2001/05/08 19:55:33 eokerson
+ * Fixed POTS hookstate detection while it is connected to PSTN port.
+ *
+ * Revision 3.97 2001/05/08 00:01:04 eokerson
+ * Fixed kernel oops when sending caller ID data.
+ *
+ * Revision 3.96 2001/05/04 23:09:30 eokerson
+ * Now uses one kernel timer for each card, instead of one for the entire driver.
+ *
+ * Revision 3.95 2001/04/25 22:06:47 eokerson
+ * Fixed squawking at beginning of some G.723.1 calls.
+ *
+ * Revision 3.94 2001/04/03 23:42:00 eokerson
+ * Added linear volume ioctls
+ * Added raw filter load ioctl
+ *
+ * Revision 3.93 2001/02/27 01:00:06 eokerson
+ * Fixed blocking in CallerID.
+ * Reduced size of ixj structure for smaller driver footprint.
+ *
+ * Revision 3.92 2001/02/20 22:02:59 eokerson
+ * Fixed isapnp and pcmcia module compatibility for 2.4.x kernels.
+ * Improved PSTN ring detection.
+ * Fixed wink generation on POTS ports.
+ *
+ * Revision 3.91 2001/02/13 00:55:44 eokerson
+ * Turn AEC back on after changing frame sizes.
+ *
+ * Revision 3.90 2001/02/12 16:42:00 eokerson
+ * Added ALAW codec, thanks to Fabio Ferrari for the table based converters to make ALAW from ULAW.
+ *
+ * Revision 3.89 2001/02/12 15:41:16 eokerson
+ * Fix from Artis Kugevics - Tone gains were not being set correctly.
+ *
+ * Revision 3.88 2001/02/05 23:25:42 eokerson
+ * Fixed lockup bugs with deregister.
+ *
+ * Revision 3.87 2001/01/29 21:00:39 eokerson
+ * Fix from Fabio Ferrari <fabio.ferrari@digitro.com.br> to properly handle EAGAIN and EINTR during non-blocking write.
+ * Updated copyright date.
+ *
+ * Revision 3.86 2001/01/23 23:53:46 eokerson
+ * Fixes to G.729 compatibility.
+ *
+ * Revision 3.85 2001/01/23 21:30:36 eokerson
+ * Added verbage about cards supported.
+ * Removed commands that put the card in low power mode at some times that it should not be in low power mode.
+ *
+ * Revision 3.84 2001/01/22 23:32:10 eokerson
+ * Some bugfixes from David Huggins-Daines, <dhd@cepstral.com> and other cleanups.
+ *
+ * Revision 3.83 2001/01/19 14:51:41 eokerson
+ * Fixed ixj_WriteDSPCommand to decrement usage counter when command fails.
+ *
+ * Revision 3.82 2001/01/19 00:34:49 eokerson
+ * Added verbosity to write overlap errors.
+ *
+ * Revision 3.81 2001/01/18 23:56:54 eokerson
+ * Fixed PSTN line test functions.
+ *
+ * Revision 3.80 2001/01/18 22:29:27 eokerson
+ * Updated AEC/AGC values for different cards.
+ *
+ * Revision 3.79 2001/01/17 02:58:54 eokerson
+ * Fixed AEC reset after Caller ID.
+ * Fixed Codec lockup after Caller ID on Call Waiting when not using 30ms frames.
+ *
+ * Revision 3.78 2001/01/16 19:43:09 eokerson
+ * Added support for Linux 2.4.x kernels.
+ *
+ * Revision 3.77 2001/01/09 04:00:52 eokerson
+ * Linetest will now test the line, even if it has previously succeeded.
+ *
+ * Revision 3.76 2001/01/08 19:27:00 eokerson
+ * Fixed problem with standard cable on Internet PhoneCARD.
+ *
+ * Revision 3.75 2000/12/22 16:52:14 eokerson
+ * Modified to allow hookstate detection on the POTS port when the PSTN port is selected.
+ *
+ * Revision 3.74 2000/12/08 22:41:50 eokerson
+ * Added capability for G729B.
+ *
+ * Revision 3.73 2000/12/07 23:35:16 eokerson
+ * Added capability to have different ring pattern before CallerID data.
+ * Added hookstate checks in CallerID routines to stop FSK.
+ *
+ * Revision 3.72 2000/12/06 19:31:31 eokerson
+ * Modified signal behavior to only send one signal per event.
+ *
+ * Revision 3.71 2000/12/06 03:23:08 eokerson
+ * Fixed CallerID on Call Waiting.
+ *
+ * Revision 3.70 2000/12/04 21:29:37 eokerson
+ * Added checking to Smart Cable gain functions.
+ *
+ * Revision 3.69 2000/12/04 21:05:20 eokerson
+ * Changed ixjdebug levels.
+ * Added ioctls to change gains in Internet Phone CARD Smart Cable.
+ *
+ * Revision 3.68 2000/12/04 00:17:21 craigs
+ * Changed mixer voice gain to +6dB rather than 0dB
+ *
+ * Revision 3.67 2000/11/30 21:25:51 eokerson
+ * Fixed write signal errors.
+ *
+ * Revision 3.66 2000/11/29 22:42:44 eokerson
+ * Fixed PSTN ring detect problems.
+ *
+ * Revision 3.65 2000/11/29 07:31:55 craigs
+ * Added new 425Hz filter co-efficients
+ * Added card-specific DTMF prescaler initialisation
+ *
+ * Revision 3.64 2000/11/28 14:03:32 craigs
+ * Changed certain mixer initialisations to be 0dB rather than 12dB
+ * Added additional information to /proc/ixj
+ *
+ * Revision 3.63 2000/11/28 11:38:41 craigs
+ * Added display of AEC modes in AUTO and AGC mode
+ *
+ * Revision 3.62 2000/11/28 04:05:44 eokerson
+ * Improved PSTN ring detection routine.
+ *
+ * Revision 3.61 2000/11/27 21:53:12 eokerson
+ * Fixed flash detection.
+ *
+ * Revision 3.60 2000/11/27 15:57:29 eokerson
+ * More work on G.729 load routines.
+ *
+ * Revision 3.59 2000/11/25 21:55:12 eokerson
+ * Fixed errors in G.729 load routine.
+ *
+ * Revision 3.58 2000/11/25 04:08:29 eokerson
+ * Added board locks around G.729 and TS85 load routines.
+ *
+ * Revision 3.57 2000/11/24 05:35:17 craigs
+ * Added ability to retrieve mixer values on LineJACK
+ * Added complete initialisation of all mixer values at startup
+ * Fixed spelling mistake
+ *
+ * Revision 3.56 2000/11/23 02:52:11 robertj
+ * Added cvs change log keyword.
+ * Fixed bug in capabilities list when using G.729 module.
+ *
+ */
+
+#include "ixj-ver.h"
+
+#define PERFMON_STATS
+#define IXJDEBUG 0
+#define MAXRINGS 5
+
+#include <linux/module.h>
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/kernel.h> /* printk() */
+#include <linux/fs.h> /* everything... */
+#include <linux/errno.h> /* error codes */
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/mm.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/proc_fs.h>
+#include <linux/poll.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+#include <linux/isapnp.h>
+
+#include "ixj.h"
+
+#define TYPE(inode) (iminor(inode) >> 4)
+#define NUM(inode) (iminor(inode) & 0xf)
+
+static DEFINE_MUTEX(ixj_mutex);
+static int ixjdebug;
+static int hertz = HZ;
+static int samplerate = 100;
+
+module_param(ixjdebug, int, 0);
+
+static DEFINE_PCI_DEVICE_TABLE(ixj_pci_tbl) = {
+ { PCI_VENDOR_ID_QUICKNET, PCI_DEVICE_ID_QUICKNET_XJ,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { }
+};
+MODULE_DEVICE_TABLE(pci, ixj_pci_tbl);
+
+/************************************************************************
+*
+* ixjdebug meanings are now bit mapped instead of level based
+* Values can be or'ed together to turn on multiple messages
+*
+* bit 0 (0x0001) = any failure
+* bit 1 (0x0002) = general messages
+* bit 2 (0x0004) = POTS ringing related
+* bit 3 (0x0008) = PSTN events
+* bit 4 (0x0010) = PSTN Cadence state details
+* bit 5 (0x0020) = Tone detection triggers
+* bit 6 (0x0040) = Tone detection cadence details
+* bit 7 (0x0080) = ioctl tracking
+* bit 8 (0x0100) = signal tracking
+* bit 9 (0x0200) = CallerID generation details
+*
+************************************************************************/
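+
+/*
+ * For example, loading the module with ixjdebug=0x0005 (bit 0 | bit 2)
+ * would report both general failures and POTS ringing events; the value
+ * shown here is only an illustration of or'ing the bits together.
+ */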
+
+#ifdef IXJ_DYN_ALLOC
+
+static IXJ *ixj[IXJMAX];
+#define get_ixj(b) ixj[(b)]
+
+/*
+ * Allocate a free IXJ device
+ */
+
+static IXJ *ixj_alloc(void)
+{
+ int cnt;
+ IXJ *j;
+
+ for(cnt=0; cnt<IXJMAX; cnt++)
+ {
+ if(ixj[cnt] == NULL || !ixj[cnt]->DSPbase)
+ {
+ j = kmalloc(sizeof(IXJ), GFP_KERNEL);
+ if (j == NULL)
+ return NULL;
+ ixj[cnt] = j;
+ return j;
+ }
+ }
+ return NULL;
+}
+
+static void ixj_fsk_free(IXJ *j)
+{
+ kfree(j->fskdata);
+ j->fskdata = NULL;
+}
+
+static void ixj_fsk_alloc(IXJ *j)
+{
+ if(!j->fskdata) {
+ j->fskdata = kmalloc(8000, GFP_KERNEL);
+ if (!j->fskdata) {
+ if(ixjdebug & 0x0200) {
+ printk("IXJ phone%d - allocate failed\n", j->board);
+ }
+ return;
+ } else {
+ j->fsksize = 8000;
+ if(ixjdebug & 0x0200) {
+ printk("IXJ phone%d - allocate succeeded\n", j->board);
+ }
+ }
+ }
+}
+
+#else
+
+static IXJ ixj[IXJMAX];
+#define get_ixj(b) (&ixj[(b)])
+
+/*
+ * Allocate a free IXJ device
+ */
+
+static IXJ *ixj_alloc(void)
+{
+ int cnt;
+ for(cnt=0; cnt<IXJMAX; cnt++) {
+ if(!ixj[cnt].DSPbase)
+ return &ixj[cnt];
+ }
+ return NULL;
+}
+
+static inline void ixj_fsk_free(IXJ *j) {;}
+
+static inline void ixj_fsk_alloc(IXJ *j)
+{
+ j->fsksize = 8000;
+}
+
+#endif
+
+#ifdef PERFMON_STATS
+#define ixj_perfmon(x) ((x)++)
+#else
+#define ixj_perfmon(x) do { } while(0)
+#endif
+
+static int ixj_convert_loaded;
+
+static int ixj_WriteDSPCommand(unsigned short, IXJ *j);
+
+/************************************************************************
+*
+* These are function definitions to allow external modules to register
+* enhanced functionality call backs.
+*
+************************************************************************/
+
+static int Stub(IXJ * J, unsigned long arg)
+{
+ return 0;
+}
+
+static IXJ_REGFUNC ixj_PreRead = &Stub;
+static IXJ_REGFUNC ixj_PostRead = &Stub;
+static IXJ_REGFUNC ixj_PreWrite = &Stub;
+static IXJ_REGFUNC ixj_PostWrite = &Stub;
+
+static void ixj_read_frame(IXJ *j);
+static void ixj_write_frame(IXJ *j);
+static void ixj_init_timer(IXJ *j);
+static void ixj_add_timer(IXJ * j);
+static void ixj_timeout(unsigned long ptr);
+static int read_filters(IXJ *j);
+static int LineMonitor(IXJ *j);
+static int ixj_fasync(int fd, struct file *, int mode);
+static int ixj_set_port(IXJ *j, int arg);
+static int ixj_set_pots(IXJ *j, int arg);
+static int ixj_hookstate(IXJ *j);
+static int ixj_record_start(IXJ *j);
+static void ixj_record_stop(IXJ *j);
+static void set_rec_volume(IXJ *j, int volume);
+static int get_rec_volume(IXJ *j);
+static int set_rec_codec(IXJ *j, int rate);
+static void ixj_vad(IXJ *j, int arg);
+static int ixj_play_start(IXJ *j);
+static void ixj_play_stop(IXJ *j);
+static int ixj_set_tone_on(unsigned short arg, IXJ *j);
+static int ixj_set_tone_off(unsigned short, IXJ *j);
+static int ixj_play_tone(IXJ *j, char tone);
+static void ixj_aec_start(IXJ *j, int level);
+static int idle(IXJ *j);
+static void ixj_ring_on(IXJ *j);
+static void ixj_ring_off(IXJ *j);
+static void aec_stop(IXJ *j);
+static void ixj_ringback(IXJ *j);
+static void ixj_busytone(IXJ *j);
+static void ixj_dialtone(IXJ *j);
+static void ixj_cpt_stop(IXJ *j);
+static char daa_int_read(IXJ *j);
+static char daa_CR_read(IXJ *j, int cr);
+static int daa_set_mode(IXJ *j, int mode);
+static int ixj_linetest(IXJ *j);
+static int ixj_daa_write(IXJ *j);
+static int ixj_daa_cid_read(IXJ *j);
+static void DAA_Coeff_US(IXJ *j);
+static void DAA_Coeff_UK(IXJ *j);
+static void DAA_Coeff_France(IXJ *j);
+static void DAA_Coeff_Germany(IXJ *j);
+static void DAA_Coeff_Australia(IXJ *j);
+static void DAA_Coeff_Japan(IXJ *j);
+static int ixj_init_filter(IXJ *j, IXJ_FILTER * jf);
+static int ixj_init_filter_raw(IXJ *j, IXJ_FILTER_RAW * jfr);
+static int ixj_init_tone(IXJ *j, IXJ_TONE * ti);
+static int ixj_build_cadence(IXJ *j, IXJ_CADENCE __user * cp);
+static int ixj_build_filter_cadence(IXJ *j, IXJ_FILTER_CADENCE __user * cp);
+/* Serial Control Interface functions */
+static int SCI_Control(IXJ *j, int control);
+static int SCI_Prepare(IXJ *j);
+static int SCI_WaitHighSCI(IXJ *j);
+static int SCI_WaitLowSCI(IXJ *j);
+static DWORD PCIEE_GetSerialNumber(WORD wAddress);
+static int ixj_PCcontrol_wait(IXJ *j);
+static void ixj_pre_cid(IXJ *j);
+static void ixj_write_cid(IXJ *j);
+static void ixj_write_cid_bit(IXJ *j, int bit);
+static int set_base_frame(IXJ *j, int size);
+static int set_play_codec(IXJ *j, int rate);
+static void set_rec_depth(IXJ *j, int depth);
+static int ixj_mixer(long val, IXJ *j);
+
+/************************************************************************
+CT8020/CT8021 Host Programmers Model
+Host address Function Access
+DSPbase +
+0-1 Aux Software Status Register (reserved) Read Only
+2-3 Software Status Register Read Only
+4-5 Aux Software Control Register (reserved) Read Write
+6-7 Software Control Register Read Write
+8-9 Hardware Status Register Read Only
+A-B Hardware Control Register Read Write
+C-D Host Transmit (Write) Data Buffer Access Port (buffer input)Write Only
+E-F Host Receive (Read) Data Buffer Access Port (buffer input) Read Only
+************************************************************************/
+
+static inline void ixj_read_HSR(IXJ *j)
+{
+ j->hsr.bytes.low = inb_p(j->DSPbase + 8);
+ j->hsr.bytes.high = inb_p(j->DSPbase + 9);
+}
+
+static inline int IsControlReady(IXJ *j)
+{
+ ixj_read_HSR(j);
+ return j->hsr.bits.controlrdy ? 1 : 0;
+}
+
+static inline int IsPCControlReady(IXJ *j)
+{
+ j->pccr1.byte = inb_p(j->XILINXbase + 3);
+ return j->pccr1.bits.crr ? 1 : 0;
+}
+
+static inline int IsStatusReady(IXJ *j)
+{
+ ixj_read_HSR(j);
+ return j->hsr.bits.statusrdy ? 1 : 0;
+}
+
+static inline int IsRxReady(IXJ *j)
+{
+ ixj_read_HSR(j);
+ ixj_perfmon(j->rxreadycheck);
+ return j->hsr.bits.rxrdy ? 1 : 0;
+}
+
+static inline int IsTxReady(IXJ *j)
+{
+ ixj_read_HSR(j);
+ ixj_perfmon(j->txreadycheck);
+ return j->hsr.bits.txrdy ? 1 : 0;
+}
+
+static inline void set_play_volume(IXJ *j, int volume)
+{
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "IXJ: /dev/phone%d Setting Play Volume to 0x%4.4x\n", j->board, volume);
+ ixj_WriteDSPCommand(0xCF02, j);
+ ixj_WriteDSPCommand(volume, j);
+}
+
+static int set_play_volume_linear(IXJ *j, int volume)
+{
+ int newvolume, dspplaymax;
+
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "IXJ: /dev/phone %d Setting Linear Play Volume to 0x%4.4x\n", j->board, volume);
+ if(volume > 100 || volume < 0) {
+ return -1;
+ }
+
+ /* This should normalize the perceived volumes between the different cards caused by differences in the hardware */
+ switch (j->cardtype) {
+ case QTI_PHONEJACK:
+ dspplaymax = 0x380;
+ break;
+ case QTI_LINEJACK:
+ if(j->port == PORT_PSTN) {
+ dspplaymax = 0x48;
+ } else {
+ dspplaymax = 0x100;
+ }
+ break;
+ case QTI_PHONEJACK_LITE:
+ dspplaymax = 0x380;
+ break;
+ case QTI_PHONEJACK_PCI:
+ dspplaymax = 0x6C;
+ break;
+ case QTI_PHONECARD:
+ dspplaymax = 0x50;
+ break;
+ default:
+ return -1;
+ }
+ newvolume = (dspplaymax * volume) / 100;
+ set_play_volume(j, newvolume);
+ return 0;
+}
+
+static inline void set_play_depth(IXJ *j, int depth)
+{
+ if (depth > 60)
+ depth = 60;
+ if (depth < 0)
+ depth = 0;
+ ixj_WriteDSPCommand(0x5280 + depth, j);
+}
+
+static inline int get_play_volume(IXJ *j)
+{
+ ixj_WriteDSPCommand(0xCF00, j);
+ return j->ssr.high << 8 | j->ssr.low;
+}
+
+static int get_play_volume_linear(IXJ *j)
+{
+ int volume, newvolume, dspplaymax;
+
+ /* This should normalize the perceived volumes between the different cards caused by differences in the hardware */
+ switch (j->cardtype) {
+ case QTI_PHONEJACK:
+ dspplaymax = 0x380;
+ break;
+ case QTI_LINEJACK:
+ if(j->port == PORT_PSTN) {
+ dspplaymax = 0x48;
+ } else {
+ dspplaymax = 0x100;
+ }
+ break;
+ case QTI_PHONEJACK_LITE:
+ dspplaymax = 0x380;
+ break;
+ case QTI_PHONEJACK_PCI:
+ dspplaymax = 0x6C;
+ break;
+ case QTI_PHONECARD:
+ dspplaymax = 100;
+ break;
+ default:
+ return -1;
+ }
+ volume = get_play_volume(j);
+ newvolume = (volume * 100) / dspplaymax;
+ if(newvolume > 100)
+ newvolume = 100;
+ return newvolume;
+}
+
+static inline BYTE SLIC_GetState(IXJ *j)
+{
+ if (j->cardtype == QTI_PHONECARD) {
+ j->pccr1.byte = 0;
+ j->psccr.bits.dev = 3;
+ j->psccr.bits.rw = 1;
+ outw_p(j->psccr.byte << 8, j->XILINXbase + 0x00);
+ ixj_PCcontrol_wait(j);
+ j->pslic.byte = inw_p(j->XILINXbase + 0x00) & 0xFF;
+ ixj_PCcontrol_wait(j);
+ if (j->pslic.bits.powerdown)
+ return PLD_SLIC_STATE_OC;
+ else if (!j->pslic.bits.ring0 && !j->pslic.bits.ring1)
+ return PLD_SLIC_STATE_ACTIVE;
+ else
+ return PLD_SLIC_STATE_RINGING;
+ } else {
+ j->pld_slicr.byte = inb_p(j->XILINXbase + 0x01);
+ }
+ return j->pld_slicr.bits.state;
+}
+
+static bool SLIC_SetState(BYTE byState, IXJ *j)
+{
+ bool fRetVal = false;
+
+ if (j->cardtype == QTI_PHONECARD) {
+ if (j->flags.pcmciasct) {
+ switch (byState) {
+ case PLD_SLIC_STATE_TIPOPEN:
+ case PLD_SLIC_STATE_OC:
+ j->pslic.bits.powerdown = 1;
+ j->pslic.bits.ring0 = j->pslic.bits.ring1 = 0;
+ fRetVal = true;
+ break;
+ case PLD_SLIC_STATE_RINGING:
+ if (j->readers || j->writers) {
+ j->pslic.bits.powerdown = 0;
+ j->pslic.bits.ring0 = 1;
+ j->pslic.bits.ring1 = 0;
+ fRetVal = true;
+ }
+ break;
+ case PLD_SLIC_STATE_OHT: /* On-hook transmit */
+
+ case PLD_SLIC_STATE_STANDBY:
+ case PLD_SLIC_STATE_ACTIVE:
+ if (j->readers || j->writers) {
+ j->pslic.bits.powerdown = 0;
+ } else {
+ j->pslic.bits.powerdown = 1;
+ }
+ j->pslic.bits.ring0 = j->pslic.bits.ring1 = 0;
+ fRetVal = true;
+ break;
+ case PLD_SLIC_STATE_APR: /* Active polarity reversal */
+
+ case PLD_SLIC_STATE_OHTPR: /* OHT polarity reversal */
+
+ default:
+ fRetVal = false;
+ break;
+ }
+ j->psccr.bits.dev = 3;
+ j->psccr.bits.rw = 0;
+ outw_p(j->psccr.byte << 8 | j->pslic.byte, j->XILINXbase + 0x00);
+ ixj_PCcontrol_wait(j);
+ }
+ } else {
+ /* Set the C1, C2, C3 & B2EN signals. */
+ switch (byState) {
+ case PLD_SLIC_STATE_OC:
+ j->pld_slicw.bits.c1 = 0;
+ j->pld_slicw.bits.c2 = 0;
+ j->pld_slicw.bits.c3 = 0;
+ j->pld_slicw.bits.b2en = 0;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ fRetVal = true;
+ break;
+ case PLD_SLIC_STATE_RINGING:
+ j->pld_slicw.bits.c1 = 1;
+ j->pld_slicw.bits.c2 = 0;
+ j->pld_slicw.bits.c3 = 0;
+ j->pld_slicw.bits.b2en = 1;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ fRetVal = true;
+ break;
+ case PLD_SLIC_STATE_ACTIVE:
+ j->pld_slicw.bits.c1 = 0;
+ j->pld_slicw.bits.c2 = 1;
+ j->pld_slicw.bits.c3 = 0;
+ j->pld_slicw.bits.b2en = 0;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ fRetVal = true;
+ break;
+ case PLD_SLIC_STATE_OHT: /* On-hook transmit */
+
+ j->pld_slicw.bits.c1 = 1;
+ j->pld_slicw.bits.c2 = 1;
+ j->pld_slicw.bits.c3 = 0;
+ j->pld_slicw.bits.b2en = 0;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ fRetVal = true;
+ break;
+ case PLD_SLIC_STATE_TIPOPEN:
+ j->pld_slicw.bits.c1 = 0;
+ j->pld_slicw.bits.c2 = 0;
+ j->pld_slicw.bits.c3 = 1;
+ j->pld_slicw.bits.b2en = 0;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ fRetVal = true;
+ break;
+ case PLD_SLIC_STATE_STANDBY:
+ j->pld_slicw.bits.c1 = 1;
+ j->pld_slicw.bits.c2 = 0;
+ j->pld_slicw.bits.c3 = 1;
+ j->pld_slicw.bits.b2en = 1;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ fRetVal = true;
+ break;
+ case PLD_SLIC_STATE_APR: /* Active polarity reversal */
+
+ j->pld_slicw.bits.c1 = 0;
+ j->pld_slicw.bits.c2 = 1;
+ j->pld_slicw.bits.c3 = 1;
+ j->pld_slicw.bits.b2en = 0;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ fRetVal = true;
+ break;
+ case PLD_SLIC_STATE_OHTPR: /* OHT polarity reversal */
+
+ j->pld_slicw.bits.c1 = 1;
+ j->pld_slicw.bits.c2 = 1;
+ j->pld_slicw.bits.c3 = 1;
+ j->pld_slicw.bits.b2en = 0;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ fRetVal = true;
+ break;
+ default:
+ fRetVal = false;
+ break;
+ }
+ }
+
+ return fRetVal;
+}
+
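+/*
+ * Generate a wink on the POTS port: remember the current SLIC state, open
+ * the loop (PLD_SLIC_STATE_OC) for the configured wink time, then restore
+ * the previous state.
+ */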
+static int ixj_wink(IXJ *j)
+{
+ BYTE slicnow;
+
+ slicnow = SLIC_GetState(j);
+
+ j->pots_winkstart = jiffies;
+ SLIC_SetState(PLD_SLIC_STATE_OC, j);
+
+ msleep(jiffies_to_msecs(j->winktime));
+
+ SLIC_SetState(slicnow, j);
+ return 0;
+}
+
+static void ixj_init_timer(IXJ *j)
+{
+ init_timer(&j->timer);
+ j->timer.function = ixj_timeout;
+ j->timer.data = (unsigned long)j;
+}
+
+static void ixj_add_timer(IXJ *j)
+{
+ j->timer.expires = jiffies + (hertz / samplerate);
+ add_timer(&j->timer);
+}
+
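+/*
+ * Advance the tone cadence state machine: once the current on/off cycle has
+ * elapsed, either step to the next cadence element or handle the PLAY_ONCE,
+ * REPEAT_LAST_ELEMENT and REPEAT_ALL termination modes.
+ */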
+static void ixj_tone_timeout(IXJ *j)
+{
+ IXJ_TONE ti;
+
+ j->tone_state++;
+ if (j->tone_state == 3) {
+ j->tone_state = 0;
+ if (j->cadence_t) {
+ j->tone_cadence_state++;
+ if (j->tone_cadence_state >= j->cadence_t->elements_used) {
+ switch (j->cadence_t->termination) {
+ case PLAY_ONCE:
+ ixj_cpt_stop(j);
+ break;
+ case REPEAT_LAST_ELEMENT:
+ j->tone_cadence_state--;
+ ixj_play_tone(j, j->cadence_t->ce[j->tone_cadence_state].index);
+ break;
+ case REPEAT_ALL:
+ j->tone_cadence_state = 0;
+ if (j->cadence_t->ce[j->tone_cadence_state].freq0) {
+ ti.tone_index = j->cadence_t->ce[j->tone_cadence_state].index;
+ ti.freq0 = j->cadence_t->ce[j->tone_cadence_state].freq0;
+ ti.gain0 = j->cadence_t->ce[j->tone_cadence_state].gain0;
+ ti.freq1 = j->cadence_t->ce[j->tone_cadence_state].freq1;
+ ti.gain1 = j->cadence_t->ce[j->tone_cadence_state].gain1;
+ ixj_init_tone(j, &ti);
+ }
+ ixj_set_tone_on(j->cadence_t->ce[0].tone_on_time, j);
+ ixj_set_tone_off(j->cadence_t->ce[0].tone_off_time, j);
+ ixj_play_tone(j, j->cadence_t->ce[0].index);
+ break;
+ }
+ } else {
+ if (j->cadence_t->ce[j->tone_cadence_state].gain0) {
+ ti.tone_index = j->cadence_t->ce[j->tone_cadence_state].index;
+ ti.freq0 = j->cadence_t->ce[j->tone_cadence_state].freq0;
+ ti.gain0 = j->cadence_t->ce[j->tone_cadence_state].gain0;
+ ti.freq1 = j->cadence_t->ce[j->tone_cadence_state].freq1;
+ ti.gain1 = j->cadence_t->ce[j->tone_cadence_state].gain1;
+ ixj_init_tone(j, &ti);
+ }
+ ixj_set_tone_on(j->cadence_t->ce[j->tone_cadence_state].tone_on_time, j);
+ ixj_set_tone_off(j->cadence_t->ce[j->tone_cadence_state].tone_off_time, j);
+ ixj_play_tone(j, j->cadence_t->ce[j->tone_cadence_state].index);
+ }
+ }
+ }
+}
+
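+/* Deliver the signal registered for this event to any fasync listeners. */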
+static inline void ixj_kill_fasync(IXJ *j, IXJ_SIGEVENT event, int dir)
+{
+ if(j->ixj_signals[event]) {
+ if(ixjdebug & 0x0100)
+ printk("Sending signal for event %d\n", event);
+ /* Send apps notice of change */
+ /* see config.h for macro definition */
+ kill_fasync(&(j->async_queue), j->ixj_signals[event], dir);
+ }
+}
+
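+/*
+ * DAA state handling for the PSTN port: latch ring, caller-ID, cadence and
+ * VDD_OK interrupts from the DAA shadow registers, track the ring cadence on
+ * RMR edges (cadence_f[4]), and move the DAA between the SLEEP, RINGING and
+ * CONVERSATION modes, including wink detection.
+ */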
+static void ixj_pstn_state(IXJ *j)
+{
+ int var;
+ union XOPXR0 XR0, daaint;
+
+ var = 10;
+
+ XR0.reg = j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.reg;
+ daaint.reg = 0;
+ XR0.bitreg.RMR = j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.bitreg.RMR;
+
+ j->pld_scrr.byte = inb_p(j->XILINXbase);
+ if (j->pld_scrr.bits.daaflag) {
+ daa_int_read(j);
+ if(j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.bitreg.RING) {
+ if(time_after(jiffies, j->pstn_sleeptil) && !(j->flags.pots_pstn && j->hookstate)) {
+ daaint.bitreg.RING = 1;
+ if(ixjdebug & 0x0008) {
+ printk(KERN_INFO "IXJ DAA Ring Interrupt /dev/phone%d at %ld\n", j->board, jiffies);
+ }
+ } else {
+ daa_set_mode(j, SOP_PU_RESET);
+ }
+ }
+ if(j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.bitreg.Caller_ID) {
+ daaint.bitreg.Caller_ID = 1;
+ j->pstn_cid_intr = 1;
+ j->pstn_cid_received = jiffies;
+ if(ixjdebug & 0x0008) {
+ printk(KERN_INFO "IXJ DAA Caller_ID Interrupt /dev/phone%d at %ld\n", j->board, jiffies);
+ }
+ }
+ if(j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.bitreg.Cadence) {
+ daaint.bitreg.Cadence = 1;
+ if(ixjdebug & 0x0008) {
+ printk(KERN_INFO "IXJ DAA Cadence Interrupt /dev/phone%d at %ld\n", j->board, jiffies);
+ }
+ }
+ if(j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.bitreg.VDD_OK != XR0.bitreg.VDD_OK) {
+ daaint.bitreg.VDD_OK = 1;
+ daaint.bitreg.SI_0 = j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.bitreg.VDD_OK;
+ }
+ }
+ daa_CR_read(j, 1);
+ if(j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.bitreg.RMR != XR0.bitreg.RMR && time_after(jiffies, j->pstn_sleeptil) && !(j->flags.pots_pstn && j->hookstate)) {
+ daaint.bitreg.RMR = 1;
+ daaint.bitreg.SI_1 = j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.bitreg.RMR;
+ if(ixjdebug & 0x0008) {
+ printk(KERN_INFO "IXJ DAA RMR /dev/phone%d was %s for %ld\n", j->board, XR0.bitreg.RMR?"on":"off", jiffies - j->pstn_last_rmr);
+ }
+ j->pstn_prev_rmr = j->pstn_last_rmr;
+ j->pstn_last_rmr = jiffies;
+ }
+ switch(j->daa_mode) {
+ case SOP_PU_SLEEP:
+ if (daaint.bitreg.RING) {
+ if (!j->flags.pstn_ringing) {
+ if (j->daa_mode != SOP_PU_RINGING) {
+ j->pstn_ring_int = jiffies;
+ daa_set_mode(j, SOP_PU_RINGING);
+ }
+ }
+ }
+ break;
+ case SOP_PU_RINGING:
+ if (daaint.bitreg.RMR) {
+ if (ixjdebug & 0x0008) {
+ printk(KERN_INFO "IXJ Ring Cadence a state = %d /dev/phone%d at %ld\n", j->cadence_f[4].state, j->board, jiffies);
+ }
+ if (daaint.bitreg.SI_1) { /* Rising edge of RMR */
+ j->flags.pstn_rmr = 1;
+ j->pstn_ring_start = jiffies;
+ j->pstn_ring_stop = 0;
+ j->ex.bits.pstn_ring = 0;
+ if (j->cadence_f[4].state == 0) {
+ j->cadence_f[4].state = 1;
+ j->cadence_f[4].on1min = jiffies + (long)((j->cadence_f[4].on1 * hertz * (100 - var)) / 10000);
+ j->cadence_f[4].on1dot = jiffies + (long)((j->cadence_f[4].on1 * hertz * (100)) / 10000);
+ j->cadence_f[4].on1max = jiffies + (long)((j->cadence_f[4].on1 * hertz * (100 + var)) / 10000);
+ } else if (j->cadence_f[4].state == 2) {
+ if((time_after(jiffies, j->cadence_f[4].off1min) &&
+ time_before(jiffies, j->cadence_f[4].off1max))) {
+ if (j->cadence_f[4].on2) {
+ j->cadence_f[4].state = 3;
+ j->cadence_f[4].on2min = jiffies + (long)((j->cadence_f[4].on2 * (hertz * (100 - var)) / 10000));
+ j->cadence_f[4].on2dot = jiffies + (long)((j->cadence_f[4].on2 * (hertz * (100)) / 10000));
+ j->cadence_f[4].on2max = jiffies + (long)((j->cadence_f[4].on2 * (hertz * (100 + var)) / 10000));
+ } else {
+ j->cadence_f[4].state = 7;
+ }
+ } else {
+ if (ixjdebug & 0x0008) {
+ printk(KERN_INFO "IXJ Ring Cadence fail state = %d /dev/phone%d at %ld should be %d\n",
+ j->cadence_f[4].state, j->board, jiffies - j->pstn_prev_rmr,
+ j->cadence_f[4].off1);
+ }
+ j->cadence_f[4].state = 0;
+ }
+ } else if (j->cadence_f[4].state == 4) {
+ if((time_after(jiffies, j->cadence_f[4].off2min) &&
+ time_before(jiffies, j->cadence_f[4].off2max))) {
+ if (j->cadence_f[4].on3) {
+ j->cadence_f[4].state = 5;
+ j->cadence_f[4].on3min = jiffies + (long)((j->cadence_f[4].on3 * (hertz * (100 - var)) / 10000));
+ j->cadence_f[4].on3dot = jiffies + (long)((j->cadence_f[4].on3 * (hertz * (100)) / 10000));
+ j->cadence_f[4].on3max = jiffies + (long)((j->cadence_f[4].on3 * (hertz * (100 + var)) / 10000));
+ } else {
+ j->cadence_f[4].state = 7;
+ }
+ } else {
+ if (ixjdebug & 0x0008) {
+ printk(KERN_INFO "IXJ Ring Cadence fail state = %d /dev/phone%d at %ld should be %d\n",
+ j->cadence_f[4].state, j->board, jiffies - j->pstn_prev_rmr,
+ j->cadence_f[4].off2);
+ }
+ j->cadence_f[4].state = 0;
+ }
+ } else if (j->cadence_f[4].state == 6) {
+ if((time_after(jiffies, j->cadence_f[4].off3min) &&
+ time_before(jiffies, j->cadence_f[4].off3max))) {
+ j->cadence_f[4].state = 7;
+ } else {
+ if (ixjdebug & 0x0008) {
+ printk(KERN_INFO "IXJ Ring Cadence fail state = %d /dev/phone%d at %ld should be %d\n",
+ j->cadence_f[4].state, j->board, jiffies - j->pstn_prev_rmr,
+ j->cadence_f[4].off3);
+ }
+ j->cadence_f[4].state = 0;
+ }
+ } else {
+ j->cadence_f[4].state = 0;
+ }
+ } else { /* Falling edge of RMR */
+ j->pstn_ring_start = 0;
+ j->pstn_ring_stop = jiffies;
+ if (j->cadence_f[4].state == 1) {
+ if(!j->cadence_f[4].on1) {
+ j->cadence_f[4].state = 7;
+ } else if((time_after(jiffies, j->cadence_f[4].on1min) &&
+ time_before(jiffies, j->cadence_f[4].on1max))) {
+ if (j->cadence_f[4].off1) {
+ j->cadence_f[4].state = 2;
+ j->cadence_f[4].off1min = jiffies + (long)((j->cadence_f[4].off1 * (hertz * (100 - var)) / 10000));
+ j->cadence_f[4].off1dot = jiffies + (long)((j->cadence_f[4].off1 * (hertz * (100)) / 10000));
+ j->cadence_f[4].off1max = jiffies + (long)((j->cadence_f[4].off1 * (hertz * (100 + var)) / 10000));
+ } else {
+ j->cadence_f[4].state = 7;
+ }
+ } else {
+ if (ixjdebug & 0x0008) {
+ printk(KERN_INFO "IXJ Ring Cadence fail state = %d /dev/phone%d at %ld should be %d\n",
+ j->cadence_f[4].state, j->board, jiffies - j->pstn_prev_rmr,
+ j->cadence_f[4].on1);
+ }
+ j->cadence_f[4].state = 0;
+ }
+ } else if (j->cadence_f[4].state == 3) {
+ if((time_after(jiffies, j->cadence_f[4].on2min) &&
+ time_before(jiffies, j->cadence_f[4].on2max))) {
+ if (j->cadence_f[4].off2) {
+ j->cadence_f[4].state = 4;
+ j->cadence_f[4].off2min = jiffies + (long)((j->cadence_f[4].off2 * (hertz * (100 - var)) / 10000));
+ j->cadence_f[4].off2dot = jiffies + (long)((j->cadence_f[4].off2 * (hertz * (100)) / 10000));
+ j->cadence_f[4].off2max = jiffies + (long)((j->cadence_f[4].off2 * (hertz * (100 + var)) / 10000));
+ } else {
+ j->cadence_f[4].state = 7;
+ }
+ } else {
+ if (ixjdebug & 0x0008) {
+ printk(KERN_INFO "IXJ Ring Cadence fail state = %d /dev/phone%d at %ld should be %d\n",
+ j->cadence_f[4].state, j->board, jiffies - j->pstn_prev_rmr,
+ j->cadence_f[4].on2);
+ }
+ j->cadence_f[4].state = 0;
+ }
+ } else if (j->cadence_f[4].state == 5) {
+ if((time_after(jiffies, j->cadence_f[4].on3min) &&
+ time_before(jiffies, j->cadence_f[4].on3max))) {
+ if (j->cadence_f[4].off3) {
+ j->cadence_f[4].state = 6;
+ j->cadence_f[4].off3min = jiffies + (long)((j->cadence_f[4].off3 * (hertz * (100 - var)) / 10000));
+ j->cadence_f[4].off3dot = jiffies + (long)((j->cadence_f[4].off3 * (hertz * (100)) / 10000));
+ j->cadence_f[4].off3max = jiffies + (long)((j->cadence_f[4].off3 * (hertz * (100 + var)) / 10000));
+ } else {
+ j->cadence_f[4].state = 7;
+ }
+ } else {
+ j->cadence_f[4].state = 0;
+ }
+ } else {
+ if (ixjdebug & 0x0008) {
+ printk(KERN_INFO "IXJ Ring Cadence fail state = %d /dev/phone%d at %ld should be %d\n",
+ j->cadence_f[4].state, j->board, jiffies - j->pstn_prev_rmr,
+ j->cadence_f[4].on3);
+ }
+ j->cadence_f[4].state = 0;
+ }
+ }
+ if (ixjdebug & 0x0010) {
+ printk(KERN_INFO "IXJ Ring Cadence b state = %d /dev/phone%d at %ld\n", j->cadence_f[4].state, j->board, jiffies);
+ }
+ if (ixjdebug & 0x0010) {
+ switch(j->cadence_f[4].state) {
+ case 1:
+ printk(KERN_INFO "IXJ /dev/phone%d Next Ring Cadence state at %u min %ld - %ld - max %ld\n", j->board,
+ j->cadence_f[4].on1, j->cadence_f[4].on1min, j->cadence_f[4].on1dot, j->cadence_f[4].on1max);
+ break;
+ case 2:
+ printk(KERN_INFO "IXJ /dev/phone%d Next Ring Cadence state at %u min %ld - %ld - max %ld\n", j->board,
+ j->cadence_f[4].off1, j->cadence_f[4].off1min, j->cadence_f[4].off1dot, j->cadence_f[4].off1max);
+ break;
+ case 3:
+ printk(KERN_INFO "IXJ /dev/phone%d Next Ring Cadence state at %u min %ld - %ld - max %ld\n", j->board,
+ j->cadence_f[4].on2, j->cadence_f[4].on2min, j->cadence_f[4].on2dot, j->cadence_f[4].on2max);
+ break;
+ case 4:
+ printk(KERN_INFO "IXJ /dev/phone%d Next Ring Cadence state at %u min %ld - %ld - max %ld\n", j->board,
+ j->cadence_f[4].off2, j->cadence_f[4].off2min, j->cadence_f[4].off2dot, j->cadence_f[4].off2max);
+ break;
+ case 5:
+ printk(KERN_INFO "IXJ /dev/phone%d Next Ring Cadence state at %u min %ld - %ld - max %ld\n", j->board,
+ j->cadence_f[4].on3, j->cadence_f[4].on3min, j->cadence_f[4].on3dot, j->cadence_f[4].on3max);
+ break;
+ case 6:
+ printk(KERN_INFO "IXJ /dev/phone%d Next Ring Cadence state at %u min %ld - %ld - max %ld\n", j->board,
+ j->cadence_f[4].off3, j->cadence_f[4].off3min, j->cadence_f[4].off3dot, j->cadence_f[4].off3max);
+ break;
+ }
+ }
+ }
+ if (j->cadence_f[4].state == 7) {
+ j->cadence_f[4].state = 0;
+ j->pstn_ring_stop = jiffies;
+ j->ex.bits.pstn_ring = 1;
+ ixj_kill_fasync(j, SIG_PSTN_RING, POLL_IN);
+ if(ixjdebug & 0x0008) {
+ printk(KERN_INFO "IXJ Ring int set /dev/phone%d at %ld\n", j->board, jiffies);
+ }
+ }
+ if((j->pstn_ring_int != 0 && time_after(jiffies, j->pstn_ring_int + (hertz * 5)) && !j->flags.pstn_rmr) ||
+ (j->pstn_ring_stop != 0 && time_after(jiffies, j->pstn_ring_stop + (hertz * 5)))) {
+ if(ixjdebug & 0x0008) {
+ printk("IXJ DAA no ring in 5 seconds /dev/phone%d at %ld\n", j->board, jiffies);
+ printk("IXJ DAA pstn ring int /dev/phone%d at %ld\n", j->board, j->pstn_ring_int);
+ printk("IXJ DAA pstn ring stop /dev/phone%d at %ld\n", j->board, j->pstn_ring_stop);
+ }
+ j->pstn_ring_stop = j->pstn_ring_int = 0;
+ daa_set_mode(j, SOP_PU_SLEEP);
+ }
+ outb_p(j->pld_scrw.byte, j->XILINXbase);
+ if (j->pstn_cid_intr && time_after(jiffies, j->pstn_cid_received + hertz)) {
+ ixj_daa_cid_read(j);
+ j->ex.bits.caller_id = 1;
+ ixj_kill_fasync(j, SIG_CALLER_ID, POLL_IN);
+ j->pstn_cid_intr = 0;
+ }
+ if (daaint.bitreg.Cadence) {
+ if(ixjdebug & 0x0008) {
+ printk("IXJ DAA Cadence interrupt going to sleep /dev/phone%d\n", j->board);
+ }
+ daa_set_mode(j, SOP_PU_SLEEP);
+ j->ex.bits.pstn_ring = 0;
+ }
+ break;
+ case SOP_PU_CONVERSATION:
+ if (daaint.bitreg.VDD_OK) {
+ if(!daaint.bitreg.SI_0) {
+ if (!j->pstn_winkstart) {
+ if(ixjdebug & 0x0008) {
+ printk("IXJ DAA possible wink /dev/phone%d %ld\n", j->board, jiffies);
+ }
+ j->pstn_winkstart = jiffies;
+ }
+ } else {
+ if (j->pstn_winkstart) {
+ if(ixjdebug & 0x0008) {
+ printk("IXJ DAA possible wink end /dev/phone%d %ld\n", j->board, jiffies);
+ }
+ j->pstn_winkstart = 0;
+ }
+ }
+ }
+ if (j->pstn_winkstart && time_after(jiffies, j->pstn_winkstart + ((hertz * j->winktime) / 1000))) {
+ if(ixjdebug & 0x0008) {
+ printk("IXJ DAA wink detected going to sleep /dev/phone%d %ld\n", j->board, jiffies);
+ }
+ daa_set_mode(j, SOP_PU_SLEEP);
+ j->pstn_winkstart = 0;
+ j->ex.bits.pstn_wink = 1;
+ ixj_kill_fasync(j, SIG_PSTN_WINK, POLL_IN);
+ }
+ break;
+ }
+}
+
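+/*
+ * Periodic timer callback that drives the driver: grab the per-board busy
+ * flag, update the hook state, service tone playback and cadences, move
+ * audio frames when the DSP is ready, run the ringing cadence and PSTN
+ * monitoring, and finally rearm the timer.
+ */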
+static void ixj_timeout(unsigned long ptr)
+{
+ int board;
+ unsigned long jifon;
+ IXJ *j = (IXJ *)ptr;
+ board = j->board;
+
+ if (j->DSPbase && atomic_read(&j->DSPWrite) == 0 && test_and_set_bit(board, (void *)&j->busyflags) == 0) {
+ ixj_perfmon(j->timerchecks);
+ j->hookstate = ixj_hookstate(j);
+ if (j->tone_state) {
+ if (!(j->hookstate)) {
+ ixj_cpt_stop(j);
+ if (j->m_hook) {
+ j->m_hook = 0;
+ j->ex.bits.hookstate = 1;
+ ixj_kill_fasync(j, SIG_HOOKSTATE, POLL_IN);
+ }
+ clear_bit(board, &j->busyflags);
+ ixj_add_timer(j);
+ return;
+ }
+ if (j->tone_state == 1)
+ jifon = ((hertz * j->tone_on_time) * 25 / 100000);
+ else
+ jifon = ((hertz * j->tone_on_time) * 25 / 100000) + ((hertz * j->tone_off_time) * 25 / 100000);
+ if (time_before(jiffies, j->tone_start_jif + jifon)) {
+ if (j->tone_state == 1) {
+ ixj_play_tone(j, j->tone_index);
+ if (j->dsp.low == 0x20) {
+ clear_bit(board, &j->busyflags);
+ ixj_add_timer(j);
+ return;
+ }
+ } else {
+ ixj_play_tone(j, 0);
+ if (j->dsp.low == 0x20) {
+ clear_bit(board, &j->busyflags);
+ ixj_add_timer(j);
+ return;
+ }
+ }
+ } else {
+ ixj_tone_timeout(j);
+ if (j->flags.dialtone) {
+ ixj_dialtone(j);
+ }
+ if (j->flags.busytone) {
+ ixj_busytone(j);
+ if (j->dsp.low == 0x20) {
+ clear_bit(board, &j->busyflags);
+ ixj_add_timer(j);
+ return;
+ }
+ }
+ if (j->flags.ringback) {
+ ixj_ringback(j);
+ if (j->dsp.low == 0x20) {
+ clear_bit(board, &j->busyflags);
+ ixj_add_timer(j);
+ return;
+ }
+ }
+ if (!j->tone_state) {
+ ixj_cpt_stop(j);
+ }
+ }
+ }
+ if (!(j->tone_state && j->dsp.low == 0x20)) {
+ if (IsRxReady(j)) {
+ ixj_read_frame(j);
+ }
+ if (IsTxReady(j)) {
+ ixj_write_frame(j);
+ }
+ }
+ if (j->flags.cringing) {
+ if (j->hookstate & 1) {
+ j->flags.cringing = 0;
+ ixj_ring_off(j);
+ } else if(j->cadence_f[5].enable && ((!j->cadence_f[5].en_filter) || (j->cadence_f[5].en_filter && j->flags.firstring))) {
+ switch(j->cadence_f[5].state) {
+ case 0:
+ j->cadence_f[5].on1dot = jiffies + (long)((j->cadence_f[5].on1 * (hertz * 100) / 10000));
+ if (time_before(jiffies, j->cadence_f[5].on1dot)) {
+ if(ixjdebug & 0x0004) {
+ printk("Ringing cadence state = %d - %ld\n", j->cadence_f[5].state, jiffies);
+ }
+ ixj_ring_on(j);
+ }
+ j->cadence_f[5].state = 1;
+ break;
+ case 1:
+ if (time_after(jiffies, j->cadence_f[5].on1dot)) {
+ j->cadence_f[5].off1dot = jiffies + (long)((j->cadence_f[5].off1 * (hertz * 100) / 10000));
+ if(ixjdebug & 0x0004) {
+ printk("Ringing cadence state = %d - %ld\n", j->cadence_f[5].state, jiffies);
+ }
+ ixj_ring_off(j);
+ j->cadence_f[5].state = 2;
+ }
+ break;
+ case 2:
+ if (time_after(jiffies, j->cadence_f[5].off1dot)) {
+ if(ixjdebug & 0x0004) {
+ printk("Ringing cadence state = %d - %ld\n", j->cadence_f[5].state, jiffies);
+ }
+ ixj_ring_on(j);
+ if (j->cadence_f[5].on2) {
+ j->cadence_f[5].on2dot = jiffies + (long)((j->cadence_f[5].on2 * (hertz * 100) / 10000));
+ j->cadence_f[5].state = 3;
+ } else {
+ j->cadence_f[5].state = 7;
+ }
+ }
+ break;
+ case 3:
+ if (time_after(jiffies, j->cadence_f[5].on2dot)) {
+ if(ixjdebug & 0x0004) {
+ printk("Ringing cadence state = %d - %ld\n", j->cadence_f[5].state, jiffies);
+ }
+ ixj_ring_off(j);
+ if (j->cadence_f[5].off2) {
+ j->cadence_f[5].off2dot = jiffies + (long)((j->cadence_f[5].off2 * (hertz * 100) / 10000));
+ j->cadence_f[5].state = 4;
+ } else {
+ j->cadence_f[5].state = 7;
+ }
+ }
+ break;
+ case 4:
+ if (time_after(jiffies, j->cadence_f[5].off2dot)) {
+ if(ixjdebug & 0x0004) {
+ printk("Ringing cadence state = %d - %ld\n", j->cadence_f[5].state, jiffies);
+ }
+ ixj_ring_on(j);
+ if (j->cadence_f[5].on3) {
+ j->cadence_f[5].on3dot = jiffies + (long)((j->cadence_f[5].on3 * (hertz * 100) / 10000));
+ j->cadence_f[5].state = 5;
+ } else {
+ j->cadence_f[5].state = 7;
+ }
+ }
+ break;
+ case 5:
+ if (time_after(jiffies, j->cadence_f[5].on3dot)) {
+ if(ixjdebug & 0x0004) {
+ printk("Ringing cadence state = %d - %ld\n", j->cadence_f[5].state, jiffies);
+ }
+ ixj_ring_off(j);
+ if (j->cadence_f[5].off3) {
+ j->cadence_f[5].off3dot = jiffies + (long)((j->cadence_f[5].off3 * (hertz * 100) / 10000));
+ j->cadence_f[5].state = 6;
+ } else {
+ j->cadence_f[5].state = 7;
+ }
+ }
+ break;
+ case 6:
+ if (time_after(jiffies, j->cadence_f[5].off3dot)) {
+ if(ixjdebug & 0x0004) {
+ printk("Ringing cadence state = %d - %ld\n", j->cadence_f[5].state, jiffies);
+ }
+ j->cadence_f[5].state = 7;
+ }
+ break;
+ case 7:
+ if(ixjdebug & 0x0004) {
+ printk("Ringing cadence state = %d - %ld\n", j->cadence_f[5].state, jiffies);
+ }
+ j->flags.cidring = 1;
+ j->cadence_f[5].state = 0;
+ break;
+ }
+ if (j->flags.cidring && !j->flags.cidsent) {
+ j->flags.cidsent = 1;
+ if(j->fskdcnt) {
+ SLIC_SetState(PLD_SLIC_STATE_OHT, j);
+ ixj_pre_cid(j);
+ }
+ j->flags.cidring = 0;
+ }
+ clear_bit(board, &j->busyflags);
+ ixj_add_timer(j);
+ return;
+ } else {
+ if (time_after(jiffies, j->ring_cadence_jif + (hertz / 2))) {
+ if (j->flags.cidring && !j->flags.cidsent) {
+ j->flags.cidsent = 1;
+ if(j->fskdcnt) {
+ SLIC_SetState(PLD_SLIC_STATE_OHT, j);
+ ixj_pre_cid(j);
+ }
+ j->flags.cidring = 0;
+ }
+ j->ring_cadence_t--;
+ if (j->ring_cadence_t == -1)
+ j->ring_cadence_t = 15;
+ j->ring_cadence_jif = jiffies;
+
+ if (j->ring_cadence & 1 << j->ring_cadence_t) {
+ if(j->flags.cidsent && j->cadence_f[5].en_filter)
+ j->flags.firstring = 1;
+ else
+ ixj_ring_on(j);
+ } else {
+ ixj_ring_off(j);
+ if(!j->flags.cidsent)
+ j->flags.cidring = 1;
+ }
+ }
+ clear_bit(board, &j->busyflags);
+ ixj_add_timer(j);
+ return;
+ }
+ }
+ if (!j->flags.ringing) {
+ if (j->hookstate) { /* & 1) { */
+ if (j->dsp.low != 0x20 &&
+ SLIC_GetState(j) != PLD_SLIC_STATE_ACTIVE) {
+ SLIC_SetState(PLD_SLIC_STATE_ACTIVE, j);
+ }
+ LineMonitor(j);
+ read_filters(j);
+ ixj_WriteDSPCommand(0x511B, j);
+ j->proc_load = j->ssr.high << 8 | j->ssr.low;
+ if (!j->m_hook && (j->hookstate & 1)) {
+ j->m_hook = j->ex.bits.hookstate = 1;
+ ixj_kill_fasync(j, SIG_HOOKSTATE, POLL_IN);
+ }
+ } else {
+ if (j->ex.bits.dtmf_ready) {
+ j->dtmf_wp = j->dtmf_rp = j->ex.bits.dtmf_ready = 0;
+ }
+ if (j->m_hook) {
+ j->m_hook = 0;
+ j->ex.bits.hookstate = 1;
+ ixj_kill_fasync(j, SIG_HOOKSTATE, POLL_IN);
+ }
+ }
+ }
+ if (j->cardtype == QTI_LINEJACK && !j->flags.pstncheck && j->flags.pstn_present) {
+ ixj_pstn_state(j);
+ }
+ if (j->ex.bytes) {
+ wake_up_interruptible(&j->poll_q); /* Wake any blocked selects */
+ }
+ clear_bit(board, &j->busyflags);
+ }
+ ixj_add_timer(j);
+}
+
+static int ixj_status_wait(IXJ *j)
+{
+ unsigned long jif;
+
+ jif = jiffies + ((60 * hertz) / 100);
+ while (!IsStatusReady(j)) {
+ ixj_perfmon(j->statuswait);
+ if (time_after(jiffies, jif)) {
+ ixj_perfmon(j->statuswaitfail);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int ixj_PCcontrol_wait(IXJ *j)
+{
+ unsigned long jif;
+
+ jif = jiffies + ((60 * hertz) / 100);
+ while (!IsPCControlReady(j)) {
+ ixj_perfmon(j->pcontrolwait);
+ if (time_after(jiffies, jif)) {
+ ixj_perfmon(j->pcontrolwaitfail);
+ return -1;
+ }
+ }
+ return 0;
+}
+
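+/*
+ * Write a 16-bit command to the DSP host port.  The routine waits for the
+ * control and status handshakes, leaves the result in j->ssr, and uses the
+ * DSPWrite counter to detect overlapping commands.
+ */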
+static int ixj_WriteDSPCommand(unsigned short cmd, IXJ *j)
+{
+ BYTES bytes;
+ unsigned long jif;
+
+ atomic_inc(&j->DSPWrite);
+ if(atomic_read(&j->DSPWrite) > 1) {
+ printk("IXJ %d DSP write overlap attempting command 0x%4.4x\n", j->board, cmd);
+ return -1;
+ }
+ bytes.high = (cmd & 0xFF00) >> 8;
+ bytes.low = cmd & 0x00FF;
+ jif = jiffies + ((60 * hertz) / 100);
+ while (!IsControlReady(j)) {
+ ixj_perfmon(j->iscontrolready);
+ if (time_after(jiffies, jif)) {
+ ixj_perfmon(j->iscontrolreadyfail);
+ atomic_dec(&j->DSPWrite);
+ if(atomic_read(&j->DSPWrite) > 0) {
+ printk("IXJ %d DSP overlapped command 0x%4.4x during control ready failure.\n", j->board, cmd);
+ while(atomic_read(&j->DSPWrite) > 0) {
+ atomic_dec(&j->DSPWrite);
+ }
+ }
+ return -1;
+ }
+ }
+ outb(bytes.low, j->DSPbase + 6);
+ outb(bytes.high, j->DSPbase + 7);
+
+ if (ixj_status_wait(j)) {
+ j->ssr.low = 0xFF;
+ j->ssr.high = 0xFF;
+ atomic_dec(&j->DSPWrite);
+ if(atomic_read(&j->DSPWrite) > 0) {
+ printk("IXJ %d DSP overlapped command 0x%4.4x during status wait failure.\n", j->board, cmd);
+ while(atomic_read(&j->DSPWrite) > 0) {
+ atomic_dec(&j->DSPWrite);
+ }
+ }
+ return -1;
+ }
+/* Read Software Status Register */
+ j->ssr.low = inb_p(j->DSPbase + 2);
+ j->ssr.high = inb_p(j->DSPbase + 3);
+ atomic_dec(&j->DSPWrite);
+ if(atomic_read(&j->DSPWrite) > 0) {
+ printk("IXJ %d DSP overlapped command 0x%4.4x\n", j->board, cmd);
+ while(atomic_read(&j->DSPWrite) > 0) {
+ atomic_dec(&j->DSPWrite);
+ }
+ }
+ return 0;
+}
+
+/***************************************************************************
+*
+* General Purpose IO Register read routine
+*
+***************************************************************************/
+static inline int ixj_gpio_read(IXJ *j)
+{
+ if (ixj_WriteDSPCommand(0x5143, j))
+ return -1;
+
+ j->gpio.bytes.low = j->ssr.low;
+ j->gpio.bytes.high = j->ssr.high;
+
+ return 0;
+}
+
+static inline void LED_SetState(int state, IXJ *j)
+{
+ if (j->cardtype == QTI_LINEJACK) {
+ j->pld_scrw.bits.led1 = state & 0x1 ? 1 : 0;
+ j->pld_scrw.bits.led2 = state & 0x2 ? 1 : 0;
+ j->pld_scrw.bits.led3 = state & 0x4 ? 1 : 0;
+ j->pld_scrw.bits.led4 = state & 0x8 ? 1 : 0;
+
+ outb(j->pld_scrw.byte, j->XILINXbase);
+ }
+}
+
+/*********************************************************************
+* GPIO Pins are configured as follows on the Quicknet Internet
+* PhoneJACK Telephony Cards
+*
+* POTS Select GPIO_6=0 GPIO_7=0
+* Mic/Speaker Select GPIO_6=0 GPIO_7=1
+* Handset Select GPIO_6=1 GPIO_7=0
+*
+* SLIC Active GPIO_1=0 GPIO_2=1 GPIO_5=0
+* SLIC Ringing GPIO_1=1 GPIO_2=1 GPIO_5=0
+* SLIC Open Circuit GPIO_1=0 GPIO_2=0 GPIO_5=0
+*
+* Hook Switch changes reported on GPIO_3
+*********************************************************************/
+static int ixj_set_port(IXJ *j, int arg)
+{
+ if (j->cardtype == QTI_PHONEJACK_LITE) {
+ if (arg != PORT_POTS)
+ return 10;
+ else
+ return 0;
+ }
+ switch (arg) {
+ case PORT_POTS:
+ j->port = PORT_POTS;
+ switch (j->cardtype) {
+ case QTI_PHONECARD:
+ if (j->flags.pcmciasct == 1)
+ SLIC_SetState(PLD_SLIC_STATE_ACTIVE, j);
+ else
+ return 11;
+ break;
+ case QTI_PHONEJACK_PCI:
+ j->pld_slicw.pcib.mic = 0;
+ j->pld_slicw.pcib.spk = 0;
+ outb(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ break;
+ case QTI_LINEJACK:
+ ixj_set_pots(j, 0); /* Disconnect POTS/PSTN relay */
+ if (ixj_WriteDSPCommand(0xC528, j)) /* Write CODEC config to
+ Software Control Register */
+ return 2;
+ j->pld_scrw.bits.daafsyncen = 0; /* Turn off DAA Frame Sync */
+
+ outb(j->pld_scrw.byte, j->XILINXbase);
+ j->pld_clock.byte = 0;
+ outb(j->pld_clock.byte, j->XILINXbase + 0x04);
+ j->pld_slicw.bits.rly1 = 1;
+ j->pld_slicw.bits.spken = 0;
+ outb(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ ixj_mixer(0x1200, j); /* Turn Off MIC switch on mixer left */
+ ixj_mixer(0x1401, j); /* Turn On Mono1 switch on mixer left */
+ ixj_mixer(0x1300, j); /* Turn Off MIC switch on mixer right */
+ ixj_mixer(0x1501, j); /* Turn On Mono1 switch on mixer right */
+ ixj_mixer(0x0E80, j); /* Mic mute */
+ ixj_mixer(0x0F00, j); /* Set mono out (SLIC) to 0dB */
+ ixj_mixer(0x0080, j); /* Mute Master Left volume */
+ ixj_mixer(0x0180, j); /* Mute Master Right volume */
+ SLIC_SetState(PLD_SLIC_STATE_STANDBY, j);
+/* SLIC_SetState(PLD_SLIC_STATE_ACTIVE, j); */
+ break;
+ case QTI_PHONEJACK:
+ j->gpio.bytes.high = 0x0B;
+ j->gpio.bits.gpio6 = 0;
+ j->gpio.bits.gpio7 = 0;
+ ixj_WriteDSPCommand(j->gpio.word, j);
+ break;
+ }
+ break;
+ case PORT_PSTN:
+ if (j->cardtype == QTI_LINEJACK) {
+ ixj_WriteDSPCommand(0xC534, j); /* Write CODEC config to Software Control Register */
+
+ j->pld_slicw.bits.rly3 = 0;
+ j->pld_slicw.bits.rly1 = 1;
+ j->pld_slicw.bits.spken = 0;
+ outb(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ j->port = PORT_PSTN;
+ } else {
+ return 4;
+ }
+ break;
+ case PORT_SPEAKER:
+ j->port = PORT_SPEAKER;
+ switch (j->cardtype) {
+ case QTI_PHONECARD:
+ if (j->flags.pcmciasct) {
+ SLIC_SetState(PLD_SLIC_STATE_OC, j);
+ }
+ break;
+ case QTI_PHONEJACK_PCI:
+ j->pld_slicw.pcib.mic = 1;
+ j->pld_slicw.pcib.spk = 1;
+ outb(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ break;
+ case QTI_LINEJACK:
+ ixj_set_pots(j, 0); /* Disconnect POTS/PSTN relay */
+ if (ixj_WriteDSPCommand(0xC528, j)) /* Write CODEC config to
+ Software Control Register */
+ return 2;
+ j->pld_scrw.bits.daafsyncen = 0; /* Turn off DAA Frame Sync */
+
+ outb(j->pld_scrw.byte, j->XILINXbase);
+ j->pld_clock.byte = 0;
+ outb(j->pld_clock.byte, j->XILINXbase + 0x04);
+ j->pld_slicw.bits.rly1 = 1;
+ j->pld_slicw.bits.spken = 1;
+ outb(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ ixj_mixer(0x1201, j); /* Turn On MIC switch on mixer left */
+ ixj_mixer(0x1400, j); /* Turn Off Mono1 switch on mixer left */
+ ixj_mixer(0x1301, j); /* Turn On MIC switch on mixer right */
+ ixj_mixer(0x1500, j); /* Turn Off Mono1 switch on mixer right */
+ ixj_mixer(0x0E06, j); /* Mic un-mute 0 dB */
+ ixj_mixer(0x0F80, j); /* Mute mono out (SLIC) */
+ ixj_mixer(0x0000, j); /* Set Master Left volume to 0dB */
+ ixj_mixer(0x0100, j); /* Set Master Right volume to 0dB */
+ break;
+ case QTI_PHONEJACK:
+ j->gpio.bytes.high = 0x0B;
+ j->gpio.bits.gpio6 = 0;
+ j->gpio.bits.gpio7 = 1;
+ ixj_WriteDSPCommand(j->gpio.word, j);
+ break;
+ }
+ break;
+ case PORT_HANDSET:
+ if (j->cardtype != QTI_PHONEJACK) {
+ return 5;
+ } else {
+ j->gpio.bytes.high = 0x0B;
+ j->gpio.bits.gpio6 = 1;
+ j->gpio.bits.gpio7 = 0;
+ ixj_WriteDSPCommand(j->gpio.word, j);
+ j->port = PORT_HANDSET;
+ }
+ break;
+ default:
+ return 6;
+ break;
+ }
+ return 0;
+}
+
+static int ixj_set_pots(IXJ *j, int arg)
+{
+ if (j->cardtype == QTI_LINEJACK) {
+ if (arg) {
+ if (j->port == PORT_PSTN) {
+ j->pld_slicw.bits.rly1 = 0;
+ outb(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ j->flags.pots_pstn = 1;
+ return 1;
+ } else {
+ j->flags.pots_pstn = 0;
+ return 0;
+ }
+ } else {
+ j->pld_slicw.bits.rly1 = 1;
+ outb(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ j->flags.pots_pstn = 0;
+ return 1;
+ }
+ } else {
+ return 0;
+ }
+}
+
+static void ixj_ring_on(IXJ *j)
+{
+ if (j->dsp.low == 0x20) /* Internet PhoneJACK */
+ {
+ if (ixjdebug & 0x0004)
+ printk(KERN_INFO "IXJ Ring On /dev/phone%d\n", j->board);
+
+ j->gpio.bytes.high = 0x0B;
+ j->gpio.bytes.low = 0x00;
+ j->gpio.bits.gpio1 = 1;
+ j->gpio.bits.gpio2 = 1;
+ j->gpio.bits.gpio5 = 0;
+ ixj_WriteDSPCommand(j->gpio.word, j); /* send the ring signal */
+ } else /* Internet LineJACK, Internet PhoneJACK Lite or Internet PhoneJACK PCI */
+ {
+ if (ixjdebug & 0x0004)
+ printk(KERN_INFO "IXJ Ring On /dev/phone%d\n", j->board);
+
+ SLIC_SetState(PLD_SLIC_STATE_RINGING, j);
+ }
+}
+
+static int ixj_siadc(IXJ *j, int val)
+{
+ if(j->cardtype == QTI_PHONECARD){
+ if(j->flags.pcmciascp){
+ if(val == -1)
+ return j->siadc.bits.rxg;
+
+ if(val < 0 || val > 0x1F)
+ return -1;
+
+ j->siadc.bits.hom = 0; /* Handset Out Mute */
+ j->siadc.bits.lom = 0; /* Line Out Mute */
+ j->siadc.bits.rxg = val; /*(0xC000 - 0x41C8) / 0x4EF; RX PGA Gain */
+ j->psccr.bits.addr = 6; /* R/W Smart Cable Register Address */
+ j->psccr.bits.rw = 0; /* Read / Write flag */
+ j->psccr.bits.dev = 0;
+ outb(j->siadc.byte, j->XILINXbase + 0x00);
+ outb(j->psccr.byte, j->XILINXbase + 0x01);
+ ixj_PCcontrol_wait(j);
+ return j->siadc.bits.rxg;
+ }
+ }
+ return -1;
+}
+
+static int ixj_sidac(IXJ *j, int val)
+{
+ if(j->cardtype == QTI_PHONECARD){
+ if(j->flags.pcmciascp){
+ if(val == -1)
+ return j->sidac.bits.txg;
+
+ if(val < 0 || val > 0x1F)
+ return -1;
+
+ j->sidac.bits.srm = 1; /* Speaker Right Mute */
+ j->sidac.bits.slm = 1; /* Speaker Left Mute */
+ j->sidac.bits.txg = val; /* (0xC000 - 0x45E4) / 0x5D3; TX PGA Gain */
+ j->psccr.bits.addr = 7; /* R/W Smart Cable Register Address */
+ j->psccr.bits.rw = 0; /* Read / Write flag */
+ j->psccr.bits.dev = 0;
+ outb(j->sidac.byte, j->XILINXbase + 0x00);
+ outb(j->psccr.byte, j->XILINXbase + 0x01);
+ ixj_PCcontrol_wait(j);
+ return j->sidac.bits.txg;
+ }
+ }
+ return -1;
+}
+
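+/*
+ * Smart Cable detection state machine for the Internet PhoneCARD: step
+ * flags.pcmciastate from 4 down to 0 while powering and resetting the cable,
+ * then program the Smart Cable codec registers and report the hook state.
+ */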
+static int ixj_pcmcia_cable_check(IXJ *j)
+{
+ j->pccr1.byte = inb_p(j->XILINXbase + 0x03);
+ if (!j->flags.pcmciastate) {
+ j->pccr2.byte = inb_p(j->XILINXbase + 0x02);
+ if (j->pccr1.bits.drf || j->pccr2.bits.rstc) {
+ j->flags.pcmciastate = 4;
+ return 0;
+ }
+ if (j->pccr1.bits.ed) {
+ j->pccr1.bits.ed = 0;
+ j->psccr.bits.dev = 3;
+ j->psccr.bits.rw = 1;
+ outw_p(j->psccr.byte << 8, j->XILINXbase + 0x00);
+ ixj_PCcontrol_wait(j);
+ j->pslic.byte = inw_p(j->XILINXbase + 0x00) & 0xFF;
+ j->pslic.bits.led2 = j->pslic.bits.det ? 1 : 0;
+ j->psccr.bits.dev = 3;
+ j->psccr.bits.rw = 0;
+ outw_p(j->psccr.byte << 8 | j->pslic.byte, j->XILINXbase + 0x00);
+ ixj_PCcontrol_wait(j);
+ return j->pslic.bits.led2 ? 1 : 0;
+ } else if (j->flags.pcmciasct) {
+ return j->r_hook;
+ } else {
+ return 1;
+ }
+ } else if (j->flags.pcmciastate == 4) {
+ if (!j->pccr1.bits.drf) {
+ j->flags.pcmciastate = 3;
+ }
+ return 0;
+ } else if (j->flags.pcmciastate == 3) {
+ j->pccr2.bits.pwr = 0;
+ j->pccr2.bits.rstc = 1;
+ outb(j->pccr2.byte, j->XILINXbase + 0x02);
+ j->checkwait = jiffies + (hertz * 2);
+ j->flags.incheck = 1;
+ j->flags.pcmciastate = 2;
+ return 0;
+ } else if (j->flags.pcmciastate == 2) {
+ if (j->flags.incheck) {
+ if (time_before(jiffies, j->checkwait)) {
+ return 0;
+ } else {
+ j->flags.incheck = 0;
+ }
+ }
+ j->pccr2.bits.pwr = 0;
+ j->pccr2.bits.rstc = 0;
+ outb_p(j->pccr2.byte, j->XILINXbase + 0x02);
+ j->flags.pcmciastate = 1;
+ return 0;
+ } else if (j->flags.pcmciastate == 1) {
+ j->flags.pcmciastate = 0;
+ if (!j->pccr1.bits.drf) {
+ j->psccr.bits.dev = 3;
+ j->psccr.bits.rw = 1;
+ outb_p(j->psccr.byte, j->XILINXbase + 0x01);
+ ixj_PCcontrol_wait(j);
+ j->flags.pcmciascp = 1; /* Set Cable Present Flag */
+
+ j->flags.pcmciasct = (inw_p(j->XILINXbase + 0x00) >> 8) & 0x03; /* Get Cable Type */
+
+ if (j->flags.pcmciasct == 3) {
+ j->flags.pcmciastate = 4;
+ return 0;
+ } else if (j->flags.pcmciasct == 0) {
+ j->pccr2.bits.pwr = 1;
+ j->pccr2.bits.rstc = 0;
+ outb_p(j->pccr2.byte, j->XILINXbase + 0x02);
+ j->port = PORT_SPEAKER;
+ } else {
+ j->port = PORT_POTS;
+ }
+ j->sic1.bits.cpd = 0; /* Chip Power Down */
+ j->sic1.bits.mpd = 0; /* MIC Bias Power Down */
+ j->sic1.bits.hpd = 0; /* Handset Bias Power Down */
+ j->sic1.bits.lpd = 0; /* Line Bias Power Down */
+ j->sic1.bits.spd = 1; /* Speaker Drive Power Down */
+ j->psccr.bits.addr = 1; /* R/W Smart Cable Register Address */
+ j->psccr.bits.rw = 0; /* Read / Write flag */
+ j->psccr.bits.dev = 0;
+ outb(j->sic1.byte, j->XILINXbase + 0x00);
+ outb(j->psccr.byte, j->XILINXbase + 0x01);
+ ixj_PCcontrol_wait(j);
+
+ j->sic2.bits.al = 0; /* Analog Loopback DAC analog -> ADC analog */
+ j->sic2.bits.dl2 = 0; /* Digital Loopback DAC -> ADC one bit */
+ j->sic2.bits.dl1 = 0; /* Digital Loopback ADC -> DAC one bit */
+ j->sic2.bits.pll = 0; /* 1 = div 10, 0 = div 5 */
+ j->sic2.bits.hpd = 0; /* HPF disable */
+ j->psccr.bits.addr = 2; /* R/W Smart Cable Register Address */
+ j->psccr.bits.rw = 0; /* Read / Write flag */
+ j->psccr.bits.dev = 0;
+ outb(j->sic2.byte, j->XILINXbase + 0x00);
+ outb(j->psccr.byte, j->XILINXbase + 0x01);
+ ixj_PCcontrol_wait(j);
+
+ j->psccr.bits.addr = 3; /* R/W Smart Cable Register Address */
+ j->psccr.bits.rw = 0; /* Read / Write flag */
+ j->psccr.bits.dev = 0;
+ outb(0x00, j->XILINXbase + 0x00); /* PLL Divide N1 */
+ outb(j->psccr.byte, j->XILINXbase + 0x01);
+ ixj_PCcontrol_wait(j);
+
+ j->psccr.bits.addr = 4; /* R/W Smart Cable Register Address */
+ j->psccr.bits.rw = 0; /* Read / Write flag */
+ j->psccr.bits.dev = 0;
+ outb(0x09, j->XILINXbase + 0x00); /* PLL Multiply M1 */
+ outb(j->psccr.byte, j->XILINXbase + 0x01);
+ ixj_PCcontrol_wait(j);
+
+ j->sirxg.bits.lig = 1; /* Line In Gain */
+ j->sirxg.bits.lim = 1; /* Line In Mute */
+ j->sirxg.bits.mcg = 0; /* MIC In Gain was 3 */
+ j->sirxg.bits.mcm = 0; /* MIC In Mute */
+ j->sirxg.bits.him = 0; /* Handset In Mute */
+ j->sirxg.bits.iir = 1; /* IIR */
+ j->psccr.bits.addr = 5; /* R/W Smart Cable Register Address */
+ j->psccr.bits.rw = 0; /* Read / Write flag */
+ j->psccr.bits.dev = 0;
+ outb(j->sirxg.byte, j->XILINXbase + 0x00);
+ outb(j->psccr.byte, j->XILINXbase + 0x01);
+ ixj_PCcontrol_wait(j);
+
+ ixj_siadc(j, 0x17);
+ ixj_sidac(j, 0x1D);
+
+ j->siaatt.bits.sot = 0;
+ j->psccr.bits.addr = 9; /* R/W Smart Cable Register Address */
+ j->psccr.bits.rw = 0; /* Read / Write flag */
+ j->psccr.bits.dev = 0;
+ outb(j->siaatt.byte, j->XILINXbase + 0x00);
+ outb(j->psccr.byte, j->XILINXbase + 0x01);
+ ixj_PCcontrol_wait(j);
+
+ if (j->flags.pcmciasct == 1 && !j->readers && !j->writers) {
+ j->psccr.byte = j->pslic.byte = 0;
+ j->pslic.bits.powerdown = 1;
+ j->psccr.bits.dev = 3;
+ j->psccr.bits.rw = 0;
+ outw_p(j->psccr.byte << 8 | j->pslic.byte, j->XILINXbase + 0x00);
+ ixj_PCcontrol_wait(j);
+ }
+ }
+ return 0;
+ } else {
+ j->flags.pcmciascp = 0;
+ return 0;
+ }
+ return 0;
+}
+
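+/*
+ * Return the hook status: bit 0 reflects the physical hook detect (GPIO 3,
+ * the SLIC detect bit or the Smart Cable check, depending on the card), and
+ * bit 1 is set for ports that are always treated as off hook (speaker,
+ * handset, or PSTN while in conversation mode).
+ */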
+static int ixj_hookstate(IXJ *j)
+{
+ int fOffHook = 0;
+
+ switch (j->cardtype) {
+ case QTI_PHONEJACK:
+ ixj_gpio_read(j);
+ fOffHook = j->gpio.bits.gpio3read ? 1 : 0;
+ break;
+ case QTI_LINEJACK:
+ case QTI_PHONEJACK_LITE:
+ case QTI_PHONEJACK_PCI:
+ SLIC_GetState(j);
+ if(j->cardtype == QTI_LINEJACK && j->flags.pots_pstn == 1 && (j->readers || j->writers)) {
+ fOffHook = j->pld_slicr.bits.potspstn ? 1 : 0;
+ if(fOffHook != j->p_hook) {
+ if(!j->checkwait) {
+ j->checkwait = jiffies;
+ }
+ if(time_before(jiffies, j->checkwait + 2)) {
+ fOffHook ^= 1;
+ } else {
+ j->checkwait = 0;
+ }
+ j->p_hook = fOffHook;
+ printk("IXJ : /dev/phone%d pots-pstn hookstate check %d at %ld\n", j->board, fOffHook, jiffies);
+ }
+ } else {
+ if (j->pld_slicr.bits.state == PLD_SLIC_STATE_ACTIVE ||
+ j->pld_slicr.bits.state == PLD_SLIC_STATE_STANDBY) {
+ if (j->flags.ringing || j->flags.cringing) {
+ if (!in_interrupt()) {
+ msleep(20);
+ }
+ SLIC_GetState(j);
+ if (j->pld_slicr.bits.state == PLD_SLIC_STATE_RINGING) {
+ ixj_ring_on(j);
+ }
+ }
+ if (j->cardtype == QTI_PHONEJACK_PCI) {
+ j->pld_scrr.byte = inb_p(j->XILINXbase);
+ fOffHook = j->pld_scrr.pcib.det ? 1 : 0;
+ } else
+ fOffHook = j->pld_slicr.bits.det ? 1 : 0;
+ }
+ }
+ break;
+ case QTI_PHONECARD:
+ fOffHook = ixj_pcmcia_cable_check(j);
+ break;
+ }
+ if (j->r_hook != fOffHook) {
+ j->r_hook = fOffHook;
+ if (j->port == PORT_SPEAKER || j->port == PORT_HANDSET) { // || (j->port == PORT_PSTN && j->flags.pots_pstn == 0)) {
+ j->ex.bits.hookstate = 1;
+ ixj_kill_fasync(j, SIG_HOOKSTATE, POLL_IN);
+ } else if (!fOffHook) {
+ j->flash_end = jiffies + ((60 * hertz) / 100);
+ }
+ }
+ if (fOffHook) {
+ if(time_before(jiffies, j->flash_end)) {
+ j->ex.bits.flash = 1;
+ j->flash_end = 0;
+ ixj_kill_fasync(j, SIG_FLASH, POLL_IN);
+ }
+ } else {
+ if(time_before(jiffies, j->flash_end)) {
+ fOffHook = 1;
+ }
+ }
+
+ if (j->port == PORT_PSTN && j->daa_mode == SOP_PU_CONVERSATION)
+ fOffHook |= 2;
+
+ if (j->port == PORT_SPEAKER) {
+ if(j->cardtype == QTI_PHONECARD) {
+ if(j->flags.pcmciascp && j->flags.pcmciasct) {
+ fOffHook |= 2;
+ }
+ } else {
+ fOffHook |= 2;
+ }
+ }
+
+ if (j->port == PORT_HANDSET)
+ fOffHook |= 2;
+
+ return fOffHook;
+}
+
+static void ixj_ring_off(IXJ *j)
+{
+ if (j->dsp.low == 0x20) /* Internet PhoneJACK */
+ {
+ if (ixjdebug & 0x0004)
+ printk(KERN_INFO "IXJ Ring Off\n");
+ j->gpio.bytes.high = 0x0B;
+ j->gpio.bytes.low = 0x00;
+ j->gpio.bits.gpio1 = 0;
+ j->gpio.bits.gpio2 = 1;
+ j->gpio.bits.gpio5 = 0;
+ ixj_WriteDSPCommand(j->gpio.word, j);
+ } else /* Internet LineJACK, Internet PhoneJACK Lite or Internet PhoneJACK PCI */
+ {
+ if (ixjdebug & 0x0004)
+ printk(KERN_INFO "IXJ Ring Off\n");
+
+ if(!j->flags.cidplay)
+ SLIC_SetState(PLD_SLIC_STATE_STANDBY, j);
+
+ SLIC_GetState(j);
+ }
+}
+
+static void ixj_ring_start(IXJ *j)
+{
+ j->flags.cringing = 1;
+ if (ixjdebug & 0x0004)
+ printk(KERN_INFO "IXJ Cadence Ringing Start /dev/phone%d\n", j->board);
+ if (ixj_hookstate(j) & 1) {
+ if (j->port == PORT_POTS)
+ ixj_ring_off(j);
+ j->flags.cringing = 0;
+ if (ixjdebug & 0x0004)
+ printk(KERN_INFO "IXJ Cadence Ringing Stopped /dev/phone%d off hook\n", j->board);
+ } else if(j->cadence_f[5].enable && (!j->cadence_f[5].en_filter)) {
+ j->ring_cadence_jif = jiffies;
+ j->flags.cidsent = j->flags.cidring = 0;
+ j->cadence_f[5].state = 0;
+ if(j->cadence_f[5].on1)
+ ixj_ring_on(j);
+ } else {
+ j->ring_cadence_jif = jiffies;
+ j->ring_cadence_t = 15;
+ if (j->ring_cadence & 1 << j->ring_cadence_t) {
+ ixj_ring_on(j);
+ } else {
+ ixj_ring_off(j);
+ }
+ j->flags.cidsent = j->flags.cidring = j->flags.firstring = 0;
+ }
+}
+
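+/*
+ * Blocking ring: ring the line with a 1 second on / 3 second off pattern for
+ * up to maxrings, returning 1 as soon as the line is answered and 0 if it
+ * never goes off hook.
+ */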
+static int ixj_ring(IXJ *j)
+{
+ char cntr;
+ unsigned long jif;
+
+ j->flags.ringing = 1;
+ if (ixj_hookstate(j) & 1) {
+ ixj_ring_off(j);
+ j->flags.ringing = 0;
+ return 1;
+ }
+ for (cntr = 0; cntr < j->maxrings; cntr++) {
+ jif = jiffies + (1 * hertz);
+ ixj_ring_on(j);
+ while (time_before(jiffies, jif)) {
+ if (ixj_hookstate(j) & 1) {
+ ixj_ring_off(j);
+ j->flags.ringing = 0;
+ return 1;
+ }
+ schedule_timeout_interruptible(1);
+ if (signal_pending(current))
+ break;
+ }
+ jif = jiffies + (3 * hertz);
+ ixj_ring_off(j);
+ while (time_before(jiffies, jif)) {
+ if (ixj_hookstate(j) & 1) {
+ msleep(10);
+ if (ixj_hookstate(j) & 1) {
+ j->flags.ringing = 0;
+ return 1;
+ }
+ }
+ schedule_timeout_interruptible(1);
+ if (signal_pending(current))
+ break;
+ }
+ }
+ ixj_ring_off(j);
+ j->flags.ringing = 0;
+ return 0;
+}
+
+static int ixj_open(struct phone_device *p, struct file *file_p)
+{
+ IXJ *j = get_ixj(p->board);
+ file_p->private_data = j;
+
+ if (!j->DSPbase)
+ return -ENODEV;
+
+ if (file_p->f_mode & FMODE_READ) {
+ if(!j->readers) {
+ j->readers++;
+ } else {
+ return -EBUSY;
+ }
+ }
+
+ if (file_p->f_mode & FMODE_WRITE) {
+ if(!j->writers) {
+ j->writers++;
+ } else {
+ if (file_p->f_mode & FMODE_READ){
+ j->readers--;
+ }
+ return -EBUSY;
+ }
+ }
+
+ if (j->cardtype == QTI_PHONECARD) {
+ j->pslic.bits.powerdown = 0;
+ j->psccr.bits.dev = 3;
+ j->psccr.bits.rw = 0;
+ outw_p(j->psccr.byte << 8 | j->pslic.byte, j->XILINXbase + 0x00);
+ ixj_PCcontrol_wait(j);
+ }
+
+ j->flags.cidplay = 0;
+ j->flags.cidcw_ack = 0;
+
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "Opening board %d\n", p->board);
+
+ j->framesread = j->frameswritten = 0;
+ return 0;
+}
+
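+/*
+ * Release the device: serialize against the timer via busyflags, restore the
+ * default port, volumes, tone table, cadences and signal handlers, free the
+ * read/write buffers, and drop the DSP back into low-power mode.
+ */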
+static int ixj_release(struct inode *inode, struct file *file_p)
+{
+ IXJ_TONE ti;
+ int cnt;
+ IXJ *j = file_p->private_data;
+ int board = j->p.board;
+
+ /*
+ * Set up locks to ensure that only one process is talking to the DSP at a time.
+ * This is necessary to keep the DSP from locking up.
+ */
+ while(test_and_set_bit(board, (void *)&j->busyflags) != 0)
+ schedule_timeout_interruptible(1);
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "Closing board %d\n", NUM(inode));
+
+ if (j->cardtype == QTI_PHONECARD)
+ ixj_set_port(j, PORT_SPEAKER);
+ else
+ ixj_set_port(j, PORT_POTS);
+
+ aec_stop(j);
+ ixj_play_stop(j);
+ ixj_record_stop(j);
+ set_play_volume(j, 0x100);
+ set_rec_volume(j, 0x100);
+ ixj_ring_off(j);
+
+ /* Restore the tone table to default settings. */
+ ti.tone_index = 10;
+ ti.gain0 = 1;
+ ti.freq0 = hz941;
+ ti.gain1 = 0;
+ ti.freq1 = hz1209;
+ ixj_init_tone(j, &ti);
+ ti.tone_index = 11;
+ ti.gain0 = 1;
+ ti.freq0 = hz941;
+ ti.gain1 = 0;
+ ti.freq1 = hz1336;
+ ixj_init_tone(j, &ti);
+ ti.tone_index = 12;
+ ti.gain0 = 1;
+ ti.freq0 = hz941;
+ ti.gain1 = 0;
+ ti.freq1 = hz1477;
+ ixj_init_tone(j, &ti);
+ ti.tone_index = 13;
+ ti.gain0 = 1;
+ ti.freq0 = hz800;
+ ti.gain1 = 0;
+ ti.freq1 = 0;
+ ixj_init_tone(j, &ti);
+ ti.tone_index = 14;
+ ti.gain0 = 1;
+ ti.freq0 = hz1000;
+ ti.gain1 = 0;
+ ti.freq1 = 0;
+ ixj_init_tone(j, &ti);
+ ti.tone_index = 15;
+ ti.gain0 = 1;
+ ti.freq0 = hz1250;
+ ti.gain1 = 0;
+ ti.freq1 = 0;
+ ixj_init_tone(j, &ti);
+ ti.tone_index = 16;
+ ti.gain0 = 1;
+ ti.freq0 = hz950;
+ ti.gain1 = 0;
+ ti.freq1 = 0;
+ ixj_init_tone(j, &ti);
+ ti.tone_index = 17;
+ ti.gain0 = 1;
+ ti.freq0 = hz1100;
+ ti.gain1 = 0;
+ ti.freq1 = 0;
+ ixj_init_tone(j, &ti);
+ ti.tone_index = 18;
+ ti.gain0 = 1;
+ ti.freq0 = hz1400;
+ ti.gain1 = 0;
+ ti.freq1 = 0;
+ ixj_init_tone(j, &ti);
+ ti.tone_index = 19;
+ ti.gain0 = 1;
+ ti.freq0 = hz1500;
+ ti.gain1 = 0;
+ ti.freq1 = 0;
+ ixj_init_tone(j, &ti);
+ ti.tone_index = 20;
+ ti.gain0 = 1;
+ ti.freq0 = hz1600;
+ ti.gain1 = 0;
+ ti.freq1 = 0;
+ ixj_init_tone(j, &ti);
+ ti.tone_index = 21;
+ ti.gain0 = 1;
+ ti.freq0 = hz1800;
+ ti.gain1 = 0;
+ ti.freq1 = 0;
+ ixj_init_tone(j, &ti);
+ ti.tone_index = 22;
+ ti.gain0 = 1;
+ ti.freq0 = hz2100;
+ ti.gain1 = 0;
+ ti.freq1 = 0;
+ ixj_init_tone(j, &ti);
+ ti.tone_index = 23;
+ ti.gain0 = 1;
+ ti.freq0 = hz1300;
+ ti.gain1 = 0;
+ ti.freq1 = 0;
+ ixj_init_tone(j, &ti);
+ ti.tone_index = 24;
+ ti.gain0 = 1;
+ ti.freq0 = hz2450;
+ ti.gain1 = 0;
+ ti.freq1 = 0;
+ ixj_init_tone(j, &ti);
+ ti.tone_index = 25;
+ ti.gain0 = 1;
+ ti.freq0 = hz350;
+ ti.gain1 = 0;
+ ti.freq1 = hz440;
+ ixj_init_tone(j, &ti);
+ ti.tone_index = 26;
+ ti.gain0 = 1;
+ ti.freq0 = hz440;
+ ti.gain1 = 0;
+ ti.freq1 = hz480;
+ ixj_init_tone(j, &ti);
+ ti.tone_index = 27;
+ ti.gain0 = 1;
+ ti.freq0 = hz480;
+ ti.gain1 = 0;
+ ti.freq1 = hz620;
+ ixj_init_tone(j, &ti);
+
+ set_rec_depth(j, 2); /* Set Record Channel Limit to 2 frames */
+
+ set_play_depth(j, 2); /* Set Playback Channel Limit to 2 frames */
+
+ j->ex.bits.dtmf_ready = 0;
+ j->dtmf_state = 0;
+ j->dtmf_wp = j->dtmf_rp = 0;
+ j->rec_mode = j->play_mode = -1;
+ j->flags.ringing = 0;
+ j->maxrings = MAXRINGS;
+ j->ring_cadence = USA_RING_CADENCE;
+ if(j->cadence_f[5].enable) {
+ j->cadence_f[5].enable = j->cadence_f[5].en_filter = j->cadence_f[5].state = 0;
+ }
+ j->drybuffer = 0;
+ j->winktime = 320;
+ j->flags.dtmf_oob = 0;
+ for (cnt = 0; cnt < 4; cnt++)
+ j->cadence_f[cnt].enable = 0;
+
+ idle(j);
+
+ if(j->cardtype == QTI_PHONECARD) {
+ SLIC_SetState(PLD_SLIC_STATE_OC, j);
+ }
+
+ if (file_p->f_mode & FMODE_READ)
+ j->readers--;
+ if (file_p->f_mode & FMODE_WRITE)
+ j->writers--;
+
+ if (j->read_buffer && !j->readers) {
+ kfree(j->read_buffer);
+ j->read_buffer = NULL;
+ j->read_buffer_size = 0;
+ }
+ if (j->write_buffer && !j->writers) {
+ kfree(j->write_buffer);
+ j->write_buffer = NULL;
+ j->write_buffer_size = 0;
+ }
+ j->rec_codec = j->play_codec = 0;
+ j->rec_frame_size = j->play_frame_size = 0;
+ j->flags.cidsent = j->flags.cidring = 0;
+
+ if(j->cardtype == QTI_LINEJACK && !j->readers && !j->writers) {
+ ixj_set_port(j, PORT_PSTN);
+ daa_set_mode(j, SOP_PU_SLEEP);
+ ixj_set_pots(j, 1);
+ }
+ ixj_WriteDSPCommand(0x0FE3, j); /* Put the DSP in 1/5 power mode. */
+
+ /* Set up the default signals for events */
+ for (cnt = 0; cnt < 35; cnt++)
+ j->ixj_signals[cnt] = SIGIO;
+
+ /* Set the exception signal enable flags */
+ j->ex_sig.bits.dtmf_ready = j->ex_sig.bits.hookstate = j->ex_sig.bits.flash = j->ex_sig.bits.pstn_ring =
+ j->ex_sig.bits.caller_id = j->ex_sig.bits.pstn_wink = j->ex_sig.bits.f0 = j->ex_sig.bits.f1 = j->ex_sig.bits.f2 =
+ j->ex_sig.bits.f3 = j->ex_sig.bits.fc0 = j->ex_sig.bits.fc1 = j->ex_sig.bits.fc2 = j->ex_sig.bits.fc3 = 1;
+
+ file_p->private_data = NULL;
+ clear_bit(board, &j->busyflags);
+ return 0;
+}
+
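+/*
+ * Read the DSP frame counter and, once per new frame, the four filter
+ * history registers.  Drives the tone cadence state machines for
+ * cadence_f[0..3] and raises the filter and filter-cadence events.
+ */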
+static int read_filters(IXJ *j)
+{
+ unsigned short fc, cnt, trg;
+ int var;
+
+ trg = 0;
+ if (ixj_WriteDSPCommand(0x5144, j)) {
+ if(ixjdebug & 0x0001) {
+ printk(KERN_INFO "Read Frame Counter failed!\n");
+ }
+ return -1;
+ }
+ fc = j->ssr.high << 8 | j->ssr.low;
+ if (fc == j->frame_count)
+ return 1;
+
+ j->frame_count = fc;
+
+ if (j->dtmf_proc)
+ return 1;
+
+ var = 10;
+
+ for (cnt = 0; cnt < 4; cnt++) {
+ if (ixj_WriteDSPCommand(0x5154 + cnt, j)) {
+ if(ixjdebug & 0x0001) {
+ printk(KERN_INFO "Select Filter %d failed!\n", cnt);
+ }
+ return -1;
+ }
+ if (ixj_WriteDSPCommand(0x515C, j)) {
+ if(ixjdebug & 0x0001) {
+ printk(KERN_INFO "Read Filter History %d failed!\n", cnt);
+ }
+ return -1;
+ }
+ j->filter_hist[cnt] = j->ssr.high << 8 | j->ssr.low;
+
+ if (j->cadence_f[cnt].enable) {
+ if (j->filter_hist[cnt] & 3 && !(j->filter_hist[cnt] & 12)) {
+ if (j->cadence_f[cnt].state == 0) {
+ j->cadence_f[cnt].state = 1;
+ j->cadence_f[cnt].on1min = jiffies + (long)((j->cadence_f[cnt].on1 * (hertz * (100 - var)) / 10000));
+ j->cadence_f[cnt].on1dot = jiffies + (long)((j->cadence_f[cnt].on1 * (hertz * (100)) / 10000));
+ j->cadence_f[cnt].on1max = jiffies + (long)((j->cadence_f[cnt].on1 * (hertz * (100 + var)) / 10000));
+ } else if (j->cadence_f[cnt].state == 2 &&
+ (time_after(jiffies, j->cadence_f[cnt].off1min) &&
+ time_before(jiffies, j->cadence_f[cnt].off1max))) {
+ if (j->cadence_f[cnt].on2) {
+ j->cadence_f[cnt].state = 3;
+ j->cadence_f[cnt].on2min = jiffies + (long)((j->cadence_f[cnt].on2 * (hertz * (100 - var)) / 10000));
+ j->cadence_f[cnt].on2dot = jiffies + (long)((j->cadence_f[cnt].on2 * (hertz * (100)) / 10000));
+ j->cadence_f[cnt].on2max = jiffies + (long)((j->cadence_f[cnt].on2 * (hertz * (100 + var)) / 10000));
+ } else {
+ j->cadence_f[cnt].state = 7;
+ }
+ } else if (j->cadence_f[cnt].state == 4 &&
+ (time_after(jiffies, j->cadence_f[cnt].off2min) &&
+ time_before(jiffies, j->cadence_f[cnt].off2max))) {
+ if (j->cadence_f[cnt].on3) {
+ j->cadence_f[cnt].state = 5;
+ j->cadence_f[cnt].on3min = jiffies + (long)((j->cadence_f[cnt].on3 * (hertz * (100 - var)) / 10000));
+ j->cadence_f[cnt].on3dot = jiffies + (long)((j->cadence_f[cnt].on3 * (hertz * (100)) / 10000));
+ j->cadence_f[cnt].on3max = jiffies + (long)((j->cadence_f[cnt].on3 * (hertz * (100 + var)) / 10000));
+ } else {
+ j->cadence_f[cnt].state = 7;
+ }
+ } else {
+ j->cadence_f[cnt].state = 0;
+ }
+ } else if (j->filter_hist[cnt] & 12 && !(j->filter_hist[cnt] & 3)) {
+ if (j->cadence_f[cnt].state == 1) {
+ if(!j->cadence_f[cnt].on1) {
+ j->cadence_f[cnt].state = 7;
+ } else if((time_after(jiffies, j->cadence_f[cnt].on1min) &&
+ time_before(jiffies, j->cadence_f[cnt].on1max))) {
+ if(j->cadence_f[cnt].off1) {
+ j->cadence_f[cnt].state = 2;
+ j->cadence_f[cnt].off1min = jiffies + (long)((j->cadence_f[cnt].off1 * (hertz * (100 - var)) / 10000));
+ j->cadence_f[cnt].off1dot = jiffies + (long)((j->cadence_f[cnt].off1 * (hertz * (100)) / 10000));
+ j->cadence_f[cnt].off1max = jiffies + (long)((j->cadence_f[cnt].off1 * (hertz * (100 + var)) / 10000));
+ } else {
+ j->cadence_f[cnt].state = 7;
+ }
+ } else {
+ j->cadence_f[cnt].state = 0;
+ }
+ } else if (j->cadence_f[cnt].state == 3) {
+ if((time_after(jiffies, j->cadence_f[cnt].on2min) &&
+ time_before(jiffies, j->cadence_f[cnt].on2max))) {
+ if(j->cadence_f[cnt].off2) {
+ j->cadence_f[cnt].state = 4;
+ j->cadence_f[cnt].off2min = jiffies + (long)((j->cadence_f[cnt].off2 * (hertz * (100 - var)) / 10000));
+ j->cadence_f[cnt].off2dot = jiffies + (long)((j->cadence_f[cnt].off2 * (hertz * (100)) / 10000));
+ j->cadence_f[cnt].off2max = jiffies + (long)((j->cadence_f[cnt].off2 * (hertz * (100 + var)) / 10000));
+ } else {
+ j->cadence_f[cnt].state = 7;
+ }
+ } else {
+ j->cadence_f[cnt].state = 0;
+ }
+ } else if (j->cadence_f[cnt].state == 5) {
+ if ((time_after(jiffies, j->cadence_f[cnt].on3min) &&
+ time_before(jiffies, j->cadence_f[cnt].on3max))) {
+ if(j->cadence_f[cnt].off3) {
+ j->cadence_f[cnt].state = 6;
+ j->cadence_f[cnt].off3min = jiffies + (long)((j->cadence_f[cnt].off3 * (hertz * (100 - var)) / 10000));
+ j->cadence_f[cnt].off3dot = jiffies + (long)((j->cadence_f[cnt].off3 * (hertz * (100)) / 10000));
+ j->cadence_f[cnt].off3max = jiffies + (long)((j->cadence_f[cnt].off3 * (hertz * (100 + var)) / 10000));
+ } else {
+ j->cadence_f[cnt].state = 7;
+ }
+ } else {
+ j->cadence_f[cnt].state = 0;
+ }
+ } else {
+ j->cadence_f[cnt].state = 0;
+ }
+ } else {
+ switch(j->cadence_f[cnt].state) {
+ case 1:
+ if(time_after(jiffies, j->cadence_f[cnt].on1dot) &&
+ !j->cadence_f[cnt].off1 &&
+ !j->cadence_f[cnt].on2 && !j->cadence_f[cnt].off2 &&
+ !j->cadence_f[cnt].on3 && !j->cadence_f[cnt].off3) {
+ j->cadence_f[cnt].state = 7;
+ }
+ break;
+ case 3:
+ if(time_after(jiffies, j->cadence_f[cnt].on2dot) &&
+ !j->cadence_f[cnt].off2 &&
+ !j->cadence_f[cnt].on3 && !j->cadence_f[cnt].off3) {
+ j->cadence_f[cnt].state = 7;
+ }
+ break;
+ case 5:
+ if(time_after(jiffies, j->cadence_f[cnt].on3dot) &&
+ !j->cadence_f[cnt].off3) {
+ j->cadence_f[cnt].state = 7;
+ }
+ break;
+ }
+ }
+
+ if (ixjdebug & 0x0040) {
+ printk(KERN_INFO "IXJ Tone Cadence state = %d /dev/phone%d at %ld\n", j->cadence_f[cnt].state, j->board, jiffies);
+ switch(j->cadence_f[cnt].state) {
+ case 0:
+ printk(KERN_INFO "IXJ /dev/phone%d No Tone detected\n", j->board);
+ break;
+ case 1:
+ printk(KERN_INFO "IXJ /dev/phone%d Next Tone Cadence state at %u %ld - %ld - %ld\n", j->board,
+ j->cadence_f[cnt].on1, j->cadence_f[cnt].on1min, j->cadence_f[cnt].on1dot, j->cadence_f[cnt].on1max);
+ break;
+ case 2:
+ printk(KERN_INFO "IXJ /dev/phone%d Next Tone Cadence state at %ld - %ld\n", j->board, j->cadence_f[cnt].off1min,
+ j->cadence_f[cnt].off1max);
+ break;
+ case 3:
+ printk(KERN_INFO "IXJ /dev/phone%d Next Tone Cadence state at %ld - %ld\n", j->board, j->cadence_f[cnt].on2min,
+ j->cadence_f[cnt].on2max);
+ break;
+ case 4:
+ printk(KERN_INFO "IXJ /dev/phone%d Next Tone Cadence state at %ld - %ld\n", j->board, j->cadence_f[cnt].off2min,
+ j->cadence_f[cnt].off2max);
+ break;
+ case 5:
+ printk(KERN_INFO "IXJ /dev/phone%d Next Tone Cadence state at %ld - %ld\n", j->board, j->cadence_f[cnt].on3min,
+ j->cadence_f[cnt].on3max);
+ break;
+ case 6:
+ printk(KERN_INFO "IXJ /dev/phone%d Next Tone Cadence state at %ld - %ld\n", j->board, j->cadence_f[cnt].off3min,
+ j->cadence_f[cnt].off3max);
+ break;
+ }
+ }
+ }
+ if (j->cadence_f[cnt].state == 7) {
+ j->cadence_f[cnt].state = 0;
+ if (j->cadence_f[cnt].enable == 1)
+ j->cadence_f[cnt].enable = 0;
+ switch (cnt) {
+ case 0:
+ if(ixjdebug & 0x0020) {
+ printk(KERN_INFO "Filter Cadence 0 triggered %ld\n", jiffies);
+ }
+ j->ex.bits.fc0 = 1;
+ ixj_kill_fasync(j, SIG_FC0, POLL_IN);
+ break;
+ case 1:
+ if(ixjdebug & 0x0020) {
+ printk(KERN_INFO "Filter Cadence 1 triggered %ld\n", jiffies);
+ }
+ j->ex.bits.fc1 = 1;
+ ixj_kill_fasync(j, SIG_FC1, POLL_IN);
+ break;
+ case 2:
+ if(ixjdebug & 0x0020) {
+ printk(KERN_INFO "Filter Cadence 2 triggered %ld\n", jiffies);
+ }
+ j->ex.bits.fc2 = 1;
+ ixj_kill_fasync(j, SIG_FC2, POLL_IN);
+ break;
+ case 3:
+ if(ixjdebug & 0x0020) {
+ printk(KERN_INFO "Filter Cadence 3 triggered %ld\n", jiffies);
+ }
+ j->ex.bits.fc3 = 1;
+ ixj_kill_fasync(j, SIG_FC3, POLL_IN);
+ break;
+ }
+ }
+ if (j->filter_en[cnt] && ((j->filter_hist[cnt] & 3 && !(j->filter_hist[cnt] & 12)) ||
+ (j->filter_hist[cnt] & 12 && !(j->filter_hist[cnt] & 3)))) {
+ if((j->filter_hist[cnt] & 3 && !(j->filter_hist[cnt] & 12))) {
+ trg = 1;
+ } else if((j->filter_hist[cnt] & 12 && !(j->filter_hist[cnt] & 3))) {
+ trg = 0;
+ }
+ switch (cnt) {
+ case 0:
+ if(ixjdebug & 0x0020) {
+ printk(KERN_INFO "Filter 0 triggered %d at %ld\n", trg, jiffies);
+ }
+ j->ex.bits.f0 = 1;
+ ixj_kill_fasync(j, SIG_F0, POLL_IN);
+ break;
+ case 1:
+ if(ixjdebug & 0x0020) {
+ printk(KERN_INFO "Filter 1 triggered %d at %ld\n", trg, jiffies);
+ }
+ j->ex.bits.f1 = 1;
+ ixj_kill_fasync(j, SIG_F1, POLL_IN);
+ break;
+ case 2:
+ if(ixjdebug & 0x0020) {
+ printk(KERN_INFO "Filter 2 triggered %d at %ld\n", trg, jiffies);
+ }
+ j->ex.bits.f2 = 1;
+ ixj_kill_fasync(j, SIG_F2, POLL_IN);
+ break;
+ case 3:
+ if(ixjdebug & 0x0020) {
+ printk(KERN_INFO "Filter 3 triggered %d at %ld\n", trg, jiffies);
+ }
+ j->ex.bits.f3 = 1;
+ ixj_kill_fasync(j, SIG_F3, POLL_IN);
+ break;
+ }
+ }
+ }
+ return 0;
+}
+
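+/*
+ * Poll the DSP DTMF detector.  On the falling edge of dtmf_valid the digit
+ * is queued into dtmfbuffer and SIG_DTMF_READY is raised, unless the driver
+ * is waiting for a caller-ID-on-call-waiting (CIDCW) acknowledge.
+ */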
+static int LineMonitor(IXJ *j)
+{
+ if (j->dtmf_proc) {
+ return -1;
+ }
+ j->dtmf_proc = 1;
+
+ if (ixj_WriteDSPCommand(0x7000, j)) /* Line Monitor */
+ return -1;
+
+ j->dtmf.bytes.high = j->ssr.high;
+ j->dtmf.bytes.low = j->ssr.low;
+ if (!j->dtmf_state && j->dtmf.bits.dtmf_valid) {
+ j->dtmf_state = 1;
+ j->dtmf_current = j->dtmf.bits.digit;
+ }
+ if (j->dtmf_state && !j->dtmf.bits.dtmf_valid) /* && j->dtmf_wp != j->dtmf_rp) */
+ {
+ if(!j->cidcw_wait) {
+ j->dtmfbuffer[j->dtmf_wp] = j->dtmf_current;
+ j->dtmf_wp++;
+ if (j->dtmf_wp == 79)
+ j->dtmf_wp = 0;
+ j->ex.bits.dtmf_ready = 1;
+ if(j->ex_sig.bits.dtmf_ready) {
+ ixj_kill_fasync(j, SIG_DTMF_READY, POLL_IN);
+ }
+ }
+ else if(j->dtmf_current == 0x00 || j->dtmf_current == 0x0D) {
+ if(ixjdebug & 0x0020) {
+ printk("IXJ phone%d saw CIDCW Ack DTMF %d from display at %ld\n", j->board, j->dtmf_current, jiffies);
+ }
+ j->flags.cidcw_ack = 1;
+ }
+ j->dtmf_state = 0;
+ }
+ j->dtmf_proc = 0;
+
+ return 0;
+}
+
+/************************************************************************
+*
+* Functions to allow alaw <-> ulaw conversions.
+*
+************************************************************************/
+
+static void ulaw2alaw(unsigned char *buff, unsigned long len)
+{
+ static unsigned char table_ulaw2alaw[] =
+ {
+ 0x2A, 0x2B, 0x28, 0x29, 0x2E, 0x2F, 0x2C, 0x2D,
+ 0x22, 0x23, 0x20, 0x21, 0x26, 0x27, 0x24, 0x25,
+ 0x3A, 0x3B, 0x38, 0x39, 0x3E, 0x3F, 0x3C, 0x3D,
+ 0x32, 0x33, 0x30, 0x31, 0x36, 0x37, 0x34, 0x35,
+ 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D, 0x02,
+ 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05, 0x1A,
+ 0x1B, 0x18, 0x19, 0x1E, 0x1F, 0x1C, 0x1D, 0x12,
+ 0x13, 0x10, 0x11, 0x16, 0x17, 0x14, 0x15, 0x6B,
+ 0x68, 0x69, 0x6E, 0x6F, 0x6C, 0x6D, 0x62, 0x63,
+ 0x60, 0x61, 0x66, 0x67, 0x64, 0x65, 0x7B, 0x79,
+ 0x7E, 0x7F, 0x7C, 0x7D, 0x72, 0x73, 0x70, 0x71,
+ 0x76, 0x77, 0x74, 0x75, 0x4B, 0x49, 0x4F, 0x4D,
+ 0x42, 0x43, 0x40, 0x41, 0x46, 0x47, 0x44, 0x45,
+ 0x5A, 0x5B, 0x58, 0x59, 0x5E, 0x5F, 0x5C, 0x5D,
+ 0x52, 0x52, 0x53, 0x53, 0x50, 0x50, 0x51, 0x51,
+ 0x56, 0x56, 0x57, 0x57, 0x54, 0x54, 0x55, 0xD5,
+ 0xAA, 0xAB, 0xA8, 0xA9, 0xAE, 0xAF, 0xAC, 0xAD,
+ 0xA2, 0xA3, 0xA0, 0xA1, 0xA6, 0xA7, 0xA4, 0xA5,
+ 0xBA, 0xBB, 0xB8, 0xB9, 0xBE, 0xBF, 0xBC, 0xBD,
+ 0xB2, 0xB3, 0xB0, 0xB1, 0xB6, 0xB7, 0xB4, 0xB5,
+ 0x8B, 0x88, 0x89, 0x8E, 0x8F, 0x8C, 0x8D, 0x82,
+ 0x83, 0x80, 0x81, 0x86, 0x87, 0x84, 0x85, 0x9A,
+ 0x9B, 0x98, 0x99, 0x9E, 0x9F, 0x9C, 0x9D, 0x92,
+ 0x93, 0x90, 0x91, 0x96, 0x97, 0x94, 0x95, 0xEB,
+ 0xE8, 0xE9, 0xEE, 0xEF, 0xEC, 0xED, 0xE2, 0xE3,
+ 0xE0, 0xE1, 0xE6, 0xE7, 0xE4, 0xE5, 0xFB, 0xF9,
+ 0xFE, 0xFF, 0xFC, 0xFD, 0xF2, 0xF3, 0xF0, 0xF1,
+ 0xF6, 0xF7, 0xF4, 0xF5, 0xCB, 0xC9, 0xCF, 0xCD,
+ 0xC2, 0xC3, 0xC0, 0xC1, 0xC6, 0xC7, 0xC4, 0xC5,
+ 0xDA, 0xDB, 0xD8, 0xD9, 0xDE, 0xDF, 0xDC, 0xDD,
+ 0xD2, 0xD2, 0xD3, 0xD3, 0xD0, 0xD0, 0xD1, 0xD1,
+ 0xD6, 0xD6, 0xD7, 0xD7, 0xD4, 0xD4, 0xD5, 0xD5
+ };
+
+	while (len--) {
+		*buff = table_ulaw2alaw[*buff];
+		buff++;
+	}
+}
+
+static void alaw2ulaw(unsigned char *buff, unsigned long len)
+{
+ static unsigned char table_alaw2ulaw[] =
+ {
+ 0x29, 0x2A, 0x27, 0x28, 0x2D, 0x2E, 0x2B, 0x2C,
+ 0x21, 0x22, 0x1F, 0x20, 0x25, 0x26, 0x23, 0x24,
+ 0x39, 0x3A, 0x37, 0x38, 0x3D, 0x3E, 0x3B, 0x3C,
+ 0x31, 0x32, 0x2F, 0x30, 0x35, 0x36, 0x33, 0x34,
+ 0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D,
+ 0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
+ 0x1A, 0x1B, 0x18, 0x19, 0x1E, 0x1F, 0x1C, 0x1D,
+ 0x12, 0x13, 0x10, 0x11, 0x16, 0x17, 0x14, 0x15,
+ 0x62, 0x63, 0x60, 0x61, 0x66, 0x67, 0x64, 0x65,
+ 0x5D, 0x5D, 0x5C, 0x5C, 0x5F, 0x5F, 0x5E, 0x5E,
+ 0x74, 0x76, 0x70, 0x72, 0x7C, 0x7E, 0x78, 0x7A,
+ 0x6A, 0x6B, 0x68, 0x69, 0x6E, 0x6F, 0x6C, 0x6D,
+ 0x48, 0x49, 0x46, 0x47, 0x4C, 0x4D, 0x4A, 0x4B,
+ 0x40, 0x41, 0x3F, 0x3F, 0x44, 0x45, 0x42, 0x43,
+ 0x56, 0x57, 0x54, 0x55, 0x5A, 0x5B, 0x58, 0x59,
+ 0x4F, 0x4F, 0x4E, 0x4E, 0x52, 0x53, 0x50, 0x51,
+ 0xA9, 0xAA, 0xA7, 0xA8, 0xAD, 0xAE, 0xAB, 0xAC,
+ 0xA1, 0xA2, 0x9F, 0xA0, 0xA5, 0xA6, 0xA3, 0xA4,
+ 0xB9, 0xBA, 0xB7, 0xB8, 0xBD, 0xBE, 0xBB, 0xBC,
+ 0xB1, 0xB2, 0xAF, 0xB0, 0xB5, 0xB6, 0xB3, 0xB4,
+ 0x8A, 0x8B, 0x88, 0x89, 0x8E, 0x8F, 0x8C, 0x8D,
+ 0x82, 0x83, 0x80, 0x81, 0x86, 0x87, 0x84, 0x85,
+ 0x9A, 0x9B, 0x98, 0x99, 0x9E, 0x9F, 0x9C, 0x9D,
+ 0x92, 0x93, 0x90, 0x91, 0x96, 0x97, 0x94, 0x95,
+ 0xE2, 0xE3, 0xE0, 0xE1, 0xE6, 0xE7, 0xE4, 0xE5,
+ 0xDD, 0xDD, 0xDC, 0xDC, 0xDF, 0xDF, 0xDE, 0xDE,
+ 0xF4, 0xF6, 0xF0, 0xF2, 0xFC, 0xFE, 0xF8, 0xFA,
+ 0xEA, 0xEB, 0xE8, 0xE9, 0xEE, 0xEF, 0xEC, 0xED,
+ 0xC8, 0xC9, 0xC6, 0xC7, 0xCC, 0xCD, 0xCA, 0xCB,
+ 0xC0, 0xC1, 0xBF, 0xBF, 0xC4, 0xC5, 0xC2, 0xC3,
+ 0xD6, 0xD7, 0xD4, 0xD5, 0xDA, 0xDB, 0xD8, 0xD9,
+ 0xCF, 0xCF, 0xCE, 0xCE, 0xD2, 0xD3, 0xD0, 0xD1
+ };
+
+	while (len--) {
+		*buff = table_alaw2ulaw[*buff];
+		buff++;
+	}
+}
+
+static ssize_t ixj_read(struct file * file_p, char __user *buf, size_t length, loff_t * ppos)
+{
+ unsigned long i = *ppos;
+ IXJ * j = get_ixj(NUM(file_p->f_path.dentry->d_inode));
+
+ DECLARE_WAITQUEUE(wait, current);
+
+ if (j->flags.inread)
+ return -EALREADY;
+
+ j->flags.inread = 1;
+
+ add_wait_queue(&j->read_q, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ mb();
+
+ while (!j->read_buffer_ready || (j->dtmf_state && j->flags.dtmf_oob)) {
+ ++j->read_wait;
+ if (file_p->f_flags & O_NONBLOCK) {
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&j->read_q, &wait);
+ j->flags.inread = 0;
+ return -EAGAIN;
+ }
+ if (!ixj_hookstate(j)) {
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&j->read_q, &wait);
+ j->flags.inread = 0;
+ return 0;
+ }
+ interruptible_sleep_on(&j->read_q);
+ if (signal_pending(current)) {
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&j->read_q, &wait);
+ j->flags.inread = 0;
+ return -EINTR;
+ }
+ }
+
+ remove_wait_queue(&j->read_q, &wait);
+ set_current_state(TASK_RUNNING);
+ /* Don't ever copy more than the user asks */
+ if(j->rec_codec == ALAW)
+ ulaw2alaw(j->read_buffer, min(length, j->read_buffer_size));
+ i = copy_to_user(buf, j->read_buffer, min(length, j->read_buffer_size));
+ j->read_buffer_ready = 0;
+ if (i) {
+ j->flags.inread = 0;
+ return -EFAULT;
+ } else {
+ j->flags.inread = 0;
+ return min(length, j->read_buffer_size);
+ }
+}
+
+static ssize_t ixj_enhanced_read(struct file * file_p, char __user *buf, size_t length,
+ loff_t * ppos)
+{
+ int pre_retval;
+ ssize_t read_retval = 0;
+ IXJ *j = get_ixj(NUM(file_p->f_path.dentry->d_inode));
+
+ pre_retval = ixj_PreRead(j, 0L);
+ switch (pre_retval) {
+ case NORMAL:
+ read_retval = ixj_read(file_p, buf, length, ppos);
+ ixj_PostRead(j, 0L);
+ break;
+ case NOPOST:
+ read_retval = ixj_read(file_p, buf, length, ppos);
+ break;
+ case POSTONLY:
+ ixj_PostRead(j, 0L);
+ break;
+ default:
+ read_retval = pre_retval;
+ }
+ return read_retval;
+}
+
+static ssize_t ixj_write(struct file *file_p, const char __user *buf, size_t count, loff_t * ppos)
+{
+ unsigned long i = *ppos;
+ IXJ *j = file_p->private_data;
+
+ DECLARE_WAITQUEUE(wait, current);
+
+ if (j->flags.inwrite)
+ return -EALREADY;
+
+ j->flags.inwrite = 1;
+
+ add_wait_queue(&j->write_q, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ mb();
+
+
+ while (!j->write_buffers_empty) {
+ ++j->write_wait;
+ if (file_p->f_flags & O_NONBLOCK) {
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&j->write_q, &wait);
+ j->flags.inwrite = 0;
+ return -EAGAIN;
+ }
+ if (!ixj_hookstate(j)) {
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&j->write_q, &wait);
+ j->flags.inwrite = 0;
+ return 0;
+ }
+ interruptible_sleep_on(&j->write_q);
+ if (signal_pending(current)) {
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&j->write_q, &wait);
+ j->flags.inwrite = 0;
+ return -EINTR;
+ }
+ }
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&j->write_q, &wait);
+ if (j->write_buffer_wp + count >= j->write_buffer_end)
+ j->write_buffer_wp = j->write_buffer;
+ i = copy_from_user(j->write_buffer_wp, buf, min(count, j->write_buffer_size));
+ if (i) {
+ j->flags.inwrite = 0;
+ return -EFAULT;
+ }
+ if(j->play_codec == ALAW)
+ alaw2ulaw(j->write_buffer_wp, min(count, j->write_buffer_size));
+ j->flags.inwrite = 0;
+ return min(count, j->write_buffer_size);
+}
+
+static ssize_t ixj_enhanced_write(struct file * file_p, const char __user *buf, size_t count, loff_t * ppos)
+{
+ int pre_retval;
+ ssize_t write_retval = 0;
+
+ IXJ *j = get_ixj(NUM(file_p->f_path.dentry->d_inode));
+
+ pre_retval = ixj_PreWrite(j, 0L);
+ switch (pre_retval) {
+ case NORMAL:
+ write_retval = ixj_write(file_p, buf, count, ppos);
+ if (write_retval > 0) {
+ ixj_PostWrite(j, 0L);
+ j->write_buffer_wp += write_retval;
+ j->write_buffers_empty--;
+ }
+ break;
+ case NOPOST:
+ write_retval = ixj_write(file_p, buf, count, ppos);
+ if (write_retval > 0) {
+ j->write_buffer_wp += write_retval;
+ j->write_buffers_empty--;
+ }
+ break;
+ case POSTONLY:
+ ixj_PostWrite(j, 0L);
+ break;
+ default:
+ write_retval = pre_retval;
+ }
+ return write_retval;
+}
+
+static void ixj_read_frame(IXJ *j)
+{
+ int cnt, dly;
+
+ if (j->read_buffer) {
+ for (cnt = 0; cnt < j->rec_frame_size * 2; cnt += 2) {
+ if (!(cnt % 16) && !IsRxReady(j)) {
+ dly = 0;
+ while (!IsRxReady(j)) {
+ if (dly++ > 5) {
+ dly = 0;
+ break;
+ }
+ udelay(10);
+ }
+ }
+ /* Throw away word 0 of the 8021 compressed format to get standard G.729. */
+ if (j->rec_codec == G729 && (cnt == 0 || cnt == 10 || cnt == 20)) {
+ inb_p(j->DSPbase + 0x0E);
+ inb_p(j->DSPbase + 0x0F);
+ }
+ *(j->read_buffer + cnt) = inb_p(j->DSPbase + 0x0E);
+ *(j->read_buffer + cnt + 1) = inb_p(j->DSPbase + 0x0F);
+ }
+ ++j->framesread;
+ if (j->intercom != -1) {
+ if (IsTxReady(get_ixj(j->intercom))) {
+ for (cnt = 0; cnt < j->rec_frame_size * 2; cnt += 2) {
+ if (!(cnt % 16) && !IsTxReady(j)) {
+ dly = 0;
+ while (!IsTxReady(j)) {
+ if (dly++ > 5) {
+ dly = 0;
+ break;
+ }
+ udelay(10);
+ }
+ }
+ outb_p(*(j->read_buffer + cnt), get_ixj(j->intercom)->DSPbase + 0x0C);
+ outb_p(*(j->read_buffer + cnt + 1), get_ixj(j->intercom)->DSPbase + 0x0D);
+ }
+ get_ixj(j->intercom)->frameswritten++;
+ }
+ } else {
+ j->read_buffer_ready = 1;
+ wake_up_interruptible(&j->read_q); /* Wake any blocked readers */
+
+ wake_up_interruptible(&j->poll_q); /* Wake any blocked selects */
+
+ if(j->ixj_signals[SIG_READ_READY])
+ ixj_kill_fasync(j, SIG_READ_READY, POLL_OUT);
+ }
+ }
+}
+
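+/* Precomputed 16-bit FSK waveform samples used to modulate CallerID data,
+   indexed as fsk[bit][phase][sample]. */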
+static short fsk[][6][20] =
+{
+ {
+ {
+ 0, 17846, 29934, 32364, 24351, 8481, -10126, -25465, -32587, -29196,
+ -16384, 1715, 19260, 30591, 32051, 23170, 6813, -11743, -26509, -32722
+ },
+ {
+ -28377, -14876, 3425, 20621, 31163, 31650, 21925, 5126, -13328, -27481,
+ -32767, -27481, -13328, 5126, 21925, 31650, 31163, 20621, 3425, -14876
+ },
+ {
+ -28377, -32722, -26509, -11743, 6813, 23170, 32051, 30591, 19260, 1715,
+ -16384, -29196, -32587, -25465, -10126, 8481, 24351, 32364, 29934, 17846
+ },
+ {
+ 0, -17846, -29934, -32364, -24351, -8481, 10126, 25465, 32587, 29196,
+ 16384, -1715, -19260, -30591, -32051, -23170, -6813, 11743, 26509, 32722
+ },
+ {
+ 28377, 14876, -3425, -20621, -31163, -31650, -21925, -5126, 13328, 27481,
+ 32767, 27481, 13328, -5126, -21925, -31650, -31163, -20621, -3425, 14876
+ },
+ {
+ 28377, 32722, 26509, 11743, -6813, -23170, -32051, -30591, -19260, -1715,
+ 16384, 29196, 32587, 25465, 10126, -8481, -24351, -32364, -29934, -17846
+ }
+ },
+ {
+ {
+ 0, 10126, 19260, 26509, 31163, 32767, 31163, 26509, 19260, 10126,
+ 0, -10126, -19260, -26509, -31163, -32767, -31163, -26509, -19260, -10126
+ },
+ {
+ -28377, -21925, -13328, -3425, 6813, 16384, 24351, 29934, 32587, 32051,
+ 28377, 21925, 13328, 3425, -6813, -16384, -24351, -29934, -32587, -32051
+ },
+ {
+ -28377, -32051, -32587, -29934, -24351, -16384, -6813, 3425, 13328, 21925,
+ 28377, 32051, 32587, 29934, 24351, 16384, 6813, -3425, -13328, -21925
+ },
+ {
+ 0, -10126, -19260, -26509, -31163, -32767, -31163, -26509, -19260, -10126,
+ 0, 10126, 19260, 26509, 31163, 32767, 31163, 26509, 19260, 10126
+ },
+ {
+ 28377, 21925, 13328, 3425, -6813, -16383, -24351, -29934, -32587, -32051,
+ -28377, -21925, -13328, -3425, 6813, 16383, 24351, 29934, 32587, 32051
+ },
+ {
+ 28377, 32051, 32587, 29934, 24351, 16384, 6813, -3425, -13328, -21925,
+ -28377, -32051, -32587, -29934, -24351, -16384, -6813, 3425, 13328, 21925
+ }
+ }
+};
+
+
+static void ixj_write_cid_bit(IXJ *j, int bit)
+{
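+	/* Append samples for this bit from the fsk[] table; fskcnt steps through
+	   the 20-sample waveform by 3 and fskz selects the phase, advancing on 0 bits. */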
+ while (j->fskcnt < 20) {
+ if(j->fskdcnt < (j->fsksize - 1))
+ j->fskdata[j->fskdcnt++] = fsk[bit][j->fskz][j->fskcnt];
+
+ j->fskcnt += 3;
+ }
+ j->fskcnt %= 20;
+
+ if (!bit)
+ j->fskz++;
+ if (j->fskz >= 6)
+ j->fskz = 0;
+
+}
+
+static void ixj_write_cid_byte(IXJ *j, char byte)
+{
+ IXJ_CBYTE cb;
+
+ cb.cbyte = byte;
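+	/* Serial framing: one start bit (0), eight data bits LSB first, one stop bit (1). */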
+ ixj_write_cid_bit(j, 0);
+ ixj_write_cid_bit(j, cb.cbits.b0 ? 1 : 0);
+ ixj_write_cid_bit(j, cb.cbits.b1 ? 1 : 0);
+ ixj_write_cid_bit(j, cb.cbits.b2 ? 1 : 0);
+ ixj_write_cid_bit(j, cb.cbits.b3 ? 1 : 0);
+ ixj_write_cid_bit(j, cb.cbits.b4 ? 1 : 0);
+ ixj_write_cid_bit(j, cb.cbits.b5 ? 1 : 0);
+ ixj_write_cid_bit(j, cb.cbits.b6 ? 1 : 0);
+ ixj_write_cid_bit(j, cb.cbits.b7 ? 1 : 0);
+ ixj_write_cid_bit(j, 1);
+}
+
+static void ixj_write_cid_seize(IXJ *j)
+{
+ int cnt;
+
+ for (cnt = 0; cnt < 150; cnt++) {
+ ixj_write_cid_bit(j, 0);
+ ixj_write_cid_bit(j, 1);
+ }
+ for (cnt = 0; cnt < 180; cnt++) {
+ ixj_write_cid_bit(j, 1);
+ }
+}
+
+static void ixj_write_cidcw_seize(IXJ *j)
+{
+ int cnt;
+
+ for (cnt = 0; cnt < 80; cnt++) {
+ ixj_write_cid_bit(j, 1);
+ }
+}
+
+static int ixj_write_cid_string(IXJ *j, char *s, int checksum)
+{
+ int cnt;
+
+ for (cnt = 0; cnt < strlen(s); cnt++) {
+ ixj_write_cid_byte(j, s[cnt]);
+ checksum = (checksum + s[cnt]);
+ }
+ return checksum;
+}
+
+static void ixj_pad_fsk(IXJ *j, int pad)
+{
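+	/* Pad the FSK buffer with 'pad' zero samples, then append 720 more
+	   zero samples of trailing silence. */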
+ int cnt;
+
+ for (cnt = 0; cnt < pad; cnt++) {
+ if(j->fskdcnt < (j->fsksize - 1))
+ j->fskdata[j->fskdcnt++] = 0x0000;
+ }
+ for (cnt = 0; cnt < 720; cnt++) {
+ if(j->fskdcnt < (j->fsksize - 1))
+ j->fskdata[j->fskdcnt++] = 0x0000;
+ }
+}
+
+static void ixj_pre_cid(IXJ *j)
+{
+ j->cid_play_codec = j->play_codec;
+ j->cid_play_frame_size = j->play_frame_size;
+ j->cid_play_volume = get_play_volume(j);
+ j->cid_play_flag = j->flags.playing;
+
+ j->cid_rec_codec = j->rec_codec;
+ j->cid_rec_volume = get_rec_volume(j);
+ j->cid_rec_flag = j->flags.recording;
+
+ j->cid_play_aec_level = j->aec_level;
+
+ switch(j->baseframe.low) {
+ case 0xA0:
+ j->cid_base_frame_size = 20;
+ break;
+ case 0x50:
+ j->cid_base_frame_size = 10;
+ break;
+ case 0xF0:
+ j->cid_base_frame_size = 30;
+ break;
+ }
+
+ ixj_play_stop(j);
+ ixj_cpt_stop(j);
+
+ j->flags.cidplay = 1;
+
+ set_base_frame(j, 30);
+ set_play_codec(j, LINEAR16);
+ set_play_volume(j, 0x1B);
+ ixj_play_start(j);
+}
+
+static void ixj_post_cid(IXJ *j)
+{
+ ixj_play_stop(j);
+
+ if(j->cidsize > 5000) {
+ SLIC_SetState(PLD_SLIC_STATE_STANDBY, j);
+ }
+ j->flags.cidplay = 0;
+ if(ixjdebug & 0x0200) {
+ printk("IXJ phone%d Finished Playing CallerID data %ld\n", j->board, jiffies);
+ }
+
+ ixj_fsk_free(j);
+
+ j->fskdcnt = 0;
+ set_base_frame(j, j->cid_base_frame_size);
+ set_play_codec(j, j->cid_play_codec);
+ ixj_aec_start(j, j->cid_play_aec_level);
+ set_play_volume(j, j->cid_play_volume);
+
+ set_rec_codec(j, j->cid_rec_codec);
+ set_rec_volume(j, j->cid_rec_volume);
+
+ if(j->cid_rec_flag)
+ ixj_record_start(j);
+
+ if(j->cid_play_flag)
+ ixj_play_start(j);
+
+ if(j->cid_play_flag) {
+ wake_up_interruptible(&j->write_q); /* Wake any blocked writers */
+ }
+}
+
+static void ixj_write_cid(IXJ *j)
+{
+ char sdmf1[50];
+ char sdmf2[50];
+ char sdmf3[80];
+ char mdmflen, len1, len2, len3;
+ int pad;
+
+ int checksum = 0;
+
+ if (j->dsp.low == 0x20 || j->flags.cidplay)
+ return;
+
+ j->fskz = j->fskphase = j->fskcnt = j->fskdcnt = 0;
+ j->cidsize = j->cidcnt = 0;
+
+ ixj_fsk_alloc(j);
+
+ strcpy(sdmf1, j->cid_send.month);
+ strcat(sdmf1, j->cid_send.day);
+ strcat(sdmf1, j->cid_send.hour);
+ strcat(sdmf1, j->cid_send.min);
+ strcpy(sdmf2, j->cid_send.number);
+ strcpy(sdmf3, j->cid_send.name);
+
+ len1 = strlen(sdmf1);
+ len2 = strlen(sdmf2);
+ len3 = strlen(sdmf3);
+ mdmflen = len1 + len2 + len3 + 6;
+
+ while(1){
+ ixj_write_cid_seize(j);
+
+ ixj_write_cid_byte(j, 0x80);
+ checksum = 0x80;
+ ixj_write_cid_byte(j, mdmflen);
+ checksum = checksum + mdmflen;
+
+ ixj_write_cid_byte(j, 0x01);
+ checksum = checksum + 0x01;
+ ixj_write_cid_byte(j, len1);
+ checksum = checksum + len1;
+ checksum = ixj_write_cid_string(j, sdmf1, checksum);
+ if(ixj_hookstate(j) & 1)
+ break;
+
+ ixj_write_cid_byte(j, 0x02);
+ checksum = checksum + 0x02;
+ ixj_write_cid_byte(j, len2);
+ checksum = checksum + len2;
+ checksum = ixj_write_cid_string(j, sdmf2, checksum);
+ if(ixj_hookstate(j) & 1)
+ break;
+
+ ixj_write_cid_byte(j, 0x07);
+ checksum = checksum + 0x07;
+ ixj_write_cid_byte(j, len3);
+ checksum = checksum + len3;
+ checksum = ixj_write_cid_string(j, sdmf3, checksum);
+ if(ixj_hookstate(j) & 1)
+ break;
+
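+		/* Finalize the checksum: two's complement of the byte sum, modulo 256. */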
+ checksum %= 256;
+ checksum ^= 0xFF;
+ checksum += 1;
+
+ ixj_write_cid_byte(j, (char) checksum);
+
+ pad = j->fskdcnt % 240;
+ if (pad) {
+ pad = 240 - pad;
+ }
+ ixj_pad_fsk(j, pad);
+ break;
+ }
+
+ ixj_write_frame(j);
+}
+
+static void ixj_write_cidcw(IXJ *j)
+{
+ IXJ_TONE ti;
+
+ char sdmf1[50];
+ char sdmf2[50];
+ char sdmf3[80];
+ char mdmflen, len1, len2, len3;
+ int pad;
+
+ int checksum = 0;
+
+ if (j->dsp.low == 0x20 || j->flags.cidplay)
+ return;
+
+ j->fskz = j->fskphase = j->fskcnt = j->fskdcnt = 0;
+ j->cidsize = j->cidcnt = 0;
+
+ ixj_fsk_alloc(j);
+
+ j->flags.cidcw_ack = 0;
+
+ ti.tone_index = 23;
+ ti.gain0 = 1;
+ ti.freq0 = hz440;
+ ti.gain1 = 0;
+ ti.freq1 = 0;
+ ixj_init_tone(j, &ti);
+
+ ixj_set_tone_on(1500, j);
+ ixj_set_tone_off(32, j);
+ if(ixjdebug & 0x0200) {
+ printk("IXJ cidcw phone%d first tone start at %ld\n", j->board, jiffies);
+ }
+ ixj_play_tone(j, 23);
+
+ clear_bit(j->board, &j->busyflags);
+ while(j->tone_state)
+ schedule_timeout_interruptible(1);
+ while(test_and_set_bit(j->board, (void *)&j->busyflags) != 0)
+ schedule_timeout_interruptible(1);
+ if(ixjdebug & 0x0200) {
+ printk("IXJ cidcw phone%d first tone end at %ld\n", j->board, jiffies);
+ }
+
+ ti.tone_index = 24;
+ ti.gain0 = 1;
+ ti.freq0 = hz2130;
+ ti.gain1 = 0;
+ ti.freq1 = hz2750;
+ ixj_init_tone(j, &ti);
+
+ ixj_set_tone_off(10, j);
+ ixj_set_tone_on(600, j);
+ if(ixjdebug & 0x0200) {
+ printk("IXJ cidcw phone%d second tone start at %ld\n", j->board, jiffies);
+ }
+ ixj_play_tone(j, 24);
+
+ clear_bit(j->board, &j->busyflags);
+ while(j->tone_state)
+ schedule_timeout_interruptible(1);
+ while(test_and_set_bit(j->board, (void *)&j->busyflags) != 0)
+ schedule_timeout_interruptible(1);
+ if(ixjdebug & 0x0200) {
+ printk("IXJ cidcw phone%d sent second tone at %ld\n", j->board, jiffies);
+ }
+
+ j->cidcw_wait = jiffies + ((50 * hertz) / 100);
+
+ clear_bit(j->board, &j->busyflags);
+ while(!j->flags.cidcw_ack && time_before(jiffies, j->cidcw_wait))
+ schedule_timeout_interruptible(1);
+ while(test_and_set_bit(j->board, (void *)&j->busyflags) != 0)
+ schedule_timeout_interruptible(1);
+ j->cidcw_wait = 0;
+ if(!j->flags.cidcw_ack) {
+ if(ixjdebug & 0x0200) {
+ printk("IXJ cidcw phone%d did not receive ACK from display %ld\n", j->board, jiffies);
+ }
+ ixj_post_cid(j);
+ if(j->cid_play_flag) {
+			wake_up_interruptible(&j->write_q);	/* Wake any blocked writers */
+ }
+ return;
+ } else {
+ ixj_pre_cid(j);
+ }
+ j->flags.cidcw_ack = 0;
+ strcpy(sdmf1, j->cid_send.month);
+ strcat(sdmf1, j->cid_send.day);
+ strcat(sdmf1, j->cid_send.hour);
+ strcat(sdmf1, j->cid_send.min);
+ strcpy(sdmf2, j->cid_send.number);
+ strcpy(sdmf3, j->cid_send.name);
+
+ len1 = strlen(sdmf1);
+ len2 = strlen(sdmf2);
+ len3 = strlen(sdmf3);
+ mdmflen = len1 + len2 + len3 + 6;
+
+ ixj_write_cidcw_seize(j);
+
+ ixj_write_cid_byte(j, 0x80);
+ checksum = 0x80;
+ ixj_write_cid_byte(j, mdmflen);
+ checksum = checksum + mdmflen;
+
+ ixj_write_cid_byte(j, 0x01);
+ checksum = checksum + 0x01;
+ ixj_write_cid_byte(j, len1);
+ checksum = checksum + len1;
+ checksum = ixj_write_cid_string(j, sdmf1, checksum);
+
+ ixj_write_cid_byte(j, 0x02);
+ checksum = checksum + 0x02;
+ ixj_write_cid_byte(j, len2);
+ checksum = checksum + len2;
+ checksum = ixj_write_cid_string(j, sdmf2, checksum);
+
+ ixj_write_cid_byte(j, 0x07);
+ checksum = checksum + 0x07;
+ ixj_write_cid_byte(j, len3);
+ checksum = checksum + len3;
+ checksum = ixj_write_cid_string(j, sdmf3, checksum);
+
+ checksum %= 256;
+ checksum ^= 0xFF;
+ checksum += 1;
+
+ ixj_write_cid_byte(j, (char) checksum);
+
+ pad = j->fskdcnt % 240;
+ if (pad) {
+ pad = 240 - pad;
+ }
+ ixj_pad_fsk(j, pad);
+ if(ixjdebug & 0x0200) {
+ printk("IXJ cidcw phone%d sent FSK data at %ld\n", j->board, jiffies);
+ }
+}
+
+static void ixj_write_vmwi(IXJ *j, int msg)
+{
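+	/* Build and send a Visual Message Waiting Indicator FSK message:
+	   header 0x82, one parameter (type 0x0B, length 1) set to 0xFF (on) or 0x00 (off). */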
+ char mdmflen;
+ int pad;
+
+ int checksum = 0;
+
+ if (j->dsp.low == 0x20 || j->flags.cidplay)
+ return;
+
+ j->fskz = j->fskphase = j->fskcnt = j->fskdcnt = 0;
+ j->cidsize = j->cidcnt = 0;
+
+ ixj_fsk_alloc(j);
+
+ mdmflen = 3;
+
+ if (j->port == PORT_POTS)
+ SLIC_SetState(PLD_SLIC_STATE_OHT, j);
+
+ ixj_write_cid_seize(j);
+
+ ixj_write_cid_byte(j, 0x82);
+ checksum = 0x82;
+ ixj_write_cid_byte(j, mdmflen);
+ checksum = checksum + mdmflen;
+
+ ixj_write_cid_byte(j, 0x0B);
+ checksum = checksum + 0x0B;
+ ixj_write_cid_byte(j, 1);
+ checksum = checksum + 1;
+
+ if(msg) {
+ ixj_write_cid_byte(j, 0xFF);
+ checksum = checksum + 0xFF;
+ }
+ else {
+ ixj_write_cid_byte(j, 0x00);
+ checksum = checksum + 0x00;
+ }
+
+ checksum %= 256;
+ checksum ^= 0xFF;
+ checksum += 1;
+
+ ixj_write_cid_byte(j, (char) checksum);
+
+ pad = j->fskdcnt % 240;
+ if (pad) {
+ pad = 240 - pad;
+ }
+ ixj_pad_fsk(j, pad);
+}
+
+static void ixj_write_frame(IXJ *j)
+{
+ int cnt, frame_count, dly;
+ IXJ_WORD dat;
+
+ frame_count = 0;
+ if(j->flags.cidplay) {
+ for(cnt = 0; cnt < 480; cnt++) {
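+			/* Each pass writes one 16-bit FSK sample (two bytes), so cnt is
+			   bumped again at the bottom of the loop: 480 counts = 240 samples. */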
+ if (!(cnt % 16) && !IsTxReady(j)) {
+ dly = 0;
+ while (!IsTxReady(j)) {
+ if (dly++ > 5) {
+ dly = 0;
+ break;
+ }
+ udelay(10);
+ }
+ }
+ dat.word = j->fskdata[j->cidcnt++];
+ outb_p(dat.bytes.low, j->DSPbase + 0x0C);
+ outb_p(dat.bytes.high, j->DSPbase + 0x0D);
+ cnt++;
+ }
+ if(j->cidcnt >= j->fskdcnt) {
+ ixj_post_cid(j);
+ }
+ /* This may seem rude, but if we just played one frame of FSK data for CallerID
+ and there is real audio data in the buffer, we need to throw it away because
+		   we just used its time slot. */
+ if (j->write_buffer_rp > j->write_buffer_wp) {
+ j->write_buffer_rp += j->cid_play_frame_size * 2;
+ if (j->write_buffer_rp >= j->write_buffer_end) {
+ j->write_buffer_rp = j->write_buffer;
+ }
+ j->write_buffers_empty++;
+ wake_up_interruptible(&j->write_q); /* Wake any blocked writers */
+
+ wake_up_interruptible(&j->poll_q); /* Wake any blocked selects */
+ }
+ } else if (j->write_buffer && j->write_buffers_empty < 1) {
+ if (j->write_buffer_wp > j->write_buffer_rp) {
+ frame_count =
+ (j->write_buffer_wp - j->write_buffer_rp) / (j->play_frame_size * 2);
+ }
+ if (j->write_buffer_rp > j->write_buffer_wp) {
+ frame_count =
+ (j->write_buffer_wp - j->write_buffer) / (j->play_frame_size * 2) +
+ (j->write_buffer_end - j->write_buffer_rp) / (j->play_frame_size * 2);
+ }
+ if (frame_count >= 1) {
+ if (j->ver.low == 0x12 && j->play_mode && j->flags.play_first_frame) {
+ BYTES blankword;
+
+ switch (j->play_mode) {
+ case PLAYBACK_MODE_ULAW:
+ case PLAYBACK_MODE_ALAW:
+ blankword.low = blankword.high = 0xFF;
+ break;
+ case PLAYBACK_MODE_8LINEAR:
+ case PLAYBACK_MODE_16LINEAR:
+ default:
+ blankword.low = blankword.high = 0x00;
+ break;
+ case PLAYBACK_MODE_8LINEAR_WSS:
+ blankword.low = blankword.high = 0x80;
+ break;
+ }
+ for (cnt = 0; cnt < 16; cnt++) {
+ if (!(cnt % 16) && !IsTxReady(j)) {
+ dly = 0;
+ while (!IsTxReady(j)) {
+ if (dly++ > 5) {
+ dly = 0;
+ break;
+ }
+ udelay(10);
+ }
+ }
+ outb_p((blankword.low), j->DSPbase + 0x0C);
+ outb_p((blankword.high), j->DSPbase + 0x0D);
+ }
+ j->flags.play_first_frame = 0;
+ } else if (j->play_codec == G723_63 && j->flags.play_first_frame) {
+ for (cnt = 0; cnt < 24; cnt++) {
+ BYTES blankword;
+
+ if(cnt == 12) {
+ blankword.low = 0x02;
+ blankword.high = 0x00;
+ }
+ else {
+ blankword.low = blankword.high = 0x00;
+ }
+ if (!(cnt % 16) && !IsTxReady(j)) {
+ dly = 0;
+ while (!IsTxReady(j)) {
+ if (dly++ > 5) {
+ dly = 0;
+ break;
+ }
+ udelay(10);
+ }
+ }
+ outb_p((blankword.low), j->DSPbase + 0x0C);
+ outb_p((blankword.high), j->DSPbase + 0x0D);
+ }
+ j->flags.play_first_frame = 0;
+ }
+ for (cnt = 0; cnt < j->play_frame_size * 2; cnt += 2) {
+ if (!(cnt % 16) && !IsTxReady(j)) {
+ dly = 0;
+ while (!IsTxReady(j)) {
+ if (dly++ > 5) {
+ dly = 0;
+ break;
+ }
+ udelay(10);
+ }
+ }
+ /* Add word 0 to G.729 frames for the 8021. Right now we don't do VAD/CNG */
+ if (j->play_codec == G729 && (cnt == 0 || cnt == 10 || cnt == 20)) {
+ if (j->write_buffer_rp[cnt] == 0 &&
+ j->write_buffer_rp[cnt + 1] == 0 &&
+ j->write_buffer_rp[cnt + 2] == 0 &&
+ j->write_buffer_rp[cnt + 3] == 0 &&
+ j->write_buffer_rp[cnt + 4] == 0 &&
+ j->write_buffer_rp[cnt + 5] == 0 &&
+ j->write_buffer_rp[cnt + 6] == 0 &&
+ j->write_buffer_rp[cnt + 7] == 0 &&
+ j->write_buffer_rp[cnt + 8] == 0 &&
+ j->write_buffer_rp[cnt + 9] == 0) {
+						/* Someone is trying to write silence, so make this a type 0 frame. */
+ outb_p(0x00, j->DSPbase + 0x0C);
+ outb_p(0x00, j->DSPbase + 0x0D);
+ } else {
+ /* so all other frames are type 1. */
+ outb_p(0x01, j->DSPbase + 0x0C);
+ outb_p(0x00, j->DSPbase + 0x0D);
+ }
+ }
+ outb_p(*(j->write_buffer_rp + cnt), j->DSPbase + 0x0C);
+ outb_p(*(j->write_buffer_rp + cnt + 1), j->DSPbase + 0x0D);
+ *(j->write_buffer_rp + cnt) = 0;
+ *(j->write_buffer_rp + cnt + 1) = 0;
+ }
+ j->write_buffer_rp += j->play_frame_size * 2;
+ if (j->write_buffer_rp >= j->write_buffer_end) {
+ j->write_buffer_rp = j->write_buffer;
+ }
+ j->write_buffers_empty++;
+ wake_up_interruptible(&j->write_q); /* Wake any blocked writers */
+
+ wake_up_interruptible(&j->poll_q); /* Wake any blocked selects */
+
+ ++j->frameswritten;
+ }
+ } else {
+ j->drybuffer++;
+ }
+ if(j->ixj_signals[SIG_WRITE_READY]) {
+ ixj_kill_fasync(j, SIG_WRITE_READY, POLL_OUT);
+ }
+}
+
+static int idle(IXJ *j)
+{
+	if (ixj_WriteDSPCommand(0x0000, j))	/* DSP Idle */
+		return 0;
+
+ if (j->ssr.high || j->ssr.low) {
+ return 0;
+ } else {
+ j->play_mode = -1;
+ j->flags.playing = 0;
+ j->rec_mode = -1;
+ j->flags.recording = 0;
+ return 1;
+ }
+}
+
+static int set_base_frame(IXJ *j, int size)
+{
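+	/* 'size' is the base frame length in ms; the DSP commands below select
+	   80/160/240 samples per frame (8 kHz sampling). */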
+ unsigned short cmd;
+ int cnt;
+
+ idle(j);
+ j->cid_play_aec_level = j->aec_level;
+ aec_stop(j);
+ for (cnt = 0; cnt < 10; cnt++) {
+ if (idle(j))
+ break;
+ }
+ if (j->ssr.high || j->ssr.low)
+ return -1;
+ if (j->dsp.low != 0x20) {
+ switch (size) {
+ case 30:
+ cmd = 0x07F0;
+ /* Set Base Frame Size to 240 pg9-10 8021 */
+ break;
+ case 20:
+ cmd = 0x07A0;
+ /* Set Base Frame Size to 160 pg9-10 8021 */
+ break;
+ case 10:
+ cmd = 0x0750;
+ /* Set Base Frame Size to 80 pg9-10 8021 */
+ break;
+ default:
+ return -1;
+ }
+ } else {
+ if (size == 30)
+ return size;
+ else
+ return -1;
+ }
+ if (ixj_WriteDSPCommand(cmd, j)) {
+ j->baseframe.high = j->baseframe.low = 0xFF;
+ return -1;
+ } else {
+ j->baseframe.high = j->ssr.high;
+ j->baseframe.low = j->ssr.low;
+ /* If the status returned is 0x0000 (pg9-9 8021) the call failed */
+ if(j->baseframe.high == 0x00 && j->baseframe.low == 0x00) {
+ return -1;
+ }
+ }
+ ixj_aec_start(j, j->cid_play_aec_level);
+ return size;
+}
+
+static int set_rec_codec(IXJ *j, int rate)
+{
+ int retval = 0;
+
+ j->rec_codec = rate;
+
+ switch (rate) {
+ case G723_63:
+ if (j->ver.low != 0x12 || ixj_convert_loaded) {
+ j->rec_frame_size = 12;
+ j->rec_mode = 0;
+ } else {
+ retval = 1;
+ }
+ break;
+ case G723_53:
+ if (j->ver.low != 0x12 || ixj_convert_loaded) {
+ j->rec_frame_size = 10;
+ j->rec_mode = 0;
+ } else {
+ retval = 1;
+ }
+ break;
+ case TS85:
+ if (j->dsp.low == 0x20 || j->flags.ts85_loaded) {
+ j->rec_frame_size = 16;
+ j->rec_mode = 0;
+ } else {
+ retval = 1;
+ }
+ break;
+ case TS48:
+ if (j->ver.low != 0x12 || ixj_convert_loaded) {
+ j->rec_frame_size = 9;
+ j->rec_mode = 0;
+ } else {
+ retval = 1;
+ }
+ break;
+ case TS41:
+ if (j->ver.low != 0x12 || ixj_convert_loaded) {
+ j->rec_frame_size = 8;
+ j->rec_mode = 0;
+ } else {
+ retval = 1;
+ }
+ break;
+ case G728:
+ if (j->dsp.low != 0x20) {
+ j->rec_frame_size = 48;
+ j->rec_mode = 0;
+ } else {
+ retval = 1;
+ }
+ break;
+ case G729:
+ if (j->dsp.low != 0x20) {
+ if (!j->flags.g729_loaded) {
+ retval = 1;
+ break;
+ }
+ switch (j->baseframe.low) {
+ case 0xA0:
+ j->rec_frame_size = 10;
+ break;
+ case 0x50:
+ j->rec_frame_size = 5;
+ break;
+ default:
+ j->rec_frame_size = 15;
+ break;
+ }
+ j->rec_mode = 0;
+ } else {
+ retval = 1;
+ }
+ break;
+ case G729B:
+ if (j->dsp.low != 0x20) {
+ if (!j->flags.g729_loaded) {
+ retval = 1;
+ break;
+ }
+ switch (j->baseframe.low) {
+ case 0xA0:
+ j->rec_frame_size = 12;
+ break;
+ case 0x50:
+ j->rec_frame_size = 6;
+ break;
+ default:
+ j->rec_frame_size = 18;
+ break;
+ }
+ j->rec_mode = 0;
+ } else {
+ retval = 1;
+ }
+ break;
+ case ULAW:
+ switch (j->baseframe.low) {
+ case 0xA0:
+ j->rec_frame_size = 80;
+ break;
+ case 0x50:
+ j->rec_frame_size = 40;
+ break;
+ default:
+ j->rec_frame_size = 120;
+ break;
+ }
+ j->rec_mode = 4;
+ break;
+ case ALAW:
+ switch (j->baseframe.low) {
+ case 0xA0:
+ j->rec_frame_size = 80;
+ break;
+ case 0x50:
+ j->rec_frame_size = 40;
+ break;
+ default:
+ j->rec_frame_size = 120;
+ break;
+ }
+ j->rec_mode = 4;
+ break;
+ case LINEAR16:
+ switch (j->baseframe.low) {
+ case 0xA0:
+ j->rec_frame_size = 160;
+ break;
+ case 0x50:
+ j->rec_frame_size = 80;
+ break;
+ default:
+ j->rec_frame_size = 240;
+ break;
+ }
+ j->rec_mode = 5;
+ break;
+ case LINEAR8:
+ switch (j->baseframe.low) {
+ case 0xA0:
+ j->rec_frame_size = 80;
+ break;
+ case 0x50:
+ j->rec_frame_size = 40;
+ break;
+ default:
+ j->rec_frame_size = 120;
+ break;
+ }
+ j->rec_mode = 6;
+ break;
+ case WSS:
+ switch (j->baseframe.low) {
+ case 0xA0:
+ j->rec_frame_size = 80;
+ break;
+ case 0x50:
+ j->rec_frame_size = 40;
+ break;
+ default:
+ j->rec_frame_size = 120;
+ break;
+ }
+ j->rec_mode = 7;
+ break;
+ default:
+ kfree(j->read_buffer);
+ j->rec_frame_size = 0;
+ j->rec_mode = -1;
+ j->read_buffer = NULL;
+ j->read_buffer_size = 0;
+ retval = 1;
+ break;
+ }
+ return retval;
+}
+
+static int ixj_record_start(IXJ *j)
+{
+ unsigned short cmd = 0x0000;
+
+ if (j->read_buffer) {
+ ixj_record_stop(j);
+ }
+ j->flags.recording = 1;
+ ixj_WriteDSPCommand(0x0FE0, j); /* Put the DSP in full power mode. */
+
+ if(ixjdebug & 0x0002)
+ printk("IXJ %d Starting Record Codec %d at %ld\n", j->board, j->rec_codec, jiffies);
+
+ if (!j->rec_mode) {
+ switch (j->rec_codec) {
+ case G723_63:
+ cmd = 0x5131;
+ break;
+ case G723_53:
+ cmd = 0x5132;
+ break;
+ case TS85:
+ cmd = 0x5130; /* TrueSpeech 8.5 */
+
+ break;
+ case TS48:
+ cmd = 0x5133; /* TrueSpeech 4.8 */
+
+ break;
+ case TS41:
+ cmd = 0x5134; /* TrueSpeech 4.1 */
+
+ break;
+ case G728:
+ cmd = 0x5135;
+ break;
+ case G729:
+ case G729B:
+ cmd = 0x5136;
+ break;
+ default:
+ return 1;
+ }
+ if (ixj_WriteDSPCommand(cmd, j))
+ return -1;
+ }
+	if (!j->read_buffer) {
+		j->read_buffer = kmalloc(j->rec_frame_size * 2, GFP_ATOMIC);
+		if (!j->read_buffer) {
+			printk("Read buffer allocation for ixj board %d failed!\n", j->board);
+			return -ENOMEM;
+		}
+	}
+ j->read_buffer_size = j->rec_frame_size * 2;
+
+	if (ixj_WriteDSPCommand(0x5102, j))	/* Set Poll sync mode */
+		return -1;
+
+ switch (j->rec_mode) {
+ case 0:
+ cmd = 0x1C03; /* Record C1 */
+
+ break;
+ case 4:
+ if (j->ver.low == 0x12) {
+ cmd = 0x1E03; /* Record C1 */
+
+ } else {
+ cmd = 0x1E01; /* Record C1 */
+
+ }
+ break;
+ case 5:
+ if (j->ver.low == 0x12) {
+ cmd = 0x1E83; /* Record C1 */
+
+ } else {
+ cmd = 0x1E81; /* Record C1 */
+
+ }
+ break;
+ case 6:
+ if (j->ver.low == 0x12) {
+ cmd = 0x1F03; /* Record C1 */
+
+ } else {
+ cmd = 0x1F01; /* Record C1 */
+
+ }
+ break;
+ case 7:
+ if (j->ver.low == 0x12) {
+ cmd = 0x1F83; /* Record C1 */
+ } else {
+ cmd = 0x1F81; /* Record C1 */
+ }
+ break;
+ }
+ if (ixj_WriteDSPCommand(cmd, j))
+ return -1;
+
+ if (j->flags.playing) {
+ ixj_aec_start(j, j->aec_level);
+ }
+ return 0;
+}
+
+static void ixj_record_stop(IXJ *j)
+{
+ if (ixjdebug & 0x0002)
+ printk("IXJ %d Stopping Record Codec %d at %ld\n", j->board, j->rec_codec, jiffies);
+
+ kfree(j->read_buffer);
+ j->read_buffer = NULL;
+ j->read_buffer_size = 0;
+ if (j->rec_mode > -1) {
+ ixj_WriteDSPCommand(0x5120, j);
+ j->rec_mode = -1;
+ }
+ j->flags.recording = 0;
+}
+
+static void ixj_vad(IXJ *j, int arg)
+{
+ if (arg)
+ ixj_WriteDSPCommand(0x513F, j);
+ else
+ ixj_WriteDSPCommand(0x513E, j);
+}
+
+static void set_rec_depth(IXJ *j, int depth)
+{
+ if (depth > 60)
+ depth = 60;
+ if (depth < 0)
+ depth = 0;
+ ixj_WriteDSPCommand(0x5180 + depth, j);
+}
+
+static void set_dtmf_prescale(IXJ *j, int volume)
+{
+ ixj_WriteDSPCommand(0xCF07, j);
+ ixj_WriteDSPCommand(volume, j);
+}
+
+static int get_dtmf_prescale(IXJ *j)
+{
+ ixj_WriteDSPCommand(0xCF05, j);
+ return j->ssr.high << 8 | j->ssr.low;
+}
+
+static void set_rec_volume(IXJ *j, int volume)
+{
+ if(j->aec_level == AEC_AGC) {
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "IXJ: /dev/phone%d Setting AGC Threshold to 0x%4.4x\n", j->board, volume);
+ ixj_WriteDSPCommand(0xCF96, j);
+ ixj_WriteDSPCommand(volume, j);
+ } else {
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "IXJ: /dev/phone %d Setting Record Volume to 0x%4.4x\n", j->board, volume);
+ ixj_WriteDSPCommand(0xCF03, j);
+ ixj_WriteDSPCommand(volume, j);
+ }
+}
+
+static int set_rec_volume_linear(IXJ *j, int volume)
+{
+ int newvolume, dsprecmax;
+
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "IXJ: /dev/phone %d Setting Linear Record Volume to 0x%4.4x\n", j->board, volume);
+ if(volume > 100 || volume < 0) {
+ return -1;
+ }
+
+	/* This should normalize the perceived volume across the different cards, compensating for differences in the hardware. */
+ switch (j->cardtype) {
+ case QTI_PHONEJACK:
+ dsprecmax = 0x440;
+ break;
+ case QTI_LINEJACK:
+ dsprecmax = 0x180;
+ ixj_mixer(0x0203, j); /*Voice Left Volume unmute 6db */
+ ixj_mixer(0x0303, j); /*Voice Right Volume unmute 6db */
+ ixj_mixer(0x0C00, j); /*Mono1 unmute 12db */
+ break;
+ case QTI_PHONEJACK_LITE:
+ dsprecmax = 0x4C0;
+ break;
+ case QTI_PHONEJACK_PCI:
+ dsprecmax = 0x100;
+ break;
+ case QTI_PHONECARD:
+ dsprecmax = 0x400;
+ break;
+ default:
+ return -1;
+ }
+ newvolume = (dsprecmax * volume) / 100;
+ set_rec_volume(j, newvolume);
+ return 0;
+}
+
+static int get_rec_volume(IXJ *j)
+{
+ if(j->aec_level == AEC_AGC) {
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "Getting AGC Threshold\n");
+ ixj_WriteDSPCommand(0xCF86, j);
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "AGC Threshold is 0x%2.2x%2.2x\n", j->ssr.high, j->ssr.low);
+ return j->ssr.high << 8 | j->ssr.low;
+ } else {
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "Getting Record Volume\n");
+ ixj_WriteDSPCommand(0xCF01, j);
+ return j->ssr.high << 8 | j->ssr.low;
+ }
+}
+
+static int get_rec_volume_linear(IXJ *j)
+{
+ int volume, newvolume, dsprecmax;
+
+ switch (j->cardtype) {
+ case QTI_PHONEJACK:
+ dsprecmax = 0x440;
+ break;
+ case QTI_LINEJACK:
+ dsprecmax = 0x180;
+ break;
+ case QTI_PHONEJACK_LITE:
+ dsprecmax = 0x4C0;
+ break;
+ case QTI_PHONEJACK_PCI:
+ dsprecmax = 0x100;
+ break;
+ case QTI_PHONECARD:
+ dsprecmax = 0x400;
+ break;
+ default:
+ return -1;
+ }
+ volume = get_rec_volume(j);
+ newvolume = (volume * 100) / dsprecmax;
+ if(newvolume > 100)
+ newvolume = 100;
+ return newvolume;
+}
+
+static int get_rec_level(IXJ *j)
+{
+ int retval;
+
+ ixj_WriteDSPCommand(0xCF88, j);
+
+ retval = j->ssr.high << 8 | j->ssr.low;
+ retval = (retval * 256) / 240;
+ return retval;
+}
+
+static void ixj_aec_start(IXJ *j, int level)
+{
+ j->aec_level = level;
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "AGC set = 0x%2.2x\n", j->aec_level);
+ if (!level) {
+ aec_stop(j);
+ } else {
+ if (j->rec_codec == G729 || j->play_codec == G729 || j->rec_codec == G729B || j->play_codec == G729B) {
+ ixj_WriteDSPCommand(0xE022, j); /* Move AEC filter buffer */
+
+ ixj_WriteDSPCommand(0x0300, j);
+ }
+ ixj_WriteDSPCommand(0xB001, j); /* AEC On */
+
+ ixj_WriteDSPCommand(0xE013, j); /* Advanced AEC C1 */
+
+ switch (level) {
+ case AEC_LOW:
+ ixj_WriteDSPCommand(0x0000, j); /* Advanced AEC C2 = off */
+
+ ixj_WriteDSPCommand(0xE011, j);
+ ixj_WriteDSPCommand(0xFFFF, j);
+
+ ixj_WriteDSPCommand(0xCF97, j); /* Set AGC Enable */
+ ixj_WriteDSPCommand(0x0000, j); /* to off */
+
+ break;
+
+ case AEC_MED:
+ ixj_WriteDSPCommand(0x0600, j); /* Advanced AEC C2 = on medium */
+
+ ixj_WriteDSPCommand(0xE011, j);
+ ixj_WriteDSPCommand(0x0080, j);
+
+ ixj_WriteDSPCommand(0xCF97, j); /* Set AGC Enable */
+ ixj_WriteDSPCommand(0x0000, j); /* to off */
+
+ break;
+
+ case AEC_HIGH:
+ ixj_WriteDSPCommand(0x0C00, j); /* Advanced AEC C2 = on high */
+
+ ixj_WriteDSPCommand(0xE011, j);
+ ixj_WriteDSPCommand(0x0080, j);
+
+ ixj_WriteDSPCommand(0xCF97, j); /* Set AGC Enable */
+ ixj_WriteDSPCommand(0x0000, j); /* to off */
+
+ break;
+
+ case AEC_AGC:
+			/* First we have to put the AEC into advanced auto mode so that AGC will not conflict with it */
+ ixj_WriteDSPCommand(0x0002, j); /* Attenuation scaling factor of 2 */
+
+ ixj_WriteDSPCommand(0xE011, j);
+ ixj_WriteDSPCommand(0x0100, j); /* Higher Threshold Floor */
+
+ ixj_WriteDSPCommand(0xE012, j); /* Set Train and Lock */
+
+ if(j->cardtype == QTI_LINEJACK || j->cardtype == QTI_PHONECARD)
+ ixj_WriteDSPCommand(0x0224, j);
+ else
+ ixj_WriteDSPCommand(0x1224, j);
+
+ ixj_WriteDSPCommand(0xE014, j);
+ ixj_WriteDSPCommand(0x0003, j); /* Lock threshold at 3dB */
+
+			ixj_WriteDSPCommand(0xE338, j);	/* Set Echo Suppressor Attenuation to 0dB */
+
+ /* Now we can set the AGC initial parameters and turn it on */
+ ixj_WriteDSPCommand(0xCF90, j); /* Set AGC Minimum gain */
+ ixj_WriteDSPCommand(0x0020, j); /* to 0.125 (-18dB) */
+
+ ixj_WriteDSPCommand(0xCF91, j); /* Set AGC Maximum gain */
+ ixj_WriteDSPCommand(0x1000, j); /* to 16 (24dB) */
+
+ ixj_WriteDSPCommand(0xCF92, j); /* Set AGC start gain */
+ ixj_WriteDSPCommand(0x0800, j); /* to 8 (+18dB) */
+
+ ixj_WriteDSPCommand(0xCF93, j); /* Set AGC hold time */
+ ixj_WriteDSPCommand(0x1F40, j); /* to 2 seconds (units are 250us) */
+
+ ixj_WriteDSPCommand(0xCF94, j); /* Set AGC Attack Time Constant */
+ ixj_WriteDSPCommand(0x0005, j); /* to 8ms */
+
+ ixj_WriteDSPCommand(0xCF95, j); /* Set AGC Decay Time Constant */
+ ixj_WriteDSPCommand(0x000D, j); /* to 4096ms */
+
+ ixj_WriteDSPCommand(0xCF96, j); /* Set AGC Attack Threshold */
+ ixj_WriteDSPCommand(0x1200, j); /* to 25% */
+
+ ixj_WriteDSPCommand(0xCF97, j); /* Set AGC Enable */
+ ixj_WriteDSPCommand(0x0001, j); /* to on */
+
+ break;
+
+ case AEC_AUTO:
+ ixj_WriteDSPCommand(0x0002, j); /* Attenuation scaling factor of 2 */
+
+ ixj_WriteDSPCommand(0xE011, j);
+ ixj_WriteDSPCommand(0x0100, j); /* Higher Threshold Floor */
+
+ ixj_WriteDSPCommand(0xE012, j); /* Set Train and Lock */
+
+ if(j->cardtype == QTI_LINEJACK || j->cardtype == QTI_PHONECARD)
+ ixj_WriteDSPCommand(0x0224, j);
+ else
+ ixj_WriteDSPCommand(0x1224, j);
+
+ ixj_WriteDSPCommand(0xE014, j);
+ ixj_WriteDSPCommand(0x0003, j); /* Lock threshold at 3dB */
+
+			ixj_WriteDSPCommand(0xE338, j);	/* Set Echo Suppressor Attenuation to 0dB */
+
+ break;
+ }
+ }
+}
+
+static void aec_stop(IXJ *j)
+{
+ j->aec_level = AEC_OFF;
+ if (j->rec_codec == G729 || j->play_codec == G729 || j->rec_codec == G729B || j->play_codec == G729B) {
+ ixj_WriteDSPCommand(0xE022, j); /* Move AEC filter buffer back */
+
+ ixj_WriteDSPCommand(0x0700, j);
+ }
+ if (j->play_mode != -1 && j->rec_mode != -1)
+ {
+ ixj_WriteDSPCommand(0xB002, j); /* AEC Stop */
+ }
+}
+
+static int set_play_codec(IXJ *j, int rate)
+{
+ int retval = 0;
+
+ j->play_codec = rate;
+
+ switch (rate) {
+ case G723_63:
+ if (j->ver.low != 0x12 || ixj_convert_loaded) {
+ j->play_frame_size = 12;
+ j->play_mode = 0;
+ } else {
+ retval = 1;
+ }
+ break;
+ case G723_53:
+ if (j->ver.low != 0x12 || ixj_convert_loaded) {
+ j->play_frame_size = 10;
+ j->play_mode = 0;
+ } else {
+ retval = 1;
+ }
+ break;
+ case TS85:
+ if (j->dsp.low == 0x20 || j->flags.ts85_loaded) {
+ j->play_frame_size = 16;
+ j->play_mode = 0;
+ } else {
+ retval = 1;
+ }
+ break;
+ case TS48:
+ if (j->ver.low != 0x12 || ixj_convert_loaded) {
+ j->play_frame_size = 9;
+ j->play_mode = 0;
+ } else {
+ retval = 1;
+ }
+ break;
+ case TS41:
+ if (j->ver.low != 0x12 || ixj_convert_loaded) {
+ j->play_frame_size = 8;
+ j->play_mode = 0;
+ } else {
+ retval = 1;
+ }
+ break;
+ case G728:
+ if (j->dsp.low != 0x20) {
+ j->play_frame_size = 48;
+ j->play_mode = 0;
+ } else {
+ retval = 1;
+ }
+ break;
+ case G729:
+ if (j->dsp.low != 0x20) {
+ if (!j->flags.g729_loaded) {
+ retval = 1;
+ break;
+ }
+ switch (j->baseframe.low) {
+ case 0xA0:
+ j->play_frame_size = 10;
+ break;
+ case 0x50:
+ j->play_frame_size = 5;
+ break;
+ default:
+ j->play_frame_size = 15;
+ break;
+ }
+ j->play_mode = 0;
+ } else {
+ retval = 1;
+ }
+ break;
+ case G729B:
+ if (j->dsp.low != 0x20) {
+ if (!j->flags.g729_loaded) {
+ retval = 1;
+ break;
+ }
+ switch (j->baseframe.low) {
+ case 0xA0:
+ j->play_frame_size = 12;
+ break;
+ case 0x50:
+ j->play_frame_size = 6;
+ break;
+ default:
+ j->play_frame_size = 18;
+ break;
+ }
+ j->play_mode = 0;
+ } else {
+ retval = 1;
+ }
+ break;
+ case ULAW:
+ switch (j->baseframe.low) {
+ case 0xA0:
+ j->play_frame_size = 80;
+ break;
+ case 0x50:
+ j->play_frame_size = 40;
+ break;
+ default:
+ j->play_frame_size = 120;
+ break;
+ }
+ j->play_mode = 2;
+ break;
+ case ALAW:
+ switch (j->baseframe.low) {
+ case 0xA0:
+ j->play_frame_size = 80;
+ break;
+ case 0x50:
+ j->play_frame_size = 40;
+ break;
+ default:
+ j->play_frame_size = 120;
+ break;
+ }
+ j->play_mode = 2;
+ break;
+ case LINEAR16:
+ switch (j->baseframe.low) {
+ case 0xA0:
+ j->play_frame_size = 160;
+ break;
+ case 0x50:
+ j->play_frame_size = 80;
+ break;
+ default:
+ j->play_frame_size = 240;
+ break;
+ }
+ j->play_mode = 6;
+ break;
+ case LINEAR8:
+ switch (j->baseframe.low) {
+ case 0xA0:
+ j->play_frame_size = 80;
+ break;
+ case 0x50:
+ j->play_frame_size = 40;
+ break;
+ default:
+ j->play_frame_size = 120;
+ break;
+ }
+ j->play_mode = 4;
+ break;
+ case WSS:
+ switch (j->baseframe.low) {
+ case 0xA0:
+ j->play_frame_size = 80;
+ break;
+ case 0x50:
+ j->play_frame_size = 40;
+ break;
+ default:
+ j->play_frame_size = 120;
+ break;
+ }
+ j->play_mode = 5;
+ break;
+ default:
+ kfree(j->write_buffer);
+ j->play_frame_size = 0;
+ j->play_mode = -1;
+ j->write_buffer = NULL;
+ j->write_buffer_size = 0;
+ retval = 1;
+ break;
+ }
+ return retval;
+}
+
+static int ixj_play_start(IXJ *j)
+{
+ unsigned short cmd = 0x0000;
+
+ if (j->write_buffer) {
+ ixj_play_stop(j);
+ }
+
+ if(ixjdebug & 0x0002)
+ printk("IXJ %d Starting Play Codec %d at %ld\n", j->board, j->play_codec, jiffies);
+
+ j->flags.playing = 1;
+ ixj_WriteDSPCommand(0x0FE0, j); /* Put the DSP in full power mode. */
+
+ j->flags.play_first_frame = 1;
+ j->drybuffer = 0;
+
+ if (!j->play_mode) {
+ switch (j->play_codec) {
+ case G723_63:
+ cmd = 0x5231;
+ break;
+ case G723_53:
+ cmd = 0x5232;
+ break;
+ case TS85:
+ cmd = 0x5230; /* TrueSpeech 8.5 */
+
+ break;
+ case TS48:
+ cmd = 0x5233; /* TrueSpeech 4.8 */
+
+ break;
+ case TS41:
+ cmd = 0x5234; /* TrueSpeech 4.1 */
+
+ break;
+ case G728:
+ cmd = 0x5235;
+ break;
+ case G729:
+ case G729B:
+ cmd = 0x5236;
+ break;
+ default:
+ return 1;
+ }
+ if (ixj_WriteDSPCommand(cmd, j))
+ return -1;
+ }
+ j->write_buffer = kmalloc(j->play_frame_size * 2, GFP_ATOMIC);
+ if (!j->write_buffer) {
+ printk("Write buffer allocation for ixj board %d failed!\n", j->board);
+ return -ENOMEM;
+ }
+/* j->write_buffers_empty = 2; */
+ j->write_buffers_empty = 1;
+ j->write_buffer_size = j->play_frame_size * 2;
+ j->write_buffer_end = j->write_buffer + j->play_frame_size * 2;
+ j->write_buffer_rp = j->write_buffer_wp = j->write_buffer;
+
+	if (ixj_WriteDSPCommand(0x5202, j))	/* Set Poll sync mode */
+		return -1;
+
+ switch (j->play_mode) {
+ case 0:
+ cmd = 0x2C03;
+ break;
+ case 2:
+ if (j->ver.low == 0x12) {
+ cmd = 0x2C23;
+ } else {
+ cmd = 0x2C21;
+ }
+ break;
+ case 4:
+ if (j->ver.low == 0x12) {
+ cmd = 0x2C43;
+ } else {
+ cmd = 0x2C41;
+ }
+ break;
+ case 5:
+ if (j->ver.low == 0x12) {
+ cmd = 0x2C53;
+ } else {
+ cmd = 0x2C51;
+ }
+ break;
+ case 6:
+ if (j->ver.low == 0x12) {
+ cmd = 0x2C63;
+ } else {
+ cmd = 0x2C61;
+ }
+ break;
+ }
+ if (ixj_WriteDSPCommand(cmd, j))
+ return -1;
+
+ if (ixj_WriteDSPCommand(0x2000, j)) /* Playback C2 */
+ return -1;
+
+ if (ixj_WriteDSPCommand(0x2000 + j->play_frame_size, j)) /* Playback C3 */
+ return -1;
+
+ if (j->flags.recording) {
+ ixj_aec_start(j, j->aec_level);
+ }
+
+ return 0;
+}
+
+static void ixj_play_stop(IXJ *j)
+{
+ if (ixjdebug & 0x0002)
+ printk("IXJ %d Stopping Play Codec %d at %ld\n", j->board, j->play_codec, jiffies);
+
+ kfree(j->write_buffer);
+ j->write_buffer = NULL;
+ j->write_buffer_size = 0;
+ if (j->play_mode > -1) {
+ ixj_WriteDSPCommand(0x5221, j); /* Stop playback and flush buffers. 8022 reference page 9-40 */
+
+ j->play_mode = -1;
+ }
+ j->flags.playing = 0;
+}
+
+static inline int get_play_level(IXJ *j)
+{
+	ixj_WriteDSPCommand(0xCF8F, j); /* 8022 Reference page 9-38 */
+	return j->ssr.high << 8 | j->ssr.low;
+}
+
+static unsigned int ixj_poll(struct file *file_p, poll_table * wait)
+{
+ unsigned int mask = 0;
+
+ IXJ *j = get_ixj(NUM(file_p->f_path.dentry->d_inode));
+
+ poll_wait(file_p, &(j->poll_q), wait);
+ if (j->read_buffer_ready > 0)
+ mask |= POLLIN | POLLRDNORM; /* readable */
+ if (j->write_buffers_empty > 0)
+ mask |= POLLOUT | POLLWRNORM; /* writable */
+ if (j->ex.bytes)
+ mask |= POLLPRI;
+ return mask;
+}
+
+static int ixj_play_tone(IXJ *j, char tone)
+{
+ if (!j->tone_state) {
+ if(ixjdebug & 0x0002) {
+ printk("IXJ %d starting tone %d at %ld\n", j->board, tone, jiffies);
+ }
+ if (j->dsp.low == 0x20) {
+ idle(j);
+ }
+ j->tone_start_jif = jiffies;
+
+ j->tone_state = 1;
+ }
+
+ j->tone_index = tone;
+ if (ixj_WriteDSPCommand(0x6000 + j->tone_index, j))
+ return -1;
+
+ return 0;
+}
+
+static int ixj_set_tone_on(unsigned short arg, IXJ *j)
+{
+ j->tone_on_time = arg;
+
+	if (ixj_WriteDSPCommand(0x6E04, j))	/* Set Tone On Period */
+		return -1;
+
+ if (ixj_WriteDSPCommand(arg, j))
+ return -1;
+
+ return 0;
+}
+
+static int SCI_WaitHighSCI(IXJ *j)
+{
+ int cnt;
+
+ j->pld_scrr.byte = inb_p(j->XILINXbase);
+ if (!j->pld_scrr.bits.sci) {
+ for (cnt = 0; cnt < 10; cnt++) {
+ udelay(32);
+ j->pld_scrr.byte = inb_p(j->XILINXbase);
+
+ if ((j->pld_scrr.bits.sci))
+ return 1;
+ }
+ if (ixjdebug & 0x0001)
+ printk(KERN_INFO "SCI Wait High failed %x\n", j->pld_scrr.byte);
+ return 0;
+ } else
+ return 1;
+}
+
+static int SCI_WaitLowSCI(IXJ *j)
+{
+ int cnt;
+
+ j->pld_scrr.byte = inb_p(j->XILINXbase);
+ if (j->pld_scrr.bits.sci) {
+ for (cnt = 0; cnt < 10; cnt++) {
+ udelay(32);
+ j->pld_scrr.byte = inb_p(j->XILINXbase);
+
+ if (!(j->pld_scrr.bits.sci))
+ return 1;
+ }
+ if (ixjdebug & 0x0001)
+ printk(KERN_INFO "SCI Wait Low failed %x\n", j->pld_scrr.byte);
+ return 0;
+ } else
+ return 1;
+}
+
+static int SCI_Control(IXJ *j, int control)
+{
+ switch (control) {
+ case SCI_End:
+ j->pld_scrw.bits.c0 = 0; /* Set PLD Serial control interface */
+
+ j->pld_scrw.bits.c1 = 0; /* to no selection */
+
+ break;
+ case SCI_Enable_DAA:
+ j->pld_scrw.bits.c0 = 1; /* Set PLD Serial control interface */
+
+ j->pld_scrw.bits.c1 = 0; /* to write to DAA */
+
+ break;
+ case SCI_Enable_Mixer:
+ j->pld_scrw.bits.c0 = 0; /* Set PLD Serial control interface */
+
+ j->pld_scrw.bits.c1 = 1; /* to write to mixer */
+
+ break;
+ case SCI_Enable_EEPROM:
+ j->pld_scrw.bits.c0 = 1; /* Set PLD Serial control interface */
+
+ j->pld_scrw.bits.c1 = 1; /* to write to EEPROM */
+
+ break;
+ default:
+		return 0;
+ }
+ outb_p(j->pld_scrw.byte, j->XILINXbase);
+
+ switch (control) {
+ case SCI_End:
+		return 1;
+ case SCI_Enable_DAA:
+ case SCI_Enable_Mixer:
+ case SCI_Enable_EEPROM:
+ if (!SCI_WaitHighSCI(j))
+ return 0;
+ break;
+ default:
+		return 0;
+ }
+ return 1;
+}
+
+static int SCI_Prepare(IXJ *j)
+{
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+
+ if (!SCI_WaitLowSCI(j))
+ return 0;
+
+ return 1;
+}
+
+static int ixj_get_mixer(long val, IXJ *j)
+{
+ int reg = (val & 0x1F00) >> 8;
+ return j->mix.vol[reg];
+}
+
+static int ixj_mixer(long val, IXJ *j)
+{
+ BYTES bytes;
+
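+	/* 'val' packs the mixer register index in bits 8..12 and the data byte in bits 0..7. */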
+ bytes.high = (val & 0x1F00) >> 8;
+ bytes.low = val & 0x00FF;
+
+ /* save mixer value so we can get back later on */
+ j->mix.vol[bytes.high] = bytes.low;
+
+ outb_p(bytes.high & 0x1F, j->XILINXbase + 0x03); /* Load Mixer Address */
+
+ outb_p(bytes.low, j->XILINXbase + 0x02); /* Load Mixer Data */
+
+ SCI_Control(j, SCI_Enable_Mixer);
+
+ SCI_Control(j, SCI_End);
+
+ return 0;
+}
+
+static int daa_load(BYTES * p_bytes, IXJ *j)
+{
+ outb_p(p_bytes->high, j->XILINXbase + 0x03);
+ outb_p(p_bytes->low, j->XILINXbase + 0x02);
+ if (!SCI_Control(j, SCI_Enable_DAA))
+ return 0;
+ else
+ return 1;
+}
+
+static int ixj_daa_cr4(IXJ *j, char reg)
+{
+ BYTES bytes;
+
+ switch (j->daa_mode) {
+ case SOP_PU_SLEEP:
+ bytes.high = 0x14;
+ break;
+ case SOP_PU_RINGING:
+ bytes.high = 0x54;
+ break;
+ case SOP_PU_CONVERSATION:
+ bytes.high = 0x94;
+ break;
+ case SOP_PU_PULSEDIALING:
+ bytes.high = 0xD4;
+ break;
+ }
+
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.reg = reg;
+
+ switch (j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.bitreg.AGX) {
+ case 0:
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.bitreg.AGR_Z = 0;
+ break;
+ case 1:
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.bitreg.AGR_Z = 2;
+ break;
+ case 2:
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.bitreg.AGR_Z = 1;
+ break;
+ case 3:
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.bitreg.AGR_Z = 3;
+ break;
+ }
+
+ bytes.low = j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.reg;
+
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ if (!SCI_Prepare(j))
+ return 0;
+
+ return 1;
+}
+
+static char daa_int_read(IXJ *j)
+{
+ BYTES bytes;
+
+ if (!SCI_Prepare(j))
+ return 0;
+
+ bytes.high = 0x38;
+ bytes.low = 0x00;
+ outb_p(bytes.high, j->XILINXbase + 0x03);
+ outb_p(bytes.low, j->XILINXbase + 0x02);
+
+ if (!SCI_Control(j, SCI_Enable_DAA))
+ return 0;
+
+ bytes.high = inb_p(j->XILINXbase + 0x03);
+ bytes.low = inb_p(j->XILINXbase + 0x02);
+ if (bytes.low != ALISDAA_ID_BYTE) {
+ if (ixjdebug & 0x0001)
+ printk("Cannot read DAA ID Byte high = %d low = %d\n", bytes.high, bytes.low);
+ return 0;
+ }
+ if (!SCI_Control(j, SCI_Enable_DAA))
+ return 0;
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+
+ bytes.high = inb_p(j->XILINXbase + 0x03);
+ bytes.low = inb_p(j->XILINXbase + 0x02);
+
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.reg = bytes.high;
+
+ return 1;
+}
+
+static char daa_CR_read(IXJ *j, int cr)
+{
+ IXJ_WORD wdata;
+ BYTES bytes;
+
+ if (!SCI_Prepare(j))
+ return 0;
+
+ switch (j->daa_mode) {
+ case SOP_PU_SLEEP:
+ bytes.high = 0x30 + cr;
+ break;
+ case SOP_PU_RINGING:
+ bytes.high = 0x70 + cr;
+ break;
+ case SOP_PU_CONVERSATION:
+ bytes.high = 0xB0 + cr;
+ break;
+ case SOP_PU_PULSEDIALING:
+ default:
+ bytes.high = 0xF0 + cr;
+ break;
+ }
+
+ bytes.low = 0x00;
+
+ outb_p(bytes.high, j->XILINXbase + 0x03);
+ outb_p(bytes.low, j->XILINXbase + 0x02);
+
+ if (!SCI_Control(j, SCI_Enable_DAA))
+ return 0;
+
+ bytes.high = inb_p(j->XILINXbase + 0x03);
+ bytes.low = inb_p(j->XILINXbase + 0x02);
+ if (bytes.low != ALISDAA_ID_BYTE) {
+ if (ixjdebug & 0x0001)
+ printk("Cannot read DAA ID Byte high = %d low = %d\n", bytes.high, bytes.low);
+ return 0;
+ }
+ if (!SCI_Control(j, SCI_Enable_DAA))
+ return 0;
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+
+ wdata.word = inw_p(j->XILINXbase + 0x02);
+
+ switch(cr){
+ case 5:
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr5.reg = wdata.bytes.high;
+ break;
+ case 4:
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.reg = wdata.bytes.high;
+ break;
+ case 3:
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr3.reg = wdata.bytes.high;
+ break;
+ case 2:
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr2.reg = wdata.bytes.high;
+ break;
+ case 1:
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.reg = wdata.bytes.high;
+ break;
+ case 0:
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg = wdata.bytes.high;
+ break;
+ default:
+ return 0;
+ }
+ return 1;
+}
+
+static int ixj_daa_cid_reset(IXJ *j)
+{
+ int i;
+ BYTES bytes;
+
+ if (ixjdebug & 0x0002)
+ printk("DAA Clearing CID ram\n");
+
+ if (!SCI_Prepare(j))
+ return 0;
+
+ bytes.high = 0x58;
+ bytes.low = 0x00;
+ outb_p(bytes.high, j->XILINXbase + 0x03);
+ outb_p(bytes.low, j->XILINXbase + 0x02);
+
+ if (!SCI_Control(j, SCI_Enable_DAA))
+ return 0;
+
+ if (!SCI_WaitHighSCI(j))
+ return 0;
+
+ for (i = 0; i < ALISDAA_CALLERID_SIZE - 1; i += 2) {
+ bytes.high = bytes.low = 0x00;
+ outb_p(bytes.high, j->XILINXbase + 0x03);
+
+ if (i < ALISDAA_CALLERID_SIZE - 1)
+ outb_p(bytes.low, j->XILINXbase + 0x02);
+
+ if (!SCI_Control(j, SCI_Enable_DAA))
+ return 0;
+
+ if (!SCI_WaitHighSCI(j))
+ return 0;
+
+ }
+
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+
+ if (ixjdebug & 0x0002)
+ printk("DAA CID ram cleared\n");
+
+ return 1;
+}
+
+static int ixj_daa_cid_read(IXJ *j)
+{
+ int i;
+ BYTES bytes;
+ char CID[ALISDAA_CALLERID_SIZE];
+ bool mContinue;
+ char *pIn, *pOut;
+
+ if (!SCI_Prepare(j))
+ return 0;
+
+ bytes.high = 0x78;
+ bytes.low = 0x00;
+ outb_p(bytes.high, j->XILINXbase + 0x03);
+ outb_p(bytes.low, j->XILINXbase + 0x02);
+
+ if (!SCI_Control(j, SCI_Enable_DAA))
+ return 0;
+
+ if (!SCI_WaitHighSCI(j))
+ return 0;
+
+ bytes.high = inb_p(j->XILINXbase + 0x03);
+ bytes.low = inb_p(j->XILINXbase + 0x02);
+ if (bytes.low != ALISDAA_ID_BYTE) {
+ if (ixjdebug & 0x0001)
+			printk("DAA CID read cannot read DAA ID Byte high = %d low = %d\n", bytes.high, bytes.low);
+ return 0;
+ }
+ for (i = 0; i < ALISDAA_CALLERID_SIZE; i += 2) {
+ bytes.high = bytes.low = 0x00;
+ outb_p(bytes.high, j->XILINXbase + 0x03);
+ outb_p(bytes.low, j->XILINXbase + 0x02);
+
+ if (!SCI_Control(j, SCI_Enable_DAA))
+ return 0;
+
+ if (!SCI_WaitHighSCI(j))
+ return 0;
+
+ CID[i + 0] = inb_p(j->XILINXbase + 0x03);
+ CID[i + 1] = inb_p(j->XILINXbase + 0x02);
+ }
+
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+
+ pIn = CID;
+ pOut = j->m_DAAShadowRegs.CAO_REGS.CAO.CallerID;
+ mContinue = true;
+ while (mContinue) {
+ if ((pIn[1] & 0x03) == 0x01) {
+ pOut[0] = pIn[0];
+ }
+ if ((pIn[2] & 0x0c) == 0x04) {
+ pOut[1] = ((pIn[2] & 0x03) << 6) | ((pIn[1] & 0xfc) >> 2);
+ }
+ if ((pIn[3] & 0x30) == 0x10) {
+ pOut[2] = ((pIn[3] & 0x0f) << 4) | ((pIn[2] & 0xf0) >> 4);
+ }
+ if ((pIn[4] & 0xc0) == 0x40) {
+ pOut[3] = ((pIn[4] & 0x3f) << 2) | ((pIn[3] & 0xc0) >> 6);
+ } else {
+ mContinue = false;
+ }
+ pIn += 5, pOut += 4;
+ }
+ memset(&j->cid, 0, sizeof(PHONE_CID));
+ pOut = j->m_DAAShadowRegs.CAO_REGS.CAO.CallerID;
+ pOut += 4;
+ strncpy(j->cid.month, pOut, 2);
+ pOut += 2;
+ strncpy(j->cid.day, pOut, 2);
+ pOut += 2;
+ strncpy(j->cid.hour, pOut, 2);
+ pOut += 2;
+ strncpy(j->cid.min, pOut, 2);
+ pOut += 3;
+ j->cid.numlen = *pOut;
+ pOut += 1;
+ strncpy(j->cid.number, pOut, j->cid.numlen);
+ pOut += j->cid.numlen + 1;
+ j->cid.namelen = *pOut;
+ pOut += 1;
+ strncpy(j->cid.name, pOut, j->cid.namelen);
+
+ ixj_daa_cid_reset(j);
+ return 1;
+}
+
+static char daa_get_version(IXJ *j)
+{
+ BYTES bytes;
+
+ if (!SCI_Prepare(j))
+ return 0;
+
+ bytes.high = 0x35;
+ bytes.low = 0x00;
+ outb_p(bytes.high, j->XILINXbase + 0x03);
+ outb_p(bytes.low, j->XILINXbase + 0x02);
+
+ if (!SCI_Control(j, SCI_Enable_DAA))
+ return 0;
+
+ bytes.high = inb_p(j->XILINXbase + 0x03);
+ bytes.low = inb_p(j->XILINXbase + 0x02);
+ if (bytes.low != ALISDAA_ID_BYTE) {
+ if (ixjdebug & 0x0001)
+ printk("DAA Get Version Cannot read DAA ID Byte high = %d low = %d\n", bytes.high, bytes.low);
+ return 0;
+ }
+ if (!SCI_Control(j, SCI_Enable_DAA))
+ return 0;
+
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+
+ bytes.high = inb_p(j->XILINXbase + 0x03);
+ bytes.low = inb_p(j->XILINXbase + 0x02);
+ if (ixjdebug & 0x0002)
+ printk("DAA CR5 Byte high = 0x%x low = 0x%x\n", bytes.high, bytes.low);
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr5.reg = bytes.high;
+ return bytes.high;
+}
+
+static int daa_set_mode(IXJ *j, int mode)
+{
+ /* NOTE:
+ The DAA *MUST* be in the conversation mode if the
+ PSTN line is to be seized (PSTN line off-hook).
+ Taking the PSTN line off-hook while the DAA is in
+ a mode other than conversation mode will cause a
+ hardware failure of the ALIS-A part.
+
+ NOTE:
+ The DAA can only go to SLEEP, RINGING or PULSEDIALING modes
+ if the PSTN line is on-hook. Failure to have the PSTN line
+ in the on-hook state WILL CAUSE A HARDWARE FAILURE OF THE
+ ALIS-A part.
+ */
+
+ BYTES bytes;
+
+ j->flags.pstn_rmr = 0;
+
+ if (!SCI_Prepare(j))
+ return 0;
+
+ switch (mode) {
+ case SOP_PU_RESET:
+ j->pld_scrw.bits.daafsyncen = 0; /* Turn off DAA Frame Sync */
+
+ outb_p(j->pld_scrw.byte, j->XILINXbase);
+ j->pld_slicw.bits.rly2 = 0;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ bytes.high = 0x10;
+ bytes.low = j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg;
+ daa_load(&bytes, j);
+ if (!SCI_Prepare(j))
+ return 0;
+
+ j->daa_mode = SOP_PU_SLEEP;
+ break;
+ case SOP_PU_SLEEP:
+ if(j->daa_mode == SOP_PU_SLEEP)
+ {
+ break;
+ }
+ if (ixjdebug & 0x0008)
+ printk(KERN_INFO "phone DAA: SOP_PU_SLEEP at %ld\n", jiffies);
+/* if(j->daa_mode == SOP_PU_CONVERSATION) */
+ {
+ j->pld_scrw.bits.daafsyncen = 0; /* Turn off DAA Frame Sync */
+
+ outb_p(j->pld_scrw.byte, j->XILINXbase);
+ j->pld_slicw.bits.rly2 = 0;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ bytes.high = 0x10;
+ bytes.low = j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg;
+ daa_load(&bytes, j);
+ if (!SCI_Prepare(j))
+ return 0;
+ }
+ j->pld_scrw.bits.daafsyncen = 0; /* Turn off DAA Frame Sync */
+
+ outb_p(j->pld_scrw.byte, j->XILINXbase);
+ j->pld_slicw.bits.rly2 = 0;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ bytes.high = 0x10;
+ bytes.low = j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg;
+ daa_load(&bytes, j);
+ if (!SCI_Prepare(j))
+ return 0;
+
+ j->daa_mode = SOP_PU_SLEEP;
+ j->flags.pstn_ringing = 0;
+ j->ex.bits.pstn_ring = 0;
+ j->pstn_sleeptil = jiffies + (hertz / 4);
+ wake_up_interruptible(&j->read_q); /* Wake any blocked readers */
+ wake_up_interruptible(&j->write_q); /* Wake any blocked writers */
+ wake_up_interruptible(&j->poll_q); /* Wake any blocked selects */
+ break;
+ case SOP_PU_RINGING:
+ if (ixjdebug & 0x0008)
+ printk(KERN_INFO "phone DAA: SOP_PU_RINGING at %ld\n", jiffies);
+ j->pld_scrw.bits.daafsyncen = 0; /* Turn off DAA Frame Sync */
+
+ outb_p(j->pld_scrw.byte, j->XILINXbase);
+ j->pld_slicw.bits.rly2 = 0;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ bytes.high = 0x50;
+ bytes.low = j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg;
+ daa_load(&bytes, j);
+ if (!SCI_Prepare(j))
+ return 0;
+ j->daa_mode = SOP_PU_RINGING;
+ break;
+ case SOP_PU_CONVERSATION:
+ if (ixjdebug & 0x0008)
+ printk(KERN_INFO "phone DAA: SOP_PU_CONVERSATION at %ld\n", jiffies);
+ bytes.high = 0x90;
+ bytes.low = j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg;
+ daa_load(&bytes, j);
+ if (!SCI_Prepare(j))
+ return 0;
+ j->pld_slicw.bits.rly2 = 1;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ j->pld_scrw.bits.daafsyncen = 1; /* Turn on DAA Frame Sync */
+
+ outb_p(j->pld_scrw.byte, j->XILINXbase);
+ j->daa_mode = SOP_PU_CONVERSATION;
+ j->flags.pstn_ringing = 0;
+ j->ex.bits.pstn_ring = 0;
+ j->pstn_sleeptil = jiffies;
+ j->pstn_ring_start = j->pstn_ring_stop = j->pstn_ring_int = 0;
+ break;
+ case SOP_PU_PULSEDIALING:
+ if (ixjdebug & 0x0008)
+ printk(KERN_INFO "phone DAA: SOP_PU_PULSEDIALING at %ld\n", jiffies);
+ j->pld_scrw.bits.daafsyncen = 0; /* Turn off DAA Frame Sync */
+
+ outb_p(j->pld_scrw.byte, j->XILINXbase);
+ j->pld_slicw.bits.rly2 = 0;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ bytes.high = 0xD0;
+ bytes.low = j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg;
+ daa_load(&bytes, j);
+ if (!SCI_Prepare(j))
+ return 0;
+ j->daa_mode = SOP_PU_PULSEDIALING;
+ break;
+ default:
+ break;
+ }
+ return 1;
+}
+
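+/*
+ * Push the cached DAA shadow registers out to the chip: the SOP control
+ * registers (cr4..cr0), the XOP registers and the sixteen COP coefficient
+ * blocks (addresses 0x00 - 0x0F) are written two bytes at a time through
+ * daa_load(), with the coefficient blocks separated by SCI_End /
+ * SCI_WaitLowSCI handshakes.
+ */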
+static int ixj_daa_write(IXJ *j)
+{
+ BYTES bytes;
+
+ j->flags.pstncheck = 1;
+
+ daa_set_mode(j, SOP_PU_SLEEP);
+
+ if (!SCI_Prepare(j))
+ return 0;
+
+ outb_p(j->pld_scrw.byte, j->XILINXbase);
+
+ bytes.high = 0x14;
+ bytes.low = j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.reg;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.SOP_REGS.SOP.cr3.reg;
+ bytes.low = j->m_DAAShadowRegs.SOP_REGS.SOP.cr2.reg;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.reg;
+ bytes.low = j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ if (!SCI_Prepare(j))
+ return 0;
+
+ bytes.high = 0x1F;
+ bytes.low = j->m_DAAShadowRegs.XOP_REGS.XOP.xr7.reg;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.XOP_xr6_W.reg;
+ bytes.low = j->m_DAAShadowRegs.XOP_REGS.XOP.xr5.reg;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.XOP_REGS.XOP.xr4.reg;
+ bytes.low = j->m_DAAShadowRegs.XOP_REGS.XOP.xr3.reg;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.XOP_REGS.XOP.xr2.reg;
+ bytes.low = j->m_DAAShadowRegs.XOP_REGS.XOP.xr1.reg;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.XOP_xr0_W.reg;
+ bytes.low = 0x00;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ if (!SCI_Prepare(j))
+ return 0;
+
+ bytes.high = 0x00;
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[7];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[6];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[5];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[4];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[3];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[2];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[1];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[0];
+ bytes.low = 0x00;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+ if (!SCI_WaitLowSCI(j))
+ return 0;
+
+ bytes.high = 0x01;
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[7];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[6];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[5];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[4];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[3];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[2];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[1];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[0];
+ bytes.low = 0x00;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+ if (!SCI_WaitLowSCI(j))
+ return 0;
+
+ bytes.high = 0x02;
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[7];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[6];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[5];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[4];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[3];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[2];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[1];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[0];
+ bytes.low = 0x00;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+ if (!SCI_WaitLowSCI(j))
+ return 0;
+
+ bytes.high = 0x03;
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[7];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[6];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[5];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[4];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[3];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[2];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[1];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[0];
+ bytes.low = 0x00;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+ if (!SCI_WaitLowSCI(j))
+ return 0;
+
+ bytes.high = 0x04;
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[7];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[6];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[5];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[4];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[3];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[2];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[1];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[0];
+ bytes.low = 0x00;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+ if (!SCI_WaitLowSCI(j))
+ return 0;
+
+ bytes.high = 0x05;
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[7];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[6];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[5];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[4];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[3];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[2];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[1];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[0];
+ bytes.low = 0x00;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+ if (!SCI_WaitLowSCI(j))
+ return 0;
+
+ bytes.high = 0x06;
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[7];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[6];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[5];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[4];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[3];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[2];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[1];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[0];
+ bytes.low = 0x00;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+ if (!SCI_WaitLowSCI(j))
+ return 0;
+
+ bytes.high = 0x07;
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[7];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[6];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[5];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[4];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[3];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[2];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[1];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[0];
+ bytes.low = 0x00;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+ if (!SCI_WaitLowSCI(j))
+ return 0;
+
+ bytes.high = 0x08;
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[7];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[6];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[5];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[4];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[3];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[2];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[1];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[0];
+ bytes.low = 0x00;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+ if (!SCI_WaitLowSCI(j))
+ return 0;
+
+ bytes.high = 0x09;
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[3];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[2];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[1];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[0];
+ bytes.low = 0x00;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+ if (!SCI_WaitLowSCI(j))
+ return 0;
+
+ bytes.high = 0x0A;
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[3];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[2];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[1];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[0];
+ bytes.low = 0x00;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+ if (!SCI_WaitLowSCI(j))
+ return 0;
+
+ bytes.high = 0x0B;
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[3];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[2];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[1];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[0];
+ bytes.low = 0x00;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+ if (!SCI_WaitLowSCI(j))
+ return 0;
+
+ bytes.high = 0x0C;
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[3];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[2];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[1];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[0];
+ bytes.low = 0x00;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+ if (!SCI_WaitLowSCI(j))
+ return 0;
+
+ bytes.high = 0x0D;
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[3];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[2];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[1];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[0];
+ bytes.low = 0x00;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+ if (!SCI_WaitLowSCI(j))
+ return 0;
+
+ bytes.high = 0x0E;
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[7];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[6];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[5];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[4];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[3];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[2];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[1];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[0];
+ bytes.low = 0x00;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+ if (!SCI_WaitLowSCI(j))
+ return 0;
+
+ bytes.high = 0x0F;
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[7];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[6];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[5];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[4];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[3];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[2];
+ bytes.low = j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[1];
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ bytes.high = j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[0];
+ bytes.low = 0x00;
+ if (!daa_load(&bytes, j))
+ return 0;
+
+ udelay(32);
+ j->pld_scrr.byte = inb_p(j->XILINXbase);
+ if (!SCI_Control(j, SCI_End))
+ return 0;
+
+ outb_p(j->pld_scrw.byte, j->XILINXbase);
+
+ if (ixjdebug & 0x0002)
+ printk("DAA Coefficients Loaded\n");
+
+ j->flags.pstncheck = 0;
+ return 1;
+}
+
+static int ixj_set_tone_off(unsigned short arg, IXJ *j)
+{
+ j->tone_off_time = arg;
+ if (ixj_WriteDSPCommand(0x6E05, j))	/* Set Tone Off Period */
+ 	return -1;
+ if (ixj_WriteDSPCommand(arg, j))
+ return -1;
+ return 0;
+}
+
+static int ixj_get_tone_on(IXJ *j)
+{
+ if (ixj_WriteDSPCommand(0x6E06, j))	/* Get Tone On Period */
+ 	return -1;
+ return 0;
+}
+
+static int ixj_get_tone_off(IXJ *j)
+{
+ if (ixj_WriteDSPCommand(0x6E07, j))	/* Get Tone Off Period */
+ 	return -1;
+ return 0;
+}
+
+static void ixj_busytone(IXJ *j)
+{
+ j->flags.ringback = 0;
+ j->flags.dialtone = 0;
+ j->flags.busytone = 1;
+ ixj_set_tone_on(0x07D0, j);
+ ixj_set_tone_off(0x07D0, j);
+ ixj_play_tone(j, 27);
+}
+
+static void ixj_dialtone(IXJ *j)
+{
+ j->flags.ringback = 0;
+ j->flags.dialtone = 1;
+ j->flags.busytone = 0;
+ if (j->dsp.low == 0x20) {
+ return;
+ } else {
+ ixj_set_tone_on(0xFFFF, j);
+ ixj_set_tone_off(0x0000, j);
+ ixj_play_tone(j, 25);
+ }
+}
+
+static void ixj_cpt_stop(IXJ *j)
+{
+ if(j->tone_state || j->tone_cadence_state)
+ {
+ j->flags.dialtone = 0;
+ j->flags.busytone = 0;
+ j->flags.ringback = 0;
+ ixj_set_tone_on(0x0001, j);
+ ixj_set_tone_off(0x0000, j);
+ ixj_play_tone(j, 0);
+ j->tone_state = j->tone_cadence_state = 0;
+ if (j->cadence_t) {
+ kfree(j->cadence_t->ce);
+ kfree(j->cadence_t);
+ j->cadence_t = NULL;
+ }
+ }
+ if (j->play_mode == -1 && j->rec_mode == -1)
+ idle(j);
+ if (j->play_mode != -1 && j->dsp.low == 0x20)
+ ixj_play_start(j);
+ if (j->rec_mode != -1 && j->dsp.low == 0x20)
+ ixj_record_start(j);
+}
+
+static void ixj_ringback(IXJ *j)
+{
+ j->flags.busytone = 0;
+ j->flags.dialtone = 0;
+ j->flags.ringback = 1;
+ ixj_set_tone_on(0x0FA0, j);
+ ixj_set_tone_off(0x2EE0, j);
+ ixj_play_tone(j, 26);
+}
+
+static void ixj_testram(IXJ *j)
+{
+ ixj_WriteDSPCommand(0x3001, j); /* Test External SRAM */
+}
+
+static int ixj_build_cadence(IXJ *j, IXJ_CADENCE __user * cp)
+{
+ ixj_cadence *lcp;
+ IXJ_CADENCE_ELEMENT __user *cep;
+ IXJ_CADENCE_ELEMENT *lcep;
+ IXJ_TONE ti;
+ int err;
+
+ lcp = kmalloc(sizeof(ixj_cadence), GFP_KERNEL);
+ if (lcp == NULL)
+ return -ENOMEM;
+
+ err = -EFAULT;
+ if (copy_from_user(&lcp->elements_used,
+ &cp->elements_used, sizeof(int)))
+ goto out;
+ if (copy_from_user(&lcp->termination,
+ &cp->termination, sizeof(IXJ_CADENCE_TERM)))
+ goto out;
+ if (get_user(cep, &cp->ce))
+ goto out;
+
+ err = -EINVAL;
+ if ((unsigned)lcp->elements_used >= ~0U/sizeof(IXJ_CADENCE_ELEMENT))
+ goto out;
+
+ err = -ENOMEM;
+ lcep = kmalloc(sizeof(IXJ_CADENCE_ELEMENT) * lcp->elements_used, GFP_KERNEL);
+ if (!lcep)
+ goto out;
+
+ err = -EFAULT;
+ if (copy_from_user(lcep, cep, sizeof(IXJ_CADENCE_ELEMENT) * lcp->elements_used))
+ goto out1;
+
+ if (j->cadence_t) {
+ kfree(j->cadence_t->ce);
+ kfree(j->cadence_t);
+ }
+ lcp->ce = (void *) lcep;
+ j->cadence_t = lcp;
+ j->tone_cadence_state = 0;
+ ixj_set_tone_on(lcp->ce[0].tone_on_time, j);
+ ixj_set_tone_off(lcp->ce[0].tone_off_time, j);
+ if (j->cadence_t->ce[j->tone_cadence_state].freq0) {
+ ti.tone_index = j->cadence_t->ce[j->tone_cadence_state].index;
+ ti.freq0 = j->cadence_t->ce[j->tone_cadence_state].freq0;
+ ti.gain0 = j->cadence_t->ce[j->tone_cadence_state].gain0;
+ ti.freq1 = j->cadence_t->ce[j->tone_cadence_state].freq1;
+ ti.gain1 = j->cadence_t->ce[j->tone_cadence_state].gain1;
+ ixj_init_tone(j, &ti);
+ }
+ ixj_play_tone(j, lcp->ce[0].index);
+ return 1;
+out1:
+ kfree(lcep);
+out:
+ kfree(lcp);
+ return err;
+}
+
+static int ixj_build_filter_cadence(IXJ *j, IXJ_FILTER_CADENCE __user * cp)
+{
+ IXJ_FILTER_CADENCE *lcp;
+ lcp = memdup_user(cp, sizeof(IXJ_FILTER_CADENCE));
+ if (IS_ERR(lcp)) {
+ if(ixjdebug & 0x0001) {
+ printk(KERN_INFO "Could not allocate memory for cadence or could not copy cadence to kernel\n");
+ }
+ return PTR_ERR(lcp);
+ }
+ if (lcp->filter > 5) {
+ if(ixjdebug & 0x0001) {
+ printk(KERN_INFO "Cadence out of range\n");
+ }
+ kfree(lcp);
+ return -1;
+ }
+ j->cadence_f[lcp->filter].state = 0;
+ j->cadence_f[lcp->filter].enable = lcp->enable;
+ j->filter_en[lcp->filter] = j->cadence_f[lcp->filter].en_filter = lcp->en_filter;
+ j->cadence_f[lcp->filter].on1 = lcp->on1;
+ j->cadence_f[lcp->filter].on1min = 0;
+ j->cadence_f[lcp->filter].on1max = 0;
+ j->cadence_f[lcp->filter].off1 = lcp->off1;
+ j->cadence_f[lcp->filter].off1min = 0;
+ j->cadence_f[lcp->filter].off1max = 0;
+ j->cadence_f[lcp->filter].on2 = lcp->on2;
+ j->cadence_f[lcp->filter].on2min = 0;
+ j->cadence_f[lcp->filter].on2max = 0;
+ j->cadence_f[lcp->filter].off2 = lcp->off2;
+ j->cadence_f[lcp->filter].off2min = 0;
+ j->cadence_f[lcp->filter].off2max = 0;
+ j->cadence_f[lcp->filter].on3 = lcp->on3;
+ j->cadence_f[lcp->filter].on3min = 0;
+ j->cadence_f[lcp->filter].on3max = 0;
+ j->cadence_f[lcp->filter].off3 = lcp->off3;
+ j->cadence_f[lcp->filter].off3min = 0;
+ j->cadence_f[lcp->filter].off3max = 0;
+ if(ixjdebug & 0x0002) {
+ printk(KERN_INFO "Cadence %d loaded\n", lcp->filter);
+ }
+ kfree(lcp);
+ return 0;
+}
+
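+/*
+ * Build the capability list reported through the PHONE_CAPABILITIES*
+ * ioctls: vendor and device entries, the ports each card type provides
+ * (POTS, SPEAKER, HANDSET, PSTN) and the codecs it supports, including
+ * codecs that are only available once the matching firmware is loaded.
+ */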
+static void add_caps(IXJ *j)
+{
+ j->caps = 0;
+ j->caplist[j->caps].cap = PHONE_VENDOR_QUICKNET;
+ strcpy(j->caplist[j->caps].desc, "Quicknet Technologies, Inc. (www.quicknet.net)");
+ j->caplist[j->caps].captype = vendor;
+ j->caplist[j->caps].handle = j->caps++;
+ j->caplist[j->caps].captype = device;
+ switch (j->cardtype) {
+ case QTI_PHONEJACK:
+ strcpy(j->caplist[j->caps].desc, "Quicknet Internet PhoneJACK");
+ break;
+ case QTI_LINEJACK:
+ strcpy(j->caplist[j->caps].desc, "Quicknet Internet LineJACK");
+ break;
+ case QTI_PHONEJACK_LITE:
+ strcpy(j->caplist[j->caps].desc, "Quicknet Internet PhoneJACK Lite");
+ break;
+ case QTI_PHONEJACK_PCI:
+ strcpy(j->caplist[j->caps].desc, "Quicknet Internet PhoneJACK PCI");
+ break;
+ case QTI_PHONECARD:
+ strcpy(j->caplist[j->caps].desc, "Quicknet Internet PhoneCARD");
+ break;
+ }
+ j->caplist[j->caps].cap = j->cardtype;
+ j->caplist[j->caps].handle = j->caps++;
+ strcpy(j->caplist[j->caps].desc, "POTS");
+ j->caplist[j->caps].captype = port;
+ j->caplist[j->caps].cap = pots;
+ j->caplist[j->caps].handle = j->caps++;
+
+ /* add devices that can do speaker/mic */
+ switch (j->cardtype) {
+ case QTI_PHONEJACK:
+ case QTI_LINEJACK:
+ case QTI_PHONEJACK_PCI:
+ case QTI_PHONECARD:
+ strcpy(j->caplist[j->caps].desc, "SPEAKER");
+ j->caplist[j->caps].captype = port;
+ j->caplist[j->caps].cap = speaker;
+ j->caplist[j->caps].handle = j->caps++;
+ default:
+ break;
+ }
+
+ /* add devices that can do handset */
+ switch (j->cardtype) {
+ case QTI_PHONEJACK:
+ strcpy(j->caplist[j->caps].desc, "HANDSET");
+ j->caplist[j->caps].captype = port;
+ j->caplist[j->caps].cap = handset;
+ j->caplist[j->caps].handle = j->caps++;
+ break;
+ default:
+ break;
+ }
+
+ /* add devices that can do PSTN */
+ switch (j->cardtype) {
+ case QTI_LINEJACK:
+ strcpy(j->caplist[j->caps].desc, "PSTN");
+ j->caplist[j->caps].captype = port;
+ j->caplist[j->caps].cap = pstn;
+ j->caplist[j->caps].handle = j->caps++;
+ break;
+ default:
+ break;
+ }
+
+ /* add codecs - all cards can do uLaw, linear 8/16, and Windows sound system */
+ strcpy(j->caplist[j->caps].desc, "ULAW");
+ j->caplist[j->caps].captype = codec;
+ j->caplist[j->caps].cap = ULAW;
+ j->caplist[j->caps].handle = j->caps++;
+
+ strcpy(j->caplist[j->caps].desc, "LINEAR 16 bit");
+ j->caplist[j->caps].captype = codec;
+ j->caplist[j->caps].cap = LINEAR16;
+ j->caplist[j->caps].handle = j->caps++;
+
+ strcpy(j->caplist[j->caps].desc, "LINEAR 8 bit");
+ j->caplist[j->caps].captype = codec;
+ j->caplist[j->caps].cap = LINEAR8;
+ j->caplist[j->caps].handle = j->caps++;
+
+ strcpy(j->caplist[j->caps].desc, "Windows Sound System");
+ j->caplist[j->caps].captype = codec;
+ j->caplist[j->caps].cap = WSS;
+ j->caplist[j->caps].handle = j->caps++;
+
+ /* software ALAW codec, made from ULAW */
+ strcpy(j->caplist[j->caps].desc, "ALAW");
+ j->caplist[j->caps].captype = codec;
+ j->caplist[j->caps].cap = ALAW;
+ j->caplist[j->caps].handle = j->caps++;
+
+ /* version 12 of the 8020 does the following codecs in a broken way */
+ if (j->dsp.low != 0x20 || j->ver.low != 0x12) {
+ strcpy(j->caplist[j->caps].desc, "G.723.1 6.3kbps");
+ j->caplist[j->caps].captype = codec;
+ j->caplist[j->caps].cap = G723_63;
+ j->caplist[j->caps].handle = j->caps++;
+
+ strcpy(j->caplist[j->caps].desc, "G.723.1 5.3kbps");
+ j->caplist[j->caps].captype = codec;
+ j->caplist[j->caps].cap = G723_53;
+ j->caplist[j->caps].handle = j->caps++;
+
+ strcpy(j->caplist[j->caps].desc, "TrueSpeech 4.8kbps");
+ j->caplist[j->caps].captype = codec;
+ j->caplist[j->caps].cap = TS48;
+ j->caplist[j->caps].handle = j->caps++;
+
+ strcpy(j->caplist[j->caps].desc, "TrueSpeech 4.1kbps");
+ j->caplist[j->caps].captype = codec;
+ j->caplist[j->caps].cap = TS41;
+ j->caplist[j->caps].handle = j->caps++;
+ }
+
+ /* 8020 chips can do TS8.5 native, and 8021/8022 can load it */
+ if (j->dsp.low == 0x20 || j->flags.ts85_loaded) {
+ strcpy(j->caplist[j->caps].desc, "TrueSpeech 8.5kbps");
+ j->caplist[j->caps].captype = codec;
+ j->caplist[j->caps].cap = TS85;
+ j->caplist[j->caps].handle = j->caps++;
+ }
+
+ /* 8021 chips can do G728 */
+ if (j->dsp.low == 0x21) {
+ strcpy(j->caplist[j->caps].desc, "G.728 16kbps");
+ j->caplist[j->caps].captype = codec;
+ j->caplist[j->caps].cap = G728;
+ j->caplist[j->caps].handle = j->caps++;
+ }
+
+ /* 8021/8022 chips can do G729 if loaded */
+ if (j->dsp.low != 0x20 && j->flags.g729_loaded) {
+ strcpy(j->caplist[j->caps].desc, "G.729A 8kbps");
+ j->caplist[j->caps].captype = codec;
+ j->caplist[j->caps].cap = G729;
+ j->caplist[j->caps].handle = j->caps++;
+ }
+ if (j->dsp.low != 0x20 && j->flags.g729_loaded) {
+ strcpy(j->caplist[j->caps].desc, "G.729B 8kbps");
+ j->caplist[j->caps].captype = codec;
+ j->caplist[j->caps].cap = G729B;
+ j->caplist[j->caps].handle = j->caps++;
+ }
+}
+
+static int capabilities_check(IXJ *j, struct phone_capability *pcreq)
+{
+ int cnt;
+ int retval = 0;
+ for (cnt = 0; cnt < j->caps; cnt++) {
+ if (pcreq->captype == j->caplist[cnt].captype
+ && pcreq->cap == j->caplist[cnt].cap) {
+ retval = 1;
+ break;
+ }
+ }
+ return retval;
+}
+
+static long do_ixj_ioctl(struct file *file_p, unsigned int cmd, unsigned long arg)
+{
+ IXJ_TONE ti;
+ IXJ_FILTER jf;
+ IXJ_FILTER_RAW jfr;
+ void __user *argp = (void __user *)arg;
+ struct inode *inode = file_p->f_path.dentry->d_inode;
+ unsigned int minor = iminor(inode);
+ unsigned int raise, mant;
+ int board = NUM(inode);
+
+ IXJ *j = get_ixj(NUM(inode));
+
+ int retval = 0;
+
+ /*
+ * Set up locks to ensure that only one process is talking to the DSP at a time.
+ * This is necessary to keep the DSP from locking up.
+ */
+ while(test_and_set_bit(board, (void *)&j->busyflags) != 0)
+ schedule_timeout_interruptible(1);
+ if (ixjdebug & 0x0040)
+ printk("phone%d ioctl, cmd: 0x%x, arg: 0x%lx\n", minor, cmd, arg);
+ if (minor >= IXJMAX) {
+ clear_bit(board, &j->busyflags);
+ return -ENODEV;
+ }
+ /*
+ * Check ioctls only root can use.
+ */
+ if (!capable(CAP_SYS_ADMIN)) {
+ switch (cmd) {
+ case IXJCTL_TESTRAM:
+ case IXJCTL_HZ:
+ retval = -EPERM;
+ }
+ }
+ switch (cmd) {
+ case IXJCTL_TESTRAM:
+ ixj_testram(j);
+ retval = (j->ssr.high << 8) + j->ssr.low;
+ break;
+ case IXJCTL_CARDTYPE:
+ retval = j->cardtype;
+ break;
+ case IXJCTL_SERIAL:
+ retval = j->serial;
+ break;
+ case IXJCTL_VERSION:
+ {
+ char arg_str[100];
+ snprintf(arg_str, sizeof(arg_str),
+ "\nDriver version %i.%i.%i", IXJ_VER_MAJOR,
+ IXJ_VER_MINOR, IXJ_BLD_VER);
+ if (copy_to_user(argp, arg_str, strlen(arg_str)))
+ retval = -EFAULT;
+ }
+ break;
+ case PHONE_RING_CADENCE:
+ j->ring_cadence = arg;
+ break;
+ case IXJCTL_CIDCW:
+ if(arg) {
+ if (copy_from_user(&j->cid_send, argp, sizeof(PHONE_CID))) {
+ retval = -EFAULT;
+ break;
+ }
+ } else {
+ memset(&j->cid_send, 0, sizeof(PHONE_CID));
+ }
+ ixj_write_cidcw(j);
+ break;
+ /* Binary compatibility */
+ case OLD_PHONE_RING_START:
+ arg = 0;
+ /* Fall through */
+ case PHONE_RING_START:
+ if(arg) {
+ if (copy_from_user(&j->cid_send, argp, sizeof(PHONE_CID))) {
+ retval = -EFAULT;
+ break;
+ }
+ ixj_write_cid(j);
+ } else {
+ memset(&j->cid_send, 0, sizeof(PHONE_CID));
+ }
+ ixj_ring_start(j);
+ break;
+ case PHONE_RING_STOP:
+ j->flags.cringing = 0;
+ if(j->cadence_f[5].enable) {
+ j->cadence_f[5].state = 0;
+ }
+ ixj_ring_off(j);
+ break;
+ case PHONE_RING:
+ retval = ixj_ring(j);
+ break;
+ case PHONE_EXCEPTION:
+ retval = j->ex.bytes;
+ if(j->ex.bits.flash) {
+ j->flash_end = 0;
+ j->ex.bits.flash = 0;
+ }
+ j->ex.bits.pstn_ring = 0;
+ j->ex.bits.caller_id = 0;
+ j->ex.bits.pstn_wink = 0;
+ j->ex.bits.f0 = 0;
+ j->ex.bits.f1 = 0;
+ j->ex.bits.f2 = 0;
+ j->ex.bits.f3 = 0;
+ j->ex.bits.fc0 = 0;
+ j->ex.bits.fc1 = 0;
+ j->ex.bits.fc2 = 0;
+ j->ex.bits.fc3 = 0;
+ j->ex.bits.reserved = 0;
+ break;
+ case PHONE_HOOKSTATE:
+ j->ex.bits.hookstate = 0;
+ retval = j->hookstate;	/* j->r_hook */
+ break;
+ case IXJCTL_SET_LED:
+ LED_SetState(arg, j);
+ break;
+ case PHONE_FRAME:
+ retval = set_base_frame(j, arg);
+ break;
+ case PHONE_REC_CODEC:
+ retval = set_rec_codec(j, arg);
+ break;
+ case PHONE_VAD:
+ ixj_vad(j, arg);
+ break;
+ case PHONE_REC_START:
+ ixj_record_start(j);
+ break;
+ case PHONE_REC_STOP:
+ ixj_record_stop(j);
+ break;
+ case PHONE_REC_DEPTH:
+ set_rec_depth(j, arg);
+ break;
+ case PHONE_REC_VOLUME:
+ if(arg == -1) {
+ retval = get_rec_volume(j);
+ }
+ else {
+ set_rec_volume(j, arg);
+ retval = arg;
+ }
+ break;
+ case PHONE_REC_VOLUME_LINEAR:
+ if(arg == -1) {
+ retval = get_rec_volume_linear(j);
+ }
+ else {
+ set_rec_volume_linear(j, arg);
+ retval = arg;
+ }
+ break;
+ case IXJCTL_DTMF_PRESCALE:
+ if(arg == -1) {
+ retval = get_dtmf_prescale(j);
+ }
+ else {
+ set_dtmf_prescale(j, arg);
+ retval = arg;
+ }
+ break;
+ case PHONE_REC_LEVEL:
+ retval = get_rec_level(j);
+ break;
+ case IXJCTL_SC_RXG:
+ retval = ixj_siadc(j, arg);
+ break;
+ case IXJCTL_SC_TXG:
+ retval = ixj_sidac(j, arg);
+ break;
+ case IXJCTL_AEC_START:
+ ixj_aec_start(j, arg);
+ break;
+ case IXJCTL_AEC_STOP:
+ aec_stop(j);
+ break;
+ case IXJCTL_AEC_GET_LEVEL:
+ retval = j->aec_level;
+ break;
+ case PHONE_PLAY_CODEC:
+ retval = set_play_codec(j, arg);
+ break;
+ case PHONE_PLAY_START:
+ retval = ixj_play_start(j);
+ break;
+ case PHONE_PLAY_STOP:
+ ixj_play_stop(j);
+ break;
+ case PHONE_PLAY_DEPTH:
+ set_play_depth(j, arg);
+ break;
+ case PHONE_PLAY_VOLUME:
+ if(arg == -1) {
+ retval = get_play_volume(j);
+ }
+ else {
+ set_play_volume(j, arg);
+ retval = arg;
+ }
+ break;
+ case PHONE_PLAY_VOLUME_LINEAR:
+ if(arg == -1) {
+ retval = get_play_volume_linear(j);
+ }
+ else {
+ set_play_volume_linear(j, arg);
+ retval = arg;
+ }
+ break;
+ case PHONE_PLAY_LEVEL:
+ retval = get_play_level(j);
+ break;
+ case IXJCTL_DSP_TYPE:
+ retval = (j->dsp.high << 8) + j->dsp.low;
+ break;
+ case IXJCTL_DSP_VERSION:
+ retval = (j->ver.high << 8) + j->ver.low;
+ break;
+ case IXJCTL_HZ:
+ hertz = arg;
+ break;
+ case IXJCTL_RATE:
+ if (arg > hertz)
+ retval = -1;
+ else
+ samplerate = arg;
+ break;
+ case IXJCTL_DRYBUFFER_READ:
+ put_user(j->drybuffer, (unsigned long __user *) argp);
+ break;
+ case IXJCTL_DRYBUFFER_CLEAR:
+ j->drybuffer = 0;
+ break;
+ case IXJCTL_FRAMES_READ:
+ put_user(j->framesread, (unsigned long __user *) argp);
+ break;
+ case IXJCTL_FRAMES_WRITTEN:
+ put_user(j->frameswritten, (unsigned long __user *) argp);
+ break;
+ case IXJCTL_READ_WAIT:
+ put_user(j->read_wait, (unsigned long __user *) argp);
+ break;
+ case IXJCTL_WRITE_WAIT:
+ put_user(j->write_wait, (unsigned long __user *) argp);
+ break;
+ case PHONE_MAXRINGS:
+ j->maxrings = arg;
+ break;
+ case PHONE_SET_TONE_ON_TIME:
+ ixj_set_tone_on(arg, j);
+ break;
+ case PHONE_SET_TONE_OFF_TIME:
+ ixj_set_tone_off(arg, j);
+ break;
+ case PHONE_GET_TONE_ON_TIME:
+ if (ixj_get_tone_on(j)) {
+ retval = -1;
+ } else {
+ retval = (j->ssr.high << 8) + j->ssr.low;
+ }
+ break;
+ case PHONE_GET_TONE_OFF_TIME:
+ if (ixj_get_tone_off(j)) {
+ retval = -1;
+ } else {
+ retval = (j->ssr.high << 8) + j->ssr.low;
+ }
+ break;
+ case PHONE_PLAY_TONE:
+ if (!j->tone_state)
+ retval = ixj_play_tone(j, arg);
+ else
+ retval = -1;
+ break;
+ case PHONE_GET_TONE_STATE:
+ retval = j->tone_state;
+ break;
+ case PHONE_DTMF_READY:
+ retval = j->ex.bits.dtmf_ready;
+ break;
+ case PHONE_GET_DTMF:
+ if (ixj_hookstate(j)) {
+ if (j->dtmf_rp != j->dtmf_wp) {
+ retval = j->dtmfbuffer[j->dtmf_rp];
+ j->dtmf_rp++;
+ if (j->dtmf_rp == 79)
+ j->dtmf_rp = 0;
+ if (j->dtmf_rp == j->dtmf_wp) {
+ j->ex.bits.dtmf_ready = j->dtmf_rp = j->dtmf_wp = 0;
+ }
+ }
+ }
+ break;
+ case PHONE_GET_DTMF_ASCII:
+ if (ixj_hookstate(j)) {
+ if (j->dtmf_rp != j->dtmf_wp) {
+ switch (j->dtmfbuffer[j->dtmf_rp]) {
+ case 10:
+ retval = 42; /* '*'; */
+
+ break;
+ case 11:
+ retval = 48; /*'0'; */
+
+ break;
+ case 12:
+ retval = 35; /*'#'; */
+
+ break;
+ case 28:
+ retval = 65; /*'A'; */
+
+ break;
+ case 29:
+ retval = 66; /*'B'; */
+
+ break;
+ case 30:
+ retval = 67; /*'C'; */
+
+ break;
+ case 31:
+ retval = 68; /*'D'; */
+
+ break;
+ default:
+ retval = 48 + j->dtmfbuffer[j->dtmf_rp];
+ break;
+ }
+ j->dtmf_rp++;
+ if (j->dtmf_rp == 79)
+ j->dtmf_rp = 0;
+ if(j->dtmf_rp == j->dtmf_wp)
+ {
+ j->ex.bits.dtmf_ready = j->dtmf_rp = j->dtmf_wp = 0;
+ }
+ }
+ }
+ break;
+ case PHONE_DTMF_OOB:
+ j->flags.dtmf_oob = arg;
+ break;
+ case PHONE_DIALTONE:
+ ixj_dialtone(j);
+ break;
+ case PHONE_BUSY:
+ ixj_busytone(j);
+ break;
+ case PHONE_RINGBACK:
+ ixj_ringback(j);
+ break;
+ case PHONE_WINK:
+ if(j->cardtype == QTI_PHONEJACK)
+ retval = -1;
+ else
+ retval = ixj_wink(j);
+ break;
+ case PHONE_CPT_STOP:
+ ixj_cpt_stop(j);
+ break;
+ case PHONE_QUERY_CODEC:
+ {
+ struct phone_codec_data pd;
+ int val;
+ int proto_size[] = {
+ -1,
+ 12, 10, 16, 9, 8, 48, 5,
+ 40, 40, 80, 40, 40, 6
+ };
+ if(copy_from_user(&pd, argp, sizeof(pd))) {
+ retval = -EFAULT;
+ break;
+ }
+ if(pd.type<1 || pd.type>13) {
+ retval = -EPROTONOSUPPORT;
+ break;
+ }
+ if(pd.type<G729)
+ val=proto_size[pd.type];
+ else switch(j->baseframe.low)
+ {
+ case 0xA0:val=2*proto_size[pd.type];break;
+ case 0x50:val=proto_size[pd.type];break;
+ default:val=proto_size[pd.type]*3;break;
+ }
+ pd.buf_min=pd.buf_max=pd.buf_opt=val;
+ if(copy_to_user(argp, &pd, sizeof(pd)))
+ retval = -EFAULT;
+ break;
+ }
+ case IXJCTL_DSP_IDLE:
+ idle(j);
+ break;
+ case IXJCTL_MIXER:
+ if ((arg & 0xff) == 0xff)
+ retval = ixj_get_mixer(arg, j);
+ else
+ ixj_mixer(arg, j);
+ break;
+ case IXJCTL_DAA_COEFF_SET:
+ switch (arg) {
+ case DAA_US:
+ DAA_Coeff_US(j);
+ retval = ixj_daa_write(j);
+ break;
+ case DAA_UK:
+ DAA_Coeff_UK(j);
+ retval = ixj_daa_write(j);
+ break;
+ case DAA_FRANCE:
+ DAA_Coeff_France(j);
+ retval = ixj_daa_write(j);
+ break;
+ case DAA_GERMANY:
+ DAA_Coeff_Germany(j);
+ retval = ixj_daa_write(j);
+ break;
+ case DAA_AUSTRALIA:
+ DAA_Coeff_Australia(j);
+ retval = ixj_daa_write(j);
+ break;
+ case DAA_JAPAN:
+ DAA_Coeff_Japan(j);
+ retval = ixj_daa_write(j);
+ break;
+ default:
+ retval = 1;
+ break;
+ }
+ break;
+ case IXJCTL_DAA_AGAIN:
+ ixj_daa_cr4(j, arg | 0x02);
+ break;
+ case IXJCTL_PSTN_LINETEST:
+ retval = ixj_linetest(j);
+ break;
+ case IXJCTL_VMWI:
+ ixj_write_vmwi(j, arg);
+ break;
+ case IXJCTL_CID:
+ if (copy_to_user(argp, &j->cid, sizeof(PHONE_CID)))
+ retval = -EFAULT;
+ j->ex.bits.caller_id = 0;
+ break;
+ case IXJCTL_WINK_DURATION:
+ j->winktime = arg;
+ break;
+ case IXJCTL_PORT:
+ if (arg)
+ retval = ixj_set_port(j, arg);
+ else
+ retval = j->port;
+ break;
+ case IXJCTL_POTS_PSTN:
+ retval = ixj_set_pots(j, arg);
+ break;
+ case PHONE_CAPABILITIES:
+ add_caps(j);
+ retval = j->caps;
+ break;
+ case PHONE_CAPABILITIES_LIST:
+ add_caps(j);
+ if (copy_to_user(argp, j->caplist, sizeof(struct phone_capability) * j->caps))
+ retval = -EFAULT;
+ break;
+ case PHONE_CAPABILITIES_CHECK:
+ {
+ struct phone_capability cap;
+ if (copy_from_user(&cap, argp, sizeof(cap)))
+ retval = -EFAULT;
+ else {
+ add_caps(j);
+ retval = capabilities_check(j, &cap);
+ }
+ }
+ break;
+ case PHONE_PSTN_SET_STATE:
+ daa_set_mode(j, arg);
+ break;
+ case PHONE_PSTN_GET_STATE:
+ retval = j->daa_mode;
+ j->ex.bits.pstn_ring = 0;
+ break;
+ case IXJCTL_SET_FILTER:
+ if (copy_from_user(&jf, argp, sizeof(jf)))
+ retval = -EFAULT;
+ else
+ retval = ixj_init_filter(j, &jf);
+ break;
+ case IXJCTL_SET_FILTER_RAW:
+ if (copy_from_user(&jfr, argp, sizeof(jfr)))
+ retval = -EFAULT;
+ else
+ retval = ixj_init_filter_raw(j, &jfr);
+ break;
+ case IXJCTL_GET_FILTER_HIST:
+ if(arg<0||arg>3)
+ retval = -EINVAL;
+ else
+ retval = j->filter_hist[arg];
+ break;
+ case IXJCTL_INIT_TONE:
+ if (copy_from_user(&ti, argp, sizeof(ti)))
+ retval = -EFAULT;
+ else
+ retval = ixj_init_tone(j, &ti);
+ break;
+ case IXJCTL_TONE_CADENCE:
+ retval = ixj_build_cadence(j, argp);
+ break;
+ case IXJCTL_FILTER_CADENCE:
+ retval = ixj_build_filter_cadence(j, argp);
+ break;
+ case IXJCTL_SIGCTL:
+ if (copy_from_user(&j->sigdef, argp, sizeof(IXJ_SIGDEF))) {
+ retval = -EFAULT;
+ break;
+ }
+ j->ixj_signals[j->sigdef.event] = j->sigdef.signal;
+ if(j->sigdef.event < 33) {
+ raise = 1;
+ for(mant = 0; mant < j->sigdef.event; mant++){
+ raise *= 2;
+ }
+ if(j->sigdef.signal)
+ j->ex_sig.bytes |= raise;
+ else
+ j->ex_sig.bytes &= (raise^0xffff);
+ }
+ break;
+ case IXJCTL_INTERCOM_STOP:
+ if(arg < 0 || arg >= IXJMAX) {
+ 	retval = -EINVAL;
+ 	break;
+ }
+ j->intercom = -1;
+ ixj_record_stop(j);
+ ixj_play_stop(j);
+ idle(j);
+ get_ixj(arg)->intercom = -1;
+ ixj_record_stop(get_ixj(arg));
+ ixj_play_stop(get_ixj(arg));
+ idle(get_ixj(arg));
+ break;
+ case IXJCTL_INTERCOM_START:
+ if(arg < 0 || arg >= IXJMAX) {
+ 	retval = -EINVAL;
+ 	break;
+ }
+ j->intercom = arg;
+ ixj_record_start(j);
+ ixj_play_start(j);
+ get_ixj(arg)->intercom = board;
+ ixj_play_start(get_ixj(arg));
+ ixj_record_start(get_ixj(arg));
+ break;
+ }
+ if (ixjdebug & 0x0040)
+ printk("phone%d ioctl end, cmd: 0x%x, arg: 0x%lx\n", minor, cmd, arg);
+ clear_bit(board, &j->busyflags);
+ return retval;
+}
+
+static long ixj_ioctl(struct file *file_p, unsigned int cmd, unsigned long arg)
+{
+ long ret;
+ mutex_lock(&ixj_mutex);
+ ret = do_ixj_ioctl(file_p, cmd, arg);
+ mutex_unlock(&ixj_mutex);
+ return ret;
+}
+
+static int ixj_fasync(int fd, struct file *file_p, int mode)
+{
+ IXJ *j = get_ixj(NUM(file_p->f_path.dentry->d_inode));
+
+ return fasync_helper(fd, file_p, mode, &j->async_queue);
+}
+
+static const struct file_operations ixj_fops =
+{
+ .owner = THIS_MODULE,
+ .read = ixj_enhanced_read,
+ .write = ixj_enhanced_write,
+ .poll = ixj_poll,
+ .unlocked_ioctl = ixj_ioctl,
+ .release = ixj_release,
+ .fasync = ixj_fasync,
+ .llseek = default_llseek,
+};
+
+static int ixj_linetest(IXJ *j)
+{
+ j->flags.pstncheck = 1; /* Testing */
+ j->flags.pstn_present = 0; /* Assume the line is not there */
+
+ daa_int_read(j);	/* Clear DAA Interrupt flags */
+
+ /* Hold all relays in the normally de-energized position. */
+
+ j->pld_slicw.bits.rly1 = 0;
+ j->pld_slicw.bits.rly2 = 0;
+ j->pld_slicw.bits.rly3 = 0;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ j->pld_scrw.bits.daafsyncen = 0; /* Turn off DAA Frame Sync */
+
+ outb_p(j->pld_scrw.byte, j->XILINXbase);
+ j->pld_slicr.byte = inb_p(j->XILINXbase + 0x01);
+ if (j->pld_slicr.bits.potspstn) {
+ j->flags.pots_pstn = 1;
+ j->flags.pots_correct = 0;
+ LED_SetState(0x4, j);
+ } else {
+ j->flags.pots_pstn = 0;
+ j->pld_slicw.bits.rly1 = 0;
+ j->pld_slicw.bits.rly2 = 0;
+ j->pld_slicw.bits.rly3 = 1;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ j->pld_scrw.bits.daafsyncen = 0; /* Turn off DAA Frame Sync */
+
+ outb_p(j->pld_scrw.byte, j->XILINXbase);
+ daa_set_mode(j, SOP_PU_CONVERSATION);
+ msleep(1000);
+ daa_int_read(j);
+ daa_set_mode(j, SOP_PU_RESET);
+ if (j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.bitreg.VDD_OK) {
+ j->flags.pots_correct = 0; /* Should not be line voltage on POTS port. */
+ LED_SetState(0x4, j);
+ j->pld_slicw.bits.rly3 = 0;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ } else {
+ j->flags.pots_correct = 1;
+ LED_SetState(0x8, j);
+ j->pld_slicw.bits.rly1 = 1;
+ j->pld_slicw.bits.rly2 = 0;
+ j->pld_slicw.bits.rly3 = 0;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ }
+ }
+ j->pld_slicw.bits.rly3 = 0;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ daa_set_mode(j, SOP_PU_CONVERSATION);
+ msleep(1000);
+ daa_int_read(j);
+ daa_set_mode(j, SOP_PU_RESET);
+ if (j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.bitreg.VDD_OK) {
+ j->pstn_sleeptil = jiffies + (hertz / 4);
+ j->flags.pstn_present = 1;
+ } else {
+ j->flags.pstn_present = 0;
+ }
+ if (j->flags.pstn_present) {
+ if (j->flags.pots_correct) {
+ LED_SetState(0xA, j);
+ } else {
+ LED_SetState(0x6, j);
+ }
+ } else {
+ if (j->flags.pots_correct) {
+ LED_SetState(0x9, j);
+ } else {
+ LED_SetState(0x5, j);
+ }
+ }
+ j->flags.pstncheck = 0; /* Testing */
+ return j->flags.pstn_present;
+}
+
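+/*
+ * Probe and initialise one card: read the DSP type and version, work out
+ * the card type and claim its XILINX I/O region, load the default US DAA
+ * coefficients on the Internet LineJACK, set up default volumes, signals
+ * and cadence state, then register the device with the phone subsystem.
+ */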
+static int ixj_selfprobe(IXJ *j)
+{
+ unsigned short cmd;
+ int cnt;
+ BYTES bytes;
+
+ init_waitqueue_head(&j->poll_q);
+ init_waitqueue_head(&j->read_q);
+ init_waitqueue_head(&j->write_q);
+
+ while(atomic_read(&j->DSPWrite) > 0)
+ atomic_dec(&j->DSPWrite);
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "Write IDLE to Software Control Register\n");
+ ixj_WriteDSPCommand(0x0FE0, j); /* Put the DSP in full power mode. */
+
+ if (ixj_WriteDSPCommand(0x0000, j)) /* Write IDLE to Software Control Register */
+ return -1;
+/* The read values of the SSR should be 0x00 for the IDLE command */
+ if (j->ssr.low || j->ssr.high)
+ return -1;
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "Get Device ID Code\n");
+ if (ixj_WriteDSPCommand(0x3400, j)) /* Get Device ID Code */
+ return -1;
+ j->dsp.low = j->ssr.low;
+ j->dsp.high = j->ssr.high;
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "Get Device Version Code\n");
+ if (ixj_WriteDSPCommand(0x3800, j)) /* Get Device Version Code */
+ return -1;
+ j->ver.low = j->ssr.low;
+ j->ver.high = j->ssr.high;
+ if (!j->cardtype) {
+ if (j->dsp.low == 0x21) {
+ bytes.high = bytes.low = inb_p(j->XILINXbase + 0x02);
+ outb_p(bytes.low ^ 0xFF, j->XILINXbase + 0x02);
+/* Test for Internet LineJACK or Internet PhoneJACK Lite */
+ bytes.low = inb_p(j->XILINXbase + 0x02);
+ if (bytes.low == bytes.high) /* Register is read only on */
+ /* Internet PhoneJack Lite */
+ {
+ j->cardtype = QTI_PHONEJACK_LITE;
+ if (!request_region(j->XILINXbase, 4, "ixj control")) {
+ printk(KERN_INFO "ixj: can't get I/O address 0x%x\n", j->XILINXbase);
+ return -1;
+ }
+ j->pld_slicw.pcib.e1 = 1;
+ outb_p(j->pld_slicw.byte, j->XILINXbase);
+ } else {
+ j->cardtype = QTI_LINEJACK;
+
+ if (!request_region(j->XILINXbase, 8, "ixj control")) {
+ printk(KERN_INFO "ixj: can't get I/O address 0x%x\n", j->XILINXbase);
+ return -1;
+ }
+ }
+ } else if (j->dsp.low == 0x22) {
+ j->cardtype = QTI_PHONEJACK_PCI;
+ request_region(j->XILINXbase, 4, "ixj control");
+ j->pld_slicw.pcib.e1 = 1;
+ outb_p(j->pld_slicw.byte, j->XILINXbase);
+ } else
+ j->cardtype = QTI_PHONEJACK;
+ } else {
+ switch (j->cardtype) {
+ case QTI_PHONEJACK:
+ if (j->dsp.low != 0x20) {
+ j->dsp.high = 0x80;
+ j->dsp.low = 0x20;
+ ixj_WriteDSPCommand(0x3800, j);
+ j->ver.low = j->ssr.low;
+ j->ver.high = j->ssr.high;
+ }
+ break;
+ case QTI_LINEJACK:
+ if (!request_region(j->XILINXbase, 8, "ixj control")) {
+ printk(KERN_INFO "ixj: can't get I/O address 0x%x\n", j->XILINXbase);
+ return -1;
+ }
+ break;
+ case QTI_PHONEJACK_LITE:
+ case QTI_PHONEJACK_PCI:
+ if (!request_region(j->XILINXbase, 4, "ixj control")) {
+ printk(KERN_INFO "ixj: can't get I/O address 0x%x\n", j->XILINXbase);
+ return -1;
+ }
+ j->pld_slicw.pcib.e1 = 1;
+ outb_p(j->pld_slicw.byte, j->XILINXbase);
+ break;
+ case QTI_PHONECARD:
+ break;
+ }
+ }
+ if (j->dsp.low == 0x20 || j->cardtype == QTI_PHONEJACK_LITE || j->cardtype == QTI_PHONEJACK_PCI) {
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "Write CODEC config to Software Control Register\n");
+ if (ixj_WriteDSPCommand(0xC462, j)) /* Write CODEC config to Software Control Register */
+ return -1;
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "Write CODEC timing to Software Control Register\n");
+ if (j->cardtype == QTI_PHONEJACK) {
+ cmd = 0x9FF2;
+ } else {
+ cmd = 0x9FF5;
+ }
+ if (ixj_WriteDSPCommand(cmd, j)) /* Write CODEC timing to Software Control Register */
+ return -1;
+ } else {
+ if (set_base_frame(j, 30) != 30)
+ return -1;
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "Write CODEC config to Software Control Register\n");
+ if (j->cardtype == QTI_PHONECARD) {
+ if (ixj_WriteDSPCommand(0xC528, j)) /* Write CODEC config to Software Control Register */
+ return -1;
+ }
+ if (j->cardtype == QTI_LINEJACK) {
+ if (ixj_WriteDSPCommand(0xC528, j)) /* Write CODEC config to Software Control Register */
+ return -1;
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "Turn on the PLD Clock at 8Khz\n");
+ j->pld_clock.byte = 0;
+ outb_p(j->pld_clock.byte, j->XILINXbase + 0x04);
+ }
+ }
+
+ if (j->dsp.low == 0x20) {
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "Configure GPIO pins\n");
+ j->gpio.bytes.high = 0x09;
+/* bytes.low = 0xEF; 0xF7 */
+ j->gpio.bits.gpio1 = 1;
+ j->gpio.bits.gpio2 = 1;
+ j->gpio.bits.gpio3 = 0;
+ j->gpio.bits.gpio4 = 1;
+ j->gpio.bits.gpio5 = 1;
+ j->gpio.bits.gpio6 = 1;
+ j->gpio.bits.gpio7 = 1;
+ ixj_WriteDSPCommand(j->gpio.word, j); /* Set GPIO pin directions */
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "Enable SLIC\n");
+ j->gpio.bytes.high = 0x0B;
+ j->gpio.bytes.low = 0x00;
+ j->gpio.bits.gpio1 = 0;
+ j->gpio.bits.gpio2 = 1;
+ j->gpio.bits.gpio5 = 0;
+ ixj_WriteDSPCommand(j->gpio.word, j); /* send the ring stop signal */
+ j->port = PORT_POTS;
+ } else {
+ if (j->cardtype == QTI_LINEJACK) {
+ LED_SetState(0x1, j);
+ msleep(100);
+ LED_SetState(0x2, j);
+ msleep(100);
+ LED_SetState(0x4, j);
+ msleep(100);
+ LED_SetState(0x8, j);
+ msleep(100);
+ LED_SetState(0x0, j);
+ daa_get_version(j);
+ if (ixjdebug & 0x0002)
+ printk("Loading DAA Coefficients\n");
+ DAA_Coeff_US(j);
+ if (!ixj_daa_write(j)) {
+ printk("DAA write failed on board %d\n", j->board);
+ return -1;
+ }
+ if(!ixj_daa_cid_reset(j)) {
+ printk("DAA CID reset failed on board %d\n", j->board);
+ return -1;
+ }
+ j->flags.pots_correct = 0;
+ j->flags.pstn_present = 0;
+ ixj_linetest(j);
+ if (j->flags.pots_correct) {
+ j->pld_scrw.bits.daafsyncen = 0; /* Turn off DAA Frame Sync */
+
+ outb_p(j->pld_scrw.byte, j->XILINXbase);
+ j->pld_slicw.bits.rly1 = 1;
+ j->pld_slicw.bits.spken = 1;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ SLIC_SetState(PLD_SLIC_STATE_STANDBY, j);
+/* SLIC_SetState(PLD_SLIC_STATE_ACTIVE, j); */
+ j->port = PORT_POTS;
+ }
+ ixj_set_port(j, PORT_PSTN);
+ ixj_set_pots(j, 1);
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "Enable Mixer\n");
+ ixj_mixer(0x0000, j); /*Master Volume Left unmute 0db */
+ ixj_mixer(0x0100, j); /*Master Volume Right unmute 0db */
+
+ ixj_mixer(0x0203, j); /*Voice Left Volume unmute 6db */
+ ixj_mixer(0x0303, j); /*Voice Right Volume unmute 6db */
+
+ ixj_mixer(0x0480, j); /*FM Left mute */
+ ixj_mixer(0x0580, j); /*FM Right mute */
+
+ ixj_mixer(0x0680, j); /*CD Left mute */
+ ixj_mixer(0x0780, j); /*CD Right mute */
+
+ ixj_mixer(0x0880, j); /*Line Left mute */
+ ixj_mixer(0x0980, j); /*Line Right mute */
+
+ ixj_mixer(0x0A80, j); /*Aux left mute */
+ ixj_mixer(0x0B80, j); /*Aux right mute */
+
+ ixj_mixer(0x0C00, j); /*Mono1 unmute 12db */
+ ixj_mixer(0x0D80, j); /*Mono2 mute */
+
+ ixj_mixer(0x0E80, j); /*Mic mute */
+
+ ixj_mixer(0x0F00, j); /*Mono Out Volume unmute 0db */
+
+ ixj_mixer(0x1000, j); /*Voice Left and Right out only */
+ ixj_mixer(0x110C, j);
+
+
+ ixj_mixer(0x1200, j); /*Mono1 switch on mixer left */
+ ixj_mixer(0x1401, j);
+
+ ixj_mixer(0x1300, j); /*Mono1 switch on mixer right */
+ ixj_mixer(0x1501, j);
+
+ ixj_mixer(0x1700, j); /*Clock select */
+
+ ixj_mixer(0x1800, j); /*ADC input from mixer */
+
+ ixj_mixer(0x1901, j); /*Mic gain 30db */
+
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "Setting Default US Ring Cadence Detection\n");
+ j->cadence_f[4].state = 0;
+ j->cadence_f[4].on1 = 0; /*Cadence Filter 4 is used for PSTN ring cadence */
+ j->cadence_f[4].off1 = 0;
+ j->cadence_f[4].on2 = 0;
+ j->cadence_f[4].off2 = 0;
+ j->cadence_f[4].on3 = 0;
+ j->cadence_f[4].off3 = 0; /* These should represent standard US ring pulse. */
+ j->pstn_last_rmr = jiffies;
+
+ } else {
+ if (j->cardtype == QTI_PHONECARD) {
+ ixj_WriteDSPCommand(0xCF07, j);
+ ixj_WriteDSPCommand(0x00B0, j);
+ ixj_set_port(j, PORT_SPEAKER);
+ } else {
+ ixj_set_port(j, PORT_POTS);
+ SLIC_SetState(PLD_SLIC_STATE_STANDBY, j);
+/* SLIC_SetState(PLD_SLIC_STATE_ACTIVE, j); */
+ }
+ }
+ }
+
+ j->intercom = -1;
+ j->framesread = j->frameswritten = 0;
+ j->read_wait = j->write_wait = 0;
+ j->rxreadycheck = j->txreadycheck = 0;
+
+ /* initialise the DTMF prescale to a sensible value */
+ if (j->cardtype == QTI_LINEJACK) {
+ set_dtmf_prescale(j, 0x10);
+ } else {
+ set_dtmf_prescale(j, 0x40);
+ }
+ set_play_volume(j, 0x100);
+ set_rec_volume(j, 0x100);
+
+ if (ixj_WriteDSPCommand(0x0000, j)) /* Write IDLE to Software Control Register */
+ return -1;
+/* The read values of the SSR should be 0x00 for the IDLE command */
+ if (j->ssr.low || j->ssr.high)
+ return -1;
+
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "Enable Line Monitor\n");
+
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "Set Line Monitor to Asyncronous Mode\n");
+
+ if (ixj_WriteDSPCommand(0x7E01, j)) /* Asynchronous Line Monitor */
+ return -1;
+
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "Enable DTMF Detectors\n");
+
+ if (ixj_WriteDSPCommand(0x5151, j)) /* Enable DTMF detection */
+ return -1;
+
+ if (ixj_WriteDSPCommand(0x6E01, j))	/* Set Asynchronous Tone Generation */
+ return -1;
+
+ set_rec_depth(j, 2); /* Set Record Channel Limit to 2 frames */
+
+ set_play_depth(j, 2); /* Set Playback Channel Limit to 2 frames */
+
+ j->ex.bits.dtmf_ready = 0;
+ j->dtmf_state = 0;
+ j->dtmf_wp = j->dtmf_rp = 0;
+ j->rec_mode = j->play_mode = -1;
+ j->flags.ringing = 0;
+ j->maxrings = MAXRINGS;
+ j->ring_cadence = USA_RING_CADENCE;
+ j->drybuffer = 0;
+ j->winktime = 320;
+ j->flags.dtmf_oob = 0;
+ for (cnt = 0; cnt < 4; cnt++)
+ j->cadence_f[cnt].enable = 0;
+ /* must be a device on the specified address */
+ ixj_WriteDSPCommand(0x0FE3, j); /* Put the DSP in 1/5 power mode. */
+
+ /* Set up the default signals for events */
+ for (cnt = 0; cnt < 35; cnt++)
+ j->ixj_signals[cnt] = SIGIO;
+
+ /* Set the exception signal enable flags */
+ j->ex_sig.bits.dtmf_ready = j->ex_sig.bits.hookstate = j->ex_sig.bits.flash = j->ex_sig.bits.pstn_ring =
+ j->ex_sig.bits.caller_id = j->ex_sig.bits.pstn_wink = j->ex_sig.bits.f0 = j->ex_sig.bits.f1 = j->ex_sig.bits.f2 =
+ j->ex_sig.bits.f3 = j->ex_sig.bits.fc0 = j->ex_sig.bits.fc1 = j->ex_sig.bits.fc2 = j->ex_sig.bits.fc3 = 1;
+#ifdef IXJ_DYN_ALLOC
+ j->fskdata = NULL;
+#endif
+ j->fskdcnt = 0;
+ j->cidcw_wait = 0;
+
+ /* Register with the Telephony for Linux subsystem */
+ j->p.f_op = &ixj_fops;
+ j->p.open = ixj_open;
+ j->p.board = j->board;
+ phone_register_device(&j->p, PHONE_UNIT_ANY);
+
+ ixj_init_timer(j);
+ ixj_add_timer(j);
+ return 0;
+}
+
+/*
+ * Exported service for pcmcia card handling
+ */
+
+IXJ *ixj_pcmcia_probe(unsigned long dsp, unsigned long xilinx)
+{
+ IXJ *j = ixj_alloc();
+
+ j->board = 0;
+
+ j->DSPbase = dsp;
+ j->XILINXbase = xilinx;
+ j->cardtype = QTI_PHONECARD;
+ ixj_selfprobe(j);
+ return j;
+}
+
+EXPORT_SYMBOL(ixj_pcmcia_probe);		/* For PCMCIA */
+
+static int ixj_get_status_proc(char *buf)
+{
+ int len;
+ int cnt;
+ IXJ *j;
+ len = 0;
+ len += sprintf(buf + len, "\nDriver version %i.%i.%i", IXJ_VER_MAJOR, IXJ_VER_MINOR, IXJ_BLD_VER);
+ len += sprintf(buf + len, "\nsizeof IXJ struct %Zd bytes", sizeof(IXJ));
+ len += sprintf(buf + len, "\nsizeof DAA struct %Zd bytes", sizeof(DAA_REGS));
+ len += sprintf(buf + len, "\nUsing old telephony API");
+ len += sprintf(buf + len, "\nDebug Level %d\n", ixjdebug);
+
+ for (cnt = 0; cnt < IXJMAX; cnt++) {
+ j = get_ixj(cnt);
+ if(j==NULL)
+ continue;
+ if (j->DSPbase) {
+ len += sprintf(buf + len, "\nCard Num %d", cnt);
+ len += sprintf(buf + len, "\nDSP Base Address 0x%4.4x", j->DSPbase);
+ if (j->cardtype != QTI_PHONEJACK)
+ len += sprintf(buf + len, "\nXILINX Base Address 0x%4.4x", j->XILINXbase);
+ len += sprintf(buf + len, "\nDSP Type %2.2x%2.2x", j->dsp.high, j->dsp.low);
+ len += sprintf(buf + len, "\nDSP Version %2.2x.%2.2x", j->ver.high, j->ver.low);
+ len += sprintf(buf + len, "\nSerial Number %8.8x", j->serial);
+ switch (j->cardtype) {
+ case (QTI_PHONEJACK):
+ len += sprintf(buf + len, "\nCard Type = Internet PhoneJACK");
+ break;
+ case (QTI_LINEJACK):
+ len += sprintf(buf + len, "\nCard Type = Internet LineJACK");
+ if (j->flags.g729_loaded)
+ len += sprintf(buf + len, " w/G.729 A/B");
+ len += sprintf(buf + len, " Country = %d", j->daa_country);
+ break;
+ case (QTI_PHONEJACK_LITE):
+ len += sprintf(buf + len, "\nCard Type = Internet PhoneJACK Lite");
+ if (j->flags.g729_loaded)
+ len += sprintf(buf + len, " w/G.729 A/B");
+ break;
+ case (QTI_PHONEJACK_PCI):
+ len += sprintf(buf + len, "\nCard Type = Internet PhoneJACK PCI");
+ if (j->flags.g729_loaded)
+ len += sprintf(buf + len, " w/G.729 A/B");
+ break;
+ case (QTI_PHONECARD):
+ len += sprintf(buf + len, "\nCard Type = Internet PhoneCARD");
+ if (j->flags.g729_loaded)
+ len += sprintf(buf + len, " w/G.729 A/B");
+ len += sprintf(buf + len, "\nSmart Cable %spresent", j->pccr1.bits.drf ? "not " : "");
+ if (!j->pccr1.bits.drf)
+ len += sprintf(buf + len, "\nSmart Cable type %d", j->flags.pcmciasct);
+ len += sprintf(buf + len, "\nSmart Cable state %d", j->flags.pcmciastate);
+ break;
+ default:
+ len += sprintf(buf + len, "\nCard Type = %d", j->cardtype);
+ break;
+ }
+ len += sprintf(buf + len, "\nReaders %d", j->readers);
+ len += sprintf(buf + len, "\nWriters %d", j->writers);
+ add_caps(j);
+ len += sprintf(buf + len, "\nCapabilities %d", j->caps);
+ if (j->dsp.low != 0x20)
+ len += sprintf(buf + len, "\nDSP Processor load %d", j->proc_load);
+ if (j->flags.cidsent)
+ len += sprintf(buf + len, "\nCaller ID data sent");
+ else
+ len += sprintf(buf + len, "\nCaller ID data not sent");
+
+ len += sprintf(buf + len, "\nPlay CODEC ");
+ switch (j->play_codec) {
+ case G723_63:
+ len += sprintf(buf + len, "G.723.1 6.3");
+ break;
+ case G723_53:
+ len += sprintf(buf + len, "G.723.1 5.3");
+ break;
+ case TS85:
+ len += sprintf(buf + len, "TrueSpeech 8.5");
+ break;
+ case TS48:
+ len += sprintf(buf + len, "TrueSpeech 4.8");
+ break;
+ case TS41:
+ len += sprintf(buf + len, "TrueSpeech 4.1");
+ break;
+ case G728:
+ len += sprintf(buf + len, "G.728");
+ break;
+ case G729:
+ len += sprintf(buf + len, "G.729");
+ break;
+ case G729B:
+ len += sprintf(buf + len, "G.729B");
+ break;
+ case ULAW:
+ len += sprintf(buf + len, "uLaw");
+ break;
+ case ALAW:
+ len += sprintf(buf + len, "aLaw");
+ break;
+ case LINEAR16:
+ len += sprintf(buf + len, "16 bit Linear");
+ break;
+ case LINEAR8:
+ len += sprintf(buf + len, "8 bit Linear");
+ break;
+ case WSS:
+ len += sprintf(buf + len, "Windows Sound System");
+ break;
+ default:
+ len += sprintf(buf + len, "NO CODEC CHOSEN");
+ break;
+ }
+ len += sprintf(buf + len, "\nRecord CODEC ");
+ switch (j->rec_codec) {
+ case G723_63:
+ len += sprintf(buf + len, "G.723.1 6.3");
+ break;
+ case G723_53:
+ len += sprintf(buf + len, "G.723.1 5.3");
+ break;
+ case TS85:
+ len += sprintf(buf + len, "TrueSpeech 8.5");
+ break;
+ case TS48:
+ len += sprintf(buf + len, "TrueSpeech 4.8");
+ break;
+ case TS41:
+ len += sprintf(buf + len, "TrueSpeech 4.1");
+ break;
+ case G728:
+ len += sprintf(buf + len, "G.728");
+ break;
+ case G729:
+ len += sprintf(buf + len, "G.729");
+ break;
+ case G729B:
+ len += sprintf(buf + len, "G.729B");
+ break;
+ case ULAW:
+ len += sprintf(buf + len, "uLaw");
+ break;
+ case ALAW:
+ len += sprintf(buf + len, "aLaw");
+ break;
+ case LINEAR16:
+ len += sprintf(buf + len, "16 bit Linear");
+ break;
+ case LINEAR8:
+ len += sprintf(buf + len, "8 bit Linear");
+ break;
+ case WSS:
+ len += sprintf(buf + len, "Windows Sound System");
+ break;
+ default:
+ len += sprintf(buf + len, "NO CODEC CHOSEN");
+ break;
+ }
+ len += sprintf(buf + len, "\nAEC ");
+ switch (j->aec_level) {
+ case AEC_OFF:
+ len += sprintf(buf + len, "Off");
+ break;
+ case AEC_LOW:
+ len += sprintf(buf + len, "Low");
+ break;
+ case AEC_MED:
+ len += sprintf(buf + len, "Med");
+ break;
+ case AEC_HIGH:
+ len += sprintf(buf + len, "High");
+ break;
+ case AEC_AUTO:
+ len += sprintf(buf + len, "Auto");
+ break;
+ case AEC_AGC:
+ len += sprintf(buf + len, "AEC/AGC");
+ break;
+ default:
+ len += sprintf(buf + len, "unknown(%i)", j->aec_level);
+ break;
+ }
+
+ len += sprintf(buf + len, "\nRec volume 0x%x", get_rec_volume(j));
+ len += sprintf(buf + len, "\nPlay volume 0x%x", get_play_volume(j));
+ len += sprintf(buf + len, "\nDTMF prescale 0x%x", get_dtmf_prescale(j));
+
+ len += sprintf(buf + len, "\nHook state %d", j->hookstate); /* j->r_hook); */
+
+ if (j->cardtype == QTI_LINEJACK) {
+ len += sprintf(buf + len, "\nPOTS Correct %d", j->flags.pots_correct);
+ len += sprintf(buf + len, "\nPSTN Present %d", j->flags.pstn_present);
+ len += sprintf(buf + len, "\nPSTN Check %d", j->flags.pstncheck);
+ len += sprintf(buf + len, "\nPOTS to PSTN %d", j->flags.pots_pstn);
+ switch (j->daa_mode) {
+ case SOP_PU_SLEEP:
+ len += sprintf(buf + len, "\nDAA PSTN On Hook");
+ break;
+ case SOP_PU_RINGING:
+ len += sprintf(buf + len, "\nDAA PSTN Ringing");
+ len += sprintf(buf + len, "\nRinging state = %d", j->cadence_f[4].state);
+ break;
+ case SOP_PU_CONVERSATION:
+ len += sprintf(buf + len, "\nDAA PSTN Off Hook");
+ break;
+ case SOP_PU_PULSEDIALING:
+ len += sprintf(buf + len, "\nDAA PSTN Pulse Dialing");
+ break;
+ }
+ len += sprintf(buf + len, "\nDAA RMR = %d", j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.bitreg.RMR);
+ len += sprintf(buf + len, "\nDAA VDD OK = %d", j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.bitreg.VDD_OK);
+ len += sprintf(buf + len, "\nDAA CR0 = 0x%02x", j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg);
+ len += sprintf(buf + len, "\nDAA CR1 = 0x%02x", j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.reg);
+ len += sprintf(buf + len, "\nDAA CR2 = 0x%02x", j->m_DAAShadowRegs.SOP_REGS.SOP.cr2.reg);
+ len += sprintf(buf + len, "\nDAA CR3 = 0x%02x", j->m_DAAShadowRegs.SOP_REGS.SOP.cr3.reg);
+ len += sprintf(buf + len, "\nDAA CR4 = 0x%02x", j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.reg);
+ len += sprintf(buf + len, "\nDAA CR5 = 0x%02x", j->m_DAAShadowRegs.SOP_REGS.SOP.cr5.reg);
+ len += sprintf(buf + len, "\nDAA XR0 = 0x%02x", j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.reg);
+ len += sprintf(buf + len, "\nDAA ringstop %ld - jiffies %ld", j->pstn_ring_stop, jiffies);
+ }
+ switch (j->port) {
+ case PORT_POTS:
+ len += sprintf(buf + len, "\nPort POTS");
+ break;
+ case PORT_PSTN:
+ len += sprintf(buf + len, "\nPort PSTN");
+ break;
+ case PORT_SPEAKER:
+ len += sprintf(buf + len, "\nPort SPEAKER/MIC");
+ break;
+ case PORT_HANDSET:
+ len += sprintf(buf + len, "\nPort HANDSET");
+ break;
+ }
+ if (j->dsp.low == 0x21 || j->dsp.low == 0x22) {
+ len += sprintf(buf + len, "\nSLIC state ");
+ switch (SLIC_GetState(j)) {
+ case PLD_SLIC_STATE_OC:
+ len += sprintf(buf + len, "OC");
+ break;
+ case PLD_SLIC_STATE_RINGING:
+ len += sprintf(buf + len, "RINGING");
+ break;
+ case PLD_SLIC_STATE_ACTIVE:
+ len += sprintf(buf + len, "ACTIVE");
+ break;
+ case PLD_SLIC_STATE_OHT: /* On-hook transmit */
+ len += sprintf(buf + len, "OHT");
+ break;
+ case PLD_SLIC_STATE_TIPOPEN:
+ len += sprintf(buf + len, "TIPOPEN");
+ break;
+ case PLD_SLIC_STATE_STANDBY:
+ len += sprintf(buf + len, "STANDBY");
+ break;
+ case PLD_SLIC_STATE_APR: /* Active polarity reversal */
+ len += sprintf(buf + len, "APR");
+ break;
+ case PLD_SLIC_STATE_OHTPR: /* OHT polarity reversal */
+ len += sprintf(buf + len, "OHTPR");
+ break;
+ default:
+ len += sprintf(buf + len, "%d", SLIC_GetState(j));
+ break;
+ }
+ }
+ len += sprintf(buf + len, "\nBase Frame %2.2x.%2.2x", j->baseframe.high, j->baseframe.low);
+ len += sprintf(buf + len, "\nCID Base Frame %2d", j->cid_base_frame_size);
+#ifdef PERFMON_STATS
+ len += sprintf(buf + len, "\nTimer Checks %ld", j->timerchecks);
+ len += sprintf(buf + len, "\nRX Ready Checks %ld", j->rxreadycheck);
+ len += sprintf(buf + len, "\nTX Ready Checks %ld", j->txreadycheck);
+ len += sprintf(buf + len, "\nFrames Read %ld", j->framesread);
+ len += sprintf(buf + len, "\nFrames Written %ld", j->frameswritten);
+ len += sprintf(buf + len, "\nDry Buffer %ld", j->drybuffer);
+ len += sprintf(buf + len, "\nRead Waits %ld", j->read_wait);
+ len += sprintf(buf + len, "\nWrite Waits %ld", j->write_wait);
+ len += sprintf(buf + len, "\nStatus Waits %ld", j->statuswait);
+ len += sprintf(buf + len, "\nStatus Wait Fails %ld", j->statuswaitfail);
+ len += sprintf(buf + len, "\nPControl Waits %ld", j->pcontrolwait);
+ len += sprintf(buf + len, "\nPControl Wait Fails %ld", j->pcontrolwaitfail);
+ len += sprintf(buf + len, "\nIs Control Ready Checks %ld", j->iscontrolready);
+ len += sprintf(buf + len, "\nIs Control Ready Check failures %ld", j->iscontrolreadyfail);
+
+#endif
+ len += sprintf(buf + len, "\n");
+ }
+ }
+ return len;
+}
+
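+/*
+ * Legacy /proc read_proc interface: ixj_get_status_proc() formats the
+ * complete status dump into the single page buffer, and the arithmetic
+ * below merely windows that buffer by the caller's (off, count) pair,
+ * setting *eof once the remainder of the dump fits within this read.
+ */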
+static int ixj_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int len = ixj_get_status_proc(page);
+ if (len <= off + count) *eof = 1;
+ *start = page + off;
+ len -= off;
+ if (len > count) len = count;
+ if (len < 0) len = 0;
+ return len;
+}
+
+
+static void cleanup(void)
+{
+ int cnt;
+ IXJ *j;
+
+ for (cnt = 0; cnt < IXJMAX; cnt++) {
+ j = get_ixj(cnt);
+ if (j != NULL && j->DSPbase) {
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "IXJ: Deleting timer for /dev/phone%d\n", cnt);
+ del_timer(&j->timer);
+ if (j->cardtype == QTI_LINEJACK) {
+ j->pld_scrw.bits.daafsyncen = 0; /* Turn off DAA Frame Sync */
+
+ outb_p(j->pld_scrw.byte, j->XILINXbase);
+ j->pld_slicw.bits.rly1 = 0;
+ j->pld_slicw.bits.rly2 = 0;
+ j->pld_slicw.bits.rly3 = 0;
+ outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
+ LED_SetState(0x0, j);
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "IXJ: Releasing XILINX address for /dev/phone%d\n", cnt);
+ release_region(j->XILINXbase, 8);
+ } else if (j->cardtype == QTI_PHONEJACK_LITE || j->cardtype == QTI_PHONEJACK_PCI) {
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "IXJ: Releasing XILINX address for /dev/phone%d\n", cnt);
+ release_region(j->XILINXbase, 4);
+ }
+ kfree(j->read_buffer);
+ kfree(j->write_buffer);
+ if (j->dev)
+ pnp_device_detach(j->dev);
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "IXJ: Unregistering /dev/phone%d from LTAPI\n", cnt);
+ phone_unregister_device(&j->p);
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "IXJ: Releasing DSP address for /dev/phone%d\n", cnt);
+ release_region(j->DSPbase, 16);
+#ifdef IXJ_DYN_ALLOC
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "IXJ: Freeing memory for /dev/phone%d\n", cnt);
+ kfree(j);
+ ixj[cnt] = NULL;
+#endif
+ }
+ }
+ if (ixjdebug & 0x0002)
+ printk(KERN_INFO "IXJ: Removing /proc/ixj\n");
+ remove_proc_entry("ixj", NULL);
+}
+
+/* Typedefs */
+typedef struct {
+ BYTE length;
+ DWORD bits;
+} DATABLOCK;
+
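+/*
+ * PCIEE_WriteBit/PCIEE_ReadBit/PCIEE_ReadWord bit-bang the card's serial
+ * configuration EEPROM through the register at wAddress + 3: chip select
+ * on bit 1, clock (SK) on bit 0, data out on bit 2, data in on bit 3,
+ * with ~1 ms settling delays around each clock edge.  The read sequence
+ * (start bit '1', opcode '10', eight address bits, then sixteen data bits
+ * shifted in MSB first) matches the READ command of a Microwire-style
+ * 93Cxx serial EEPROM, which is presumably the part on the board.
+ */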
+static void PCIEE_WriteBit(WORD wEEPROMAddress, BYTE lastLCC, BYTE byData)
+{
+ lastLCC = lastLCC & 0xfb;
+ lastLCC = lastLCC | (byData ? 4 : 0);
+ outb(lastLCC, wEEPROMAddress); /*set data out bit as appropriate */
+
+ mdelay(1);
+ lastLCC = lastLCC | 0x01;
+ outb(lastLCC, wEEPROMAddress); /*SK rising edge */
+
+ byData = byData << 1;
+ lastLCC = lastLCC & 0xfe;
+ mdelay(1);
+ outb(lastLCC, wEEPROMAddress); /*after delay, SK falling edge */
+
+}
+
+static BYTE PCIEE_ReadBit(WORD wEEPROMAddress, BYTE lastLCC)
+{
+ mdelay(1);
+ lastLCC = lastLCC | 0x01;
+ outb(lastLCC, wEEPROMAddress); /*SK rising edge */
+
+ lastLCC = lastLCC & 0xfe;
+ mdelay(1);
+ outb(lastLCC, wEEPROMAddress); /*after delay, SK falling edge */
+
+ return ((inb(wEEPROMAddress) >> 3) & 1);
+}
+
+static bool PCIEE_ReadWord(WORD wAddress, WORD wLoc, WORD * pwResult)
+{
+ BYTE lastLCC;
+ WORD wEEPROMAddress = wAddress + 3;
+ DWORD i;
+ BYTE byResult;
+ *pwResult = 0;
+ lastLCC = inb(wEEPROMAddress);
+ lastLCC = lastLCC | 0x02;
+ lastLCC = lastLCC & 0xfe;
+ outb(lastLCC, wEEPROMAddress); /* CS hi, SK lo */
+
+ mdelay(1); /* delay */
+
+ PCIEE_WriteBit(wEEPROMAddress, lastLCC, 1);
+ PCIEE_WriteBit(wEEPROMAddress, lastLCC, 1);
+ PCIEE_WriteBit(wEEPROMAddress, lastLCC, 0);
+ for (i = 0; i < 8; i++) {
+ PCIEE_WriteBit(wEEPROMAddress, lastLCC, wLoc & 0x80 ? 1 : 0);
+ wLoc <<= 1;
+ }
+
+ for (i = 0; i < 16; i++) {
+ byResult = PCIEE_ReadBit(wEEPROMAddress, lastLCC);
+ *pwResult = (*pwResult << 1) | byResult;
+ }
+
+ mdelay(1); /* another delay */
+
+ lastLCC = lastLCC & 0xfd;
+ outb(lastLCC, wEEPROMAddress); /* negate CS */
+
+ return 0; /* 0 == success; callers treat a non-zero return as failure */
+}
+
+static DWORD PCIEE_GetSerialNumber(WORD wAddress)
+{
+ WORD wLo, wHi;
+ if (PCIEE_ReadWord(wAddress, 62, &wLo))
+ return 0;
+ if (PCIEE_ReadWord(wAddress, 63, &wHi))
+ return 0;
+ return (((DWORD) wHi << 16) | wLo);
+}
+
+static int dspio[IXJMAX + 1] =
+{
+ 0,
+};
+static int xio[IXJMAX + 1] =
+{
+ 0,
+};
+
+module_param_array(dspio, int, NULL, 0);
+module_param_array(xio, int, NULL, 0);
+MODULE_DESCRIPTION("Quicknet VoIP Telephony card module - www.quicknet.net");
+MODULE_AUTHOR("Ed Okerson <eokerson@quicknet.net>");
+MODULE_LICENSE("GPL");
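+/*
+ * For ISA cards on systems without PnP, the DSP and XILINX I/O bases can
+ * be passed as module parameters instead, one entry per card, e.g. (the
+ * addresses here are only illustrative, not recommended values):
+ *
+ *     modprobe ixj dspio=0x340 xio=0x330
+ */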
+
+static void __exit ixj_exit(void)
+{
+ cleanup();
+}
+
+static IXJ *new_ixj(unsigned long port)
+{
+ IXJ *res;
+ if (!request_region(port, 16, "ixj DSP")) {
+ printk(KERN_INFO "ixj: can't get I/O address 0x%lx\n", port);
+ return NULL;
+ }
+ res = ixj_alloc();
+ if (!res) {
+ release_region(port, 16);
+ printk(KERN_INFO "ixj: out of memory\n");
+ return NULL;
+ }
+ res->DSPbase = port;
+ return res;
+}
+
+static int __init ixj_probe_isapnp(int *cnt)
+{
+ int probe = 0;
+ int func = 0x110;
+ struct pnp_dev *dev = NULL, *old_dev = NULL;
+
+ while (1) {
+ do {
+ IXJ *j;
+ int result;
+
+ old_dev = dev;
+ dev = pnp_find_dev(NULL, ISAPNP_VENDOR('Q', 'T', 'I'),
+ ISAPNP_FUNCTION(func), old_dev);
+ if (!dev || !dev->card)
+ break;
+ result = pnp_device_attach(dev);
+ if (result < 0) {
+ printk("pnp attach failed %d \n", result);
+ break;
+ }
+ if (pnp_activate_dev(dev) < 0) {
+ printk("pnp activate failed (out of resources?)\n");
+ pnp_device_detach(dev);
+ return -ENOMEM;
+ }
+
+ if (!pnp_port_valid(dev, 0)) {
+ pnp_device_detach(dev);
+ return -ENODEV;
+ }
+
+ j = new_ixj(pnp_port_start(dev, 0));
+ if (!j)
+ break;
+
+ if (func != 0x110)
+ j->XILINXbase = pnp_port_start(dev, 1); /* get real port */
+
+ switch (func) {
+ case (0x110):
+ j->cardtype = QTI_PHONEJACK;
+ break;
+ case (0x310):
+ j->cardtype = QTI_LINEJACK;
+ break;
+ case (0x410):
+ j->cardtype = QTI_PHONEJACK_LITE;
+ break;
+ }
+ j->board = *cnt;
+ probe = ixj_selfprobe(j);
+ if (!probe) {
+ j->serial = dev->card->serial;
+ j->dev = dev;
+ switch (func) {
+ case 0x110:
+ printk(KERN_INFO "ixj: found Internet PhoneJACK at 0x%x\n", j->DSPbase);
+ break;
+ case 0x310:
+ printk(KERN_INFO "ixj: found Internet LineJACK at 0x%x\n", j->DSPbase);
+ break;
+ case 0x410:
+ printk(KERN_INFO "ixj: found Internet PhoneJACK Lite at 0x%x\n", j->DSPbase);
+ break;
+ }
+ }
+ ++*cnt;
+ } while (dev);
+ if (func == 0x410)
+ break;
+ if (func == 0x310)
+ func = 0x410;
+ if (func == 0x110)
+ func = 0x310;
+ dev = NULL;
+ }
+ return probe;
+}
+
+static int __init ixj_probe_isa(int *cnt)
+{
+ int i, probe;
+
+ /* Use passed parameters for older kernels without PnP */
+ for (i = 0; i < IXJMAX; i++) {
+ if (dspio[i]) {
+ IXJ *j = new_ixj(dspio[i]);
+
+ if (!j)
+ break;
+
+ j->XILINXbase = xio[i];
+ j->cardtype = 0;
+
+ j->board = *cnt;
+ probe = ixj_selfprobe(j);
+ j->dev = NULL;
+ ++*cnt;
+ }
+ }
+ return 0;
+}
+
+static int __init ixj_probe_pci(int *cnt)
+{
+ struct pci_dev *pci = NULL;
+ int i, probe = 0;
+ IXJ *j = NULL;
+
+ for (i = 0; i < IXJMAX - *cnt; i++) {
+ pci = pci_get_device(PCI_VENDOR_ID_QUICKNET,
+ PCI_DEVICE_ID_QUICKNET_XJ, pci);
+ if (!pci)
+ break;
+
+ if (pci_enable_device(pci))
+ break;
+ j = new_ixj(pci_resource_start(pci, 0));
+ if (!j)
+ break;
+
+ j->serial = PCIEE_GetSerialNumber(pci_resource_start(pci, 2)); /* serial lives in the config EEPROM behind BAR 2 */
+ j->XILINXbase = j->DSPbase + 0x10;
+ j->cardtype = QTI_PHONEJACK_PCI;
+ j->board = *cnt;
+ probe = ixj_selfprobe(j);
+ if (!probe)
+ printk(KERN_INFO "ixj: found Internet PhoneJACK PCI at 0x%x\n", j->DSPbase);
+ ++*cnt;
+ }
+ pci_dev_put(pci);
+ return probe;
+}
+
+static int __init ixj_init(void)
+{
+ int cnt = 0;
+ int probe = 0;
+
+ /* These might be no-ops, see above. */
+ if ((probe = ixj_probe_isapnp(&cnt)) < 0) {
+ return probe;
+ }
+ if ((probe = ixj_probe_isa(&cnt)) < 0) {
+ return probe;
+ }
+ if ((probe = ixj_probe_pci(&cnt)) < 0) {
+ return probe;
+ }
+ printk(KERN_INFO "ixj driver initialized.\n");
+ create_proc_read_entry("ixj", 0, NULL, ixj_read_proc, NULL);
+ return probe;
+}
+
+module_init(ixj_init);
+module_exit(ixj_exit);
+
+static void DAA_Coeff_US(IXJ *j)
+{
+ int i;
+
+ j->daa_country = DAA_US;
+ /*----------------------------------------------- */
+ /* CAO */
+ for (i = 0; i < ALISDAA_CALLERID_SIZE; i++) {
+ j->m_DAAShadowRegs.CAO_REGS.CAO.CallerID[i] = 0;
+ }
+
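+/*
+ * Each DAA_Coeff_* routine only fills the in-memory shadow copy of the
+ * DAA's COP/SOP/XOP register banks; the values are written out to the
+ * chip later by the DAA programming code.  The byte lists quoted in the
+ * comments appear to be reference/default coefficient sets - where they
+ * disagree with the hex values actually assigned, the assignments are
+ * what the driver uses.
+ */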
+/* Bytes for IM-filter part 1 (04): 0E,32,E2,2F,C2,5A,C0,00 */
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[7] = 0x03;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[6] = 0x4B;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[5] = 0x5D;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[4] = 0xCD;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[3] = 0x24;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[2] = 0xC5;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[1] = 0xA0;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[0] = 0x00;
+/* Bytes for IM-filter part 2 (05): 72,85,00,0E,2B,3A,D0,08 */
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[7] = 0x71;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[6] = 0x1A;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[5] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[4] = 0x0A;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[3] = 0xB5;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[2] = 0x33;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[1] = 0xE0;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[0] = 0x08;
+/* Bytes for FRX-filter (08): 03,8F,48,F2,8F,48,70,08 */
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[7] = 0x05;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[6] = 0xA3;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[5] = 0x72;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[4] = 0x34;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[3] = 0x3F;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[2] = 0x3B;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[1] = 0x30;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[0] = 0x08;
+/* Bytes for FRR-filter (07): 04,8F,38,7F,9B,EA,B0,08 */
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[7] = 0x05;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[6] = 0x87;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[5] = 0xF9;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[4] = 0x3E;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[3] = 0x32;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[2] = 0xDA;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[1] = 0xB0;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[0] = 0x08;
+/* Bytes for AX-filter (0A): 16,55,DD,CA */
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[3] = 0x41;
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[2] = 0xB5;
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[1] = 0xDD;
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[0] = 0xCA;
+/* Bytes for AR-filter (09): 52,D3,11,42 */
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[3] = 0x25;
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[2] = 0xC7;
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[1] = 0x10;
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[0] = 0xD6;
+/* Bytes for TH-filter part 1 (00): 00,42,48,81,B3,80,00,98 */
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[7] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[6] = 0x42;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[5] = 0x48;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[4] = 0x81;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[3] = 0xA5;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[2] = 0x80;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[1] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[0] = 0x98;
+/* Bytes for TH-filter part 2 (01): 02,F2,33,A0,68,AB,8A,AD */
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[7] = 0x02;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[6] = 0xA2;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[5] = 0x2B;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[4] = 0xB0;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[3] = 0xE8;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[2] = 0xAB;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[1] = 0x81;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[0] = 0xCC;
+/* Bytes for TH-filter part 3 (02): 00,88,DA,54,A4,BA,2D,BB */
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[7] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[6] = 0x88;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[5] = 0xD2;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[4] = 0x24;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[3] = 0xBA;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[2] = 0xA9;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[1] = 0x3B;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[0] = 0xA6;
+/* ; (10K, 0.68uF) */
+ /* */
+ /* Bytes for Ringing part 1 (03):1B,3B,9B,BA,D4,1C,B3,23 */
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[7] = 0x1B;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[6] = 0x3C;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[5] = 0x93;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[4] = 0x3A;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[3] = 0x22;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[2] = 0x12;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[1] = 0xA3;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[0] = 0x23;
+ /* Bytes for Ringing part 2 (06):13,42,A6,BA,D4,73,CA,D5 */
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[7] = 0x12;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[6] = 0xA2;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[5] = 0xA6;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[4] = 0xBA;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[3] = 0x22;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[2] = 0x7A;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[1] = 0x0A;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[0] = 0xD5;
+
+ /* Levelmetering Ringing (0D):B2,45,0F,8E */
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[3] = 0xAA;
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[2] = 0x35;
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[1] = 0x0F;
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[0] = 0x8E;
+
+ /* Bytes for Ringing part 1 (03):1B,3B,9B,BA,D4,1C,B3,23 */
+/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[7] = 0x1C; */
+/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[6] = 0xB3; */
+/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[5] = 0xAB; */
+/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[4] = 0xAB; */
+/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[3] = 0x54; */
+/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[2] = 0x2D; */
+/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[1] = 0x62; */
+/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[0] = 0x2D; */
+ /* Bytes for Ringing part 2 (06):13,42,A6,BA,D4,73,CA,D5 */
+/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[7] = 0x2D; */
+/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[6] = 0x62; */
+/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[5] = 0xA6; */
+/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[4] = 0xBB; */
+/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[3] = 0x2A; */
+/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[2] = 0x7D; */
+/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[1] = 0x0A; */
+/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[0] = 0xD4; */
+/* */
+ /* Levelmetering Ringing (0D):B2,45,0F,8E */
+/* j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[3] = 0xAA; */
+/* j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[2] = 0x05; */
+/* j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[1] = 0x0F; */
+/* j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[0] = 0x8E; */
+
+ /* Caller ID 1st Tone (0E):CA,0E,CA,09,99,99,99,99 */
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[7] = 0xCA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[6] = 0x0E;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[5] = 0xCA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[4] = 0x09;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[3] = 0x99;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[2] = 0x99;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[1] = 0x99;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[0] = 0x99;
+/* Caller ID 2nd Tone (0F):FD,B5,BA,07,DA,00,00,00 */
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[7] = 0xFD;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[6] = 0xB5;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[5] = 0xBA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[4] = 0x07;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[3] = 0xDA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[2] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[1] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[0] = 0x00;
+/* */
+ /* ;CR Registers */
+ /* Config. Reg. 0 (filters) (cr0):FE ; CLK gen. by crystal */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg = 0xFF;
+/* Config. Reg. 1 (dialing) (cr1):05 */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.reg = 0x05;
+/* Config. Reg. 2 (caller ID) (cr2):04 */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr2.reg = 0x04;
+/* Config. Reg. 3 (testloops) (cr3):03 ; SEL Bit==0, HP-disabled */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr3.reg = 0x00;
+/* Config. Reg. 4 (analog gain) (cr4):02 */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.reg = 0x02;
+ /* Config. Reg. 5 (Version) (cr5):02 */
+ /* Config. Reg. 6 (Reserved) (cr6):00 */
+ /* Config. Reg. 7 (Reserved) (cr7):00 */
+ /* */
+ /* ;xr Registers */
+ /* Ext. Reg. 0 (Interrupt Reg.) (xr0):02 */
+
+ j->m_DAAShadowRegs.XOP_xr0_W.reg = 0x02; /* SO_1 set to '1' because it is inverted. */
+ /* Ext. Reg. 1 (Interrupt enable) (xr1):3C Cadence, RING, Caller ID, VDD_OK */
+
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr1.reg = 0x3C;
+/* Ext. Reg. 2 (Cadence Time Out) (xr2):7D */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr2.reg = 0x7D;
+/* Ext. Reg. 3 (DC Char) (xr3):32 ; B-Filter Off == 1 */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr3.reg = 0x3B; /*0x32; */
+ /* Ext. Reg. 4 (Cadence) (xr4):00 */
+
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr4.reg = 0x00;
+/* Ext. Reg. 5 (Ring timer) (xr5):22 */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr5.reg = 0x22;
+/* Ext. Reg. 6 (Power State) (xr6):00 */
+ j->m_DAAShadowRegs.XOP_xr6_W.reg = 0x00;
+/* Ext. Reg. 7 (Vdd) (xr7):40 */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr7.reg = 0x40; /* 0x40 ??? Should it be 0x00? */
+ /* */
+ /* DTMF Tone 1 (0B): 11,B3,5A,2C ; 697 Hz */
+ /* 12,33,5A,C3 ; 770 Hz */
+ /* 13,3C,5B,32 ; 852 Hz */
+ /* 1D,1B,5C,CC ; 941 Hz */
+
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[3] = 0x11;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[2] = 0xB3;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[1] = 0x5A;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[0] = 0x2C;
+/* DTMF Tone 2 (0C): 32,32,52,B3 ; 1209 Hz */
+ /* EC,1D,52,22 ; 1336 Hz */
+ /* AA,AC,51,D2 ; 1477 Hz */
+ /* 9B,3B,51,25 ; 1633 Hz */
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[3] = 0x32;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[2] = 0x32;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[1] = 0x52;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[0] = 0xB3;
+}
+
+static void DAA_Coeff_UK(IXJ *j)
+{
+ int i;
+
+ j->daa_country = DAA_UK;
+ /*----------------------------------------------- */
+ /* CAO */
+ for (i = 0; i < ALISDAA_CALLERID_SIZE; i++) {
+ j->m_DAAShadowRegs.CAO_REGS.CAO.CallerID[i] = 0;
+ }
+
+/* Bytes for IM-filter part 1 (04): 00,C2,BB,A8,CB,81,A0,00 */
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[7] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[6] = 0xC2;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[5] = 0xBB;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[4] = 0xA8;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[3] = 0xCB;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[2] = 0x81;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[1] = 0xA0;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[0] = 0x00;
+/* Bytes for IM-filter part 2 (05): 40,00,00,0A,A4,33,E0,08 */
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[7] = 0x40;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[6] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[5] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[4] = 0x0A;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[3] = 0xA4;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[2] = 0x33;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[1] = 0xE0;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[0] = 0x08;
+/* Bytes for FRX-filter (08): 07,9B,ED,24,B2,A2,A0,08 */
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[7] = 0x07;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[6] = 0x9B;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[5] = 0xED;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[4] = 0x24;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[3] = 0xB2;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[2] = 0xA2;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[1] = 0xA0;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[0] = 0x08;
+/* Bytes for FRR-filter (07): 0F,92,F2,B2,87,D2,30,08 */
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[7] = 0x0F;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[6] = 0x92;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[5] = 0xF2;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[4] = 0xB2;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[3] = 0x87;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[2] = 0xD2;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[1] = 0x30;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[0] = 0x08;
+/* Bytes for AX-filter (0A): 1B,A5,DD,CA */
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[3] = 0x1B;
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[2] = 0xA5;
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[1] = 0xDD;
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[0] = 0xCA;
+/* Bytes for AR-filter (09): E2,27,10,D6 */
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[3] = 0xE2;
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[2] = 0x27;
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[1] = 0x10;
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[0] = 0xD6;
+/* Bytes for TH-filter part 1 (00): 80,2D,38,8B,D0,00,00,98 */
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[7] = 0x80;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[6] = 0x2D;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[5] = 0x38;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[4] = 0x8B;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[3] = 0xD0;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[2] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[1] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[0] = 0x98;
+/* Bytes for TH-filter part 2 (01): 02,5A,53,F0,0B,5F,84,D4 */
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[7] = 0x02;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[6] = 0x5A;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[5] = 0x53;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[4] = 0xF0;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[3] = 0x0B;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[2] = 0x5F;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[1] = 0x84;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[0] = 0xD4;
+/* Bytes for TH-filter part 3 (02): 00,88,6A,A4,8F,52,F5,32 */
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[7] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[6] = 0x88;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[5] = 0x6A;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[4] = 0xA4;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[3] = 0x8F;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[2] = 0x52;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[1] = 0xF5;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[0] = 0x32;
+/* ; idle */
+ /* Bytes for Ringing part 1 (03):1B,3C,93,3A,22,12,A3,23 */
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[7] = 0x1B;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[6] = 0x3C;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[5] = 0x93;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[4] = 0x3A;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[3] = 0x22;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[2] = 0x12;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[1] = 0xA3;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[0] = 0x23;
+/* Bytes for Ringing part 2 (06):12,A2,A6,BA,22,7A,0A,D5 */
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[7] = 0x12;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[6] = 0xA2;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[5] = 0xA6;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[4] = 0xBA;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[3] = 0x22;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[2] = 0x7A;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[1] = 0x0A;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[0] = 0xD5;
+/* Levelmetering Ringing (0D):AA,35,0F,8E ; 25Hz 30V less possible? */
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[3] = 0xAA;
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[2] = 0x35;
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[1] = 0x0F;
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[0] = 0x8E;
+/* Caller ID 1st Tone (0E):CA,0E,CA,09,99,99,99,99 */
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[7] = 0xCA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[6] = 0x0E;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[5] = 0xCA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[4] = 0x09;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[3] = 0x99;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[2] = 0x99;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[1] = 0x99;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[0] = 0x99;
+/* Caller ID 2nd Tone (0F):FD,B5,BA,07,DA,00,00,00 */
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[7] = 0xFD;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[6] = 0xB5;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[5] = 0xBA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[4] = 0x07;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[3] = 0xDA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[2] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[1] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[0] = 0x00;
+/* ;CR Registers */
+ /* Config. Reg. 0 (filters) (cr0):FF */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg = 0xFF;
+/* Config. Reg. 1 (dialing) (cr1):05 */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.reg = 0x05;
+/* Config. Reg. 2 (caller ID) (cr2):04 */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr2.reg = 0x04;
+/* Config. Reg. 3 (testloops) (cr3):00 ; */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr3.reg = 0x00;
+/* Config. Reg. 4 (analog gain) (cr4):02 */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.reg = 0x02;
+ /* Config. Reg. 5 (Version) (cr5):02 */
+ /* Config. Reg. 6 (Reserved) (cr6):00 */
+ /* Config. Reg. 7 (Reserved) (cr7):00 */
+ /* ;xr Registers */
+ /* Ext. Reg. 0 (Interrupt Reg.) (xr0):02 */
+
+ j->m_DAAShadowRegs.XOP_xr0_W.reg = 0x02; /* SO_1 set to '1' because it is inverted. */
+ /* Ext. Reg. 1 (Interrupt enable) (xr1):1C */
+
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr1.reg = 0x1C; /* RING, Caller ID, VDD_OK */
+ /* Ext. Reg. 2 (Cadence Time Out) (xr2):7D */
+
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr2.reg = 0x7D;
+/* Ext. Reg. 3 (DC Char) (xr3):36 ; */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr3.reg = 0x36;
+/* Ext. Reg. 4 (Cadence) (xr4):00 */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr4.reg = 0x00;
+/* Ext. Reg. 5 (Ring timer) (xr5):22 */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr5.reg = 0x22;
+/* Ext. Reg. 6 (Power State) (xr6):00 */
+ j->m_DAAShadowRegs.XOP_xr6_W.reg = 0x00;
+/* Ext. Reg. 7 (Vdd) (xr7):46 */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr7.reg = 0x46; /* 0x46 ??? Should it be 0x00? */
+ /* DTMF Tone 1 (0B): 11,B3,5A,2C ; 697 Hz */
+ /* 12,33,5A,C3 ; 770 Hz */
+ /* 13,3C,5B,32 ; 852 Hz */
+ /* 1D,1B,5C,CC ; 941 Hz */
+
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[3] = 0x11;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[2] = 0xB3;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[1] = 0x5A;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[0] = 0x2C;
+/* DTMF Tone 2 (0C): 32,32,52,B3 ; 1209 Hz */
+ /* EC,1D,52,22 ; 1336 Hz */
+ /* AA,AC,51,D2 ; 1477 Hz */
+ /* 9B,3B,51,25 ; 1633 Hz */
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[3] = 0x32;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[2] = 0x32;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[1] = 0x52;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[0] = 0xB3;
+}
+
+
+static void DAA_Coeff_France(IXJ *j)
+{
+ int i;
+
+ j->daa_country = DAA_FRANCE;
+ /*----------------------------------------------- */
+ /* CAO */
+ for (i = 0; i < ALISDAA_CALLERID_SIZE; i++) {
+ j->m_DAAShadowRegs.CAO_REGS.CAO.CallerID[i] = 0;
+ }
+
+/* Bytes for IM-filter part 1 (04): 02,A2,43,2C,22,AF,A0,00 */
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[7] = 0x02;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[6] = 0xA2;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[5] = 0x43;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[4] = 0x2C;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[3] = 0x22;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[2] = 0xAF;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[1] = 0xA0;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[0] = 0x00;
+/* Bytes for IM-filter part 2 (05): 67,CE,00,0C,22,33,E0,08 */
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[7] = 0x67;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[6] = 0xCE;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[5] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[4] = 0x2C;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[3] = 0x22;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[2] = 0x33;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[1] = 0xE0;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[0] = 0x08;
+/* Bytes for FRX-filter (08): 07,9A,28,F6,23,4A,B0,08 */
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[7] = 0x07;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[6] = 0x9A;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[5] = 0x28;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[4] = 0xF6;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[3] = 0x23;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[2] = 0x4A;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[1] = 0xB0;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[0] = 0x08;
+/* Bytes for FRR-filter (07): 03,8F,F9,2F,9E,FA,20,08 */
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[7] = 0x03;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[6] = 0x8F;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[5] = 0xF9;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[4] = 0x2F;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[3] = 0x9E;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[2] = 0xFA;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[1] = 0x20;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[0] = 0x08;
+/* Bytes for AX-filter (0A): 16,B5,DD,CA */
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[3] = 0x16;
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[2] = 0xB5;
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[1] = 0xDD;
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[0] = 0xCA;
+/* Bytes for AR-filter (09): 52,C7,10,D6 */
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[3] = 0xE2;
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[2] = 0xC7;
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[1] = 0x10;
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[0] = 0xD6;
+/* Bytes for TH-filter part 1 (00): 00,42,48,81,A6,80,00,98 */
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[7] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[6] = 0x42;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[5] = 0x48;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[4] = 0x81;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[3] = 0xA6;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[2] = 0x80;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[1] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[0] = 0x98;
+/* Bytes for TH-filter part 2 (01): 02,AC,2A,30,78,AC,8A,2C */
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[7] = 0x02;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[6] = 0xAC;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[5] = 0x2A;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[4] = 0x30;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[3] = 0x78;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[2] = 0xAC;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[1] = 0x8A;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[0] = 0x2C;
+/* Bytes for TH-filter part 3 (02): 00,88,DA,A5,22,BA,2C,45 */
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[7] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[6] = 0x88;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[5] = 0xDA;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[4] = 0xA5;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[3] = 0x22;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[2] = 0xBA;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[1] = 0x2C;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[0] = 0x45;
+/* ; idle */
+ /* Bytes for Ringing part 1 (03):1B,3C,93,3A,22,12,A3,23 */
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[7] = 0x1B;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[6] = 0x3C;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[5] = 0x93;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[4] = 0x3A;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[3] = 0x22;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[2] = 0x12;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[1] = 0xA3;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[0] = 0x23;
+/* Bytes for Ringing part 2 (06):12,A2,A6,BA,22,7A,0A,D5 */
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[7] = 0x12;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[6] = 0xA2;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[5] = 0xA6;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[4] = 0xBA;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[3] = 0x22;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[2] = 0x7A;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[1] = 0x0A;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[0] = 0xD5;
+/* Levelmetering Ringing (0D):32,45,B5,84 ; 50Hz 20V */
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[3] = 0x32;
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[2] = 0x45;
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[1] = 0xB5;
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[0] = 0x84;
+/* Caller ID 1st Tone (0E):CA,0E,CA,09,99,99,99,99 */
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[7] = 0xCA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[6] = 0x0E;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[5] = 0xCA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[4] = 0x09;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[3] = 0x99;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[2] = 0x99;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[1] = 0x99;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[0] = 0x99;
+/* Caller ID 2nd Tone (0F):FD,B5,BA,07,DA,00,00,00 */
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[7] = 0xFD;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[6] = 0xB5;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[5] = 0xBA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[4] = 0x07;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[3] = 0xDA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[2] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[1] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[0] = 0x00;
+/* ;CR Registers */
+ /* Config. Reg. 0 (filters) (cr0):FF */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg = 0xFF;
+/* Config. Reg. 1 (dialing) (cr1):05 */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.reg = 0x05;
+/* Config. Reg. 2 (caller ID) (cr2):04 */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr2.reg = 0x04;
+/* Config. Reg. 3 (testloops) (cr3):00 ; */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr3.reg = 0x00;
+/* Config. Reg. 4 (analog gain) (cr4):02 */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.reg = 0x02;
+ /* Config. Reg. 5 (Version) (cr5):02 */
+ /* Config. Reg. 6 (Reserved) (cr6):00 */
+ /* Config. Reg. 7 (Reserved) (cr7):00 */
+ /* ;xr Registers */
+ /* Ext. Reg. 0 (Interrupt Reg.) (xr0):02 */
+
+ j->m_DAAShadowRegs.XOP_xr0_W.reg = 0x02; /* SO_1 set to '1' because it is inverted. */
+ /* Ext. Reg. 1 (Interrupt enable) (xr1):1C */
+
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr1.reg = 0x1C; /* RING, Caller ID, VDD_OK */
+ /* Ext. Reg. 2 (Cadence Time Out) (xr2):7D */
+
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr2.reg = 0x7D;
+/* Ext. Reg. 3 (DC Char) (xr3):36 ; */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr3.reg = 0x36;
+/* Ext. Reg. 4 (Cadence) (xr4):00 */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr4.reg = 0x00;
+/* Ext. Reg. 5 (Ring timer) (xr5):22 */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr5.reg = 0x22;
+/* Ext. Reg. 6 (Power State) (xr6):00 */
+ j->m_DAAShadowRegs.XOP_xr6_W.reg = 0x00;
+/* Ext. Reg. 7 (Vdd) (xr7):46 */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr7.reg = 0x46; /* 0x46 ??? Should it be 0x00? */
+ /* DTMF Tone 1 (0B): 11,B3,5A,2C ; 697 Hz */
+ /* 12,33,5A,C3 ; 770 Hz */
+ /* 13,3C,5B,32 ; 852 Hz */
+ /* 1D,1B,5C,CC ; 941 Hz */
+
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[3] = 0x11;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[2] = 0xB3;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[1] = 0x5A;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[0] = 0x2C;
+/* DTMF Tone 2 (0C): 32,32,52,B3 ; 1209 Hz */
+ /* EC,1D,52,22 ; 1336 Hz */
+ /* AA,AC,51,D2 ; 1477 Hz */
+ /* 9B,3B,51,25 ; 1633 Hz */
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[3] = 0x32;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[2] = 0x32;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[1] = 0x52;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[0] = 0xB3;
+}
+
+
+static void DAA_Coeff_Germany(IXJ *j)
+{
+ int i;
+
+ j->daa_country = DAA_GERMANY;
+ /*----------------------------------------------- */
+ /* CAO */
+ for (i = 0; i < ALISDAA_CALLERID_SIZE; i++) {
+ j->m_DAAShadowRegs.CAO_REGS.CAO.CallerID[i] = 0;
+ }
+
+/* Bytes for IM-filter part 1 (04): 00,CE,BB,B8,D2,81,B0,00 */
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[7] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[6] = 0xCE;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[5] = 0xBB;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[4] = 0xB8;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[3] = 0xD2;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[2] = 0x81;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[1] = 0xB0;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[0] = 0x00;
+/* Bytes for IM-filter part 2 (05): 45,8F,00,0C,D2,3A,D0,08 */
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[7] = 0x45;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[6] = 0x8F;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[5] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[4] = 0x0C;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[3] = 0xD2;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[2] = 0x3A;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[1] = 0xD0;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[0] = 0x08;
+/* Bytes for FRX-filter (08): 07,AA,E2,34,24,89,20,08 */
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[7] = 0x07;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[6] = 0xAA;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[5] = 0xE2;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[4] = 0x34;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[3] = 0x24;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[2] = 0x89;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[1] = 0x20;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[0] = 0x08;
+/* Bytes for FRR-filter (07): 02,87,FA,37,9A,CA,B0,08 */
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[7] = 0x02;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[6] = 0x87;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[5] = 0xFA;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[4] = 0x37;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[3] = 0x9A;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[2] = 0xCA;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[1] = 0xB0;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[0] = 0x08;
+/* Bytes for AX-filter (0A): 72,D5,DD,CA */
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[3] = 0x72;
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[2] = 0xD5;
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[1] = 0xDD;
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[0] = 0xCA;
+/* Bytes for AR-filter (09): 72,42,13,4B */
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[3] = 0x72;
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[2] = 0x42;
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[1] = 0x13;
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[0] = 0x4B;
+/* Bytes for TH-filter part 1 (00): 80,52,48,81,AD,80,00,98 */
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[7] = 0x80;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[6] = 0x52;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[5] = 0x48;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[4] = 0x81;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[3] = 0xAD;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[2] = 0x80;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[1] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[0] = 0x98;
+/* Bytes for TH-filter part 2 (01): 02,42,5A,20,E8,1A,81,27 */
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[7] = 0x02;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[6] = 0x42;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[5] = 0x5A;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[4] = 0x20;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[3] = 0xE8;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[2] = 0x1A;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[1] = 0x81;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[0] = 0x27;
+/* Bytes for TH-filter part 3 (02): 00,88,63,26,BD,4B,A3,C2 */
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[7] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[6] = 0x88;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[5] = 0x63;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[4] = 0x26;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[3] = 0xBD;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[2] = 0x4B;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[1] = 0xA3;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[0] = 0xC2;
+/* ; (10K, 0.68uF) */
+ /* Bytes for Ringing part 1 (03):1B,3B,9B,BA,D4,1C,B3,23 */
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[7] = 0x1B;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[6] = 0x3B;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[5] = 0x9B;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[4] = 0xBA;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[3] = 0xD4;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[2] = 0x1C;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[1] = 0xB3;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[0] = 0x23;
+/* Bytes for Ringing part 2 (06):13,42,A6,BA,D4,73,CA,D5 */
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[7] = 0x13;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[6] = 0x42;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[5] = 0xA6;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[4] = 0xBA;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[3] = 0xD4;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[2] = 0x73;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[1] = 0xCA;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[0] = 0xD5;
+/* Levelmetering Ringing (0D):B2,45,0F,8E */
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[3] = 0xB2;
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[2] = 0x45;
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[1] = 0x0F;
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[0] = 0x8E;
+/* Caller ID 1st Tone (0E):CA,0E,CA,09,99,99,99,99 */
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[7] = 0xCA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[6] = 0x0E;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[5] = 0xCA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[4] = 0x09;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[3] = 0x99;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[2] = 0x99;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[1] = 0x99;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[0] = 0x99;
+/* Caller ID 2nd Tone (0F):FD,B5,BA,07,DA,00,00,00 */
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[7] = 0xFD;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[6] = 0xB5;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[5] = 0xBA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[4] = 0x07;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[3] = 0xDA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[2] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[1] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[0] = 0x00;
+/* ;CR Registers */
+ /* Config. Reg. 0 (filters) (cr0):FF ; all Filters enabled, CLK from ext. source */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg = 0xFF;
+/* Config. Reg. 1 (dialing) (cr1):05 ; Manual Ring, Ring metering enabled */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.reg = 0x05;
+/* Config. Reg. 2 (caller ID) (cr2):04 ; Analog Gain 0dB, FSC internal */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr2.reg = 0x04;
+/* Config. Reg. 3 (testloops) (cr3):00 ; SEL Bit==0, HP-enabled */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr3.reg = 0x00;
+/* Config. Reg. 4 (analog gain) (cr4):02 */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.reg = 0x02;
+ /* Config. Reg. 5 (Version) (cr5):02 */
+ /* Config. Reg. 6 (Reserved) (cr6):00 */
+ /* Config. Reg. 7 (Reserved) (cr7):00 */
+ /* ;xr Registers */
+ /* Ext. Reg. 0 (Interrupt Reg.) (xr0):02 */
+
+ j->m_DAAShadowRegs.XOP_xr0_W.reg = 0x02; /* SO_1 set to '1' because it is inverted. */
+ /* Ext. Reg. 1 (Interrupt enable) (xr1):1C ; Ring, CID, VDDOK Interrupts enabled */
+
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr1.reg = 0x1C; /* RING, Caller ID, VDD_OK */
+ /* Ext. Reg. 2 (Cadence Time Out) (xr2):7D */
+
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr2.reg = 0x7D;
+/* Ext. Reg. 3 (DC Char) (xr3):32 ; B-Filter Off==1, U0=3.5V, R=200Ohm */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr3.reg = 0x32;
+/* Ext. Reg. 4 (Cadence) (xr4):00 */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr4.reg = 0x00;
+/* Ext. Reg. 5 (Ring timer) (xr5):22 */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr5.reg = 0x22;
+/* Ext. Reg. 6 (Power State) (xr6):00 */
+ j->m_DAAShadowRegs.XOP_xr6_W.reg = 0x00;
+/* Ext. Reg. 7 (Vdd) (xr7):40 ; VDD=4.25 V */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr7.reg = 0x40; /* 0x40 ??? Should it be 0x00? */
+ /* DTMF Tone 1 (0B): 11,B3,5A,2C ; 697 Hz */
+ /* 12,33,5A,C3 ; 770 Hz */
+ /* 13,3C,5B,32 ; 852 Hz */
+ /* 1D,1B,5C,CC ; 941 Hz */
+
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[3] = 0x11;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[2] = 0xB3;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[1] = 0x5A;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[0] = 0x2C;
+/* DTMF Tone 2 (0C): 32,32,52,B3 ; 1209 Hz */
+ /* EC,1D,52,22 ; 1336 Hz */
+ /* AA,AC,51,D2 ; 1477 Hz */
+ /* 9B,3B,51,25 ; 1633 Hz */
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[3] = 0x32;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[2] = 0x32;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[1] = 0x52;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[0] = 0xB3;
+}
+
+
+static void DAA_Coeff_Australia(IXJ *j)
+{
+ int i;
+
+ j->daa_country = DAA_AUSTRALIA;
+ /*----------------------------------------------- */
+ /* CAO */
+ for (i = 0; i < ALISDAA_CALLERID_SIZE; i++) {
+ j->m_DAAShadowRegs.CAO_REGS.CAO.CallerID[i] = 0;
+ }
+
+/* Bytes for IM-filter part 1 (04): 00,A3,AA,28,B3,82,D0,00 */
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[7] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[6] = 0xA3;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[5] = 0xAA;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[4] = 0x28;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[3] = 0xB3;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[2] = 0x82;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[1] = 0xD0;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[0] = 0x00;
+/* Bytes for IM-filter part 2 (05): 70,96,00,09,32,6B,C0,08 */
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[7] = 0x70;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[6] = 0x96;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[5] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[4] = 0x09;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[3] = 0x32;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[2] = 0x6B;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[1] = 0xC0;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[0] = 0x08;
+/* Bytes for FRX-filter (08): 07,96,E2,34,32,9B,30,08 */
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[7] = 0x07;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[6] = 0x96;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[5] = 0xE2;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[4] = 0x34;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[3] = 0x32;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[2] = 0x9B;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[1] = 0x30;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[0] = 0x08;
+/* Bytes for FRR-filter (07): 0F,9A,E9,2F,22,CC,A0,08 */
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[7] = 0x0F;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[6] = 0x9A;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[5] = 0xE9;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[4] = 0x2F;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[3] = 0x22;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[2] = 0xCC;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[1] = 0xA0;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[0] = 0x08;
+/* Bytes for AX-filter (0A): CB,45,DD,CA */
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[3] = 0xCB;
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[2] = 0x45;
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[1] = 0xDD;
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[0] = 0xCA;
+/* Bytes for AR-filter (09): 1B,67,10,D6 */
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[3] = 0x1B;
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[2] = 0x67;
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[1] = 0x10;
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[0] = 0xD6;
+/* Bytes for TH-filter part 1 (00): 80,52,48,81,AF,80,00,98 */
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[7] = 0x80;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[6] = 0x52;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[5] = 0x48;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[4] = 0x81;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[3] = 0xAF;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[2] = 0x80;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[1] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[0] = 0x98;
+/* Bytes for TH-filter part 2 (01): 02,DB,52,B0,38,01,82,AC */
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[7] = 0x02;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[6] = 0xDB;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[5] = 0x52;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[4] = 0xB0;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[3] = 0x38;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[2] = 0x01;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[1] = 0x82;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[0] = 0xAC;
+/* Bytes for TH-filter part 3 (02): 00,88,4A,3E,2C,3B,24,46 */
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[7] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[6] = 0x88;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[5] = 0x4A;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[4] = 0x3E;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[3] = 0x2C;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[2] = 0x3B;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[1] = 0x24;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[0] = 0x46;
+/* ; idle */
+ /* Bytes for Ringing part 1 (03):1B,3C,93,3A,22,12,A3,23 */
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[7] = 0x1B;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[6] = 0x3C;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[5] = 0x93;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[4] = 0x3A;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[3] = 0x22;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[2] = 0x12;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[1] = 0xA3;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[0] = 0x23;
+/* Bytes for Ringing part 2 (06):12,A2,A6,BA,22,7A,0A,D5 */
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[7] = 0x12;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[6] = 0xA2;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[5] = 0xA6;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[4] = 0xBA;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[3] = 0x22;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[2] = 0x7A;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[1] = 0x0A;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[0] = 0xD5;
+/* Levelmetering Ringing (0D):32,45,B5,84 ; 50Hz 20V */
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[3] = 0x32;
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[2] = 0x45;
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[1] = 0xB5;
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[0] = 0x84;
+/* Caller ID 1st Tone (0E):CA,0E,CA,09,99,99,99,99 */
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[7] = 0xCA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[6] = 0x0E;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[5] = 0xCA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[4] = 0x09;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[3] = 0x99;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[2] = 0x99;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[1] = 0x99;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[0] = 0x99;
+/* Caller ID 2nd Tone (0F):FD,B5,BA,07,DA,00,00,00 */
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[7] = 0xFD;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[6] = 0xB5;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[5] = 0xBA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[4] = 0x07;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[3] = 0xDA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[2] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[1] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[0] = 0x00;
+/* ;CR Registers */
+ /* Config. Reg. 0 (filters) (cr0):FF */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg = 0xFF;
+/* Config. Reg. 1 (dialing) (cr1):05 */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.reg = 0x05;
+/* Config. Reg. 2 (caller ID) (cr2):04 */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr2.reg = 0x04;
+/* Config. Reg. 3 (testloops) (cr3):00 ; */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr3.reg = 0x00;
+/* Config. Reg. 4 (analog gain) (cr4):02 */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.reg = 0x02;
+ /* Config. Reg. 5 (Version) (cr5):02 */
+ /* Config. Reg. 6 (Reserved) (cr6):00 */
+ /* Config. Reg. 7 (Reserved) (cr7):00 */
+ /* ;xr Registers */
+ /* Ext. Reg. 0 (Interrupt Reg.) (xr0):02 */
+ j->m_DAAShadowRegs.XOP_xr0_W.reg = 0x02; /* SO_1 set to '1' because it is inverted. */
+ /* Ext. Reg. 1 (Interrupt enable) (xr1):1C */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr1.reg = 0x1C; /* RING, Caller ID, VDD_OK */
+ /* Ext. Reg. 2 (Cadence Time Out) (xr2):7D */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr2.reg = 0x7D;
+/* Ext. Reg. 3 (DC Char) (xr3):2B ; */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr3.reg = 0x2B;
+/* Ext. Reg. 4 (Cadence) (xr4):00 */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr4.reg = 0x00;
+/* Ext. Reg. 5 (Ring timer) (xr5):22 */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr5.reg = 0x22;
+/* Ext. Reg. 6 (Power State) (xr6):00 */
+ j->m_DAAShadowRegs.XOP_xr6_W.reg = 0x00;
+/* Ext. Reg. 7 (Vdd) (xr7):40 */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr7.reg = 0x40; /* 0x40 ??? Should it be 0x00? */
+
+ /* DTMF Tone 1 (0B): 11,B3,5A,2C ; 697 Hz */
+ /* 12,33,5A,C3 ; 770 Hz */
+ /* 13,3C,5B,32 ; 852 Hz */
+ /* 1D,1B,5C,CC ; 941 Hz */
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[3] = 0x11;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[2] = 0xB3;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[1] = 0x5A;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[0] = 0x2C;
+
+ /* DTMF Tone 2 (0C): 32,32,52,B3 ; 1209 Hz */
+ /* EC,1D,52,22 ; 1336 Hz */
+ /* AA,AC,51,D2 ; 1477 Hz */
+ /* 9B,3B,51,25 ; 1633 Hz */
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[3] = 0x32;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[2] = 0x32;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[1] = 0x52;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[0] = 0xB3;
+}
+
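+/*
+ * Per-country DAA coefficient setup (Japan).  Like the routine above, it
+ * only stages values in the shadow register copies: the COP coefficient
+ * blocks (IM/FRX/FRR/AX/AR/TH filters, ringer impedance, level metering,
+ * caller-ID and DTMF tone coefficients), the SOP configuration registers
+ * cr0-cr4 and the XOP extended registers xr0-xr7.  The shadow copies are
+ * presumably transferred to the DAA elsewhere in the driver.
+ */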
+static void DAA_Coeff_Japan(IXJ *j)
+{
+ int i;
+
+ j->daa_country = DAA_JAPAN;
+ /*----------------------------------------------- */
+ /* CAO */
+ for (i = 0; i < ALISDAA_CALLERID_SIZE; i++) {
+ j->m_DAAShadowRegs.CAO_REGS.CAO.CallerID[i] = 0;
+ }
+
+/* Bytes for IM-filter part 1 (04): 06,BD,E2,2D,BA,F9,A0,00 */
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[7] = 0x06;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[6] = 0xBD;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[5] = 0xE2;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[4] = 0x2D;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[3] = 0xBA;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[2] = 0xF9;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[1] = 0xA0;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[0] = 0x00;
+/* Bytes for IM-filter part 2 (05): 6F,F7,00,0E,34,33,E0,08 */
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[7] = 0x6F;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[6] = 0xF7;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[5] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[4] = 0x0E;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[3] = 0x34;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[2] = 0x33;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[1] = 0xE0;
+ j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[0] = 0x08;
+/* Bytes for FRX-filter (08): 02,8F,68,77,9C,58,F0,08 */
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[7] = 0x02;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[6] = 0x8F;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[5] = 0x68;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[4] = 0x77;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[3] = 0x9C;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[2] = 0x58;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[1] = 0xF0;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[0] = 0x08;
+/* Bytes for FRR-filter (07): 03,8F,38,73,87,EA,20,08 */
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[7] = 0x03;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[6] = 0x8F;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[5] = 0x38;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[4] = 0x73;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[3] = 0x87;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[2] = 0xEA;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[1] = 0x20;
+ j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[0] = 0x08;
+/* Bytes for AX-filter (0A): 51,C5,DD,CA */
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[3] = 0x51;
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[2] = 0xC5;
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[1] = 0xDD;
+ j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[0] = 0xCA;
+/* Bytes for AR-filter (09): 25,A7,10,D6 */
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[3] = 0x25;
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[2] = 0xA7;
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[1] = 0x10;
+ j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[0] = 0xD6;
+/* Bytes for TH-filter part 1 (00): 00,42,48,81,AE,80,00,98 */
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[7] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[6] = 0x42;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[5] = 0x48;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[4] = 0x81;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[3] = 0xAE;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[2] = 0x80;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[1] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[0] = 0x98;
+/* Bytes for TH-filter part 2 (01): 02,AB,2A,20,99,5B,89,28 */
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[7] = 0x02;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[6] = 0xAB;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[5] = 0x2A;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[4] = 0x20;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[3] = 0x99;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[2] = 0x5B;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[1] = 0x89;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[0] = 0x28;
+/* Bytes for TH-filter part 3 (02): 00,88,DA,25,34,C5,4C,BA */
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[7] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[6] = 0x88;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[5] = 0xDA;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[4] = 0x25;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[3] = 0x34;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[2] = 0xC5;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[1] = 0x4C;
+ j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[0] = 0xBA;
+/* ; idle */
+ /* Bytes for Ringing part 1 (03):1B,3C,93,3A,22,12,A3,23 */
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[7] = 0x1B;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[6] = 0x3C;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[5] = 0x93;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[4] = 0x3A;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[3] = 0x22;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[2] = 0x12;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[1] = 0xA3;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[0] = 0x23;
+/* Bytes for Ringing part 2 (06):12,A2,A6,BA,22,7A,0A,D5 */
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[7] = 0x12;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[6] = 0xA2;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[5] = 0xA6;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[4] = 0xBA;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[3] = 0x22;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[2] = 0x7A;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[1] = 0x0A;
+ j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[0] = 0xD5;
+/* Levelmetering Ringing (0D):AA,35,0F,8E ; 25Hz 30V ????????? */
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[3] = 0xAA;
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[2] = 0x35;
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[1] = 0x0F;
+ j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[0] = 0x8E;
+/* Caller ID 1st Tone (0E):CA,0E,CA,09,99,99,99,99 */
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[7] = 0xCA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[6] = 0x0E;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[5] = 0xCA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[4] = 0x09;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[3] = 0x99;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[2] = 0x99;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[1] = 0x99;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[0] = 0x99;
+/* Caller ID 2nd Tone (0F):FD,B5,BA,07,DA,00,00,00 */
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[7] = 0xFD;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[6] = 0xB5;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[5] = 0xBA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[4] = 0x07;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[3] = 0xDA;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[2] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[1] = 0x00;
+ j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[0] = 0x00;
+/* ;CR Registers */
+ /* Config. Reg. 0 (filters) (cr0):FF */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg = 0xFF;
+/* Config. Reg. 1 (dialing) (cr1):05 */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.reg = 0x05;
+/* Config. Reg. 2 (caller ID) (cr2):04 */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr2.reg = 0x04;
+/* Config. Reg. 3 (testloops) (cr3):00 ; */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr3.reg = 0x00;
+/* Config. Reg. 4 (analog gain) (cr4):02 */
+ j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.reg = 0x02;
+ /* Config. Reg. 5 (Version) (cr5):02 */
+ /* Config. Reg. 6 (Reserved) (cr6):00 */
+ /* Config. Reg. 7 (Reserved) (cr7):00 */
+ /* ;xr Registers */
+ /* Ext. Reg. 0 (Interrupt Reg.) (xr0):02 */
+ j->m_DAAShadowRegs.XOP_xr0_W.reg = 0x02; /* SO_1 set to '1' because it is inverted. */
+ /* Ext. Reg. 1 (Interrupt enable) (xr1):1C */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr1.reg = 0x1C; /* RING, Caller ID, VDD_OK */
+ /* Ext. Reg. 2 (Cadence Time Out) (xr2):7D */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr2.reg = 0x7D;
+/* Ext. Reg. 3 (DC Char) (xr3):22 ; */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr3.reg = 0x22;
+/* Ext. Reg. 4 (Cadence) (xr4):00 */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr4.reg = 0x00;
+/* Ext. Reg. 5 (Ring timer) (xr5):22 */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr5.reg = 0x22;
+/* Ext. Reg. 6 (Power State) (xr6):00 */
+ j->m_DAAShadowRegs.XOP_xr6_W.reg = 0x00;
+/* Ext. Reg. 7 (Vdd) (xr7):40 */
+ j->m_DAAShadowRegs.XOP_REGS.XOP.xr7.reg = 0x40; /* 0x40 ??? Should it be 0x00? */
+ /* DTMF Tone 1 (0B): 11,B3,5A,2C ; 697 Hz */
+ /* 12,33,5A,C3 ; 770 Hz */
+ /* 13,3C,5B,32 ; 852 Hz */
+ /* 1D,1B,5C,CC ; 941 Hz */
+
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[3] = 0x11;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[2] = 0xB3;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[1] = 0x5A;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[0] = 0x2C;
+/* DTMF Tone 2 (0C): 32,32,52,B3 ; 1209 Hz */
+ /* EC,1D,52,22 ; 1336 Hz */
+ /* AA,AC,51,D2 ; 1477 Hz */
+ /* 9B,3B,51,25 ; 1633 Hz */
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[3] = 0x32;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[2] = 0x32;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[1] = 0x52;
+ j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[0] = 0xB3;
+}
+
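+/*
+ * Tone-detection filter table.  Each 19-word row describes one detector:
+ * three cascaded second-order IIR sections, each given as A1, A2, B2, B1,
+ * B0, followed by the internal filter scaling, the minimum in-band energy
+ * threshold, the in-band to broad-band ratio (n/32) and a shift-mask /
+ * bit-count word, as the per-entry comments spell out.  The integers look
+ * like fixed-point encodings of the floating-point values in the comments
+ * (e.g. 32538/16384 ~= 1.986 for the first A1 below); that scaling is an
+ * observation, not something stated in the original source.
+ */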
+static s16 tone_table[][19] =
+{
+ { /* f20_50[] 11 */
+ 32538, /* A1 = 1.985962 */
+ -32325, /* A2 = -0.986511 */
+ -343, /* B2 = -0.010493 */
+ 0, /* B1 = 0 */
+ 343, /* B0 = 0.010493 */
+ 32619, /* A1 = 1.990906 */
+ -32520, /* A2 = -0.992462 */
+ 19179, /* B2 = 0.585327 */
+ -19178, /* B1 = -1.170593 */
+ 19179, /* B0 = 0.585327 */
+ 32723, /* A1 = 1.997314 */
+ -32686, /* A2 = -0.997528 */
+ 9973, /* B2 = 0.304352 */
+ -9955, /* B1 = -0.607605 */
+ 9973, /* B0 = 0.304352 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f133_200[] 12 */
+ 32072, /* A1 = 1.95752 */
+ -31896, /* A2 = -0.973419 */
+ -435, /* B2 = -0.013294 */
+ 0, /* B1 = 0 */
+ 435, /* B0 = 0.013294 */
+ 32188, /* A1 = 1.9646 */
+ -32400, /* A2 = -0.98877 */
+ 15139, /* B2 = 0.462036 */
+ -14882, /* B1 = -0.908356 */
+ 15139, /* B0 = 0.462036 */
+ 32473, /* A1 = 1.981995 */
+ -32524, /* A2 = -0.992584 */
+ 23200, /* B2 = 0.708008 */
+ -23113, /* B1 = -1.410706 */
+ 23200, /* B0 = 0.708008 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f300 13 */
+ 31769, /* A1 = -1.939026 */
+ -32584, /* A2 = 0.994385 */
+ -475, /* B2 = -0.014522 */
+ 0, /* B1 = 0.000000 */
+ 475, /* B0 = 0.014522 */
+ 31789, /* A1 = -1.940247 */
+ -32679, /* A2 = 0.997284 */
+ 17280, /* B2 = 0.527344 */
+ -16865, /* B1 = -1.029358 */
+ 17280, /* B0 = 0.527344 */
+ 31841, /* A1 = -1.943481 */
+ -32681, /* A2 = 0.997345 */
+ 543, /* B2 = 0.016579 */
+ -525, /* B1 = -0.032097 */
+ 543, /* B0 = 0.016579 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f300_420[] 14 */
+ 30750, /* A1 = 1.876892 */
+ -31212, /* A2 = -0.952515 */
+ -804, /* B2 = -0.024541 */
+ 0, /* B1 = 0 */
+ 804, /* B0 = 0.024541 */
+ 30686, /* A1 = 1.872925 */
+ -32145, /* A2 = -0.980988 */
+ 14747, /* B2 = 0.450043 */
+ -13703, /* B1 = -0.836395 */
+ 14747, /* B0 = 0.450043 */
+ 31651, /* A1 = 1.931824 */
+ -32321, /* A2 = -0.986389 */
+ 24425, /* B2 = 0.745422 */
+ -23914, /* B1 = -1.459595 */
+ 24427, /* B0 = 0.745483 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f330 15 */
+ 31613, /* A1 = -1.929565 */
+ -32646, /* A2 = 0.996277 */
+ -185, /* B2 = -0.005657 */
+ 0, /* B1 = 0.000000 */
+ 185, /* B0 = 0.005657 */
+ 31620, /* A1 = -1.929932 */
+ -32713, /* A2 = 0.998352 */
+ 19253, /* B2 = 0.587585 */
+ -18566, /* B1 = -1.133179 */
+ 19253, /* B0 = 0.587585 */
+ 31674, /* A1 = -1.933228 */
+ -32715, /* A2 = 0.998413 */
+ 2575, /* B2 = 0.078590 */
+ -2495, /* B1 = -0.152283 */
+ 2575, /* B0 = 0.078590 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f300_425[] 16 */
+ 30741, /* A1 = 1.876282 */
+ -31475, /* A2 = -0.960541 */
+ -703, /* B2 = -0.021484 */
+ 0, /* B1 = 0 */
+ 703, /* B0 = 0.021484 */
+ 30688, /* A1 = 1.873047 */
+ -32248, /* A2 = -0.984161 */
+ 14542, /* B2 = 0.443787 */
+ -13523, /* B1 = -0.825439 */
+ 14542, /* B0 = 0.443787 */
+ 31494, /* A1 = 1.922302 */
+ -32366, /* A2 = -0.987762 */
+ 21577, /* B2 = 0.658508 */
+ -21013, /* B1 = -1.282532 */
+ 21577, /* B0 = 0.658508 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f330_440[] 17 */
+ 30627, /* A1 = 1.869324 */
+ -31338, /* A2 = -0.95636 */
+ -843, /* B2 = -0.025749 */
+ 0, /* B1 = 0 */
+ 843, /* B0 = 0.025749 */
+ 30550, /* A1 = 1.864685 */
+ -32221, /* A2 = -0.983337 */
+ 13594, /* B2 = 0.414886 */
+ -12589, /* B1 = -0.768402 */
+ 13594, /* B0 = 0.414886 */
+ 31488, /* A1 = 1.921936 */
+ -32358, /* A2 = -0.987518 */
+ 24684, /* B2 = 0.753296 */
+ -24029, /* B1 = -1.466614 */
+ 24684, /* B0 = 0.753296 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f340 18 */
+ 31546, /* A1 = -1.925476 */
+ -32646, /* A2 = 0.996277 */
+ -445, /* B2 = -0.013588 */
+ 0, /* B1 = 0.000000 */
+ 445, /* B0 = 0.013588 */
+ 31551, /* A1 = -1.925781 */
+ -32713, /* A2 = 0.998352 */
+ 23884, /* B2 = 0.728882 */
+ -22979, /* B1 = -1.402527 */
+ 23884, /* B0 = 0.728882 */
+ 31606, /* A1 = -1.929138 */
+ -32715, /* A2 = 0.998413 */
+ 863, /* B2 = 0.026367 */
+ -835, /* B1 = -0.050985 */
+ 863, /* B0 = 0.026367 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f350_400[] 19 */
+ 31006, /* A1 = 1.892517 */
+ -32029, /* A2 = -0.977448 */
+ -461, /* B2 = -0.014096 */
+ 0, /* B1 = 0 */
+ 461, /* B0 = 0.014096 */
+ 30999, /* A1 = 1.892029 */
+ -32487, /* A2 = -0.991455 */
+ 11325, /* B2 = 0.345612 */
+ -10682, /* B1 = -0.651978 */
+ 11325, /* B0 = 0.345612 */
+ 31441, /* A1 = 1.919067 */
+ -32526, /* A2 = -0.992615 */
+ 24324, /* B2 = 0.74231 */
+ -23535, /* B1 = -1.436523 */
+ 24324, /* B0 = 0.74231 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f350_440[] */
+ 30634, /* A1 = 1.869751 */
+ -31533, /* A2 = -0.962341 */
+ -680, /* B2 = -0.020782 */
+ 0, /* B1 = 0 */
+ 680, /* B0 = 0.020782 */
+ 30571, /* A1 = 1.865906 */
+ -32277, /* A2 = -0.985016 */
+ 12894, /* B2 = 0.393524 */
+ -11945, /* B1 = -0.729065 */
+ 12894, /* B0 = 0.393524 */
+ 31367, /* A1 = 1.91449 */
+ -32379, /* A2 = -0.988129 */
+ 23820, /* B2 = 0.726929 */
+ -23104, /* B1 = -1.410217 */
+ 23820, /* B0 = 0.726929 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f350_450[] */
+ 30552, /* A1 = 1.864807 */
+ -31434, /* A2 = -0.95929 */
+ -690, /* B2 = -0.021066 */
+ 0, /* B1 = 0 */
+ 690, /* B0 = 0.021066 */
+ 30472, /* A1 = 1.859924 */
+ -32248, /* A2 = -0.984161 */
+ 13385, /* B2 = 0.408478 */
+ -12357, /* B1 = -0.754242 */
+ 13385, /* B0 = 0.408478 */
+ 31358, /* A1 = 1.914001 */
+ -32366, /* A2 = -0.987732 */
+ 26488, /* B2 = 0.80835 */
+ -25692, /* B1 = -1.568176 */
+ 26490, /* B0 = 0.808411 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f360 */
+ 31397, /* A1 = -1.916321 */
+ -32623, /* A2 = 0.995605 */
+ -117, /* B2 = -0.003598 */
+ 0, /* B1 = 0.000000 */
+ 117, /* B0 = 0.003598 */
+ 31403, /* A1 = -1.916687 */
+ -32700, /* A2 = 0.997925 */
+ 3388, /* B2 = 0.103401 */
+ -3240, /* B1 = -0.197784 */
+ 3388, /* B0 = 0.103401 */
+ 31463, /* A1 = -1.920410 */
+ -32702, /* A2 = 0.997986 */
+ 13346, /* B2 = 0.407288 */
+ -12863, /* B1 = -0.785126 */
+ 13346, /* B0 = 0.407288 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f380_420[] */
+ 30831, /* A1 = 1.881775 */
+ -32064, /* A2 = -0.978546 */
+ -367, /* B2 = -0.01122 */
+ 0, /* B1 = 0 */
+ 367, /* B0 = 0.01122 */
+ 30813, /* A1 = 1.880737 */
+ -32456, /* A2 = -0.990509 */
+ 11068, /* B2 = 0.337769 */
+ -10338, /* B1 = -0.631042 */
+ 11068, /* B0 = 0.337769 */
+ 31214, /* A1 = 1.905212 */
+ -32491, /* A2 = -0.991577 */
+ 16374, /* B2 = 0.499695 */
+ -15781, /* B1 = -0.963196 */
+ 16374, /* B0 = 0.499695 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f392 */
+ 31152, /* A1 = -1.901428 */
+ -32613, /* A2 = 0.995300 */
+ -314, /* B2 = -0.009605 */
+ 0, /* B1 = 0.000000 */
+ 314, /* B0 = 0.009605 */
+ 31156, /* A1 = -1.901672 */
+ -32694, /* A2 = 0.997742 */
+ 28847, /* B2 = 0.880371 */
+ -2734, /* B1 = -0.166901 */
+ 28847, /* B0 = 0.880371 */
+ 31225, /* A1 = -1.905823 */
+ -32696, /* A2 = 0.997803 */
+ 462, /* B2 = 0.014108 */
+ -442, /* B1 = -0.027019 */
+ 462, /* B0 = 0.014108 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f400_425[] */
+ 30836, /* A1 = 1.882141 */
+ -32296, /* A2 = -0.985596 */
+ -324, /* B2 = -0.009903 */
+ 0, /* B1 = 0 */
+ 324, /* B0 = 0.009903 */
+ 30825, /* A1 = 1.881409 */
+ -32570, /* A2 = -0.993958 */
+ 16847, /* B2 = 0.51416 */
+ -15792, /* B1 = -0.963898 */
+ 16847, /* B0 = 0.51416 */
+ 31106, /* A1 = 1.89856 */
+ -32584, /* A2 = -0.994415 */
+ 9579, /* B2 = 0.292328 */
+ -9164, /* B1 = -0.559357 */
+ 9579, /* B0 = 0.292328 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f400_440[] */
+ 30702, /* A1 = 1.873962 */
+ -32134, /* A2 = -0.980682 */
+ -517, /* B2 = -0.015793 */
+ 0, /* B1 = 0 */
+ 517, /* B0 = 0.015793 */
+ 30676, /* A1 = 1.872375 */
+ -32520, /* A2 = -0.992462 */
+ 8144, /* B2 = 0.24855 */
+ -7596, /* B1 = -0.463684 */
+ 8144, /* B0 = 0.24855 */
+ 31084, /* A1 = 1.897217 */
+ -32547, /* A2 = -0.993256 */
+ 22713, /* B2 = 0.693176 */
+ -21734, /* B1 = -1.326599 */
+ 22713, /* B0 = 0.693176 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f400_450[] */
+ 30613, /* A1 = 1.86853 */
+ -32031, /* A2 = -0.977509 */
+ -618, /* B2 = -0.018866 */
+ 0, /* B1 = 0 */
+ 618, /* B0 = 0.018866 */
+ 30577, /* A1 = 1.866272 */
+ -32491, /* A2 = -0.991577 */
+ 9612, /* B2 = 0.293335 */
+ -8935, /* B1 = -0.54541 */
+ 9612, /* B0 = 0.293335 */
+ 31071, /* A1 = 1.896484 */
+ -32524, /* A2 = -0.992584 */
+ 21596, /* B2 = 0.659058 */
+ -20667, /* B1 = -1.261414 */
+ 21596, /* B0 = 0.659058 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f420 */
+ 30914, /* A1 = -1.886841 */
+ -32584, /* A2 = 0.994385 */
+ -426, /* B2 = -0.013020 */
+ 0, /* B1 = 0.000000 */
+ 426, /* B0 = 0.013020 */
+ 30914, /* A1 = -1.886841 */
+ -32679, /* A2 = 0.997314 */
+ 17520, /* B2 = 0.534668 */
+ -16471, /* B1 = -1.005310 */
+ 17520, /* B0 = 0.534668 */
+ 31004, /* A1 = -1.892334 */
+ -32683, /* A2 = 0.997406 */
+ 819, /* B2 = 0.025023 */
+ -780, /* B1 = -0.047619 */
+ 819, /* B0 = 0.025023 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+#if 0
+ { /* f425 */
+ 30881, /* A1 = -1.884827 */
+ -32603, /* A2 = 0.994965 */
+ -496, /* B2 = -0.015144 */
+ 0, /* B1 = 0.000000 */
+ 496, /* B0 = 0.015144 */
+ 30880, /* A1 = -1.884766 */
+ -32692, /* A2 = 0.997711 */
+ 24767, /* B2 = 0.755859 */
+ -23290, /* B1 = -1.421509 */
+ 24767, /* B0 = 0.755859 */
+ 30967, /* A1 = -1.890076 */
+ -32694, /* A2 = 0.997772 */
+ 728, /* B2 = 0.022232 */
+ -691, /* B1 = -0.042194 */
+ 728, /* B0 = 0.022232 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+#else
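+ /* f425 - alternative coefficient set used instead of the block above */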
+ {
+ 30850,
+ -32534,
+ -504,
+ 0,
+ 504,
+ 30831,
+ -32669,
+ 24303,
+ -22080,
+ 24303,
+ 30994,
+ -32673,
+ 1905,
+ -1811,
+ 1905,
+ 5,
+ 129,
+ 17,
+ 0xff5
+ },
+#endif
+ { /* f425_450[] */
+ 30646, /* A1 = 1.870544 */
+ -32327, /* A2 = -0.986572 */
+ -287, /* B2 = -0.008769 */
+ 0, /* B1 = 0 */
+ 287, /* B0 = 0.008769 */
+ 30627, /* A1 = 1.869324 */
+ -32607, /* A2 = -0.995087 */
+ 13269, /* B2 = 0.404968 */
+ -12376, /* B1 = -0.755432 */
+ 13269, /* B0 = 0.404968 */
+ 30924, /* A1 = 1.887512 */
+ -32619, /* A2 = -0.995453 */
+ 19950, /* B2 = 0.608826 */
+ -18940, /* B1 = -1.156006 */
+ 19950, /* B0 = 0.608826 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f425_475[] */
+ 30396, /* A1 = 1.855225 */
+ -32014, /* A2 = -0.97699 */
+ -395, /* B2 = -0.012055 */
+ 0, /* B1 = 0 */
+ 395, /* B0 = 0.012055 */
+ 30343, /* A1 = 1.85199 */
+ -32482, /* A2 = -0.991302 */
+ 17823, /* B2 = 0.543945 */
+ -16431, /* B1 = -1.002869 */
+ 17823, /* B0 = 0.543945 */
+ 30872, /* A1 = 1.884338 */
+ -32516, /* A2 = -0.99231 */
+ 18124, /* B2 = 0.553101 */
+ -17246, /* B1 = -1.052673 */
+ 18124, /* B0 = 0.553101 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f435 */
+ 30796, /* A1 = -1.879639 */
+ -32603, /* A2 = 0.994965 */
+ -254, /* B2 = -0.007762 */
+ 0, /* B1 = 0.000000 */
+ 254, /* B0 = 0.007762 */
+ 30793, /* A1 = -1.879456 */
+ -32692, /* A2 = 0.997711 */
+ 18934, /* B2 = 0.577820 */
+ -17751, /* B1 = -1.083496 */
+ 18934, /* B0 = 0.577820 */
+ 30882, /* A1 = -1.884888 */
+ -32694, /* A2 = 0.997772 */
+ 1858, /* B2 = 0.056713 */
+ -1758, /* B1 = -0.107357 */
+ 1858, /* B0 = 0.056713 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f440_450[] */
+ 30641, /* A1 = 1.870239 */
+ -32458, /* A2 = -0.99057 */
+ -155, /* B2 = -0.004735 */
+ 0, /* B1 = 0 */
+ 155, /* B0 = 0.004735 */
+ 30631, /* A1 = 1.869568 */
+ -32630, /* A2 = -0.995789 */
+ 11453, /* B2 = 0.349548 */
+ -10666, /* B1 = -0.651001 */
+ 11453, /* B0 = 0.349548 */
+ 30810, /* A1 = 1.880554 */
+ -32634, /* A2 = -0.995941 */
+ 12237, /* B2 = 0.373474 */
+ -11588, /* B1 = -0.707336 */
+ 12237, /* B0 = 0.373474 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f440_480[] */
+ 30367, /* A1 = 1.853455 */
+ -32147, /* A2 = -0.981079 */
+ -495, /* B2 = -0.015113 */
+ 0, /* B1 = 0 */
+ 495, /* B0 = 0.015113 */
+ 30322, /* A1 = 1.850769 */
+ -32543, /* A2 = -0.993134 */
+ 10031, /* B2 = 0.306152 */
+ -9252, /* B1 = -0.564728 */
+ 10031, /* B0 = 0.306152 */
+ 30770, /* A1 = 1.878052 */
+ -32563, /* A2 = -0.993774 */
+ 22674, /* B2 = 0.691956 */
+ -21465, /* B1 = -1.31012 */
+ 22674, /* B0 = 0.691956 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f445 */
+ 30709, /* A1 = -1.874329 */
+ -32603, /* A2 = 0.994965 */
+ -83, /* B2 = -0.002545 */
+ 0, /* B1 = 0.000000 */
+ 83, /* B0 = 0.002545 */
+ 30704, /* A1 = -1.874084 */
+ -32692, /* A2 = 0.997711 */
+ 10641, /* B2 = 0.324738 */
+ -9947, /* B1 = -0.607147 */
+ 10641, /* B0 = 0.324738 */
+ 30796, /* A1 = -1.879639 */
+ -32694, /* A2 = 0.997772 */
+ 10079, /* B2 = 0.307587 */
+ 9513, /* B1 = 0.580688 */
+ 10079, /* B0 = 0.307587 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f450 */
+ 30664, /* A1 = -1.871643 */
+ -32603, /* A2 = 0.994965 */
+ -164, /* B2 = -0.005029 */
+ 0, /* B1 = 0.000000 */
+ 164, /* B0 = 0.005029 */
+ 30661, /* A1 = -1.871399 */
+ -32692, /* A2 = 0.997711 */
+ 15294, /* B2 = 0.466736 */
+ -14275, /* B1 = -0.871307 */
+ 15294, /* B0 = 0.466736 */
+ 30751, /* A1 = -1.876953 */
+ -32694, /* A2 = 0.997772 */
+ 3548, /* B2 = 0.108284 */
+ -3344, /* B1 = -0.204155 */
+ 3548, /* B0 = 0.108284 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f452 */
+ 30653, /* A1 = -1.870911 */
+ -32615, /* A2 = 0.995361 */
+ -209, /* B2 = -0.006382 */
+ 0, /* B1 = 0.000000 */
+ 209, /* B0 = 0.006382 */
+ 30647, /* A1 = -1.870605 */
+ -32702, /* A2 = 0.997986 */
+ 18971, /* B2 = 0.578979 */
+ -17716, /* B1 = -1.081299 */
+ 18971, /* B0 = 0.578979 */
+ 30738, /* A1 = -1.876099 */
+ -32702, /* A2 = 0.998016 */
+ 2967, /* B2 = 0.090561 */
+ -2793, /* B1 = -0.170502 */
+ 2967, /* B0 = 0.090561 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f475 */
+ 30437, /* A1 = -1.857727 */
+ -32603, /* A2 = 0.994965 */
+ -264, /* B2 = -0.008062 */
+ 0, /* B1 = 0.000000 */
+ 264, /* B0 = 0.008062 */
+ 30430, /* A1 = -1.857300 */
+ -32692, /* A2 = 0.997711 */
+ 21681, /* B2 = 0.661682 */
+ -20082, /* B1 = -1.225708 */
+ 21681, /* B0 = 0.661682 */
+ 30526, /* A1 = -1.863220 */
+ -32694, /* A2 = 0.997742 */
+ 1559, /* B2 = 0.047600 */
+ -1459, /* B1 = -0.089096 */
+ 1559, /* B0 = 0.047600 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f480_620[] */
+ 28975, /* A1 = 1.768494 */
+ -30955, /* A2 = -0.944672 */
+ -1026, /* B2 = -0.03133 */
+ 0, /* B1 = 0 */
+ 1026, /* B0 = 0.03133 */
+ 28613, /* A1 = 1.746399 */
+ -32089, /* A2 = -0.979309 */
+ 14214, /* B2 = 0.433807 */
+ -12202, /* B1 = -0.744812 */
+ 14214, /* B0 = 0.433807 */
+ 30243, /* A1 = 1.845947 */
+ -32238, /* A2 = -0.983856 */
+ 24825, /* B2 = 0.757629 */
+ -23402, /* B1 = -1.428345 */
+ 24825, /* B0 = 0.757629 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f494 */
+ 30257, /* A1 = -1.846741 */
+ -32605, /* A2 = 0.995056 */
+ -249, /* B2 = -0.007625 */
+ 0, /* B1 = 0.000000 */
+ 249, /* B0 = 0.007625 */
+ 30247, /* A1 = -1.846191 */
+ -32694, /* A2 = 0.997772 */
+ 18088, /* B2 = 0.552002 */
+ -16652, /* B1 = -1.016418 */
+ 18088, /* B0 = 0.552002 */
+ 30348, /* A1 = -1.852295 */
+ -32696, /* A2 = 0.997803 */
+ 2099, /* B2 = 0.064064 */
+ -1953, /* B1 = -0.119202 */
+ 2099, /* B0 = 0.064064 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f500 */
+ 30202, /* A1 = -1.843431 */
+ -32624, /* A2 = 0.995622 */
+ -413, /* B2 = -0.012622 */
+ 0, /* B1 = 0.000000 */
+ 413, /* B0 = 0.012622 */
+ 30191, /* A1 = -1.842721 */
+ -32714, /* A2 = 0.998364 */
+ 25954, /* B2 = 0.792057 */
+ -23890, /* B1 = -1.458131 */
+ 25954, /* B0 = 0.792057 */
+ 30296, /* A1 = -1.849172 */
+ -32715, /* A2 = 0.998397 */
+ 2007, /* B2 = 0.061264 */
+ -1860, /* B1 = -0.113568 */
+ 2007, /* B0 = 0.061264 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f520 */
+ 30001, /* A1 = -1.831116 */
+ -32613, /* A2 = 0.995270 */
+ -155, /* B2 = -0.004750 */
+ 0, /* B1 = 0.000000 */
+ 155, /* B0 = 0.004750 */
+ 29985, /* A1 = -1.830200 */
+ -32710, /* A2 = 0.998260 */
+ 6584, /* B2 = 0.200928 */
+ -6018, /* B1 = -0.367355 */
+ 6584, /* B0 = 0.200928 */
+ 30105, /* A1 = -1.837524 */
+ -32712, /* A2 = 0.998291 */
+ 23812, /* B2 = 0.726685 */
+ -21936, /* B1 = -1.338928 */
+ 23812, /* B0 = 0.726685 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f523 */
+ 29964, /* A1 = -1.828918 */
+ -32601, /* A2 = 0.994904 */
+ -101, /* B2 = -0.003110 */
+ 0, /* B1 = 0.000000 */
+ 101, /* B0 = 0.003110 */
+ 29949, /* A1 = -1.827942 */
+ -32700, /* A2 = 0.997925 */
+ 11041, /* B2 = 0.336975 */
+ -10075, /* B1 = -0.614960 */
+ 11041, /* B0 = 0.336975 */
+ 30070, /* A1 = -1.835388 */
+ -32702, /* A2 = 0.997986 */
+ 16762, /* B2 = 0.511536 */
+ -15437, /* B1 = -0.942230 */
+ 16762, /* B0 = 0.511536 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f525 */
+ 29936, /* A1 = -1.827209 */
+ -32584, /* A2 = 0.994415 */
+ -91, /* B2 = -0.002806 */
+ 0, /* B1 = 0.000000 */
+ 91, /* B0 = 0.002806 */
+ 29921, /* A1 = -1.826233 */
+ -32688, /* A2 = 0.997559 */
+ 11449, /* B2 = 0.349396 */
+ -10426, /* B1 = -0.636383 */
+ 11449, /* B0 = 0.349396 */
+ 30045, /* A1 = -1.833862 */
+ -32688, /* A2 = 0.997589 */
+ 13055, /* B2 = 0.398407 */
+ -12028, /* B1 = -0.734161 */
+ 13055, /* B0 = 0.398407 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f540_660[] */
+ 28499, /* A1 = 1.739441 */
+ -31129, /* A2 = -0.949982 */
+ -849, /* B2 = -0.025922 */
+ 0, /* B1 = 0 */
+ 849, /* B0 = 0.025922 */
+ 28128, /* A1 = 1.716797 */
+ -32130, /* A2 = -0.98056 */
+ 14556, /* B2 = 0.444214 */
+ -12251, /* B1 = -0.747772 */
+ 14556, /* B0 = 0.444214 */
+ 29667, /* A1 = 1.81073 */
+ -32244, /* A2 = -0.984039 */
+ 23038, /* B2 = 0.703064 */
+ -21358, /* B1 = -1.303589 */
+ 23040, /* B0 = 0.703125 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f587 */
+ 29271, /* A1 = -1.786560 */
+ -32599, /* A2 = 0.994873 */
+ -490, /* B2 = -0.014957 */
+ 0, /* B1 = 0.000000 */
+ 490, /* B0 = 0.014957 */
+ 29246, /* A1 = -1.785095 */
+ -32700, /* A2 = 0.997925 */
+ 28961, /* B2 = 0.883850 */
+ -25796, /* B1 = -1.574463 */
+ 28961, /* B0 = 0.883850 */
+ 29383, /* A1 = -1.793396 */
+ -32700, /* A2 = 0.997955 */
+ 1299, /* B2 = 0.039650 */
+ -1169, /* B1 = -0.071396 */
+ 1299, /* B0 = 0.039650 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f590 */
+ 29230, /* A1 = -1.784058 */
+ -32584, /* A2 = 0.994415 */
+ -418, /* B2 = -0.012757 */
+ 0, /* B1 = 0.000000 */
+ 418, /* B0 = 0.012757 */
+ 29206, /* A1 = -1.782593 */
+ -32688, /* A2 = 0.997559 */
+ 36556, /* B2 = 1.115601 */
+ -32478, /* B1 = -1.982300 */
+ 36556, /* B0 = 1.115601 */
+ 29345, /* A1 = -1.791077 */
+ -32688, /* A2 = 0.997589 */
+ 897, /* B2 = 0.027397 */
+ -808, /* B1 = -0.049334 */
+ 897, /* B0 = 0.027397 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f600 */
+ 29116, /* A1 = -1.777100 */
+ -32603, /* A2 = 0.994965 */
+ -165, /* B2 = -0.005039 */
+ 0, /* B1 = 0.000000 */
+ 165, /* B0 = 0.005039 */
+ 29089, /* A1 = -1.775452 */
+ -32708, /* A2 = 0.998199 */
+ 6963, /* B2 = 0.212494 */
+ -6172, /* B1 = -0.376770 */
+ 6963, /* B0 = 0.212494 */
+ 29237, /* A1 = -1.784485 */
+ -32710, /* A2 = 0.998230 */
+ 24197, /* B2 = 0.738464 */
+ -21657, /* B1 = -1.321899 */
+ 24197, /* B0 = 0.738464 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f660 */
+ 28376, /* A1 = -1.731934 */
+ -32567, /* A2 = 0.993896 */
+ -363, /* B2 = -0.011102 */
+ 0, /* B1 = 0.000000 */
+ 363, /* B0 = 0.011102 */
+ 28337, /* A1 = -1.729614 */
+ -32683, /* A2 = 0.997434 */
+ 21766, /* B2 = 0.664246 */
+ -18761, /* B1 = -1.145081 */
+ 21766, /* B0 = 0.664246 */
+ 28513, /* A1 = -1.740356 */
+ -32686, /* A2 = 0.997498 */
+ 2509, /* B2 = 0.076584 */
+ -2196, /* B1 = -0.134041 */
+ 2509, /* B0 = 0.076584 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f700 */
+ 27844, /* A1 = -1.699463 */
+ -32563, /* A2 = 0.993744 */
+ -366, /* B2 = -0.011187 */
+ 0, /* B1 = 0.000000 */
+ 366, /* B0 = 0.011187 */
+ 27797, /* A1 = -1.696655 */
+ -32686, /* A2 = 0.997498 */
+ 22748, /* B2 = 0.694214 */
+ -19235, /* B1 = -1.174072 */
+ 22748, /* B0 = 0.694214 */
+ 27995, /* A1 = -1.708740 */
+ -32688, /* A2 = 0.997559 */
+ 2964, /* B2 = 0.090477 */
+ -2546, /* B1 = -0.155449 */
+ 2964, /* B0 = 0.090477 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f740 */
+ 27297, /* A1 = -1.666077 */
+ -32551, /* A2 = 0.993408 */
+ -345, /* B2 = -0.010540 */
+ 0, /* B1 = 0.000000 */
+ 345, /* B0 = 0.010540 */
+ 27240, /* A1 = -1.662598 */
+ -32683, /* A2 = 0.997406 */
+ 22560, /* B2 = 0.688477 */
+ -18688, /* B1 = -1.140625 */
+ 22560, /* B0 = 0.688477 */
+ 27461, /* A1 = -1.676147 */
+ -32684, /* A2 = 0.997467 */
+ 3541, /* B2 = 0.108086 */
+ -2985, /* B1 = -0.182220 */
+ 3541, /* B0 = 0.108086 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f750 */
+ 27155, /* A1 = -1.657410 */
+ -32551, /* A2 = 0.993408 */
+ -462, /* B2 = -0.014117 */
+ 0, /* B1 = 0.000000 */
+ 462, /* B0 = 0.014117 */
+ 27097, /* A1 = -1.653870 */
+ -32683, /* A2 = 0.997406 */
+ 32495, /* B2 = 0.991699 */
+ -26776, /* B1 = -1.634338 */
+ 32495, /* B0 = 0.991699 */
+ 27321, /* A1 = -1.667542 */
+ -32684, /* A2 = 0.997467 */
+ 1835, /* B2 = 0.056007 */
+ -1539, /* B1 = -0.093948 */
+ 1835, /* B0 = 0.056007 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f750_1450[] */
+ 19298, /* A1 = 1.177917 */
+ -24471, /* A2 = -0.746796 */
+ -4152, /* B2 = -0.126709 */
+ 0, /* B1 = 0 */
+ 4152, /* B0 = 0.126709 */
+ 12902, /* A1 = 0.787476 */
+ -29091, /* A2 = -0.887817 */
+ 12491, /* B2 = 0.38121 */
+ -1794, /* B1 = -0.109528 */
+ 12494, /* B0 = 0.381317 */
+ 26291, /* A1 = 1.604736 */
+ -30470, /* A2 = -0.929901 */
+ 28859, /* B2 = 0.880737 */
+ -26084, /* B1 = -1.592102 */
+ 28861, /* B0 = 0.880798 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f770 */
+ 26867, /* A1 = -1.639832 */
+ -32551, /* A2 = 0.993408 */
+ -123, /* B2 = -0.003755 */
+ 0, /* B1 = 0.000000 */
+ 123, /* B0 = 0.003755 */
+ 26805, /* A1 = -1.636108 */
+ -32683, /* A2 = 0.997406 */
+ 17297, /* B2 = 0.527863 */
+ -14096, /* B1 = -0.860382 */
+ 17297, /* B0 = 0.527863 */
+ 27034, /* A1 = -1.650085 */
+ -32684, /* A2 = 0.997467 */
+ 12958, /* B2 = 0.395477 */
+ -10756, /* B1 = -0.656525 */
+ 12958, /* B0 = 0.395477 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f800 */
+ 26413, /* A1 = -1.612122 */
+ -32547, /* A2 = 0.993286 */
+ -223, /* B2 = -0.006825 */
+ 0, /* B1 = 0.000000 */
+ 223, /* B0 = 0.006825 */
+ 26342, /* A1 = -1.607849 */
+ -32686, /* A2 = 0.997498 */
+ 6391, /* B2 = 0.195053 */
+ -5120, /* B1 = -0.312531 */
+ 6391, /* B0 = 0.195053 */
+ 26593, /* A1 = -1.623108 */
+ -32688, /* A2 = 0.997559 */
+ 23681, /* B2 = 0.722717 */
+ -19328, /* B1 = -1.179688 */
+ 23681, /* B0 = 0.722717 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f816 */
+ 26168, /* A1 = -1.597209 */
+ -32528, /* A2 = 0.992706 */
+ -235, /* B2 = -0.007182 */
+ 0, /* B1 = 0.000000 */
+ 235, /* B0 = 0.007182 */
+ 26092, /* A1 = -1.592590 */
+ -32675, /* A2 = 0.997192 */
+ 20823, /* B2 = 0.635498 */
+ -16510, /* B1 = -1.007751 */
+ 20823, /* B0 = 0.635498 */
+ 26363, /* A1 = -1.609070 */
+ -32677, /* A2 = 0.997253 */
+ 6739, /* B2 = 0.205688 */
+ -5459, /* B1 = -0.333206 */
+ 6739, /* B0 = 0.205688 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f850 */
+ 25641, /* A1 = -1.565063 */
+ -32536, /* A2 = 0.992950 */
+ -121, /* B2 = -0.003707 */
+ 0, /* B1 = 0.000000 */
+ 121, /* B0 = 0.003707 */
+ 25560, /* A1 = -1.560059 */
+ -32684, /* A2 = 0.997437 */
+ 18341, /* B2 = 0.559753 */
+ -14252, /* B1 = -0.869904 */
+ 18341, /* B0 = 0.559753 */
+ 25837, /* A1 = -1.577026 */
+ -32684, /* A2 = 0.997467 */
+ 16679, /* B2 = 0.509003 */
+ -13232, /* B1 = -0.807648 */
+ 16679, /* B0 = 0.509003 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f857_1645[] */
+ 16415, /* A1 = 1.001953 */
+ -23669, /* A2 = -0.722321 */
+ -4549, /* B2 = -0.138847 */
+ 0, /* B1 = 0 */
+ 4549, /* B0 = 0.138847 */
+ 8456, /* A1 = 0.516174 */
+ -28996, /* A2 = -0.884918 */
+ 13753, /* B2 = 0.419724 */
+ -12, /* B1 = -0.000763 */
+ 13757, /* B0 = 0.419846 */
+ 24632, /* A1 = 1.503418 */
+ -30271, /* A2 = -0.923828 */
+ 29070, /* B2 = 0.887146 */
+ -25265, /* B1 = -1.542114 */
+ 29073, /* B0 = 0.887268 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f900 */
+ 24806, /* A1 = -1.514099 */
+ -32501, /* A2 = 0.991852 */
+ -326, /* B2 = -0.009969 */
+ 0, /* B1 = 0.000000 */
+ 326, /* B0 = 0.009969 */
+ 24709, /* A1 = -1.508118 */
+ -32659, /* A2 = 0.996674 */
+ 20277, /* B2 = 0.618835 */
+ -15182, /* B1 = -0.926636 */
+ 20277, /* B0 = 0.618835 */
+ 25022, /* A1 = -1.527222 */
+ -32661, /* A2 = 0.996735 */
+ 4320, /* B2 = 0.131836 */
+ -3331, /* B1 = -0.203339 */
+ 4320, /* B0 = 0.131836 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f900_1300[] */
+ 19776, /* A1 = 1.207092 */
+ -27437, /* A2 = -0.837341 */
+ -2666, /* B2 = -0.081371 */
+ 0, /* B1 = 0 */
+ 2666, /* B0 = 0.081371 */
+ 16302, /* A1 = 0.995026 */
+ -30354, /* A2 = -0.926361 */
+ 10389, /* B2 = 0.317062 */
+ -3327, /* B1 = -0.203064 */
+ 10389, /* B0 = 0.317062 */
+ 24299, /* A1 = 1.483154 */
+ -30930, /* A2 = -0.943909 */
+ 25016, /* B2 = 0.763428 */
+ -21171, /* B1 = -1.292236 */
+ 25016, /* B0 = 0.763428 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f935_1215[] */
+ 20554, /* A1 = 1.254517 */
+ -28764, /* A2 = -0.877838 */
+ -2048, /* B2 = -0.062515 */
+ 0, /* B1 = 0 */
+ 2048, /* B0 = 0.062515 */
+ 18209, /* A1 = 1.11145 */
+ -30951, /* A2 = -0.94458 */
+ 9390, /* B2 = 0.286575 */
+ -3955, /* B1 = -0.241455 */
+ 9390, /* B0 = 0.286575 */
+ 23902, /* A1 = 1.458923 */
+ -31286, /* A2 = -0.954803 */
+ 23252, /* B2 = 0.709595 */
+ -19132, /* B1 = -1.167725 */
+ 23252, /* B0 = 0.709595 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f941_1477[] */
+ 17543, /* A1 = 1.07074 */
+ -26220, /* A2 = -0.800201 */
+ -3298, /* B2 = -0.100647 */
+ 0, /* B1 = 0 */
+ 3298, /* B0 = 0.100647 */
+ 12423, /* A1 = 0.75827 */
+ -30036, /* A2 = -0.916626 */
+ 12651, /* B2 = 0.386078 */
+ -2444, /* B1 = -0.14917 */
+ 12653, /* B0 = 0.386154 */
+ 23518, /* A1 = 1.435425 */
+ -30745, /* A2 = -0.938293 */
+ 27282, /* B2 = 0.832581 */
+ -22529, /* B1 = -1.375122 */
+ 27286, /* B0 = 0.832703 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f942 */
+ 24104, /* A1 = -1.471252 */
+ -32507, /* A2 = 0.992065 */
+ -351, /* B2 = -0.010722 */
+ 0, /* B1 = 0.000000 */
+ 351, /* B0 = 0.010722 */
+ 23996, /* A1 = -1.464600 */
+ -32671, /* A2 = 0.997040 */
+ 22848, /* B2 = 0.697266 */
+ -16639, /* B1 = -1.015564 */
+ 22848, /* B0 = 0.697266 */
+ 24332, /* A1 = -1.485168 */
+ -32673, /* A2 = 0.997101 */
+ 4906, /* B2 = 0.149727 */
+ -3672, /* B1 = -0.224174 */
+ 4906, /* B0 = 0.149727 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f950 */
+ 23967, /* A1 = -1.462830 */
+ -32507, /* A2 = 0.992065 */
+ -518, /* B2 = -0.015821 */
+ 0, /* B1 = 0.000000 */
+ 518, /* B0 = 0.015821 */
+ 23856, /* A1 = -1.456055 */
+ -32671, /* A2 = 0.997040 */
+ 26287, /* B2 = 0.802246 */
+ -19031, /* B1 = -1.161560 */
+ 26287, /* B0 = 0.802246 */
+ 24195, /* A1 = -1.476746 */
+ -32673, /* A2 = 0.997101 */
+ 2890, /* B2 = 0.088196 */
+ -2151, /* B1 = -0.131317 */
+ 2890, /* B0 = 0.088196 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f950_1400[] */
+ 18294, /* A1 = 1.116638 */
+ -26962, /* A2 = -0.822845 */
+ -2914, /* B2 = -0.088936 */
+ 0, /* B1 = 0 */
+ 2914, /* B0 = 0.088936 */
+ 14119, /* A1 = 0.861786 */
+ -30227, /* A2 = -0.922455 */
+ 11466, /* B2 = 0.349945 */
+ -2833, /* B1 = -0.172943 */
+ 11466, /* B0 = 0.349945 */
+ 23431, /* A1 = 1.430115 */
+ -30828, /* A2 = -0.940796 */
+ 25331, /* B2 = 0.773071 */
+ -20911, /* B1 = -1.276367 */
+ 25331, /* B0 = 0.773071 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f975 */
+ 23521, /* A1 = -1.435608 */
+ -32489, /* A2 = 0.991516 */
+ -193, /* B2 = -0.005915 */
+ 0, /* B1 = 0.000000 */
+ 193, /* B0 = 0.005915 */
+ 23404, /* A1 = -1.428467 */
+ -32655, /* A2 = 0.996582 */
+ 17740, /* B2 = 0.541412 */
+ -12567, /* B1 = -0.767029 */
+ 17740, /* B0 = 0.541412 */
+ 23753, /* A1 = -1.449829 */
+ -32657, /* A2 = 0.996613 */
+ 9090, /* B2 = 0.277405 */
+ -6662, /* B1 = -0.406647 */
+ 9090, /* B0 = 0.277405 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f1000 */
+ 23071, /* A1 = -1.408203 */
+ -32489, /* A2 = 0.991516 */
+ -293, /* B2 = -0.008965 */
+ 0, /* B1 = 0.000000 */
+ 293, /* B0 = 0.008965 */
+ 22951, /* A1 = -1.400818 */
+ -32655, /* A2 = 0.996582 */
+ 5689, /* B2 = 0.173645 */
+ -3951, /* B1 = -0.241150 */
+ 5689, /* B0 = 0.173645 */
+ 23307, /* A1 = -1.422607 */
+ -32657, /* A2 = 0.996613 */
+ 18692, /* B2 = 0.570435 */
+ -13447, /* B1 = -0.820770 */
+ 18692, /* B0 = 0.570435 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f1020 */
+ 22701, /* A1 = -1.385620 */
+ -32474, /* A2 = 0.991058 */
+ -292, /* B2 = -0.008933 */
+ 0, /* B1 = 0.000000 */
+ 292, /* B0 = 0.008933 */
+ 22564, /* A1 = -1.377258 */
+ -32655, /* A2 = 0.996552 */
+ 20756, /* B2 = 0.633423 */
+ -14176, /* B1 = -0.865295 */
+ 20756, /* B0 = 0.633423 */
+ 22960, /* A1 = -1.401428 */
+ -32657, /* A2 = 0.996613 */
+ 6520, /* B2 = 0.198990 */
+ -4619, /* B1 = -0.281937 */
+ 6520, /* B0 = 0.198990 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f1050 */
+ 22142, /* A1 = -1.351501 */
+ -32474, /* A2 = 0.991058 */
+ -147, /* B2 = -0.004493 */
+ 0, /* B1 = 0.000000 */
+ 147, /* B0 = 0.004493 */
+ 22000, /* A1 = -1.342834 */
+ -32655, /* A2 = 0.996552 */
+ 15379, /* B2 = 0.469360 */
+ -10237, /* B1 = -0.624847 */
+ 15379, /* B0 = 0.469360 */
+ 22406, /* A1 = -1.367554 */
+ -32657, /* A2 = 0.996613 */
+ 17491, /* B2 = 0.533783 */
+ -12096, /* B1 = -0.738312 */
+ 17491, /* B0 = 0.533783 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f1100_1750[] */
+ 12973, /* A1 = 0.79184 */
+ -24916, /* A2 = -0.760376 */
+ 6655, /* B2 = 0.203102 */
+ 367, /* B1 = 0.0224 */
+ 6657, /* B0 = 0.203171 */
+ 5915, /* A1 = 0.361053 */
+ -29560, /* A2 = -0.90213 */
+ -7777, /* B2 = -0.23735 */
+ 0, /* B1 = 0 */
+ 7777, /* B0 = 0.23735 */
+ 20510, /* A1 = 1.251892 */
+ -30260, /* A2 = -0.923462 */
+ 26662, /* B2 = 0.81366 */
+ -20573, /* B1 = -1.255737 */
+ 26668, /* B0 = 0.813843 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f1140 */
+ 20392, /* A1 = -1.244629 */
+ -32460, /* A2 = 0.990601 */
+ -270, /* B2 = -0.008240 */
+ 0, /* B1 = 0.000000 */
+ 270, /* B0 = 0.008240 */
+ 20218, /* A1 = -1.234009 */
+ -32655, /* A2 = 0.996582 */
+ 21337, /* B2 = 0.651154 */
+ -13044, /* B1 = -0.796143 */
+ 21337, /* B0 = 0.651154 */
+ 20684, /* A1 = -1.262512 */
+ -32657, /* A2 = 0.996643 */
+ 8572, /* B2 = 0.261612 */
+ -5476, /* B1 = -0.334244 */
+ 8572, /* B0 = 0.261612 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f1200 */
+ 19159, /* A1 = -1.169373 */
+ -32456, /* A2 = 0.990509 */
+ -335, /* B2 = -0.010252 */
+ 0, /* B1 = 0.000000 */
+ 335, /* B0 = 0.010252 */
+ 18966, /* A1 = -1.157593 */
+ -32661, /* A2 = 0.996735 */
+ 6802, /* B2 = 0.207588 */
+ -3900, /* B1 = -0.238098 */
+ 6802, /* B0 = 0.207588 */
+ 19467, /* A1 = -1.188232 */
+ -32661, /* A2 = 0.996765 */
+ 25035, /* B2 = 0.764008 */
+ -15049, /* B1 = -0.918579 */
+ 25035, /* B0 = 0.764008 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f1209 */
+ 18976, /* A1 = -1.158264 */
+ -32439, /* A2 = 0.989990 */
+ -183, /* B2 = -0.005588 */
+ 0, /* B1 = 0.000000 */
+ 183, /* B0 = 0.005588 */
+ 18774, /* A1 = -1.145874 */
+ -32650, /* A2 = 0.996429 */
+ 15468, /* B2 = 0.472076 */
+ -8768, /* B1 = -0.535217 */
+ 15468, /* B0 = 0.472076 */
+ 19300, /* A1 = -1.177979 */
+ -32652, /* A2 = 0.996490 */
+ 19840, /* B2 = 0.605499 */
+ -11842, /* B1 = -0.722809 */
+ 19840, /* B0 = 0.605499 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f1330 */
+ 16357, /* A1 = -0.998413 */
+ -32368, /* A2 = 0.987793 */
+ -217, /* B2 = -0.006652 */
+ 0, /* B1 = 0.000000 */
+ 217, /* B0 = 0.006652 */
+ 16107, /* A1 = -0.983126 */
+ -32601, /* A2 = 0.994904 */
+ 11602, /* B2 = 0.354065 */
+ -5555, /* B1 = -0.339111 */
+ 11602, /* B0 = 0.354065 */
+ 16722, /* A1 = -1.020630 */
+ -32603, /* A2 = 0.994965 */
+ 15574, /* B2 = 0.475311 */
+ -8176, /* B1 = -0.499069 */
+ 15574, /* B0 = 0.475311 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f1336 */
+ 16234, /* A1 = -0.990875 */
+ 32404, /* A2 = -0.988922 */
+ -193, /* B2 = -0.005908 */
+ 0, /* B1 = 0.000000 */
+ 193, /* B0 = 0.005908 */
+ 15986, /* A1 = -0.975769 */
+ -32632, /* A2 = 0.995880 */
+ 18051, /* B2 = 0.550903 */
+ -8658, /* B1 = -0.528473 */
+ 18051, /* B0 = 0.550903 */
+ 16591, /* A1 = -1.012695 */
+ -32634, /* A2 = 0.995941 */
+ 15736, /* B2 = 0.480240 */
+ -8125, /* B1 = -0.495926 */
+ 15736, /* B0 = 0.480240 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f1366 */
+ 15564, /* A1 = -0.949982 */
+ -32404, /* A2 = 0.988922 */
+ -269, /* B2 = -0.008216 */
+ 0, /* B1 = 0.000000 */
+ 269, /* B0 = 0.008216 */
+ 15310, /* A1 = -0.934479 */
+ -32632, /* A2 = 0.995880 */
+ 10815, /* B2 = 0.330063 */
+ -4962, /* B1 = -0.302887 */
+ 10815, /* B0 = 0.330063 */
+ 15924, /* A1 = -0.971924 */
+ -32634, /* A2 = 0.995941 */
+ 18880, /* B2 = 0.576172 */
+ -9364, /* B1 = -0.571594 */
+ 18880, /* B0 = 0.576172 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f1380 */
+ 15247, /* A1 = -0.930603 */
+ -32397, /* A2 = 0.988708 */
+ -244, /* B2 = -0.007451 */
+ 0, /* B1 = 0.000000 */
+ 244, /* B0 = 0.007451 */
+ 14989, /* A1 = -0.914886 */
+ -32627, /* A2 = 0.995697 */
+ 18961, /* B2 = 0.578644 */
+ -8498, /* B1 = -0.518707 */
+ 18961, /* B0 = 0.578644 */
+ 15608, /* A1 = -0.952667 */
+ -32628, /* A2 = 0.995758 */
+ 11145, /* B2 = 0.340134 */
+ -5430, /* B1 = -0.331467 */
+ 11145, /* B0 = 0.340134 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f1400 */
+ 14780, /* A1 = -0.902130 */
+ -32393, /* A2 = 0.988586 */
+ -396, /* B2 = -0.012086 */
+ 0, /* B1 = 0.000000 */
+ 396, /* B0 = 0.012086 */
+ 14510, /* A1 = -0.885651 */
+ -32630, /* A2 = 0.995819 */
+ 6326, /* B2 = 0.193069 */
+ -2747, /* B1 = -0.167671 */
+ 6326, /* B0 = 0.193069 */
+ 15154, /* A1 = -0.924957 */
+ -32632, /* A2 = 0.995850 */
+ 23235, /* B2 = 0.709076 */
+ -10983, /* B1 = -0.670380 */
+ 23235, /* B0 = 0.709076 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f1477 */
+ 13005, /* A1 = -0.793793 */
+ -32368, /* A2 = 0.987823 */
+ -500, /* B2 = -0.015265 */
+ 0, /* B1 = 0.000000 */
+ 500, /* B0 = 0.015265 */
+ 12708, /* A1 = -0.775665 */
+ -32615, /* A2 = 0.995331 */
+ 11420, /* B2 = 0.348526 */
+ -4306, /* B1 = -0.262833 */
+ 11420, /* B0 = 0.348526 */
+ 13397, /* A1 = -0.817688 */
+ -32615, /* A2 = 0.995361 */
+ 9454, /* B2 = 0.288528 */
+ -3981, /* B1 = -0.243027 */
+ 9454, /* B0 = 0.288528 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f1600 */
+ 10046, /* A1 = -0.613190 */
+ -32331, /* A2 = 0.986694 */
+ -455, /* B2 = -0.013915 */
+ 0, /* B1 = 0.000000 */
+ 455, /* B0 = 0.013915 */
+ 9694, /* A1 = -0.591705 */
+ -32601, /* A2 = 0.994934 */
+ 6023, /* B2 = 0.183815 */
+ -1708, /* B1 = -0.104279 */
+ 6023, /* B0 = 0.183815 */
+ 10478, /* A1 = -0.639587 */
+ -32603, /* A2 = 0.994965 */
+ 22031, /* B2 = 0.672333 */
+ -7342, /* B1 = -0.448151 */
+ 22031, /* B0 = 0.672333 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f1633_1638[] */
+ 9181, /* A1 = 0.560394 */
+ -32256, /* A2 = -0.984375 */
+ -556, /* B2 = -0.016975 */
+ 0, /* B1 = 0 */
+ 556, /* B0 = 0.016975 */
+ 8757, /* A1 = 0.534515 */
+ -32574, /* A2 = -0.99408 */
+ 8443, /* B2 = 0.25769 */
+ -2135, /* B1 = -0.130341 */
+ 8443, /* B0 = 0.25769 */
+ 9691, /* A1 = 0.591522 */
+ -32574, /* A2 = -0.99411 */
+ 15446, /* B2 = 0.471375 */
+ -4809, /* B1 = -0.293579 */
+ 15446, /* B0 = 0.471375 */
+ 7, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f1800 */
+ 5076, /* A1 = -0.309875 */
+ -32304, /* A2 = 0.985840 */
+ -508, /* B2 = -0.015503 */
+ 0, /* B1 = 0.000000 */
+ 508, /* B0 = 0.015503 */
+ 4646, /* A1 = -0.283600 */
+ -32605, /* A2 = 0.995026 */
+ 6742, /* B2 = 0.205780 */
+ -878, /* B1 = -0.053635 */
+ 6742, /* B0 = 0.205780 */
+ 5552, /* A1 = -0.338928 */
+ -32605, /* A2 = 0.995056 */
+ 23667, /* B2 = 0.722260 */
+ -4297, /* B1 = -0.262329 */
+ 23667, /* B0 = 0.722260 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+ { /* f1860 */
+ 3569, /* A1 = -0.217865 */
+ -32292, /* A2 = 0.985504 */
+ -239, /* B2 = -0.007322 */
+ 0, /* B1 = 0.000000 */
+ 239, /* B0 = 0.007322 */
+ 3117, /* A1 = -0.190277 */
+ -32603, /* A2 = 0.994965 */
+ 18658, /* B2 = 0.569427 */
+ -1557, /* B1 = -0.095032 */
+ 18658, /* B0 = 0.569427 */
+ 4054, /* A1 = -0.247437 */
+ -32603, /* A2 = 0.994965 */
+ 18886, /* B2 = 0.576385 */
+ -2566, /* B1 = -0.156647 */
+ 18886, /* B0 = 0.576385 */
+ 5, /* Internal filter scaling */
+ 159, /* Minimum in-band energy threshold */
+ 21, /* 21/32 in-band to broad-band ratio */
+ 0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
+ },
+};
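
Each tone_table entry above is 19 words: three 5-word biquad sections (A1, A2, B2, B1, B0) followed by four control words (internal scaling, minimum in-band energy, in-band/broad-band ratio, shift mask); ixj_init_filter() below streams exactly these words to the DSP when a programmable frequency code is selected. The following is a minimal interpretation sketch, not part of the driver: the fixed-point scaling is inferred from the comments in the table (A1 quoted as -value/2^14, the remaining coefficients as value/2^15, A terms negated); whether the CT8020/8021 firmware applies exactly this convention is an assumption.

struct ixj_biquad { int a1, a2, b2, b1, b0; };	/* one 5-word section of an entry */

static double ixj_biquad_step(const struct ixj_biquad *c, double x,
			      double xm1, double xm2, double ym1, double ym2)
{
	double a1 = -c->a1 / 16384.0;	/* quoted "A1" in the table comments */
	double a2 = -c->a2 / 32768.0;	/* quoted "A2" */
	double b0 =  c->b0 / 32768.0;
	double b1 =  c->b1 / 32768.0;
	double b2 =  c->b2 / 32768.0;

	/* direct-form I biquad:
	 * y[n] = b0*x[n] + b1*x[n-1] + b2*x[n-2] - a1*y[n-1] - a2*y[n-2] */
	return b0 * x + b1 * xm1 + b2 * xm2 - a1 * ym1 - a2 * ym2;
}
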
+static int ixj_init_filter(IXJ *j, IXJ_FILTER * jf)
+{
+ unsigned short cmd;
+ int cnt, max;
+
+ if (jf->filter > 3) {
+ return -1;
+ }
+	if (ixj_WriteDSPCommand(0x5154 + jf->filter, j))	/* Select Filter */
+		return -1;
+	if (!jf->enable) {
+		if (ixj_WriteDSPCommand(0x5152, j))	/* Disable Filter */
+			return -1;
+		else
+			return 0;
+	} else {
+		if (ixj_WriteDSPCommand(0x5153, j))	/* Enable Filter */
+			return -1;
+ /* Select the filter (f0 - f3) to use. */
+ if (ixj_WriteDSPCommand(0x5154 + jf->filter, j))
+ return -1;
+ }
+ if (jf->freq < 12 && jf->freq > 3) {
+ /* Select the frequency for the selected filter. */
+ if (ixj_WriteDSPCommand(0x5170 + jf->freq, j))
+ return -1;
+ } else if (jf->freq > 11) {
+ /* We need to load a programmable filter set for undefined */
+ /* frequencies. So we will point the filter to a programmable set. */
+ /* Since there are only 4 filters and 4 programmable sets, we will */
+ /* just point the filter to the same number set and program it for the */
+ /* frequency we want. */
+ if (ixj_WriteDSPCommand(0x5170 + jf->filter, j))
+ return -1;
+ if (j->ver.low != 0x12) {
+ cmd = 0x515B;
+ max = 19;
+ } else {
+ cmd = 0x515E;
+ max = 15;
+ }
+ if (ixj_WriteDSPCommand(cmd, j))
+ return -1;
+ for (cnt = 0; cnt < max; cnt++) {
+ if (ixj_WriteDSPCommand(tone_table[jf->freq - 12][cnt], j))
+ return -1;
+ }
+ }
+ j->filter_en[jf->filter] = jf->enable;
+ return 0;
+}
+
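
A hypothetical caller sketch for ixj_init_filter(), not taken from the driver: the IXJ_FILTER layout is inferred from the fields the function dereferences (filter, freq, enable), and the frequency code and card pointer j are illustrative only.

	IXJ_FILTER jf = {
		.filter = 0,	/* hardware filter f0..f3 */
		.freq   = 12,	/* codes >= 12 load a programmable tone_table entry */
		.enable = 1,
	};

	/* returns 0 on success, -1 if any DSP command write fails */
	int err = ixj_init_filter(j, &jf);
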
+static int ixj_init_filter_raw(IXJ *j, IXJ_FILTER_RAW * jfr)
+{
+ unsigned short cmd;
+ int cnt, max;
+ if (jfr->filter > 3) {
+ return -1;
+ }
+ if (ixj_WriteDSPCommand(0x5154 + jfr->filter, j)) /* Select Filter */
+ return -1;
+
+ if (!jfr->enable) {
+ if (ixj_WriteDSPCommand(0x5152, j)) /* Disable Filter */
+ return -1;
+ else
+ return 0;
+ } else {
+ if (ixj_WriteDSPCommand(0x5153, j)) /* Enable Filter */
+ return -1;
+ /* Select the filter (f0 - f3) to use. */
+ if (ixj_WriteDSPCommand(0x5154 + jfr->filter, j))
+ return -1;
+ }
+ /* We need to load a programmable filter set for undefined */
+ /* frequencies. So we will point the filter to a programmable set. */
+ /* Since there are only 4 filters and 4 programmable sets, we will */
+ /* just point the filter to the same number set and program it for the */
+ /* frequency we want. */
+ if (ixj_WriteDSPCommand(0x5170 + jfr->filter, j))
+ return -1;
+ if (j->ver.low != 0x12) {
+ cmd = 0x515B;
+ max = 19;
+ } else {
+ cmd = 0x515E;
+ max = 15;
+ }
+ if (ixj_WriteDSPCommand(cmd, j))
+ return -1;
+ for (cnt = 0; cnt < max; cnt++) {
+ if (ixj_WriteDSPCommand(jfr->coeff[cnt], j))
+ return -1;
+ }
+ j->filter_en[jfr->filter] = jfr->enable;
+ return 0;
+}
+
+static int ixj_init_tone(IXJ *j, IXJ_TONE * ti)
+{
+ int freq0, freq1;
+ unsigned short data;
+ if (ti->freq0) {
+ freq0 = ti->freq0;
+ } else {
+ freq0 = 0x7FFF;
+ }
+
+ if (ti->freq1) {
+ freq1 = ti->freq1;
+ } else {
+ freq1 = 0x7FFF;
+ }
+
+	if (ti->tone_index > 12 && ti->tone_index < 28) {
+ if (ixj_WriteDSPCommand(0x6800 + ti->tone_index, j))
+ return -1;
+ if (ixj_WriteDSPCommand(0x6000 + (ti->gain1 << 4) + ti->gain0, j))
+ return -1;
+ data = freq0;
+ if (ixj_WriteDSPCommand(data, j))
+ return -1;
+ data = freq1;
+ if (ixj_WriteDSPCommand(data, j))
+ return -1;
+ }
+ return freq0;
+}
+
diff --git a/drivers/staging/telephony/ixj.h b/drivers/staging/telephony/ixj.h
new file mode 100644
index 000000000000..2c841134f61c
--- /dev/null
+++ b/drivers/staging/telephony/ixj.h
@@ -0,0 +1,1322 @@
+/******************************************************************************
+ * ixj.h
+ *
+ *
+ * Device Driver for Quicknet Technologies, Inc.'s Telephony cards
+ * including the Internet PhoneJACK, Internet PhoneJACK Lite,
+ * Internet PhoneJACK PCI, Internet LineJACK, Internet PhoneCARD and
+ * SmartCABLE
+ *
+ * (c) Copyright 1999-2001 Quicknet Technologies, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Author: Ed Okerson, <eokerson@quicknet.net>
+ *
+ * Contributors: Greg Herlein, <gherlein@quicknet.net>
+ * David W. Erhart, <derhart@quicknet.net>
+ * John Sellers, <jsellers@quicknet.net>
+ * Mike Preston, <mpreston@quicknet.net>
+ *
+ * More information about the hardware related to this driver can be found
+ * at our website: http://www.quicknet.net
+ *
+ * Fixes:
+ *
+ * IN NO EVENT SHALL QUICKNET TECHNOLOGIES, INC. BE LIABLE TO ANY PARTY FOR
+ * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
+ * OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF QUICKNET
+ * TECHNOLOGIES, INC.HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * QUICKNET TECHNOLOGIES, INC. SPECIFICALLY DISCLAIMS ANY WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
+ * ON AN "AS IS" BASIS, AND QUICKNET TECHNOLOGIES, INC. HAS NO OBLIGATION
+ * TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+ *
+ *****************************************************************************/
+#define IXJ_VERSION 3031
+
+#include <linux/types.h>
+
+#include <linux/ixjuser.h>
+#include <linux/phonedev.h>
+
+typedef __u16 WORD;
+typedef __u32 DWORD;
+typedef __u8 BYTE;
+
+#ifndef IXJMAX
+#define IXJMAX 16
+#endif
+
+/******************************************************************************
+*
+* This structure when unioned with the structures below makes simple byte
+* access to the registers easier.
+*
+******************************************************************************/
+typedef struct {
+ unsigned char low;
+ unsigned char high;
+} BYTES;
+
+typedef union {
+ BYTES bytes;
+ short word;
+} IXJ_WORD;
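
A minimal illustration of the byte-access idea the preceding comment describes (hypothetical variable; low/high land on the expected halves of the 16-bit DSP word only on a little-endian host, which these cards assume):

	IXJ_WORD w;

	w.word = 0x1234;	/* write the 16-bit DSP word at once... */
	w.bytes.low  = 0x34;	/* ...or compose it one register byte   */
	w.bytes.high = 0x12;	/* at a time                             */
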
+
+typedef struct{
+ unsigned int b0:1;
+ unsigned int b1:1;
+ unsigned int b2:1;
+ unsigned int b3:1;
+ unsigned int b4:1;
+ unsigned int b5:1;
+ unsigned int b6:1;
+ unsigned int b7:1;
+} IXJ_CBITS;
+
+typedef union{
+ IXJ_CBITS cbits;
+ char cbyte;
+} IXJ_CBYTE;
+
+/******************************************************************************
+*
+* This structure represents the Hardware Control Register of the CT8020/8021
+* The CT8020 is used in the Internet PhoneJACK, and the 8021 in the
+* Internet LineJACK
+*
+******************************************************************************/
+typedef struct {
+ unsigned int rxrdy:1;
+ unsigned int txrdy:1;
+ unsigned int status:1;
+ unsigned int auxstatus:1;
+ unsigned int rxdma:1;
+ unsigned int txdma:1;
+ unsigned int rxburst:1;
+ unsigned int txburst:1;
+ unsigned int dmadir:1;
+ unsigned int cont:1;
+ unsigned int irqn:1;
+ unsigned int t:5;
+} HCRBIT;
+
+typedef union {
+ HCRBIT bits;
+ BYTES bytes;
+} HCR;
+
+/******************************************************************************
+*
+* This structure represents the Hardware Status Register of the CT8020/8021
+* The CT8020 is used in the Internet PhoneJACK, and the 8021 in the
+* Internet LineJACK
+*
+******************************************************************************/
+typedef struct {
+ unsigned int controlrdy:1;
+ unsigned int auxctlrdy:1;
+ unsigned int statusrdy:1;
+ unsigned int auxstatusrdy:1;
+ unsigned int rxrdy:1;
+ unsigned int txrdy:1;
+ unsigned int restart:1;
+ unsigned int irqn:1;
+ unsigned int rxdma:1;
+ unsigned int txdma:1;
+ unsigned int cohostshutdown:1;
+ unsigned int t:5;
+} HSRBIT;
+
+typedef union {
+ HSRBIT bits;
+ BYTES bytes;
+} HSR;
+
+/******************************************************************************
+*
+* This structure represents the General Purpose IO Register of the CT8020/8021
+* The CT8020 is used in the Internet PhoneJACK, and the 8021 in the
+* Internet LineJACK
+*
+******************************************************************************/
+typedef struct {
+ unsigned int x:1;
+ unsigned int gpio1:1;
+ unsigned int gpio2:1;
+ unsigned int gpio3:1;
+ unsigned int gpio4:1;
+ unsigned int gpio5:1;
+ unsigned int gpio6:1;
+ unsigned int gpio7:1;
+ unsigned int xread:1;
+ unsigned int gpio1read:1;
+ unsigned int gpio2read:1;
+ unsigned int gpio3read:1;
+ unsigned int gpio4read:1;
+ unsigned int gpio5read:1;
+ unsigned int gpio6read:1;
+ unsigned int gpio7read:1;
+} GPIOBIT;
+
+typedef union {
+ GPIOBIT bits;
+ BYTES bytes;
+ unsigned short word;
+} GPIO;
+
+/******************************************************************************
+*
+* This structure represents the Line Monitor status response
+*
+******************************************************************************/
+typedef struct {
+ unsigned int digit:4;
+ unsigned int cpf_valid:1;
+ unsigned int dtmf_valid:1;
+ unsigned int peak:1;
+ unsigned int z:1;
+ unsigned int f0:1;
+ unsigned int f1:1;
+ unsigned int f2:1;
+ unsigned int f3:1;
+ unsigned int frame:4;
+} LMON;
+
+typedef union {
+ LMON bits;
+ BYTES bytes;
+} DTMF;
+
+typedef struct {
+ unsigned int z:7;
+ unsigned int dtmf_en:1;
+ unsigned int y:4;
+ unsigned int F3:1;
+ unsigned int F2:1;
+ unsigned int F1:1;
+ unsigned int F0:1;
+} CP;
+
+typedef union {
+ CP bits;
+ BYTES bytes;
+} CPTF;
+
+/******************************************************************************
+*
+* This structure represents the Status Control Register on the Internet
+* LineJACK
+*
+******************************************************************************/
+typedef struct {
+ unsigned int c0:1;
+ unsigned int c1:1;
+ unsigned int stereo:1;
+ unsigned int daafsyncen:1;
+ unsigned int led1:1;
+ unsigned int led2:1;
+ unsigned int led3:1;
+ unsigned int led4:1;
+} PSCRWI; /* Internet LineJACK and Internet PhoneJACK Lite */
+
+typedef struct {
+ unsigned int eidp:1;
+ unsigned int eisd:1;
+ unsigned int x:6;
+} PSCRWP; /* Internet PhoneJACK PCI */
+
+typedef union {
+ PSCRWI bits;
+ PSCRWP pcib;
+ char byte;
+} PLD_SCRW;
+
+typedef struct {
+ unsigned int c0:1;
+ unsigned int c1:1;
+ unsigned int x:1;
+ unsigned int d0ee:1;
+ unsigned int mixerbusy:1;
+ unsigned int sci:1;
+ unsigned int dspflag:1;
+ unsigned int daaflag:1;
+} PSCRRI;
+
+typedef struct {
+ unsigned int eidp:1;
+ unsigned int eisd:1;
+ unsigned int x:4;
+ unsigned int dspflag:1;
+ unsigned int det:1;
+} PSCRRP;
+
+typedef union {
+ PSCRRI bits;
+ PSCRRP pcib;
+ char byte;
+} PLD_SCRR;
+
+/******************************************************************************
+*
+* These structures represent the SLIC Control Register on the
+* Internet LineJACK
+*
+******************************************************************************/
+typedef struct {
+ unsigned int c1:1;
+ unsigned int c2:1;
+ unsigned int c3:1;
+ unsigned int b2en:1;
+ unsigned int spken:1;
+ unsigned int rly1:1;
+ unsigned int rly2:1;
+ unsigned int rly3:1;
+} PSLICWRITE;
+
+typedef struct {
+ unsigned int state:3;
+ unsigned int b2en:1;
+ unsigned int spken:1;
+ unsigned int c3:1;
+ unsigned int potspstn:1;
+ unsigned int det:1;
+} PSLICREAD;
+
+typedef struct {
+ unsigned int c1:1;
+ unsigned int c2:1;
+ unsigned int c3:1;
+ unsigned int b2en:1;
+ unsigned int e1:1;
+ unsigned int mic:1;
+ unsigned int spk:1;
+ unsigned int x:1;
+} PSLICPCI;
+
+typedef union {
+ PSLICPCI pcib;
+ PSLICWRITE bits;
+ PSLICREAD slic;
+ char byte;
+} PLD_SLICW;
+
+typedef union {
+ PSLICPCI pcib;
+ PSLICREAD bits;
+ char byte;
+} PLD_SLICR;
+
+/******************************************************************************
+*
+* These structures represent the Clock Control Register on the
+* Internet LineJACK
+*
+******************************************************************************/
+typedef struct {
+ unsigned int clk0:1;
+ unsigned int clk1:1;
+ unsigned int clk2:1;
+ unsigned int x0:1;
+ unsigned int slic_e1:1;
+ unsigned int x1:1;
+ unsigned int x2:1;
+ unsigned int x3:1;
+} PCLOCK;
+
+typedef union {
+ PCLOCK bits;
+ char byte;
+} PLD_CLOCK;
+
+/******************************************************************************
+*
+* These structures deal with the mixer on the Internet LineJACK
+*
+******************************************************************************/
+
+typedef struct {
+ unsigned short vol[10];
+ unsigned int recsrc;
+ unsigned int modcnt;
+ unsigned short micpreamp;
+} MIX;
+
+/******************************************************************************
+*
+* These structures deal with the control logic on the Internet PhoneCARD
+*
+******************************************************************************/
+typedef struct {
+ unsigned int x0:4; /* unused bits */
+
+ unsigned int ed:1; /* Event Detect */
+
+ unsigned int drf:1; /* SmartCABLE Removal Flag 1=no cable */
+
+ unsigned int dspf:1; /* DSP Flag 1=DSP Ready */
+
+ unsigned int crr:1; /* Control Register Ready */
+
+} COMMAND_REG1;
+
+typedef union {
+ COMMAND_REG1 bits;
+ unsigned char byte;
+} PCMCIA_CR1;
+
+typedef struct {
+ unsigned int x0:4; /* unused bits */
+
+ unsigned int rstc:1; /* SmartCABLE Reset */
+
+ unsigned int pwr:1; /* SmartCABLE Power */
+
+ unsigned int x1:2; /* unused bits */
+
+} COMMAND_REG2;
+
+typedef union {
+ COMMAND_REG2 bits;
+ unsigned char byte;
+} PCMCIA_CR2;
+
+typedef struct {
+ unsigned int addr:5; /* R/W SmartCABLE Register Address */
+
+ unsigned int rw:1; /* Read / Write flag */
+
+ unsigned int dev:2; /* 2 bit SmartCABLE Device Address */
+
+} CONTROL_REG;
+
+typedef union {
+ CONTROL_REG bits;
+ unsigned char byte;
+} PCMCIA_SCCR;
+
+typedef struct {
+ unsigned int hsw:1;
+ unsigned int det:1;
+ unsigned int led2:1;
+ unsigned int led1:1;
+ unsigned int ring1:1;
+ unsigned int ring0:1;
+ unsigned int x:1;
+ unsigned int powerdown:1;
+} PCMCIA_SLIC_REG;
+
+typedef union {
+ PCMCIA_SLIC_REG bits;
+ unsigned char byte;
+} PCMCIA_SLIC;
+
+typedef struct {
+ unsigned int cpd:1; /* Chip Power Down */
+
+ unsigned int mpd:1; /* MIC Bias Power Down */
+
+ unsigned int hpd:1; /* Handset Drive Power Down */
+
+ unsigned int lpd:1; /* Line Drive Power Down */
+
+ unsigned int spd:1; /* Speaker Drive Power Down */
+
+ unsigned int x:2; /* unused bits */
+
+ unsigned int sr:1; /* Software Reset */
+
+} Si3CONTROL1;
+
+typedef union {
+ Si3CONTROL1 bits;
+ unsigned char byte;
+} Si3C1;
+
+typedef struct {
+ unsigned int al:1; /* Analog Loopback DAC analog -> ADC analog */
+
+ unsigned int dl2:1; /* Digital Loopback DAC -> ADC one bit */
+
+ unsigned int dl1:1; /* Digital Loopback ADC -> DAC one bit */
+
+ unsigned int pll:1; /* 1 = div 10, 0 = div 5 */
+
+ unsigned int hpd:1; /* HPF disable */
+
+ unsigned int x:3; /* unused bits */
+
+} Si3CONTROL2;
+
+typedef union {
+ Si3CONTROL2 bits;
+ unsigned char byte;
+} Si3C2;
+
+typedef struct {
+ unsigned int iir:1; /* 1 enables IIR, 0 enables FIR */
+
+ unsigned int him:1; /* Handset Input Mute */
+
+ unsigned int mcm:1; /* MIC In Mute */
+
+ unsigned int mcg:2; /* MIC In Gain */
+
+ unsigned int lim:1; /* Line In Mute */
+
+ unsigned int lig:2; /* Line In Gain */
+
+} Si3RXGAIN;
+
+typedef union {
+ Si3RXGAIN bits;
+ unsigned char byte;
+} Si3RXG;
+
+typedef struct {
+ unsigned int hom:1; /* Handset Out Mute */
+
+ unsigned int lom:1; /* Line Out Mute */
+
+ unsigned int rxg:5; /* RX PGA Gain */
+
+ unsigned int x:1; /* unused bit */
+
+} Si3ADCVOLUME;
+
+typedef union {
+ Si3ADCVOLUME bits;
+ unsigned char byte;
+} Si3ADC;
+
+typedef struct {
+ unsigned int srm:1; /* Speaker Right Mute */
+
+ unsigned int slm:1; /* Speaker Left Mute */
+
+ unsigned int txg:5; /* TX PGA Gain */
+
+ unsigned int x:1; /* unused bit */
+
+} Si3DACVOLUME;
+
+typedef union {
+ Si3DACVOLUME bits;
+ unsigned char byte;
+} Si3DAC;
+
+typedef struct {
+ unsigned int x:5; /* unused bit */
+
+ unsigned int losc:1; /* Line Out Short Circuit */
+
+ unsigned int srsc:1; /* Speaker Right Short Circuit */
+
+ unsigned int slsc:1; /* Speaker Left Short Circuit */
+
+} Si3STATUSREPORT;
+
+typedef union {
+ Si3STATUSREPORT bits;
+ unsigned char byte;
+} Si3STAT;
+
+typedef struct {
+ unsigned int sot:2; /* Speaker Out Attenuation */
+
+ unsigned int lot:2; /* Line Out Attenuation */
+
+ unsigned int x:4; /* unused bits */
+
+} Si3ANALOGATTN;
+
+typedef union {
+ Si3ANALOGATTN bits;
+ unsigned char byte;
+} Si3AATT;
+
+/******************************************************************************
+*
+* These structures deal with the DAA on the Internet LineJACK
+*
+******************************************************************************/
+
+typedef struct _DAA_REGS {
+ /*----------------------------------------------- */
+ /* SOP Registers */
+ /* */
+ BYTE bySOP;
+
+ union _SOP_REGS {
+ struct _SOP {
+ union /* SOP - CR0 Register */
+ {
+ BYTE reg;
+ struct _CR0_BITREGS {
+ BYTE CLK_EXT:1; /* cr0[0:0] */
+
+ BYTE RIP:1; /* cr0[1:1] */
+
+ BYTE AR:1; /* cr0[2:2] */
+
+ BYTE AX:1; /* cr0[3:3] */
+
+ BYTE FRR:1; /* cr0[4:4] */
+
+ BYTE FRX:1; /* cr0[5:5] */
+
+ BYTE IM:1; /* cr0[6:6] */
+
+ BYTE TH:1; /* cr0[7:7] */
+
+ } bitreg;
+ } cr0;
+
+ union /* SOP - CR1 Register */
+ {
+ BYTE reg;
+ struct _CR1_REGS {
+ BYTE RM:1; /* cr1[0:0] */
+
+ BYTE RMR:1; /* cr1[1:1] */
+
+ BYTE No_auto:1; /* cr1[2:2] */
+
+ BYTE Pulse:1; /* cr1[3:3] */
+
+ BYTE P_Tone1:1; /* cr1[4:4] */
+
+ BYTE P_Tone2:1; /* cr1[5:5] */
+
+ BYTE E_Tone1:1; /* cr1[6:6] */
+
+ BYTE E_Tone2:1; /* cr1[7:7] */
+
+ } bitreg;
+ } cr1;
+
+ union /* SOP - CR2 Register */
+ {
+ BYTE reg;
+ struct _CR2_REGS {
+ BYTE Call_II:1; /* CR2[0:0] */
+
+ BYTE Call_I:1; /* CR2[1:1] */
+
+ BYTE Call_en:1; /* CR2[2:2] */
+
+ BYTE Call_pon:1; /* CR2[3:3] */
+
+ BYTE IDR:1; /* CR2[4:4] */
+
+ BYTE COT_R:3; /* CR2[5:7] */
+
+ } bitreg;
+ } cr2;
+
+ union /* SOP - CR3 Register */
+ {
+ BYTE reg;
+ struct _CR3_REGS {
+ BYTE DHP_X:1; /* CR3[0:0] */
+
+ BYTE DHP_R:1; /* CR3[1:1] */
+
+ BYTE Cal_pctl:1; /* CR3[2:2] */
+
+ BYTE SEL:1; /* CR3[3:3] */
+
+ BYTE TestLoops:4; /* CR3[4:7] */
+
+ } bitreg;
+ } cr3;
+
+ union /* SOP - CR4 Register */
+ {
+ BYTE reg;
+ struct _CR4_REGS {
+ BYTE Fsc_en:1; /* CR4[0:0] */
+
+ BYTE Int_en:1; /* CR4[1:1] */
+
+ BYTE AGX:2; /* CR4[2:3] */
+
+ BYTE AGR_R:2; /* CR4[4:5] */
+
+ BYTE AGR_Z:2; /* CR4[6:7] */
+
+ } bitreg;
+ } cr4;
+
+ union /* SOP - CR5 Register */
+ {
+ BYTE reg;
+ struct _CR5_REGS {
+ BYTE V_0:1; /* CR5[0:0] */
+
+ BYTE V_1:1; /* CR5[1:1] */
+
+ BYTE V_2:1; /* CR5[2:2] */
+
+ BYTE V_3:1; /* CR5[3:3] */
+
+ BYTE V_4:1; /* CR5[4:4] */
+
+ BYTE V_5:1; /* CR5[5:5] */
+
+ BYTE V_6:1; /* CR5[6:6] */
+
+ BYTE V_7:1; /* CR5[7:7] */
+
+ } bitreg;
+ } cr5;
+
+ union /* SOP - CR6 Register */
+ {
+ BYTE reg;
+ struct _CR6_REGS {
+ BYTE reserved:8; /* CR6[0:7] */
+
+ } bitreg;
+ } cr6;
+
+ union /* SOP - CR7 Register */
+ {
+ BYTE reg;
+ struct _CR7_REGS {
+ BYTE reserved:8; /* CR7[0:7] */
+
+ } bitreg;
+ } cr7;
+ } SOP;
+
+ BYTE ByteRegs[sizeof(struct _SOP)];
+
+ } SOP_REGS;
+
+ /* DAA_REGS.SOP_REGS.SOP.CR5.reg */
+ /* DAA_REGS.SOP_REGS.SOP.CR5.bitreg */
+ /* DAA_REGS.SOP_REGS.SOP.CR5.bitreg.V_2 */
+ /* DAA_REGS.SOP_REGS.ByteRegs[5] */
+
+ /*----------------------------------------------- */
+ /* XOP Registers */
+ /* */
+ BYTE byXOP;
+
+ union _XOP_REGS {
+ struct _XOP {
+ union XOPXR0/* XOP - XR0 Register - Read values */
+ {
+ BYTE reg;
+ struct _XR0_BITREGS {
+ BYTE SI_0:1; /* XR0[0:0] - Read */
+
+ BYTE SI_1:1; /* XR0[1:1] - Read */
+
+ BYTE VDD_OK:1; /* XR0[2:2] - Read */
+
+ BYTE Caller_ID:1; /* XR0[3:3] - Read */
+
+ BYTE RING:1; /* XR0[4:4] - Read */
+
+ BYTE Cadence:1; /* XR0[5:5] - Read */
+
+ BYTE Wake_up:1; /* XR0[6:6] - Read */
+
+ BYTE RMR:1; /* XR0[7:7] - Read */
+
+ } bitreg;
+ } xr0;
+
+ union /* XOP - XR1 Register */
+ {
+ BYTE reg;
+ struct _XR1_BITREGS {
+ BYTE M_SI_0:1; /* XR1[0:0] */
+
+ BYTE M_SI_1:1; /* XR1[1:1] */
+
+ BYTE M_VDD_OK:1; /* XR1[2:2] */
+
+ BYTE M_Caller_ID:1; /* XR1[3:3] */
+
+ BYTE M_RING:1; /* XR1[4:4] */
+
+ BYTE M_Cadence:1; /* XR1[5:5] */
+
+ BYTE M_Wake_up:1; /* XR1[6:6] */
+
+ BYTE unused:1; /* XR1[7:7] */
+
+ } bitreg;
+ } xr1;
+
+ union /* XOP - XR2 Register */
+ {
+ BYTE reg;
+ struct _XR2_BITREGS {
+ BYTE CTO0:1; /* XR2[0:0] */
+
+ BYTE CTO1:1; /* XR2[1:1] */
+
+ BYTE CTO2:1; /* XR2[2:2] */
+
+ BYTE CTO3:1; /* XR2[3:3] */
+
+ BYTE CTO4:1; /* XR2[4:4] */
+
+ BYTE CTO5:1; /* XR2[5:5] */
+
+ BYTE CTO6:1; /* XR2[6:6] */
+
+ BYTE CTO7:1; /* XR2[7:7] */
+
+ } bitreg;
+ } xr2;
+
+ union /* XOP - XR3 Register */
+ {
+ BYTE reg;
+ struct _XR3_BITREGS {
+ BYTE DCR0:1; /* XR3[0:0] */
+
+ BYTE DCR1:1; /* XR3[1:1] */
+
+ BYTE DCI:1; /* XR3[2:2] */
+
+ BYTE DCU0:1; /* XR3[3:3] */
+
+ BYTE DCU1:1; /* XR3[4:4] */
+
+ BYTE B_off:1; /* XR3[5:5] */
+
+ BYTE AGB0:1; /* XR3[6:6] */
+
+ BYTE AGB1:1; /* XR3[7:7] */
+
+ } bitreg;
+ } xr3;
+
+ union /* XOP - XR4 Register */
+ {
+ BYTE reg;
+ struct _XR4_BITREGS {
+ BYTE C_0:1; /* XR4[0:0] */
+
+ BYTE C_1:1; /* XR4[1:1] */
+
+ BYTE C_2:1; /* XR4[2:2] */
+
+ BYTE C_3:1; /* XR4[3:3] */
+
+ BYTE C_4:1; /* XR4[4:4] */
+
+ BYTE C_5:1; /* XR4[5:5] */
+
+ BYTE C_6:1; /* XR4[6:6] */
+
+ BYTE C_7:1; /* XR4[7:7] */
+
+ } bitreg;
+ } xr4;
+
+ union /* XOP - XR5 Register */
+ {
+ BYTE reg;
+ struct _XR5_BITREGS {
+ BYTE T_0:1; /* XR5[0:0] */
+
+ BYTE T_1:1; /* XR5[1:1] */
+
+ BYTE T_2:1; /* XR5[2:2] */
+
+ BYTE T_3:1; /* XR5[3:3] */
+
+ BYTE T_4:1; /* XR5[4:4] */
+
+ BYTE T_5:1; /* XR5[5:5] */
+
+ BYTE T_6:1; /* XR5[6:6] */
+
+ BYTE T_7:1; /* XR5[7:7] */
+
+ } bitreg;
+ } xr5;
+
+ union /* XOP - XR6 Register - Read Values */
+ {
+ BYTE reg;
+ struct _XR6_BITREGS {
+ BYTE CPS0:1; /* XR6[0:0] */
+
+ BYTE CPS1:1; /* XR6[1:1] */
+
+ BYTE unused1:2; /* XR6[2:3] */
+
+ BYTE CLK_OFF:1; /* XR6[4:4] */
+
+ BYTE unused2:3; /* XR6[5:7] */
+
+ } bitreg;
+ } xr6;
+
+ union /* XOP - XR7 Register */
+ {
+ BYTE reg;
+ struct _XR7_BITREGS {
+ BYTE unused1:1; /* XR7[0:0] */
+
+ BYTE Vdd0:1; /* XR7[1:1] */
+
+ BYTE Vdd1:1; /* XR7[2:2] */
+
+ BYTE unused2:5; /* XR7[3:7] */
+
+ } bitreg;
+ } xr7;
+ } XOP;
+
+ BYTE ByteRegs[sizeof(struct _XOP)];
+
+ } XOP_REGS;
+
+ /* DAA_REGS.XOP_REGS.XOP.XR7.reg */
+ /* DAA_REGS.XOP_REGS.XOP.XR7.bitreg */
+ /* DAA_REGS.XOP_REGS.XOP.XR7.bitreg.Vdd0 */
+ /* DAA_REGS.XOP_REGS.ByteRegs[7] */
+
+ /*----------------------------------------------- */
+ /* COP Registers */
+ /* */
+ BYTE byCOP;
+
+ union _COP_REGS {
+ struct _COP {
+ BYTE THFilterCoeff_1[8]; /* COP - TH Filter Coefficients, CODE=0, Part 1 */
+
+ BYTE THFilterCoeff_2[8]; /* COP - TH Filter Coefficients, CODE=1, Part 2 */
+
+ BYTE THFilterCoeff_3[8]; /* COP - TH Filter Coefficients, CODE=2, Part 3 */
+
+ BYTE RingerImpendance_1[8]; /* COP - Ringer Impendance Coefficients, CODE=3, Part 1 */
+
+ BYTE IMFilterCoeff_1[8]; /* COP - IM Filter Coefficients, CODE=4, Part 1 */
+
+ BYTE IMFilterCoeff_2[8]; /* COP - IM Filter Coefficients, CODE=5, Part 2 */
+
+ BYTE RingerImpendance_2[8]; /* COP - Ringer Impendance Coefficients, CODE=6, Part 2 */
+
+ BYTE FRRFilterCoeff[8]; /* COP - FRR Filter Coefficients, CODE=7 */
+
+ BYTE FRXFilterCoeff[8]; /* COP - FRX Filter Coefficients, CODE=8 */
+
+ BYTE ARFilterCoeff[4]; /* COP - AR Filter Coefficients, CODE=9 */
+
+ BYTE AXFilterCoeff[4]; /* COP - AX Filter Coefficients, CODE=10 */
+
+ BYTE Tone1Coeff[4]; /* COP - Tone1 Coefficients, CODE=11 */
+
+ BYTE Tone2Coeff[4]; /* COP - Tone2 Coefficients, CODE=12 */
+
+ BYTE LevelmeteringRinging[4]; /* COP - Levelmetering Ringing, CODE=13 */
+
+ BYTE CallerID1stTone[8]; /* COP - Caller ID 1st Tone, CODE=14 */
+
+ BYTE CallerID2ndTone[8]; /* COP - Caller ID 2nd Tone, CODE=15 */
+
+ } COP;
+
+ BYTE ByteRegs[sizeof(struct _COP)];
+
+ } COP_REGS;
+
+ /* DAA_REGS.COP_REGS.COP.XR7.Tone1Coeff[3] */
+ /* DAA_REGS.COP_REGS.COP.XR7.bitreg */
+ /* DAA_REGS.COP_REGS.COP.XR7.bitreg.Vdd0 */
+ /* DAA_REGS.COP_REGS.ByteRegs[57] */
+
+ /*----------------------------------------------- */
+ /* CAO Registers */
+ /* */
+ BYTE byCAO;
+
+ union _CAO_REGS {
+ struct _CAO {
+ BYTE CallerID[512]; /* CAO - Caller ID Bytes */
+
+ } CAO;
+
+ BYTE ByteRegs[sizeof(struct _CAO)];
+ } CAO_REGS;
+
+ union /* XOP - XR0 Register - Write values */
+ {
+ BYTE reg;
+ struct _XR0_BITREGSW {
+ BYTE SO_0:1; /* XR1[0:0] - Write */
+
+ BYTE SO_1:1; /* XR1[1:1] - Write */
+
+ BYTE SO_2:1; /* XR1[2:2] - Write */
+
+ BYTE unused:5; /* XR1[3:7] - Write */
+
+ } bitreg;
+ } XOP_xr0_W;
+
+ union /* XOP - XR6 Register - Write values */
+ {
+ BYTE reg;
+ struct _XR6_BITREGSW {
+ BYTE unused1:4; /* XR6[0:3] */
+
+ BYTE CLK_OFF:1; /* XR6[4:4] */
+
+ BYTE unused2:3; /* XR6[5:7] */
+
+ } bitreg;
+ } XOP_xr6_W;
+
+} DAA_REGS;
+
+#define ALISDAA_ID_BYTE 0x81
+#define ALISDAA_CALLERID_SIZE 512
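
Following the commented access patterns embedded in DAA_REGS above, a short sketch of how the shadow block is addressed (assumes j is a valid IXJ * whose m_DAAShadowRegs has been initialised; ByteRegs[5] aliases cr5 because cr0..cr7 are one byte each):

	DAA_REGS *daa = &j->m_DAAShadowRegs;

	daa->SOP_REGS.SOP.cr5.reg = 0x00;			/* whole register */
	daa->SOP_REGS.SOP.cr5.bitreg.V_2 = 1;			/* one bit field  */
	daa->SOP_REGS.ByteRegs[5] = daa->SOP_REGS.SOP.cr5.reg;	/* flat byte view */
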
+
+/*------------------------------ */
+/* */
+/* Misc definitions */
+/* */
+
+/* Power Up Operation */
+#define SOP_PU_SLEEP 0
+#define SOP_PU_RINGING 1
+#define SOP_PU_CONVERSATION 2
+#define SOP_PU_PULSEDIALING 3
+#define SOP_PU_RESET 4
+
+#define ALISDAA_CALLERID_SIZE 512
+
+#define PLAYBACK_MODE_COMPRESSED 0 /* Selects: Compressed modes, TrueSpeech 8.5-4.1, G.723.1, G.722, G.728, G.729 */
+#define PLAYBACK_MODE_TRUESPEECH_V40 0 /* Selects: TrueSpeech 8.5, 6.3, 5.3, 4.8 or 4.1 Kbps */
+#define PLAYBACK_MODE_TRUESPEECH 8 /* Selects: TrueSpeech 8.5, 6.3, 5.3, 4.8 or 4.1 Kbps Version 5.1 */
+#define PLAYBACK_MODE_ULAW 2 /* Selects: 64 Kbit/sec Mu-law PCM */
+#define PLAYBACK_MODE_ALAW 10 /* Selects: 64 Kbit/sec A-law PCM */
+#define PLAYBACK_MODE_16LINEAR 6 /* Selects: 128 Kbit/sec 16-bit linear */
+#define PLAYBACK_MODE_8LINEAR 4 /* Selects: 64 Kbit/sec 8-bit signed linear */
+#define PLAYBACK_MODE_8LINEAR_WSS 5 /* Selects: 64 Kbit/sec WSS 8-bit unsigned linear */
+
+#define RECORD_MODE_COMPRESSED 0 /* Selects: Compressed modes, TrueSpeech 8.5-4.1, G.723.1, G.722, G.728, G.729 */
+#define RECORD_MODE_TRUESPEECH 0 /* Selects: TrueSpeech 8.5, 6.3, 5.3, 4.8 or 4.1 Kbps */
+#define RECORD_MODE_ULAW 4 /* Selects: 64 Kbit/sec Mu-law PCM */
+#define RECORD_MODE_ALAW 12 /* Selects: 64 Kbit/sec A-law PCM */
+#define RECORD_MODE_16LINEAR 5 /* Selects: 128 Kbit/sec 16-bit linear */
+#define RECORD_MODE_8LINEAR 6 /* Selects: 64 Kbit/sec 8-bit signed linear */
+#define RECORD_MODE_8LINEAR_WSS 7 /* Selects: 64 Kbit/sec WSS 8-bit unsigned linear */
+
+enum SLIC_STATES {
+ PLD_SLIC_STATE_OC = 0,
+ PLD_SLIC_STATE_RINGING,
+ PLD_SLIC_STATE_ACTIVE,
+ PLD_SLIC_STATE_OHT,
+ PLD_SLIC_STATE_TIPOPEN,
+ PLD_SLIC_STATE_STANDBY,
+ PLD_SLIC_STATE_APR,
+ PLD_SLIC_STATE_OHTPR
+};
+
+enum SCI_CONTROL {
+ SCI_End = 0,
+ SCI_Enable_DAA,
+ SCI_Enable_Mixer,
+ SCI_Enable_EEPROM
+};
+
+enum Mode {
+ T63, T53, T48, T40
+};
+enum Dir {
+ V3_TO_V4, V4_TO_V3, V4_TO_V5, V5_TO_V4
+};
+
+typedef struct Proc_Info_Tag {
+ enum Mode convert_mode;
+ enum Dir convert_dir;
+ int Prev_Frame_Type;
+ int Current_Frame_Type;
+} Proc_Info_Type;
+
+enum PREVAL {
+ NORMAL = 0,
+ NOPOST,
+ POSTONLY,
+ PREERROR
+};
+
+enum IXJ_EXTENSIONS {
+ G729LOADER = 0,
+ TS85LOADER,
+ PRE_READ,
+ POST_READ,
+ PRE_WRITE,
+ POST_WRITE,
+ PRE_IOCTL,
+ POST_IOCTL
+};
+
+typedef struct {
+ char enable;
+ char en_filter;
+ unsigned int filter;
+ unsigned int state; /* State 0 when cadence has not started. */
+
+ unsigned int on1; /* State 1 */
+
+ unsigned long on1min; /* State 1 - 10% + jiffies */
+ unsigned long on1dot; /* State 1 + jiffies */
+
+ unsigned long on1max; /* State 1 + 10% + jiffies */
+
+ unsigned int off1; /* State 2 */
+
+ unsigned long off1min;
+ unsigned long off1dot; /* State 2 + jiffies */
+ unsigned long off1max;
+ unsigned int on2; /* State 3 */
+
+ unsigned long on2min;
+ unsigned long on2dot;
+ unsigned long on2max;
+ unsigned int off2; /* State 4 */
+
+ unsigned long off2min;
+ unsigned long off2dot; /* State 4 + jiffies */
+ unsigned long off2max;
+ unsigned int on3; /* State 5 */
+
+ unsigned long on3min;
+ unsigned long on3dot;
+ unsigned long on3max;
+ unsigned int off3; /* State 6 */
+
+ unsigned long off3min;
+ unsigned long off3dot; /* State 6 + jiffies */
+ unsigned long off3max;
+} IXJ_CADENCE_F;
+
+typedef struct {
+ unsigned int busytone:1;
+ unsigned int dialtone:1;
+ unsigned int ringback:1;
+ unsigned int ringing:1;
+ unsigned int playing:1;
+ unsigned int recording:1;
+ unsigned int cringing:1;
+ unsigned int play_first_frame:1;
+ unsigned int pstn_present:1;
+ unsigned int pstn_ringing:1;
+ unsigned int pots_correct:1;
+ unsigned int pots_pstn:1;
+ unsigned int g729_loaded:1;
+ unsigned int ts85_loaded:1;
+ unsigned int dtmf_oob:1; /* DTMF Out-Of-Band */
+
+ unsigned int pcmciascp:1; /* SmartCABLE Present */
+
+ unsigned int pcmciasct:2; /* SmartCABLE Type */
+
+ unsigned int pcmciastate:3; /* SmartCABLE Init State */
+
+ unsigned int inwrite:1; /* Currently writing */
+
+ unsigned int inread:1; /* Currently reading */
+
+ unsigned int incheck:1; /* Currently checking the SmartCABLE */
+
+ unsigned int cidplay:1; /* Currently playing Caller ID */
+
+ unsigned int cidring:1; /* This is the ring for Caller ID */
+
+ unsigned int cidsent:1; /* Caller ID has been sent */
+
+ unsigned int cidcw_ack:1; /* Caller ID CW ACK (from CPE) */
+ unsigned int firstring:1; /* First ring cadence is complete */
+ unsigned int pstncheck:1; /* Currently checking the PSTN Line */
+ unsigned int pstn_rmr:1;
+ unsigned int x:3; /* unused bits */
+
+} IXJ_FLAGS;
+
+/******************************************************************************
+*
+* This structure holds the state of all of the Quicknet cards
+*
+******************************************************************************/
+
+typedef struct {
+ int elements_used;
+ IXJ_CADENCE_TERM termination;
+ IXJ_CADENCE_ELEMENT *ce;
+} ixj_cadence;
+
+typedef struct {
+ struct phone_device p;
+ struct timer_list timer;
+ unsigned int board;
+ unsigned int DSPbase;
+ unsigned int XILINXbase;
+ unsigned int serial;
+ atomic_t DSPWrite;
+ struct phone_capability caplist[30];
+ unsigned int caps;
+ struct pnp_dev *dev;
+ unsigned int cardtype;
+ unsigned int rec_codec;
+ unsigned int cid_rec_codec;
+ unsigned int cid_rec_volume;
+ unsigned char cid_rec_flag;
+ signed char rec_mode;
+ unsigned int play_codec;
+ unsigned int cid_play_codec;
+ unsigned int cid_play_volume;
+ unsigned char cid_play_flag;
+ signed char play_mode;
+ IXJ_FLAGS flags;
+ unsigned long busyflags;
+ unsigned int rec_frame_size;
+ unsigned int play_frame_size;
+ unsigned int cid_play_frame_size;
+ unsigned int cid_base_frame_size;
+ unsigned long cidcw_wait;
+ int aec_level;
+ int cid_play_aec_level;
+ int readers, writers;
+ wait_queue_head_t poll_q;
+ wait_queue_head_t read_q;
+ char *read_buffer, *read_buffer_end;
+ char *read_convert_buffer;
+ size_t read_buffer_size;
+ unsigned int read_buffer_ready;
+ wait_queue_head_t write_q;
+ char *write_buffer, *write_buffer_end;
+ char *write_convert_buffer;
+ size_t write_buffer_size;
+ unsigned int write_buffers_empty;
+ unsigned long drybuffer;
+ char *write_buffer_rp, *write_buffer_wp;
+ char dtmfbuffer[80];
+ char dtmf_current;
+ int dtmf_wp, dtmf_rp, dtmf_state, dtmf_proc;
+ int tone_off_time, tone_on_time;
+ struct fasync_struct *async_queue;
+ unsigned long tone_start_jif;
+ char tone_index;
+ char tone_state;
+ char maxrings;
+ ixj_cadence *cadence_t;
+ ixj_cadence *cadence_r;
+ int tone_cadence_state;
+ IXJ_CADENCE_F cadence_f[6];
+ DTMF dtmf;
+ CPTF cptf;
+ BYTES dsp;
+ BYTES ver;
+ BYTES scr;
+ BYTES ssr;
+ BYTES baseframe;
+ HSR hsr;
+ GPIO gpio;
+ PLD_SCRR pld_scrr;
+ PLD_SCRW pld_scrw;
+ PLD_SLICW pld_slicw;
+ PLD_SLICR pld_slicr;
+ PLD_CLOCK pld_clock;
+ PCMCIA_CR1 pccr1;
+ PCMCIA_CR2 pccr2;
+ PCMCIA_SCCR psccr;
+ PCMCIA_SLIC pslic;
+ char pscdd;
+ Si3C1 sic1;
+ Si3C2 sic2;
+ Si3RXG sirxg;
+ Si3ADC siadc;
+ Si3DAC sidac;
+ Si3STAT sistat;
+ Si3AATT siaatt;
+ MIX mix;
+ unsigned short ring_cadence;
+ int ring_cadence_t;
+ unsigned long ring_cadence_jif;
+ unsigned long checkwait;
+ int intercom;
+ int m_hook;
+ int r_hook;
+ int p_hook;
+ char pstn_envelope;
+ char pstn_cid_intr;
+ unsigned char fskz;
+ unsigned char fskphase;
+ unsigned char fskcnt;
+ unsigned int cidsize;
+ unsigned int cidcnt;
+ unsigned long pstn_cid_received;
+ PHONE_CID cid;
+ PHONE_CID cid_send;
+ unsigned long pstn_ring_int;
+ unsigned long pstn_ring_start;
+ unsigned long pstn_ring_stop;
+ unsigned long pstn_winkstart;
+ unsigned long pstn_last_rmr;
+ unsigned long pstn_prev_rmr;
+ unsigned long pots_winkstart;
+ unsigned int winktime;
+ unsigned long flash_end;
+ char port;
+ char hookstate;
+ union telephony_exception ex;
+ union telephony_exception ex_sig;
+ int ixj_signals[35];
+ IXJ_SIGDEF sigdef;
+ char daa_mode;
+ char daa_country;
+ unsigned long pstn_sleeptil;
+ DAA_REGS m_DAAShadowRegs;
+ Proc_Info_Type Info_read;
+ Proc_Info_Type Info_write;
+ unsigned short frame_count;
+ unsigned int filter_hist[4];
+ unsigned char filter_en[6];
+ unsigned short proc_load;
+ unsigned long framesread;
+ unsigned long frameswritten;
+ unsigned long read_wait;
+ unsigned long write_wait;
+ unsigned long timerchecks;
+ unsigned long txreadycheck;
+ unsigned long rxreadycheck;
+ unsigned long statuswait;
+ unsigned long statuswaitfail;
+ unsigned long pcontrolwait;
+ unsigned long pcontrolwaitfail;
+ unsigned long iscontrolready;
+ unsigned long iscontrolreadyfail;
+ unsigned long pstnstatecheck;
+#ifdef IXJ_DYN_ALLOC
+ short *fskdata;
+#else
+ short fskdata[8000];
+#endif
+ int fsksize;
+ int fskdcnt;
+} IXJ;
+
+typedef int (*IXJ_REGFUNC) (IXJ * j, unsigned long arg);
+
+extern IXJ *ixj_pcmcia_probe(unsigned long, unsigned long);
+
diff --git a/drivers/staging/telephony/ixj_pcmcia.c b/drivers/staging/telephony/ixj_pcmcia.c
new file mode 100644
index 000000000000..05032e2cc954
--- /dev/null
+++ b/drivers/staging/telephony/ixj_pcmcia.c
@@ -0,0 +1,187 @@
+#include "ixj-ver.h"
+
+#include <linux/module.h>
+
+#include <linux/init.h>
+#include <linux/kernel.h> /* printk() */
+#include <linux/fs.h> /* everything... */
+#include <linux/errno.h> /* error codes */
+#include <linux/slab.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+
+#include "ixj.h"
+
+/*
+ * PCMCIA service support for Quicknet cards
+ */
+
+
+typedef struct ixj_info_t {
+ int ndev;
+ struct ixj *port;
+} ixj_info_t;
+
+static void ixj_detach(struct pcmcia_device *p_dev);
+static int ixj_config(struct pcmcia_device * link);
+static void ixj_cs_release(struct pcmcia_device * link);
+
+static int ixj_probe(struct pcmcia_device *p_dev)
+{
+ dev_dbg(&p_dev->dev, "ixj_attach()\n");
+ /* Create new ixj device */
+ p_dev->priv = kzalloc(sizeof(struct ixj_info_t), GFP_KERNEL);
+ if (!p_dev->priv) {
+ return -ENOMEM;
+ }
+
+ return ixj_config(p_dev);
+}
+
+static void ixj_detach(struct pcmcia_device *link)
+{
+ dev_dbg(&link->dev, "ixj_detach\n");
+
+ ixj_cs_release(link);
+
+ kfree(link->priv);
+}
+
+static void ixj_get_serial(struct pcmcia_device * link, IXJ * j)
+{
+ char *str;
+ int i, place;
+ dev_dbg(&link->dev, "ixj_get_serial\n");
+
+ str = link->prod_id[0];
+ if (!str)
+ goto failed;
+ printk("%s", str);
+ str = link->prod_id[1];
+ if (!str)
+ goto failed;
+ printk(" %s", str);
+ str = link->prod_id[2];
+ if (!str)
+ goto failed;
+ place = 1;
+ for (i = strlen(str) - 1; i >= 0; i--) {
+ switch (str[i]) {
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ j->serial += (str[i] - 48) * place;
+ break;
+ case 'A':
+ case 'B':
+ case 'C':
+ case 'D':
+ case 'E':
+ case 'F':
+ j->serial += (str[i] - 55) * place;
+ break;
+ case 'a':
+ case 'b':
+ case 'c':
+ case 'd':
+ case 'e':
+ case 'f':
+ j->serial += (str[i] - 87) * place;
+ break;
+ }
+ place = place * 0x10;
+ }
+ str = link->prod_id[3];
+ if (!str)
+ goto failed;
+ printk(" version %s\n", str);
+failed:
+ return;
+}
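
The digit loop above is a hand-rolled hexadecimal parse of prod_id[2], least-significant digit last, silently skipping any non-hex character. For a well-formed product string the same result could be had with the kernel helper below (sketch only; unlike the loop, kstrtoul() rejects the whole string on any stray character):

	unsigned long serial;

	if (!kstrtoul(link->prod_id[2], 16, &serial))
		j->serial = serial;
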
+
+static int ixj_config_check(struct pcmcia_device *p_dev, void *priv_data)
+{
+ p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
+ p_dev->resource[1]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8;
+ p_dev->io_lines = 3;
+
+ return pcmcia_request_io(p_dev);
+}
+
+static int ixj_config(struct pcmcia_device * link)
+{
+ IXJ *j;
+ ixj_info_t *info;
+
+ info = link->priv;
+ dev_dbg(&link->dev, "ixj_config\n");
+
+ link->config_flags = CONF_AUTO_SET_IO;
+
+ if (pcmcia_loop_config(link, ixj_config_check, NULL))
+ goto failed;
+
+ if (pcmcia_enable_device(link))
+ goto failed;
+
+ /*
+ * Register the card with the core.
+ */
+ j = ixj_pcmcia_probe(link->resource[0]->start,
+ link->resource[0]->start + 0x10);
+
+ info->ndev = 1;
+ ixj_get_serial(link, j);
+ return 0;
+
+failed:
+ ixj_cs_release(link);
+ return -ENODEV;
+}
+
+static void ixj_cs_release(struct pcmcia_device *link)
+{
+ ixj_info_t *info = link->priv;
+ dev_dbg(&link->dev, "ixj_cs_release\n");
+ info->ndev = 0;
+ pcmcia_disable_device(link);
+}
+
+static const struct pcmcia_device_id ixj_ids[] = {
+ PCMCIA_DEVICE_MANF_CARD(0x0257, 0x0600),
+ PCMCIA_DEVICE_NULL
+};
+MODULE_DEVICE_TABLE(pcmcia, ixj_ids);
+
+static struct pcmcia_driver ixj_driver = {
+ .owner = THIS_MODULE,
+ .name = "ixj_cs",
+ .probe = ixj_probe,
+ .remove = ixj_detach,
+ .id_table = ixj_ids,
+};
+
+static int __init ixj_pcmcia_init(void)
+{
+ return pcmcia_register_driver(&ixj_driver);
+}
+
+static void ixj_pcmcia_exit(void)
+{
+ pcmcia_unregister_driver(&ixj_driver);
+}
+
+module_init(ixj_pcmcia_init);
+module_exit(ixj_pcmcia_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/telephony/phonedev.c b/drivers/staging/telephony/phonedev.c
new file mode 100644
index 000000000000..1915af201175
--- /dev/null
+++ b/drivers/staging/telephony/phonedev.c
@@ -0,0 +1,167 @@
+/*
+ * Telephony registration for Linux
+ *
+ * (c) Copyright 1999 Red Hat Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Author: Alan Cox, <alan@lxorguk.ukuu.org.uk>
+ *
+ * Fixes: Mar 01 2000 Thomas Sparr, <thomas.l.sparr@telia.com>
+ * phone_register_device now works with unit!=PHONE_UNIT_ANY
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/phonedev.h>
+#include <linux/init.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+#include <linux/kmod.h>
+#include <linux/sem.h>
+#include <linux/mutex.h>
+
+#define PHONE_NUM_DEVICES 256
+
+/*
+ * Active devices
+ */
+
+static struct phone_device *phone_device[PHONE_NUM_DEVICES];
+static DEFINE_MUTEX(phone_lock);
+
+/*
+ * Open a phone device.
+ */
+
+static int phone_open(struct inode *inode, struct file *file)
+{
+ unsigned int minor = iminor(inode);
+ int err = 0;
+ struct phone_device *p;
+ const struct file_operations *old_fops, *new_fops = NULL;
+
+ if (minor >= PHONE_NUM_DEVICES)
+ return -ENODEV;
+
+ mutex_lock(&phone_lock);
+ p = phone_device[minor];
+ if (p)
+ new_fops = fops_get(p->f_op);
+ if (!new_fops) {
+ mutex_unlock(&phone_lock);
+ request_module("char-major-%d-%d", PHONE_MAJOR, minor);
+ mutex_lock(&phone_lock);
+ p = phone_device[minor];
+ if (p == NULL || (new_fops = fops_get(p->f_op)) == NULL)
+ {
+ err=-ENODEV;
+ goto end;
+ }
+ }
+ old_fops = file->f_op;
+ file->f_op = new_fops;
+ if (p->open)
+ err = p->open(p, file); /* Tell the device it is open */
+ if (err) {
+ fops_put(file->f_op);
+ file->f_op = fops_get(old_fops);
+ }
+ fops_put(old_fops);
+end:
+ mutex_unlock(&phone_lock);
+ return err;
+}
+
+/*
+ * Telephony For Linux device drivers request registration here.
+ */
+
+int phone_register_device(struct phone_device *p, int unit)
+{
+ int base;
+ int end;
+ int i;
+
+ base = 0;
+ end = PHONE_NUM_DEVICES - 1;
+
+ if (unit != PHONE_UNIT_ANY) {
+ base = unit;
+ end = unit + 1; /* enter the loop at least one time */
+ }
+
+ mutex_lock(&phone_lock);
+ for (i = base; i < end; i++) {
+ if (phone_device[i] == NULL) {
+ phone_device[i] = p;
+ p->minor = i;
+ mutex_unlock(&phone_lock);
+ return 0;
+ }
+ }
+ mutex_unlock(&phone_lock);
+ return -ENFILE;
+}
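
A hypothetical driver-side usage sketch (the struct phone_device layout is inferred from the members this file touches — f_op, open, minor — and my_phone_fops/my_phone_open are assumed to be supplied by the calling driver):

	static struct phone_device my_phone = {
		.f_op = &my_phone_fops,
		.open = my_phone_open,	/* optional per-device open hook */
	};

	if (phone_register_device(&my_phone, PHONE_UNIT_ANY))
		pr_err("phonedev: no free telephony minor\n");
	/* ... and on driver unload ... */
	phone_unregister_device(&my_phone);
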
+
+/*
+ * Unregister an unused Telephony for linux device
+ */
+
+void phone_unregister_device(struct phone_device *pfd)
+{
+ mutex_lock(&phone_lock);
+ if (likely(phone_device[pfd->minor] == pfd))
+ phone_device[pfd->minor] = NULL;
+ mutex_unlock(&phone_lock);
+}
+
+
+static const struct file_operations phone_fops =
+{
+ .owner = THIS_MODULE,
+ .open = phone_open,
+ .llseek = noop_llseek,
+};
+
+/*
+ * Board init functions
+ */
+
+
+/*
+ * Initialise Telephony for linux
+ */
+
+static int __init telephony_init(void)
+{
+ printk(KERN_INFO "Linux telephony interface: v1.00\n");
+ if (register_chrdev(PHONE_MAJOR, "telephony", &phone_fops)) {
+ printk("phonedev: unable to get major %d\n", PHONE_MAJOR);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void __exit telephony_exit(void)
+{
+ unregister_chrdev(PHONE_MAJOR, "telephony");
+}
+
+module_init(telephony_init);
+module_exit(telephony_exit);
+
+MODULE_LICENSE("GPL");
+
+EXPORT_SYMBOL(phone_register_device);
+EXPORT_SYMBOL(phone_unregister_device);
diff --git a/drivers/staging/tidspbridge/Kconfig b/drivers/staging/tidspbridge/Kconfig
index 21a559ecbbb1..d660891303b6 100644
--- a/drivers/staging/tidspbridge/Kconfig
+++ b/drivers/staging/tidspbridge/Kconfig
@@ -31,12 +31,6 @@ config TIDSPBRIDGE_MEMPOOL_SIZE
Allocate specified size of memory at booting time to avoid allocation
failure under heavy memory fragmentation after some use time.
-config TIDSPBRIDGE_DEBUG
- bool "Debug Support"
- depends on TIDSPBRIDGE
- help
- Say Y to enable Bridge debugging capabilities
-
config TIDSPBRIDGE_RECOVERY
bool "Recovery Support"
depends on TIDSPBRIDGE
diff --git a/drivers/staging/tidspbridge/Makefile b/drivers/staging/tidspbridge/Makefile
index fd6a2761cc3b..8c8c92a9083f 100644
--- a/drivers/staging/tidspbridge/Makefile
+++ b/drivers/staging/tidspbridge/Makefile
@@ -1,4 +1,4 @@
-obj-$(CONFIG_TIDSPBRIDGE) += bridgedriver.o
+obj-$(CONFIG_TIDSPBRIDGE) += tidspbridge.o
libgen = gen/gh.o gen/uuidutil.o
libcore = core/chnl_sm.o core/msg_sm.o core/io_sm.o core/tiomap3430.o \
@@ -13,7 +13,7 @@ libdload = dynload/cload.o dynload/getsection.o dynload/reloc.o \
dynload/tramp.o
libhw = hw/hw_mmu.o
-bridgedriver-y := $(libgen) $(libservices) $(libcore) $(libpmgr) $(librmgr) \
+tidspbridge-y := $(libgen) $(libservices) $(libcore) $(libpmgr) $(librmgr) \
$(libdload) $(libhw)
#Machine dependent
diff --git a/drivers/staging/tidspbridge/core/chnl_sm.c b/drivers/staging/tidspbridge/core/chnl_sm.c
index 6d66e7d0fba8..e0c7e4c470c8 100644
--- a/drivers/staging/tidspbridge/core/chnl_sm.c
+++ b/drivers/staging/tidspbridge/core/chnl_sm.c
@@ -50,9 +50,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>
@@ -123,7 +120,6 @@ int bridge_chnl_add_io_req(struct chnl_object *chnl_obj, void *host_buf,
CHNL_IS_OUTPUT(pchnl->chnl_mode))
return -EPIPE;
/* No other possible states left */
- DBC_ASSERT(0);
}
dev_obj = dev_get_first();
@@ -190,7 +186,6 @@ func_cont:
* Note: for dma chans dw_dsp_addr contains dsp address
* of SM buffer.
*/
- DBC_ASSERT(chnl_mgr_obj->word_size != 0);
/* DSP address */
chnl_packet_obj->dsp_tx_addr = dw_dsp_addr / chnl_mgr_obj->word_size;
chnl_packet_obj->byte_size = byte_size;
@@ -201,7 +196,6 @@ func_cont:
CHNL_IOCSTATCOMPLETE);
list_add_tail(&chnl_packet_obj->link, &pchnl->io_requests);
pchnl->cio_reqs++;
- DBC_ASSERT(pchnl->cio_reqs <= pchnl->chnl_packets);
/*
* If end of stream, update the channel state to prevent
* more IOR's.
@@ -209,8 +203,6 @@ func_cont:
if (is_eos)
pchnl->state |= CHNL_STATEEOS;
- /* Legacy DSM Processor-Copy */
- DBC_ASSERT(pchnl->chnl_type == CHNL_PCPY);
/* Request IO from the DSP */
io_request_chnl(chnl_mgr_obj->iomgr, pchnl,
(CHNL_IS_INPUT(pchnl->chnl_mode) ? IO_INPUT :
@@ -283,7 +275,6 @@ int bridge_chnl_cancel_io(struct chnl_object *chnl_obj)
list_add_tail(&chirp->link, &pchnl->io_completions);
pchnl->cio_cs++;
pchnl->cio_reqs--;
- DBC_ASSERT(pchnl->cio_reqs >= 0);
}
spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
@@ -311,8 +302,6 @@ int bridge_chnl_close(struct chnl_object *chnl_obj)
status = bridge_chnl_cancel_io(chnl_obj);
if (status)
return status;
- /* Assert I/O on this channel is now cancelled: Protects from io_dpc */
- DBC_ASSERT((pchnl->state & CHNL_STATECANCEL));
/* Invalidate channel object: Protects from CHNL_GetIOCompletion() */
/* Free the slot in the channel manager: */
pchnl->chnl_mgr_obj->channels[pchnl->chnl_id] = NULL;
@@ -358,13 +347,6 @@ int bridge_chnl_create(struct chnl_mgr **channel_mgr,
struct chnl_mgr *chnl_mgr_obj = NULL;
u8 max_channels;
- /* Check DBC requirements: */
- DBC_REQUIRE(channel_mgr != NULL);
- DBC_REQUIRE(mgr_attrts != NULL);
- DBC_REQUIRE(mgr_attrts->max_channels > 0);
- DBC_REQUIRE(mgr_attrts->max_channels <= CHNL_MAXCHANNELS);
- DBC_REQUIRE(mgr_attrts->word_size != 0);
-
/* Allocate channel manager object */
chnl_mgr_obj = kzalloc(sizeof(struct chnl_mgr), GFP_KERNEL);
if (chnl_mgr_obj) {
@@ -374,7 +356,6 @@ int bridge_chnl_create(struct chnl_mgr **channel_mgr,
* mgr_attrts->max_channels = CHNL_MAXCHANNELS =
* DDMA_MAXDDMACHNLS = DDMA_MAXZCPYCHNLS.
*/
- DBC_ASSERT(mgr_attrts->max_channels == CHNL_MAXCHANNELS);
max_channels = CHNL_MAXCHANNELS + CHNL_MAXCHANNELS * CHNL_PCPY;
/* Create array of channels */
chnl_mgr_obj->channels = kzalloc(sizeof(struct chnl_object *)
@@ -491,7 +472,6 @@ int bridge_chnl_flush_io(struct chnl_object *chnl_obj, u32 timeout)
pchnl->state &= ~CHNL_STATECANCEL;
}
}
- DBC_ENSURE(status || list_empty(&pchnl->io_requests));
return status;
}
@@ -592,7 +572,6 @@ int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 timeout,
omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
if (dequeue_ioc) {
/* Dequeue IOC and set chan_ioc; */
- DBC_ASSERT(!list_empty(&pchnl->io_completions));
chnl_packet_obj = list_first_entry(&pchnl->io_completions,
struct chnl_irp, link);
list_del(&chnl_packet_obj->link);
@@ -705,8 +684,6 @@ int bridge_chnl_idle(struct chnl_object *chnl_obj, u32 timeout,
struct chnl_mgr *chnl_mgr_obj;
int status = 0;
- DBC_REQUIRE(chnl_obj);
-
chnl_mode = chnl_obj->chnl_mode;
chnl_mgr_obj = chnl_obj->chnl_mgr_obj;
@@ -736,10 +713,7 @@ int bridge_chnl_open(struct chnl_object **chnl,
struct chnl_mgr *chnl_mgr_obj = hchnl_mgr;
struct chnl_object *pchnl = NULL;
struct sync_object *sync_event = NULL;
- /* Ensure DBC requirements: */
- DBC_REQUIRE(chnl != NULL);
- DBC_REQUIRE(pattrs != NULL);
- DBC_REQUIRE(hchnl_mgr != NULL);
+
*chnl = NULL;
/* Validate Args: */
@@ -761,7 +735,6 @@ int bridge_chnl_open(struct chnl_object **chnl,
return status;
}
- DBC_ASSERT(ch_id < chnl_mgr_obj->max_channels);
/* Create channel object: */
pchnl = kzalloc(sizeof(struct chnl_object), GFP_KERNEL);
@@ -850,7 +823,6 @@ int bridge_chnl_register_notify(struct chnl_object *chnl_obj,
{
int status = 0;
- DBC_ASSERT(!(event_mask & ~(DSP_STREAMDONE | DSP_STREAMIOCOMPLETION)));
if (event_mask)
status = ntfy_register(chnl_obj->ntfy_obj, hnotification,
@@ -906,8 +878,6 @@ static void free_chirp_list(struct list_head *chirp_list)
{
struct chnl_irp *chirp, *tmp;
- DBC_REQUIRE(chirp_list != NULL);
-
list_for_each_entry_safe(chirp, tmp, chirp_list, link) {
list_del(&chirp->link);
kfree(chirp);
@@ -924,8 +894,6 @@ static int search_free_channel(struct chnl_mgr *chnl_mgr_obj,
int status = -ENOSR;
u32 i;
- DBC_REQUIRE(chnl_mgr_obj);
-
for (i = 0; i < chnl_mgr_obj->max_channels; i++) {
if (chnl_mgr_obj->channels[i] == NULL) {
status = 0;
diff --git a/drivers/staging/tidspbridge/core/dsp-clock.c b/drivers/staging/tidspbridge/core/dsp-clock.c
index 7eb56178fb64..c7df34e6b60b 100644
--- a/drivers/staging/tidspbridge/core/dsp-clock.c
+++ b/drivers/staging/tidspbridge/core/dsp-clock.c
@@ -29,9 +29,6 @@
#include <dspbridge/dev.h>
#include "_tiomap.h"
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- This */
#include <dspbridge/clk.h>
diff --git a/drivers/staging/tidspbridge/core/io_sm.c b/drivers/staging/tidspbridge/core/io_sm.c
index 694c0e5e55cc..9b50b5bd4edb 100644
--- a/drivers/staging/tidspbridge/core/io_sm.c
+++ b/drivers/staging/tidspbridge/core/io_sm.c
@@ -33,9 +33,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* Services Layer */
#include <dspbridge/ntfy.h>
#include <dspbridge/sync.h>
@@ -114,7 +111,7 @@ struct io_mgr {
struct mgr_processorextinfo ext_proc_info;
struct cmm_object *cmm_mgr; /* Shared Mem Mngr */
struct work_struct io_workq; /* workqueue */
-#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
+#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
u32 trace_buffer_begin; /* Trace message start address */
u32 trace_buffer_end; /* Trace message end address */
u32 trace_buffer_current; /* Trace message current address */
@@ -246,7 +243,7 @@ int bridge_io_destroy(struct io_mgr *hio_mgr)
/* Free IO DPC object */
tasklet_kill(&hio_mgr->dpc_tasklet);
-#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
+#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
kfree(hio_mgr->msg);
#endif
dsp_wdt_exit();
@@ -386,7 +383,7 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
status = -EFAULT;
}
if (!status) {
-#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
+#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
status =
cod_get_sym_value(cod_man, DSP_TRACESEC_END, &shm0_end);
#else
@@ -731,7 +728,7 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
hmsg_mgr->max_msgs);
memset((void *)hio_mgr->shared_mem, 0, sizeof(struct shm));
-#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
+#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
/* Get the start address of trace buffer */
status = cod_get_sym_value(cod_man, SYS_PUTCBEG,
&hio_mgr->trace_buffer_begin);
@@ -910,7 +907,7 @@ void io_dpc(unsigned long ref_data)
}
#endif
-#ifdef CONFIG_TIDSPBRIDGE_DEBUG
+#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
if (pio_mgr->intr_val & MBX_DBG_SYSPRINTF) {
/* Notify DSP Trace message */
print_dsp_debug_trace(pio_mgr);
@@ -973,29 +970,16 @@ void io_request_chnl(struct io_mgr *io_manager, struct chnl_object *pchnl,
chnl_mgr_obj = io_manager->chnl_mgr;
sm = io_manager->shared_mem;
if (io_mode == IO_INPUT) {
- /*
- * Assertion fires if CHNL_AddIOReq() called on a stream
- * which was cancelled, or attached to a dead board.
- */
- DBC_ASSERT((pchnl->state == CHNL_STATEREADY) ||
- (pchnl->state == CHNL_STATEEOS));
/* Indicate to the DSP we have a buffer available for input */
set_chnl_busy(sm, pchnl->chnl_id);
*mbx_val = MBX_PCPY_CLASS;
} else if (io_mode == IO_OUTPUT) {
/*
- * This assertion fails if CHNL_AddIOReq() was called on a
- * stream which was cancelled, or attached to a dead board.
- */
- DBC_ASSERT((pchnl->state & ~CHNL_STATEEOS) ==
- CHNL_STATEREADY);
- /*
* Record the fact that we have a buffer available for
* output.
*/
chnl_mgr_obj->output_mask |= (1 << pchnl->chnl_id);
} else {
- DBC_ASSERT(io_mode); /* Shouldn't get here. */
}
func_end:
return;
@@ -1087,7 +1071,6 @@ static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
dw_arg = sm->arg;
if (chnl_id >= CHNL_MAXCHANNELS) {
/* Shouldn't be here: would indicate corrupted shm. */
- DBC_ASSERT(chnl_id);
goto func_end;
}
pchnl = chnl_mgr_obj->channels[chnl_id];
@@ -1683,7 +1666,7 @@ int bridge_io_get_proc_load(struct io_mgr *hio_mgr,
}
-#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
+#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
void print_dsp_debug_trace(struct io_mgr *hio_mgr)
{
u32 ul_new_message_length = 0, ul_gpp_cur_pointer;
diff --git a/drivers/staging/tidspbridge/core/msg_sm.c b/drivers/staging/tidspbridge/core/msg_sm.c
index 94d9e04a22fa..ce9557e16eb0 100644
--- a/drivers/staging/tidspbridge/core/msg_sm.c
+++ b/drivers/staging/tidspbridge/core/msg_sm.c
@@ -20,9 +20,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
index dde559d06c43..670de0aae928 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -27,9 +27,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/drv.h>
#include <dspbridge/sync.h>
@@ -256,9 +253,6 @@ static void bad_page_dump(u32 pa, struct page *pg)
void bridge_drv_entry(struct bridge_drv_interface **drv_intf,
const char *driver_file_name)
{
-
- DBC_REQUIRE(driver_file_name != NULL);
-
if (strcmp(driver_file_name, "UMA") == 0)
*drv_intf = &drv_interface_fxns;
else
@@ -399,16 +393,13 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
(void)dev_get_symbol(dev_context->dev_obj, SHMBASENAME,
&ul_shm_base_virt);
ul_shm_base_virt *= DSPWORDSIZE;
- DBC_ASSERT(ul_shm_base_virt != 0);
/* DSP Virtual address */
ul_tlb_base_virt = dev_context->atlb_entry[0].dsp_va;
- DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
ul_shm_offset_virt =
ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE);
/* Kernel logical address */
ul_shm_base = dev_context->atlb_entry[0].gpp_va + ul_shm_offset_virt;
- DBC_ASSERT(ul_shm_base != 0);
/* 2nd wd is used as sync field */
dw_sync_addr = ul_shm_base + SHMSYNCOFFSET;
/* Write a signature into the shm base + offset; this will
diff --git a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
index 02dd4391309a..16a4aafa86ae 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
@@ -303,7 +303,6 @@ int dsp_peripheral_clk_ctrl(struct bridge_dev_context *dev_context,
}
/* TODO -- An assert may be too hard a restriction here. Maybe we should
* just return with failure when the CLK ID does not match */
- /* DBC_ASSERT(clk_id_index < MBX_PM_MAX_RESOURCES); */
if (clk_id_index == MBX_PM_MAX_RESOURCES) {
/* return with a more meaningful error code */
return -EPERM;
diff --git a/drivers/staging/tidspbridge/core/tiomap_io.c b/drivers/staging/tidspbridge/core/tiomap_io.c
index dfb356eb6723..7fda10c36862 100644
--- a/drivers/staging/tidspbridge/core/tiomap_io.c
+++ b/drivers/staging/tidspbridge/core/tiomap_io.c
@@ -21,9 +21,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
#include <dspbridge/drv.h>
@@ -68,20 +65,17 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
status = dev_get_symbol(dev_context->dev_obj,
SHMBASENAME, &ul_shm_base_virt);
}
- DBC_ASSERT(ul_shm_base_virt != 0);
/* Check if it is a read of Trace section */
if (!status && !ul_trace_sec_beg) {
status = dev_get_symbol(dev_context->dev_obj,
DSP_TRACESEC_BEG, &ul_trace_sec_beg);
}
- DBC_ASSERT(ul_trace_sec_beg != 0);
if (!status && !ul_trace_sec_end) {
status = dev_get_symbol(dev_context->dev_obj,
DSP_TRACESEC_END, &ul_trace_sec_end);
}
- DBC_ASSERT(ul_trace_sec_end != 0);
if (!status) {
if ((dsp_addr <= ul_trace_sec_end) &&
@@ -105,19 +99,16 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
status = dev_get_symbol(dev_context->dev_obj,
DYNEXTBASE, &ul_dyn_ext_base);
}
- DBC_ASSERT(ul_dyn_ext_base != 0);
if (!status) {
status = dev_get_symbol(dev_context->dev_obj,
EXTBASE, &ul_ext_base);
}
- DBC_ASSERT(ul_ext_base != 0);
if (!status) {
status = dev_get_symbol(dev_context->dev_obj,
EXTEND, &ul_ext_end);
}
- DBC_ASSERT(ul_ext_end != 0);
/* Trace buffer is right after the shm SEG0,
* so set the base address to SHMBASE */
@@ -126,8 +117,6 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
ul_ext_end = ul_trace_sec_end;
}
- DBC_ASSERT(ul_ext_end != 0);
- DBC_ASSERT(ul_ext_end > ul_ext_base);
if (ul_ext_end < ul_ext_base)
status = -EPERM;
@@ -135,7 +124,6 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
if (!status) {
ul_tlb_base_virt =
dev_context->atlb_entry[0].dsp_va * DSPWORDSIZE;
- DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
dw_ext_prog_virt_mem =
dev_context->atlb_entry[0].gpp_va;
@@ -271,7 +259,6 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
/* Get SHM_BEG EXT_BEG and EXT_END. */
ret = dev_get_symbol(dev_context->dev_obj,
SHMBASENAME, &ul_shm_base_virt);
- DBC_ASSERT(ul_shm_base_virt != 0);
if (dynamic_load) {
if (!ret) {
if (symbols_reloaded)
@@ -280,7 +267,6 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
(dev_context->dev_obj, DYNEXTBASE,
&ul_ext_base);
}
- DBC_ASSERT(ul_ext_base != 0);
if (!ret) {
/* DR OMAPS00013235 : DLModules array may be
* in EXTMEM. It is expected that DYNEXTMEM and
@@ -299,7 +285,6 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
dev_get_symbol
(dev_context->dev_obj, EXTBASE,
&ul_ext_base);
- DBC_ASSERT(ul_ext_base != 0);
if (!ret)
ret =
dev_get_symbol
@@ -312,15 +297,12 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
if (trace_load)
ul_ext_base = ul_shm_base_virt;
- DBC_ASSERT(ul_ext_end != 0);
- DBC_ASSERT(ul_ext_end > ul_ext_base);
if (ul_ext_end < ul_ext_base)
ret = -EPERM;
if (!ret) {
ul_tlb_base_virt =
dev_context->atlb_entry[0].dsp_va * DSPWORDSIZE;
- DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
if (symbols_reloaded) {
ret = dev_get_symbol
diff --git a/drivers/staging/tidspbridge/gen/uuidutil.c b/drivers/staging/tidspbridge/gen/uuidutil.c
index ff6ebadf98f4..b44656cf7858 100644
--- a/drivers/staging/tidspbridge/gen/uuidutil.c
+++ b/drivers/staging/tidspbridge/gen/uuidutil.c
@@ -23,9 +23,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- This */
#include <dspbridge/uuidutil.h>
@@ -41,8 +38,6 @@ void uuid_uuid_to_string(struct dsp_uuid *uuid_obj, char *sz_uuid,
{
s32 i; /* return result from snprintf. */
- DBC_REQUIRE(uuid_obj && sz_uuid);
-
i = snprintf(sz_uuid, size,
"%.8X_%.4X_%.4X_%.2X%.2X_%.2X%.2X%.2X%.2X%.2X%.2X",
uuid_obj->data1, uuid_obj->data2, uuid_obj->data3,
@@ -50,8 +45,6 @@ void uuid_uuid_to_string(struct dsp_uuid *uuid_obj, char *sz_uuid,
uuid_obj->data6[0], uuid_obj->data6[1],
uuid_obj->data6[2], uuid_obj->data6[3],
uuid_obj->data6[4], uuid_obj->data6[5]);
-
- DBC_ENSURE(i != -1);
}
static s32 uuid_hex_to_bin(char *buf, s32 len)
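
The uuidutil.c hunk above renders a dsp_uuid with a single snprintf call and drops the DBC_ENSURE(i != -1) postcondition on its return value. The failure mode that check could realistically catch is truncation of the output buffer; a minimal userspace sketch of keeping that check explicit (the struct and function names here are illustrative, not the driver's):

#include <stdio.h>

/* Stand-in for struct dsp_uuid; field layout follows the format string above. */
struct example_uuid {
	unsigned int data1;
	unsigned short data2, data3;
	unsigned char data4, data5, data6[6];
};

/* Return 0 on success, -1 if the text did not fit in 'size' bytes. */
static int format_uuid(const struct example_uuid *u, char *buf, size_t size)
{
	int n = snprintf(buf, size,
			 "%.8X_%.4X_%.4X_%.2X%.2X_%.2X%.2X%.2X%.2X%.2X%.2X",
			 u->data1, u->data2, u->data3, u->data4, u->data5,
			 u->data6[0], u->data6[1], u->data6[2], u->data6[3],
			 u->data6[4], u->data6[5]);

	/* snprintf reports the length it wanted; >= size means truncation. */
	return (n < 0 || (size_t)n >= size) ? -1 : 0;
}
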
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dbc.h b/drivers/staging/tidspbridge/include/dspbridge/dbc.h
deleted file mode 100644
index 463760f499a4..000000000000
--- a/drivers/staging/tidspbridge/include/dspbridge/dbc.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * dbc.h
- *
- * DSP-BIOS Bridge driver support functions for TI OMAP processors.
- *
- * "Design by Contract" programming macros.
- *
- * Notes:
- * Requires that the GT->ERROR function has been defaulted to a valid
- * error handler for the given execution environment.
- *
- * Does not require that GT_init() be called.
- *
- * Copyright (C) 2008 Texas Instruments, Inc.
- *
- * This package is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
- * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
- */
-
-#ifndef DBC_
-#define DBC_
-
-/* Assertion Macros: */
-#ifdef CONFIG_TIDSPBRIDGE_DEBUG
-
-#define DBC_ASSERT(exp) \
- if (!(exp)) \
- pr_err("%s, line %d: Assertion (" #exp ") failed.\n", \
- __FILE__, __LINE__)
-#define DBC_REQUIRE DBC_ASSERT /* Function Precondition. */
-#define DBC_ENSURE DBC_ASSERT /* Function Postcondition. */
-
-#else
-
-#define DBC_ASSERT(exp) {}
-#define DBC_REQUIRE(exp) {}
-#define DBC_ENSURE(exp) {}
-
-#endif /* DEBUG */
-
-#endif /* DBC_ */
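
The deleted dbc.h above is the whole "Design by Contract" layer this series removes: under CONFIG_TIDSPBRIDGE_DEBUG each macro expanded to a pr_err() with file and line, and to an empty statement otherwise, so deleting the call sites leaves non-debug builds unchanged. Where a dropped check still guards a condition that should never occur, a usual substitute is an open-coded report (as the cmm_xlator_free_buf hunk further down does with pr_err) or a WARN_ON variant; a minimal sketch of the latter, around a hypothetical function that is not part of the driver:

#include <linux/bug.h>
#include <linux/errno.h>

/* Hypothetical example: keep a former DBC_ASSERT(desc != NULL) visible
 * in all builds as a one-shot warning with a backtrace. */
static int example_free_descriptor(void *desc)
{
	if (WARN_ON_ONCE(!desc))
		return -EINVAL;
	/* ...real teardown would go here... */
	return 0;
}
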
diff --git a/drivers/staging/tidspbridge/include/dspbridge/io_sm.h b/drivers/staging/tidspbridge/include/dspbridge/io_sm.h
index a054dad21333..903ff12b14de 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/io_sm.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/io_sm.h
@@ -154,8 +154,6 @@ int dump_dsp_stack(struct bridge_dev_context *bridge_context);
void dump_dl_modules(struct bridge_dev_context *bridge_context);
-#endif
-#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
void print_dsp_debug_trace(struct io_mgr *hio_mgr);
#endif
diff --git a/drivers/staging/tidspbridge/pmgr/chnl.c b/drivers/staging/tidspbridge/pmgr/chnl.c
index 245de82e2d67..825be200e278 100644
--- a/drivers/staging/tidspbridge/pmgr/chnl.c
+++ b/drivers/staging/tidspbridge/pmgr/chnl.c
@@ -24,9 +24,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>
@@ -58,10 +55,6 @@ int chnl_create(struct chnl_mgr **channel_mgr,
struct chnl_mgr *hchnl_mgr;
struct chnl_mgr_ *chnl_mgr_obj = NULL;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(channel_mgr != NULL);
- DBC_REQUIRE(mgr_attrts != NULL);
-
*channel_mgr = NULL;
/* Validate args: */
@@ -99,8 +92,6 @@ int chnl_create(struct chnl_mgr **channel_mgr,
}
}
- DBC_ENSURE(status || chnl_mgr_obj);
-
return status;
}
@@ -115,8 +106,6 @@ int chnl_destroy(struct chnl_mgr *hchnl_mgr)
struct bridge_drv_interface *intf_fxns;
int status;
- DBC_REQUIRE(refs > 0);
-
if (chnl_mgr_obj) {
intf_fxns = chnl_mgr_obj->intf_fxns;
/* Let Bridge channel module destroy the chnl_mgr: */
@@ -135,11 +124,7 @@ int chnl_destroy(struct chnl_mgr *hchnl_mgr)
*/
void chnl_exit(void)
{
- DBC_REQUIRE(refs > 0);
-
refs--;
-
- DBC_ENSURE(refs >= 0);
}
/*
@@ -151,12 +136,8 @@ bool chnl_init(void)
{
bool ret = true;
- DBC_REQUIRE(refs >= 0);
-
if (ret)
refs++;
- DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
-
return ret;
}
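
chnl_init()/chnl_exit() above, like the matching pairs in cmm, cod, dev, dmm, io, msg and dcd later in this series, only maintain a module reference count; the removed DBC lines merely asserted that the count stayed non-negative. What survives is a bare counter, sketched here in isolation (standalone illustration; in the driver 'refs' is a file-scope static in each module):

#include <stdbool.h>

static int refs;			/* module reference count */

static bool module_init_ref(void)
{
	refs++;				/* nothing in this path can fail */
	return true;
}

static void module_exit_ref(void)
{
	refs--;
	if (refs == 0) {
		/* last user gone: release module-wide resources here */
	}
}
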
diff --git a/drivers/staging/tidspbridge/pmgr/cmm.c b/drivers/staging/tidspbridge/pmgr/cmm.c
index e6b2c8962f81..3366e601009d 100644
--- a/drivers/staging/tidspbridge/pmgr/cmm.c
+++ b/drivers/staging/tidspbridge/pmgr/cmm.c
@@ -35,9 +35,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>
@@ -244,9 +241,6 @@ int cmm_create(struct cmm_object **ph_cmm_mgr,
struct cmm_object *cmm_obj = NULL;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(ph_cmm_mgr != NULL);
-
*ph_cmm_mgr = NULL;
/* create, zero, and tag a cmm mgr object */
cmm_obj = kzalloc(sizeof(struct cmm_object), GFP_KERNEL);
@@ -256,8 +250,6 @@ int cmm_create(struct cmm_object **ph_cmm_mgr,
if (mgr_attrts == NULL)
mgr_attrts = &cmm_dfltmgrattrs; /* set defaults */
- /* 4 bytes minimum */
- DBC_ASSERT(mgr_attrts->min_block_size >= 4);
/* save away smallest block allocation for this cmm mgr */
cmm_obj->min_block_size = mgr_attrts->min_block_size;
cmm_obj->page_size = PAGE_SIZE;
@@ -283,7 +275,6 @@ int cmm_destroy(struct cmm_object *hcmm_mgr, bool force)
s32 slot_seg;
struct cmm_mnode *node, *tmp;
- DBC_REQUIRE(refs > 0);
if (!hcmm_mgr) {
status = -EFAULT;
return status;
@@ -333,8 +324,6 @@ int cmm_destroy(struct cmm_object *hcmm_mgr, bool force)
*/
void cmm_exit(void)
{
- DBC_REQUIRE(refs > 0);
-
refs--;
}
@@ -351,9 +340,6 @@ int cmm_free_buf(struct cmm_object *hcmm_mgr, void *buf_pa, u32 ul_seg_id)
struct cmm_allocator *allocator;
struct cmm_attrs *pattrs;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(buf_pa != NULL);
-
if (ul_seg_id == 0) {
pattrs = &cmm_dfltalctattrs;
ul_seg_id = pattrs->seg_id;
@@ -392,8 +378,6 @@ int cmm_get_handle(void *hprocessor, struct cmm_object ** ph_cmm_mgr)
int status = 0;
struct dev_object *hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(ph_cmm_mgr != NULL);
if (hprocessor != NULL)
status = proc_get_dev_object(hprocessor, &hdev_obj);
else
@@ -419,8 +403,6 @@ int cmm_get_info(struct cmm_object *hcmm_mgr,
struct cmm_allocator *altr;
struct cmm_mnode *curr;
- DBC_REQUIRE(cmm_info_obj != NULL);
-
if (!hcmm_mgr) {
status = -EFAULT;
return status;
@@ -472,12 +454,9 @@ bool cmm_init(void)
{
bool ret = true;
- DBC_REQUIRE(refs >= 0);
if (ret)
refs++;
- DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
-
return ret;
}
@@ -499,13 +478,6 @@ int cmm_register_gppsm_seg(struct cmm_object *hcmm_mgr,
struct cmm_mnode *new_node;
s32 slot_seg;
- DBC_REQUIRE(ul_size > 0);
- DBC_REQUIRE(sgmt_id != NULL);
- DBC_REQUIRE(dw_gpp_base_pa != 0);
- DBC_REQUIRE(gpp_base_va != 0);
- DBC_REQUIRE((c_factor <= CMM_ADDTODSPPA) &&
- (c_factor >= CMM_SUBFROMDSPPA));
-
dev_dbg(bridge, "%s: dw_gpp_base_pa %x ul_size %x dsp_addr_offset %x "
"dw_dsp_base %x ul_dsp_size %x gpp_base_va %x\n",
__func__, dw_gpp_base_pa, ul_size, dsp_addr_offset,
@@ -589,7 +561,6 @@ int cmm_un_register_gppsm_seg(struct cmm_object *hcmm_mgr,
struct cmm_allocator *psma;
u32 ul_id = ul_seg_id;
- DBC_REQUIRE(ul_seg_id > 0);
if (!hcmm_mgr)
return -EFAULT;
@@ -635,8 +606,6 @@ static void un_register_gppsm_seg(struct cmm_allocator *psma)
{
struct cmm_mnode *curr, *tmp;
- DBC_REQUIRE(psma != NULL);
-
/* free nodes on free list */
list_for_each_entry_safe(curr, tmp, &psma->free_list, link) {
list_del(&curr->link);
@@ -664,7 +633,6 @@ static void un_register_gppsm_seg(struct cmm_allocator *psma)
static s32 get_slot(struct cmm_object *cmm_mgr_obj)
{
s32 slot_seg = -1; /* neg on failure */
- DBC_REQUIRE(cmm_mgr_obj != NULL);
/* get first available slot in cmm mgr SMSegTab[] */
for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) {
if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] == NULL)
@@ -687,11 +655,6 @@ static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
{
struct cmm_mnode *pnode;
- DBC_REQUIRE(cmm_mgr_obj != NULL);
- DBC_REQUIRE(dw_pa != 0);
- DBC_REQUIRE(dw_va != 0);
- DBC_REQUIRE(ul_size != 0);
-
/* Check cmm mgr's node freelist */
if (list_empty(&cmm_mgr_obj->node_free_list)) {
pnode = kzalloc(sizeof(struct cmm_mnode), GFP_KERNEL);
@@ -719,7 +682,6 @@ static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
*/
static void delete_node(struct cmm_object *cmm_mgr_obj, struct cmm_mnode *pnode)
{
- DBC_REQUIRE(pnode != NULL);
list_add_tail(&pnode->link, &cmm_mgr_obj->node_free_list);
}
@@ -794,9 +756,6 @@ static void add_to_free_list(struct cmm_allocator *allocator,
static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
u32 ul_seg_id)
{
- DBC_REQUIRE(cmm_mgr_obj != NULL);
- DBC_REQUIRE((ul_seg_id > 0) && (ul_seg_id <= CMM_MAXGPPSEGS));
-
return cmm_mgr_obj->pa_gppsm_seg_tab[ul_seg_id - 1];
}
@@ -818,10 +777,6 @@ int cmm_xlator_create(struct cmm_xlatorobject **xlator,
struct cmm_xlator *xlator_object = NULL;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(xlator != NULL);
- DBC_REQUIRE(hcmm_mgr != NULL);
-
*xlator = NULL;
if (xlator_attrs == NULL)
xlator_attrs = &cmm_dfltxlatorattrs; /* set defaults */
@@ -851,13 +806,6 @@ void *cmm_xlator_alloc_buf(struct cmm_xlatorobject *xlator, void *va_buf,
void *tmp_va_buff;
struct cmm_attrs attrs;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(xlator != NULL);
- DBC_REQUIRE(xlator_obj->cmm_mgr != NULL);
- DBC_REQUIRE(va_buf != NULL);
- DBC_REQUIRE(pa_size > 0);
- DBC_REQUIRE(xlator_obj->seg_id > 0);
-
if (xlator_obj) {
attrs.seg_id = xlator_obj->seg_id;
__raw_writel(0, va_buf);
@@ -887,10 +835,6 @@ int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator, void *buf_va)
int status = -EPERM;
void *buf_pa = NULL;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(buf_va != NULL);
- DBC_REQUIRE(xlator_obj->seg_id > 0);
-
if (xlator_obj) {
/* convert Va to Pa so we can free it. */
buf_pa = cmm_xlator_translate(xlator, buf_va, CMM_VA2PA);
@@ -900,7 +844,8 @@ int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator, void *buf_va)
if (status) {
/* Uh oh, this shouldn't happen. Descriptor
* gone! */
- DBC_ASSERT(false); /* CMM is leaking mem */
+ pr_err("%s, line %d: Assertion failed\n",
+ __FILE__, __LINE__);
}
}
}
@@ -918,10 +863,6 @@ int cmm_xlator_info(struct cmm_xlatorobject *xlator, u8 ** paddr,
struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(paddr != NULL);
- DBC_REQUIRE((segm_id > 0) && (segm_id <= CMM_MAXGPPSEGS));
-
if (xlator_obj) {
if (set_info) {
/* set translators virtual address range */
@@ -948,16 +889,11 @@ void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, void *paddr,
struct cmm_allocator *allocator = NULL;
u32 dw_offset = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(paddr != NULL);
- DBC_REQUIRE((xtype >= CMM_VA2PA) && (xtype <= CMM_DSPPA2PA));
-
if (!xlator_obj)
goto loop_cont;
cmm_mgr_obj = (struct cmm_object *)xlator_obj->cmm_mgr;
/* get this translator's default SM allocator */
- DBC_ASSERT(xlator_obj->seg_id > 0);
allocator = cmm_mgr_obj->pa_gppsm_seg_tab[xlator_obj->seg_id - 1];
if (!allocator)
goto loop_cont;
diff --git a/drivers/staging/tidspbridge/pmgr/cod.c b/drivers/staging/tidspbridge/pmgr/cod.c
index 1a29264b5853..d01fb8e364b2 100644
--- a/drivers/staging/tidspbridge/pmgr/cod.c
+++ b/drivers/staging/tidspbridge/pmgr/cod.c
@@ -30,9 +30,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- Platform Manager */
/* Include appropriate loader header file */
#include <dspbridge/dbll.h>
@@ -183,10 +180,6 @@ void cod_close(struct cod_libraryobj *lib)
{
struct cod_manager *hmgr;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(lib != NULL);
- DBC_REQUIRE(lib->cod_mgr);
-
hmgr = lib->cod_mgr;
hmgr->fxns.close_fxn(lib->dbll_lib);
@@ -208,9 +201,6 @@ int cod_create(struct cod_manager **mgr, char *str_zl_file)
struct dbll_attrs zl_attrs;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(mgr != NULL);
-
/* assume failure */
*mgr = NULL;
@@ -263,9 +253,6 @@ int cod_create(struct cod_manager **mgr, char *str_zl_file)
*/
void cod_delete(struct cod_manager *cod_mgr_obj)
{
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(cod_mgr_obj);
-
if (cod_mgr_obj->base_lib) {
if (cod_mgr_obj->loaded)
cod_mgr_obj->fxns.unload_fxn(cod_mgr_obj->base_lib,
@@ -288,11 +275,7 @@ void cod_delete(struct cod_manager *cod_mgr_obj)
*/
void cod_exit(void)
{
- DBC_REQUIRE(refs > 0);
-
refs--;
-
- DBC_ENSURE(refs >= 0);
}
/*
@@ -305,10 +288,6 @@ int cod_get_base_lib(struct cod_manager *cod_mgr_obj,
{
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(cod_mgr_obj);
- DBC_REQUIRE(plib != NULL);
-
*plib = (struct dbll_library_obj *)cod_mgr_obj->base_lib;
return status;
@@ -322,10 +301,6 @@ int cod_get_base_name(struct cod_manager *cod_mgr_obj, char *sz_name,
{
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(cod_mgr_obj);
- DBC_REQUIRE(sz_name != NULL);
-
if (usize <= COD_MAXPATHLENGTH)
strncpy(sz_name, cod_mgr_obj->sz_zl_file, usize);
else
@@ -342,10 +317,6 @@ int cod_get_base_name(struct cod_manager *cod_mgr_obj, char *sz_name,
*/
int cod_get_entry(struct cod_manager *cod_mgr_obj, u32 *entry_pt)
{
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(cod_mgr_obj);
- DBC_REQUIRE(entry_pt != NULL);
-
*entry_pt = cod_mgr_obj->entry;
return 0;
@@ -361,10 +332,6 @@ int cod_get_loader(struct cod_manager *cod_mgr_obj,
{
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(cod_mgr_obj);
- DBC_REQUIRE(loader != NULL);
-
*loader = (struct dbll_tar_obj *)cod_mgr_obj->target;
return status;
@@ -382,13 +349,6 @@ int cod_get_section(struct cod_libraryobj *lib, char *str_sect,
struct cod_manager *cod_mgr_obj;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(lib != NULL);
- DBC_REQUIRE(lib->cod_mgr);
- DBC_REQUIRE(str_sect != NULL);
- DBC_REQUIRE(addr != NULL);
- DBC_REQUIRE(len != NULL);
-
*addr = 0;
*len = 0;
if (lib != NULL) {
@@ -399,8 +359,6 @@ int cod_get_section(struct cod_libraryobj *lib, char *str_sect,
status = -ESPIPE;
}
- DBC_ENSURE(!status || ((*addr == 0) && (*len == 0)));
-
return status;
}
@@ -417,11 +375,6 @@ int cod_get_sym_value(struct cod_manager *cod_mgr_obj, char *str_sym,
{
struct dbll_sym_val *dbll_sym;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(cod_mgr_obj);
- DBC_REQUIRE(str_sym != NULL);
- DBC_REQUIRE(pul_value != NULL);
-
dev_dbg(bridge, "%s: cod_mgr_obj: %p str_sym: %s pul_value: %p\n",
__func__, cod_mgr_obj, str_sym, pul_value);
if (cod_mgr_obj->base_lib) {
@@ -451,12 +404,9 @@ bool cod_init(void)
{
bool ret = true;
- DBC_REQUIRE(refs >= 0);
-
if (ret)
refs++;
- DBC_ENSURE((ret && refs > 0) || (!ret && refs >= 0));
return ret;
}
@@ -482,14 +432,6 @@ int cod_load_base(struct cod_manager *cod_mgr_obj, u32 num_argc, char *args[],
int status;
u32 i;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(cod_mgr_obj);
- DBC_REQUIRE(num_argc > 0);
- DBC_REQUIRE(args != NULL);
- DBC_REQUIRE(args[0] != NULL);
- DBC_REQUIRE(pfn_write != NULL);
- DBC_REQUIRE(cod_mgr_obj->base_lib != NULL);
-
/*
* Make sure every argv[] stated in argc has a value, or change argc to
* reflect true number in NULL terminated argv array.
@@ -538,12 +480,6 @@ int cod_open(struct cod_manager *hmgr, char *sz_coff_path,
int status = 0;
struct cod_libraryobj *lib = NULL;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hmgr);
- DBC_REQUIRE(sz_coff_path != NULL);
- DBC_REQUIRE(flags == COD_NOLOAD || flags == COD_SYMB);
- DBC_REQUIRE(lib_obj != NULL);
-
*lib_obj = NULL;
lib = kzalloc(sizeof(struct cod_libraryobj), GFP_KERNEL);
@@ -575,10 +511,6 @@ int cod_open_base(struct cod_manager *hmgr, char *sz_coff_path,
int status = 0;
struct dbll_library_obj *lib;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hmgr);
- DBC_REQUIRE(sz_coff_path != NULL);
-
/* if we previously opened a base image, close it now */
if (hmgr->base_lib) {
if (hmgr->loaded) {
@@ -612,12 +544,6 @@ int cod_read_section(struct cod_libraryobj *lib, char *str_sect,
{
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(lib != NULL);
- DBC_REQUIRE(lib->cod_mgr);
- DBC_REQUIRE(str_sect != NULL);
- DBC_REQUIRE(str_content != NULL);
-
if (lib != NULL)
status =
lib->cod_mgr->fxns.read_sect_fxn(lib->dbll_lib, str_sect,
diff --git a/drivers/staging/tidspbridge/pmgr/dbll.c b/drivers/staging/tidspbridge/pmgr/dbll.c
index 31da62b14bc9..071ee86dd028 100644
--- a/drivers/staging/tidspbridge/pmgr/dbll.c
+++ b/drivers/staging/tidspbridge/pmgr/dbll.c
@@ -21,8 +21,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
#include <dspbridge/gh.h>
/* ----------------------------------- OS Adaptation Layer */
@@ -202,9 +200,6 @@ void dbll_close(struct dbll_library_obj *zl_lib)
{
struct dbll_tar_obj *zl_target;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(zl_lib);
- DBC_REQUIRE(zl_lib->open_ref > 0);
zl_target = zl_lib->target_obj;
zl_lib->open_ref--;
if (zl_lib->open_ref == 0) {
@@ -241,10 +236,6 @@ int dbll_create(struct dbll_tar_obj **target_obj,
struct dbll_tar_obj *pzl_target;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(pattrs != NULL);
- DBC_REQUIRE(target_obj != NULL);
-
/* Allocate DBL target object */
pzl_target = kzalloc(sizeof(struct dbll_tar_obj), GFP_KERNEL);
if (target_obj != NULL) {
@@ -255,8 +246,6 @@ int dbll_create(struct dbll_tar_obj **target_obj,
pzl_target->attrs = *pattrs;
*target_obj = (struct dbll_tar_obj *)pzl_target;
}
- DBC_ENSURE((!status && *target_obj) ||
- (status && *target_obj == NULL));
}
return status;
@@ -269,9 +258,6 @@ void dbll_delete(struct dbll_tar_obj *target)
{
struct dbll_tar_obj *zl_target = (struct dbll_tar_obj *)target;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(zl_target);
-
kfree(zl_target);
}
@@ -282,14 +268,10 @@ void dbll_delete(struct dbll_tar_obj *target)
*/
void dbll_exit(void)
{
- DBC_REQUIRE(refs > 0);
-
refs--;
if (refs == 0)
gh_exit();
-
- DBC_ENSURE(refs >= 0);
}
/*
@@ -302,12 +284,6 @@ bool dbll_get_addr(struct dbll_library_obj *zl_lib, char *name,
struct dbll_symbol *sym;
bool status = false;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(zl_lib);
- DBC_REQUIRE(name != NULL);
- DBC_REQUIRE(sym_val != NULL);
- DBC_REQUIRE(zl_lib->sym_tab != NULL);
-
sym = (struct dbll_symbol *)gh_find(zl_lib->sym_tab, name);
if (sym != NULL) {
*sym_val = &sym->value;
@@ -327,10 +303,6 @@ void dbll_get_attrs(struct dbll_tar_obj *target, struct dbll_attrs *pattrs)
{
struct dbll_tar_obj *zl_target = (struct dbll_tar_obj *)target;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(zl_target);
- DBC_REQUIRE(pattrs != NULL);
-
if ((pattrs != NULL) && (zl_target != NULL))
*pattrs = zl_target->attrs;
@@ -347,12 +319,6 @@ bool dbll_get_c_addr(struct dbll_library_obj *zl_lib, char *name,
char cname[MAXEXPR + 1];
bool status = false;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(zl_lib);
- DBC_REQUIRE(sym_val != NULL);
- DBC_REQUIRE(zl_lib->sym_tab != NULL);
- DBC_REQUIRE(name != NULL);
-
cname[0] = '_';
strncpy(cname + 1, name, sizeof(cname) - 2);
@@ -382,12 +348,6 @@ int dbll_get_sect(struct dbll_library_obj *lib, char *name, u32 *paddr,
struct dbll_library_obj *zl_lib = (struct dbll_library_obj *)lib;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(name != NULL);
- DBC_REQUIRE(paddr != NULL);
- DBC_REQUIRE(psize != NULL);
- DBC_REQUIRE(zl_lib);
-
/* If DOFF file is not open, we open it. */
if (zl_lib != NULL) {
if (zl_lib->fp == NULL) {
@@ -434,8 +394,6 @@ int dbll_get_sect(struct dbll_library_obj *lib, char *name, u32 *paddr,
*/
bool dbll_init(void)
{
- DBC_REQUIRE(refs >= 0);
-
if (refs == 0)
gh_init();
@@ -456,10 +414,6 @@ int dbll_load(struct dbll_library_obj *lib, dbll_flags flags,
s32 err;
int status = 0;
bool opened_doff = false;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(zl_lib);
- DBC_REQUIRE(entry != NULL);
- DBC_REQUIRE(attrs != NULL);
/*
* Load if not already loaded.
@@ -558,8 +512,6 @@ int dbll_load(struct dbll_library_obj *lib, dbll_flags flags,
if (opened_doff)
dof_close(zl_lib);
- DBC_ENSURE(status || zl_lib->load_ref > 0);
-
dev_dbg(bridge, "%s: lib: %p flags: 0x%x entry: %p, status 0x%x\n",
__func__, lib, flags, entry, status);
@@ -577,12 +529,6 @@ int dbll_open(struct dbll_tar_obj *target, char *file, dbll_flags flags,
s32 err;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(zl_target);
- DBC_REQUIRE(zl_target->attrs.fopen != NULL);
- DBC_REQUIRE(file != NULL);
- DBC_REQUIRE(lib_obj != NULL);
-
zl_lib = zl_target->head;
while (zl_lib != NULL) {
if (strcmp(zl_lib->file_name, file) == 0) {
@@ -699,8 +645,6 @@ func_cont:
dbll_close((struct dbll_library_obj *)zl_lib);
}
- DBC_ENSURE((!status && (zl_lib->open_ref > 0) && *lib_obj)
- || (status && *lib_obj == NULL));
dev_dbg(bridge, "%s: target: %p file: %s lib_obj: %p, status 0x%x\n",
__func__, target, file, lib_obj, status);
@@ -722,12 +666,6 @@ int dbll_read_sect(struct dbll_library_obj *lib, char *name,
const struct ldr_section_info *sect = NULL;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(zl_lib);
- DBC_REQUIRE(name != NULL);
- DBC_REQUIRE(buf != NULL);
- DBC_REQUIRE(size != 0);
-
/* If DOFF file is not open, we open it. */
if (zl_lib != NULL) {
if (zl_lib->fp == NULL) {
@@ -788,14 +726,11 @@ void dbll_unload(struct dbll_library_obj *lib, struct dbll_attrs *attrs)
struct dbll_library_obj *zl_lib = (struct dbll_library_obj *)lib;
s32 err = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(zl_lib);
- DBC_REQUIRE(zl_lib->load_ref > 0);
dev_dbg(bridge, "%s: lib: %p\n", __func__, lib);
zl_lib->load_ref--;
/* Unload only if reference count is 0 */
if (zl_lib->load_ref != 0)
- goto func_end;
+ return;
zl_lib->target_obj->attrs = *attrs;
if (zl_lib->dload_mod_obj) {
@@ -814,8 +749,6 @@ void dbll_unload(struct dbll_library_obj *lib, struct dbll_attrs *attrs)
/* delete DOFF desc since it holds *lots* of host OS
* resources */
dof_close(zl_lib);
-func_end:
- DBC_ENSURE(zl_lib->load_ref >= 0);
}
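
In dbll_unload() the func_end label existed only to host a DBC_ENSURE on the load count, so once the label goes away the early exit becomes a plain return. A reduced sketch of the resulting control flow (structure and function names are illustrative):

struct lib_ref_sketch {
	int load_ref;			/* outstanding dbll_load() calls */
};

static void unload_sketch(struct lib_ref_sketch *lib)
{
	lib->load_ref--;
	if (lib->load_ref != 0)
		return;			/* other users still hold the library */

	/* last reference dropped: release loader resources here */
}
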
/*
@@ -874,8 +807,6 @@ static u16 name_hash(void *key, u16 max_bucket)
u16 hash;
char *name = (char *)key;
- DBC_REQUIRE(name != NULL);
-
hash = 0;
while (*name) {
@@ -893,9 +824,6 @@ static u16 name_hash(void *key, u16 max_bucket)
*/
static bool name_match(void *key, void *sp)
{
- DBC_REQUIRE(key != NULL);
- DBC_REQUIRE(sp != NULL);
-
if ((key != NULL) && (sp != NULL)) {
if (strcmp((char *)key, ((struct dbll_symbol *)sp)->name) ==
0)
@@ -938,10 +866,7 @@ static int dbll_read_buffer(struct dynamic_loader_stream *this, void *buffer,
struct dbll_library_obj *lib;
int bytes_read = 0;
- DBC_REQUIRE(this != NULL);
lib = pstream->lib;
- DBC_REQUIRE(lib);
-
if (lib != NULL) {
bytes_read =
(*(lib->target_obj->attrs.fread)) (buffer, 1, bufsize,
@@ -960,10 +885,7 @@ static int dbll_set_file_posn(struct dynamic_loader_stream *this,
struct dbll_library_obj *lib;
int status = 0; /* Success */
- DBC_REQUIRE(this != NULL);
lib = pstream->lib;
- DBC_REQUIRE(lib);
-
if (lib != NULL) {
status = (*(lib->target_obj->attrs.fseek)) (lib->fp, (long)pos,
SEEK_SET);
@@ -986,10 +908,7 @@ static struct dynload_symbol *dbll_find_symbol(struct dynamic_loader_sym *this,
struct dbll_sym_val *dbll_sym = NULL;
bool status = false; /* Symbol not found yet */
- DBC_REQUIRE(this != NULL);
lib = ldr_sym->lib;
- DBC_REQUIRE(lib);
-
if (lib != NULL) {
if (lib->target_obj->attrs.sym_lookup) {
/* Check current lib + base lib + dep lib +
@@ -1015,9 +934,6 @@ static struct dynload_symbol *dbll_find_symbol(struct dynamic_loader_sym *this,
if (!status && gbl_search)
dev_dbg(bridge, "%s: Symbol not found: %s\n", __func__, name);
- DBC_ASSERT((status && (dbll_sym != NULL))
- || (!status && (dbll_sym == NULL)));
-
ret_sym = (struct dynload_symbol *)dbll_sym;
return ret_sym;
}
@@ -1034,11 +950,7 @@ static struct dynload_symbol *find_in_symbol_table(struct dynamic_loader_sym
struct dbll_library_obj *lib;
struct dbll_symbol *sym;
- DBC_REQUIRE(this != NULL);
lib = ldr_sym->lib;
- DBC_REQUIRE(lib);
- DBC_REQUIRE(lib->sym_tab != NULL);
-
sym = (struct dbll_symbol *)gh_find(lib->sym_tab, (char *)name);
ret_sym = (struct dynload_symbol *)&sym->value;
@@ -1059,10 +971,7 @@ static struct dynload_symbol *dbll_add_to_symbol_table(struct dynamic_loader_sym
struct dbll_library_obj *lib;
struct dynload_symbol *ret;
- DBC_REQUIRE(this != NULL);
- DBC_REQUIRE(name);
lib = ldr_sym->lib;
- DBC_REQUIRE(lib);
/* Check to see if symbol is already defined in symbol table */
if (!(lib->target_obj->attrs.base_image)) {
@@ -1111,10 +1020,7 @@ static void dbll_purge_symbol_table(struct dynamic_loader_sym *this,
struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this;
struct dbll_library_obj *lib;
- DBC_REQUIRE(this != NULL);
lib = ldr_sym->lib;
- DBC_REQUIRE(lib);
-
/* May not need to do anything */
}
@@ -1127,9 +1033,7 @@ static void *allocate(struct dynamic_loader_sym *this, unsigned memsize)
struct dbll_library_obj *lib;
void *buf;
- DBC_REQUIRE(this != NULL);
lib = ldr_sym->lib;
- DBC_REQUIRE(lib);
buf = kzalloc(memsize, GFP_KERNEL);
@@ -1144,9 +1048,7 @@ static void deallocate(struct dynamic_loader_sym *this, void *mem_ptr)
struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this;
struct dbll_library_obj *lib;
- DBC_REQUIRE(this != NULL);
lib = ldr_sym->lib;
- DBC_REQUIRE(lib);
kfree(mem_ptr);
}
@@ -1161,9 +1063,7 @@ static void dbll_err_report(struct dynamic_loader_sym *this, const char *errstr,
struct dbll_library_obj *lib;
char temp_buf[MAXEXPR];
- DBC_REQUIRE(this != NULL);
lib = ldr_sym->lib;
- DBC_REQUIRE(lib);
vsnprintf((char *)temp_buf, MAXEXPR, (char *)errstr, args);
dev_dbg(bridge, "%s\n", temp_buf);
}
@@ -1195,9 +1095,7 @@ static int dbll_rmm_alloc(struct dynamic_loader_allocate *this,
u32 alloc_size = 0;
u32 run_addr_flag = 0;
- DBC_REQUIRE(this != NULL);
lib = dbll_alloc_obj->lib;
- DBC_REQUIRE(lib);
mem_sect_type =
(stype == DLOAD_TEXT) ? DBLL_CODE : (stype ==
@@ -1206,7 +1104,6 @@ static int dbll_rmm_alloc(struct dynamic_loader_allocate *this,
/* Attempt to extract the segment ID and requirement information from
the name of the section */
- DBC_REQUIRE(info->name);
token_len = strlen((char *)(info->name)) + 1;
sz_sect_name = kzalloc(token_len, GFP_KERNEL);
@@ -1307,9 +1204,7 @@ static void rmm_dealloc(struct dynamic_loader_allocate *this,
(stype == DLOAD_TEXT) ? DBLL_CODE : (stype ==
DLOAD_BSS) ? DBLL_BSS :
DBLL_DATA;
- DBC_REQUIRE(this != NULL);
lib = dbll_alloc_obj->lib;
- DBC_REQUIRE(lib);
/* segid was set by alloc function */
segid = (u32) info->context;
if (mem_sect_type == DBLL_CODE)
@@ -1347,9 +1242,7 @@ static int read_mem(struct dynamic_loader_initialize *this, void *buf,
struct dbll_library_obj *lib;
int bytes_read = 0;
- DBC_REQUIRE(this != NULL);
lib = init_obj->lib;
- DBC_REQUIRE(lib);
/* Need bridge_brd_read function */
return bytes_read;
}
@@ -1368,7 +1261,6 @@ static int write_mem(struct dynamic_loader_initialize *this, void *buf,
u32 mem_sect_type;
bool ret = true;
- DBC_REQUIRE(this != NULL);
lib = init_obj->lib;
if (!lib)
return false;
@@ -1415,7 +1307,6 @@ static int fill_mem(struct dynamic_loader_initialize *this, ldr_addr addr,
struct dbll_library_obj *lib;
struct dbll_init_obj *init_obj = (struct dbll_init_obj *)this;
- DBC_REQUIRE(this != NULL);
lib = init_obj->lib;
pbuf = NULL;
/* Pass the NULL pointer to write_mem to get the start address of Shared
@@ -1439,9 +1330,7 @@ static int execute(struct dynamic_loader_initialize *this, ldr_addr start)
struct dbll_library_obj *lib;
bool ret = true;
- DBC_REQUIRE(this != NULL);
lib = init_obj->lib;
- DBC_REQUIRE(lib);
/* Save entry point */
if (lib != NULL)
lib->entry = (u32) start;
diff --git a/drivers/staging/tidspbridge/pmgr/dev.c b/drivers/staging/tidspbridge/pmgr/dev.c
index 522810bc7427..883765e3c851 100644
--- a/drivers/staging/tidspbridge/pmgr/dev.c
+++ b/drivers/staging/tidspbridge/pmgr/dev.c
@@ -24,9 +24,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- Platform Manager */
#include <dspbridge/cod.h>
#include <dspbridge/drv.h>
@@ -106,11 +103,8 @@ u32 dev_brd_write_fxn(void *arb, u32 dsp_add, void *host_buf,
u32 ul_written = 0;
int status;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(host_buf != NULL); /* Required of BrdWrite(). */
if (dev_obj) {
/* Require of BrdWrite() */
- DBC_ASSERT(dev_obj->bridge_context != NULL);
status = (*dev_obj->bridge_interface.brd_write) (
dev_obj->bridge_context, host_buf,
dsp_add, ul_num_bytes, mem_space);
@@ -143,9 +137,6 @@ int dev_create_device(struct dev_object **device_obj,
struct drv_object *hdrv_obj = NULL;
struct drv_data *drv_datap = dev_get_drvdata(bridge);
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(device_obj != NULL);
- DBC_REQUIRE(driver_file_name != NULL);
status = drv_request_bridge_res_dsp((void *)&host_res);
@@ -169,7 +160,6 @@ int dev_create_device(struct dev_object **device_obj,
/* Create the device object, and pass a handle to the Bridge driver for
* storage. */
if (!status) {
- DBC_ASSERT(drv_fxns);
dev_obj = kzalloc(sizeof(struct dev_object), GFP_KERNEL);
if (dev_obj) {
/* Fill out the rest of the Dev Object structure: */
@@ -191,9 +181,6 @@ int dev_create_device(struct dev_object **device_obj,
status = (dev_obj->bridge_interface.dev_create)
(&dev_obj->bridge_context, dev_obj,
host_res);
- /* Assert bridge_dev_create()'s ensure clause: */
- DBC_ASSERT(status
- || (dev_obj->bridge_context != NULL));
} else {
status = -ENOMEM;
}
@@ -271,7 +258,6 @@ leave:
*device_obj = NULL;
}
- DBC_ENSURE((!status && *device_obj) || (status && !*device_obj));
return status;
}
@@ -287,17 +273,11 @@ int dev_create2(struct dev_object *hdev_obj)
int status = 0;
struct dev_object *dev_obj = hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hdev_obj);
-
/* There can be only one Node Manager per DEV object */
- DBC_ASSERT(!dev_obj->node_mgr);
status = node_create_mgr(&dev_obj->node_mgr, hdev_obj);
if (status)
dev_obj->node_mgr = NULL;
- DBC_ENSURE((!status && dev_obj->node_mgr != NULL)
- || (status && dev_obj->node_mgr == NULL));
return status;
}
@@ -311,9 +291,6 @@ int dev_destroy2(struct dev_object *hdev_obj)
int status = 0;
struct dev_object *dev_obj = hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hdev_obj);
-
if (dev_obj->node_mgr) {
if (node_delete_mgr(dev_obj->node_mgr))
status = -EPERM;
@@ -322,7 +299,6 @@ int dev_destroy2(struct dev_object *hdev_obj)
}
- DBC_ENSURE((!status && dev_obj->node_mgr == NULL) || status);
return status;
}
@@ -337,8 +313,6 @@ int dev_destroy_device(struct dev_object *hdev_obj)
int status = 0;
struct dev_object *dev_obj = hdev_obj;
- DBC_REQUIRE(refs > 0);
-
if (hdev_obj) {
if (dev_obj->cod_mgr) {
cod_delete(dev_obj->cod_mgr);
@@ -415,9 +389,6 @@ int dev_get_chnl_mgr(struct dev_object *hdev_obj,
int status = 0;
struct dev_object *dev_obj = hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(mgr != NULL);
-
if (hdev_obj) {
*mgr = dev_obj->chnl_mgr;
} else {
@@ -425,7 +396,6 @@ int dev_get_chnl_mgr(struct dev_object *hdev_obj,
status = -EFAULT;
}
- DBC_ENSURE(!status || (mgr != NULL && *mgr == NULL));
return status;
}
@@ -441,9 +411,6 @@ int dev_get_cmm_mgr(struct dev_object *hdev_obj,
int status = 0;
struct dev_object *dev_obj = hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(mgr != NULL);
-
if (hdev_obj) {
*mgr = dev_obj->cmm_mgr;
} else {
@@ -451,7 +418,6 @@ int dev_get_cmm_mgr(struct dev_object *hdev_obj,
status = -EFAULT;
}
- DBC_ENSURE(!status || (mgr != NULL && *mgr == NULL));
return status;
}
@@ -467,9 +433,6 @@ int dev_get_dmm_mgr(struct dev_object *hdev_obj,
int status = 0;
struct dev_object *dev_obj = hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(mgr != NULL);
-
if (hdev_obj) {
*mgr = dev_obj->dmm_mgr;
} else {
@@ -477,7 +440,6 @@ int dev_get_dmm_mgr(struct dev_object *hdev_obj,
status = -EFAULT;
}
- DBC_ENSURE(!status || (mgr != NULL && *mgr == NULL));
return status;
}
@@ -492,9 +454,6 @@ int dev_get_cod_mgr(struct dev_object *hdev_obj,
int status = 0;
struct dev_object *dev_obj = hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(cod_mgr != NULL);
-
if (hdev_obj) {
*cod_mgr = dev_obj->cod_mgr;
} else {
@@ -502,7 +461,6 @@ int dev_get_cod_mgr(struct dev_object *hdev_obj,
status = -EFAULT;
}
- DBC_ENSURE(!status || (cod_mgr != NULL && *cod_mgr == NULL));
return status;
}
@@ -514,9 +472,6 @@ int dev_get_deh_mgr(struct dev_object *hdev_obj,
{
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(deh_manager != NULL);
- DBC_REQUIRE(hdev_obj);
if (hdev_obj) {
*deh_manager = hdev_obj->deh_mgr;
} else {
@@ -537,9 +492,6 @@ int dev_get_dev_node(struct dev_object *hdev_obj,
int status = 0;
struct dev_object *dev_obj = hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(dev_nde != NULL);
-
if (hdev_obj) {
*dev_nde = dev_obj->dev_node_obj;
} else {
@@ -547,7 +499,6 @@ int dev_get_dev_node(struct dev_object *hdev_obj,
status = -EFAULT;
}
- DBC_ENSURE(!status || (dev_nde != NULL && *dev_nde == NULL));
return status;
}
@@ -578,9 +529,6 @@ int dev_get_intf_fxns(struct dev_object *hdev_obj,
int status = 0;
struct dev_object *dev_obj = hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(if_fxns != NULL);
-
if (hdev_obj) {
*if_fxns = &dev_obj->bridge_interface;
} else {
@@ -588,7 +536,6 @@ int dev_get_intf_fxns(struct dev_object *hdev_obj,
status = -EFAULT;
}
- DBC_ENSURE(!status || ((if_fxns != NULL) && (*if_fxns == NULL)));
return status;
}
@@ -600,10 +547,6 @@ int dev_get_io_mgr(struct dev_object *hdev_obj,
{
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(io_man != NULL);
- DBC_REQUIRE(hdev_obj);
-
if (hdev_obj) {
*io_man = hdev_obj->iomgr;
} else {
@@ -638,10 +581,6 @@ struct dev_object *dev_get_next(struct dev_object *hdev_obj)
*/
void dev_get_msg_mgr(struct dev_object *hdev_obj, struct msg_mgr **msg_man)
{
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(msg_man != NULL);
- DBC_REQUIRE(hdev_obj);
-
*msg_man = hdev_obj->msg_mgr;
}
@@ -656,9 +595,6 @@ int dev_get_node_manager(struct dev_object *hdev_obj,
int status = 0;
struct dev_object *dev_obj = hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(node_man != NULL);
-
if (hdev_obj) {
*node_man = dev_obj->node_mgr;
} else {
@@ -666,7 +602,6 @@ int dev_get_node_manager(struct dev_object *hdev_obj,
status = -EFAULT;
}
- DBC_ENSURE(!status || (node_man != NULL && *node_man == NULL));
return status;
}
@@ -679,9 +614,6 @@ int dev_get_symbol(struct dev_object *hdev_obj,
int status = 0;
struct cod_manager *cod_mgr;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(str_sym != NULL && pul_value != NULL);
-
if (hdev_obj) {
status = dev_get_cod_mgr(hdev_obj, &cod_mgr);
if (cod_mgr)
@@ -706,9 +638,6 @@ int dev_get_bridge_context(struct dev_object *hdev_obj,
int status = 0;
struct dev_object *dev_obj = hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(phbridge_context != NULL);
-
if (hdev_obj) {
*phbridge_context = dev_obj->bridge_context;
} else {
@@ -716,8 +645,6 @@ int dev_get_bridge_context(struct dev_object *hdev_obj,
status = -EFAULT;
}
- DBC_ENSURE(!status || ((phbridge_context != NULL) &&
- (*phbridge_context == NULL)));
return status;
}
@@ -729,16 +656,12 @@ int dev_get_bridge_context(struct dev_object *hdev_obj,
*/
void dev_exit(void)
{
- DBC_REQUIRE(refs > 0);
-
refs--;
if (refs == 0) {
cmm_exit();
dmm_exit();
}
-
- DBC_ENSURE(refs >= 0);
}
/*
@@ -750,8 +673,6 @@ bool dev_init(void)
{
bool cmm_ret, dmm_ret, ret = true;
- DBC_REQUIRE(refs >= 0);
-
if (refs == 0) {
cmm_ret = cmm_init();
dmm_ret = dmm_init();
@@ -771,8 +692,6 @@ bool dev_init(void)
if (ret)
refs++;
- DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
-
return ret;
}
@@ -841,14 +760,11 @@ int dev_set_chnl_mgr(struct dev_object *hdev_obj,
int status = 0;
struct dev_object *dev_obj = hdev_obj;
- DBC_REQUIRE(refs > 0);
-
if (hdev_obj)
dev_obj->chnl_mgr = hmgr;
else
status = -EFAULT;
- DBC_ENSURE(status || (dev_obj->chnl_mgr == hmgr));
return status;
}
@@ -859,9 +775,6 @@ int dev_set_chnl_mgr(struct dev_object *hdev_obj,
*/
void dev_set_msg_mgr(struct dev_object *hdev_obj, struct msg_mgr *hmgr)
{
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hdev_obj);
-
hdev_obj->msg_mgr = hmgr;
}
@@ -879,8 +792,6 @@ int dev_start_device(struct cfg_devnode *dev_node_obj)
struct mgr_object *hmgr_obj = NULL;
struct drv_data *drv_datap = dev_get_drvdata(bridge);
- DBC_REQUIRE(refs > 0);
-
/* Given all resources, create a device object. */
status = dev_create_device(&hdev_obj, bridge_file_name,
dev_node_obj);
@@ -944,9 +855,6 @@ static int init_cod_mgr(struct dev_object *dev_obj)
int status = 0;
char *sz_dummy_file = "dummy";
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(!dev_obj || (dev_obj->cod_mgr == NULL));
-
status = cod_create(&dev_obj->cod_mgr, sz_dummy_file);
return status;
@@ -976,10 +884,6 @@ int dev_insert_proc_object(struct dev_object *hdev_obj,
{
struct dev_object *dev_obj = (struct dev_object *)hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(dev_obj);
- DBC_REQUIRE(proc_obj != 0);
- DBC_REQUIRE(already_attached != NULL);
if (!list_empty(&dev_obj->proc_list))
*already_attached = true;
@@ -1017,10 +921,6 @@ int dev_remove_proc_object(struct dev_object *hdev_obj, u32 proc_obj)
struct list_head *cur_elem;
struct dev_object *dev_obj = (struct dev_object *)hdev_obj;
- DBC_REQUIRE(dev_obj);
- DBC_REQUIRE(proc_obj != 0);
- DBC_REQUIRE(!list_empty(&dev_obj->proc_list));
-
/* Search list for dev_obj: */
list_for_each(cur_elem, &dev_obj->proc_list) {
if ((u32) cur_elem == proc_obj) {
@@ -1069,10 +969,6 @@ static void store_interface_fxns(struct bridge_drv_interface *drv_fxns,
(intf_fxns->pfn = ((drv_fxns->pfn != NULL) ? drv_fxns->pfn : \
(cast)fxn_not_implemented))
- DBC_REQUIRE(intf_fxns != NULL);
- DBC_REQUIRE(drv_fxns != NULL);
- DBC_REQUIRE(MAKEVERSION(drv_fxns->brd_api_major_version,
- drv_fxns->brd_api_minor_version) <= BRD_API_VERSION);
bridge_version = MAKEVERSION(drv_fxns->brd_api_major_version,
drv_fxns->brd_api_minor_version);
intf_fxns->brd_api_major_version = drv_fxns->brd_api_major_version;
@@ -1119,33 +1015,5 @@ static void store_interface_fxns(struct bridge_drv_interface *drv_fxns,
STORE_FXN(fxn_msg_setqueueid, msg_set_queue_id);
}
/* Add code for any additional functions in newerBridge versions here */
- /* Ensure postcondition: */
- DBC_ENSURE(intf_fxns->dev_create != NULL);
- DBC_ENSURE(intf_fxns->dev_destroy != NULL);
- DBC_ENSURE(intf_fxns->dev_cntrl != NULL);
- DBC_ENSURE(intf_fxns->brd_monitor != NULL);
- DBC_ENSURE(intf_fxns->brd_start != NULL);
- DBC_ENSURE(intf_fxns->brd_stop != NULL);
- DBC_ENSURE(intf_fxns->brd_status != NULL);
- DBC_ENSURE(intf_fxns->brd_read != NULL);
- DBC_ENSURE(intf_fxns->brd_write != NULL);
- DBC_ENSURE(intf_fxns->chnl_create != NULL);
- DBC_ENSURE(intf_fxns->chnl_destroy != NULL);
- DBC_ENSURE(intf_fxns->chnl_open != NULL);
- DBC_ENSURE(intf_fxns->chnl_close != NULL);
- DBC_ENSURE(intf_fxns->chnl_add_io_req != NULL);
- DBC_ENSURE(intf_fxns->chnl_get_ioc != NULL);
- DBC_ENSURE(intf_fxns->chnl_cancel_io != NULL);
- DBC_ENSURE(intf_fxns->chnl_flush_io != NULL);
- DBC_ENSURE(intf_fxns->chnl_get_info != NULL);
- DBC_ENSURE(intf_fxns->chnl_get_mgr_info != NULL);
- DBC_ENSURE(intf_fxns->chnl_idle != NULL);
- DBC_ENSURE(intf_fxns->chnl_register_notify != NULL);
- DBC_ENSURE(intf_fxns->io_create != NULL);
- DBC_ENSURE(intf_fxns->io_destroy != NULL);
- DBC_ENSURE(intf_fxns->io_on_loaded != NULL);
- DBC_ENSURE(intf_fxns->io_get_proc_load != NULL);
- DBC_ENSURE(intf_fxns->msg_set_queue_id != NULL);
-
#undef STORE_FXN
}
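
store_interface_fxns() copies every entry of the bridge driver's function table into the device object, and its STORE_FXN macro substitutes fxn_not_implemented for any slot the driver left NULL; the deleted DBC_ENSURE block re-checked each slot for non-NULL, which the macro already guarantees. A reduced, self-contained sketch of the same defaulting pattern (two slots only; all names here are illustrative):

typedef int (*brd_fxn_t)(void *ctx);

struct drv_fxns_sketch  { brd_fxn_t brd_start; brd_fxn_t brd_stop; };	/* driver-provided */
struct intf_fxns_sketch { brd_fxn_t brd_start; brd_fxn_t brd_stop; };	/* stored copy */

static int fxn_not_implemented_sketch(void *ctx)
{
	return -1;			/* placeholder for unsupported operations */
}

/* Copy a slot, falling back to the stub when the driver left it NULL. */
#define STORE_FXN_SKETCH(dst, src, slot) \
	((dst)->slot = (src)->slot ? (src)->slot : fxn_not_implemented_sketch)

static void store_interface_fxns_sketch(struct intf_fxns_sketch *i,
					const struct drv_fxns_sketch *d)
{
	STORE_FXN_SKETCH(i, d, brd_start);
	STORE_FXN_SKETCH(i, d, brd_stop);
	/* every slot is now non-NULL, so no postcondition re-check is needed */
}
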
diff --git a/drivers/staging/tidspbridge/pmgr/dmm.c b/drivers/staging/tidspbridge/pmgr/dmm.c
index 8685233d7627..83faff885f1d 100644
--- a/drivers/staging/tidspbridge/pmgr/dmm.c
+++ b/drivers/staging/tidspbridge/pmgr/dmm.c
@@ -28,9 +28,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>
@@ -123,8 +120,6 @@ int dmm_create(struct dmm_object **dmm_manager,
{
struct dmm_object *dmm_obj = NULL;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(dmm_manager != NULL);
*dmm_manager = NULL;
/* create, zero, and tag a cmm mgr object */
@@ -149,7 +144,6 @@ int dmm_destroy(struct dmm_object *dmm_mgr)
struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
int status = 0;
- DBC_REQUIRE(refs > 0);
if (dmm_mgr) {
status = dmm_delete_tables(dmm_obj);
if (!status)
@@ -169,7 +163,6 @@ int dmm_delete_tables(struct dmm_object *dmm_mgr)
{
int status = 0;
- DBC_REQUIRE(refs > 0);
/* Delete all DMM tables */
if (dmm_mgr)
vfree(virtual_mapping_table);
@@ -186,7 +179,6 @@ int dmm_delete_tables(struct dmm_object *dmm_mgr)
*/
void dmm_exit(void)
{
- DBC_REQUIRE(refs > 0);
refs--;
}
@@ -202,8 +194,6 @@ int dmm_get_handle(void *hprocessor, struct dmm_object **dmm_manager)
int status = 0;
struct dev_object *hdev_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(dmm_manager != NULL);
if (hprocessor != NULL)
status = proc_get_dev_object(hprocessor, &hdev_obj);
else
@@ -224,13 +214,9 @@ bool dmm_init(void)
{
bool ret = true;
- DBC_REQUIRE(refs >= 0);
-
if (ret)
refs++;
- DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
-
virtual_mapping_table = NULL;
table_size = 0;
diff --git a/drivers/staging/tidspbridge/pmgr/dspapi.c b/drivers/staging/tidspbridge/pmgr/dspapi.c
index 767ffe270ed6..27889466a6d2 100644
--- a/drivers/staging/tidspbridge/pmgr/dspapi.c
+++ b/drivers/staging/tidspbridge/pmgr/dspapi.c
@@ -24,9 +24,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/ntfy.h>
@@ -266,7 +263,6 @@ err:
*/
void api_exit(void)
{
- DBC_REQUIRE(api_c_refs > 0);
api_c_refs--;
if (api_c_refs == 0) {
@@ -284,7 +280,6 @@ void api_exit(void)
rmm_exit();
drv_exit();
}
- DBC_ENSURE(api_c_refs >= 0);
}
/*
@@ -382,8 +377,6 @@ int api_init_complete2(void)
struct drv_data *drv_datap;
u8 dev_type;
- DBC_REQUIRE(api_c_refs > 0);
-
/* Walk the list of DevObjects, get each devnode, and attempt to
* autostart the board. Note that this requires COF loading, which
* requires KFILE. */
diff --git a/drivers/staging/tidspbridge/pmgr/io.c b/drivers/staging/tidspbridge/pmgr/io.c
index 65245f310f89..a56b085deb4d 100644
--- a/drivers/staging/tidspbridge/pmgr/io.c
+++ b/drivers/staging/tidspbridge/pmgr/io.c
@@ -23,9 +23,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
@@ -50,10 +47,6 @@ int io_create(struct io_mgr **io_man, struct dev_object *hdev_obj,
struct io_mgr_ *pio_mgr = NULL;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(io_man != NULL);
- DBC_REQUIRE(mgr_attrts != NULL);
-
*io_man = NULL;
/* A memory base of 0 implies no memory base: */
@@ -94,8 +87,6 @@ int io_destroy(struct io_mgr *hio_mgr)
struct io_mgr_ *pio_mgr = (struct io_mgr_ *)hio_mgr;
int status;
- DBC_REQUIRE(refs > 0);
-
intf_fxns = pio_mgr->intf_fxns;
/* Let Bridge channel module destroy the io_mgr: */
@@ -111,11 +102,7 @@ int io_destroy(struct io_mgr *hio_mgr)
*/
void io_exit(void)
{
- DBC_REQUIRE(refs > 0);
-
refs--;
-
- DBC_ENSURE(refs >= 0);
}
/*
@@ -127,12 +114,8 @@ bool io_init(void)
{
bool ret = true;
- DBC_REQUIRE(refs >= 0);
-
if (ret)
refs++;
- DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
-
return ret;
}
diff --git a/drivers/staging/tidspbridge/pmgr/msg.c b/drivers/staging/tidspbridge/pmgr/msg.c
index a6916039eed6..077c11850c0b 100644
--- a/drivers/staging/tidspbridge/pmgr/msg.c
+++ b/drivers/staging/tidspbridge/pmgr/msg.c
@@ -23,9 +23,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- Bridge Driver */
#include <dspbridge/dspdefs.h>
@@ -53,11 +50,6 @@ int msg_create(struct msg_mgr **msg_man,
struct msg_mgr *hmsg_mgr;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(msg_man != NULL);
- DBC_REQUIRE(msg_callback != NULL);
- DBC_REQUIRE(hdev_obj != NULL);
-
*msg_man = NULL;
dev_get_intf_fxns(hdev_obj, &intf_fxns);
@@ -90,8 +82,6 @@ void msg_delete(struct msg_mgr *hmsg_mgr)
struct msg_mgr_ *msg_mgr_obj = (struct msg_mgr_ *)hmsg_mgr;
struct bridge_drv_interface *intf_fxns;
- DBC_REQUIRE(refs > 0);
-
if (msg_mgr_obj) {
intf_fxns = msg_mgr_obj->intf_fxns;
@@ -108,10 +98,7 @@ void msg_delete(struct msg_mgr *hmsg_mgr)
*/
void msg_exit(void)
{
- DBC_REQUIRE(refs > 0);
refs--;
-
- DBC_ENSURE(refs >= 0);
}
/*
@@ -119,11 +106,7 @@ void msg_exit(void)
*/
bool msg_mod_init(void)
{
- DBC_REQUIRE(refs >= 0);
-
refs++;
- DBC_ENSURE(refs >= 0);
-
return true;
}
diff --git a/drivers/staging/tidspbridge/rmgr/dbdcd.c b/drivers/staging/tidspbridge/rmgr/dbdcd.c
index fda240214cd6..8378b7b2b666 100644
--- a/drivers/staging/tidspbridge/rmgr/dbdcd.c
+++ b/drivers/staging/tidspbridge/rmgr/dbdcd.c
@@ -29,8 +29,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
/* ----------------------------------- Platform Manager */
#include <dspbridge/cod.h>
@@ -85,8 +83,6 @@ int dcd_auto_register(struct dcd_manager *hdcd_mgr,
{
int status = 0;
- DBC_REQUIRE(refs > 0);
-
if (hdcd_mgr)
status = dcd_get_objects(hdcd_mgr, sz_coff_path,
(dcd_registerfxn) dcd_register_object,
@@ -107,8 +103,6 @@ int dcd_auto_unregister(struct dcd_manager *hdcd_mgr,
{
int status = 0;
- DBC_REQUIRE(refs > 0);
-
if (hdcd_mgr)
status = dcd_get_objects(hdcd_mgr, sz_coff_path,
(dcd_registerfxn) dcd_register_object,
@@ -131,9 +125,6 @@ int dcd_create_manager(char *sz_zl_dll_name,
struct dcd_manager *dcd_mgr_obj = NULL; /* DCD Manager pointer */
int status = 0;
- DBC_REQUIRE(refs >= 0);
- DBC_REQUIRE(dcd_mgr);
-
status = cod_create(&cod_mgr, sz_zl_dll_name);
if (status)
goto func_end;
@@ -156,9 +147,6 @@ int dcd_create_manager(char *sz_zl_dll_name,
cod_delete(cod_mgr);
}
- DBC_ENSURE((!status) ||
- ((dcd_mgr_obj == NULL) && (status == -ENOMEM)));
-
func_end:
return status;
}
@@ -173,8 +161,6 @@ int dcd_destroy_manager(struct dcd_manager *hdcd_mgr)
struct dcd_manager *dcd_mgr_obj = hdcd_mgr;
int status = -EFAULT;
- DBC_REQUIRE(refs >= 0);
-
if (hdcd_mgr) {
/* Delete the COD manager. */
cod_delete(dcd_mgr_obj->cod_mgr);
@@ -205,10 +191,6 @@ int dcd_enumerate_object(s32 index, enum dsp_dcdobjtype obj_type,
struct dcd_key_elem *dcd_key;
int len;
- DBC_REQUIRE(refs >= 0);
- DBC_REQUIRE(index >= 0);
- DBC_REQUIRE(uuid_obj != NULL);
-
if ((index != 0) && (enum_refs == 0)) {
/*
* If an enumeration is being performed on an index greater
@@ -222,7 +204,6 @@ int dcd_enumerate_object(s32 index, enum dsp_dcdobjtype obj_type,
* "_\0" + length of sz_obj_type string + terminating NULL.
*/
dw_key_len = strlen(DCD_REGKEY) + 1 + sizeof(sz_obj_type) + 1;
- DBC_ASSERT(dw_key_len < DCD_MAXPATHLENGTH);
/* Create proper REG key; concatenate DCD_REGKEY with
* obj_type. */
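
Throughout dbdcd.c the registry key is built by pre-computing dw_key_len from DCD_REGKEY and the printed object type, asserting it against DCD_MAXPATHLENGTH, and then concatenating with strncpy/strncat. With the assertion gone, the same bound can be kept by an explicit length check; a userspace sketch with placeholder values (the real DCD_REGKEY string and DCD_MAXPATHLENGTH size are not taken from the driver):

#include <stdio.h>

#define REGKEY_SKETCH      "Software/TexasInstruments/DspBridge"	/* placeholder */
#define MAXPATHLEN_SKETCH  255						/* placeholder */

/* Build "<regkey>_<obj_type>" into a fixed buffer; fail rather than truncate. */
static int build_reg_key(char *key, size_t size, int obj_type)
{
	int n = snprintf(key, size, "%s_%d", REGKEY_SKETCH, obj_type);

	return (n < 0 || (size_t)n >= size) ? -1 : 0;
}
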
@@ -294,8 +275,6 @@ int dcd_enumerate_object(s32 index, enum dsp_dcdobjtype obj_type,
}
}
- DBC_ENSURE(uuid_obj || (status == -EPERM));
-
return status;
}
@@ -307,7 +286,6 @@ int dcd_enumerate_object(s32 index, enum dsp_dcdobjtype obj_type,
void dcd_exit(void)
{
struct dcd_key_elem *rv, *rv_tmp;
- DBC_REQUIRE(refs > 0);
refs--;
if (refs == 0) {
@@ -319,7 +297,6 @@ void dcd_exit(void)
}
}
- DBC_ENSURE(refs >= 0);
}
/*
@@ -333,12 +310,6 @@ int dcd_get_dep_libs(struct dcd_manager *hdcd_mgr,
{
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hdcd_mgr);
- DBC_REQUIRE(uuid_obj != NULL);
- DBC_REQUIRE(dep_lib_uuids != NULL);
- DBC_REQUIRE(prstnt_dep_libs != NULL);
-
status =
get_dep_lib_info(hdcd_mgr, uuid_obj, &num_libs, NULL, dep_lib_uuids,
prstnt_dep_libs, phase);
@@ -356,12 +327,6 @@ int dcd_get_num_dep_libs(struct dcd_manager *hdcd_mgr,
{
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hdcd_mgr);
- DBC_REQUIRE(num_libs != NULL);
- DBC_REQUIRE(num_pers_libs != NULL);
- DBC_REQUIRE(uuid_obj != NULL);
-
status = get_dep_lib_info(hdcd_mgr, uuid_obj, num_libs, num_pers_libs,
NULL, NULL, phase);
@@ -393,10 +358,6 @@ int dcd_get_object_def(struct dcd_manager *hdcd_mgr,
u32 dw_key_len; /* Len of REG key. */
char sz_obj_type[MAX_INT2CHAR_LENGTH]; /* str. rep. of obj_type. */
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(obj_def != NULL);
- DBC_REQUIRE(obj_uuid != NULL);
-
sz_uuid = kzalloc(MAXUUIDLEN, GFP_KERNEL);
if (!sz_uuid) {
status = -ENOMEM;
@@ -411,7 +372,6 @@ int dcd_get_object_def(struct dcd_manager *hdcd_mgr,
/* Pre-determine final key length. It's length of DCD_REGKEY +
* "_\0" + length of sz_obj_type string + terminating NULL */
dw_key_len = strlen(DCD_REGKEY) + 1 + sizeof(sz_obj_type) + 1;
- DBC_ASSERT(dw_key_len < DCD_MAXPATHLENGTH);
/* Create proper REG key; concatenate DCD_REGKEY with obj_type. */
strncpy(sz_reg_key, DCD_REGKEY, strlen(DCD_REGKEY) + 1);
@@ -470,7 +430,6 @@ int dcd_get_object_def(struct dcd_manager *hdcd_mgr,
}
/* Ensure sz_uuid + 1 is not greater than sizeof sz_sect_name. */
- DBC_ASSERT((strlen(sz_uuid) + 1) < sizeof(sz_sect_name));
/* Create section name based on node UUID. A period is
* pre-pended to the UUID string to form the section name.
@@ -553,7 +512,6 @@ int dcd_get_objects(struct dcd_manager *hdcd_mgr,
struct dsp_uuid dsp_uuid_obj;
s32 object_type;
- DBC_REQUIRE(refs > 0);
if (!hdcd_mgr) {
status = -EFAULT;
goto func_end;
@@ -663,11 +621,6 @@ int dcd_get_library_name(struct dcd_manager *hdcd_mgr,
int status = 0;
struct dcd_key_elem *dcd_key = NULL;
- DBC_REQUIRE(uuid_obj != NULL);
- DBC_REQUIRE(str_lib_name != NULL);
- DBC_REQUIRE(buff_size != NULL);
- DBC_REQUIRE(hdcd_mgr);
-
dev_dbg(bridge, "%s: hdcd_mgr %p, uuid_obj %p, str_lib_name %p,"
" buff_size %p\n", __func__, hdcd_mgr, uuid_obj, str_lib_name,
buff_size);
@@ -677,7 +630,6 @@ int dcd_get_library_name(struct dcd_manager *hdcd_mgr,
* "_\0" + length of sz_obj_type string + terminating NULL.
*/
dw_key_len = strlen(DCD_REGKEY) + 1 + sizeof(sz_obj_type) + 1;
- DBC_ASSERT(dw_key_len < DCD_MAXPATHLENGTH);
/* Create proper REG key; concatenate DCD_REGKEY with obj_type. */
strncpy(sz_reg_key, DCD_REGKEY, strlen(DCD_REGKEY) + 1);
@@ -705,7 +657,6 @@ int dcd_get_library_name(struct dcd_manager *hdcd_mgr,
break;
default:
status = -EINVAL;
- DBC_ASSERT(false);
}
if (!status) {
if ((strlen(sz_reg_key) + strlen(sz_obj_type)) <
@@ -790,8 +741,6 @@ bool dcd_init(void)
bool init_cod;
bool ret = true;
- DBC_REQUIRE(refs >= 0);
-
if (refs == 0) {
/* Initialize required modules. */
init_cod = cod_init();
@@ -809,8 +758,6 @@ bool dcd_init(void)
if (ret)
refs++;
- DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs == 0)));
-
return ret;
}
@@ -832,15 +779,6 @@ int dcd_register_object(struct dsp_uuid *uuid_obj,
char sz_obj_type[MAX_INT2CHAR_LENGTH]; /* str. rep. of obj_type. */
struct dcd_key_elem *dcd_key = NULL;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(uuid_obj != NULL);
- DBC_REQUIRE((obj_type == DSP_DCDNODETYPE) ||
- (obj_type == DSP_DCDPROCESSORTYPE) ||
- (obj_type == DSP_DCDLIBRARYTYPE) ||
- (obj_type == DSP_DCDCREATELIBTYPE) ||
- (obj_type == DSP_DCDEXECUTELIBTYPE) ||
- (obj_type == DSP_DCDDELETELIBTYPE));
-
dev_dbg(bridge, "%s: object UUID %p, obj_type %d, szPathName %s\n",
__func__, uuid_obj, obj_type, psz_path_name);
@@ -849,7 +787,6 @@ int dcd_register_object(struct dsp_uuid *uuid_obj,
* "_\0" + length of sz_obj_type string + terminating NULL.
*/
dw_key_len = strlen(DCD_REGKEY) + 1 + sizeof(sz_obj_type) + 1;
- DBC_ASSERT(dw_key_len < DCD_MAXPATHLENGTH);
/* Create proper REG key; concatenate DCD_REGKEY with obj_type. */
strncpy(sz_reg_key, DCD_REGKEY, strlen(DCD_REGKEY) + 1);
@@ -987,15 +924,6 @@ int dcd_unregister_object(struct dsp_uuid *uuid_obj,
{
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(uuid_obj != NULL);
- DBC_REQUIRE((obj_type == DSP_DCDNODETYPE) ||
- (obj_type == DSP_DCDPROCESSORTYPE) ||
- (obj_type == DSP_DCDLIBRARYTYPE) ||
- (obj_type == DSP_DCDCREATELIBTYPE) ||
- (obj_type == DSP_DCDEXECUTELIBTYPE) ||
- (obj_type == DSP_DCDDELETELIBTYPE));
-
/*
* When dcd_register_object is called with NULL as pathname,
* it indicates an unregister object operation.
@@ -1055,12 +983,6 @@ static int get_attrs_from_buf(char *psz_buf, u32 ul_buf_size,
s32 entry_id;
#endif
- DBC_REQUIRE(psz_buf != NULL);
- DBC_REQUIRE(ul_buf_size != 0);
- DBC_REQUIRE((obj_type == DSP_DCDNODETYPE)
- || (obj_type == DSP_DCDPROCESSORTYPE));
- DBC_REQUIRE(gen_obj != NULL);
-
switch (obj_type) {
case DSP_DCDNODETYPE:
/*
@@ -1082,7 +1004,6 @@ static int get_attrs_from_buf(char *psz_buf, u32 ul_buf_size,
token = strsep(&psz_cur, seps);
/* ac_name */
- DBC_REQUIRE(token);
token_len = strlen(token);
if (token_len > DSP_MAXNAMELEN - 1)
token_len = DSP_MAXNAMELEN - 1;
@@ -1167,7 +1088,6 @@ static int get_attrs_from_buf(char *psz_buf, u32 ul_buf_size,
token = strsep(&psz_cur, seps);
/* char *str_create_phase_fxn */
- DBC_REQUIRE(token);
token_len = strlen(token);
gen_obj->obj_data.node_obj.str_create_phase_fxn =
kzalloc(token_len + 1, GFP_KERNEL);
@@ -1178,7 +1098,6 @@ static int get_attrs_from_buf(char *psz_buf, u32 ul_buf_size,
token = strsep(&psz_cur, seps);
/* char *str_execute_phase_fxn */
- DBC_REQUIRE(token);
token_len = strlen(token);
gen_obj->obj_data.node_obj.str_execute_phase_fxn =
kzalloc(token_len + 1, GFP_KERNEL);
@@ -1189,7 +1108,6 @@ static int get_attrs_from_buf(char *psz_buf, u32 ul_buf_size,
token = strsep(&psz_cur, seps);
/* char *str_delete_phase_fxn */
- DBC_REQUIRE(token);
token_len = strlen(token);
gen_obj->obj_data.node_obj.str_delete_phase_fxn =
kzalloc(token_len + 1, GFP_KERNEL);
@@ -1421,12 +1339,6 @@ static int get_dep_lib_info(struct dcd_manager *hdcd_mgr,
u16 dep_libs = 0;
int status = 0;
- DBC_REQUIRE(refs > 0);
-
- DBC_REQUIRE(hdcd_mgr);
- DBC_REQUIRE(num_libs != NULL);
- DBC_REQUIRE(uuid_obj != NULL);
-
/* Initialize to 0 dependent libraries, if only counting number of
* dependent libraries */
if (!get_uuids) {
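The DBC_REQUIRE/DBC_ASSERT/DBC_ENSURE calls dropped throughout these dcd.c hunks (and in the files that follow) are design-by-contract checks from the bridge's private <dspbridge/dbc.h>. Roughly — the exact dspbridge definitions are not part of this diff, so the config symbol and message format below are assumptions — they log a failed condition in debug builds and compile to nothing otherwise, which is why deleting them does not change the drivers' runtime behaviour:
/* Sketch only: approximate shape of the removed contract macros;
 * CONFIG_TIDSPBRIDGE_DEBUG and the pr_err() format are assumptions. */
#ifdef CONFIG_TIDSPBRIDGE_DEBUG
#define DBC_ASSERT(exp) \
	do { \
		if (!(exp)) \
			pr_err("%s, line %d: assertion (" #exp ") failed\n", \
			       __FILE__, __LINE__); \
	} while (0)
#define DBC_REQUIRE DBC_ASSERT		/* precondition  */
#define DBC_ENSURE  DBC_ASSERT		/* postcondition */
#else
#define DBC_ASSERT(exp)
#define DBC_REQUIRE(exp)
#define DBC_ENSURE(exp)
#endif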
diff --git a/drivers/staging/tidspbridge/rmgr/disp.c b/drivers/staging/tidspbridge/rmgr/disp.c
index a9aa22f3b4f6..e510bb25110b 100644
--- a/drivers/staging/tidspbridge/rmgr/disp.c
+++ b/drivers/staging/tidspbridge/rmgr/disp.c
@@ -24,9 +24,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>
@@ -96,11 +93,6 @@ int disp_create(struct disp_object **dispatch_obj,
int status = 0;
u8 dev_type;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(dispatch_obj != NULL);
- DBC_REQUIRE(disp_attrs != NULL);
- DBC_REQUIRE(hdev_obj != NULL);
-
*dispatch_obj = NULL;
/* Allocate Node Dispatcher object */
@@ -168,8 +160,6 @@ func_cont:
else
delete_disp(disp_obj);
- DBC_ENSURE((status && *dispatch_obj == NULL) ||
- (!status && *dispatch_obj));
return status;
}
@@ -179,9 +169,6 @@ func_cont:
*/
void disp_delete(struct disp_object *disp_obj)
{
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(disp_obj);
-
delete_disp(disp_obj);
}
@@ -191,11 +178,7 @@ void disp_delete(struct disp_object *disp_obj)
*/
void disp_exit(void)
{
- DBC_REQUIRE(refs > 0);
-
refs--;
-
- DBC_ENSURE(refs >= 0);
}
/*
@@ -206,12 +189,9 @@ bool disp_init(void)
{
bool ret = true;
- DBC_REQUIRE(refs >= 0);
-
if (ret)
refs++;
- DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
return ret;
}
@@ -227,10 +207,6 @@ int disp_node_change_priority(struct disp_object *disp_obj,
struct rms_command *rms_cmd;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(disp_obj);
- DBC_REQUIRE(hnode != NULL);
-
/* Send message to RMS to change priority */
rms_cmd = (struct rms_command *)(disp_obj->buf);
rms_cmd->fxn = (rms_word) (rms_fxn);
@@ -276,12 +252,6 @@ int disp_node_create(struct disp_object *disp_obj,
struct dsp_nodeinfo node_info;
u8 dev_type;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(disp_obj);
- DBC_REQUIRE(hnode != NULL);
- DBC_REQUIRE(node_get_type(hnode) != NODE_DEVICE);
- DBC_REQUIRE(node_env != NULL);
-
status = dev_get_dev_type(disp_obj->dev_obj, &dev_type);
if (status)
@@ -292,11 +262,9 @@ int disp_node_create(struct disp_object *disp_obj,
__func__, dev_type);
goto func_end;
}
- DBC_REQUIRE(pargs != NULL);
node_type = node_get_type(hnode);
node_msg_args = pargs->asa.node_msg_args;
max = disp_obj->bufsize_rms; /*Max # of RMS words that can be sent */
- DBC_ASSERT(max == RMS_COMMANDBUFSIZE);
chars_in_rms_word = sizeof(rms_word) / disp_obj->char_size;
/* Number of RMS words needed to hold arg data */
dw_length =
@@ -457,7 +425,6 @@ int disp_node_create(struct disp_object *disp_obj,
}
if (!status) {
ul_bytes = total * sizeof(rms_word);
- DBC_ASSERT(ul_bytes < (RMS_COMMANDBUFSIZE * sizeof(rms_word)));
status = send_message(disp_obj, node_get_timeout(hnode),
ul_bytes, node_env);
}
@@ -480,10 +447,6 @@ int disp_node_delete(struct disp_object *disp_obj,
int status = 0;
u8 dev_type;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(disp_obj);
- DBC_REQUIRE(hnode != NULL);
-
status = dev_get_dev_type(disp_obj->dev_obj, &dev_type);
if (!status) {
@@ -521,9 +484,6 @@ int disp_node_run(struct disp_object *disp_obj,
struct rms_command *rms_cmd;
int status = 0;
u8 dev_type;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(disp_obj);
- DBC_REQUIRE(hnode != NULL);
status = dev_get_dev_type(disp_obj->dev_obj, &dev_type);
@@ -620,7 +580,6 @@ static int fill_stream_def(rms_word *pdw_buf, u32 *ptotal, u32 offset,
* 1 from total.
*/
total += sizeof(struct rms_strm_def) / sizeof(rms_word) - 1;
- DBC_REQUIRE(strm_def.sz_device);
dw_length = strlen(strm_def.sz_device) + 1;
/* Number of RMS_WORDS needed to hold device name */
@@ -659,8 +618,6 @@ static int send_message(struct disp_object *disp_obj, u32 timeout,
struct chnl_ioc chnl_ioc_obj;
int status = 0;
- DBC_REQUIRE(pdw_arg != NULL);
-
*pdw_arg = (u32) NULL;
intf_fxns = disp_obj->intf_fxns;
chnl_obj = disp_obj->chnl_to_dsp;
@@ -703,7 +660,6 @@ static int send_message(struct disp_object *disp_obj, u32 timeout,
status = -EPERM;
} else {
if (CHNL_IS_IO_COMPLETE(chnl_ioc_obj)) {
- DBC_ASSERT(chnl_ioc_obj.buf == pbuf);
if (*((int *)chnl_ioc_obj.buf) < 0) {
/* Translate DSP's to kernel error */
status = -EREMOTEIO;
diff --git a/drivers/staging/tidspbridge/rmgr/drv.c b/drivers/staging/tidspbridge/rmgr/drv.c
index db8215f540d8..b34dba739387 100644
--- a/drivers/staging/tidspbridge/rmgr/drv.c
+++ b/drivers/staging/tidspbridge/rmgr/drv.c
@@ -24,9 +24,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- This */
#include <dspbridge/drv.h>
#include <dspbridge/dev.h>
@@ -172,7 +169,6 @@ void drv_proc_node_update_status(void *node_resource, s32 status)
{
struct node_res_object *node_res_obj =
(struct node_res_object *)node_resource;
- DBC_ASSERT(node_resource != NULL);
node_res_obj->node_allocated = status;
}
@@ -181,7 +177,6 @@ void drv_proc_node_update_heap_status(void *node_resource, s32 status)
{
struct node_res_object *node_res_obj =
(struct node_res_object *)node_resource;
- DBC_ASSERT(node_resource != NULL);
node_res_obj->heap_allocated = status;
}
@@ -308,9 +303,6 @@ int drv_create(struct drv_object **drv_obj)
struct drv_object *pdrv_object = NULL;
struct drv_data *drv_datap = dev_get_drvdata(bridge);
- DBC_REQUIRE(drv_obj != NULL);
- DBC_REQUIRE(refs > 0);
-
pdrv_object = kzalloc(sizeof(struct drv_object), GFP_KERNEL);
if (pdrv_object) {
/* Create and Initialize List of device objects */
@@ -336,7 +328,6 @@ int drv_create(struct drv_object **drv_obj)
kfree(pdrv_object);
}
- DBC_ENSURE(status || pdrv_object);
return status;
}
@@ -347,11 +338,7 @@ int drv_create(struct drv_object **drv_obj)
*/
void drv_exit(void)
{
- DBC_REQUIRE(refs > 0);
-
refs--;
-
- DBC_ENSURE(refs >= 0);
}
/*
@@ -365,9 +352,6 @@ int drv_destroy(struct drv_object *driver_obj)
struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
struct drv_data *drv_datap = dev_get_drvdata(bridge);
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(pdrv_object);
-
kfree(pdrv_object);
/* Update the DRV Object in the driver data */
if (drv_datap) {
@@ -389,17 +373,8 @@ int drv_get_dev_object(u32 index, struct drv_object *hdrv_obj,
struct dev_object **device_obj)
{
int status = 0;
-#ifdef CONFIG_TIDSPBRIDGE_DEBUG
- /* used only for Assertions and debug messages */
- struct drv_object *pdrv_obj = (struct drv_object *)hdrv_obj;
-#endif
struct dev_object *dev_obj;
u32 i;
- DBC_REQUIRE(pdrv_obj);
- DBC_REQUIRE(device_obj != NULL);
- DBC_REQUIRE(index >= 0);
- DBC_REQUIRE(refs > 0);
- DBC_ASSERT(!(list_empty(&pdrv_obj->dev_list)));
dev_obj = (struct dev_object *)drv_get_first_dev_object();
for (i = 0; i < index; i++) {
@@ -532,13 +507,9 @@ int drv_init(void)
{
s32 ret = 1; /* function return value */
- DBC_REQUIRE(refs >= 0);
-
if (ret)
refs++;
- DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
-
return ret;
}
@@ -552,10 +523,6 @@ int drv_insert_dev_object(struct drv_object *driver_obj,
{
struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hdev_obj != NULL);
- DBC_REQUIRE(pdrv_object);
-
list_add_tail((struct list_head *)hdev_obj, &pdrv_object->dev_list);
return 0;
@@ -574,12 +541,6 @@ int drv_remove_dev_object(struct drv_object *driver_obj,
struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
struct list_head *cur_elem;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(pdrv_object);
- DBC_REQUIRE(hdev_obj != NULL);
-
- DBC_REQUIRE(!list_empty(&pdrv_object->dev_list));
-
/* Search list for p_proc_object: */
list_for_each(cur_elem, &pdrv_object->dev_list) {
/* If found, remove it. */
@@ -605,9 +566,6 @@ int drv_request_resources(u32 dw_context, u32 *dev_node_strg)
struct drv_ext *pszdev_node;
struct drv_data *drv_datap = dev_get_drvdata(bridge);
- DBC_REQUIRE(dw_context != 0);
- DBC_REQUIRE(dev_node_strg != NULL);
-
/*
* Allocate memory to hold the string. This will live until
* it is freed in the Release resources. Update the driver object
@@ -639,10 +597,6 @@ int drv_request_resources(u32 dw_context, u32 *dev_node_strg)
*dev_node_strg = 0;
}
- DBC_ENSURE((!status && dev_node_strg != NULL &&
- !list_empty(&pdrv_object->dev_node_string)) ||
- (status && *dev_node_strg == 0));
-
return status;
}
@@ -900,8 +854,6 @@ void *mem_alloc_phys_mem(u32 byte_size, u32 align_mask,
void mem_free_phys_mem(void *virtual_address, u32 physical_address,
u32 byte_size)
{
- DBC_REQUIRE(virtual_address != NULL);
-
if (!ext_phys_mem_pool_enabled)
dma_free_coherent(NULL, byte_size, virtual_address,
physical_address);
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c
index 385740bad0de..c14fa8ec3fa8 100644
--- a/drivers/staging/tidspbridge/rmgr/drv_interface.c
+++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c
@@ -16,11 +16,8 @@
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
-/* ----------------------------------- Host OS */
-
#include <plat/dsp.h>
-#include <dspbridge/host_os.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
@@ -33,36 +30,25 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/clk.h>
-#include <dspbridge/sync.h>
/* ----------------------------------- Platform Manager */
-#include <dspbridge/dspapi-ioctl.h>
#include <dspbridge/dspapi.h>
#include <dspbridge/dspdrv.h>
/* ----------------------------------- Resource Manager */
#include <dspbridge/pwr.h>
-/* ----------------------------------- This */
-#include <drv_interface.h>
-
#include <dspbridge/resourcecleanup.h>
-#include <dspbridge/chnl.h>
#include <dspbridge/proc.h>
#include <dspbridge/dev.h>
-#include <dspbridge/drv.h>
#ifdef CONFIG_TIDSPBRIDGE_DVFS
#include <mach-omap2/omap3-opp.h>
#endif
/* ----------------------------------- Globals */
-#define DRIVER_NAME "DspBridge"
#define DSPBRIDGE_VERSION "0.3"
s32 dsp_debug;
@@ -131,7 +117,157 @@ MODULE_AUTHOR("Texas Instruments");
MODULE_LICENSE("GPL");
MODULE_VERSION(DSPBRIDGE_VERSION);
-static char *driver_name = DRIVER_NAME;
+/*
+ * This function is called when an application opens handle to the
+ * bridge driver.
+ */
+static int bridge_open(struct inode *ip, struct file *filp)
+{
+ int status = 0;
+ struct process_context *pr_ctxt = NULL;
+
+ /*
+ * Allocate a new process context and insert it into global
+ * process context list.
+ */
+
+#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
+ if (recover) {
+ if (filp->f_flags & O_NONBLOCK ||
+ wait_for_completion_interruptible(&bridge_open_comp))
+ return -EBUSY;
+ }
+#endif
+ pr_ctxt = kzalloc(sizeof(struct process_context), GFP_KERNEL);
+ if (pr_ctxt) {
+ pr_ctxt->res_state = PROC_RES_ALLOCATED;
+ spin_lock_init(&pr_ctxt->dmm_map_lock);
+ INIT_LIST_HEAD(&pr_ctxt->dmm_map_list);
+ spin_lock_init(&pr_ctxt->dmm_rsv_lock);
+ INIT_LIST_HEAD(&pr_ctxt->dmm_rsv_list);
+
+ pr_ctxt->node_id = kzalloc(sizeof(struct idr), GFP_KERNEL);
+ if (pr_ctxt->node_id) {
+ idr_init(pr_ctxt->node_id);
+ } else {
+ status = -ENOMEM;
+ goto err;
+ }
+
+ pr_ctxt->stream_id = kzalloc(sizeof(struct idr), GFP_KERNEL);
+ if (pr_ctxt->stream_id)
+ idr_init(pr_ctxt->stream_id);
+ else
+ status = -ENOMEM;
+ } else {
+ status = -ENOMEM;
+ }
+err:
+ filp->private_data = pr_ctxt;
+#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
+ if (!status)
+ atomic_inc(&bridge_cref);
+#endif
+ return status;
+}
+
+/*
+ * This function is called when an application closes handle to the bridge
+ * driver.
+ */
+static int bridge_release(struct inode *ip, struct file *filp)
+{
+ int status = 0;
+ struct process_context *pr_ctxt;
+
+ if (!filp->private_data) {
+ status = -EIO;
+ goto err;
+ }
+
+ pr_ctxt = filp->private_data;
+ flush_signals(current);
+ drv_remove_all_resources(pr_ctxt);
+ proc_detach(pr_ctxt);
+ kfree(pr_ctxt);
+
+ filp->private_data = NULL;
+
+err:
+#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
+ if (!atomic_dec_return(&bridge_cref))
+ complete(&bridge_comp);
+#endif
+ return status;
+}
+
+/* This function provides IO interface to the bridge driver. */
+static long bridge_ioctl(struct file *filp, unsigned int code,
+ unsigned long args)
+{
+ int status;
+ u32 retval = 0;
+ union trapped_args buf_in;
+
+#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
+ if (recover) {
+ status = -EIO;
+ goto err;
+ }
+#endif
+#ifdef CONFIG_PM
+ status = omap34_xxbridge_suspend_lockout(&bridge_suspend_data, filp);
+ if (status != 0)
+ return status;
+#endif
+
+ if (!filp->private_data) {
+ status = -EIO;
+ goto err;
+ }
+
+ status = copy_from_user(&buf_in, (union trapped_args *)args,
+ sizeof(union trapped_args));
+
+ if (!status) {
+ status = api_call_dev_ioctl(code, &buf_in, &retval,
+ filp->private_data);
+
+ if (!status) {
+ status = retval;
+ } else {
+ dev_dbg(bridge, "%s: IOCTL Failed, code: 0x%x "
+ "status 0x%x\n", __func__, code, status);
+ status = -1;
+ }
+
+ }
+
+err:
+ return status;
+}
+
+/* This function maps kernel space memory to user space memory. */
+static int bridge_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ u32 status;
+
+ vma->vm_flags |= VM_RESERVED | VM_IO;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ dev_dbg(bridge, "%s: vm filp %p start %lx end %lx page_prot %lx "
+ "flags %lx\n", __func__, filp,
+ vma->vm_start, vma->vm_end, vma->vm_page_prot,
+ vma->vm_flags);
+
+ status = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+ if (status != 0)
+ status = -EAGAIN;
+
+ return status;
+}
static const struct file_operations bridge_fops = {
.open = bridge_open,
@@ -211,10 +347,10 @@ void bridge_recover_schedule(void)
#endif
#ifdef CONFIG_TIDSPBRIDGE_DVFS
static int dspbridge_scale_notification(struct notifier_block *op,
- unsigned long val, void *ptr)
+ unsigned long val, void *ptr)
{
struct omap_dsp_platform_data *pdata =
- omap_dspbridge_dev->dev.platform_data;
+ omap_dspbridge_dev->dev.platform_data;
if (CPUFREQ_POSTCHANGE == val && pdata->dsp_get_opp)
pwr_pm_post_scale(PRCM_VDD1, pdata->dsp_get_opp());
@@ -319,7 +455,7 @@ err2:
err1:
#ifdef CONFIG_TIDSPBRIDGE_DVFS
cpufreq_unregister_notifier(&iva_clk_notifier,
- CPUFREQ_TRANSITION_NOTIFIER);
+ CPUFREQ_TRANSITION_NOTIFIER);
#endif
dsp_clk_exit();
@@ -345,7 +481,7 @@ static int __devinit omap34_xx_bridge_probe(struct platform_device *pdev)
goto err1;
/* use 2.6 device model */
- err = alloc_chrdev_region(&dev, 0, 1, driver_name);
+ err = alloc_chrdev_region(&dev, 0, 1, "DspBridge");
if (err) {
pr_err("%s: Can't get major %d\n", __func__, driver_major);
goto err1;
@@ -385,7 +521,6 @@ err1:
static int __devexit omap34_xx_bridge_remove(struct platform_device *pdev)
{
dev_t devno;
- bool ret;
int status = 0;
struct drv_data *drv_datap = dev_get_drvdata(bridge);
@@ -398,16 +533,15 @@ static int __devexit omap34_xx_bridge_remove(struct platform_device *pdev)
#ifdef CONFIG_TIDSPBRIDGE_DVFS
if (cpufreq_unregister_notifier(&iva_clk_notifier,
- CPUFREQ_TRANSITION_NOTIFIER))
+ CPUFREQ_TRANSITION_NOTIFIER))
pr_err("%s: cpufreq_unregister_notifier failed for iva2_ck\n",
__func__);
#endif /* #ifdef CONFIG_TIDSPBRIDGE_DVFS */
if (driver_context) {
/* Put the DSP in reset state */
- ret = dsp_deinit(driver_context);
+ dsp_deinit(driver_context);
driver_context = 0;
- DBC_ASSERT(ret == true);
}
kfree(drv_datap);
@@ -431,7 +565,7 @@ func_cont:
}
#ifdef CONFIG_PM
-static int BRIDGE_SUSPEND(struct platform_device *pdev, pm_message_t state)
+static int bridge_suspend(struct platform_device *pdev, pm_message_t state)
{
u32 status;
u32 command = PWR_EMERGENCYDEEPSLEEP;
@@ -444,7 +578,7 @@ static int BRIDGE_SUSPEND(struct platform_device *pdev, pm_message_t state)
return 0;
}
-static int BRIDGE_RESUME(struct platform_device *pdev)
+static int bridge_resume(struct platform_device *pdev)
{
u32 status;
@@ -456,9 +590,6 @@ static int BRIDGE_RESUME(struct platform_device *pdev)
wake_up(&bridge_suspend_data.suspend_wq);
return 0;
}
-#else
-#define BRIDGE_SUSPEND NULL
-#define BRIDGE_RESUME NULL
#endif
static struct platform_driver bridge_driver = {
@@ -467,8 +598,10 @@ static struct platform_driver bridge_driver = {
},
.probe = omap34_xx_bridge_probe,
.remove = __devexit_p(omap34_xx_bridge_remove),
- .suspend = BRIDGE_SUSPEND,
- .resume = BRIDGE_RESUME,
+#ifdef CONFIG_PM
+ .suspend = bridge_suspend,
+ .resume = bridge_resume,
+#endif
};
static int __init bridge_init(void)
@@ -481,170 +614,6 @@ static void __exit bridge_exit(void)
platform_driver_unregister(&bridge_driver);
}
-/*
- * This function is called when an application opens handle to the
- * bridge driver.
- */
-static int bridge_open(struct inode *ip, struct file *filp)
-{
- int status = 0;
- struct process_context *pr_ctxt = NULL;
-
- /*
- * Allocate a new process context and insert it into global
- * process context list.
- */
-
-#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
- if (recover) {
- if (filp->f_flags & O_NONBLOCK ||
- wait_for_completion_interruptible(&bridge_open_comp))
- return -EBUSY;
- }
-#endif
- pr_ctxt = kzalloc(sizeof(struct process_context), GFP_KERNEL);
- if (!pr_ctxt)
- return -ENOMEM;
-
- pr_ctxt->res_state = PROC_RES_ALLOCATED;
- spin_lock_init(&pr_ctxt->dmm_map_lock);
- INIT_LIST_HEAD(&pr_ctxt->dmm_map_list);
- spin_lock_init(&pr_ctxt->dmm_rsv_lock);
- INIT_LIST_HEAD(&pr_ctxt->dmm_rsv_list);
-
- pr_ctxt->node_id = kzalloc(sizeof(struct idr), GFP_KERNEL);
- if (!pr_ctxt->node_id) {
- status = -ENOMEM;
- goto err1;
- }
-
- idr_init(pr_ctxt->node_id);
-
- pr_ctxt->stream_id = kzalloc(sizeof(struct idr), GFP_KERNEL);
- if (!pr_ctxt->stream_id) {
- status = -ENOMEM;
- goto err2;
- }
-
- idr_init(pr_ctxt->stream_id);
-
- filp->private_data = pr_ctxt;
-
-#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
- atomic_inc(&bridge_cref);
-#endif
- return 0;
-
-err2:
- kfree(pr_ctxt->node_id);
-err1:
- kfree(pr_ctxt);
- return status;
-}
-
-/*
- * This function is called when an application closes handle to the bridge
- * driver.
- */
-static int bridge_release(struct inode *ip, struct file *filp)
-{
- int status = 0;
- struct process_context *pr_ctxt;
-
- if (!filp->private_data) {
- status = -EIO;
- goto err;
- }
-
- pr_ctxt = filp->private_data;
- flush_signals(current);
- drv_remove_all_resources(pr_ctxt);
- proc_detach(pr_ctxt);
- kfree(pr_ctxt->node_id);
- kfree(pr_ctxt->stream_id);
- kfree(pr_ctxt);
-
- filp->private_data = NULL;
-
-err:
-#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
- if (!atomic_dec_return(&bridge_cref))
- complete(&bridge_comp);
-#endif
- return status;
-}
-
-/* This function provides IO interface to the bridge driver. */
-static long bridge_ioctl(struct file *filp, unsigned int code,
- unsigned long args)
-{
- int status;
- u32 retval = 0;
- union trapped_args buf_in;
-
- DBC_REQUIRE(filp != NULL);
-#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
- if (recover) {
- status = -EIO;
- goto err;
- }
-#endif
-#ifdef CONFIG_PM
- status = omap34_xxbridge_suspend_lockout(&bridge_suspend_data, filp);
- if (status != 0)
- return status;
-#endif
-
- if (!filp->private_data) {
- status = -EIO;
- goto err;
- }
-
- status = copy_from_user(&buf_in, (union trapped_args *)args,
- sizeof(union trapped_args));
-
- if (!status) {
- status = api_call_dev_ioctl(code, &buf_in, &retval,
- filp->private_data);
-
- if (!status) {
- status = retval;
- } else {
- dev_dbg(bridge, "%s: IOCTL Failed, code: 0x%x "
- "status 0x%x\n", __func__, code, status);
- status = -1;
- }
-
- }
-
-err:
- return status;
-}
-
-/* This function maps kernel space memory to user space memory. */
-static int bridge_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- u32 offset = vma->vm_pgoff << PAGE_SHIFT;
- u32 status;
-
- DBC_ASSERT(vma->vm_start < vma->vm_end);
-
- vma->vm_flags |= VM_RESERVED | VM_IO;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- dev_dbg(bridge, "%s: vm filp %p offset %x start %lx end %lx page_prot "
- "%lx flags %lx\n", __func__, filp, offset,
- vma->vm_start, vma->vm_end, vma->vm_page_prot, vma->vm_flags);
-
- status = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
- if (status != 0)
- status = -EAGAIN;
-
- return status;
-}
-
/* To remove all process resources before removing the process from the
* process context list */
int drv_remove_all_resources(void *process_ctxt)
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.h b/drivers/staging/tidspbridge/rmgr/drv_interface.h
deleted file mode 100644
index ab070602adc2..000000000000
--- a/drivers/staging/tidspbridge/rmgr/drv_interface.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * drv_interface.h
- *
- * DSP-BIOS Bridge driver support functions for TI OMAP processors.
- *
- * Copyright (C) 2005-2006 Texas Instruments, Inc.
- *
- * This package is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
- * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
- */
-
-#ifndef _DRV_INTERFACE_H_
-#define _DRV_INTERFACE_H_
-
-/* Prototypes for all functions in this bridge */
-static int __init bridge_init(void); /* Initialize bridge */
-static void __exit bridge_exit(void); /* Opposite of initialize */
-static int bridge_open(struct inode *ip, struct file *filp); /* Open */
-static int bridge_release(struct inode *ip, struct file *filp); /* Release */
-static long bridge_ioctl(struct file *filp, unsigned int code,
- unsigned long args);
-static int bridge_mmap(struct file *filp, struct vm_area_struct *vma);
-#endif /* ifndef _DRV_INTERFACE_H_ */
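This header only carried static prototypes so that bridge_fops could be initialised before the handlers were defined; with the handler definitions moved above the table in drv_interface.c (see the hunks above), the prototypes are redundant and the whole file can go. A minimal sketch of the resulting layout — only .open = bridge_open is visible in the diff context above, so the remaining field names here are assumptions:
/* Sketch only: handler definitions precede the table that references
 * them, so no separate header of static prototypes is needed. */
#include <linux/fs.h>

static int bridge_open(struct inode *ip, struct file *filp)
{
	return 0;	/* real body appears in the drv_interface.c hunks above */
}

/* ... bridge_release(), bridge_ioctl(), bridge_mmap() defined likewise ... */

static const struct file_operations bridge_fops = {
	.open = bridge_open,
	/* .release, .unlocked_ioctl, .mmap (assumed field set) */
};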
diff --git a/drivers/staging/tidspbridge/rmgr/dspdrv.c b/drivers/staging/tidspbridge/rmgr/dspdrv.c
index 7a6fc737872c..dc767b183cdf 100644
--- a/drivers/staging/tidspbridge/rmgr/dspdrv.c
+++ b/drivers/staging/tidspbridge/rmgr/dspdrv.c
@@ -23,9 +23,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- Platform Manager */
#include <dspbridge/drv.h>
#include <dspbridge/dev.h>
@@ -102,8 +99,6 @@ func_cont:
} else {
dev_dbg(bridge, "%s: Failed\n", __func__);
} /* End api_init_complete2 */
- DBC_ENSURE((!status && drv_obj != NULL) ||
- (status && drv_obj == NULL));
*init_status = status;
/* Return the Driver Object */
return (u32) drv_obj;
diff --git a/drivers/staging/tidspbridge/rmgr/mgr.c b/drivers/staging/tidspbridge/rmgr/mgr.c
index d635c01c015e..938eea5aaaf9 100644
--- a/drivers/staging/tidspbridge/rmgr/mgr.c
+++ b/drivers/staging/tidspbridge/rmgr/mgr.c
@@ -26,9 +26,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>
@@ -62,9 +59,6 @@ int mgr_create(struct mgr_object **mgr_obj,
struct mgr_object *pmgr_obj = NULL;
struct drv_data *drv_datap = dev_get_drvdata(bridge);
- DBC_REQUIRE(mgr_obj != NULL);
- DBC_REQUIRE(refs > 0);
-
pmgr_obj = kzalloc(sizeof(struct mgr_object), GFP_KERNEL);
if (pmgr_obj) {
status = dcd_create_manager(ZLDLLNAME, &pmgr_obj->dcd_mgr);
@@ -92,7 +86,6 @@ int mgr_create(struct mgr_object **mgr_obj,
status = -ENOMEM;
}
- DBC_ENSURE(status || pmgr_obj);
return status;
}
@@ -106,9 +99,6 @@ int mgr_destroy(struct mgr_object *hmgr_obj)
struct mgr_object *pmgr_obj = (struct mgr_object *)hmgr_obj;
struct drv_data *drv_datap = dev_get_drvdata(bridge);
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hmgr_obj);
-
/* Free resources */
if (hmgr_obj->dcd_mgr)
dcd_destroy_manager(hmgr_obj->dcd_mgr);
@@ -140,11 +130,6 @@ int mgr_enum_node_info(u32 node_id, struct dsp_ndbprops *pndb_props,
struct mgr_object *pmgr_obj = NULL;
struct drv_data *drv_datap = dev_get_drvdata(bridge);
- DBC_REQUIRE(pndb_props != NULL);
- DBC_REQUIRE(pu_num_nodes != NULL);
- DBC_REQUIRE(undb_props_size >= sizeof(struct dsp_ndbprops));
- DBC_REQUIRE(refs > 0);
-
*pu_num_nodes = 0;
/* Get the Manager Object from the driver data */
if (!drv_datap || !drv_datap->mgr_object) {
@@ -153,7 +138,6 @@ int mgr_enum_node_info(u32 node_id, struct dsp_ndbprops *pndb_props,
}
pmgr_obj = drv_datap->mgr_object;
- DBC_ASSERT(pmgr_obj);
/* Forever loop till we hit failed or no more items in the
* Enumeration. We will exit the loop other than 0; */
while (!status) {
@@ -205,11 +189,6 @@ int mgr_enum_processor_info(u32 processor_id,
struct drv_data *drv_datap = dev_get_drvdata(bridge);
bool proc_detect = false;
- DBC_REQUIRE(processor_info != NULL);
- DBC_REQUIRE(pu_num_procs != NULL);
- DBC_REQUIRE(processor_info_size >= sizeof(struct dsp_processorinfo));
- DBC_REQUIRE(refs > 0);
-
*pu_num_procs = 0;
/* Retrieve the Object handle from the driver data */
@@ -242,7 +221,6 @@ int mgr_enum_processor_info(u32 processor_id,
dev_dbg(bridge, "%s: Failed to get MGR Object\n", __func__);
goto func_end;
}
- DBC_ASSERT(pmgr_obj);
/* Forever loop till we hit no more items in the
* Enumeration. We will exit the loop other than 0; */
while (status1 == 0) {
@@ -310,12 +288,9 @@ func_end:
*/
void mgr_exit(void)
{
- DBC_REQUIRE(refs > 0);
refs--;
if (refs == 0)
dcd_exit();
-
- DBC_ENSURE(refs >= 0);
}
/*
@@ -328,16 +303,11 @@ int mgr_get_dcd_handle(struct mgr_object *mgr_handle,
int status = -EPERM;
struct mgr_object *pmgr_obj = (struct mgr_object *)mgr_handle;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(dcd_handle != NULL);
-
*dcd_handle = (u32) NULL;
if (pmgr_obj) {
*dcd_handle = (u32) pmgr_obj->dcd_mgr;
status = 0;
}
- DBC_ENSURE((!status && *dcd_handle != (u32) NULL) ||
- (status && *dcd_handle == (u32) NULL));
return status;
}
@@ -351,8 +321,6 @@ bool mgr_init(void)
bool ret = true;
bool init_dcd = false;
- DBC_REQUIRE(refs >= 0);
-
if (refs == 0) {
init_dcd = dcd_init(); /* DCD Module */
@@ -363,8 +331,6 @@ bool mgr_init(void)
if (ret)
refs++;
- DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
-
return ret;
}
@@ -380,8 +346,6 @@ int mgr_wait_for_bridge_events(struct dsp_notification **anotifications,
struct sync_object *sync_events[MAX_EVENTS];
u32 i;
- DBC_REQUIRE(count < MAX_EVENTS);
-
for (i = 0; i < count; i++)
sync_events[i] = anotifications[i]->handle;
diff --git a/drivers/staging/tidspbridge/rmgr/nldr.c b/drivers/staging/tidspbridge/rmgr/nldr.c
index 0e70cba15ebc..5cff46f767b8 100644
--- a/drivers/staging/tidspbridge/rmgr/nldr.c
+++ b/drivers/staging/tidspbridge/rmgr/nldr.c
@@ -22,8 +22,6 @@
#include <dspbridge/dbdefs.h>
-#include <dspbridge/dbc.h>
-
/* Platform manager */
#include <dspbridge/cod.h>
#include <dspbridge/dev.h>
@@ -313,11 +311,6 @@ int nldr_allocate(struct nldr_object *nldr_obj, void *priv_ref,
struct nldr_nodeobject *nldr_node_obj = NULL;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(node_props != NULL);
- DBC_REQUIRE(nldr_nodeobj != NULL);
- DBC_REQUIRE(nldr_obj);
-
/* Initialize handle in case of failure */
*nldr_nodeobj = NULL;
/* Allocate node object */
@@ -398,8 +391,6 @@ int nldr_allocate(struct nldr_object *nldr_obj, void *priv_ref,
if (status && nldr_node_obj)
kfree(nldr_node_obj);
- DBC_ENSURE((!status && *nldr_nodeobj)
- || (status && *nldr_nodeobj == NULL));
return status;
}
@@ -425,12 +416,6 @@ int nldr_create(struct nldr_object **nldr,
struct rmm_segment *rmm_segs = NULL;
u16 i;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(nldr != NULL);
- DBC_REQUIRE(hdev_obj != NULL);
- DBC_REQUIRE(pattrs != NULL);
- DBC_REQUIRE(pattrs->ovly != NULL);
- DBC_REQUIRE(pattrs->write != NULL);
/* Allocate dynamic loader object */
nldr_obj = kzalloc(sizeof(struct nldr_object), GFP_KERNEL);
@@ -440,13 +425,10 @@ int nldr_create(struct nldr_object **nldr,
dev_get_cod_mgr(hdev_obj, &cod_mgr);
if (cod_mgr) {
status = cod_get_loader(cod_mgr, &nldr_obj->dbll);
- DBC_ASSERT(!status);
status = cod_get_base_lib(cod_mgr, &nldr_obj->base_lib);
- DBC_ASSERT(!status);
status =
cod_get_base_name(cod_mgr, sz_zl_file,
COD_MAXPATHLENGTH);
- DBC_ASSERT(!status);
}
status = 0;
/* end lazy status checking */
@@ -547,7 +529,6 @@ int nldr_create(struct nldr_object **nldr,
status =
cod_get_base_name(cod_mgr, sz_zl_file, COD_MAXPATHLENGTH);
/* lazy check */
- DBC_ASSERT(!status);
/* First count number of overlay nodes */
status =
dcd_get_objects(nldr_obj->dcd_mgr, sz_zl_file,
@@ -583,7 +564,6 @@ int nldr_create(struct nldr_object **nldr,
*nldr = NULL;
}
/* FIXME:Temp. Fix. Must be removed */
- DBC_ENSURE((!status && *nldr) || (status && *nldr == NULL));
return status;
}
@@ -595,8 +575,6 @@ void nldr_delete(struct nldr_object *nldr_obj)
struct ovly_sect *ovly_section;
struct ovly_sect *next;
u16 i;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(nldr_obj);
nldr_obj->ldr_fxns.exit_fxn();
if (nldr_obj->rmm)
@@ -649,14 +627,10 @@ void nldr_delete(struct nldr_object *nldr_obj)
*/
void nldr_exit(void)
{
- DBC_REQUIRE(refs > 0);
-
refs--;
if (refs == 0)
rmm_exit();
-
- DBC_ENSURE(refs >= 0);
}
/*
@@ -671,10 +645,6 @@ int nldr_get_fxn_addr(struct nldr_nodeobject *nldr_node_obj,
bool status1 = false;
s32 i = 0;
struct lib_node root = { NULL, 0, NULL };
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(nldr_node_obj);
- DBC_REQUIRE(addr != NULL);
- DBC_REQUIRE(str_fxn != NULL);
nldr_obj = nldr_node_obj->nldr_obj;
/* Called from node_create(), node_delete(), or node_run(). */
@@ -690,7 +660,6 @@ int nldr_get_fxn_addr(struct nldr_nodeobject *nldr_node_obj,
root = nldr_node_obj->delete_lib;
break;
default:
- DBC_ASSERT(false);
break;
}
} else {
@@ -760,7 +729,6 @@ int nldr_get_rmm_manager(struct nldr_object *nldr,
{
int status = 0;
struct nldr_object *nldr_obj = nldr;
- DBC_REQUIRE(rmm_mgr != NULL);
if (nldr) {
*rmm_mgr = nldr_obj->rmm;
@@ -769,8 +737,6 @@ int nldr_get_rmm_manager(struct nldr_object *nldr,
status = -EFAULT;
}
- DBC_ENSURE(!status || (rmm_mgr != NULL && *rmm_mgr == NULL));
-
return status;
}
@@ -780,14 +746,11 @@ int nldr_get_rmm_manager(struct nldr_object *nldr,
*/
bool nldr_init(void)
{
- DBC_REQUIRE(refs >= 0);
-
if (refs == 0)
rmm_init();
refs++;
- DBC_ENSURE(refs > 0);
return true;
}
@@ -801,9 +764,6 @@ int nldr_load(struct nldr_nodeobject *nldr_node_obj,
struct dsp_uuid lib_uuid;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(nldr_node_obj);
-
nldr_obj = nldr_node_obj->nldr_obj;
if (nldr_node_obj->dynamic) {
@@ -839,7 +799,6 @@ int nldr_load(struct nldr_nodeobject *nldr_node_obj,
break;
default:
- DBC_ASSERT(false);
break;
}
}
@@ -863,9 +822,6 @@ int nldr_unload(struct nldr_nodeobject *nldr_node_obj,
struct lib_node *root_lib = NULL;
s32 i = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(nldr_node_obj);
-
if (nldr_node_obj != NULL) {
if (nldr_node_obj->dynamic) {
if (*nldr_node_obj->phase_split) {
@@ -889,7 +845,6 @@ int nldr_unload(struct nldr_nodeobject *nldr_node_obj,
nldr_node_obj->pers_libs = 0;
break;
default:
- DBC_ASSERT(false);
break;
}
} else {
@@ -929,7 +884,6 @@ static int add_ovly_info(void *handle, struct dbll_sect_info *sect_info,
/* Find the node it belongs to */
for (i = 0; i < nldr_obj->ovly_nodes; i++) {
node_name = nldr_obj->ovly_table[i].node_name;
- DBC_REQUIRE(node_name);
if (strncmp(node_name, sect_name + 1, strlen(node_name)) == 0) {
/* Found the node */
break;
@@ -1018,8 +972,6 @@ static int add_ovly_node(struct dsp_uuid *uuid_obj,
/* Add node to table */
nldr_obj->ovly_table[nldr_obj->ovly_nid].uuid =
*uuid_obj;
- DBC_REQUIRE(obj_def.obj_data.node_obj.ndb_props.
- ac_name);
len =
strlen(obj_def.obj_data.node_obj.ndb_props.ac_name);
node_name = obj_def.obj_data.node_obj.ndb_props.ac_name;
@@ -1129,7 +1081,6 @@ static void free_sects(struct nldr_object *nldr_obj,
ret =
rmm_free(nldr_obj->rmm, 0, ovly_section->sect_run_addr,
ovly_section->size, true);
- DBC_ASSERT(ret);
ovly_section = ovly_section->next_sect;
i++;
}
@@ -1249,7 +1200,6 @@ static int load_lib(struct nldr_nodeobject *nldr_node_obj,
if (depth > MAXDEPTH) {
/* Error */
- DBC_ASSERT(false);
}
root->lib = NULL;
/* Allocate a buffer for library file name of size DBL_MAXPATHLENGTH */
@@ -1312,7 +1262,6 @@ static int load_lib(struct nldr_nodeobject *nldr_node_obj,
dcd_get_num_dep_libs(nldr_node_obj->nldr_obj->dcd_mgr,
&uuid, &nd_libs, &np_libs, phase);
}
- DBC_ASSERT(nd_libs >= np_libs);
if (!status) {
if (!(*nldr_node_obj->phase_split))
np_libs = 0;
@@ -1474,7 +1423,6 @@ static int load_ovly(struct nldr_nodeobject *nldr_node_obj,
}
}
- DBC_ASSERT(i < nldr_obj->ovly_nodes);
if (!po_node) {
status = -ENOENT;
@@ -1500,7 +1448,6 @@ static int load_ovly(struct nldr_nodeobject *nldr_node_obj,
break;
default:
- DBC_ASSERT(false);
break;
}
@@ -1623,9 +1570,6 @@ static int remote_alloc(void **ref, u16 mem_sect, u32 size,
struct rmm_addr *rmm_addr_obj = (struct rmm_addr *)dsp_address;
bool mem_load_req = false;
int status = -ENOMEM; /* Set to fail */
- DBC_REQUIRE(hnode);
- DBC_REQUIRE(mem_sect == DBLL_CODE || mem_sect == DBLL_DATA ||
- mem_sect == DBLL_BSS);
nldr_obj = hnode->nldr_obj;
rmm = nldr_obj->rmm;
/* Convert size to DSP words */
@@ -1651,7 +1595,6 @@ static int remote_alloc(void **ref, u16 mem_sect, u32 size,
mem_phase_bit = EXECUTEDATAFLAGBIT;
break;
default:
- DBC_ASSERT(false);
break;
}
if (mem_sect == DBLL_CODE)
@@ -1670,11 +1613,9 @@ static int remote_alloc(void **ref, u16 mem_sect, u32 size,
/* Find an appropriate segment based on mem_sect */
if (segid == NULLID) {
/* No memory requirements of preferences */
- DBC_ASSERT(!mem_load_req);
goto func_cont;
}
if (segid <= MAXSEGID) {
- DBC_ASSERT(segid < nldr_obj->dload_segs);
/* Attempt to allocate from segid first. */
rmm_addr_obj->segid = segid;
status =
@@ -1685,7 +1626,6 @@ static int remote_alloc(void **ref, u16 mem_sect, u32 size,
}
} else {
/* segid > MAXSEGID ==> Internal or external memory */
- DBC_ASSERT(segid == MEMINTERNALID || segid == MEMEXTERNALID);
/* Check for any internal or external memory segment,
* depending on segid. */
mem_sect_type |= segid == MEMINTERNALID ?
@@ -1736,8 +1676,6 @@ static int remote_free(void **ref, u16 space, u32 dsp_address,
u32 word_size;
int status = -ENOMEM; /* Set to fail */
- DBC_REQUIRE(nldr_obj);
-
rmm = nldr_obj->rmm;
/* Convert size to DSP words */
@@ -1761,7 +1699,6 @@ static void unload_lib(struct nldr_nodeobject *nldr_node_obj,
struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
u16 i;
- DBC_ASSERT(root != NULL);
/* Unload dependent libraries */
for (i = 0; i < root->dep_libs; i++)
@@ -1812,7 +1749,6 @@ static void unload_ovly(struct nldr_nodeobject *nldr_node_obj,
}
}
- DBC_ASSERT(i < nldr_obj->ovly_nodes);
if (!po_node)
/* TODO: Should we print warning here? */
@@ -1839,14 +1775,11 @@ static void unload_ovly(struct nldr_nodeobject *nldr_node_obj,
other_alloc = po_node->other_sects;
break;
default:
- DBC_ASSERT(false);
break;
}
- DBC_ASSERT(ref_count && (*ref_count > 0));
if (ref_count && (*ref_count > 0)) {
*ref_count -= 1;
if (other_ref) {
- DBC_ASSERT(*other_ref > 0);
*other_ref -= 1;
}
}
@@ -1897,9 +1830,6 @@ int nldr_find_addr(struct nldr_nodeobject *nldr_node, u32 sym_addr,
bool status1 = false;
s32 i = 0;
struct lib_node root = { NULL, 0, NULL };
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(offset_output != NULL);
- DBC_REQUIRE(sym_name != NULL);
pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x, %s)\n", __func__, (u32) nldr_node,
sym_addr, offset_range, (u32) offset_output, sym_name);
@@ -1915,7 +1845,6 @@ int nldr_find_addr(struct nldr_nodeobject *nldr_node, u32 sym_addr,
root = nldr_node->delete_lib;
break;
default:
- DBC_ASSERT(false);
break;
}
} else {
diff --git a/drivers/staging/tidspbridge/rmgr/node.c b/drivers/staging/tidspbridge/rmgr/node.c
index 5dadaa445ad9..1b24589847f9 100644
--- a/drivers/staging/tidspbridge/rmgr/node.c
+++ b/drivers/staging/tidspbridge/rmgr/node.c
@@ -26,9 +26,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/memdefs.h>
#include <dspbridge/proc.h>
@@ -326,11 +323,6 @@ int node_allocate(struct proc_object *hprocessor,
void *node_res;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hprocessor != NULL);
- DBC_REQUIRE(noderes != NULL);
- DBC_REQUIRE(node_uuid != NULL);
-
*noderes = NULL;
status = proc_get_processor_id(hprocessor, &proc_id);
@@ -673,7 +665,6 @@ func_cont:
drv_proc_node_update_heap_status(node_res, true);
drv_proc_node_update_status(node_res, true);
}
- DBC_ENSURE((status && *noderes == NULL) || (!status && *noderes));
func_end:
dev_dbg(bridge, "%s: hprocessor: %p pNodeId: %p pargs: %p attr_in: %p "
"node_res: %p status: 0x%x\n", __func__, hprocessor,
@@ -696,11 +687,6 @@ DBAPI node_alloc_msg_buf(struct node_object *hnode, u32 usize,
bool set_info;
u32 proc_id;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(pbuffer != NULL);
-
- DBC_REQUIRE(usize > 0);
-
if (!pnode)
status = -EFAULT;
else if (node_get_type(pnode) == NODE_DEVICE)
@@ -714,7 +700,6 @@ DBAPI node_alloc_msg_buf(struct node_object *hnode, u32 usize,
status = proc_get_processor_id(pnode->processor, &proc_id);
if (proc_id != DSP_UNIT) {
- DBC_ASSERT(NULL);
goto func_end;
}
/* If segment ID includes MEM_SETVIRTUALSEGID then pbuffer is a
@@ -782,8 +767,6 @@ int node_change_priority(struct node_object *hnode, s32 prio)
int status = 0;
u32 proc_id;
- DBC_REQUIRE(refs > 0);
-
if (!hnode || !hnode->node_mgr) {
status = -EFAULT;
} else {
@@ -854,7 +837,6 @@ int node_connect(struct node_object *node1, u32 stream1,
s8 chnl_mode;
u32 dw_length;
int status = 0;
- DBC_REQUIRE(refs > 0);
if (!node1 || !node2)
return -EFAULT;
@@ -903,7 +885,6 @@ int node_connect(struct node_object *node1, u32 stream1,
if (node1_type != NODE_GPP) {
hnode_mgr = node1->node_mgr;
} else {
- DBC_ASSERT(node2 != (struct node_object *)DSP_HGPPNODE);
hnode_mgr = node2->node_mgr;
}
@@ -982,9 +963,6 @@ int node_connect(struct node_object *node1, u32 stream1,
goto out_unlock;
}
- DBC_ASSERT((node1_type == NODE_GPP) ||
- (node2_type == NODE_GPP));
-
chnl_mode = (node1_type == NODE_GPP) ?
CHNL_MODETODSP : CHNL_MODEFROMDSP;
@@ -1139,7 +1117,6 @@ int node_create(struct node_object *hnode)
omap_dspbridge_dev->dev.platform_data;
#endif
- DBC_REQUIRE(refs > 0);
if (!pnode) {
status = -EFAULT;
goto func_end;
@@ -1291,10 +1268,6 @@ int node_create_mgr(struct node_mgr **node_man,
int status = 0;
u8 dev_type;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(node_man != NULL);
- DBC_REQUIRE(hdev_obj != NULL);
-
*node_man = NULL;
/* Allocate Node manager object */
node_mgr_obj = kzalloc(sizeof(struct node_mgr), GFP_KERNEL);
@@ -1375,8 +1348,6 @@ int node_create_mgr(struct node_mgr **node_man,
*node_man = node_mgr_obj;
- DBC_ENSURE((status && *node_man == NULL) || (!status && *node_man));
-
return status;
out_err:
delete_node_mgr(node_mgr_obj);
@@ -1409,7 +1380,6 @@ int node_delete(struct node_res_object *noderes,
void *node_res = noderes;
struct dsp_processorstate proc_state;
- DBC_REQUIRE(refs > 0);
if (!pnode) {
status = -EFAULT;
@@ -1554,8 +1524,6 @@ func_end:
*/
int node_delete_mgr(struct node_mgr *hnode_mgr)
{
- DBC_REQUIRE(refs > 0);
-
if (!hnode_mgr)
return -EFAULT;
@@ -1576,10 +1544,6 @@ int node_enum_nodes(struct node_mgr *hnode_mgr, void **node_tab,
struct node_object *hnode;
u32 i = 0;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(node_tab != NULL || node_tab_size == 0);
- DBC_REQUIRE(pu_num_nodes != NULL);
- DBC_REQUIRE(pu_allocated != NULL);
if (!hnode_mgr) {
status = -EFAULT;
@@ -1611,11 +1575,7 @@ func_end:
*/
void node_exit(void)
{
- DBC_REQUIRE(refs > 0);
-
refs--;
-
- DBC_ENSURE(refs >= 0);
}
/*
@@ -1629,10 +1589,6 @@ int node_free_msg_buf(struct node_object *hnode, u8 * pbuffer,
struct node_object *pnode = (struct node_object *)hnode;
int status = 0;
u32 proc_id;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(pbuffer != NULL);
- DBC_REQUIRE(pnode != NULL);
- DBC_REQUIRE(pnode->xlator != NULL);
if (!hnode) {
status = -EFAULT;
@@ -1653,7 +1609,6 @@ int node_free_msg_buf(struct node_object *hnode, u8 * pbuffer,
status = cmm_xlator_free_buf(pnode->xlator, pbuffer);
}
} else {
- DBC_ASSERT(NULL); /* BUG */
}
func_end:
return status;
@@ -1669,9 +1624,6 @@ int node_get_attr(struct node_object *hnode,
struct dsp_nodeattr *pattr, u32 attr_size)
{
struct node_mgr *hnode_mgr;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(pattr != NULL);
- DBC_REQUIRE(attr_size >= sizeof(struct dsp_nodeattr));
if (!hnode)
return -EFAULT;
@@ -1713,9 +1665,6 @@ int node_get_channel_id(struct node_object *hnode, u32 dir, u32 index,
{
enum node_type node_type;
int status = -EINVAL;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(dir == DSP_TONODE || dir == DSP_FROMNODE);
- DBC_REQUIRE(chan_id != NULL);
if (!hnode) {
status = -EFAULT;
@@ -1734,7 +1683,6 @@ int node_get_channel_id(struct node_object *hnode, u32 dir, u32 index,
}
}
} else {
- DBC_ASSERT(dir == DSP_FROMNODE);
if (index < MAX_OUTPUTS(hnode)) {
if (hnode->outputs[index].type == HOSTCONNECT) {
*chan_id = hnode->outputs[index].dev_id;
@@ -1761,9 +1709,6 @@ int node_get_message(struct node_object *hnode,
struct dsp_processorstate proc_state;
struct proc_object *hprocessor;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(message != NULL);
-
if (!hnode) {
status = -EFAULT;
goto func_end;
@@ -1831,14 +1776,12 @@ int node_get_nldr_obj(struct node_mgr *hnode_mgr,
{
int status = 0;
struct node_mgr *node_mgr_obj = hnode_mgr;
- DBC_REQUIRE(nldr_ovlyobj != NULL);
if (!hnode_mgr)
status = -EFAULT;
else
*nldr_ovlyobj = node_mgr_obj->nldr_obj;
- DBC_ENSURE(!status || (nldr_ovlyobj != NULL && *nldr_ovlyobj == NULL));
return status;
}
@@ -1852,8 +1795,6 @@ int node_get_strm_mgr(struct node_object *hnode,
{
int status = 0;
- DBC_REQUIRE(refs > 0);
-
if (!hnode)
status = -EFAULT;
else
@@ -1867,8 +1808,6 @@ int node_get_strm_mgr(struct node_object *hnode,
*/
enum nldr_loadtype node_get_load_type(struct node_object *hnode)
{
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hnode);
if (!hnode) {
dev_dbg(bridge, "%s: Failed. hnode: %p\n", __func__, hnode);
return -1;
@@ -1884,8 +1823,6 @@ enum nldr_loadtype node_get_load_type(struct node_object *hnode)
*/
u32 node_get_timeout(struct node_object *hnode)
{
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hnode);
if (!hnode) {
dev_dbg(bridge, "%s: failed. hnode: %p\n", __func__, hnode);
return 0;
@@ -1921,8 +1858,6 @@ enum node_type node_get_type(struct node_object *hnode)
*/
bool node_init(void)
{
- DBC_REQUIRE(refs >= 0);
-
refs++;
return true;
@@ -1970,8 +1905,6 @@ int node_pause(struct node_object *hnode)
struct dsp_processorstate proc_state;
struct proc_object *hprocessor;
- DBC_REQUIRE(refs > 0);
-
if (!hnode) {
status = -EFAULT;
} else {
@@ -2054,9 +1987,6 @@ int node_put_message(struct node_object *hnode,
struct dsp_processorstate proc_state;
struct proc_object *hprocessor;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(pmsg != NULL);
-
if (!hnode) {
status = -EFAULT;
goto func_end;
@@ -2146,9 +2076,6 @@ int node_register_notify(struct node_object *hnode, u32 event_mask,
struct bridge_drv_interface *intf_fxns;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hnotification != NULL);
-
if (!hnode) {
status = -EFAULT;
} else {
@@ -2207,8 +2134,6 @@ int node_run(struct node_object *hnode)
struct dsp_processorstate proc_state;
struct proc_object *hprocessor;
- DBC_REQUIRE(refs > 0);
-
if (!hnode) {
status = -EFAULT;
goto func_end;
@@ -2287,7 +2212,6 @@ int node_run(struct node_object *hnode)
NODE_GET_PRIORITY(hnode));
} else {
/* We should never get here */
- DBC_ASSERT(false);
}
func_cont1:
/* Update node state. */
@@ -2326,9 +2250,6 @@ int node_terminate(struct node_object *hnode, int *pstatus)
struct deh_mgr *hdeh_mgr;
struct dsp_processorstate proc_state;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(pstatus != NULL);
-
if (!hnode || !hnode->node_mgr) {
status = -EFAULT;
goto func_end;
@@ -2668,7 +2589,6 @@ static void fill_stream_connect(struct node_object *node1,
strm1->connect_type = CONNECTTYPE_GPPOUTPUT;
} else {
/* GPP == > NODE */
- DBC_ASSERT(node2 != (struct node_object *)DSP_HGPPNODE);
strm_index = node2->num_inputs + node2->num_outputs - 1;
strm2 = &(node2->stream_connect[strm_index]);
strm2->cb_struct = sizeof(struct dsp_streamconnect);
@@ -2748,9 +2668,6 @@ static int get_fxn_address(struct node_object *hnode, u32 * fxn_addr,
char *pstr_fxn_name = NULL;
struct node_mgr *hnode_mgr = hnode->node_mgr;
int status = 0;
- DBC_REQUIRE(node_get_type(hnode) == NODE_TASK ||
- node_get_type(hnode) == NODE_DAISSOCKET ||
- node_get_type(hnode) == NODE_MESSAGE);
switch (phase) {
case CREATEPHASE:
@@ -2767,7 +2684,6 @@ static int get_fxn_address(struct node_object *hnode, u32 * fxn_addr,
break;
default:
/* Should never get here */
- DBC_ASSERT(false);
break;
}
@@ -2787,9 +2703,6 @@ void get_node_info(struct node_object *hnode, struct dsp_nodeinfo *node_info)
{
u32 i;
- DBC_REQUIRE(hnode);
- DBC_REQUIRE(node_info != NULL);
-
node_info->cb_struct = sizeof(struct dsp_nodeinfo);
node_info->nb_node_database_props =
hnode->dcd_props.obj_data.node_obj.ndb_props;
@@ -2848,9 +2761,7 @@ static int get_node_props(struct dcd_manager *hdcd_mgr,
pmsg_args->max_msgs);
} else {
/* Copy device name */
- DBC_REQUIRE(pndb_props->ac_name);
len = strlen(pndb_props->ac_name);
- DBC_ASSERT(len < MAXDEVNAMELEN);
hnode->str_dev_name = kzalloc(len + 1, GFP_KERNEL);
if (hnode->str_dev_name == NULL) {
status = -ENOMEM;
@@ -2938,10 +2849,6 @@ int node_get_uuid_props(void *hprocessor,
struct dcd_nodeprops dcd_node_props;
struct dsp_processorstate proc_state;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hprocessor != NULL);
- DBC_REQUIRE(node_uuid != NULL);
-
if (hprocessor == NULL || node_uuid == NULL) {
status = -EFAULT;
goto func_end;
@@ -3063,8 +2970,6 @@ static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr,
/* Function interface to Bridge driver*/
struct bridge_drv_interface *intf_fxns;
- DBC_REQUIRE(hnode);
-
hnode_mgr = hnode->node_mgr;
ul_size = ul_num_bytes / hnode_mgr->dsp_word_size;
@@ -3106,9 +3011,6 @@ static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf,
/* Function interface to Bridge driver */
struct bridge_drv_interface *intf_fxns;
- DBC_REQUIRE(hnode);
- DBC_REQUIRE(mem_space & DBLL_CODE || mem_space & DBLL_DATA);
-
hnode_mgr = hnode->node_mgr;
ul_timeout = hnode->timeout;
diff --git a/drivers/staging/tidspbridge/rmgr/proc.c b/drivers/staging/tidspbridge/rmgr/proc.c
index 242dd1399996..b865d64234a2 100644
--- a/drivers/staging/tidspbridge/rmgr/proc.c
+++ b/drivers/staging/tidspbridge/rmgr/proc.c
@@ -25,9 +25,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/ntfy.h>
#include <dspbridge/sync.h>
@@ -281,9 +278,6 @@ proc_attach(u32 processor_id,
struct drv_data *drv_datap = dev_get_drvdata(bridge);
u8 dev_type;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(ph_processor != NULL);
-
if (pr_ctxt->processor) {
*ph_processor = pr_ctxt->processor;
return status;
@@ -382,10 +376,6 @@ proc_attach(u32 processor_id,
kfree(p_proc_object);
}
func_end:
- DBC_ENSURE((status == -EPERM && *ph_processor == NULL) ||
- (!status && p_proc_object) ||
- (status == 0 && p_proc_object));
-
return status;
}
@@ -445,10 +435,6 @@ int proc_auto_start(struct cfg_devnode *dev_node_obj,
struct drv_data *drv_datap = dev_get_drvdata(bridge);
u8 dev_type;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(dev_node_obj != NULL);
- DBC_REQUIRE(hdev_obj != NULL);
-
/* Create a Dummy PROC Object */
if (!drv_datap || !drv_datap->mgr_object) {
status = -ENODATA;
@@ -516,8 +502,6 @@ int proc_ctrl(void *hprocessor, u32 dw_cmd, struct dsp_cbdata * arg)
struct proc_object *p_proc_object = hprocessor;
u32 timeout = 0;
- DBC_REQUIRE(refs > 0);
-
if (p_proc_object) {
/* intercept PWR deep sleep command */
if (dw_cmd == BRDIOCTL_DEEPSLEEP) {
@@ -565,8 +549,6 @@ int proc_detach(struct process_context *pr_ctxt)
int status = 0;
struct proc_object *p_proc_object = NULL;
- DBC_REQUIRE(refs > 0);
-
p_proc_object = (struct proc_object *)pr_ctxt->processor;
if (p_proc_object) {
@@ -607,11 +589,6 @@ int proc_enum_nodes(void *hprocessor, void **node_tab,
struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
struct node_mgr *hnode_mgr = NULL;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(node_tab != NULL || node_tab_size == 0);
- DBC_REQUIRE(pu_num_nodes != NULL);
- DBC_REQUIRE(pu_allocated != NULL);
-
if (p_proc_object) {
if (!(dev_get_node_manager(p_proc_object->dev_obj,
&hnode_mgr))) {
@@ -768,8 +745,6 @@ int proc_begin_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
struct process_context *pr_ctxt = (struct process_context *) hprocessor;
struct dmm_map_object *map_obj;
- DBC_REQUIRE(refs > 0);
-
if (!pr_ctxt) {
status = -EFAULT;
goto err_out;
@@ -810,8 +785,6 @@ int proc_end_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
struct process_context *pr_ctxt = (struct process_context *) hprocessor;
struct dmm_map_object *map_obj;
- DBC_REQUIRE(refs > 0);
-
if (!pr_ctxt) {
status = -EFAULT;
goto err_out;
@@ -884,10 +857,6 @@ int proc_get_resource_info(void *hprocessor, u32 resource_type,
struct rmm_target_obj *rmm = NULL;
struct io_mgr *hio_mgr = NULL; /* IO manager handle */
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(resource_info != NULL);
- DBC_REQUIRE(resource_info_size >= sizeof(struct dsp_resourceinfo));
-
if (!p_proc_object) {
status = -EFAULT;
goto func_end;
@@ -947,11 +916,7 @@ func_end:
*/
void proc_exit(void)
{
- DBC_REQUIRE(refs > 0);
-
refs--;
-
- DBC_ENSURE(refs >= 0);
}
/*
@@ -966,9 +931,6 @@ int proc_get_dev_object(void *hprocessor,
int status = -EPERM;
struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(device_obj != NULL);
-
if (p_proc_object) {
*device_obj = p_proc_object->dev_obj;
status = 0;
@@ -977,9 +939,6 @@ int proc_get_dev_object(void *hprocessor,
status = -EFAULT;
}
- DBC_ENSURE((!status && *device_obj != NULL) ||
- (status && *device_obj == NULL));
-
return status;
}
@@ -996,10 +955,6 @@ int proc_get_state(void *hprocessor,
struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
int brd_status;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(proc_state_obj != NULL);
- DBC_REQUIRE(state_info_size >= sizeof(struct dsp_processorstate));
-
if (p_proc_object) {
/* First, retrieve BRD state information */
status = (*p_proc_object->intf_fxns->brd_status)
@@ -1063,13 +1018,9 @@ bool proc_init(void)
{
bool ret = true;
- DBC_REQUIRE(refs >= 0);
-
if (ret)
refs++;
- DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
-
return ret;
}
@@ -1111,10 +1062,6 @@ int proc_load(void *hprocessor, const s32 argc_index,
omap_dspbridge_dev->dev.platform_data;
#endif
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(argc_index > 0);
- DBC_REQUIRE(user_args != NULL);
-
#ifdef OPT_LOAD_TIME_INSTRUMENTATION
do_gettimeofday(&tv1);
#endif
@@ -1202,8 +1149,6 @@ int proc_load(void *hprocessor, const s32 argc_index,
if (status) {
status = -EPERM;
} else {
- DBC_ASSERT(p_proc_object->last_coff ==
- NULL);
/* Allocate memory for pszLastCoff */
p_proc_object->last_coff =
kzalloc((strlen(user_args[0]) +
@@ -1226,7 +1171,6 @@ int proc_load(void *hprocessor, const s32 argc_index,
if (!hmsg_mgr) {
status = msg_create(&hmsg_mgr, p_proc_object->dev_obj,
(msg_onexit) node_on_exit);
- DBC_ASSERT(!status);
dev_set_msg_mgr(p_proc_object->dev_obj, hmsg_mgr);
}
}
@@ -1322,7 +1266,6 @@ int proc_load(void *hprocessor, const s32 argc_index,
strlen(pargv0) + 1);
else
status = -ENOMEM;
- DBC_ASSERT(brd_state == BRD_LOADED);
}
}
@@ -1331,9 +1274,6 @@ func_end:
pr_err("%s: Processor failed to load\n", __func__);
proc_stop(p_proc_object);
}
- DBC_ENSURE((!status
- && p_proc_object->proc_state == PROC_LOADED)
- || status);
#ifdef OPT_LOAD_TIME_INSTRUMENTATION
do_gettimeofday(&tv2);
if (tv2.tv_usec < tv1.tv_usec) {
@@ -1443,9 +1383,6 @@ int proc_register_notify(void *hprocessor, u32 event_mask,
struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
struct deh_mgr *hdeh_mgr;
- DBC_REQUIRE(hnotification != NULL);
- DBC_REQUIRE(refs > 0);
-
/* Check processor handle */
if (!p_proc_object) {
status = -EFAULT;
@@ -1567,7 +1504,6 @@ int proc_start(void *hprocessor)
u32 dw_dsp_addr; /* Loaded code's entry point. */
int brd_state;
- DBC_REQUIRE(refs > 0);
if (!p_proc_object) {
status = -EFAULT;
goto func_end;
@@ -1616,7 +1552,6 @@ func_cont:
if (!((*p_proc_object->intf_fxns->brd_status)
(p_proc_object->bridge_context, &brd_state))) {
pr_info("%s: dsp in running state\n", __func__);
- DBC_ASSERT(brd_state != BRD_HIBERNATION);
}
} else {
pr_err("%s: Failed to start the dsp\n", __func__);
@@ -1624,8 +1559,6 @@ func_cont:
}
func_end:
- DBC_ENSURE((!status && p_proc_object->proc_state ==
- PROC_RUNNING) || status);
return status;
}
@@ -1644,9 +1577,7 @@ int proc_stop(void *hprocessor)
u32 node_tab_size = 1;
u32 num_nodes = 0;
u32 nodes_allocated = 0;
- int brd_state;
- DBC_REQUIRE(refs > 0);
if (!p_proc_object) {
status = -EFAULT;
goto func_end;
@@ -1678,11 +1609,6 @@ int proc_stop(void *hprocessor)
msg_delete(hmsg_mgr);
dev_set_msg_mgr(p_proc_object->dev_obj, NULL);
}
- if (!((*p_proc_object->
- intf_fxns->brd_status) (p_proc_object->
- bridge_context,
- &brd_state)))
- DBC_ASSERT(brd_state == BRD_STOPPED);
}
} else {
pr_err("%s: Failed to stop the processor\n", __func__);
@@ -1820,10 +1746,6 @@ static int proc_monitor(struct proc_object *proc_obj)
{
int status = -EPERM;
struct msg_mgr *hmsg_mgr;
- int brd_state;
-
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(proc_obj);
/* This is needed only when Device is loaded when it is
* already 'ACTIVE' */
@@ -1840,13 +1762,8 @@ static int proc_monitor(struct proc_object *proc_obj)
if (!((*proc_obj->intf_fxns->brd_monitor)
(proc_obj->bridge_context))) {
status = 0;
- if (!((*proc_obj->intf_fxns->brd_status)
- (proc_obj->bridge_context, &brd_state)))
- DBC_ASSERT(brd_state == BRD_IDLE);
}
- DBC_ENSURE((!status && brd_state == BRD_IDLE) ||
- status);
return status;
}
@@ -1880,8 +1797,6 @@ static char **prepend_envp(char **new_envp, char **envp, s32 envp_elems,
{
char **pp_envp = new_envp;
- DBC_REQUIRE(new_envp);
-
/* Prepend new environ var=value string */
*new_envp++ = sz_var;
@@ -1906,9 +1821,6 @@ int proc_notify_clients(void *proc, u32 events)
int status = 0;
struct proc_object *p_proc_object = (struct proc_object *)proc;
- DBC_REQUIRE(p_proc_object);
- DBC_REQUIRE(is_valid_proc_event(events));
- DBC_REQUIRE(refs > 0);
if (!p_proc_object) {
status = -EFAULT;
goto func_end;
@@ -1930,9 +1842,6 @@ int proc_notify_all_clients(void *proc, u32 events)
int status = 0;
struct proc_object *p_proc_object = (struct proc_object *)proc;
- DBC_REQUIRE(is_valid_proc_event(events));
- DBC_REQUIRE(refs > 0);
-
if (!p_proc_object) {
status = -EFAULT;
goto func_end;
diff --git a/drivers/staging/tidspbridge/rmgr/rmm.c b/drivers/staging/tidspbridge/rmgr/rmm.c
index f3dc0ddbfacc..55acfcd80a84 100644
--- a/drivers/staging/tidspbridge/rmgr/rmm.c
+++ b/drivers/staging/tidspbridge/rmgr/rmm.c
@@ -46,9 +46,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- This */
#include <dspbridge/rmm.h>
@@ -101,12 +98,6 @@ int rmm_alloc(struct rmm_target_obj *target, u32 segid, u32 size,
u32 addr;
int status = 0;
- DBC_REQUIRE(target);
- DBC_REQUIRE(dsp_address != NULL);
- DBC_REQUIRE(size > 0);
- DBC_REQUIRE(reserve || (target->num_segs > 0));
- DBC_REQUIRE(refs > 0);
-
if (!reserve) {
if (!alloc_block(target, segid, size, align, dsp_address)) {
status = -ENOMEM;
@@ -170,9 +161,6 @@ int rmm_create(struct rmm_target_obj **target_obj,
s32 i;
int status = 0;
- DBC_REQUIRE(target_obj != NULL);
- DBC_REQUIRE(num_segs == 0 || seg_tab != NULL);
-
/* Allocate DBL target object */
target = kzalloc(sizeof(struct rmm_target_obj), GFP_KERNEL);
@@ -235,9 +223,6 @@ func_cont:
}
- DBC_ENSURE((!status && *target_obj)
- || (status && *target_obj == NULL));
-
return status;
}
@@ -251,8 +236,6 @@ void rmm_delete(struct rmm_target_obj *target)
struct rmm_header *next;
u32 i;
- DBC_REQUIRE(target);
-
kfree(target->seg_tab);
list_for_each_entry_safe(sect, tmp, &target->ovly_list, list_elem) {
@@ -281,11 +264,7 @@ void rmm_delete(struct rmm_target_obj *target)
*/
void rmm_exit(void)
{
- DBC_REQUIRE(refs > 0);
-
refs--;
-
- DBC_ENSURE(refs >= 0);
}
/*
@@ -297,15 +276,6 @@ bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 dsp_addr, u32 size,
struct rmm_ovly_sect *sect, *tmp;
bool ret = false;
- DBC_REQUIRE(target);
-
- DBC_REQUIRE(reserved || segid < target->num_segs);
- DBC_REQUIRE(reserved || (dsp_addr >= target->seg_tab[segid].base &&
- (dsp_addr + size) <= (target->seg_tab[segid].
- base +
- target->seg_tab[segid].
- length)));
-
/*
* Free or unreserve memory.
*/
@@ -319,7 +289,6 @@ bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 dsp_addr, u32 size,
list_for_each_entry_safe(sect, tmp, &target->ovly_list,
list_elem) {
if (dsp_addr == sect->addr) {
- DBC_ASSERT(size == sect->size);
/* Remove from list */
list_del(&sect->list_elem);
kfree(sect);
@@ -335,8 +304,6 @@ bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 dsp_addr, u32 size,
*/
bool rmm_init(void)
{
- DBC_REQUIRE(refs >= 0);
-
refs++;
return true;
@@ -354,9 +321,6 @@ bool rmm_stat(struct rmm_target_obj *target, enum dsp_memtype segid,
u32 total_free_size = 0;
u32 free_blocks = 0;
- DBC_REQUIRE(mem_stat_buf != NULL);
- DBC_ASSERT(target != NULL);
-
if ((u32) segid < target->num_segs) {
head = target->free_list[segid];
diff --git a/drivers/staging/tidspbridge/rmgr/strm.c b/drivers/staging/tidspbridge/rmgr/strm.c
index 3fae0e9f511e..bd684f16baef 100644
--- a/drivers/staging/tidspbridge/rmgr/strm.c
+++ b/drivers/staging/tidspbridge/rmgr/strm.c
@@ -24,9 +24,6 @@
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
-/* ----------------------------------- Trace & Debug */
-#include <dspbridge/dbc.h>
-
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>
@@ -104,9 +101,6 @@ int strm_allocate_buffer(struct strm_res_object *strmres, u32 usize,
u32 i;
struct strm_object *stream_obj = strmres->stream;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(ap_buffer != NULL);
-
if (stream_obj) {
/*
* Allocate from segment specified at time of stream open.
@@ -122,7 +116,6 @@ int strm_allocate_buffer(struct strm_res_object *strmres, u32 usize,
goto func_end;
for (i = 0; i < num_bufs; i++) {
- DBC_ASSERT(stream_obj->xlator != NULL);
(void)cmm_xlator_alloc_buf(stream_obj->xlator, &ap_buffer[i],
usize);
if (ap_buffer[i] == NULL) {
@@ -156,8 +149,6 @@ int strm_close(struct strm_res_object *strmres,
int status = 0;
struct strm_object *stream_obj = strmres->stream;
- DBC_REQUIRE(refs > 0);
-
if (!stream_obj) {
status = -EFAULT;
} else {
@@ -167,7 +158,6 @@ int strm_close(struct strm_res_object *strmres,
status =
(*intf_fxns->chnl_get_info) (stream_obj->chnl_obj,
&chnl_info_obj);
- DBC_ASSERT(!status);
if (chnl_info_obj.cio_cs > 0 || chnl_info_obj.cio_reqs > 0)
status = -EPIPE;
@@ -180,9 +170,6 @@ int strm_close(struct strm_res_object *strmres,
idr_remove(pr_ctxt->stream_id, strmres->id);
func_end:
- DBC_ENSURE(status == 0 || status == -EFAULT ||
- status == -EPIPE || status == -EPERM);
-
dev_dbg(bridge, "%s: stream_obj: %p, status 0x%x\n", __func__,
stream_obj, status);
return status;
@@ -199,10 +186,6 @@ int strm_create(struct strm_mgr **strm_man,
struct strm_mgr *strm_mgr_obj;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(strm_man != NULL);
- DBC_REQUIRE(dev_obj != NULL);
-
*strm_man = NULL;
/* Allocate STRM manager object */
strm_mgr_obj = kzalloc(sizeof(struct strm_mgr), GFP_KERNEL);
@@ -217,7 +200,6 @@ int strm_create(struct strm_mgr **strm_man,
if (!status) {
(void)dev_get_intf_fxns(dev_obj,
&(strm_mgr_obj->intf_fxns));
- DBC_ASSERT(strm_mgr_obj->intf_fxns != NULL);
}
}
@@ -226,8 +208,6 @@ int strm_create(struct strm_mgr **strm_man,
else
kfree(strm_mgr_obj);
- DBC_ENSURE((!status && *strm_man) || (status && *strm_man == NULL));
-
return status;
}
@@ -238,9 +218,6 @@ int strm_create(struct strm_mgr **strm_man,
*/
void strm_delete(struct strm_mgr *strm_mgr_obj)
{
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(strm_mgr_obj);
-
kfree(strm_mgr_obj);
}
@@ -251,11 +228,7 @@ void strm_delete(struct strm_mgr *strm_mgr_obj)
*/
void strm_exit(void)
{
- DBC_REQUIRE(refs > 0);
-
refs--;
-
- DBC_ENSURE(refs >= 0);
}
/*
@@ -270,15 +243,11 @@ int strm_free_buffer(struct strm_res_object *strmres, u8 ** ap_buffer,
u32 i = 0;
struct strm_object *stream_obj = strmres->stream;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(ap_buffer != NULL);
-
if (!stream_obj)
status = -EFAULT;
if (!status) {
for (i = 0; i < num_bufs; i++) {
- DBC_ASSERT(stream_obj->xlator != NULL);
status =
cmm_xlator_free_buf(stream_obj->xlator,
ap_buffer[i]);
@@ -306,10 +275,6 @@ int strm_get_info(struct strm_object *stream_obj,
int status = 0;
void *virt_base = NULL; /* NULL if no SM used */
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(stream_info != NULL);
- DBC_REQUIRE(stream_info_size >= sizeof(struct stream_info));
-
if (!stream_obj) {
status = -EFAULT;
} else {
@@ -330,7 +295,6 @@ int strm_get_info(struct strm_object *stream_obj,
if (stream_obj->xlator) {
/* We have a translator */
- DBC_ASSERT(stream_obj->segment_id > 0);
cmm_xlator_info(stream_obj->xlator, (u8 **) &virt_base, 0,
stream_obj->segment_id, false);
}
@@ -370,8 +334,6 @@ int strm_idle(struct strm_object *stream_obj, bool flush_data)
struct bridge_drv_interface *intf_fxns;
int status = 0;
- DBC_REQUIRE(refs > 0);
-
if (!stream_obj) {
status = -EFAULT;
} else {
@@ -396,13 +358,9 @@ bool strm_init(void)
{
bool ret = true;
- DBC_REQUIRE(refs >= 0);
-
if (ret)
refs++;
- DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
-
return ret;
}
@@ -418,9 +376,6 @@ int strm_issue(struct strm_object *stream_obj, u8 *pbuf, u32 ul_bytes,
int status = 0;
void *tmp_buf = NULL;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(pbuf != NULL);
-
if (!stream_obj) {
status = -EFAULT;
} else {
@@ -471,9 +426,6 @@ int strm_open(struct node_object *hnode, u32 dir, u32 index,
void *stream_res;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(strmres != NULL);
- DBC_REQUIRE(pattr != NULL);
*strmres = NULL;
if (dir != DSP_TONODE && dir != DSP_FROMNODE) {
status = -EPERM;
@@ -536,14 +488,12 @@ int strm_open(struct node_object *hnode, u32 dir, u32 index,
goto func_cont;
/* No System DMA */
- DBC_ASSERT(strm_obj->strm_mode != STRMMODE_LDMA);
/* Get the shared mem mgr for this streams dev object */
status = dev_get_cmm_mgr(strm_mgr_obj->dev_obj, &hcmm_mgr);
if (!status) {
/*Allocate a SM addr translator for this strm. */
status = cmm_xlator_create(&strm_obj->xlator, hcmm_mgr, NULL);
if (!status) {
- DBC_ASSERT(strm_obj->segment_id > 0);
/* Set translators Virt Addr attributes */
status = cmm_xlator_info(strm_obj->xlator,
(u8 **) &pattr->virt_base,
@@ -575,10 +525,6 @@ func_cont:
* strm_mgr_obj->chnl_mgr better be valid or we
* assert here), and then return -EPERM.
*/
- DBC_ASSERT(status == -ENOSR ||
- status == -ECHRNG ||
- status == -EALREADY ||
- status == -EIO);
status = -EPERM;
}
}
@@ -594,12 +540,6 @@ func_cont:
(void)delete_strm(strm_obj);
}
- /* ensure we return a documented error code */
- DBC_ENSURE((!status && strm_obj) ||
- (*strmres == NULL && (status == -EFAULT ||
- status == -EPERM
- || status == -EINVAL)));
-
dev_dbg(bridge, "%s: hnode: %p dir: 0x%x index: 0x%x pattr: %p "
"strmres: %p status: 0x%x\n", __func__,
hnode, dir, index, pattr, strmres, status);
@@ -619,11 +559,6 @@ int strm_reclaim(struct strm_object *stream_obj, u8 ** buf_ptr,
int status = 0;
void *tmp_buf = NULL;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(buf_ptr != NULL);
- DBC_REQUIRE(nbytes != NULL);
- DBC_REQUIRE(pdw_arg != NULL);
-
if (!stream_obj) {
status = -EFAULT;
goto func_end;
@@ -679,11 +614,6 @@ int strm_reclaim(struct strm_object *stream_obj, u8 ** buf_ptr,
*buf_ptr = chnl_ioc_obj.buf;
}
func_end:
- /* ensure we return a documented return code */
- DBC_ENSURE(!status || status == -EFAULT ||
- status == -ETIME || status == -ESRCH ||
- status == -EPERM);
-
dev_dbg(bridge, "%s: stream_obj: %p buf_ptr: %p nbytes: %p "
"pdw_arg: %p status 0x%x\n", __func__, stream_obj,
buf_ptr, nbytes, pdw_arg, status);
@@ -702,9 +632,6 @@ int strm_register_notify(struct strm_object *stream_obj, u32 event_mask,
struct bridge_drv_interface *intf_fxns;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(hnotification != NULL);
-
if (!stream_obj) {
status = -EFAULT;
} else if ((event_mask & ~((DSP_STREAMIOCOMPLETION) |
@@ -725,10 +652,7 @@ int strm_register_notify(struct strm_object *stream_obj, u32 event_mask,
notify_type,
hnotification);
}
- /* ensure we return a documented return code */
- DBC_ENSURE(!status || status == -EFAULT ||
- status == -ETIME || status == -ESRCH ||
- status == -ENOSYS || status == -EPERM);
+
return status;
}
@@ -747,11 +671,6 @@ int strm_select(struct strm_object **strm_tab, u32 strms,
u32 i;
int status = 0;
- DBC_REQUIRE(refs > 0);
- DBC_REQUIRE(strm_tab != NULL);
- DBC_REQUIRE(pmask != NULL);
- DBC_REQUIRE(strms > 0);
-
*pmask = 0;
for (i = 0; i < strms; i++) {
if (!strm_tab[i]) {
@@ -811,9 +730,6 @@ int strm_select(struct strm_object **strm_tab, u32 strms,
func_end:
kfree(sync_events);
- DBC_ENSURE((!status && (*pmask != 0 || utimeout == 0)) ||
- (status && *pmask == 0));
-
return status;
}
diff --git a/drivers/staging/usbip/stub.h b/drivers/staging/usbip/stub.h
index d4073684eacd..a73e437ec215 100644
--- a/drivers/staging/usbip/stub.h
+++ b/drivers/staging/usbip/stub.h
@@ -35,7 +35,6 @@
struct stub_device {
struct usb_interface *interface;
struct usb_device *udev;
- struct list_head list;
struct usbip_device ud;
__u32 devid;
diff --git a/drivers/staging/usbip/stub_rx.c b/drivers/staging/usbip/stub_rx.c
index 27ac363d1cfa..1d5b3fc62160 100644
--- a/drivers/staging/usbip/stub_rx.c
+++ b/drivers/staging/usbip/stub_rx.c
@@ -367,15 +367,6 @@ static int get_pipe(struct stub_device *sdev, int epnum, int dir)
}
epd = &ep->desc;
-#if 0
- /* epnum 0 is always control */
- if (epnum == 0) {
- if (dir == USBIP_DIR_OUT)
- return usb_sndctrlpipe(udev, 0);
- else
- return usb_rcvctrlpipe(udev, 0);
- }
-#endif
if (usb_endpoint_xfer_control(epd)) {
if (dir == USBIP_DIR_OUT)
return usb_sndctrlpipe(udev, epnum);
diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c
index d93e7f1f7973..70f230269329 100644
--- a/drivers/staging/usbip/usbip_common.c
+++ b/drivers/staging/usbip/usbip_common.c
@@ -735,26 +735,25 @@ EXPORT_SYMBOL_GPL(usbip_recv_iso);
 * buffer and iso packets need to be stored and be in proper endian in urb
* before calling this function
*/
-int usbip_pad_iso(struct usbip_device *ud, struct urb *urb)
+void usbip_pad_iso(struct usbip_device *ud, struct urb *urb)
{
int np = urb->number_of_packets;
int i;
- int ret;
int actualoffset = urb->actual_length;
if (!usb_pipeisoc(urb->pipe))
- return 0;
+ return;
/* if no packets or length of data is 0, then nothing to unpack */
if (np == 0 || urb->actual_length == 0)
- return 0;
+ return;
/*
* if actual_length is transfer_buffer_length then no padding is
* present.
*/
if (urb->actual_length == urb->transfer_buffer_length)
- return 0;
+ return;
/*
 * loop over all packets from last to first (to prevent overwriting
@@ -766,8 +765,6 @@ int usbip_pad_iso(struct usbip_device *ud, struct urb *urb)
urb->transfer_buffer + actualoffset,
urb->iso_frame_desc[i].actual_length);
}
-
- return ret;
}
EXPORT_SYMBOL_GPL(usbip_pad_iso);
diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
index b8f8c48b8a72..c7b888ca54f5 100644
--- a/drivers/staging/usbip/usbip_common.h
+++ b/drivers/staging/usbip/usbip_common.h
@@ -306,7 +306,7 @@ void usbip_header_correct_endian(struct usbip_header *pdu, int send);
void *usbip_alloc_iso_desc_pdu(struct urb *urb, ssize_t *bufflen);
/* some members of urb must be substituted before. */
int usbip_recv_iso(struct usbip_device *ud, struct urb *urb);
-int usbip_pad_iso(struct usbip_device *ud, struct urb *urb);
+void usbip_pad_iso(struct usbip_device *ud, struct urb *urb);
int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb);
/* usbip_event.c */
diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
index 2ee97e2095b0..8d96ab065cb7 100644
--- a/drivers/staging/usbip/vhci_hcd.c
+++ b/drivers/staging/usbip/vhci_hcd.c
@@ -386,29 +386,6 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
dum->port_status[rhport] |=
USB_PORT_STAT_ENABLE;
}
-#if 0
- if (dum->driver) {
- dum->port_status[rhport] |=
- USB_PORT_STAT_ENABLE;
- /* give it the best speed we agree on */
- dum->gadget.speed = dum->driver->speed;
- dum->gadget.ep0->maxpacket = 64;
- switch (dum->gadget.speed) {
- case USB_SPEED_HIGH:
- dum->port_status[rhport] |=
- USB_PORT_STAT_HIGH_SPEED;
- break;
- case USB_SPEED_LOW:
- dum->gadget.ep0->maxpacket = 8;
- dum->port_status[rhport] |=
- USB_PORT_STAT_LOW_SPEED;
- break;
- default:
- dum->gadget.speed = USB_SPEED_FULL;
- break;
- }
- }
-#endif
}
((u16 *) buf)[0] = cpu_to_le16(dum->port_status[rhport]);
((u16 *) buf)[1] = cpu_to_le16(dum->port_status[rhport] >> 16);
@@ -425,15 +402,6 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
case USB_PORT_FEAT_SUSPEND:
usbip_dbg_vhci_rh(" SetPortFeature: "
"USB_PORT_FEAT_SUSPEND\n");
-#if 0
- dum->port_status[rhport] |=
- (1 << USB_PORT_FEAT_SUSPEND);
- if (dum->driver->suspend) {
- spin_unlock(&dum->lock);
- dum->driver->suspend(&dum->gadget);
- spin_lock(&dum->lock);
- }
-#endif
break;
case USB_PORT_FEAT_RESET:
usbip_dbg_vhci_rh(" SetPortFeature: "
@@ -444,13 +412,6 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
~(USB_PORT_STAT_ENABLE |
USB_PORT_STAT_LOW_SPEED |
USB_PORT_STAT_HIGH_SPEED);
-#if 0
- if (dum->driver) {
- dev_dbg(hardware, "disconnect\n");
- stop_activity(dum, dum->driver);
- }
-#endif
-
/* FIXME test that code path! */
}
/* 50msec reset signaling */
diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
index 3f511b47563d..f5fba7320c5a 100644
--- a/drivers/staging/usbip/vhci_rx.c
+++ b/drivers/staging/usbip/vhci_rx.c
@@ -94,8 +94,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
return;
/* restore the padding in iso packets */
- if (usbip_pad_iso(ud, urb) < 0)
- return;
+ usbip_pad_iso(ud, urb);
if (usbip_dbg_flag_vhci_rx)
usbip_dump_urb(urb);
diff --git a/drivers/staging/vme/vme.h b/drivers/staging/vme/vme.h
index 9d38ceed60e2..c9d65bf14cec 100644
--- a/drivers/staging/vme/vme.h
+++ b/drivers/staging/vme/vme.h
@@ -156,7 +156,7 @@ int vme_irq_request(struct vme_dev *, int, int,
void vme_irq_free(struct vme_dev *, int, int);
int vme_irq_generate(struct vme_dev *, int, int);
-struct vme_resource * vme_lm_request(struct vme_dev *);
+struct vme_resource *vme_lm_request(struct vme_dev *);
int vme_lm_count(struct vme_resource *);
int vme_lm_set(struct vme_resource *, unsigned long long, u32, u32);
int vme_lm_get(struct vme_resource *, unsigned long long *, u32 *, u32 *);
diff --git a/drivers/staging/vt6655/ioctl.c b/drivers/staging/vt6655/ioctl.c
index 7fd5cc5a55f6..ef197efab049 100644
--- a/drivers/staging/vt6655/ioctl.c
+++ b/drivers/staging/vt6655/ioctl.c
@@ -324,16 +324,16 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq)
pItemSSID = (PWLAN_IE_SSID)pBSS->abySSID;
memset(pList->sBSSIDList[ii].abySSID, 0, WLAN_SSID_MAXLEN + 1);
memcpy(pList->sBSSIDList[ii].abySSID, pItemSSID->abySSID, pItemSSID->len);
- if (WLAN_GET_CAP_INFO_ESS(pBSS->wCapInfo)) {
+ if (WLAN_GET_CAP_INFO_ESS(pBSS->wCapInfo))
pList->sBSSIDList[ii].byNetType = INFRA;
- } else {
+ else
pList->sBSSIDList[ii].byNetType = ADHOC;
- }
- if (WLAN_GET_CAP_INFO_PRIVACY(pBSS->wCapInfo)) {
+
+ if (WLAN_GET_CAP_INFO_PRIVACY(pBSS->wCapInfo))
pList->sBSSIDList[ii].bWEPOn = true;
- } else {
+ else
pList->sBSSIDList[ii].bWEPOn = false;
- }
+
ii++;
if (ii >= pList->uItem)
break;
@@ -367,9 +367,9 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq)
netif_stop_queue(pDevice->dev);
spin_lock_irq(&pDevice->lock);
- if (pDevice->bRadioOff == false) {
+ if (pDevice->bRadioOff == false)
CARDbRadioPowerOff(pDevice);
- }
+
pDevice->bLinkPass = false;
memset(pMgmt->abyCurrBSSID, 0, 6);
pMgmt->eCurrState = WMAC_STATE_IDLE;
@@ -489,13 +489,12 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq)
break;
}
- if (sStartAPCmd.wBBPType == PHY80211g) {
+ if (sStartAPCmd.wBBPType == PHY80211g)
pMgmt->byAPBBType = PHY_TYPE_11G;
- } else if (sStartAPCmd.wBBPType == PHY80211a) {
+ else if (sStartAPCmd.wBBPType == PHY80211a)
pMgmt->byAPBBType = PHY_TYPE_11A;
- } else {
+ else
pMgmt->byAPBBType = PHY_TYPE_11B;
- }
pItemSSID = (PWLAN_IE_SSID)sStartAPCmd.ssid;
if (pItemSSID->len > WLAN_SSID_MAXLEN + 1)
diff --git a/drivers/staging/vt6656/iwctl.c b/drivers/staging/vt6656/iwctl.c
index ecfda5272fa1..b24e5314a6af 100644
--- a/drivers/staging/vt6656/iwctl.c
+++ b/drivers/staging/vt6656/iwctl.c
@@ -46,9 +46,6 @@
#include <net/iw_handler.h>
-
-/*--------------------- Static Definitions -------------------------*/
-
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
#define SUPPORTED_WIRELESS_EXT 18
#else
@@ -63,19 +60,8 @@ static const long frequency_list[] = {
5700, 5745, 5765, 5785, 5805, 5825
};
-
-/*--------------------- Static Classes ----------------------------*/
-
-
-//static int msglevel =MSG_LEVEL_DEBUG;
static int msglevel =MSG_LEVEL_INFO;
-
-/*--------------------- Static Variables --------------------------*/
-/*--------------------- Static Functions --------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
struct iw_statistics *iwctl_get_wireless_stats(struct net_device *dev)
{
PSDevice pDevice = netdev_priv(dev);
@@ -87,7 +73,6 @@ struct iw_statistics *iwctl_get_wireless_stats(struct net_device *dev)
pDevice->wstats.qual.qual =(BYTE) pDevice->scStatistic.LinkQuality;
RFvRSSITodBm(pDevice, (BYTE)(pDevice->uCurrRSSI), &ldBm);
pDevice->wstats.qual.level = ldBm;
- //pDevice->wstats.qual.level = 0x100 - pDevice->uCurrRSSI;
pDevice->wstats.qual.noise = 0;
pDevice->wstats.qual.updated = 1;
pDevice->wstats.discard.nwid = 0;
@@ -100,21 +85,6 @@ struct iw_statistics *iwctl_get_wireless_stats(struct net_device *dev)
return &pDevice->wstats;
}
-
-
-/*------------------------------------------------------------------*/
-
-
-static int iwctl_commit(struct net_device *dev,
- struct iw_request_info *info,
- void *wrq,
- char *extra)
-{
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWCOMMIT\n");
-
- return 0;
-}
-
/*
* Wireless Handler : get protocol name
*/
@@ -197,14 +167,12 @@ if(pDevice->byReAssocCount > 0) { //reject scan when re-associating!
}
pMgmt->eScanType = WMAC_SCAN_PASSIVE;
- //printk("SIOCSIWSCAN:WLAN_CMD_BSSID_SCAN\n");
bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
spin_unlock_irq(&pDevice->lock);
return 0;
}
-
/*
* Wireless Handler : get scan results
*/
@@ -503,7 +471,7 @@ int iwctl_siwmode(struct net_device *dev,
* Wireless Handler : get operation mode
*/
-int iwctl_giwmode(struct net_device *dev,
+void iwctl_giwmode(struct net_device *dev,
struct iw_request_info *info,
__u32 *wmode,
char *extra)
@@ -530,8 +498,6 @@ int iwctl_giwmode(struct net_device *dev,
default:
*wmode = IW_MODE_ADHOC;
}
-
- return 0;
}
@@ -539,7 +505,7 @@ int iwctl_giwmode(struct net_device *dev,
* Wireless Handler : get capability range
*/
-int iwctl_giwrange(struct net_device *dev,
+void iwctl_giwrange(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *wrq,
char *extra)
@@ -634,9 +600,6 @@ int iwctl_giwrange(struct net_device *dev,
range->avg_qual.level = 176; // -80 dBm
range->avg_qual.noise = 0;
}
-
-
- return 0;
}
@@ -708,9 +671,7 @@ int iwctl_giwap(struct net_device *dev,
memcpy(wrq->sa_data, pMgmt->abyCurrBSSID, 6);
-//20080123-02,<Modify> by Einsn Liu
if ((pDevice->bLinkPass == FALSE) && (pMgmt->eCurrMode != WMAC_MODE_ESS_AP))
- // if ((pDevice->bLinkPass == FALSE) && (pMgmt->eCurrMode == WMAC_MODE_ESS_STA))
memset(wrq->sa_data, 0, 6);
if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
@@ -895,8 +856,7 @@ int iwctl_siwessid(struct net_device *dev,
/*
* Wireless Handler : get essid
*/
-
-int iwctl_giwessid(struct net_device *dev,
+void iwctl_giwessid(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *wrq,
char *extra)
@@ -913,14 +873,11 @@ int iwctl_giwessid(struct net_device *dev,
// Get the current SSID
pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
- //pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
memcpy(extra, pItemSSID->abySSID , pItemSSID->len);
extra[pItemSSID->len] = '\0';
wrq->length = pItemSSID->len;
wrq->flags = 1; // active
-
- return 0;
}
/*
@@ -1008,8 +965,7 @@ int iwctl_siwrate(struct net_device *dev,
/*
* Wireless Handler : get data rate
*/
-
-int iwctl_giwrate(struct net_device *dev,
+void iwctl_giwrate(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *wrq,
char *extra)
@@ -1047,9 +1003,6 @@ int iwctl_giwrate(struct net_device *dev,
if (pDevice->bFixRate == TRUE)
wrq->fixed = TRUE;
}
-
-
- return 0;
}
@@ -1057,27 +1010,19 @@ int iwctl_giwrate(struct net_device *dev,
/*
* Wireless Handler : set rts threshold
*/
-
int iwctl_siwrts(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra)
+ struct iw_param *wrq)
{
- PSDevice pDevice = (PSDevice)netdev_priv(dev);
- int rc = 0;
+ PSDevice pDevice = (PSDevice)netdev_priv(dev);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWRTS \n");
+ if ((wrq->value < 0 || wrq->value > 2312) && !wrq->disabled)
+ return -EINVAL;
- {
- int rthr = wrq->value;
- if(wrq->disabled)
- rthr = 2312;
- if((rthr < 0) || (rthr > 2312)) {
- rc = -EINVAL;
- }else {
- pDevice->wRTSThreshold = rthr;
- }
- }
+ else if (wrq->disabled)
+ pDevice->wRTSThreshold = 2312;
+
+ else
+ pDevice->wRTSThreshold = wrq->value;
return 0;
}
@@ -1327,55 +1272,6 @@ int iwctl_siwencode(struct net_device *dev,
return rc;
}
-/*
- * Wireless Handler : get encode mode
- */
-//2008-0409-06, <Mark> by Einsn Liu
- /*
-int iwctl_giwencode(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra)
-{
- PSDevice pDevice = (PSDevice)netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- int rc = 0;
- char abyKey[WLAN_WEP232_KEYLEN];
- unsigned int index = (unsigned int)(wrq->flags & IW_ENCODE_INDEX);
- PSKeyItem pKey = NULL;
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWENCODE\n");
-
-
- memset(abyKey, 0, sizeof(abyKey));
- // Check encryption mode
- wrq->flags = IW_ENCODE_NOKEY;
- // Is WEP enabled ???
- if (pDevice->bEncryptionEnable)
- wrq->flags |= IW_ENCODE_ENABLED;
- else
- wrq->flags |= IW_ENCODE_DISABLED;
-
- if (pMgmt->bShareKeyAlgorithm)
- wrq->flags |= IW_ENCODE_RESTRICTED;
- else
- wrq->flags |= IW_ENCODE_OPEN;
-
- if (KeybGetKey(&(pDevice->sKey), pDevice->abyBroadcastAddr, (BYTE)index , &pKey)){
- wrq->length = pKey->uKeyLength;
- memcpy(abyKey, pKey->abyKey, pKey->uKeyLength);
- }
- else {
- rc = -EINVAL;
- return rc;
- }
- wrq->flags |= index;
- // Copy the key to the user buffer
- memcpy(extra, abyKey, WLAN_WEP232_KEYLEN);
- return 0;
-}
-*/
-
int iwctl_giwencode(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *wrq,
@@ -1562,7 +1458,6 @@ int iwctl_siwauth(struct net_device *dev,
wpa_version = wrq->value;
if(wrq->value == IW_AUTH_WPA_VERSION_DISABLED) {
PRINT_K("iwctl_siwauth:set WPADEV to disable at 1??????\n");
- //pDevice->bWPADEVUp = FALSE;
}
else if(wrq->value == IW_AUTH_WPA_VERSION_WPA) {
PRINT_K("iwctl_siwauth:set WPADEV to WPA1******\n");
@@ -1570,7 +1465,6 @@ int iwctl_siwauth(struct net_device *dev,
else {
PRINT_K("iwctl_siwauth:set WPADEV to WPA2******\n");
}
- //pDevice->bWPASuppWextEnabled =TRUE;
break;
case IW_AUTH_CIPHER_PAIRWISE:
pairwise = wrq->value;
@@ -1627,11 +1521,6 @@ int iwctl_siwauth(struct net_device *dev,
}
break;
case IW_AUTH_WPA_ENABLED:
- //pDevice->bWPADEVUp = !! wrq->value;
- //if(pDevice->bWPADEVUp==TRUE)
- // printk("iwctl_siwauth:set WPADEV to enable successful*******\n");
- //else
- // printk("iwctl_siwauth:set WPADEV to enable fail?????\n");
break;
case IW_AUTH_RX_UNENCRYPTED_EAPOL:
break;
@@ -1646,7 +1535,6 @@ int iwctl_siwauth(struct net_device *dev,
pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
pMgmt->bShareKeyAlgorithm = FALSE;
pMgmt->eAuthenMode = WMAC_AUTH_OPEN;
- //pDevice->bWPADEVUp = FALSE;
PRINT_K("iwctl_siwauth:set WPADEV to disaable at 2?????\n");
}
@@ -1655,15 +1543,6 @@ int iwctl_siwauth(struct net_device *dev,
ret = -EOPNOTSUPP;
break;
}
-/*
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_version = %d\n",wpa_version);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pairwise = %d\n",pairwise);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->eEncryptionStatus = %d\n",pDevice->eEncryptionStatus);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pMgmt->eAuthenMode = %d\n",pMgmt->eAuthenMode);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pMgmt->bShareKeyAlgorithm = %s\n",pMgmt->bShareKeyAlgorithm?"TRUE":"FALSE");
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->bEncryptionEnable = %s\n",pDevice->bEncryptionEnable?"TRUE":"FALSE");
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->bWPADEVUp = %s\n",pDevice->bWPADEVUp?"TRUE":"FALSE");
-*/
return ret;
}
@@ -1752,8 +1631,6 @@ int iwctl_siwencodeext(struct net_device *dev,
u8 seq[IW_ENCODE_SEQ_MAX_SIZE];
u8 key[64];
size_t seq_len=0,key_len=0;
-//
- // int ii;
u8 *buf;
size_t blen;
u8 key_array[64];
@@ -1883,7 +1760,6 @@ int iwctl_siwmlme(struct net_device *dev,
PSDevice pDevice = (PSDevice)netdev_priv(dev);
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
struct iw_mlme *mlme = (struct iw_mlme *)extra;
- //u16 reason = cpu_to_le16(mlme->reason_code);
int ret = 0;
if(memcmp(pMgmt->abyCurrBSSID, mlme->addr.sa_data, ETH_ALEN)){
@@ -1892,12 +1768,6 @@ int iwctl_siwmlme(struct net_device *dev,
}
switch(mlme->cmd){
case IW_MLME_DEAUTH:
- //this command seems to be not complete,please test it --einsnliu
- //printk("iwctl_siwmlme--->send DEAUTH\n");
- /* bScheduleCommand((void *) pDevice,
- WLAN_CMD_DEAUTH,
- (PBYTE)&reason); */
- //break;
case IW_MLME_DISASSOC:
if(pDevice->bLinkPass == TRUE){
PRINT_K("iwctl_siwmlme--->send DISASSOCIATE\n");
@@ -1916,77 +1786,9 @@ int iwctl_siwmlme(struct net_device *dev,
#endif
-/*------------------------------------------------------------------*/
-/*
- * Structures to export the Wireless Handlers
- */
-
-
-/*
-static const iw_handler iwctl_handler[] =
-{
- (iw_handler) iwctl_commit, // SIOCSIWCOMMIT
- (iw_handler) iwctl_giwname, // SIOCGIWNAME
- (iw_handler) NULL, // SIOCSIWNWID
- (iw_handler) iwctl_siwfreq, // SIOCSIWFREQ
- (iw_handler) iwctl_giwfreq, // SIOCGIWFREQ
- (iw_handler) iwctl_siwmode, // SIOCSIWMODE
- (iw_handler) iwctl_giwmode, // SIOCGIWMODE
- (iw_handler) NULL, // SIOCSIWSENS
- (iw_handler) iwctl_giwsens, // SIOCGIWSENS
- (iw_handler) NULL, // SIOCSIWRANGE
- (iw_handler) iwctl_giwrange, // SIOCGIWRANGE
- (iw_handler) NULL, // SIOCSIWPRIV
- (iw_handler) NULL, // SIOCGIWPRIV
- (iw_handler) NULL, // SIOCSIWSTATS
- (iw_handler) NULL, // SIOCGIWSTATS
- (iw_handler) NULL, // SIOCSIWSPY
- (iw_handler) NULL, // SIOCGIWSPY
- (iw_handler) NULL, // -- hole --
- (iw_handler) NULL, // -- hole --
- (iw_handler) iwctl_siwap, // SIOCSIWAP
- (iw_handler) iwctl_giwap, // SIOCGIWAP
- (iw_handler) NULL, // -- hole -- 0x16
- (iw_handler) iwctl_giwaplist, // SIOCGIWAPLIST
- (iw_handler) iwctl_siwscan, // SIOCSIWSCAN
- (iw_handler) iwctl_giwscan, // SIOCGIWSCAN
- (iw_handler) iwctl_siwessid, // SIOCSIWESSID
- (iw_handler) iwctl_giwessid, // SIOCGIWESSID
- (iw_handler) NULL, // SIOCSIWNICKN
- (iw_handler) NULL, // SIOCGIWNICKN
- (iw_handler) NULL, // -- hole --
- (iw_handler) NULL, // -- hole --
- (iw_handler) iwctl_siwrate, // SIOCSIWRATE 0x20
- (iw_handler) iwctl_giwrate, // SIOCGIWRATE
- (iw_handler) iwctl_siwrts, // SIOCSIWRTS
- (iw_handler) iwctl_giwrts, // SIOCGIWRTS
- (iw_handler) iwctl_siwfrag, // SIOCSIWFRAG
- (iw_handler) iwctl_giwfrag, // SIOCGIWFRAG
- (iw_handler) NULL, // SIOCSIWTXPOW
- (iw_handler) NULL, // SIOCGIWTXPOW
- (iw_handler) iwctl_siwretry, // SIOCSIWRETRY
- (iw_handler) iwctl_giwretry, // SIOCGIWRETRY
- (iw_handler) iwctl_siwencode, // SIOCSIWENCODE
- (iw_handler) iwctl_giwencode, // SIOCGIWENCODE
- (iw_handler) iwctl_siwpower, // SIOCSIWPOWER
- (iw_handler) iwctl_giwpower, // SIOCGIWPOWER
- (iw_handler) NULL, // -- hole --
- (iw_handler) NULL, // -- hole --
- (iw_handler) iwctl_siwgenie, // SIOCSIWGENIE
- (iw_handler) iwctl_giwgenie, // SIOCGIWGENIE
- (iw_handler) iwctl_siwauth, // SIOCSIWAUTH
- (iw_handler) iwctl_giwauth, // SIOCGIWAUTH
- (iw_handler) iwctl_siwencodeext, // SIOCSIWENCODEEXT
- (iw_handler) iwctl_giwencodeext, // SIOCGIWENCODEEXT
- (iw_handler) NULL, // SIOCSIWPMKSA
- (iw_handler) NULL, // -- hole --
-
-};
-*/
-
static const iw_handler iwctl_handler[] =
{
- (iw_handler) iwctl_commit, // SIOCSIWCOMMIT
+ (iw_handler) NULL, /* SIOCSIWCOMMIT */
(iw_handler) NULL, // SIOCGIWNAME
(iw_handler) NULL, // SIOCSIWNWID
(iw_handler) NULL, // SIOCGIWNWID
@@ -2063,13 +1865,9 @@ const struct iw_handler_def iwctl_handler_def =
{
.get_wireless_stats = &iwctl_get_wireless_stats,
.num_standard = sizeof(iwctl_handler)/sizeof(iw_handler),
-// .num_private = sizeof(iwctl_private_handler)/sizeof(iw_handler),
-// .num_private_args = sizeof(iwctl_private_args)/sizeof(struct iw_priv_args),
.num_private = 0,
.num_private_args = 0,
.standard = (iw_handler *) iwctl_handler,
-// .private = (iw_handler *) iwctl_private_handler,
-// .private_args = (struct iw_priv_args *)iwctl_private_args,
.private = NULL,
.private_args = NULL,
};
diff --git a/drivers/staging/vt6656/iwctl.h b/drivers/staging/vt6656/iwctl.h
index 10a240e65012..0c6e0496779b 100644
--- a/drivers/staging/vt6656/iwctl.h
+++ b/drivers/staging/vt6656/iwctl.h
@@ -46,13 +46,13 @@ int iwctl_siwap(struct net_device *dev,
struct sockaddr *wrq,
char *extra);
-int iwctl_giwrange(struct net_device *dev,
+void iwctl_giwrange(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *wrq,
char *extra);
-int iwctl_giwmode(struct net_device *dev,
+void iwctl_giwmode(struct net_device *dev,
struct iw_request_info *info,
__u32 *wmode,
char *extra);
@@ -97,7 +97,7 @@ int iwctl_siwessid(struct net_device *dev,
struct iw_point *wrq,
char *extra);
-int iwctl_giwessid(struct net_device *dev,
+void iwctl_giwessid(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *wrq,
char *extra);
@@ -107,16 +107,13 @@ int iwctl_siwrate(struct net_device *dev,
struct iw_param *wrq,
char *extra);
-int iwctl_giwrate(struct net_device *dev,
+void iwctl_giwrate(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *wrq,
char *extra);
int iwctl_siwrts(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra);
-
+ struct iw_param *wrq);
int iwctl_giwrts(struct net_device *dev,
struct iw_request_info *info,
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 6a708f447651..763e028a5cc5 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -1657,8 +1657,8 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
{
char essid[IW_ESSID_MAX_SIZE+1];
if (wrq->u.essid.pointer) {
- rc = iwctl_giwessid(dev, NULL,
- &(wrq->u.essid), essid);
+ iwctl_giwessid(dev, NULL,
+ &(wrq->u.essid), essid);
if (copy_to_user(wrq->u.essid.pointer,
essid,
wrq->u.essid.length) )
@@ -1698,14 +1698,13 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
// Get the current bit-rate
case SIOCGIWRATE:
-
- rc = iwctl_giwrate(dev, NULL, &(wrq->u.bitrate), NULL);
+ iwctl_giwrate(dev, NULL, &(wrq->u.bitrate), NULL);
break;
// Set the desired RTS threshold
case SIOCSIWRTS:
- rc = iwctl_siwrts(dev, NULL, &(wrq->u.rts), NULL);
+ rc = iwctl_siwrts(dev, &(wrq->u.rts));
break;
// Get the current RTS threshold
@@ -1733,7 +1732,7 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
// Get mode of operation
case SIOCGIWMODE:
- rc = iwctl_giwmode(dev, NULL, &(wrq->u.mode), NULL);
+ iwctl_giwmode(dev, NULL, &(wrq->u.mode), NULL);
break;
// Set WEP keys and mode
@@ -1811,7 +1810,7 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
{
struct iw_range range;
- rc = iwctl_giwrange(dev, NULL, &(wrq->u.data), (char *) &range);
+ iwctl_giwrange(dev, NULL, &(wrq->u.data), (char *) &range);
if (copy_to_user(wrq->u.data.pointer, &range, sizeof(struct iw_range)))
rc = -EFAULT;
}
diff --git a/drivers/staging/vt6656/wpactl.c b/drivers/staging/vt6656/wpactl.c
index 2fa4f845a755..5435e8205b2c 100644
--- a/drivers/staging/vt6656/wpactl.c
+++ b/drivers/staging/vt6656/wpactl.c
@@ -46,23 +46,18 @@
#define VIAWGET_WPA_MAX_BUF_SIZE 1024
-
-
static const int frequency_list[] = {
2412, 2417, 2422, 2427, 2432, 2437, 2442,
2447, 2452, 2457, 2462, 2467, 2472, 2484
};
+
/*--------------------- Static Classes ----------------------------*/
/*--------------------- Static Variables --------------------------*/
-//static int msglevel =MSG_LEVEL_DEBUG;
-static int msglevel =MSG_LEVEL_INFO;
+static int msglevel = MSG_LEVEL_INFO;
/*--------------------- Static Functions --------------------------*/
-
-
-
/*--------------------- Export Variables --------------------------*/
static void wpadev_setup(struct net_device *dev)
{
@@ -72,9 +67,9 @@ static void wpadev_setup(struct net_device *dev)
dev->addr_len = ETH_ALEN;
dev->tx_queue_len = 1000;
- memset(dev->broadcast,0xFF, ETH_ALEN);
+ memset(dev->broadcast, 0xFF, ETH_ALEN);
- dev->flags = IFF_BROADCAST|IFF_MULTICAST;
+ dev->flags = IFF_BROADCAST | IFF_MULTICAST;
}
/*
@@ -90,45 +85,43 @@ static void wpadev_setup(struct net_device *dev)
* Return Value:
*
*/
-
static int wpa_init_wpadev(PSDevice pDevice)
{
- PSDevice wpadev_priv;
+ PSDevice wpadev_priv;
struct net_device *dev = pDevice->dev;
- int ret=0;
+ int ret = 0;
pDevice->wpadev = alloc_netdev(sizeof(PSDevice), "vntwpa", wpadev_setup);
if (pDevice->wpadev == NULL)
return -ENOMEM;
- wpadev_priv = netdev_priv(pDevice->wpadev);
- *wpadev_priv = *pDevice;
+ wpadev_priv = netdev_priv(pDevice->wpadev);
+ *wpadev_priv = *pDevice;
memcpy(pDevice->wpadev->dev_addr, dev->dev_addr, ETH_ALEN);
- pDevice->wpadev->base_addr = dev->base_addr;
+ pDevice->wpadev->base_addr = dev->base_addr;
pDevice->wpadev->irq = dev->irq;
pDevice->wpadev->mem_start = dev->mem_start;
pDevice->wpadev->mem_end = dev->mem_end;
ret = register_netdev(pDevice->wpadev);
if (ret) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: register_netdev(WPA) failed!\n",
- dev->name);
+ dev->name);
free_netdev(pDevice->wpadev);
return -1;
}
if (pDevice->skb == NULL) {
- pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
- if (pDevice->skb == NULL)
- return -ENOMEM;
- }
+ pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
+ if (pDevice->skb == NULL)
+ return -ENOMEM;
+ }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Registered netdev %s for WPA management\n",
- dev->name, pDevice->wpadev->name);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Registered netdev %s for WPA management\n",
+ dev->name, pDevice->wpadev->name);
return 0;
}
-
/*
* Description:
* unregister net_device (wpadev)
@@ -141,29 +134,24 @@ static int wpa_init_wpadev(PSDevice pDevice)
* Return Value:
*
*/
-
static int wpa_release_wpadev(PSDevice pDevice)
{
- if (pDevice->skb) {
- dev_kfree_skb(pDevice->skb);
- pDevice->skb = NULL;
- }
+ if (pDevice->skb) {
+ dev_kfree_skb(pDevice->skb);
+ pDevice->skb = NULL;
+ }
- if (pDevice->wpadev) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n",
- pDevice->dev->name, pDevice->wpadev->name);
- unregister_netdev(pDevice->wpadev);
- free_netdev(pDevice->wpadev);
- pDevice->wpadev = NULL;
- }
+ if (pDevice->wpadev) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n",
+ pDevice->dev->name, pDevice->wpadev->name);
+ unregister_netdev(pDevice->wpadev);
+ free_netdev(pDevice->wpadev);
+ pDevice->wpadev = NULL;
+ }
return 0;
}
-
-
-
-
/*
* Description:
 *      Set enable/disable dev for wpa supplicant daemon
@@ -177,13 +165,11 @@ static int wpa_release_wpadev(PSDevice pDevice)
* Return Value:
*
*/
-
int wpa_set_wpadev(PSDevice pDevice, int val)
{
if (val)
return wpa_init_wpadev(pDevice);
- else
- return wpa_release_wpadev(pDevice);
+ return wpa_release_wpadev(pDevice);
}
/*
@@ -199,245 +185,217 @@ int wpa_set_wpadev(PSDevice pDevice, int val)
* Return Value:
*
*/
-
int wpa_set_keys(PSDevice pDevice, void *ctx, BOOL fcpfkernel)
{
- struct viawget_wpa_param *param=ctx;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- DWORD dwKeyIndex = 0;
- BYTE abyKey[MAX_KEY_LEN];
- BYTE abySeq[MAX_KEY_LEN];
- QWORD KeyRSC;
-// NDIS_802_11_KEY_RSC KeyRSC;
- BYTE byKeyDecMode = KEY_CTL_WEP;
+ struct viawget_wpa_param *param = ctx;
+ PSMgmtObject pMgmt = &pDevice->sMgmtObj;
+ DWORD dwKeyIndex = 0;
+ BYTE abyKey[MAX_KEY_LEN];
+ BYTE abySeq[MAX_KEY_LEN];
+ QWORD KeyRSC;
+ BYTE byKeyDecMode = KEY_CTL_WEP;
int ret = 0;
- int uu, ii;
-
+ int uu;
+ int ii;
if (param->u.wpa_key.alg_name > WPA_ALG_CCMP)
return -EINVAL;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "param->u.wpa_key.alg_name = %d \n", param->u.wpa_key.alg_name);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "param->u.wpa_key.alg_name = %d \n",
+ param->u.wpa_key.alg_name);
if (param->u.wpa_key.alg_name == WPA_ALG_NONE) {
- pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
- pDevice->bEncryptionEnable = FALSE;
- pDevice->byKeyIndex = 0;
- pDevice->bTransmitKey = FALSE;
- for (uu=0; uu<MAX_KEY_TABLE; uu++) {
- MACvDisableKeyEntry(pDevice, uu);
- }
- return ret;
- }
+ pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
+ pDevice->bEncryptionEnable = FALSE;
+ pDevice->byKeyIndex = 0;
+ pDevice->bTransmitKey = FALSE;
+ for (uu=0; uu<MAX_KEY_TABLE; uu++) {
+ MACvDisableKeyEntry(pDevice, uu);
+ }
+ return ret;
+ }
if (param->u.wpa_key.key && param->u.wpa_key.key_len > sizeof(abyKey))
return -EINVAL;
- spin_unlock_irq(&pDevice->lock);
- if(param->u.wpa_key.key && fcpfkernel) {
- memcpy(&abyKey[0], param->u.wpa_key.key, param->u.wpa_key.key_len);
- }
- else {
- if (param->u.wpa_key.key &&
- copy_from_user(&abyKey[0], param->u.wpa_key.key, param->u.wpa_key.key_len)) {
- spin_lock_irq(&pDevice->lock);
- return -EINVAL;
+ spin_unlock_irq(&pDevice->lock);
+ if (param->u.wpa_key.key && fcpfkernel) {
+ memcpy(&abyKey[0], param->u.wpa_key.key, param->u.wpa_key.key_len);
+ } else {
+ if (param->u.wpa_key.key &&
+ copy_from_user(&abyKey[0], param->u.wpa_key.key,
+ param->u.wpa_key.key_len)) {
+ spin_lock_irq(&pDevice->lock);
+ return -EINVAL;
+ }
}
- }
- spin_lock_irq(&pDevice->lock);
+ spin_lock_irq(&pDevice->lock);
- dwKeyIndex = (DWORD)(param->u.wpa_key.key_index);
+ dwKeyIndex = (DWORD)(param->u.wpa_key.key_index);
if (param->u.wpa_key.alg_name == WPA_ALG_WEP) {
- if (dwKeyIndex > 3) {
- return -EINVAL;
- }
- else {
- if (param->u.wpa_key.set_tx) {
- pDevice->byKeyIndex = (BYTE)dwKeyIndex;
- pDevice->bTransmitKey = TRUE;
- dwKeyIndex |= (1 << 31);
- }
- KeybSetDefaultKey( pDevice,
- &(pDevice->sKey),
- dwKeyIndex & ~(BIT30 | USE_KEYRSC),
- param->u.wpa_key.key_len,
- NULL,
- abyKey,
- KEY_CTL_WEP
- );
-
- }
- pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
- pDevice->bEncryptionEnable = TRUE;
- return ret;
+ if (dwKeyIndex > 3) {
+ return -EINVAL;
+ } else {
+ if (param->u.wpa_key.set_tx) {
+ pDevice->byKeyIndex = (BYTE)dwKeyIndex;
+ pDevice->bTransmitKey = TRUE;
+ dwKeyIndex |= (1 << 31);
+ }
+ KeybSetDefaultKey( pDevice,
+ &(pDevice->sKey),
+ dwKeyIndex & ~(BIT30 | USE_KEYRSC),
+ param->u.wpa_key.key_len,
+ NULL,
+ abyKey,
+ KEY_CTL_WEP
+ );
+
+ }
+ pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
+ pDevice->bEncryptionEnable = TRUE;
+ return ret;
}
if (param->u.wpa_key.seq && param->u.wpa_key.seq_len > sizeof(abySeq))
return -EINVAL;
- spin_unlock_irq(&pDevice->lock);
- if(param->u.wpa_key.seq && fcpfkernel) {
- memcpy(&abySeq[0], param->u.wpa_key.seq, param->u.wpa_key.seq_len);
- }
- else {
- if (param->u.wpa_key.seq &&
- copy_from_user(&abySeq[0], param->u.wpa_key.seq, param->u.wpa_key.seq_len)) {
- spin_lock_irq(&pDevice->lock);
- return -EINVAL;
- }
+ spin_unlock_irq(&pDevice->lock);
+ if (param->u.wpa_key.seq && fcpfkernel) {
+ memcpy(&abySeq[0], param->u.wpa_key.seq, param->u.wpa_key.seq_len);
+ } else {
+ if (param->u.wpa_key.seq &&
+ copy_from_user(&abySeq[0], param->u.wpa_key.seq,
+ param->u.wpa_key.seq_len)) {
+ spin_lock_irq(&pDevice->lock);
+ return -EINVAL;
+ }
}
spin_lock_irq(&pDevice->lock);
if (param->u.wpa_key.seq_len > 0) {
for (ii = 0 ; ii < param->u.wpa_key.seq_len ; ii++) {
- if (ii < 4)
- LODWORD(KeyRSC) |= (abySeq[ii] << (ii * 8));
- else
- HIDWORD(KeyRSC) |= (abySeq[ii] << ((ii-4) * 8));
- //KeyRSC |= (abySeq[ii] << (ii * 8));
+ if (ii < 4)
+ LODWORD(KeyRSC) |= (abySeq[ii] << (ii * 8));
+ else
+ HIDWORD(KeyRSC) |= (abySeq[ii] << ((ii-4) * 8));
}
dwKeyIndex |= 1 << 29;
}
- if (param->u.wpa_key.key_index >= MAX_GROUP_KEY) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "return dwKeyIndex > 3\n");
- return -EINVAL;
- }
+ if (param->u.wpa_key.key_index >= MAX_GROUP_KEY) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "return dwKeyIndex > 3\n");
+ return -EINVAL;
+ }
if (param->u.wpa_key.alg_name == WPA_ALG_TKIP) {
- pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled;
- }
+ pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled;
+ }
if (param->u.wpa_key.alg_name == WPA_ALG_CCMP) {
- pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled;
- }
+ pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled;
+ }
if (param->u.wpa_key.set_tx)
dwKeyIndex |= (1 << 31);
- if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled)
- byKeyDecMode = KEY_CTL_CCMP;
- else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled)
- byKeyDecMode = KEY_CTL_TKIP;
- else
- byKeyDecMode = KEY_CTL_WEP;
-
- // Fix HCT test that set 256 bits KEY and Ndis802_11Encryption3Enabled
- if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) {
- if (param->u.wpa_key.key_len == MAX_KEY_LEN)
- byKeyDecMode = KEY_CTL_TKIP;
- else if (param->u.wpa_key.key_len == WLAN_WEP40_KEYLEN)
- byKeyDecMode = KEY_CTL_WEP;
- else if (param->u.wpa_key.key_len == WLAN_WEP104_KEYLEN)
- byKeyDecMode = KEY_CTL_WEP;
- } else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) {
- if (param->u.wpa_key.key_len == WLAN_WEP40_KEYLEN)
- byKeyDecMode = KEY_CTL_WEP;
- else if (param->u.wpa_key.key_len == WLAN_WEP104_KEYLEN)
- byKeyDecMode = KEY_CTL_WEP;
- }
-
- // Check TKIP key length
- if ((byKeyDecMode == KEY_CTL_TKIP) &&
- (param->u.wpa_key.key_len != MAX_KEY_LEN)) {
- // TKIP Key must be 256 bits
- //DBG_PRN_WLAN03(("return NDIS_STATUS_INVALID_DATA - TKIP Key must be 256 bits\n"));
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "return- TKIP Key must be 256 bits!\n");
- return -EINVAL;
- }
- // Check AES key length
- if ((byKeyDecMode == KEY_CTL_CCMP) &&
- (param->u.wpa_key.key_len != AES_KEY_LEN)) {
- // AES Key must be 128 bits
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "return - AES Key must be 128 bits\n");
- return -EINVAL;
- }
+ if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled)
+ byKeyDecMode = KEY_CTL_CCMP;
+ else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled)
+ byKeyDecMode = KEY_CTL_TKIP;
+ else
+ byKeyDecMode = KEY_CTL_WEP;
+
+ // Fix HCT test that set 256 bits KEY and Ndis802_11Encryption3Enabled
+ if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) {
+ if (param->u.wpa_key.key_len == MAX_KEY_LEN)
+ byKeyDecMode = KEY_CTL_TKIP;
+ else if (param->u.wpa_key.key_len == WLAN_WEP40_KEYLEN)
+ byKeyDecMode = KEY_CTL_WEP;
+ else if (param->u.wpa_key.key_len == WLAN_WEP104_KEYLEN)
+ byKeyDecMode = KEY_CTL_WEP;
+ } else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) {
+ if (param->u.wpa_key.key_len == WLAN_WEP40_KEYLEN)
+ byKeyDecMode = KEY_CTL_WEP;
+ else if (param->u.wpa_key.key_len == WLAN_WEP104_KEYLEN)
+ byKeyDecMode = KEY_CTL_WEP;
+ }
- if (is_broadcast_ether_addr(&param->addr[0]) || (param->addr == NULL)) {
- /* if broadcast, set the key as every key entry's group key */
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Groupe Key Assign.\n");
-
- if ((KeybSetAllGroupKey(pDevice,
- &(pDevice->sKey),
- dwKeyIndex,
- param->u.wpa_key.key_len,
- (PQWORD) &(KeyRSC),
- (PBYTE)abyKey,
- byKeyDecMode
- ) == TRUE) &&
- (KeybSetDefaultKey(pDevice,
- &(pDevice->sKey),
- dwKeyIndex,
- param->u.wpa_key.key_len,
- (PQWORD) &(KeyRSC),
- (PBYTE)abyKey,
- byKeyDecMode
- ) == TRUE) ) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "GROUP Key Assign.\n");
-
- } else {
- //DBG_PRN_WLAN03(("return NDIS_STATUS_INVALID_DATA -KeybSetDefaultKey Fail.0\n"));
- return -EINVAL;
- }
-
- } else {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key Assign.\n");
- // BSSID not 0xffffffffffff
- // Pairwise Key can't be WEP
- if (byKeyDecMode == KEY_CTL_WEP) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key can't be WEP\n");
- return -EINVAL;
- }
-
- dwKeyIndex |= (1 << 30); // set pairwise key
- if (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) {
- //DBG_PRN_WLAN03(("return NDIS_STATUS_INVALID_DATA - WMAC_CONFIG_IBSS_STA\n"));
- return -EINVAL;
- }
- if (KeybSetKey(pDevice,
- &(pDevice->sKey),
- &param->addr[0],
- dwKeyIndex,
- param->u.wpa_key.key_len,
- (PQWORD) &(KeyRSC),
- (PBYTE)abyKey,
- byKeyDecMode
- ) == TRUE) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key Set\n");
-
- } else {
- // Key Table Full
- if (!compare_ether_addr(&param->addr[0], pDevice->abyBSSID)) {
- //DBG_PRN_WLAN03(("return NDIS_STATUS_INVALID_DATA -Key Table Full.2\n"));
- return -EINVAL;
-
- } else {
- // Save Key and configure just before associate/reassociate to BSSID
- // we do not implement now
- return -EINVAL;
- }
- }
- } // BSSID not 0xffffffffffff
- if ((ret == 0) && ((param->u.wpa_key.set_tx) != 0)) {
- pDevice->byKeyIndex = (BYTE)param->u.wpa_key.key_index;
- pDevice->bTransmitKey = TRUE;
+ // Check TKIP key length
+ if ((byKeyDecMode == KEY_CTL_TKIP) &&
+ (param->u.wpa_key.key_len != MAX_KEY_LEN)) {
+ // TKIP Key must be 256 bits
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "return- TKIP Key must be 256 bits!\n");
+ return -EINVAL;
}
- pDevice->bEncryptionEnable = TRUE;
+ // Check AES key length
+ if ((byKeyDecMode == KEY_CTL_CCMP) &&
+ (param->u.wpa_key.key_len != AES_KEY_LEN)) {
+ // AES Key must be 128 bits
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "return - AES Key must be 128 bits\n");
+ return -EINVAL;
+ }
-/*
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " key=%x-%x-%x-%x-%x-xxxxx \n",
- pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[byKeyIndex][0],
- pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[byKeyIndex][1],
- pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[byKeyIndex][2],
- pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[byKeyIndex][3],
- pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[byKeyIndex][4]
- );
-*/
+ if (is_broadcast_ether_addr(&param->addr[0]) || (param->addr == NULL)) {
+ /* if broadcast, set the key as every key entry's group key */
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Groupe Key Assign.\n");
+
+ if ((KeybSetAllGroupKey(pDevice, &(pDevice->sKey), dwKeyIndex,
+ param->u.wpa_key.key_len,
+ (PQWORD) &(KeyRSC),
+ (PBYTE)abyKey,
+ byKeyDecMode
+ ) == TRUE) &&
+ (KeybSetDefaultKey(pDevice,
+ &(pDevice->sKey),
+ dwKeyIndex,
+ param->u.wpa_key.key_len,
+ (PQWORD) &(KeyRSC),
+ (PBYTE)abyKey,
+ byKeyDecMode
+ ) == TRUE) ) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "GROUP Key Assign.\n");
+ } else {
+ return -EINVAL;
+ }
+ } else {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key Assign.\n");
+ // BSSID not 0xffffffffffff
+ // Pairwise Key can't be WEP
+ if (byKeyDecMode == KEY_CTL_WEP) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key can't be WEP\n");
+ return -EINVAL;
+ }
+ dwKeyIndex |= (1 << 30); // set pairwise key
+ if (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) {
+ //DBG_PRN_WLAN03(("return NDIS_STATUS_INVALID_DATA - WMAC_CONFIG_IBSS_STA\n"));
+ return -EINVAL;
+ }
+ if (KeybSetKey(pDevice, &(pDevice->sKey), &param->addr[0],
+ dwKeyIndex, param->u.wpa_key.key_len,
+ (PQWORD) &(KeyRSC), (PBYTE)abyKey, byKeyDecMode
+ ) == TRUE) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key Set\n");
+ } else {
+ // Key Table Full
+ if (!compare_ether_addr(&param->addr[0], pDevice->abyBSSID)) {
+ //DBG_PRN_WLAN03(("return NDIS_STATUS_INVALID_DATA -Key Table Full.2\n"));
+ return -EINVAL;
+ } else {
+ // Save Key and configure just before associate/reassociate to BSSID
+ // we do not implement now
+ return -EINVAL;
+ }
+ }
+ } // BSSID not 0xffffffffffff
+ if ((ret == 0) && ((param->u.wpa_key.set_tx) != 0)) {
+ pDevice->byKeyIndex = (BYTE)param->u.wpa_key.key_index;
+ pDevice->bTransmitKey = TRUE;
+ }
+ pDevice->bEncryptionEnable = TRUE;
return ret;
-
}
@@ -454,23 +412,17 @@ int wpa_set_wpadev(PSDevice pDevice, int val)
* Return Value:
*
*/
-
-static int wpa_set_wpa(PSDevice pDevice,
- struct viawget_wpa_param *param)
+static int wpa_set_wpa(PSDevice pDevice, struct viawget_wpa_param *param)
{
-
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ PSMgmtObject pMgmt = &pDevice->sMgmtObj;
int ret = 0;
- pMgmt->eAuthenMode = WMAC_AUTH_OPEN;
- pMgmt->bShareKeyAlgorithm = FALSE;
+ pMgmt->eAuthenMode = WMAC_AUTH_OPEN;
+ pMgmt->bShareKeyAlgorithm = FALSE;
- return ret;
+ return ret;
}
-
-
-
/*
* Description:
* set disassociate
@@ -484,25 +436,21 @@ static int wpa_set_wpa(PSDevice pDevice,
* Return Value:
*
*/
-
-static int wpa_set_disassociate(PSDevice pDevice,
- struct viawget_wpa_param *param)
+static int wpa_set_disassociate(PSDevice pDevice, struct viawget_wpa_param *param)
{
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ PSMgmtObject pMgmt = &pDevice->sMgmtObj;
int ret = 0;
- spin_lock_irq(&pDevice->lock);
- if (pDevice->bLinkPass) {
- if (!memcmp(param->addr, pMgmt->abyCurrBSSID, 6))
- bScheduleCommand((void *) pDevice, WLAN_CMD_DISASSOCIATE, NULL);
- }
- spin_unlock_irq(&pDevice->lock);
+ spin_lock_irq(&pDevice->lock);
+ if (pDevice->bLinkPass) {
+ if (!memcmp(param->addr, pMgmt->abyCurrBSSID, 6))
+ bScheduleCommand((void *)pDevice, WLAN_CMD_DISASSOCIATE, NULL);
+ }
+ spin_unlock_irq(&pDevice->lock);
- return ret;
+ return ret;
}
-
-
/*
* Description:
* enable scan process
@@ -516,36 +464,30 @@ static int wpa_set_disassociate(PSDevice pDevice,
* Return Value:
*
*/
-
-static int wpa_set_scan(PSDevice pDevice,
- struct viawget_wpa_param *param)
+static int wpa_set_scan(PSDevice pDevice, struct viawget_wpa_param *param)
{
int ret = 0;
/**set ap_scan=1&&scan_ssid=1 under hidden ssid mode**/
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- PWLAN_IE_SSID pItemSSID;
-printk("wpa_set_scan-->desired [ssid=%s,ssid_len=%d]\n",
- param->u.scan_req.ssid,param->u.scan_req.ssid_len);
+ PSMgmtObject pMgmt = &pDevice->sMgmtObj;
+ PWLAN_IE_SSID pItemSSID;
+ printk("wpa_set_scan-->desired [ssid=%s,ssid_len=%d]\n",
+ param->u.scan_req.ssid,param->u.scan_req.ssid_len);
// Set the SSID
-memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
-pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
-pItemSSID->byElementID = WLAN_EID_SSID;
-memcpy(pItemSSID->abySSID, param->u.scan_req.ssid, param->u.scan_req.ssid_len);
-pItemSSID->len = param->u.scan_req.ssid_len;
-
- spin_lock_irq(&pDevice->lock);
- BSSvClearBSSList((void *) pDevice, pDevice->bLinkPass);
- /* bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL); */
- bScheduleCommand((void *) pDevice,
- WLAN_CMD_BSSID_SCAN,
- pMgmt->abyDesireSSID);
- spin_unlock_irq(&pDevice->lock);
-
- return ret;
-}
+ memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
+ pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
+ pItemSSID->byElementID = WLAN_EID_SSID;
+ memcpy(pItemSSID->abySSID, param->u.scan_req.ssid, param->u.scan_req.ssid_len);
+ pItemSSID->len = param->u.scan_req.ssid_len;
+ spin_lock_irq(&pDevice->lock);
+ BSSvClearBSSList((void *) pDevice, pDevice->bLinkPass);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN,
+ pMgmt->abyDesireSSID);
+ spin_unlock_irq(&pDevice->lock);
+ return ret;
+}
/*
* Description:
@@ -560,19 +502,15 @@ pItemSSID->len = param->u.scan_req.ssid_len;
* Return Value:
*
*/
-
-static int wpa_get_bssid(PSDevice pDevice,
- struct viawget_wpa_param *param)
+static int wpa_get_bssid(PSDevice pDevice, struct viawget_wpa_param *param)
{
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- int ret = 0;
- memcpy(param->u.wpa_associate.bssid, pMgmt->abyCurrBSSID , 6);
+ PSMgmtObject pMgmt = &pDevice->sMgmtObj;
+ int ret = 0;
+ memcpy(param->u.wpa_associate.bssid, pMgmt->abyCurrBSSID, 6);
return ret;
-
}
-
/*
* Description:
 *      get ssid
@@ -586,24 +524,20 @@ static int wpa_get_bssid(PSDevice pDevice,
* Return Value:
*
*/
-
-static int wpa_get_ssid(PSDevice pDevice,
- struct viawget_wpa_param *param)
+static int wpa_get_ssid(PSDevice pDevice, struct viawget_wpa_param *param)
{
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- PWLAN_IE_SSID pItemSSID;
+ PSMgmtObject pMgmt = &pDevice->sMgmtObj;
+ PWLAN_IE_SSID pItemSSID;
int ret = 0;
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
+ pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
- memcpy(param->u.wpa_associate.ssid, pItemSSID->abySSID , pItemSSID->len);
+ memcpy(param->u.wpa_associate.ssid, pItemSSID->abySSID, pItemSSID->len);
param->u.wpa_associate.ssid_len = pItemSSID->len;
- return ret;
+ return ret;
}
-
-
/*
* Description:
* get scan results
@@ -617,135 +551,114 @@ static int wpa_get_ssid(PSDevice pDevice,
* Return Value:
*
*/
-
-static int wpa_get_scan(PSDevice pDevice,
- struct viawget_wpa_param *param)
+static int wpa_get_scan(PSDevice pDevice, struct viawget_wpa_param *param)
{
struct viawget_scan_result *scan_buf;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- PWLAN_IE_SSID pItemSSID;
- PKnownBSS pBSS;
- PBYTE pBuf;
+ PSMgmtObject pMgmt = &pDevice->sMgmtObj;
+ PWLAN_IE_SSID pItemSSID;
+ PKnownBSS pBSS;
+ PBYTE pBuf;
int ret = 0;
u16 count = 0;
- u16 ii, jj;
- long ldBm;//James //add
+ u16 ii;
+ u16 jj;
+ long ldBm; //James //add
//******mike:bubble sort by stronger RSSI*****//
+ PBYTE ptempBSS;
- PBYTE ptempBSS;
-
+ ptempBSS = kmalloc(sizeof(KnownBSS), GFP_ATOMIC);
+ if (ptempBSS == NULL) {
+ printk("bubble sort kmalloc memory fail@@@\n");
+ ret = -ENOMEM;
+ return ret;
+ }
- ptempBSS = kmalloc(sizeof(KnownBSS), (int)GFP_ATOMIC);
-
- if (ptempBSS == NULL) {
-
- printk("bubble sort kmalloc memory fail@@@\n");
-
- ret = -ENOMEM;
-
- return ret;
-
- }
-
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
-
- for (jj = 0; jj < MAX_BSS_NUM - ii - 1; jj++) {
-
- if ((pMgmt->sBSSList[jj].bActive != TRUE) ||
-
- ((pMgmt->sBSSList[jj].uRSSI>pMgmt->sBSSList[jj+1].uRSSI) &&(pMgmt->sBSSList[jj+1].bActive!=FALSE))) {
-
- memcpy(ptempBSS,&pMgmt->sBSSList[jj],sizeof(KnownBSS));
-
- memcpy(&pMgmt->sBSSList[jj],&pMgmt->sBSSList[jj+1],sizeof(KnownBSS));
-
- memcpy(&pMgmt->sBSSList[jj+1],ptempBSS,sizeof(KnownBSS));
-
- }
-
- }
-
- }
-
- kfree(ptempBSS);
-
- // printk("bubble sort result:\n");
+ for (ii = 0; ii < MAX_BSS_NUM; ii++) {
+ for (jj = 0; jj < MAX_BSS_NUM - ii - 1; jj++) {
+ if ((pMgmt->sBSSList[jj].bActive != TRUE)
+ || ((pMgmt->sBSSList[jj].uRSSI > pMgmt->sBSSList[jj + 1].uRSSI)
+ && (pMgmt->sBSSList[jj + 1].bActive != FALSE))) {
+ memcpy(ptempBSS,&pMgmt->sBSSList[jj], sizeof(KnownBSS));
+ memcpy(&pMgmt->sBSSList[jj], &pMgmt->sBSSList[jj + 1],
+ sizeof(KnownBSS));
+ memcpy(&pMgmt->sBSSList[jj + 1], ptempBSS, sizeof(KnownBSS));
+ }
+ }
+ }
+ kfree(ptempBSS);
count = 0;
pBSS = &(pMgmt->sBSSList[0]);
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- pBSS = &(pMgmt->sBSSList[ii]);
- if (!pBSS->bActive)
- continue;
- count++;
- }
+ for (ii = 0; ii < MAX_BSS_NUM; ii++) {
+ pBSS = &(pMgmt->sBSSList[ii]);
+ if (!pBSS->bActive)
+ continue;
+ count++;
+ }
- pBuf = kcalloc(count, sizeof(struct viawget_scan_result), (int)GFP_ATOMIC);
+ pBuf = kcalloc(count, sizeof(struct viawget_scan_result), GFP_ATOMIC);
- if (pBuf == NULL) {
- ret = -ENOMEM;
- return ret;
- }
- scan_buf = (struct viawget_scan_result *)pBuf;
+ if (pBuf == NULL) {
+ ret = -ENOMEM;
+ return ret;
+ }
+ scan_buf = (struct viawget_scan_result *)pBuf;
pBSS = &(pMgmt->sBSSList[0]);
- for (ii = 0, jj = 0; ii < MAX_BSS_NUM ; ii++) {
- pBSS = &(pMgmt->sBSSList[ii]);
- if (pBSS->bActive) {
- if (jj >= count)
- break;
- memcpy(scan_buf->bssid, pBSS->abyBSSID, WLAN_BSSID_LEN);
- pItemSSID = (PWLAN_IE_SSID)pBSS->abySSID;
- memcpy(scan_buf->ssid, pItemSSID->abySSID, pItemSSID->len);
- scan_buf->ssid_len = pItemSSID->len;
- scan_buf->freq = frequency_list[pBSS->uChannel-1];
- scan_buf->caps = pBSS->wCapInfo; //DavidWang for sharemode
-
- RFvRSSITodBm(pDevice, (BYTE)(pBSS->uRSSI), &ldBm);
- if(-ldBm<50){
+ for (ii = 0, jj = 0; ii < MAX_BSS_NUM; ii++) {
+ pBSS = &(pMgmt->sBSSList[ii]);
+ if (pBSS->bActive) {
+ if (jj >= count)
+ break;
+ memcpy(scan_buf->bssid, pBSS->abyBSSID, WLAN_BSSID_LEN);
+ pItemSSID = (PWLAN_IE_SSID)pBSS->abySSID;
+ memcpy(scan_buf->ssid, pItemSSID->abySSID, pItemSSID->len);
+ scan_buf->ssid_len = pItemSSID->len;
+ scan_buf->freq = frequency_list[pBSS->uChannel-1];
+ scan_buf->caps = pBSS->wCapInfo; // DavidWang for sharemode
+
+ RFvRSSITodBm(pDevice, (BYTE)(pBSS->uRSSI), &ldBm);
+ if (-ldBm < 50)
scan_buf->qual = 100;
- }else if(-ldBm > 90) {
- scan_buf->qual = 0;
- }else {
+ else if (-ldBm > 90)
+ scan_buf->qual = 0;
+ else
scan_buf->qual=(40-(-ldBm-50))*100/40;
- }
//James
- //scan_buf->caps = pBSS->wCapInfo;
- //scan_buf->qual =
- scan_buf->noise = 0;
- scan_buf->level = ldBm;
-
- //scan_buf->maxrate =
- if (pBSS->wWPALen != 0) {
- scan_buf->wpa_ie_len = pBSS->wWPALen;
- memcpy(scan_buf->wpa_ie, pBSS->byWPAIE, pBSS->wWPALen);
- }
- if (pBSS->wRSNLen != 0) {
- scan_buf->rsn_ie_len = pBSS->wRSNLen;
- memcpy(scan_buf->rsn_ie, pBSS->byRSNIE, pBSS->wRSNLen);
- }
- scan_buf = (struct viawget_scan_result *)((PBYTE)scan_buf + sizeof(struct viawget_scan_result));
- jj ++;
- }
- }
+ //scan_buf->caps = pBSS->wCapInfo;
+ //scan_buf->qual =
+ scan_buf->noise = 0;
+ scan_buf->level = ldBm;
+
+ //scan_buf->maxrate =
+ if (pBSS->wWPALen != 0) {
+ scan_buf->wpa_ie_len = pBSS->wWPALen;
+ memcpy(scan_buf->wpa_ie, pBSS->byWPAIE, pBSS->wWPALen);
+ }
+ if (pBSS->wRSNLen != 0) {
+ scan_buf->rsn_ie_len = pBSS->wRSNLen;
+ memcpy(scan_buf->rsn_ie, pBSS->byRSNIE, pBSS->wRSNLen);
+ }
+ scan_buf = (struct viawget_scan_result *)((PBYTE)scan_buf + sizeof(struct viawget_scan_result));
+ jj++;
+ }
+ }
- if (jj < count)
- count = jj;
+ if (jj < count)
+ count = jj;
- if (copy_to_user(param->u.scan_results.buf, pBuf, sizeof(struct viawget_scan_result) * count)) {
+ if (copy_to_user(param->u.scan_results.buf, pBuf, sizeof(struct viawget_scan_result) * count))
ret = -EFAULT;
- }
+
param->u.scan_results.scan_count = count;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " param->u.scan_results.scan_count = %d\n", count)
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " param->u.scan_results.scan_count = %d\n", count);
- kfree(pBuf);
- return ret;
+ kfree(pBuf);
+ return ret;
}
-
-
/*
* Description:
* set associate with AP
@@ -759,25 +672,23 @@ static int wpa_get_scan(PSDevice pDevice,
* Return Value:
*
*/
-
-static int wpa_set_associate(PSDevice pDevice,
- struct viawget_wpa_param *param)
+static int wpa_set_associate(PSDevice pDevice, struct viawget_wpa_param *param)
{
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- PWLAN_IE_SSID pItemSSID;
- BYTE abyNullAddr[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
- BYTE abyWPAIE[64];
- int ret = 0;
- BOOL bwepEnabled=FALSE;
+ PSMgmtObject pMgmt = &pDevice->sMgmtObj;
+ PWLAN_IE_SSID pItemSSID;
+ BYTE abyNullAddr[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ BYTE abyWPAIE[64];
+ int ret = 0;
+ BOOL bwepEnabled=FALSE;
// set key type & algorithm
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pairwise_suite = %d\n", param->u.wpa_associate.pairwise_suite);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "group_suite = %d\n", param->u.wpa_associate.group_suite);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "key_mgmt_suite = %d\n", param->u.wpa_associate.key_mgmt_suite);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "auth_alg = %d\n", param->u.wpa_associate.auth_alg);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "mode = %d\n", param->u.wpa_associate.mode);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ie_len = %d\n", param->u.wpa_associate.wpa_ie_len);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Roaming dBm = %d\n", param->u.wpa_associate.roam_dbm); //Davidwang
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pairwise_suite = %d\n", param->u.wpa_associate.pairwise_suite);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "group_suite = %d\n", param->u.wpa_associate.group_suite);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "key_mgmt_suite = %d\n", param->u.wpa_associate.key_mgmt_suite);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "auth_alg = %d\n", param->u.wpa_associate.auth_alg);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "mode = %d\n", param->u.wpa_associate.mode);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ie_len = %d\n", param->u.wpa_associate.wpa_ie_len);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Roaming dBm = %d\n", param->u.wpa_associate.roam_dbm); // Davidwang
if (param->u.wpa_associate.wpa_ie) {
if (param->u.wpa_associate.wpa_ie_len > sizeof(abyWPAIE))
@@ -789,25 +700,25 @@ static int wpa_set_associate(PSDevice pDevice,
}
if (param->u.wpa_associate.mode == 1)
- pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA;
+ pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA;
else
- pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA;
+ pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA;
// set bssid
- if (memcmp(param->u.wpa_associate.bssid, &abyNullAddr[0], 6) != 0)
- memcpy(pMgmt->abyDesireBSSID, param->u.wpa_associate.bssid, 6);
- // set ssid
+ if (memcmp(param->u.wpa_associate.bssid, &abyNullAddr[0], 6) != 0)
+ memcpy(pMgmt->abyDesireBSSID, param->u.wpa_associate.bssid, 6);
+ // set ssid
memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
- pItemSSID->byElementID = WLAN_EID_SSID;
+ pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
+ pItemSSID->byElementID = WLAN_EID_SSID;
pItemSSID->len = param->u.wpa_associate.ssid_len;
memcpy(pItemSSID->abySSID, param->u.wpa_associate.ssid, pItemSSID->len);
- if (param->u.wpa_associate.wpa_ie_len == 0) {
- if (param->u.wpa_associate.auth_alg & AUTH_ALG_SHARED_KEY)
- pMgmt->eAuthenMode = WMAC_AUTH_SHAREKEY;
- else
- pMgmt->eAuthenMode = WMAC_AUTH_OPEN;
+ if (param->u.wpa_associate.wpa_ie_len == 0) {
+ if (param->u.wpa_associate.auth_alg & AUTH_ALG_SHARED_KEY)
+ pMgmt->eAuthenMode = WMAC_AUTH_SHAREKEY;
+ else
+ pMgmt->eAuthenMode = WMAC_AUTH_OPEN;
} else if (abyWPAIE[0] == RSN_INFO_ELEM) {
if (param->u.wpa_associate.key_mgmt_suite == KEY_MGMT_PSK)
pMgmt->eAuthenMode = WMAC_AUTH_WPA2PSK;
@@ -817,9 +728,9 @@ static int wpa_set_associate(PSDevice pDevice,
if (param->u.wpa_associate.key_mgmt_suite == KEY_MGMT_WPA_NONE)
pMgmt->eAuthenMode = WMAC_AUTH_WPANONE;
else if (param->u.wpa_associate.key_mgmt_suite == KEY_MGMT_PSK)
- pMgmt->eAuthenMode = WMAC_AUTH_WPAPSK;
+ pMgmt->eAuthenMode = WMAC_AUTH_WPAPSK;
else
- pMgmt->eAuthenMode = WMAC_AUTH_WPA;
+ pMgmt->eAuthenMode = WMAC_AUTH_WPA;
}
switch (param->u.wpa_associate.pairwise_suite) {
@@ -833,7 +744,6 @@ static int wpa_set_associate(PSDevice pDevice,
case CIPHER_WEP104:
pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
bwepEnabled = TRUE;
- // printk("****************wpa_set_associate:set CIPHER_WEP40_104\n");
break;
case CIPHER_NONE:
if (param->u.wpa_associate.group_suite == CIPHER_CCMP)
@@ -845,70 +755,64 @@ static int wpa_set_associate(PSDevice pDevice,
pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
}
- pMgmt->Roam_dbm = param->u.wpa_associate.roam_dbm;
- // if ((pMgmt->Roam_dbm > 40)&&(pMgmt->Roam_dbm<80))
- // pDevice->bEnableRoaming = TRUE;
-
- if (pMgmt->eAuthenMode == WMAC_AUTH_SHAREKEY) { //@wep-sharekey
- pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
- pMgmt->bShareKeyAlgorithm = TRUE;
- }
- else if (pMgmt->eAuthenMode == WMAC_AUTH_OPEN) {
- if(bwepEnabled==TRUE) { //@open-wep
- pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
- }
- else { //@only open
- pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
+ pMgmt->Roam_dbm = param->u.wpa_associate.roam_dbm;
+ if (pMgmt->eAuthenMode == WMAC_AUTH_SHAREKEY) { // @wep-sharekey
+ pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
+ pMgmt->bShareKeyAlgorithm = TRUE;
+ } else if (pMgmt->eAuthenMode == WMAC_AUTH_OPEN) {
+ if (bwepEnabled == TRUE) { // @open-wep
+ pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
+ } else {
+ // @only open
+ pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
}
- }
-//mike save old encryption status
+ }
+ // mike save old encryption status
pDevice->eOldEncryptionStatus = pDevice->eEncryptionStatus;
- if (pDevice->eEncryptionStatus != Ndis802_11EncryptionDisabled)
- pDevice->bEncryptionEnable = TRUE;
- else
- pDevice->bEncryptionEnable = FALSE;
-
- if ((pMgmt->eAuthenMode == WMAC_AUTH_SHAREKEY) ||
- ((pMgmt->eAuthenMode == WMAC_AUTH_OPEN) && (bwepEnabled==TRUE))) {
- //mike re-comment:open-wep && sharekey-wep needn't do initial key!!
-
- }
- else
- KeyvInitTable(pDevice,&pDevice->sKey);
+ if (pDevice->eEncryptionStatus != Ndis802_11EncryptionDisabled)
+ pDevice->bEncryptionEnable = TRUE;
+ else
+ pDevice->bEncryptionEnable = FALSE;
- spin_lock_irq(&pDevice->lock);
- pDevice->bLinkPass = FALSE;
- ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW);
- memset(pMgmt->abyCurrBSSID, 0, 6);
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- netif_stop_queue(pDevice->dev);
+ if ((pMgmt->eAuthenMode == WMAC_AUTH_SHAREKEY) ||
+ ((pMgmt->eAuthenMode == WMAC_AUTH_OPEN) && (bwepEnabled == TRUE))) {
+ // mike re-comment:open-wep && sharekey-wep needn't do initial key!!
+ } else {
+ KeyvInitTable(pDevice, &pDevice->sKey);
+ }
-/*******search if ap_scan=2 ,which is associating request in hidden ssid mode ****/
-{
- PKnownBSS pCurr = NULL;
- pCurr = BSSpSearchBSSList(pDevice,
- pMgmt->abyDesireBSSID,
- pMgmt->abyDesireSSID,
- pDevice->eConfigPHYMode
- );
-
- if (pCurr == NULL){
- printk("wpa_set_associate---->hidden mode site survey before associate.......\n");
- bScheduleCommand((void *) pDevice,
- WLAN_CMD_BSSID_SCAN,
- pMgmt->abyDesireSSID);
- }
-}
+ spin_lock_irq(&pDevice->lock);
+ pDevice->bLinkPass = FALSE;
+ ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_SLOW);
+ memset(pMgmt->abyCurrBSSID, 0, 6);
+ pMgmt->eCurrState = WMAC_STATE_IDLE;
+ netif_stop_queue(pDevice->dev);
+
+/******* search if ap_scan=2, which is associating request in hidden ssid mode ****/
+ {
+ PKnownBSS pCurr = NULL;
+ pCurr = BSSpSearchBSSList(pDevice,
+ pMgmt->abyDesireBSSID,
+ pMgmt->abyDesireSSID,
+ pDevice->eConfigPHYMode
+ );
+
+ if (pCurr == NULL) {
+ printk("wpa_set_associate---->hidden mode site survey before associate.......\n");
+ bScheduleCommand((void *)pDevice,
+ WLAN_CMD_BSSID_SCAN,
+ pMgmt->abyDesireSSID);
+ }
+ }
/****************************************************************/
- bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, NULL);
- spin_unlock_irq(&pDevice->lock);
+ bScheduleCommand((void *)pDevice, WLAN_CMD_SSID, NULL);
+ spin_unlock_irq(&pDevice->lock);
- return ret;
+ return ret;
}
-
/*
* Description:
* wpa_ioctl main function supported for wpa supplicant
@@ -922,7 +826,6 @@ static int wpa_set_associate(PSDevice pDevice,
* Return Value:
*
*/
-
int wpa_ioctl(PSDevice pDevice, struct iw_point *p)
{
struct viawget_wpa_param *param;
@@ -930,10 +833,10 @@ int wpa_ioctl(PSDevice pDevice, struct iw_point *p)
int wpa_ioctl = 0;
if (p->length < sizeof(struct viawget_wpa_param) ||
- p->length > VIAWGET_WPA_MAX_BUF_SIZE || !p->pointer)
+ p->length > VIAWGET_WPA_MAX_BUF_SIZE || !p->pointer)
return -EINVAL;
- param = kmalloc((int)p->length, (int)GFP_KERNEL);
+ param = kmalloc((int)p->length, GFP_KERNEL);
if (param == NULL)
return -ENOMEM;
@@ -944,63 +847,63 @@ int wpa_ioctl(PSDevice pDevice, struct iw_point *p)
switch (param->cmd) {
case VIAWGET_SET_WPA:
- ret = wpa_set_wpa(pDevice, param);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_WPA \n");
+ ret = wpa_set_wpa(pDevice, param);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_WPA \n");
break;
case VIAWGET_SET_KEY:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_KEY \n");
- spin_lock_irq(&pDevice->lock);
- ret = wpa_set_keys(pDevice, param, FALSE);
- spin_unlock_irq(&pDevice->lock);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_KEY \n");
+ spin_lock_irq(&pDevice->lock);
+ ret = wpa_set_keys(pDevice, param, FALSE);
+ spin_unlock_irq(&pDevice->lock);
break;
case VIAWGET_SET_SCAN:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_SCAN \n");
- ret = wpa_set_scan(pDevice, param);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_SCAN \n");
+ ret = wpa_set_scan(pDevice, param);
break;
case VIAWGET_GET_SCAN:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_SCAN\n");
- ret = wpa_get_scan(pDevice, param);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_SCAN\n");
+ ret = wpa_get_scan(pDevice, param);
wpa_ioctl = 1;
break;
case VIAWGET_GET_SSID:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_SSID \n");
- ret = wpa_get_ssid(pDevice, param);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_SSID \n");
+ ret = wpa_get_ssid(pDevice, param);
wpa_ioctl = 1;
break;
case VIAWGET_GET_BSSID:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_BSSID \n");
- ret = wpa_get_bssid(pDevice, param);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_BSSID \n");
+ ret = wpa_get_bssid(pDevice, param);
wpa_ioctl = 1;
break;
case VIAWGET_SET_ASSOCIATE:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_ASSOCIATE \n");
- ret = wpa_set_associate(pDevice, param);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_ASSOCIATE \n");
+ ret = wpa_set_associate(pDevice, param);
break;
case VIAWGET_SET_DISASSOCIATE:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DISASSOCIATE \n");
- ret = wpa_set_disassociate(pDevice, param);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DISASSOCIATE \n");
+ ret = wpa_set_disassociate(pDevice, param);
break;
case VIAWGET_SET_DROP_UNENCRYPT:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DROP_UNENCRYPT \n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DROP_UNENCRYPT \n");
break;
- case VIAWGET_SET_DEAUTHENTICATE:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DEAUTHENTICATE \n");
+ case VIAWGET_SET_DEAUTHENTICATE:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DEAUTHENTICATE \n");
break;
default:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ioctl: unknown cmd=%d\n",
- param->cmd);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ioctl: unknown cmd=%d\n",
+ param->cmd);
+ kfree(param);
return -EOPNOTSUPP;
- break;
}
if ((ret == 0) && wpa_ioctl) {
@@ -1012,7 +915,5 @@ int wpa_ioctl(PSDevice pDevice, struct iw_point *p)
out:
kfree(param);
-
return ret;
}
-
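For context on the wpa_get_scan() hunk above: the reflowed if/else chain is a plain linear dBm-to-percentage mapping. A minimal stand-alone sketch of that mapping, with an illustrative function name that does not exist in the driver:

/* Quality is 100% above -50 dBm, 0% below -90 dBm, and scales
 * linearly across the 40 dB window in between; -70 dBm maps to 50%. */
static int rssi_dbm_to_qual(long dbm)
{
	if (dbm > -50)
		return 100;
	if (dbm < -90)
		return 0;
	return (40 - (-dbm - 50)) * 100 / 40;
}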
diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
index 6675c8226cef..c3bb05dd744f 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.c
+++ b/drivers/staging/wlan-ng/prism2mgmt.c
@@ -406,6 +406,7 @@ int prism2mgmt_scan_results(wlandevice_t *wlandev, void *msgp)
/* SSID */
req->ssid.status = P80211ENUM_msgitem_status_data_ok;
req->ssid.data.len = le16_to_cpu(item->ssid.len);
+ req->ssid.data.len = min_t(u16, req->ssid.data.len, WLAN_BSSID_LEN);
memcpy(req->ssid.data.data, item->ssid.data, req->ssid.data.len);
/* supported rates */
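The one-line prism2mgmt.c change above bounds a device-reported SSID length before it is used to size the memcpy() that follows. A self-contained sketch of the same clamp-before-copy pattern, using hypothetical names rather than the driver's own structures:

#include <string.h>

struct ssid_buf {
	unsigned short len;
	unsigned char data[32];		/* 802.11 SSIDs are at most 32 octets */
};

static void copy_reported_ssid(struct ssid_buf *dst, const unsigned char *src,
			       unsigned short reported_len)
{
	unsigned short len = reported_len;

	/* Never let a length taken from the air size the copy; the
	 * destination buffer's size does. */
	if (len > sizeof(dst->data))
		len = sizeof(dst->data);

	memcpy(dst->data, src, len);
	dst->len = len;
}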
diff --git a/drivers/staging/xgifb/XGI_main.h b/drivers/staging/xgifb/XGI_main.h
index 35f7b2a485e1..e828fd403c35 100644
--- a/drivers/staging/xgifb/XGI_main.h
+++ b/drivers/staging/xgifb/XGI_main.h
@@ -7,47 +7,32 @@
#include "XGIfb.h"
#include "vb_struct.h"
+#include "../../video/sis/sis.h"
#include "vb_def.h"
#define XGIFAIL(x) do { printk(x "\n"); return -EINVAL; } while (0)
-#ifndef PCI_VENDOR_ID_XG
-#define PCI_VENDOR_ID_XG 0x18CA
+#ifndef PCI_DEVICE_ID_XGI_41
+#define PCI_DEVICE_ID_XGI_41 0x041
#endif
-
-#ifndef PCI_DEVICE_ID_XG_40
-#define PCI_DEVICE_ID_XG_40 0x040
-#endif
-#ifndef PCI_DEVICE_ID_XG_41
-#define PCI_DEVICE_ID_XG_41 0x041
-#endif
-#ifndef PCI_DEVICE_ID_XG_42
-#define PCI_DEVICE_ID_XG_42 0x042
+#ifndef PCI_DEVICE_ID_XGI_42
+#define PCI_DEVICE_ID_XGI_42 0x042
#endif
-#ifndef PCI_DEVICE_ID_XG_20
-#define PCI_DEVICE_ID_XG_20 0x020
-#endif
-#ifndef PCI_DEVICE_ID_XG_27
-#define PCI_DEVICE_ID_XG_27 0x027
+#ifndef PCI_DEVICE_ID_XGI_27
+#define PCI_DEVICE_ID_XGI_27 0x027
#endif
static DEFINE_PCI_DEVICE_TABLE(xgifb_pci_table) = {
- {PCI_DEVICE(PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_20)},
- {PCI_DEVICE(PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_27)},
- {PCI_DEVICE(PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_40)},
- {PCI_DEVICE(PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_42)},
+ {PCI_DEVICE(PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XGI_20)},
+ {PCI_DEVICE(PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XGI_27)},
+ {PCI_DEVICE(PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XGI_40)},
+ {PCI_DEVICE(PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XGI_42)},
{0}
};
MODULE_DEVICE_TABLE(pci, xgifb_pci_table);
/* To be included in fb.h */
-#ifndef FB_ACCEL_XGI_XABRE
-#define FB_ACCEL_XGI_XABRE 41 /* XGI 330 ("Xabre") */
-#endif
-
-#define SEQ_DATA 0x15
-
#define XGISR (xgifb_info->dev_info.P3c4)
#define XGICR (xgifb_info->dev_info.P3d4)
#define XGIDACA (xgifb_info->dev_info.P3c8)
@@ -60,12 +45,6 @@ MODULE_DEVICE_TABLE(pci, xgifb_pci_table);
#define XGIDAC2A XGIPART5
#define XGIDAC2D (XGIPART5 + 1)
-#define IND_XGI_PASSWORD 0x05 /* SRs */
-#define IND_XGI_RAMDAC_CONTROL 0x07
-#define IND_XGI_DRAM_SIZE 0x14
-#define IND_XGI_MODULE_ENABLE 0x1E
-#define IND_XGI_PCI_ADDRESS_SET 0x20
-
#define IND_XGI_SCRATCH_REG_CR30 0x30 /* CRs */
#define IND_XGI_SCRATCH_REG_CR31 0x31
#define IND_XGI_SCRATCH_REG_CR32 0x32
@@ -73,10 +52,6 @@ MODULE_DEVICE_TABLE(pci, xgifb_pci_table);
#define IND_XGI_LCD_PANEL 0x36
#define IND_XGI_SCRATCH_REG_CR37 0x37
-#define IND_XGI_CRT2_WRITE_ENABLE_315 0x2F
-
-#define XGI_PASSWORD 0x86 /* SR05 */
-
#define XGI_DRAM_SIZE_MASK 0xF0 /*SR14 */
#define XGI_DRAM_SIZE_1MB 0x00
#define XGI_DRAM_SIZE_2MB 0x01
@@ -88,37 +63,6 @@ MODULE_DEVICE_TABLE(pci, xgifb_pci_table);
#define XGI_DRAM_SIZE_128MB 0x07
#define XGI_DRAM_SIZE_256MB 0x08
-#define XGI_ENABLE_2D 0x40 /* SR1E */
-
-#define XGI_MEM_MAP_IO_ENABLE 0x01 /* SR20 */
-#define XGI_PCI_ADDR_ENABLE 0x80
-
-#define XGI_SIMULTANEOUS_VIEW_ENABLE 0x01 /* CR30 */
-#define XGI_VB_OUTPUT_COMPOSITE 0x04
-#define XGI_VB_OUTPUT_SVIDEO 0x08
-#define XGI_VB_OUTPUT_SCART 0x10
-#define XGI_VB_OUTPUT_LCD 0x20
-#define XGI_VB_OUTPUT_CRT2 0x40
-#define XGI_VB_OUTPUT_HIVISION 0x80
-
-#define XGI_VB_OUTPUT_DISABLE 0x20 /* CR31 */
-#define XGI_DRIVER_MODE 0x40
-
-#define XGI_VB_COMPOSITE 0x01 /* CR32 */
-#define XGI_VB_SVIDEO 0x02
-#define XGI_VB_SCART 0x04
-#define XGI_VB_LCD 0x08
-#define XGI_VB_CRT2 0x10
-#define XGI_CRT1 0x20
-#define XGI_VB_HIVISION 0x40
-#define XGI_VB_YPBPR 0x80
-#define XGI_VB_TV (XGI_VB_COMPOSITE | XGI_VB_SVIDEO | \
- XGI_VB_SCART | XGI_VB_HIVISION|XGI_VB_YPBPR)
-
-#define XGI_EXTERNAL_CHIP_MASK 0x0E /* CR37 */
-#define XGI310_EXTERNAL_CHIP_LVDS 0x02 /* in CR37 << 1 ! */
-#define XGI310_EXTERNAL_CHIP_LVDS_CHRONTEL 0x03 /* in CR37 << 1 ! */
-
/* ------------------- Global Variables ----------------------------- */
/* display status */
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index 2502c49c9c5b..21c037827de4 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -4,6 +4,8 @@
* Base on TW's sis fbdev code.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
/* #include <linux/config.h> */
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -55,7 +57,7 @@ static unsigned int refresh_rate;
#undef XGIFBDEBUG
#ifdef XGIFBDEBUG
-#define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __FUNCTION__ , ## args)
+#define DPRINTK(fmt, args...) pr_debug("%s: " fmt, __func__, ## args)
#else
#define DPRINTK(fmt, args...)
#endif
@@ -142,7 +144,7 @@ static inline void dumpVGAReg(void)
#if 1
#define DEBUGPRN(x)
#else
-#define DEBUGPRN(x) printk(KERN_INFO x "\n");
+#define DEBUGPRN(x) pr_info(x "\n");
#endif
/* --------------- Hardware Access Routines -------------------------- */
@@ -369,15 +371,15 @@ static void XGIRegInit(struct vb_device_info *XGI_Pr, unsigned long BaseAddr)
XGI_Pr->P3c9 = BaseAddr + 0x19;
XGI_Pr->P3da = BaseAddr + 0x2A;
/* Digital video interface registers (LCD) */
- XGI_Pr->Part1Port = BaseAddr + XGI_CRT2_PORT_04;
+ XGI_Pr->Part1Port = BaseAddr + SIS_CRT2_PORT_04;
/* 301 TV Encoder registers */
- XGI_Pr->Part2Port = BaseAddr + XGI_CRT2_PORT_10;
+ XGI_Pr->Part2Port = BaseAddr + SIS_CRT2_PORT_10;
/* 301 Macrovision registers */
- XGI_Pr->Part3Port = BaseAddr + XGI_CRT2_PORT_12;
+ XGI_Pr->Part3Port = BaseAddr + SIS_CRT2_PORT_12;
/* 301 VGA2 (and LCD) registers */
- XGI_Pr->Part4Port = BaseAddr + XGI_CRT2_PORT_14;
+ XGI_Pr->Part4Port = BaseAddr + SIS_CRT2_PORT_14;
/* 301 palette address port registers */
- XGI_Pr->Part5Port = BaseAddr + XGI_CRT2_PORT_14 + 2;
+ XGI_Pr->Part5Port = BaseAddr + SIS_CRT2_PORT_14 + 2;
}
@@ -424,7 +426,7 @@ static void XGIfb_search_mode(struct xgifb_video_info *xgifb_info,
i++;
}
if (!j)
- printk(KERN_INFO "XGIfb: Invalid mode '%s'\n", name);
+ pr_info("Invalid mode '%s'\n", name);
}
static void XGIfb_search_vesamode(struct xgifb_video_info *xgifb_info,
@@ -449,7 +451,7 @@ static void XGIfb_search_vesamode(struct xgifb_video_info *xgifb_info,
invalid:
if (!j)
- printk(KERN_INFO "XGIfb: Invalid VESA mode 0x%x'\n", vesamode);
+ pr_info("Invalid VESA mode 0x%x'\n", vesamode);
}
static int XGIfb_validate_mode(struct xgifb_video_info *xgifb_info, int myindex)
@@ -526,12 +528,6 @@ static int XGIfb_validate_mode(struct xgifb_video_info *xgifb_info, int myindex)
xres = 1600;
yres = 1200;
break;
- /* case LCD_320x480: */ /* TW: FSTN */
- /*
- xres = 320;
- yres = 480;
- break;
- */
default:
xres = 0;
yres = 0;
@@ -692,7 +688,7 @@ static void XGIfb_search_crt2type(const char *name)
i++;
}
if (XGIfb_crt2type < 0)
- printk(KERN_INFO "XGIfb: Invalid CRT2 type: %s\n", name);
+ pr_info("Invalid CRT2 type: %s\n", name);
}
static u8 XGIfb_search_refresh_rate(struct xgifb_video_info *xgifb_info,
@@ -742,7 +738,7 @@ static u8 XGIfb_search_refresh_rate(struct xgifb_video_info *xgifb_info,
if (xgifb_info->rate_idx > 0) {
return xgifb_info->rate_idx;
} else {
- printk(KERN_INFO "XGIfb: Unsupported rate %d for %dx%d\n",
+ pr_info("Unsupported rate %d for %dx%d\n",
rate, xres, yres);
return 0;
}
@@ -811,27 +807,27 @@ static void XGIfb_pre_setmode(struct xgifb_video_info *xgifb_info)
switch (xgifb_info->display2) {
case XGIFB_DISP_CRT:
- cr30 = (XGI_VB_OUTPUT_CRT2 | XGI_SIMULTANEOUS_VIEW_ENABLE);
- cr31 |= XGI_DRIVER_MODE;
+ cr30 = (SIS_VB_OUTPUT_CRT2 | SIS_SIMULTANEOUS_VIEW_ENABLE);
+ cr31 |= SIS_DRIVER_MODE;
break;
case XGIFB_DISP_LCD:
- cr30 = (XGI_VB_OUTPUT_LCD | XGI_SIMULTANEOUS_VIEW_ENABLE);
- cr31 |= XGI_DRIVER_MODE;
+ cr30 = (SIS_VB_OUTPUT_LCD | SIS_SIMULTANEOUS_VIEW_ENABLE);
+ cr31 |= SIS_DRIVER_MODE;
break;
case XGIFB_DISP_TV:
if (xgifb_info->TV_type == TVMODE_HIVISION)
- cr30 = (XGI_VB_OUTPUT_HIVISION
- | XGI_SIMULTANEOUS_VIEW_ENABLE);
+ cr30 = (SIS_VB_OUTPUT_HIVISION
+ | SIS_SIMULTANEOUS_VIEW_ENABLE);
else if (xgifb_info->TV_plug == TVPLUG_SVIDEO)
- cr30 = (XGI_VB_OUTPUT_SVIDEO
- | XGI_SIMULTANEOUS_VIEW_ENABLE);
+ cr30 = (SIS_VB_OUTPUT_SVIDEO
+ | SIS_SIMULTANEOUS_VIEW_ENABLE);
else if (xgifb_info->TV_plug == TVPLUG_COMPOSITE)
- cr30 = (XGI_VB_OUTPUT_COMPOSITE
- | XGI_SIMULTANEOUS_VIEW_ENABLE);
+ cr30 = (SIS_VB_OUTPUT_COMPOSITE
+ | SIS_SIMULTANEOUS_VIEW_ENABLE);
else if (xgifb_info->TV_plug == TVPLUG_SCART)
- cr30 = (XGI_VB_OUTPUT_SCART
- | XGI_SIMULTANEOUS_VIEW_ENABLE);
- cr31 |= XGI_DRIVER_MODE;
+ cr30 = (SIS_VB_OUTPUT_SCART
+ | SIS_SIMULTANEOUS_VIEW_ENABLE);
+ cr31 |= SIS_DRIVER_MODE;
if (XGIfb_tvmode == 1 || xgifb_info->TV_type == TVMODE_PAL)
cr31 |= 0x01;
@@ -840,7 +836,7 @@ static void XGIfb_pre_setmode(struct xgifb_video_info *xgifb_info)
break;
default: /* disable CRT2 */
cr30 = 0x00;
- cr31 |= (XGI_DRIVER_MODE | XGI_VB_OUTPUT_DISABLE);
+ cr31 |= (SIS_DRIVER_MODE | SIS_VB_OUTPUT_DISABLE);
}
xgifb_reg_set(XGICR, IND_XGI_SCRATCH_REG_CR30, cr30);
@@ -854,7 +850,7 @@ static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
u8 reg;
unsigned char doit = 1;
/*
- xgifb_reg_set(XGISR,IND_XGI_PASSWORD,XGI_PASSWORD);
+ xgifb_reg_set(XGISR,IND_SIS_PASSWORD,SIS_PASSWORD);
xgifb_reg_set(XGICR, 0x13, 0x00);
xgifb_reg_and_or(XGISR,0x0E, 0xF0, 0x01);
*test*
@@ -890,7 +886,7 @@ static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
reg |= 0x80;
xgifb_reg_set(XGICR, 0x17, reg);
- xgifb_reg_and(XGISR, IND_XGI_RAMDAC_CONTROL, ~0x04);
+ xgifb_reg_and(XGISR, IND_SIS_RAMDAC_CONTROL, ~0x04);
if (xgifb_info->display2 == XGIFB_DISP_TV &&
xgifb_info->hasVB == HASVB_301) {
@@ -923,7 +919,7 @@ static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
break;
}
xgifb_reg_or(XGIPART1,
- IND_XGI_CRT2_WRITE_ENABLE_315,
+ SIS_CRT2_WENABLE_315,
0x01);
if (xgifb_info->TV_type == TVMODE_NTSC) {
@@ -1118,7 +1114,7 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
if (!htotal || !vtotal) {
DPRINTK("XGIfb: Invalid 'var' information\n");
return -EINVAL;
- } printk(KERN_DEBUG "XGIfb: var->pixclock=%d, htotal=%d, vtotal=%d\n",
+ } pr_debug("var->pixclock=%d, htotal=%d, vtotal=%d\n",
var->pixclock, htotal, vtotal);
if (var->pixclock && htotal && vtotal) {
@@ -1130,7 +1126,7 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
xgifb_info->refresh_rate = 60;
}
- printk(KERN_DEBUG "XGIfb: Change mode to %dx%dx%d-%dHz\n",
+ pr_debug("Change mode to %dx%dx%d-%dHz\n",
var->xres,
var->yres,
var->bits_per_pixel,
@@ -1158,7 +1154,7 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
xgifb_info->mode_idx = -1;
if (xgifb_info->mode_idx < 0) {
- printk(KERN_ERR "XGIfb: Mode %dx%dx%d not supported\n",
+ pr_err("Mode %dx%dx%d not supported\n",
var->xres, var->yres, var->bits_per_pixel);
xgifb_info->mode_idx = old_mode;
return -EINVAL;
@@ -1177,14 +1173,14 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
if (XGISetModeNew(xgifb_info, hw_info,
XGIbios_mode[xgifb_info->mode_idx].mode_no)
== 0) {
- printk(KERN_ERR "XGIfb: Setting mode[0x%x] failed\n",
+ pr_err("Setting mode[0x%x] failed\n",
XGIbios_mode[xgifb_info->mode_idx].mode_no);
return -EINVAL;
}
info->fix.line_length = ((info->var.xres_virtual
* info->var.bits_per_pixel) >> 6);
- xgifb_reg_set(XGISR, IND_XGI_PASSWORD, XGI_PASSWORD);
+ xgifb_reg_set(XGISR, IND_SIS_PASSWORD, SIS_PASSWORD);
xgifb_reg_set(XGICR, 0x13, (info->fix.line_length & 0x00ff));
xgifb_reg_set(XGISR,
@@ -1239,7 +1235,7 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
break;
default:
xgifb_info->video_cmap_len = 16;
- printk(KERN_ERR "XGIfb: Unsupported depth %d",
+ pr_err("Unsupported depth %d",
xgifb_info->video_bpp);
break;
}
@@ -1273,7 +1269,7 @@ static int XGIfb_pan_var(struct fb_var_screeninfo *var, struct fb_info *info)
break;
}
- xgifb_reg_set(XGISR, IND_XGI_PASSWORD, XGI_PASSWORD);
+ xgifb_reg_set(XGISR, IND_SIS_PASSWORD, SIS_PASSWORD);
xgifb_reg_set(XGICR, 0x0D, base & 0xFF);
xgifb_reg_set(XGICR, 0x0C, (base >> 8) & 0xFF);
@@ -1282,7 +1278,7 @@ static int XGIfb_pan_var(struct fb_var_screeninfo *var, struct fb_info *info)
xgifb_reg_and_or(XGISR, 0x37, 0xDF, (base >> 21) & 0x04);
if (xgifb_info->display2 != XGIFB_DISP_NONE) {
- xgifb_reg_or(XGIPART1, IND_XGI_CRT2_WRITE_ENABLE_315, 0x01);
+ xgifb_reg_or(XGIPART1, SIS_CRT2_WENABLE_315, 0x01);
xgifb_reg_set(XGIPART1, 0x06, (base & 0xFF));
xgifb_reg_set(XGIPART1, 0x05, ((base >> 8) & 0xFF));
xgifb_reg_set(XGIPART1, 0x04, ((base >> 16) & 0xFF));
@@ -1387,7 +1383,7 @@ static int XGIfb_get_fix(struct fb_fix_screeninfo *fix, int con,
fix->line_length = xgifb_info->video_linelength;
fix->mmio_start = xgifb_info->mmio_base;
fix->mmio_len = xgifb_info->mmio_size;
- fix->accel = FB_ACCEL_XGI_XABRE;
+ fix->accel = FB_ACCEL_SIS_XABRE;
DEBUGPRN("end of get_fix");
return 0;
@@ -1441,7 +1437,7 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
hrate = (drate * 1000) / htotal;
xgifb_info->refresh_rate =
(unsigned int) (hrate * 2 / vtotal);
- printk(KERN_DEBUG
+ pr_debug(
"%s: pixclock = %d ,htotal=%d, vtotal=%d\n"
"%s: drate=%d, hrate=%d, refresh_rate=%d\n",
__func__, var->pixclock, htotal, vtotal,
@@ -1479,7 +1475,7 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
if (!found_mode) {
- printk(KERN_ERR "XGIfb: %dx%dx%d is no valid mode\n",
+ pr_err("%dx%dx%d is no valid mode\n",
var->xres, var->yres, var->bits_per_pixel);
search_idx = 0;
while (XGIbios_mode[search_idx].mode_no != 0) {
@@ -1498,11 +1494,11 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
if (found_mode) {
var->xres = XGIbios_mode[search_idx].xres;
var->yres = XGIbios_mode[search_idx].yres;
- printk(KERN_DEBUG "XGIfb: Adapted to mode %dx%dx%d\n",
+ pr_debug("Adapted to mode %dx%dx%d\n",
var->xres, var->yres, var->bits_per_pixel);
} else {
- printk(KERN_ERR "XGIfb: Failed to find similar mode to %dx%dx%d\n",
+ pr_err("Failed to find similar mode to %dx%dx%d\n",
var->xres, var->yres, var->bits_per_pixel);
return -EINVAL;
}
@@ -1634,9 +1630,9 @@ static int XGIfb_get_dram_size(struct xgifb_video_info *xgifb_info)
/* xorg driver sets 32MB * 1 channel */
if (xgifb_info->chip == XG27)
- xgifb_reg_set(XGISR, IND_XGI_DRAM_SIZE, 0x51);
+ xgifb_reg_set(XGISR, IND_SIS_DRAM_SIZE, 0x51);
- reg = xgifb_reg_get(XGISR, IND_XGI_DRAM_SIZE);
+ reg = xgifb_reg_get(XGISR, IND_SIS_DRAM_SIZE);
switch ((reg & XGI_DRAM_SIZE_MASK) >> 4) {
case XGI_DRAM_SIZE_1MB:
xgifb_info->video_size = 0x100000;
@@ -1711,7 +1707,7 @@ static int XGIfb_get_dram_size(struct xgifb_video_info *xgifb_info)
/* xgifb_info->video_size = 0x200000; */ /* 1024x768x16 */
/* xgifb_info->video_size = 0x1000000; */ /* benchmark */
- printk("XGIfb: SR14=%x DramSzie %x ChannelNum %x\n",
+ pr_info("SR14=%x DramSize %x ChannelNum %x\n",
reg,
xgifb_info->video_size, ChannelNum);
return 0;
@@ -1736,7 +1732,7 @@ static void XGIfb_detect_VB(struct xgifb_video_info *xgifb_info)
cr32 = xgifb_reg_get(XGICR, IND_XGI_SCRATCH_REG_CR32);
- if ((cr32 & XGI_CRT1) && !XGIfb_crt1off)
+ if ((cr32 & SIS_CRT1) && !XGIfb_crt1off)
XGIfb_crt1off = 0;
else {
if (cr32 & 0x5F)
@@ -1746,11 +1742,11 @@ static void XGIfb_detect_VB(struct xgifb_video_info *xgifb_info)
}
if (!xgifb_info->display2_force) {
- if (cr32 & XGI_VB_TV)
+ if (cr32 & SIS_VB_TV)
xgifb_info->display2 = XGIFB_DISP_TV;
- else if (cr32 & XGI_VB_LCD)
+ else if (cr32 & SIS_VB_LCD)
xgifb_info->display2 = XGIFB_DISP_LCD;
- else if (cr32 & XGI_VB_CRT2)
+ else if (cr32 & SIS_VB_CRT2)
xgifb_info->display2 = XGIFB_DISP_CRT;
else
xgifb_info->display2 = XGIFB_DISP_NONE;
@@ -1759,14 +1755,14 @@ static void XGIfb_detect_VB(struct xgifb_video_info *xgifb_info)
if (XGIfb_tvplug != -1)
/* PR/TW: Override with option */
xgifb_info->TV_plug = XGIfb_tvplug;
- else if (cr32 & XGI_VB_HIVISION) {
+ else if (cr32 & SIS_VB_HIVISION) {
xgifb_info->TV_type = TVMODE_HIVISION;
xgifb_info->TV_plug = TVPLUG_SVIDEO;
- } else if (cr32 & XGI_VB_SVIDEO)
+ } else if (cr32 & SIS_VB_SVIDEO)
xgifb_info->TV_plug = TVPLUG_SVIDEO;
- else if (cr32 & XGI_VB_COMPOSITE)
+ else if (cr32 & SIS_VB_COMPOSITE)
xgifb_info->TV_plug = TVPLUG_COMPOSITE;
- else if (cr32 & XGI_VB_SCART)
+ else if (cr32 & SIS_VB_SCART)
xgifb_info->TV_plug = TVPLUG_SCART;
if (xgifb_info->TV_type == 0) {
@@ -1811,11 +1807,11 @@ static void XGIfb_get_VB_type(struct xgifb_video_info *xgifb_info)
if (!XGIfb_has_VB(xgifb_info)) {
reg = xgifb_reg_get(XGICR, IND_XGI_SCRATCH_REG_CR37);
- switch ((reg & XGI_EXTERNAL_CHIP_MASK) >> 1) {
- case XGI310_EXTERNAL_CHIP_LVDS:
+ switch ((reg & SIS_EXTERNAL_CHIP_MASK) >> 1) {
+ case SIS_EXTERNAL_CHIP_LVDS:
xgifb_info->hasVB = HASVB_LVDS;
break;
- case XGI310_EXTERNAL_CHIP_LVDS_CHRONTEL:
+ case SIS_EXTERNAL_CHIP_LVDS_CHRONTEL:
xgifb_info->hasVB = HASVB_LVDS_CHRONTEL;
break;
default:
@@ -1917,7 +1913,7 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
xgifb_info->vga_base = pci_resource_start(pdev, 2) + 0x30;
hw_info->pjIOAddress = (unsigned char *)xgifb_info->vga_base;
/* XGI_Pr.RelIO = ioremap(pci_resource_start(pdev, 2), 128) + 0x30; */
- printk("XGIfb: Relocate IO address: %lx [%08lx]\n",
+ pr_info("Relocate IO address: %lx [%08lx]\n",
(unsigned long)pci_resource_start(pdev, 2),
xgifb_info->dev_info.RelIO);
@@ -1933,17 +1929,17 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
XGIRegInit(&xgifb_info->dev_info, (unsigned long)hw_info->pjIOAddress);
- xgifb_reg_set(XGISR, IND_XGI_PASSWORD, XGI_PASSWORD);
- reg1 = xgifb_reg_get(XGISR, IND_XGI_PASSWORD);
+ xgifb_reg_set(XGISR, IND_SIS_PASSWORD, SIS_PASSWORD);
+ reg1 = xgifb_reg_get(XGISR, IND_SIS_PASSWORD);
if (reg1 != 0xa1) { /*I/O error */
- printk("\nXGIfb: I/O error!!!");
+ pr_err("I/O error!!!");
ret = -EIO;
goto error;
}
switch (xgifb_info->chip_id) {
- case PCI_DEVICE_ID_XG_20:
+ case PCI_DEVICE_ID_XGI_20:
xgifb_reg_or(XGICR, Index_CR_GPIO_Reg3, GPIOG_EN);
CR48 = xgifb_reg_get(XGICR, Index_CR_GPIO_Reg1);
if (CR48&GPIOG_READ)
@@ -1951,16 +1947,16 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
else
xgifb_info->chip = XG20;
break;
- case PCI_DEVICE_ID_XG_40:
+ case PCI_DEVICE_ID_XGI_40:
xgifb_info->chip = XG40;
break;
- case PCI_DEVICE_ID_XG_41:
+ case PCI_DEVICE_ID_XGI_41:
xgifb_info->chip = XG41;
break;
- case PCI_DEVICE_ID_XG_42:
+ case PCI_DEVICE_ID_XGI_42:
xgifb_info->chip = XG42;
break;
- case PCI_DEVICE_ID_XG_27:
+ case PCI_DEVICE_ID_XGI_27:
xgifb_info->chip = XG27;
break;
default:
@@ -1968,31 +1964,31 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
goto error;
}
- printk("XGIfb:chipid = %x\n", xgifb_info->chip);
+ pr_info("chipid = %x\n", xgifb_info->chip);
hw_info->jChipType = xgifb_info->chip;
if (XGIfb_get_dram_size(xgifb_info)) {
- printk(KERN_INFO "XGIfb: Fatal error: Unable to determine RAM size.\n");
+ pr_err("Fatal error: Unable to determine RAM size.\n");
ret = -ENODEV;
goto error;
}
/* Enable PCI_LINEAR_ADDRESSING and MMIO_ENABLE */
xgifb_reg_or(XGISR,
- IND_XGI_PCI_ADDRESS_SET,
- (XGI_PCI_ADDR_ENABLE | XGI_MEM_MAP_IO_ENABLE));
+ IND_SIS_PCI_ADDRESS_SET,
+ (SIS_PCI_ADDR_ENABLE | SIS_MEM_MAP_IO_ENABLE));
/* Enable 2D accelerator engine */
- xgifb_reg_or(XGISR, IND_XGI_MODULE_ENABLE, XGI_ENABLE_2D);
+ xgifb_reg_or(XGISR, IND_SIS_MODULE_ENABLE, SIS_ENABLE_2D);
hw_info->ulVideoMemorySize = xgifb_info->video_size;
if (!request_mem_region(xgifb_info->video_base,
xgifb_info->video_size,
"XGIfb FB")) {
- printk("unable request memory size %x",
+ pr_err("unable to request memory size %x\n",
xgifb_info->video_size);
- printk(KERN_ERR "XGIfb: Fatal error: Unable to reserve frame buffer memory\n");
- printk(KERN_ERR "XGIfb: Is there another framebuffer driver active?\n");
+ pr_err("Fatal error: Unable to reserve frame buffer memory\n");
+ pr_err("Is there another framebuffer driver active?\n");
ret = -ENODEV;
goto error;
}
@@ -2000,7 +1996,7 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
if (!request_mem_region(xgifb_info->mmio_base,
xgifb_info->mmio_size,
"XGIfb MMIO")) {
- printk(KERN_ERR "XGIfb: Fatal error: Unable to reserve MMIO region\n");
+ pr_err("Fatal error: Unable to reserve MMIO region\n");
ret = -ENODEV;
goto error_0;
}
@@ -2010,20 +2006,18 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
xgifb_info->mmio_vbase = ioremap(xgifb_info->mmio_base,
xgifb_info->mmio_size);
- printk(KERN_INFO "XGIfb: Framebuffer at 0x%lx, mapped to 0x%p, size %dk\n",
+ pr_info("Framebuffer at 0x%lx, mapped to 0x%p, size %dk\n",
xgifb_info->video_base,
xgifb_info->video_vbase,
xgifb_info->video_size / 1024);
- printk(KERN_INFO "XGIfb: MMIO at 0x%lx, mapped to 0x%p, size %ldk\n",
+ pr_info("MMIO at 0x%lx, mapped to 0x%p, size %ldk\n",
xgifb_info->mmio_base, xgifb_info->mmio_vbase,
xgifb_info->mmio_size / 1024);
- printk("XGIfb: XGIInitNew() ...");
+
pci_set_drvdata(pdev, xgifb_info);
- if (XGIInitNew(pdev))
- printk("OK\n");
- else
- printk("Fail\n");
+ if (!XGIInitNew(pdev))
+ pr_err("XGIInitNew() failed!\n");
xgifb_info->mtrr = (unsigned int) 0;
@@ -2033,13 +2027,12 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
xgifb_info->hasVB = HASVB_NONE;
} else if (xgifb_info->chip == XG21) {
CR38 = xgifb_reg_get(XGICR, 0x38);
- if ((CR38&0xE0) == 0xC0) {
+ if ((CR38&0xE0) == 0xC0)
xgifb_info->display2 = XGIFB_DISP_LCD;
- } else if ((CR38&0xE0) == 0x60) {
+ else if ((CR38&0xE0) == 0x60)
xgifb_info->hasVB = HASVB_CHRONTEL;
- } else {
+ else
xgifb_info->hasVB = HASVB_NONE;
- }
} else {
XGIfb_get_VB_type(xgifb_info);
}
@@ -2053,10 +2046,10 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
reg = xgifb_reg_get(XGIPART4, 0x01);
if (reg >= 0xE0) {
hw_info->ujVBChipID = VB_CHIP_302LV;
- printk(KERN_INFO "XGIfb: XGI302LV bridge detected (revision 0x%02x)\n", reg);
+ pr_info("XGI302LV bridge detected (revision 0x%02x)\n", reg);
} else if (reg >= 0xD0) {
hw_info->ujVBChipID = VB_CHIP_301LV;
- printk(KERN_INFO "XGIfb: XGI301LV bridge detected (revision 0x%02x)\n", reg);
+ pr_info("XGI301LV bridge detected (revision 0x%02x)\n", reg);
}
/* else if (reg >= 0xB0) {
hw_info->ujVBChipID = VB_CHIP_301B;
@@ -2065,17 +2058,17 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
} */
else {
hw_info->ujVBChipID = VB_CHIP_301;
- printk("XGIfb: XGI301 bridge detected\n");
+ pr_info("XGI301 bridge detected\n");
}
break;
case HASVB_302:
reg = xgifb_reg_get(XGIPART4, 0x01);
if (reg >= 0xE0) {
hw_info->ujVBChipID = VB_CHIP_302LV;
- printk(KERN_INFO "XGIfb: XGI302LV bridge detected (revision 0x%02x)\n", reg);
+ pr_info("XGI302LV bridge detected (revision 0x%02x)\n", reg);
} else if (reg >= 0xD0) {
hw_info->ujVBChipID = VB_CHIP_301LV;
- printk(KERN_INFO "XGIfb: XGI302LV bridge detected (revision 0x%02x)\n", reg);
+ pr_info("XGI302LV bridge detected (revision 0x%02x)\n", reg);
} else if (reg >= 0xB0) {
reg1 = xgifb_reg_get(XGIPART4, 0x23);
@@ -2083,27 +2076,27 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
} else {
hw_info->ujVBChipID = VB_CHIP_302;
- printk(KERN_INFO "XGIfb: XGI302 bridge detected\n");
+ pr_info("XGI302 bridge detected\n");
}
break;
case HASVB_LVDS:
hw_info->ulExternalChip = 0x1;
- printk(KERN_INFO "XGIfb: LVDS transmitter detected\n");
+ pr_info("LVDS transmitter detected\n");
break;
case HASVB_TRUMPION:
hw_info->ulExternalChip = 0x2;
- printk(KERN_INFO "XGIfb: Trumpion Zurac LVDS scaler detected\n");
+ pr_info("Trumpion Zurac LVDS scaler detected\n");
break;
case HASVB_CHRONTEL:
hw_info->ulExternalChip = 0x4;
- printk(KERN_INFO "XGIfb: Chrontel TV encoder detected\n");
+ pr_info("Chrontel TV encoder detected\n");
break;
case HASVB_LVDS_CHRONTEL:
hw_info->ulExternalChip = 0x5;
- printk(KERN_INFO "XGIfb: LVDS transmitter and Chrontel TV encoder detected\n");
+ pr_info("LVDS transmitter and Chrontel TV encoder detected\n");
break;
default:
- printk(KERN_INFO "XGIfb: No or unknown bridge type detected\n");
+ pr_info("No or unknown bridge type detected\n");
break;
}
@@ -2117,10 +2110,6 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
reg = xgifb_reg_get(XGICR, IND_XGI_LCD_PANEL);
reg &= 0x0f;
hw_info->ulCRT2LCDType = XGI310paneltype[reg];
-
- } else {
- /* TW: FSTN/DSTN */
- hw_info->ulCRT2LCDType = LCD_320x480;
}
}
@@ -2147,9 +2136,6 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
if (tmp & 0x20) {
tmp = xgifb_reg_get(
XGIPART1, 0x13);
- if (tmp & 0x04) {
- /* XGI_Pr.XGI_UseLCDA = 1; */
- }
}
}
}
@@ -2222,12 +2208,12 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
break;
default:
xgifb_info->video_cmap_len = 16;
- printk(KERN_INFO "XGIfb: Unsupported depth %d",
+ pr_info("Unsupported depth %d\n",
xgifb_info->video_bpp);
break;
}
- printk(KERN_INFO "XGIfb: Default mode is %dx%dx%d (%dHz)\n",
+ pr_info("Default mode is %dx%dx%d (%dHz)\n",
xgifb_info->video_width,
xgifb_info->video_height,
xgifb_info->video_bpp,
@@ -2404,7 +2390,7 @@ MODULE_PARM_DESC(filter,
static void __exit xgifb_remove_module(void)
{
pci_unregister_driver(&xgifb_driver);
- printk(KERN_DEBUG "xgifb: Module unloaded\n");
+ pr_debug("Module unloaded\n");
}
module_exit(xgifb_remove_module);
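The XGI_main_26.c hunks above replace bare printk() calls with the pr_* helpers and drop the hand-written "XGIfb: " prefixes; that relies on pr_fmt() being defined at the top of the file before any include that pulls in printk.h. A rough user-space illustration of the expansion, with pr_info() stubbed via printf() and the prefix spelled out instead of coming from KBUILD_MODNAME:

#define pr_fmt(fmt) "xgifb: " fmt
#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

#include <stdio.h>

int main(void)
{
	pr_info("Default mode is %dx%dx%d (%dHz)\n", 1024, 768, 16, 60);
	/* prints: xgifb: Default mode is 1024x768x16 (60Hz) */
	return 0;
}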
diff --git a/drivers/staging/xgifb/XGIfb.h b/drivers/staging/xgifb/XGIfb.h
index 2c866bb65a00..37bb730de047 100644
--- a/drivers/staging/xgifb/XGIfb.h
+++ b/drivers/staging/xgifb/XGIfb.h
@@ -3,8 +3,8 @@
#include <linux/ioctl.h>
#include <linux/types.h>
-#include "vb_struct.h"
#include "vgatypes.h"
+#include "vb_struct.h"
enum xgifb_display_type {
XGIFB_DISP_NONE = 0,
diff --git a/drivers/staging/xgifb/vb_def.h b/drivers/staging/xgifb/vb_def.h
index 5beeef99bb14..c7317931f671 100644
--- a/drivers/staging/xgifb/vb_def.h
+++ b/drivers/staging/xgifb/vb_def.h
@@ -1,153 +1,48 @@
/* $XFree86: xc/programs/Xserver/hw/xfree86/drivers/xgi/initdef.h
* ,v 1.4 2000/12/02 01:16:17 dawes Exp $*/
-#ifndef _INITDEF_
-#define _INITDEF_
+#ifndef _VB_DEF_
+#define _VB_DEF_
+#include "../../video/sis/initdef.h"
#define VB_XGI301C 0x0020 /* for 301C */
-/*end 301b*/
-
-#define VB_YPbPr525p 0x01
-#define VB_YPbPr750p 0x02
#define VB_YPbPr1080i 0x03
#define LVDSCRT1Len 15
-
-#define SupportCHTV 0x0800
#define SupportCRT2in301C 0x0100 /* for 301C */
#define SetCHTVOverScan 0x8000
-#define PanelRGB18Bit 0x0100
-#define PanelRGB24Bit 0x0000
-#define Panel320x480 0x07 /*fstn*/
+#define Panel_320x480 0x07 /*fstn*/
/* [ycchen] 02/12/03 Modify for Multi-Sync. LCD Support */
#define PanelResInfo 0x1F /* CR36 Panel Type/LCDResInfo */
-#define Panel800x600 0x01
-#define Panel1024x768 0x02
-#define Panel1024x768x75 0x22
-#define Panel1280x1024 0x03
-#define Panel1280x1024x75 0x23
-#define Panel640x480 0x04
-#define Panel1280x960 0x07
-#define Panel1400x1050 0x09
-#define Panel1600x1200 0x0B
+#define Panel_1024x768x75 0x22
+#define Panel_1280x1024x75 0x23
#define PanelRef60Hz 0x00
#define PanelRef75Hz 0x20
-#define CRT2DisplayFlag 0x2000
-
#define YPbPr525iVCLK 0x03B
#define YPbPr525iVCLK_2 0x03A
#define XGI_CRT2_PORT_00 (0x00 - 0x030)
-#define XGI_CRT2_PORT_04 (0x04 - 0x030)
-#define XGI_CRT2_PORT_10 (0x10 - 0x30)
-#define XGI_CRT2_PORT_12 (0x12 - 0x30)
-#define XGI_CRT2_PORT_14 (0x14 - 0x30)
-
-#define _PanelType00 0x00
-#define _PanelType01 0x08
-#define _PanelType02 0x10
-#define _PanelType03 0x18
-#define _PanelType04 0x20
-#define _PanelType05 0x28
-#define _PanelType06 0x30
-#define _PanelType07 0x38
-#define _PanelType08 0x40
-#define _PanelType09 0x48
-#define _PanelType0A 0x50
-#define _PanelType0B 0x58
-#define _PanelType0C 0x60
-#define _PanelType0D 0x68
-#define _PanelType0E 0x70
-#define _PanelType0F 0x78
/* =============================================================
for 310
============================================================== */
-/* add LCDDataList for GetLCDPtr */
-#define LCDDataList (VBIOSTablePointerStart+0x22)
-/* */
-/* Modify from 310.inc */
-/* */
-/* */
-
#define ModeSoftSetting 0x04
-#define BoardTVType 0x02
-
-#define SoftDRAMType 0x80 /* DRAMSetting */
-
/* ---------------- SetMode Stack */
#define CRT1Len 15
#define VCLKLen 4
-#define VGA_XGI340 0x0001 /* 340 series */
-
-#define VB_XGI301 0x0001 /* VB Type Info */
-#define VB_XGI301B 0x0002 /* 301 series */
-#define VB_XGI302B 0x0004
-#define VB_NoLCD 0x8000
-#define VB_XGI301LV 0x0008
-#define VB_XGI302LV 0x0010
-#define VB_LVDS_NS 0x0001 /* 3rd party chip */
-
-#define ModeInfoFlag 0x0007
-#define ModeText 0x0000
-#define ModeEGA 0x0002 /* 16 colors mode */
-#define ModeVGA 0x0003 /* 256 colors mode */
-
-#define DACInfoFlag 0x0018
-
-#define MemoryInfoFlag 0x01e0
-#define MemorySizeShift 5
-
-#define Charx8Dot 0x0200
-#define LineCompareOff 0x0400
-#define CRT2Mode 0x0800
-#define HalfDCLK 0x1000
-#define NoSupportSimuTV 0x2000
-#define DoubleScanMode 0x8000
-
-/* -------------- Ext_InfoFlag */
-#define Support16Bpp 0x0005
-#define Support32Bpp 0x0007
#define SupportAllCRT2 0x0078
-#define SupportTV 0x0008
-#define SupportHiVisionTV 0x0010
-#define SupportLCD 0x0020
-#define SupportRAMDAC2 0x0040
#define NoSupportTV 0x0070
#define NoSupportHiVisionTV 0x0060
#define NoSupportLCD 0x0058
-#define SupportTV1024 0x0800 /* 301btest */
-#define SupportYPbPr 0x1000 /* 301lv */
-#define InterlaceMode 0x0080
-#define SyncPP 0x0000
-#define SyncPN 0x4000
-#define SyncNP 0x8000
-#define SyncNN 0xC000
/* -------------- SetMode Stack/Scratch */
-#define SetSimuScanMode 0x0001 /* VBInfo/CR30 & CR31 */
-#define SwitchToCRT2 0x0002
-#define SetCRT2ToTV 0x089C
-#define SetCRT2ToAVIDEO 0x0004
-#define SetCRT2ToSVIDEO 0x0008
-#define SetCRT2ToSCART 0x0010
-#define SetCRT2ToLCD 0x0020
-#define SetCRT2ToRAMDAC 0x0040
-#define SetCRT2ToHiVisionTV 0x0080
-#define SetCRT2ToLCDA 0x0100
-#define SetInSlaveMode 0x0200
-#define SetNotSimuMode 0x0400
-#define SetCRT2ToYPbPr 0x0800
-#define LoadDACFlag 0x1000
-#define DisableCRT2Display 0x2000
-#define DriverMode 0x4000
+#define XGI_SetCRT2ToLCDA 0x0100
#define SetCRT2ToDualEdge 0x8000
-#define ProgrammingCRT2 0x0001 /* Set Flag */
#define ReserveTVOption 0x0008
#define GatingCRT 0x0800
#define DisableChB 0x1000
@@ -155,23 +50,14 @@
#define DisableChA 0x4000
#define EnableChA 0x8000
-#define SetNTSCTV 0x0000 /* TV Info */
-#define SetPALTV 0x0001
-#define SetNTSCJ 0x0002
-#define SetPALMTV 0x0004
-#define SetPALNTV 0x0008
-#define SetYPbPrMode525i 0x0020
-#define SetYPbPrMode525p 0x0040
-#define SetYPbPrMode750p 0x0080
-#define SetYPbPrMode1080i 0x0100
#define SetTVLowResolution 0x0400
#define TVSimuMode 0x0800
#define RPLLDIV2XO 0x1000
#define NTSC1024x768 0x2000
#define SetTVLockMode 0x4000
-#define LCDVESATiming 0x0001 /* LCD Info/CR37 */
-#define EnableLVDSDDA 0x0002
+#define XGI_LCDVESATiming 0x0001 /* LCD Info/CR37 */
+#define XGI_EnableLVDSDDA 0x0002
#define EnableScalingLCD 0x0008
#define SetPWDEnable 0x0004
#define SetLCDtoNonExpanding 0x0010
@@ -184,7 +70,7 @@
#define EnableLCD24bpp 0x0004 /* default */
#define DisableLCD24bpp 0x0000
#define LCDPolarity 0x00c0 /* default: SyncNN */
-#define LCDDualLink 0x0100
+#define XGI_LCDDualLink 0x0100
#define EnableSpectrum 0x0200
#define PWDEnable 0x0400
#define EnableVBCLKDRVLOW 0x4000
@@ -206,31 +92,21 @@
#define TVSense 0xc7
-#define TVOverScan 0x10 /* CR35 */
-
#define YPbPrMode 0xe0
#define YPbPrMode525i 0x00
#define YPbPrMode525p 0x20
#define YPbPrMode750p 0x40
#define YPbPrMode1080i 0x60
-
-#define LCDRGB18Bit 0x01 /* CR37 */
-#define LCDNonExpanding 0x10
-#define LCDSync 0x20
-#define LCDSyncBit 0xe0 /* H/V polarity & sync ID */
-
#define ScalingLCD 0x08
-#define EnableDualEdge 0x01 /* CR38 */
-#define SetToLCDA 0x02
#define SetYPbPr 0x04
/* ---------------------- VUMA Information */
#define DisplayDeviceFromCMOS 0x10
/* ---------------------- HK Evnet Definition */
-#define ModeSwitchStatus 0xf0
+#define XGI_ModeSwitchStatus 0xf0
#define ActiveCRT1 0x10
#define ActiveLCD 0x0020
#define ActiveTV 0x40
@@ -246,28 +122,13 @@
/* translated from asm code 301def.h */
/* */
/* --------------------------------------------------------- */
-#define LCDDataLen 8
-#define TVDataLen 12
#define LVDSCRT1Len_H 8
#define LVDSCRT1Len_V 7
-#define LVDSDataLen 6
-#define LVDSDesDataLen 6
#define LCDDesDataLen 6
#define LVDSDesDataLen2 8
#define LCDDesDataLen2 8
-#define CHTVRegLen 16
-#define StHiTVHT 892
-#define StHiTVVT 1126
-#define StHiTextTVHT 1000
-#define StHiTextTVVT 1126
-#define ExtHiTVHT 2100
-#define ExtHiTVVT 1125
-#define NTSCHT 1716
-#define NTSCVT 525
#define NTSC1024x768HT 1908
-#define PALHT 1728
-#define PALVT 625
#define YPbPrTV525iHT 1716 /* YPbPr */
#define YPbPrTV525iVT 525
@@ -276,22 +137,16 @@
#define YPbPrTV750pHT 1650
#define YPbPrTV750pVT 750
-#define CRT2Delay1 0x04 /* XGI301 */
-#define CRT2Delay2 0x0A /* 301B,302 */
-
-
#define VCLK25_175 0x00
#define VCLK28_322 0x01
#define VCLK31_5 0x02
#define VCLK36 0x03
-#define VCLK40 0x04
#define VCLK43_163 0x05
#define VCLK44_9 0x06
#define VCLK49_5 0x07
#define VCLK50 0x08
#define VCLK52_406 0x09
#define VCLK56_25 0x0A
-#define VCLK65 0x0B
#define VCLK68_179 0x0D
#define VCLK72_852 0x0E
#define VCLK75 0x0F
@@ -300,7 +155,6 @@
#define VCLK83_95 0x13
#define VCLK86_6 0x15
#define VCLK94_5 0x16
-#define VCLK108_2 0x19
#define VCLK113_309 0x1B
#define VCLK116_406 0x1C
#define VCLK135_5 0x1E
@@ -327,16 +181,10 @@
#define VCLK125_999 0x51
#define VCLK148_5 0x52
#define VCLK217_325 0x55
-#define YPbPr750pVCLK 0x57
+#define XGI_YPbPr750pVCLK 0x57
-#define TVVCLKDIV2 0x3A
-#define TVVCLK 0x3B
-#define HiTVVCLKDIV2 0x3C
-#define HiTVVCLK 0x3D
-#define HiTVSimuVCLK 0x3E
-#define HiTVTextVCLK 0x3F
#define VCLK39_77 0x40
-#define YPbPr525pVCLK 0x3A
+#define YPbPr525pVCLK 0x3A
#define NTSC1024VCLK 0x41
#define VCLK35_2 0x49 /* ; 800x480 */
#define VCLK122_61 0x4A
diff --git a/drivers/staging/xgifb/vb_init.c b/drivers/staging/xgifb/vb_init.c
index 4ccd988ffd7c..94d5c35e22fb 100644
--- a/drivers/staging/xgifb/vb_init.c
+++ b/drivers/staging/xgifb/vb_init.c
@@ -3,8 +3,8 @@
#include <linux/pci.h>
#include <linux/vmalloc.h>
-#include "vgatypes.h"
#include "XGIfb.h"
+#include "vgatypes.h"
#include "vb_def.h"
#include "vb_struct.h"
@@ -1268,7 +1268,7 @@ static void XGINew_SetModeScratch(struct xgi_hw_device_info *HwDeviceExtension,
if (pVBInfo->IF_DEF_HiVision == 1) {
if ((temp >> 8) & ActiveHiTV)
- tempcl |= SetCRT2ToHiVisionTV;
+ tempcl |= SetCRT2ToHiVision;
}
if (pVBInfo->IF_DEF_YPbPr == 1) {
@@ -1287,7 +1287,7 @@ static void XGINew_SetModeScratch(struct xgi_hw_device_info *HwDeviceExtension,
if (pVBInfo->IF_DEF_HiVision == 1) {
if ((temp >> 8) & ActiveHiTV)
- tempcl |= SetCRT2ToHiVisionTV;
+ tempcl |= SetCRT2ToHiVision;
}
if (pVBInfo->IF_DEF_YPbPr == 1) {
@@ -1299,9 +1299,9 @@ static void XGINew_SetModeScratch(struct xgi_hw_device_info *HwDeviceExtension,
tempcl |= SetSimuScanMode;
if ((!(temp & ActiveCRT1)) && ((temp & ActiveLCD) || (temp & ActiveTV)
|| (temp & ActiveCRT2)))
- tempcl ^= (SetSimuScanMode | SwitchToCRT2);
+ tempcl ^= (SetSimuScanMode | SwitchCRT2);
if ((temp & ActiveLCD) && (temp & ActiveTV))
- tempcl ^= (SetSimuScanMode | SwitchToCRT2);
+ tempcl ^= (SetSimuScanMode | SwitchCRT2);
xgifb_reg_set(pVBInfo->P3d4, 0x30, tempcl);
CR31Data = xgifb_reg_get(pVBInfo->P3d4, 0x31);
@@ -1516,11 +1516,11 @@ unsigned char XGIInitNew(struct pci_dev *pdev)
pVBInfo->P3c9 = pVBInfo->BaseAddr + 0x19;
pVBInfo->P3da = pVBInfo->BaseAddr + 0x2A;
pVBInfo->Part0Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_00;
- pVBInfo->Part1Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_04;
- pVBInfo->Part2Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_10;
- pVBInfo->Part3Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_12;
- pVBInfo->Part4Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_14;
- pVBInfo->Part5Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_14 + 2;
+ pVBInfo->Part1Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_04;
+ pVBInfo->Part2Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_10;
+ pVBInfo->Part3Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_12;
+ pVBInfo->Part4Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_14;
+ pVBInfo->Part5Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_14 + 2;
printk("5");
if (HwDeviceExtension->jChipType < XG20) /* kuku 2004/06/25 */
diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c
index 67a316c3c108..2919924213c4 100644
--- a/drivers/staging/xgifb/vb_setmode.c
+++ b/drivers/staging/xgifb/vb_setmode.c
@@ -61,20 +61,20 @@ static const unsigned short XGINew_VGA_DAC[] = {
void InitTo330Pointer(unsigned char ChipType, struct vb_device_info *pVBInfo)
{
pVBInfo->SModeIDTable = (struct XGI_StStruct *) XGI330_SModeIDTable;
- pVBInfo->StandTable = (struct XGI_StandTableStruct *) XGI330_StandTable;
+ pVBInfo->StandTable = (struct SiS_StandTable_S *) XGI330_StandTable;
pVBInfo->EModeIDTable = (struct XGI_ExtStruct *) XGI330_EModeIDTable;
pVBInfo->RefIndex = (struct XGI_Ext2Struct *) XGI330_RefIndex;
pVBInfo->XGINEWUB_CRT1Table
= (struct XGI_CRT1TableStruct *) XGI_CRT1Table;
- pVBInfo->MCLKData = (struct XGI_MCLKDataStruct *) XGI340New_MCLKData;
+ pVBInfo->MCLKData = (struct SiS_MCLKData *) XGI340New_MCLKData;
pVBInfo->ECLKData = (struct XGI_ECLKDataStruct *) XGI340_ECLKData;
- pVBInfo->VCLKData = (struct XGI_VCLKDataStruct *) XGI_VCLKData;
- pVBInfo->VBVCLKData = (struct XGI_VBVCLKDataStruct *) XGI_VBVCLKData;
+ pVBInfo->VCLKData = (struct SiS_VCLKData *) XGI_VCLKData;
+ pVBInfo->VBVCLKData = (struct SiS_VBVCLKData *) XGI_VBVCLKData;
pVBInfo->ScreenOffset = XGI330_ScreenOffset;
- pVBInfo->StResInfo = (struct XGI_StResInfoStruct *) XGI330_StResInfo;
+ pVBInfo->StResInfo = (struct SiS_StResInfo_S *) XGI330_StResInfo;
pVBInfo->ModeResInfo
- = (struct XGI_ModeResInfoStruct *) XGI330_ModeResInfo;
+ = (struct SiS_ModeResInfo_S *) XGI330_ModeResInfo;
pVBInfo->pOutputSelect = &XGI330_OutputSelect;
pVBInfo->pSoftSetting = &XGI330_SoftSetting;
@@ -138,7 +138,7 @@ void InitTo330Pointer(unsigned char ChipType, struct vb_device_info *pVBInfo)
pVBInfo->UpdateCRT1 = (struct XGI_XG21CRT1Struct *) XGI_UpdateCRT1Table;
/* 310 customization related */
- if ((pVBInfo->VBType & VB_XGI301LV) || (pVBInfo->VBType & VB_XGI302LV))
+ if ((pVBInfo->VBType & VB_SIS301LV) || (pVBInfo->VBType & VB_SIS302LV))
pVBInfo->LCDCapList = XGI_LCDDLCapList;
else
pVBInfo->LCDCapList = XGI_LCDCapList;
@@ -153,7 +153,7 @@ void InitTo330Pointer(unsigned char ChipType, struct vb_device_info *pVBInfo)
if (ChipType == XG27) {
pVBInfo->MCLKData
- = (struct XGI_MCLKDataStruct *) XGI27New_MCLKData;
+ = (struct SiS_MCLKData *) XGI27New_MCLKData;
pVBInfo->CR40 = XGI27_cr41;
pVBInfo->pXGINew_CR97 = &XG27_CR97;
pVBInfo->pSR36 = &XG27_SR36;
@@ -208,8 +208,8 @@ static void XGI_SetSeqRegs(unsigned short ModeNo,
xgifb_reg_set(pVBInfo->P3c4, 0x00, 0x03); /* Set SR0 */
tempah = pVBInfo->StandTable[StandTableIndex].SR[0];
- i = SetCRT2ToLCDA;
- if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
+ i = XGI_SetCRT2ToLCDA;
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
tempah |= 0x01;
} else {
if (pVBInfo->VBInfo & (SetCRT2ToTV | SetCRT2ToLCD)) {
@@ -263,7 +263,7 @@ static void XGI_SetATTRegs(unsigned short ModeNo,
ARdata = pVBInfo->StandTable[StandTableIndex].ATTR[i];
if (modeflag & Charx8Dot) { /* ifndef Dot9 */
if (i == 0x13) {
- if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
ARdata = 0;
} else {
if (pVBInfo->VBInfo & (SetCRT2ToTV
@@ -356,11 +356,11 @@ static unsigned char XGI_AjustCRT2Rate(unsigned short ModeNo,
}
/* 301b */
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
tempax |= SupportLCD;
- if (pVBInfo->LCDResInfo != Panel1280x1024) {
- if (pVBInfo->LCDResInfo != Panel1280x960) {
+ if (pVBInfo->LCDResInfo != Panel_1280x1024) {
+ if (pVBInfo->LCDResInfo != Panel_1280x960) {
if (pVBInfo->LCDInfo &
LCDNonExpanding) {
if (resinfo >= 9) {
@@ -372,10 +372,10 @@ static unsigned char XGI_AjustCRT2Rate(unsigned short ModeNo,
}
}
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) { /* for HiTV */
- if ((pVBInfo->VBType & VB_XGI301LV) &&
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision) { /* for HiTV */
+ if ((pVBInfo->VBType & VB_SIS301LV) &&
(pVBInfo->VBExtInfo == VB_YPbPr1080i)) {
- tempax |= SupportYPbPr;
+ tempax |= SupportYPbPr750p;
if (pVBInfo->VBInfo & SetInSlaveMode) {
if (resinfo == 4)
return 0;
@@ -387,7 +387,7 @@ static unsigned char XGI_AjustCRT2Rate(unsigned short ModeNo,
return 0;
}
} else {
- tempax |= SupportHiVisionTV;
+ tempax |= SupportHiVision;
if (pVBInfo->VBInfo & SetInSlaveMode) {
if (resinfo == 4)
return 0;
@@ -406,17 +406,17 @@ static unsigned char XGI_AjustCRT2Rate(unsigned short ModeNo,
if (pVBInfo->VBInfo & (SetCRT2ToAVIDEO |
SetCRT2ToSVIDEO |
SetCRT2ToSCART |
- SetCRT2ToYPbPr |
- SetCRT2ToHiVisionTV)) {
+ SetCRT2ToYPbPr525750 |
+ SetCRT2ToHiVision)) {
tempax |= SupportTV;
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B
- | VB_XGI301LV | VB_XGI302LV
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B
+ | VB_SIS301LV | VB_SIS302LV
| VB_XGI301C)) {
tempax |= SupportTV1024;
}
- if (!(pVBInfo->VBInfo & SetPALTV)) {
+ if (!(pVBInfo->VBInfo & TVSetPAL)) {
if (modeflag & NoSupportSimuTV) {
if (pVBInfo->VBInfo &
SetInSlaveMode) {
@@ -436,7 +436,7 @@ static unsigned char XGI_AjustCRT2Rate(unsigned short ModeNo,
if (resinfo > 0x08)
return 0; /* 1024x768 */
- if (pVBInfo->LCDResInfo < Panel1024x768) {
+ if (pVBInfo->LCDResInfo < Panel_1024x768) {
if (resinfo > 0x07)
return 0; /* 800x600 */
@@ -1230,23 +1230,23 @@ static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeNo,
struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
- unsigned short LCDXlat1VCLK[4] = { VCLK65 + 2,
- VCLK65 + 2,
- VCLK65 + 2,
- VCLK65 + 2 };
- unsigned short LCDXlat2VCLK[4] = { VCLK108_2 + 5,
- VCLK108_2 + 5,
- VCLK108_2 + 5,
- VCLK108_2 + 5 };
+ unsigned short LCDXlat1VCLK[4] = { VCLK65_315 + 2,
+ VCLK65_315 + 2,
+ VCLK65_315 + 2,
+ VCLK65_315 + 2 };
+ unsigned short LCDXlat2VCLK[4] = { VCLK108_2_315 + 5,
+ VCLK108_2_315 + 5,
+ VCLK108_2_315 + 5,
+ VCLK108_2_315 + 5 };
unsigned short LVDSXlat1VCLK[4] = { VCLK40, VCLK40, VCLK40, VCLK40 };
- unsigned short LVDSXlat2VCLK[4] = { VCLK65 + 2,
- VCLK65 + 2,
- VCLK65 + 2,
- VCLK65 + 2 };
- unsigned short LVDSXlat3VCLK[4] = { VCLK65 + 2,
- VCLK65 + 2,
- VCLK65 + 2,
- VCLK65 + 2 };
+ unsigned short LVDSXlat2VCLK[4] = { VCLK65_315 + 2,
+ VCLK65_315 + 2,
+ VCLK65_315 + 2,
+ VCLK65_315 + 2 };
+ unsigned short LVDSXlat3VCLK[4] = { VCLK65_315 + 2,
+ VCLK65_315 + 2,
+ VCLK65_315 + 2,
+ VCLK65_315 + 2 };
unsigned short CRT2Index, VCLKIndex;
unsigned short modeflag, resinfo;
@@ -1266,36 +1266,36 @@ static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeNo,
if (pVBInfo->IF_DEF_LVDS == 0) {
CRT2Index = CRT2Index >> 6; /* for LCD */
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) { /*301b*/
- if (pVBInfo->LCDResInfo != Panel1024x768)
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) { /*301b*/
+ if (pVBInfo->LCDResInfo != Panel_1024x768)
VCLKIndex = LCDXlat2VCLK[CRT2Index];
else
VCLKIndex = LCDXlat1VCLK[CRT2Index];
- } else if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
+ } else if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
if (pVBInfo->SetFlag & RPLLDIV2XO) {
- VCLKIndex = HiTVVCLKDIV2;
+ VCLKIndex = TVCLKBASE_315 + HiTVVCLKDIV2;
VCLKIndex += 25;
} else {
- VCLKIndex = HiTVVCLK;
+ VCLKIndex = TVCLKBASE_315 + HiTVVCLK;
VCLKIndex += 25;
}
if (pVBInfo->SetFlag & TVSimuMode) {
if (modeflag & Charx8Dot) {
- VCLKIndex = HiTVSimuVCLK;
+ VCLKIndex = TVCLKBASE_315 + HiTVSimuVCLK;
VCLKIndex += 25;
} else {
- VCLKIndex = HiTVTextVCLK;
+ VCLKIndex = TVCLKBASE_315 + HiTVTextVCLK;
VCLKIndex += 25;
}
}
/* 301lv */
- if ((pVBInfo->VBType & VB_XGI301LV) &&
+ if ((pVBInfo->VBType & VB_SIS301LV) &&
!(pVBInfo->VBExtInfo == VB_YPbPr1080i)) {
- if (pVBInfo->VBExtInfo == VB_YPbPr750p)
- VCLKIndex = YPbPr750pVCLK;
- else if (pVBInfo->VBExtInfo == VB_YPbPr525p)
+ if (pVBInfo->VBExtInfo == YPbPr750p)
+ VCLKIndex = XGI_YPbPr750pVCLK;
+ else if (pVBInfo->VBExtInfo == YPbPr525p)
VCLKIndex = YPbPr525pVCLK;
else if (pVBInfo->SetFlag & RPLLDIV2XO)
VCLKIndex = YPbPr525iVCLK_2;
@@ -1304,10 +1304,10 @@ static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeNo,
}
} else if (pVBInfo->VBInfo & SetCRT2ToTV) {
if (pVBInfo->SetFlag & RPLLDIV2XO) {
- VCLKIndex = TVVCLKDIV2;
+ VCLKIndex = TVCLKBASE_315 + TVVCLKDIV2;
VCLKIndex += 25;
} else {
- VCLKIndex = TVVCLK;
+ VCLKIndex = TVCLKBASE_315 + TVVCLK;
VCLKIndex += 25;
}
} else { /* for CRT2 */
@@ -1329,11 +1329,11 @@ static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeNo,
VCLKIndex = CRT2Index;
VCLKIndex = VCLKIndex >> 6;
- if ((pVBInfo->LCDResInfo == Panel800x600) ||
- (pVBInfo->LCDResInfo == Panel320x480))
+ if ((pVBInfo->LCDResInfo == Panel_800x600) ||
+ (pVBInfo->LCDResInfo == Panel_320x480))
VCLKIndex = LVDSXlat1VCLK[VCLKIndex];
- else if ((pVBInfo->LCDResInfo == Panel1024x768) ||
- (pVBInfo->LCDResInfo == Panel1024x768x75))
+ else if ((pVBInfo->LCDResInfo == Panel_1024x768) ||
+ (pVBInfo->LCDResInfo == Panel_1024x768x75))
VCLKIndex = LVDSXlat2VCLK[VCLKIndex];
else
VCLKIndex = LVDSXlat3VCLK[VCLKIndex];
@@ -1360,9 +1360,9 @@ static void XGI_SetCRT1VCLK(unsigned short ModeNo,
xgifb_reg_set(pVBInfo->P3c4, 0x2C,
pVBInfo->VCLKData[index].SR2C);
xgifb_reg_set(pVBInfo->P3c4, 0x2D, 0x01);
- } else if ((pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) && (pVBInfo->VBInfo
- & SetCRT2ToLCDA)) {
+ } else if ((pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) && (pVBInfo->VBInfo
+ & XGI_SetCRT2ToLCDA)) {
vclkindex = XGI_GetVCLK2Ptr(ModeNo, ModeIdIndex,
RefreshRateTableIndex, HwDeviceExtension,
pVBInfo);
@@ -1801,7 +1801,7 @@ static void *XGI_GetLcdPtr(unsigned short BX, unsigned short ModeNo,
Ext_CRT2CRTC;
}
- if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
if (ModeNo <= 0x13)
tempal = pVBInfo->SModeIDTable[ModeIdIndex].
St_CRT2CRTC2;
@@ -2128,30 +2128,30 @@ static void *XGI_GetLcdPtr(unsigned short BX, unsigned short ModeNo,
return &XGI_CetLCDDes1024x768Data[tempal];
break;
case 3:
- if ((pVBInfo->VBType & VB_XGI301LV) ||
- (pVBInfo->VBType & VB_XGI302LV))
+ if ((pVBInfo->VBType & VB_SIS301LV) ||
+ (pVBInfo->VBType & VB_SIS302LV))
return &XGI_ExtLCDDLDes1280x1024Data[tempal];
else
return &XGI_ExtLCDDes1280x1024Data[tempal];
break;
case 4:
- if ((pVBInfo->VBType & VB_XGI301LV) ||
- (pVBInfo->VBType & VB_XGI302LV))
+ if ((pVBInfo->VBType & VB_SIS301LV) ||
+ (pVBInfo->VBType & VB_SIS302LV))
return &XGI_StLCDDLDes1280x1024Data[tempal];
else
return &XGI_StLCDDes1280x1024Data[tempal];
break;
case 5:
- if ((pVBInfo->VBType & VB_XGI301LV) ||
- (pVBInfo->VBType & VB_XGI302LV))
+ if ((pVBInfo->VBType & VB_SIS301LV) ||
+ (pVBInfo->VBType & VB_SIS302LV))
return &XGI_CetLCDDLDes1280x1024Data[tempal];
else
return &XGI_CetLCDDes1280x1024Data[tempal];
break;
case 6:
case 7:
- if ((pVBInfo->VBType & VB_XGI301LV) ||
- (pVBInfo->VBType & VB_XGI302LV))
+ if ((pVBInfo->VBType & VB_SIS301LV) ||
+ (pVBInfo->VBType & VB_SIS302LV))
return &xgifb_lcddldes_1400x1050[tempal];
else
return &xgifb_lcddes_1400x1050[tempal];
@@ -2163,15 +2163,15 @@ static void *XGI_GetLcdPtr(unsigned short BX, unsigned short ModeNo,
return &XGI_CetLCDDes1400x1050Data2[tempal];
break;
case 10:
- if ((pVBInfo->VBType & VB_XGI301LV) ||
- (pVBInfo->VBType & VB_XGI302LV))
+ if ((pVBInfo->VBType & VB_SIS301LV) ||
+ (pVBInfo->VBType & VB_SIS302LV))
return &XGI_ExtLCDDLDes1600x1200Data[tempal];
else
return &XGI_ExtLCDDes1600x1200Data[tempal];
break;
case 11:
- if ((pVBInfo->VBType & VB_XGI301LV) ||
- (pVBInfo->VBType & VB_XGI302LV))
+ if ((pVBInfo->VBType & VB_SIS301LV) ||
+ (pVBInfo->VBType & VB_SIS302LV))
return &XGI_StLCDDLDes1600x1200Data[tempal];
else
return &XGI_StLCDDes1600x1200Data[tempal];
@@ -2188,15 +2188,15 @@ static void *XGI_GetLcdPtr(unsigned short BX, unsigned short ModeNo,
break;
case 16:
case 17:
- if ((pVBInfo->VBType & VB_XGI301LV) ||
- (pVBInfo->VBType & VB_XGI302LV))
+ if ((pVBInfo->VBType & VB_SIS301LV) ||
+ (pVBInfo->VBType & VB_SIS302LV))
return &xgifb_lcddldes_1280x1024x75[tempal];
else
return &xgifb_lcddes_1280x1024x75[tempal];
break;
case 18:
- if ((pVBInfo->VBType & VB_XGI301LV) ||
- (pVBInfo->VBType & VB_XGI302LV))
+ if ((pVBInfo->VBType & VB_SIS301LV) ||
+ (pVBInfo->VBType & VB_SIS302LV))
return &XGI_CetLCDDLDes1280x1024x75Data[tempal];
else
return &XGI_CetLCDDes1280x1024x75Data[tempal];
@@ -2364,7 +2364,7 @@ static void XGI_GetLVDSData(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx = 2;
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
LCDPtr = (struct XGI330_LVDSDataStruct *) XGI_GetLcdPtr(tempbx,
ModeNo, ModeIdIndex, RefreshRateTableIndex,
pVBInfo);
@@ -2374,18 +2374,18 @@ static void XGI_GetLVDSData(unsigned short ModeNo, unsigned short ModeIdIndex,
pVBInfo->VT = LCDPtr->LCDVT;
}
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
if (!(pVBInfo->LCDInfo & (SetLCDtoNonExpanding
| EnableScalingLCD))) {
- if ((pVBInfo->LCDResInfo == Panel1024x768) ||
- (pVBInfo->LCDResInfo == Panel1024x768x75)) {
+ if ((pVBInfo->LCDResInfo == Panel_1024x768) ||
+ (pVBInfo->LCDResInfo == Panel_1024x768x75)) {
pVBInfo->HDE = 1024;
pVBInfo->VDE = 768;
- } else if ((pVBInfo->LCDResInfo == Panel1280x1024) ||
- (pVBInfo->LCDResInfo == Panel1280x1024x75)) {
+ } else if ((pVBInfo->LCDResInfo == Panel_1280x1024) ||
+ (pVBInfo->LCDResInfo == Panel_1280x1024x75)) {
pVBInfo->HDE = 1280;
pVBInfo->VDE = 1024;
- } else if (pVBInfo->LCDResInfo == Panel1400x1050) {
+ } else if (pVBInfo->LCDResInfo == Panel_1400x1050) {
pVBInfo->HDE = 1400;
pVBInfo->VDE = 1050;
} else {
@@ -2415,7 +2415,7 @@ static void XGI_ModCRT1Regs(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx = 0;
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
LCDPtr = (struct XGI_LVDSCRT1HDataStruct *)
XGI_GetLcdPtr(tempbx, ModeNo,
ModeIdIndex,
@@ -2430,7 +2430,7 @@ static void XGI_ModCRT1Regs(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx = 1;
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
LCDPtr1 = (struct XGI_LVDSCRT1VDataStruct *)
XGI_GetLcdPtr(
tempbx,
@@ -2496,7 +2496,7 @@ static unsigned short XGI_GetLCDCapPtr1(struct vb_device_info *pVBInfo)
}
if (tempbl == 0xFF) {
- pVBInfo->LCDResInfo = Panel1024x768;
+ pVBInfo->LCDResInfo = Panel_1024x768;
pVBInfo->LCDTypeInfo = 0;
i = 0;
}
@@ -2556,15 +2556,15 @@ static void XGI_SetLVDSRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
push2 = tempax;
/* GetLCDResInfo */
- if ((pVBInfo->LCDResInfo == Panel1024x768) ||
- (pVBInfo->LCDResInfo == Panel1024x768x75)) {
+ if ((pVBInfo->LCDResInfo == Panel_1024x768) ||
+ (pVBInfo->LCDResInfo == Panel_1024x768x75)) {
tempax = 1024;
tempbx = 768;
- } else if ((pVBInfo->LCDResInfo == Panel1280x1024) ||
- (pVBInfo->LCDResInfo == Panel1280x1024x75)) {
+ } else if ((pVBInfo->LCDResInfo == Panel_1280x1024) ||
+ (pVBInfo->LCDResInfo == Panel_1280x1024x75)) {
tempax = 1280;
tempbx = 1024;
- } else if (pVBInfo->LCDResInfo == Panel1400x1050) {
+ } else if (pVBInfo->LCDResInfo == Panel_1400x1050) {
tempax = 1400;
tempbx = 1050;
} else {
@@ -2682,7 +2682,7 @@ static void XGI_SetLVDSRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
if (tempbx != pVBInfo->VDE)
tempax |= 0x40;
- if (pVBInfo->LCDInfo & EnableLVDSDDA)
+ if (pVBInfo->LCDInfo & XGI_EnableLVDSDDA)
tempax |= 0x40;
xgifb_reg_and_or(pVBInfo->Part1Port, 0x1a, 0x07,
@@ -2768,7 +2768,7 @@ static void XGI_SetLVDSRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
temp1 = temp1 / push3;
tempbx = (unsigned short) (temp1 & 0xffff);
- if (pVBInfo->LCDResInfo == Panel1024x768)
+ if (pVBInfo->LCDResInfo == Panel_1024x768)
tempbx -= 1;
tempax = ((tempbx >> 8) & 0xff) << 3;
@@ -2800,7 +2800,7 @@ static void XGI_GetLCDVCLKPtr(unsigned char *di_0, unsigned char *di_1,
{
unsigned short index;
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
index = XGI_GetLCDCapPtr1(pVBInfo);
if (pVBInfo->VBInfo & SetCRT2ToLCD) { /* LCDB */
@@ -2834,35 +2834,35 @@ static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex,
index = XGI_GetLCDCapPtr(pVBInfo);
tempal = pVBInfo->LCDCapList[index].LCD_VCLK;
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA))
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA))
return tempal;
/* {TV} */
if (pVBInfo->VBType &
- (VB_XGI301B |
- VB_XGI302B |
- VB_XGI301LV |
- VB_XGI302LV |
+ (VB_SIS301B |
+ VB_SIS302B |
+ VB_SIS301LV |
+ VB_SIS302LV |
VB_XGI301C)) {
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
- tempal = HiTVVCLKDIV2;
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
+ tempal = TVCLKBASE_315 + HiTVVCLKDIV2;
if (!(pVBInfo->TVInfo & RPLLDIV2XO))
- tempal = HiTVVCLK;
+ tempal = TVCLKBASE_315 + HiTVVCLK;
if (pVBInfo->TVInfo & TVSimuMode) {
- tempal = HiTVSimuVCLK;
+ tempal = TVCLKBASE_315 + HiTVSimuVCLK;
if (!(modeflag & Charx8Dot))
- tempal = HiTVTextVCLK;
+ tempal = TVCLKBASE_315 + HiTVTextVCLK;
}
return tempal;
}
- if (pVBInfo->TVInfo & SetYPbPrMode750p) {
- tempal = YPbPr750pVCLK;
+ if (pVBInfo->TVInfo & TVSetYPbPr750p) {
+ tempal = XGI_YPbPr750pVCLK;
return tempal;
}
- if (pVBInfo->TVInfo & SetYPbPrMode525p) {
+ if (pVBInfo->TVInfo & TVSetYPbPr525p) {
tempal = YPbPr525pVCLK;
return tempal;
}
@@ -2870,9 +2870,9 @@ static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex,
tempal = NTSC1024VCLK;
if (!(pVBInfo->TVInfo & NTSC1024x768)) {
- tempal = TVVCLKDIV2;
+ tempal = TVCLKBASE_315 + TVVCLKDIV2;
if (!(pVBInfo->TVInfo & RPLLDIV2XO))
- tempal = TVVCLK;
+ tempal = TVCLKBASE_315 + TVVCLK;
}
if (pVBInfo->VBInfo & SetCRT2ToTV)
@@ -2898,9 +2898,9 @@ static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex,
static void XGI_GetVCLKLen(unsigned char tempal, unsigned char *di_0,
unsigned char *di_1, struct vb_device_info *pVBInfo)
{
- if (pVBInfo->VBType & (VB_XGI301 | VB_XGI301B | VB_XGI302B
- | VB_XGI301LV | VB_XGI302LV | VB_XGI301C)) {
- if ((!(pVBInfo->VBInfo & SetCRT2ToLCDA)) && (pVBInfo->SetFlag
+ if (pVBInfo->VBType & (VB_SIS301 | VB_SIS301B | VB_SIS302B
+ | VB_SIS301LV | VB_SIS302LV | VB_XGI301C)) {
+ if ((!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)) && (pVBInfo->SetFlag
& ProgrammingCRT2)) {
*di_0 = (unsigned char) XGI_VBVCLKData[tempal].SR2B;
*di_1 = XGI_VBVCLKData[tempal].SR2C;
@@ -2926,7 +2926,7 @@ static void XGI_SetCRT2ECLK(unsigned short ModeNo, unsigned short ModeIdIndex,
for (i = 0; i < 4; i++) {
xgifb_reg_and_or(pVBInfo->P3d4, 0x31, ~0x30,
(unsigned short) (0x10 * i));
- if ((!(pVBInfo->VBInfo & SetCRT2ToLCDA))
+ if ((!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA))
&& (!(pVBInfo->VBInfo & SetInSlaveMode))) {
xgifb_reg_set(pVBInfo->P3c4, 0x2e, di_0);
xgifb_reg_set(pVBInfo->P3c4, 0x2f, di_1);
@@ -2942,8 +2942,8 @@ static void XGI_UpdateModeInfo(struct xgi_hw_device_info *HwDeviceExtension,
{
unsigned short tempcl, tempch, temp, tempbl, tempax;
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
tempcl = 0;
tempch = 0;
temp = xgifb_reg_get(pVBInfo->P3c4, 0x01);
@@ -2987,12 +2987,12 @@ static void XGI_UpdateModeInfo(struct xgi_hw_device_info *HwDeviceExtension,
if (temp & 0x02)
tempch |= ActiveSCART;
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
if (temp & 0x01)
tempch |= ActiveHiTV;
}
- if (pVBInfo->VBInfo & SetCRT2ToYPbPr) {
+ if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) {
temp = xgifb_reg_get(
pVBInfo->Part2Port,
0x4d);
@@ -3014,7 +3014,7 @@ static void XGI_UpdateModeInfo(struct xgi_hw_device_info *HwDeviceExtension,
}
}
temp = tempcl;
- tempbl = ~ModeSwitchStatus;
+ tempbl = ~XGI_ModeSwitchStatus;
xgifb_reg_and_or(pVBInfo->P3d4, 0x3d, tempbl, temp);
if (!(pVBInfo->SetFlag & ReserveTVOption))
@@ -3029,19 +3029,19 @@ void XGI_GetVBType(struct vb_device_info *pVBInfo)
unsigned short flag, tempbx, tempah;
if (pVBInfo->IF_DEF_LVDS == 0) {
- tempbx = VB_XGI302B;
+ tempbx = VB_SIS302B;
flag = xgifb_reg_get(pVBInfo->Part4Port, 0x00);
if (flag != 0x02) {
- tempbx = VB_XGI301;
+ tempbx = VB_SIS301;
flag = xgifb_reg_get(pVBInfo->Part4Port, 0x01);
if (flag >= 0xB0) {
- tempbx = VB_XGI301B;
+ tempbx = VB_SIS301B;
if (flag >= 0xC0) {
tempbx = VB_XGI301C;
if (flag >= 0xD0) {
- tempbx = VB_XGI301LV;
+ tempbx = VB_SIS301LV;
if (flag >= 0xE0) {
- tempbx = VB_XGI302LV;
+ tempbx = VB_SIS302LV;
tempah = xgifb_reg_get(
pVBInfo->Part4Port,
0x39);
@@ -3052,7 +3052,7 @@ void XGI_GetVBType(struct vb_device_info *pVBInfo)
}
}
- if (tempbx & (VB_XGI301B | VB_XGI302B)) {
+ if (tempbx & (VB_SIS301B | VB_SIS302B)) {
flag = xgifb_reg_get(
pVBInfo->Part4Port,
0x23);
@@ -3078,7 +3078,7 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
pVBInfo->SetFlag = 0;
- pVBInfo->ModeType = modeflag & ModeInfoFlag;
+ pVBInfo->ModeType = modeflag & ModeTypeMask;
tempbx = 0;
if (pVBInfo->VBType & 0xFFFF) {
@@ -3090,7 +3090,7 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
push = push << 8;
tempax = temp << 8;
tempbx = tempbx | tempax;
- temp = (SetCRT2ToDualEdge | SetCRT2ToYPbPr | SetCRT2ToLCDA
+ temp = (SetCRT2ToDualEdge | SetCRT2ToYPbPr525750 | XGI_SetCRT2ToLCDA
| SetInSlaveMode | DisableCRT2Display);
temp = 0xFFFF ^ temp;
tempbx &= temp;
@@ -3103,9 +3103,9 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
(HwDeviceExtension->jChipType >= XG40)) {
if (pVBInfo->IF_DEF_LVDS == 0) {
if (pVBInfo->VBType &
- (VB_XGI302B |
- VB_XGI301LV |
- VB_XGI302LV |
+ (VB_SIS302B |
+ VB_SIS301LV |
+ VB_SIS302LV |
VB_XGI301C)) {
if (temp & EnableDualEdge) {
tempbx |=
@@ -3113,7 +3113,7 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
if (temp & SetToLCDA)
tempbx |=
- SetCRT2ToLCDA;
+ XGI_SetCRT2ToLCDA;
}
}
}
@@ -3123,8 +3123,8 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
if (pVBInfo->IF_DEF_YPbPr == 1) {
/* [Billy] 07/05/04 */
if (((pVBInfo->IF_DEF_LVDS == 0) &&
- ((pVBInfo->VBType & VB_XGI301LV) ||
- (pVBInfo->VBType & VB_XGI302LV) ||
+ ((pVBInfo->VBType & VB_SIS301LV) ||
+ (pVBInfo->VBType & VB_SIS302LV) ||
(pVBInfo->VBType & VB_XGI301C)))) {
if (temp & SetYPbPr) {
if (pVBInfo->IF_DEF_HiVision == 1) {
@@ -3134,13 +3134,13 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
pVBInfo->P3d4,
0x35);
temp &= YPbPrMode;
- tempbx |= SetCRT2ToHiVisionTV;
+ tempbx |= SetCRT2ToHiVision;
if (temp != YPbPrMode1080i) {
tempbx &=
- (~SetCRT2ToHiVisionTV);
+ (~SetCRT2ToHiVision);
tempbx |=
- SetCRT2ToYPbPr;
+ SetCRT2ToYPbPr525750;
}
}
}
@@ -3172,30 +3172,30 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
if (pVBInfo->IF_DEF_LCDA == 1) { /* Select Display Device */
if (!(pVBInfo->VBType & VB_NoLCD)) {
- if (tempbx & SetCRT2ToLCDA) {
+ if (tempbx & XGI_SetCRT2ToLCDA) {
if (tempbx & SetSimuScanMode)
tempbx &= (~(SetCRT2ToLCD |
SetCRT2ToRAMDAC |
- SwitchToCRT2));
+ SwitchCRT2));
else
tempbx &= (~(SetCRT2ToLCD |
SetCRT2ToRAMDAC |
SetCRT2ToTV |
- SwitchToCRT2));
+ SwitchCRT2));
}
}
}
/* shampoo add */
/* for driver abnormal */
- if (!(tempbx & (SwitchToCRT2 | SetSimuScanMode))) {
+ if (!(tempbx & (SwitchCRT2 | SetSimuScanMode))) {
if (pVBInfo->IF_DEF_CRT2Monitor == 1) {
if (tempbx & SetCRT2ToRAMDAC) {
tempbx &= (0xFF00 |
SetCRT2ToRAMDAC |
- SwitchToCRT2 |
+ SwitchCRT2 |
SetSimuScanMode);
- tempbx &= (0x00FF | (~SetCRT2ToYPbPr));
+ tempbx &= (0x00FF | (~SetCRT2ToYPbPr525750));
}
} else {
tempbx &= (~(SetCRT2ToRAMDAC |
@@ -3208,37 +3208,37 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
if (tempbx & SetCRT2ToLCD) {
tempbx &= (0xFF00 |
SetCRT2ToLCD |
- SwitchToCRT2 |
+ SwitchCRT2 |
SetSimuScanMode);
- tempbx &= (0x00FF | (~SetCRT2ToYPbPr));
+ tempbx &= (0x00FF | (~SetCRT2ToYPbPr525750));
}
}
if (tempbx & SetCRT2ToSCART) {
tempbx &= (0xFF00 |
SetCRT2ToSCART |
- SwitchToCRT2 |
+ SwitchCRT2 |
SetSimuScanMode);
- tempbx &= (0x00FF | (~SetCRT2ToYPbPr));
+ tempbx &= (0x00FF | (~SetCRT2ToYPbPr525750));
}
if (pVBInfo->IF_DEF_YPbPr == 1) {
- if (tempbx & SetCRT2ToYPbPr)
+ if (tempbx & SetCRT2ToYPbPr525750)
tempbx &= (0xFF00 |
- SwitchToCRT2 |
+ SwitchCRT2 |
SetSimuScanMode);
}
if (pVBInfo->IF_DEF_HiVision == 1) {
- if (tempbx & SetCRT2ToHiVisionTV)
+ if (tempbx & SetCRT2ToHiVision)
tempbx &= (0xFF00 |
- SetCRT2ToHiVisionTV |
- SwitchToCRT2 |
+ SetCRT2ToHiVision |
+ SwitchCRT2 |
SetSimuScanMode);
}
if (tempax & DisableCRT2Display) { /* Set Display Device Info */
- if (!(tempbx & (SwitchToCRT2 | SetSimuScanMode)))
+ if (!(tempbx & (SwitchCRT2 | SetSimuScanMode)))
tempbx = DisableCRT2Display;
}
@@ -3246,7 +3246,7 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
if ((!(tempbx & DriverMode)) ||
(!(modeflag & CRT2Mode))) {
if (pVBInfo->IF_DEF_LCDA == 1) {
- if (!(tempbx & SetCRT2ToLCDA))
+ if (!(tempbx & XGI_SetCRT2ToLCDA))
tempbx |= (SetInSlaveMode |
SetSimuScanMode);
}
@@ -3255,9 +3255,9 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
/* LCD+TV can't support in slave mode
* (Force LCDA+TV->LCDB) */
if ((tempbx & SetInSlaveMode) &&
- (tempbx & SetCRT2ToLCDA)) {
+ (tempbx & XGI_SetCRT2ToLCDA)) {
tempbx ^= (SetCRT2ToLCD |
- SetCRT2ToLCDA |
+ XGI_SetCRT2ToLCDA |
SetCRT2ToDualEdge);
pVBInfo->SetFlag |= ReserveTVOption;
}
@@ -3291,43 +3291,43 @@ static void XGI_GetTVInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
if (pVBInfo->VBInfo & SetCRT2ToTV) {
temp = xgifb_reg_get(pVBInfo->P3d4, 0x35);
tempbx = temp;
- if (tempbx & SetPALTV) {
+ if (tempbx & TVSetPAL) {
tempbx &= (SetCHTVOverScan |
- SetPALMTV |
- SetPALNTV |
- SetPALTV);
- if (tempbx & SetPALMTV)
+ TVSetPALM |
+ TVSetPALN |
+ TVSetPAL);
+ if (tempbx & TVSetPALM)
/* set to NTSC if PAL-M */
- tempbx &= ~SetPALTV;
+ tempbx &= ~TVSetPAL;
} else
tempbx &= (SetCHTVOverScan |
- SetNTSCJ |
- SetPALTV);
+ TVSetNTSCJ |
+ TVSetPAL);
}
if (pVBInfo->IF_DEF_LVDS == 0) {
if (pVBInfo->VBInfo & SetCRT2ToSCART)
- tempbx |= SetPALTV;
+ tempbx |= TVSetPAL;
}
if (pVBInfo->IF_DEF_YPbPr == 1) {
- if (pVBInfo->VBInfo & SetCRT2ToYPbPr) {
+ if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) {
index1 = xgifb_reg_get(pVBInfo->P3d4, 0x35);
index1 &= YPbPrMode;
if (index1 == YPbPrMode525i)
- tempbx |= SetYPbPrMode525i;
+ tempbx |= TVSetYPbPr525i;
if (index1 == YPbPrMode525p)
- tempbx = tempbx | SetYPbPrMode525p;
+ tempbx = tempbx | TVSetYPbPr525p;
if (index1 == YPbPrMode750p)
- tempbx = tempbx | SetYPbPrMode750p;
+ tempbx = tempbx | TVSetYPbPr750p;
}
}
if (pVBInfo->IF_DEF_HiVision == 1) {
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV)
- tempbx = tempbx | SetYPbPrMode1080i | SetPALTV;
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision)
+ tempbx = tempbx | TVSetHiVision | TVSetPAL;
}
if (pVBInfo->IF_DEF_LVDS == 0) { /* shampoo */
@@ -3335,25 +3335,25 @@ static void XGI_GetTVInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
(!(pVBInfo->VBInfo & SetNotSimuMode)))
tempbx |= TVSimuMode;
- if (!(tempbx & SetPALTV) &&
+ if (!(tempbx & TVSetPAL) &&
(modeflag > 13) &&
(resinfo == 8)) /* NTSC 1024x768, */
tempbx |= NTSC1024x768;
tempbx |= RPLLDIV2XO;
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
if (pVBInfo->VBInfo & SetInSlaveMode)
tempbx &= (~RPLLDIV2XO);
} else {
if (tempbx &
- (SetYPbPrMode525p | SetYPbPrMode750p))
+ (TVSetYPbPr525p | TVSetYPbPr750p))
tempbx &= (~RPLLDIV2XO);
else if (!(pVBInfo->VBType &
- (VB_XGI301B |
- VB_XGI302B |
- VB_XGI301LV |
- VB_XGI302LV |
+ (VB_SIS301B |
+ VB_SIS302B |
+ VB_SIS301LV |
+ VB_SIS302LV |
VB_XGI301C))) {
if (tempbx & TVSimuMode)
tempbx &= (~RPLLDIV2XO);
@@ -3386,13 +3386,13 @@ static unsigned char XGI_GetLCDInfo(unsigned short ModeNo,
tempbx = temp & 0x0F;
if (tempbx == 0)
- tempbx = Panel1024x768; /* default */
+ tempbx = Panel_1024x768; /* default */
/* LCD75 [2003/8/22] Vicent */
- if ((tempbx == Panel1024x768) || (tempbx == Panel1280x1024)) {
+ if ((tempbx == Panel_1024x768) || (tempbx == Panel_1280x1024)) {
if (pVBInfo->VBInfo & DriverMode) {
tempax = xgifb_reg_get(pVBInfo->P3d4, 0x33);
- if (pVBInfo->VBInfo & SetCRT2ToLCDA)
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)
tempax &= 0x0F;
else
tempax = tempax >> 4;
@@ -3411,7 +3411,7 @@ static unsigned char XGI_GetLCDInfo(unsigned short ModeNo,
/* End of LCD75 */
- if (!(pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)))
+ if (!(pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)))
return 0;
tempbx = 0;
@@ -3427,30 +3427,30 @@ static unsigned char XGI_GetLCDInfo(unsigned short ModeNo,
tempax = pVBInfo->LCDCapList[LCDIdIndex].LCD_Capability;
if (pVBInfo->IF_DEF_LVDS == 0) { /* shampoo */
- if (((pVBInfo->VBType & VB_XGI302LV) || (pVBInfo->VBType
- & VB_XGI301C)) && (tempax & LCDDualLink)) {
+ if (((pVBInfo->VBType & VB_SIS302LV) || (pVBInfo->VBType
+ & VB_XGI301C)) && (tempax & XGI_LCDDualLink)) {
tempbx |= SetLCDDualLink;
}
}
if (pVBInfo->IF_DEF_LVDS == 0) {
- if ((pVBInfo->LCDResInfo == Panel1400x1050) && (pVBInfo->VBInfo
+ if ((pVBInfo->LCDResInfo == Panel_1400x1050) && (pVBInfo->VBInfo
& SetCRT2ToLCD) && (ModeNo > 0x13) && (resinfo
== 9) && (!(tempbx & EnableScalingLCD)))
- /* set to center in 1280x1024 LCDB for Panel1400x1050 */
+ /* set to center in 1280x1024 LCDB for Panel_1400x1050 */
tempbx |= SetLCDtoNonExpanding;
}
if (pVBInfo->IF_DEF_ExpLink == 1) {
if (modeflag & HalfDCLK) {
if (!(tempbx & SetLCDtoNonExpanding)) {
- tempbx |= EnableLVDSDDA;
+ tempbx |= XGI_EnableLVDSDDA;
} else {
if (ModeNo > 0x13) {
if (pVBInfo->LCDResInfo
- == Panel1024x768) {
+ == Panel_1024x768) {
if (resinfo == 4) {/* 512x384 */
- tempbx |= EnableLVDSDDA;
+ tempbx |= XGI_EnableLVDSDDA;
}
}
}
@@ -3460,9 +3460,9 @@ static unsigned char XGI_GetLCDInfo(unsigned short ModeNo,
if (pVBInfo->VBInfo & SetInSlaveMode) {
if (pVBInfo->VBInfo & SetNotSimuMode)
- tempbx |= LCDVESATiming;
+ tempbx |= XGI_LCDVESATiming;
} else {
- tempbx |= LCDVESATiming;
+ tempbx |= XGI_LCDVESATiming;
}
pVBInfo->LCDInfo = tempbx;
@@ -3477,7 +3477,7 @@ static unsigned char XGI_GetLCDInfo(unsigned short ModeNo,
SetInSlaveMode |
SetCRT2ToLCD);
pVBInfo->VBInfo |=
- SetCRT2ToLCDA |
+ XGI_SetCRT2ToLCDA |
SetCRT2ToDualEdge;
}
}
@@ -3801,27 +3801,27 @@ static void XGI_GetCRT2ResInfo(unsigned short ModeNo,
if (pVBInfo->VBInfo & SetCRT2ToLCD) {
if (pVBInfo->IF_DEF_LVDS == 0) {
- if (pVBInfo->LCDResInfo == Panel1600x1200) {
- if (!(pVBInfo->LCDInfo & LCDVESATiming)) {
+ if (pVBInfo->LCDResInfo == Panel_1600x1200) {
+ if (!(pVBInfo->LCDInfo & XGI_LCDVESATiming)) {
if (yres == 1024)
yres = 1056;
}
}
- if (pVBInfo->LCDResInfo == Panel1280x1024) {
+ if (pVBInfo->LCDResInfo == Panel_1280x1024) {
if (yres == 400)
yres = 405;
else if (yres == 350)
yres = 360;
- if (pVBInfo->LCDInfo & LCDVESATiming) {
+ if (pVBInfo->LCDInfo & XGI_LCDVESATiming) {
if (yres == 360)
yres = 375;
}
}
- if (pVBInfo->LCDResInfo == Panel1024x768) {
- if (!(pVBInfo->LCDInfo & LCDVESATiming)) {
+ if (pVBInfo->LCDResInfo == Panel_1024x768) {
+ if (!(pVBInfo->LCDInfo & XGI_LCDVESATiming)) {
if (!(pVBInfo->LCDInfo
& LCDNonExpanding)) {
if (yres == 350)
@@ -3848,7 +3848,7 @@ static void XGI_GetCRT2ResInfo(unsigned short ModeNo,
static unsigned char XGI_IsLCDDualLink(struct vb_device_info *pVBInfo)
{
- if ((pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) &&
+ if ((pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) &&
(pVBInfo->LCDInfo & SetLCDDualLink)) /* shampoo0129 */
return 1;
@@ -3918,8 +3918,8 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
{
unsigned short tempax = 0, tempbx, modeflag, resinfo;
- struct XGI_LCDDataStruct *LCDPtr = NULL;
- struct XGI_TVDataStruct *TVPtr = NULL;
+ struct SiS_LCDData *LCDPtr = NULL;
+ struct SiS_TVData *TVPtr = NULL;
if (ModeNo <= 0x13) {
/* si+St_ResInfo */
@@ -3942,8 +3942,8 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx = 4;
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
- LCDPtr = (struct XGI_LCDDataStruct *) XGI_GetLcdPtr(tempbx,
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
+ LCDPtr = (struct SiS_LCDData *) XGI_GetLcdPtr(tempbx,
ModeNo, ModeIdIndex, RefreshRateTableIndex,
pVBInfo);
@@ -3954,11 +3954,11 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
pVBInfo->HT = LCDPtr->LCDHT;
pVBInfo->VT = LCDPtr->LCDVT;
- if (pVBInfo->LCDResInfo == Panel1024x768) {
+ if (pVBInfo->LCDResInfo == Panel_1024x768) {
tempax = 1024;
tempbx = 768;
- if (!(pVBInfo->LCDInfo & LCDVESATiming)) {
+ if (!(pVBInfo->LCDInfo & XGI_LCDVESATiming)) {
if (pVBInfo->VGAVDE == 357)
tempbx = 527;
else if (pVBInfo->VGAVDE == 420)
@@ -3971,10 +3971,10 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx = 768;
} else
tempbx = 768;
- } else if (pVBInfo->LCDResInfo == Panel1024x768x75) {
+ } else if (pVBInfo->LCDResInfo == Panel_1024x768x75) {
tempax = 1024;
tempbx = 768;
- } else if (pVBInfo->LCDResInfo == Panel1280x1024) {
+ } else if (pVBInfo->LCDResInfo == Panel_1280x1024) {
tempax = 1280;
if (pVBInfo->VGAVDE == 360)
tempbx = 768;
@@ -3984,10 +3984,10 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx = 864;
else
tempbx = 1024;
- } else if (pVBInfo->LCDResInfo == Panel1280x1024x75) {
+ } else if (pVBInfo->LCDResInfo == Panel_1280x1024x75) {
tempax = 1280;
tempbx = 1024;
- } else if (pVBInfo->LCDResInfo == Panel1280x960) {
+ } else if (pVBInfo->LCDResInfo == Panel_1280x960) {
tempax = 1280;
if (pVBInfo->VGAVDE == 350)
tempbx = 700;
@@ -3997,7 +3997,7 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx = 960;
else
tempbx = 960;
- } else if (pVBInfo->LCDResInfo == Panel1400x1050) {
+ } else if (pVBInfo->LCDResInfo == Panel_1400x1050) {
tempax = 1400;
tempbx = 1050;
@@ -4005,10 +4005,10 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
tempax = 1280;
tempbx = 1024;
}
- } else if (pVBInfo->LCDResInfo == Panel1600x1200) {
+ } else if (pVBInfo->LCDResInfo == Panel_1600x1200) {
tempax = 1600;
tempbx = 1200; /* alan 10/14/2003 */
- if (!(pVBInfo->LCDInfo & LCDVESATiming)) {
+ if (!(pVBInfo->LCDInfo & XGI_LCDVESATiming)) {
if (pVBInfo->VGAVDE == 350)
tempbx = 875;
else if (pVBInfo->VGAVDE == 400)
@@ -4028,7 +4028,7 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
if (pVBInfo->VBInfo & (SetCRT2ToTV)) {
tempbx = 4;
- TVPtr = (struct XGI_TVDataStruct *) XGI_GetTVPtr(tempbx,
+ TVPtr = (struct SiS_TVData *) XGI_GetTVPtr(tempbx,
ModeNo, ModeIdIndex, RefreshRateTableIndex,
pVBInfo);
@@ -4041,7 +4041,7 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
pVBInfo->RVBHRS = TVPtr->RVBHRS;
pVBInfo->NewFlickerMode = TVPtr->FlickerMode;
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
if (resinfo == 0x08)
pVBInfo->NewFlickerMode = 0x40;
else if (resinfo == 0x09)
@@ -4066,16 +4066,16 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
}
}
}
- } else if (pVBInfo->VBInfo & SetCRT2ToYPbPr) {
- if (pVBInfo->TVInfo & SetYPbPrMode750p) {
+ } else if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) {
+ if (pVBInfo->TVInfo & TVSetYPbPr750p) {
tempax = YPbPrTV750pHT; /* Ext750pTVHT */
tempbx = YPbPrTV750pVT; /* Ext750pTVVT */
}
- if (pVBInfo->TVInfo & SetYPbPrMode525p) {
+ if (pVBInfo->TVInfo & TVSetYPbPr525p) {
tempax = YPbPrTV525pHT; /* Ext525pTVHT */
tempbx = YPbPrTV525pVT; /* Ext525pTVVT */
- } else if (pVBInfo->TVInfo & SetYPbPrMode525i) {
+ } else if (pVBInfo->TVInfo & TVSetYPbPr525i) {
tempax = YPbPrTV525iHT; /* Ext525iTVHT */
tempbx = YPbPrTV525iVT; /* Ext525iTVVT */
if (pVBInfo->TVInfo & NTSC1024x768)
@@ -4084,7 +4084,7 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
} else {
tempax = PALHT;
tempbx = PALVT;
- if (!(pVBInfo->TVInfo & SetPALTV)) {
+ if (!(pVBInfo->TVInfo & TVSetPAL)) {
tempax = NTSCHT;
tempbx = NTSCVT;
if (pVBInfo->TVInfo & NTSC1024x768)
@@ -4109,7 +4109,7 @@ static void XGI_SetCRT2VCLK(unsigned short ModeNo, unsigned short ModeIdIndex,
XGI_GetVCLKLen(tempal, &di_0, &di_1, pVBInfo);
XGI_GetLCDVCLKPtr(&di_0, &di_1, pVBInfo);
- if (pVBInfo->VBType & VB_XGI301) { /* shampoo 0129 */
+ if (pVBInfo->VBType & VB_SIS301) { /* shampoo 0129 */
/* 301 */
xgifb_reg_set(pVBInfo->Part4Port, 0x0A, 0x10);
xgifb_reg_set(pVBInfo->Part4Port, 0x0B, di_1);
@@ -4139,7 +4139,7 @@ static unsigned short XGI_GetColorDepth(unsigned short ModeNo,
else
modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
- index = (modeflag & ModeInfoFlag) - ModeEGA;
+ index = (modeflag & ModeTypeMask) - ModeEGA;
if (index < 0)
index = 0;
@@ -4435,7 +4435,7 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
xgifb_reg_set(pVBInfo->Part1Port, 0x03, temp);
tempcx = 0x08;
- if (pVBInfo->VBType & (VB_XGI301LV | VB_XGI302LV | VB_XGI301C))
+ if (pVBInfo->VBType & (VB_SIS301LV | VB_SIS302LV | VB_XGI301C))
modeflag |= Charx8Dot;
tempax = pVBInfo->VGAHDE; /* 0x04 Horizontal Display End */
@@ -4451,12 +4451,12 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
temp = (tempbx & 0xFF00) >> 8;
if (pVBInfo->VBInfo & SetCRT2ToTV) {
- if (!(pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)))
+ if (!(pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)))
temp += 2;
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
- if (pVBInfo->VBType & VB_XGI301LV) {
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
+ if (pVBInfo->VBType & VB_SIS301LV) {
if (pVBInfo->VBExtInfo == VB_YPbPr1080i) {
if (resinfo == 7)
temp -= 2;
@@ -4487,7 +4487,7 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
tempax = (tempax / tempcx) - 5;
tempcx = tempax; /* 20030401 0x07 horizontal Retrace Start */
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
temp = (tempbx & 0x00FF) - 1;
if (!(modeflag & HalfDCLK)) {
temp -= 6;
@@ -4513,19 +4513,19 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
}
} else if (!(modeflag & HalfDCLK)) {
temp -= 4;
- if (pVBInfo->LCDResInfo != Panel1280x960 &&
+ if (pVBInfo->LCDResInfo != Panel_1280x960 &&
pVBInfo->VGAHDE >= 800) {
temp -= 7;
if (pVBInfo->ModeType == ModeEGA &&
pVBInfo->VGAVDE == 1024) {
temp += 15;
if (pVBInfo->LCDResInfo !=
- Panel1280x1024)
+ Panel_1280x1024)
temp += 7;
}
if (pVBInfo->VGAHDE >= 1280 &&
- pVBInfo->LCDResInfo != Panel1280x960 &&
+ pVBInfo->LCDResInfo != Panel_1280x960 &&
(pVBInfo->LCDInfo & LCDNonExpanding))
temp += 28;
}
@@ -4619,8 +4619,8 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
push2 = tempbx;
if (pVBInfo->VBInfo & SetCRT2ToLCD) {
- if (pVBInfo->LCDResInfo == Panel1024x768) {
- if (!(pVBInfo->LCDInfo & LCDVESATiming)) {
+ if (pVBInfo->LCDResInfo == Panel_1024x768) {
+ if (!(pVBInfo->LCDInfo & XGI_LCDVESATiming)) {
if (tempbx == 350)
tempbx += 5;
if (tempbx == 480)
@@ -4669,19 +4669,19 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx += tempax;
}
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
- if (pVBInfo->VBType & VB_XGI301LV) {
- if (pVBInfo->TVInfo & SetYPbPrMode1080i) {
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
+ if (pVBInfo->VBType & VB_SIS301LV) {
+ if (pVBInfo->TVInfo & TVSetHiVision) {
tempbx -= 10;
} else {
if (pVBInfo->TVInfo & TVSimuMode) {
- if (pVBInfo->TVInfo & SetPALTV) {
+ if (pVBInfo->TVInfo & TVSetPAL) {
if (pVBInfo->VBType &
- VB_XGI301LV) {
+ VB_SIS301LV) {
if (!(pVBInfo->TVInfo &
- (SetYPbPrMode525p |
- SetYPbPrMode750p |
- SetYPbPrMode1080i)))
+ (TVSetYPbPr525p |
+ TVSetYPbPr750p |
+ TVSetHiVision)))
tempbx += 40;
} else {
tempbx += 40;
@@ -4694,12 +4694,12 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
}
} else {
if (pVBInfo->TVInfo & TVSimuMode) {
- if (pVBInfo->TVInfo & SetPALTV) {
- if (pVBInfo->VBType & VB_XGI301LV) {
+ if (pVBInfo->TVInfo & TVSetPAL) {
+ if (pVBInfo->VBType & VB_SIS301LV) {
if (!(pVBInfo->TVInfo &
- (SetYPbPrMode525p |
- SetYPbPrMode750p |
- SetYPbPrMode1080i)))
+ (TVSetYPbPr525p |
+ TVSetYPbPr750p |
+ TVSetHiVision)))
tempbx += 40;
} else {
tempbx += 40;
@@ -4713,7 +4713,7 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
tempax += tempbx;
push1 = tempax; /* push ax */
- if ((pVBInfo->TVInfo & SetPALTV)) {
+ if ((pVBInfo->TVInfo & TVSetPAL)) {
if (tempbx <= 513) {
if (tempax >= 513)
tempbx = 513;
@@ -4761,7 +4761,7 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
temp = (temp >> 1) & 0x09;
- if (pVBInfo->VBType & (VB_XGI301LV | VB_XGI302LV | VB_XGI301C))
+ if (pVBInfo->VBType & (VB_SIS301LV | VB_SIS302LV | VB_XGI301C))
temp |= 0x01;
xgifb_reg_set(pVBInfo->Part1Port, 0x16, temp); /* 0x16 SR01 */
@@ -4813,13 +4813,13 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
if (pVBInfo->VBInfo & SetCRT2ToSCART)
tempax |= 0x0200;
- if (!(pVBInfo->TVInfo & SetPALTV))
+ if (!(pVBInfo->TVInfo & TVSetPAL))
tempax |= 0x1000;
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV)
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision)
tempax |= 0x0100;
- if (pVBInfo->TVInfo & (SetYPbPrMode525p | SetYPbPrMode750p))
+ if (pVBInfo->TVInfo & (TVSetYPbPr525p | TVSetYPbPr750p))
tempax &= 0xfe00;
tempax = (tempax & 0xff00) >> 8;
@@ -4827,10 +4827,10 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
xgifb_reg_set(pVBInfo->Part2Port, 0x0, tempax);
TimingPoint = pVBInfo->NTSCTiming;
- if (pVBInfo->TVInfo & SetPALTV)
+ if (pVBInfo->TVInfo & TVSetPAL)
TimingPoint = pVBInfo->PALTiming;
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
TimingPoint = pVBInfo->HiTVExtTiming;
if (pVBInfo->VBInfo & SetInSlaveMode)
@@ -4843,14 +4843,14 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
TimingPoint = pVBInfo->HiTVTextTiming;
}
- if (pVBInfo->VBInfo & SetCRT2ToYPbPr) {
- if (pVBInfo->TVInfo & SetYPbPrMode525i)
+ if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) {
+ if (pVBInfo->TVInfo & TVSetYPbPr525i)
TimingPoint = pVBInfo->YPbPr525iTiming;
- if (pVBInfo->TVInfo & SetYPbPrMode525p)
+ if (pVBInfo->TVInfo & TVSetYPbPr525p)
TimingPoint = pVBInfo->YPbPr525pTiming;
- if (pVBInfo->TVInfo & SetYPbPrMode750p)
+ if (pVBInfo->TVInfo & TVSetYPbPr750p)
TimingPoint = pVBInfo->YPbPr750pTiming;
}
@@ -4868,10 +4868,10 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
temp &= 0x80;
xgifb_reg_and_or(pVBInfo->Part2Port, 0x0A, 0xFF, temp);
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV)
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision)
tempax = 950;
- if (pVBInfo->TVInfo & SetPALTV)
+ if (pVBInfo->TVInfo & TVSetPAL)
tempax = 520;
else
tempax = 440;
@@ -4884,15 +4884,15 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
temp = (tempax & 0xFF00) >> 8;
temp += (unsigned short) TimingPoint[0];
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
if (pVBInfo->VBInfo & (SetCRT2ToAVIDEO
| SetCRT2ToSVIDEO | SetCRT2ToSCART
- | SetCRT2ToYPbPr)) {
+ | SetCRT2ToYPbPr525750)) {
tempcx = pVBInfo->VGAHDE;
if (tempcx >= 1024) {
temp = 0x17; /* NTSC */
- if (pVBInfo->TVInfo & SetPALTV)
+ if (pVBInfo->TVInfo & TVSetPAL)
temp = 0x19; /* PAL */
}
}
@@ -4903,15 +4903,15 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
temp = (tempax & 0xFF00) >> 8;
temp += TimingPoint[1];
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
if ((pVBInfo->VBInfo & (SetCRT2ToAVIDEO
| SetCRT2ToSVIDEO | SetCRT2ToSCART
- | SetCRT2ToYPbPr))) {
+ | SetCRT2ToYPbPr525750))) {
tempcx = pVBInfo->VGAHDE;
if (tempcx >= 1024) {
temp = 0x1D; /* NTSC */
- if (pVBInfo->TVInfo & SetPALTV)
+ if (pVBInfo->TVInfo & TVSetPAL)
temp = 0x52; /* PAL */
}
}
@@ -4936,7 +4936,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
push1 = tempcx; /* push cx */
tempcx += 7;
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV)
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision)
tempcx -= 4;
temp = tempcx & 0x00FF;
@@ -4954,7 +4954,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx = push2;
tempbx = tempbx + 8;
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
tempbx = tempbx - 4;
tempcx = tempbx;
}
@@ -4970,7 +4970,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
xgifb_reg_and_or(pVBInfo->Part2Port, 0x28, 0x0F, temp);
tempcx += 8;
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV)
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision)
tempcx -= 4;
temp = tempcx & 0xFF;
@@ -5005,9 +5005,9 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
if (pVBInfo->VBInfo & SetCRT2ToTV) {
if (pVBInfo->VBType &
- (VB_XGI301LV | VB_XGI302LV | VB_XGI301C)) {
+ (VB_SIS301LV | VB_SIS302LV | VB_XGI301C)) {
if (!(pVBInfo->TVInfo &
- (SetYPbPrMode525p | SetYPbPrMode750p)))
+ (TVSetYPbPr525p | TVSetYPbPr750p)))
tempbx = tempbx >> 1;
} else
tempbx = tempbx >> 1;
@@ -5016,9 +5016,9 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx -= 2;
temp = tempbx & 0x00FF;
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
- if (pVBInfo->VBType & VB_XGI301LV) {
- if (pVBInfo->TVInfo & SetYPbPrMode1080i) {
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
+ if (pVBInfo->VBType & VB_SIS301LV) {
+ if (pVBInfo->TVInfo & TVSetHiVision) {
if (pVBInfo->VBInfo & SetInSlaveMode) {
if (ModeNo == 0x2f)
temp += 1;
@@ -5037,9 +5037,9 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
temp = (tempcx & 0xFF00) >> 8;
temp |= ((tempbx & 0xFF00) >> 8) << 6;
- if (!(pVBInfo->VBInfo & SetCRT2ToHiVisionTV)) {
- if (pVBInfo->VBType & VB_XGI301LV) {
- if (pVBInfo->TVInfo & SetYPbPrMode1080i) {
+ if (!(pVBInfo->VBInfo & SetCRT2ToHiVision)) {
+ if (pVBInfo->VBType & VB_SIS301LV) {
+ if (pVBInfo->TVInfo & TVSetHiVision) {
temp |= 0x10;
if (!(pVBInfo->VBInfo & SetCRT2ToSVIDEO))
@@ -5054,18 +5054,18 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
xgifb_reg_set(pVBInfo->Part2Port, 0x30, temp);
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) { /* TV gatingno */
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) { /* TV gatingno */
tempbx = pVBInfo->VDE;
tempcx = tempbx - 2;
if (pVBInfo->VBInfo & SetCRT2ToTV) {
- if (!(pVBInfo->TVInfo & (SetYPbPrMode525p
- | SetYPbPrMode750p)))
+ if (!(pVBInfo->TVInfo & (TVSetYPbPr525p
+ | TVSetYPbPr750p)))
tempbx = tempbx >> 1;
}
- if (pVBInfo->VBType & (VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS302LV | VB_XGI301C)) {
temp = 0;
if (tempcx & 0x0400)
temp |= 0x20;
@@ -5118,8 +5118,8 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
/* 301b */
tempecx = 8 * 1024;
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
tempecx = tempecx * 8;
}
@@ -5133,8 +5133,8 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
tempax = (unsigned short) tempeax;
/* 301b */
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
tempcx = ((tempax & 0xFF00) >> 5) >> 8;
}
/* end 301b */
@@ -5161,7 +5161,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
temp |= 0x18;
xgifb_reg_and_or(pVBInfo->Part2Port, 0x46, ~0x1F, temp);
- if (pVBInfo->TVInfo & SetPALTV) {
+ if (pVBInfo->TVInfo & TVSetPAL) {
tempbx = 0x0382;
tempcx = 0x007e;
} else {
@@ -5178,13 +5178,13 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
temp = temp << 2;
temp |= ((tempbx & 0xFF00) >> 8) & 0x03;
- if (pVBInfo->VBInfo & SetCRT2ToYPbPr) {
+ if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) {
temp |= 0x10;
- if (pVBInfo->TVInfo & SetYPbPrMode525p)
+ if (pVBInfo->TVInfo & TVSetYPbPr525p)
temp |= 0x20;
- if (pVBInfo->TVInfo & SetYPbPrMode750p)
+ if (pVBInfo->TVInfo & TVSetYPbPr750p)
temp |= 0x60;
}
@@ -5192,7 +5192,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
temp = xgifb_reg_get(pVBInfo->Part2Port, 0x43); /* 301b change */
xgifb_reg_set(pVBInfo->Part2Port, 0x43, (unsigned short) (temp - 3));
- if (!(pVBInfo->TVInfo & (SetYPbPrMode525p | SetYPbPrMode750p))) {
+ if (!(pVBInfo->TVInfo & (TVSetYPbPr525p | TVSetYPbPr750p))) {
if (pVBInfo->TVInfo & NTSC1024x768) {
TimingPoint = XGI_NTSC1024AdjTime;
for (i = 0x1c, j = 0; i <= 0x30; i++, j++) {
@@ -5205,12 +5205,12 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
/* [ycchen] 01/14/03 Modify for 301C PALM Support */
if (pVBInfo->VBType & VB_XGI301C) {
- if (pVBInfo->TVInfo & SetPALMTV)
+ if (pVBInfo->TVInfo & TVSetPALM)
xgifb_reg_and_or(pVBInfo->Part2Port, 0x4E, ~0x08,
0x08); /* PALM Mode */
}
- if (pVBInfo->TVInfo & SetPALMTV) {
+ if (pVBInfo->TVInfo & TVSetPALM) {
tempax = (unsigned char) xgifb_reg_get(pVBInfo->Part2Port,
0x01);
tempax--;
@@ -5219,7 +5219,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
xgifb_reg_and(pVBInfo->Part2Port, 0x00, 0xEF);
}
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
if (!(pVBInfo->VBInfo & SetInSlaveMode))
xgifb_reg_set(pVBInfo->Part2Port, 0x0B, 0x00);
}
@@ -5267,11 +5267,11 @@ static void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
xgifb_reg_and_or(pVBInfo->Part2Port, 0x2B, 0x0F, temp);
temp = 0x01;
- if (pVBInfo->LCDResInfo == Panel1280x1024) {
+ if (pVBInfo->LCDResInfo == Panel_1280x1024) {
if (pVBInfo->ModeType == ModeEGA) {
if (pVBInfo->VGAHDE >= 1024) {
temp = 0x02;
- if (pVBInfo->LCDInfo & LCDVESATiming)
+ if (pVBInfo->LCDInfo & XGI_LCDVESATiming)
temp = 0x01;
}
}
@@ -5305,14 +5305,14 @@ static void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
tempah = pVBInfo->LCDResInfo;
tempah &= PanelResInfo;
- if ((tempah == Panel1024x768) || (tempah == Panel1024x768x75)) {
+ if ((tempah == Panel_1024x768) || (tempah == Panel_1024x768x75)) {
tempbx = 1024;
tempcx = 768;
- } else if ((tempah == Panel1280x1024) ||
- (tempah == Panel1280x1024x75)) {
+ } else if ((tempah == Panel_1280x1024) ||
+ (tempah == Panel_1280x1024x75)) {
tempbx = 1280;
tempcx = 1024;
- } else if (tempah == Panel1400x1050) {
+ } else if (tempah == Panel_1400x1050) {
tempbx = 1400;
tempcx = 1050;
} else {
@@ -5375,7 +5375,7 @@ static void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
tempcx = tempcx >> 1;
}
- if (pVBInfo->VBType & VB_XGI302LV)
+ if (pVBInfo->VBType & VB_SIS302LV)
tempbx += 1;
if (pVBInfo->VBType & VB_XGI301C) /* tap4 */
@@ -5405,7 +5405,7 @@ static void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
tempcx = tempcx >> 1;
}
- if (pVBInfo->VBType & VB_XGI302LV)
+ if (pVBInfo->VBType & VB_SIS302LV)
tempbx += 1;
tempcx += tempbx;
@@ -5422,10 +5422,10 @@ static void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
temp = tempcx & 0x00FF; /* RHSYEXP2S=lcdhre */
xgifb_reg_set(pVBInfo->Part2Port, 0x21, temp);
- if (!(pVBInfo->LCDInfo & LCDVESATiming)) {
+ if (!(pVBInfo->LCDInfo & XGI_LCDVESATiming)) {
if (pVBInfo->VGAVDE == 525) {
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B
- | VB_XGI301LV | VB_XGI302LV
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B
+ | VB_SIS301LV | VB_SIS302LV
| VB_XGI301C)) {
temp = 0xC6;
} else
@@ -5436,8 +5436,8 @@ static void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
}
if (pVBInfo->VGAVDE == 420) {
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B
- | VB_XGI301LV | VB_XGI302LV
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B
+ | VB_SIS301LV | VB_SIS302LV
| VB_XGI301C)) {
temp = 0x4F;
} else
@@ -5473,18 +5473,18 @@ static struct XGI301C_Tap4TimingStruct *XGI_GetTap4Ptr(unsigned short tempcx,
else
Tap4TimingPtr = xgifb_ntsc_525_tap4_timing; /* NTSC */
- if (pVBInfo->TVInfo & SetPALTV)
+ if (pVBInfo->TVInfo & TVSetPAL)
Tap4TimingPtr = PALTap4Timing;
- if (pVBInfo->VBInfo & SetCRT2ToYPbPr) {
- if ((pVBInfo->TVInfo & SetYPbPrMode525i) ||
- (pVBInfo->TVInfo & SetYPbPrMode525p))
+ if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) {
+ if ((pVBInfo->TVInfo & TVSetYPbPr525i) ||
+ (pVBInfo->TVInfo & TVSetYPbPr525p))
Tap4TimingPtr = xgifb_ntsc_525_tap4_timing;
- if (pVBInfo->TVInfo & SetYPbPrMode750p)
+ if (pVBInfo->TVInfo & TVSetYPbPr750p)
Tap4TimingPtr = YPbPr750pTap4Timing;
}
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV)
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision)
Tap4TimingPtr = xgifb_tap4_timing;
i = 0;
@@ -5510,7 +5510,7 @@ static void XGI_SetTap4Regs(struct vb_device_info *pVBInfo)
xgifb_reg_set(pVBInfo->Part2Port, i, Tap4TimingPtr->Reg[j]);
if ((pVBInfo->VBInfo & SetCRT2ToTV) &&
- (!(pVBInfo->VBInfo & SetCRT2ToHiVisionTV))) {
+ (!(pVBInfo->VBInfo & SetCRT2ToHiVision))) {
/* Set Vertical Scaling */
Tap4TimingPtr = XGI_GetTap4Ptr(1, pVBInfo);
for (i = 0xC0, j = 0; i < 0xFF; i++, j++)
@@ -5520,7 +5520,7 @@ static void XGI_SetTap4Regs(struct vb_device_info *pVBInfo)
}
if ((pVBInfo->VBInfo & SetCRT2ToTV) &&
- (!(pVBInfo->VBInfo & SetCRT2ToHiVisionTV)))
+ (!(pVBInfo->VBInfo & SetCRT2ToHiVision)))
/* Enable V.Scaling */
xgifb_reg_and_or(pVBInfo->Part2Port, 0x4E, ~0x14, 0x04);
else
@@ -5543,7 +5543,7 @@ static void XGI_SetGroup3(unsigned short ModeNo, unsigned short ModeIdIndex,
modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
xgifb_reg_set(pVBInfo->Part3Port, 0x00, 0x00);
- if (pVBInfo->TVInfo & SetPALTV) {
+ if (pVBInfo->TVInfo & TVSetPAL) {
xgifb_reg_set(pVBInfo->Part3Port, 0x13, 0xFA);
xgifb_reg_set(pVBInfo->Part3Port, 0x14, 0xC8);
} else {
@@ -5554,15 +5554,15 @@ static void XGI_SetGroup3(unsigned short ModeNo, unsigned short ModeIdIndex,
if (!(pVBInfo->VBInfo & SetCRT2ToTV))
return;
- if (pVBInfo->TVInfo & SetPALMTV) {
+ if (pVBInfo->TVInfo & TVSetPALM) {
xgifb_reg_set(pVBInfo->Part3Port, 0x13, 0xFA);
xgifb_reg_set(pVBInfo->Part3Port, 0x14, 0xC8);
xgifb_reg_set(pVBInfo->Part3Port, 0x3D, 0xA8);
}
- if ((pVBInfo->VBInfo & SetCRT2ToHiVisionTV) || (pVBInfo->VBInfo
- & SetCRT2ToYPbPr)) {
- if (pVBInfo->TVInfo & SetYPbPrMode525i)
+ if ((pVBInfo->VBInfo & SetCRT2ToHiVision) || (pVBInfo->VBInfo
+ & SetCRT2ToYPbPr525750)) {
+ if (pVBInfo->TVInfo & TVSetYPbPr525i)
return;
tempdi = pVBInfo->HiTVGroup3Data;
@@ -5572,17 +5572,17 @@ static void XGI_SetGroup3(unsigned short ModeNo, unsigned short ModeIdIndex,
tempdi = pVBInfo->HiTVGroup3Text;
}
- if (pVBInfo->TVInfo & SetYPbPrMode525p)
+ if (pVBInfo->TVInfo & TVSetYPbPr525p)
tempdi = pVBInfo->Ren525pGroup3;
- if (pVBInfo->TVInfo & SetYPbPrMode750p)
+ if (pVBInfo->TVInfo & TVSetYPbPr750p)
tempdi = pVBInfo->Ren750pGroup3;
for (i = 0; i <= 0x3E; i++)
xgifb_reg_set(pVBInfo->Part3Port, i, tempdi[i]);
if (pVBInfo->VBType & VB_XGI301C) { /* Marcovision */
- if (pVBInfo->TVInfo & SetYPbPrMode525p)
+ if (pVBInfo->TVInfo & TVSetYPbPr525p)
xgifb_reg_set(pVBInfo->Part3Port, 0x28, 0x3f);
}
}
@@ -5637,7 +5637,7 @@ static void XGI_SetGroup4(unsigned short ModeNo, unsigned short ModeIdIndex,
if (XGI_IsLCDDualLink(pVBInfo))
tempbx = tempbx >> 1;
- if (tempcx & SetCRT2ToHiVisionTV) {
+ if (tempcx & SetCRT2ToHiVision) {
temp = 0;
if (tempbx <= 1024)
temp = 0xA0;
@@ -5656,7 +5656,7 @@ static void XGI_SetGroup4(unsigned short ModeNo, unsigned short ModeIdIndex,
}
}
- if (pVBInfo->TVInfo & (SetYPbPrMode525p | SetYPbPrMode750p)) {
+ if (pVBInfo->TVInfo & (TVSetYPbPr525p | TVSetYPbPr750p)) {
temp = 0x00;
if (pVBInfo->VGAHDE == 1280)
temp = 0x40;
@@ -5667,7 +5667,7 @@ static void XGI_SetGroup4(unsigned short ModeNo, unsigned short ModeIdIndex,
tempebx = pVBInfo->VDE;
- if (tempcx & SetCRT2ToHiVisionTV) {
+ if (tempcx & SetCRT2ToHiVision) {
if (!(temp & 0xE000))
tempbx = tempbx >> 1;
}
@@ -5705,8 +5705,8 @@ static void XGI_SetGroup4(unsigned short ModeNo, unsigned short ModeIdIndex,
xgifb_reg_set(pVBInfo->Part4Port, 0x19, temp);
/* 301b */
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
temp = 0x0028;
xgifb_reg_set(pVBInfo->Part4Port, 0x1C, temp);
tempax = pVBInfo->VGAHDE;
@@ -5735,7 +5735,7 @@ static void XGI_SetGroup4(unsigned short ModeNo, unsigned short ModeIdIndex,
temp = (tempax & 0x00FF);
xgifb_reg_set(pVBInfo->Part4Port, 0x1D, temp);
- if (pVBInfo->VBInfo & (SetCRT2ToTV | SetCRT2ToHiVisionTV)) {
+ if (pVBInfo->VBInfo & (SetCRT2ToTV | SetCRT2ToHiVision)) {
if (pVBInfo->VGAHDE > 800)
xgifb_reg_or(pVBInfo->Part4Port, 0x1E, 0x08);
@@ -5744,8 +5744,8 @@ static void XGI_SetGroup4(unsigned short ModeNo, unsigned short ModeIdIndex,
if (pVBInfo->VBInfo & SetCRT2ToTV) {
if (!(pVBInfo->TVInfo & (NTSC1024x768
- | SetYPbPrMode525p | SetYPbPrMode750p
- | SetYPbPrMode1080i))) {
+ | TVSetYPbPr525p | TVSetYPbPr750p
+ | TVSetHiVision))) {
temp |= 0x0001;
if ((pVBInfo->VBInfo & SetInSlaveMode)
&& (!(pVBInfo->TVInfo
@@ -5785,7 +5785,7 @@ static void XGI_SetGroup5(unsigned short ModeNo, unsigned short ModeIdIndex,
Pdata = pVBInfo->Part5Port + 1;
if (pVBInfo->ModeType == ModeVGA) {
if (!(pVBInfo->VBInfo & (SetInSlaveMode | LoadDACFlag
- | CRT2DisplayFlag))) {
+ | DisableCRT2Display))) {
XGINew_EnableCRT2(pVBInfo);
}
}
@@ -6074,7 +6074,7 @@ static unsigned char XGI_IsLCDON(struct vb_device_info *pVBInfo)
tempax = pVBInfo->VBInfo;
if (tempax & SetCRT2ToDualEdge)
return 0;
- else if (tempax & (DisableCRT2Display | SwitchToCRT2 | SetSimuScanMode))
+ else if (tempax & (DisableCRT2Display | SwitchCRT2 | SetSimuScanMode))
return 1;
return 0;
@@ -6140,15 +6140,15 @@ static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
{
unsigned short tempah = 0;
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
tempah = 0x3F;
if (!(pVBInfo->VBInfo &
(DisableCRT2Display | SetSimuScanMode))) {
- if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
if (pVBInfo->VBInfo & SetCRT2ToDualEdge) {
tempah = 0x7F; /* Disable Channel A */
- if (!(pVBInfo->VBInfo & SetCRT2ToLCDA))
+ if (!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA))
/* Disable Channel B */
tempah = 0xBF;
@@ -6166,8 +6166,8 @@ static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
/* disable part4_1f */
xgifb_reg_and(pVBInfo->Part4Port, 0x1F, tempah);
- if (pVBInfo->VBType & (VB_XGI302LV | VB_XGI301C)) {
- if (((pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)))
+ if (pVBInfo->VBType & (VB_SIS302LV | VB_XGI301C)) {
+ if (((pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)))
|| (XGI_DisableChISLCD(pVBInfo))
|| (XGI_IsLCDON(pVBInfo)))
/* LVDS Driver power down */
@@ -6175,16 +6175,16 @@ static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
}
if ((pVBInfo->SetFlag & DisableChA) || (pVBInfo->VBInfo
- & (DisableCRT2Display | SetCRT2ToLCDA
+ & (DisableCRT2Display | XGI_SetCRT2ToLCDA
| SetSimuScanMode))) {
if (pVBInfo->SetFlag & GatingCRT)
XGI_EnableGatingCRT(HwDeviceExtension, pVBInfo);
XGI_DisplayOff(xgifb_info, HwDeviceExtension, pVBInfo);
}
- if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
if ((pVBInfo->SetFlag & DisableChA) || (pVBInfo->VBInfo
- & SetCRT2ToLCDA))
+ & XGI_SetCRT2ToLCDA))
/* Power down */
xgifb_reg_and(pVBInfo->Part1Port, 0x1e, 0xdf);
}
@@ -6198,7 +6198,7 @@ static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
if ((pVBInfo->SetFlag & DisableChB) ||
(pVBInfo->VBInfo &
(DisableCRT2Display | SetSimuScanMode)) ||
- ((!(pVBInfo->VBInfo & SetCRT2ToLCDA)) &&
+ ((!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)) &&
(pVBInfo->VBInfo &
(SetCRT2ToRAMDAC | SetCRT2ToLCD | SetCRT2ToTV))))
xgifb_reg_or(pVBInfo->Part1Port, 0x00, 0x80);
@@ -6206,7 +6206,7 @@ static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
if ((pVBInfo->SetFlag & DisableChB) ||
(pVBInfo->VBInfo &
(DisableCRT2Display | SetSimuScanMode)) ||
- (!(pVBInfo->VBInfo & SetCRT2ToLCDA)) ||
+ (!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)) ||
(pVBInfo->VBInfo &
(SetCRT2ToRAMDAC | SetCRT2ToLCD | SetCRT2ToTV))) {
/* save Part1 index 0 */
@@ -6227,7 +6227,7 @@ static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
xgifb_reg_and(pVBInfo->P3c4, 0x32, 0xDF);
}
- if (pVBInfo->VBInfo & (DisableCRT2Display | SetCRT2ToLCDA
+ if (pVBInfo->VBInfo & (DisableCRT2Display | XGI_SetCRT2ToLCDA
| SetSimuScanMode))
XGI_DisplayOff(xgifb_info, HwDeviceExtension, pVBInfo);
}
@@ -6254,15 +6254,15 @@ static unsigned short XGI_GetTVPtrIndex(struct vb_device_info *pVBInfo)
{
unsigned short tempbx = 0;
- if (pVBInfo->TVInfo & SetPALTV)
+ if (pVBInfo->TVInfo & TVSetPAL)
tempbx = 2;
- if (pVBInfo->TVInfo & SetYPbPrMode1080i)
+ if (pVBInfo->TVInfo & TVSetHiVision)
tempbx = 4;
- if (pVBInfo->TVInfo & SetYPbPrMode525i)
+ if (pVBInfo->TVInfo & TVSetYPbPr525i)
tempbx = 6;
- if (pVBInfo->TVInfo & SetYPbPrMode525p)
+ if (pVBInfo->TVInfo & TVSetYPbPr525p)
tempbx = 8;
- if (pVBInfo->TVInfo & SetYPbPrMode750p)
+ if (pVBInfo->TVInfo & TVSetYPbPr750p)
tempbx = 10;
if (pVBInfo->TVInfo & TVSimuMode)
tempbx++;
@@ -6293,23 +6293,23 @@ static void XGI_GetTVPtrIndex2(unsigned short *tempbx, unsigned char *tempcl,
*tempcl = 0;
*tempch = 0;
- if (pVBInfo->TVInfo & SetPALTV)
+ if (pVBInfo->TVInfo & TVSetPAL)
*tempbx = 1;
- if (pVBInfo->TVInfo & SetPALMTV)
+ if (pVBInfo->TVInfo & TVSetPALM)
*tempbx = 2;
- if (pVBInfo->TVInfo & SetPALNTV)
+ if (pVBInfo->TVInfo & TVSetPALN)
*tempbx = 3;
if (pVBInfo->TVInfo & NTSC1024x768) {
*tempbx = 4;
- if (pVBInfo->TVInfo & SetPALMTV)
+ if (pVBInfo->TVInfo & TVSetPALM)
*tempbx = 5;
}
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
if ((!(pVBInfo->VBInfo & SetInSlaveMode)) || (pVBInfo->TVInfo
& TVSimuMode)) {
*tempbx += 8;
@@ -6317,8 +6317,8 @@ static void XGI_GetTVPtrIndex2(unsigned short *tempbx, unsigned char *tempcl,
}
}
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C))
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C))
(*tempch)++;
}
@@ -6328,9 +6328,9 @@ static void XGI_SetDelayComp(struct vb_device_info *pVBInfo)
unsigned char tempah, tempbl, tempbh;
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA
| SetCRT2ToTV | SetCRT2ToRAMDAC)) {
tempbl = 0;
tempbh = 0;
@@ -6338,20 +6338,20 @@ static void XGI_SetDelayComp(struct vb_device_info *pVBInfo)
index = XGI_GetTVPtrIndex(pVBInfo); /* Get TV Delay */
tempbl = pVBInfo->XGI_TVDelayList[index];
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B
- | VB_XGI301LV | VB_XGI302LV
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B
+ | VB_SIS301LV | VB_SIS302LV
| VB_XGI301C))
tempbl = pVBInfo->XGI_TVDelayList2[index];
if (pVBInfo->VBInfo & SetCRT2ToDualEdge)
tempbl = tempbl >> 4;
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
/* Get LCD Delay */
index = XGI_GetLCDCapPtr(pVBInfo);
tempbh = pVBInfo->LCDCapList[index].
LCD_DelayCompensation;
- if (!(pVBInfo->VBInfo & SetCRT2ToLCDA))
+ if (!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA))
tempbl = tempbh;
}
@@ -6365,7 +6365,7 @@ static void XGI_SetDelayComp(struct vb_device_info *pVBInfo)
tempah |= tempbl;
}
- if (pVBInfo->VBInfo & SetCRT2ToLCDA) { /* Channel A */
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) { /* Channel A */
tempah &= 0x0F;
tempah |= tempbh;
}
@@ -6475,13 +6475,13 @@ static void XGI_SetLCDCap(struct vb_device_info *pVBInfo)
tempcx = pVBInfo->LCDCapList[XGI_GetLCDCapPtr(pVBInfo)].LCD_Capability;
if (pVBInfo->VBType &
- (VB_XGI301B |
- VB_XGI302B |
- VB_XGI301LV |
- VB_XGI302LV |
+ (VB_SIS301B |
+ VB_SIS302B |
+ VB_SIS301LV |
+ VB_SIS302LV |
VB_XGI301C)) { /* 301LV/302LV only */
if (pVBInfo->VBType &
- (VB_XGI301LV | VB_XGI302LV | VB_XGI301C)) {
+ (VB_SIS301LV | VB_SIS302LV | VB_XGI301C)) {
/* Set 301LV Capability */
xgifb_reg_set(pVBInfo->Part4Port, 0x24,
(unsigned char) (tempcx & 0x1F));
@@ -6493,14 +6493,14 @@ static void XGI_SetLCDCap(struct vb_device_info *pVBInfo)
| EnablePLLSPLOW)) >> 8));
}
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
if (pVBInfo->VBInfo & SetCRT2ToLCD)
XGI_SetLCDCap_B(tempcx, pVBInfo);
- else if (pVBInfo->VBInfo & SetCRT2ToLCDA)
+ else if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)
XGI_SetLCDCap_A(tempcx, pVBInfo);
- if (pVBInfo->VBType & (VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS302LV | VB_XGI301C)) {
if (tempcx & EnableSpectrum)
SetSpectrum(pVBInfo);
}
@@ -6524,7 +6524,7 @@ static void XGI_SetAntiFlicker(unsigned short ModeNo,
unsigned char tempah;
- if (pVBInfo->TVInfo & (SetYPbPrMode525p | SetYPbPrMode750p))
+ if (pVBInfo->TVInfo & (TVSetYPbPr525p | TVSetYPbPr750p))
return;
tempbx = XGI_GetTVPtrIndex(pVBInfo);
@@ -6648,8 +6648,8 @@ static void XGI_SetYFilter(unsigned short ModeNo, unsigned short ModeIdIndex,
xgifb_reg_set(pVBInfo->Part2Port, 0x38, filterPtr[index++]);
}
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
xgifb_reg_set(pVBInfo->Part2Port, 0x48, filterPtr[index++]);
xgifb_reg_set(pVBInfo->Part2Port, 0x49, filterPtr[index++]);
xgifb_reg_set(pVBInfo->Part2Port, 0x4A, filterPtr[index++]);
@@ -6668,7 +6668,7 @@ static void XGI_OEM310Setting(unsigned short ModeNo,
{
XGI_SetDelayComp(pVBInfo);
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA))
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA))
XGI_SetLCDCap(pVBInfo);
if (pVBInfo->VBInfo & SetCRT2ToTV) {
@@ -6676,7 +6676,7 @@ static void XGI_OEM310Setting(unsigned short ModeNo,
XGI_SetYFilter(ModeNo, ModeIdIndex, pVBInfo);
XGI_SetAntiFlicker(ModeNo, ModeIdIndex, pVBInfo);
- if (pVBInfo->VBType & VB_XGI301)
+ if (pVBInfo->VBType & VB_SIS301)
XGI_SetEdgeEnhance(ModeNo, ModeIdIndex, pVBInfo);
}
}
@@ -6732,15 +6732,15 @@ static void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
tempbl = 0xff;
if (pVBInfo->VBInfo & (SetCRT2ToRAMDAC | SetCRT2ToTV
- | SetCRT2ToLCD | SetCRT2ToLCDA)) {
- if ((pVBInfo->VBInfo & SetCRT2ToLCDA) &&
+ | SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
+ if ((pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) &&
(!(pVBInfo->VBInfo & SetSimuScanMode))) {
tempbl &= 0xf7;
tempah |= 0x01;
xgifb_reg_and_or(pVBInfo->Part1Port, 0x2e,
tempbl, tempah);
} else {
- if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
tempbl &= 0xf7;
tempah |= 0x01;
}
@@ -6780,7 +6780,7 @@ static void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
}
if (pVBInfo->VBInfo & (SetCRT2ToRAMDAC | SetCRT2ToTV | SetCRT2ToLCD
- | SetCRT2ToLCDA)) {
+ | XGI_SetCRT2ToLCDA)) {
tempah &= (~0x08);
if ((pVBInfo->ModeType == ModeVGA) && (!(pVBInfo->VBInfo
& SetInSlaveMode))) {
@@ -6807,24 +6807,24 @@ static void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
tempah |= 0x40;
}
- if ((pVBInfo->LCDResInfo == Panel1280x1024)
- || (pVBInfo->LCDResInfo == Panel1280x1024x75))
+ if ((pVBInfo->LCDResInfo == Panel_1280x1024)
+ || (pVBInfo->LCDResInfo == Panel_1280x1024x75))
tempah |= 0x80;
- if (pVBInfo->LCDResInfo == Panel1280x960)
+ if (pVBInfo->LCDResInfo == Panel_1280x960)
tempah |= 0x80;
xgifb_reg_set(pVBInfo->Part4Port, 0x0C, tempah);
}
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
tempah = 0;
tempbl = 0xfb;
if (pVBInfo->VBInfo & SetCRT2ToDualEdge) {
tempbl = 0xff;
- if (pVBInfo->VBInfo & SetCRT2ToLCDA)
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)
tempah |= 0x04; /* shampoo 0129 */
}
@@ -6849,7 +6849,7 @@ static void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
tempah = 0;
tempbl = 0x7f;
- if (!(pVBInfo->VBInfo & SetCRT2ToLCDA)) {
+ if (!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)) {
tempbl = 0xff;
if (!(pVBInfo->VBInfo & SetCRT2ToDualEdge))
tempah |= 0x80;
@@ -6857,7 +6857,7 @@ static void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
xgifb_reg_and_or(pVBInfo->Part4Port, 0x23, tempbl, tempah);
- if (pVBInfo->VBType & (VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS302LV | VB_XGI301C)) {
if (pVBInfo->LCDInfo & SetLCDDualLink) {
xgifb_reg_or(pVBInfo->Part4Port, 0x27, 0x20);
xgifb_reg_or(pVBInfo->Part4Port, 0x34, 0x10);
@@ -6872,7 +6872,7 @@ static void XGI_CloseCRTC(struct xgi_hw_device_info *HwDeviceExtension,
tempbx = 0;
- if (pVBInfo->VBInfo & SetCRT2ToLCDA)
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)
tempbx = 0x08A0;
}
@@ -6937,10 +6937,10 @@ unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
index--;
if (pVBInfo->SetFlag & ProgrammingCRT2) {
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
if (pVBInfo->IF_DEF_LVDS == 0) {
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B
- | VB_XGI301LV | VB_XGI302LV
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B
+ | VB_SIS301LV | VB_SIS302LV
| VB_XGI301C))
/* 301b */
temp = LCDARefreshIndex[
@@ -6983,7 +6983,7 @@ unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
break;
temp = pVBInfo->RefIndex[RefreshRateTableIndex + i].
Ext_InfoFlag;
- temp &= ModeInfoFlag;
+ temp &= ModeTypeMask;
if (temp < pVBInfo->ModeType)
break;
i++;
@@ -7163,8 +7163,8 @@ static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
{
unsigned short tempah;
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
if (!(pVBInfo->SetFlag & DisableChA)) {
if (pVBInfo->SetFlag & EnableChA) {
/* Power on */
@@ -7207,11 +7207,11 @@ static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
|| (!(pVBInfo->VBInfo & DisableCRT2Display))) {
xgifb_reg_and_or(pVBInfo->Part2Port, 0x00, ~0xE0,
0x20); /* shampoo 0129 */
- if (pVBInfo->VBType & (VB_XGI302LV | VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS302LV | VB_XGI301C)) {
if (!XGI_DisableChISLCD(pVBInfo)) {
if (XGI_EnableChISLCD(pVBInfo) ||
(pVBInfo->VBInfo &
- (SetCRT2ToLCD | SetCRT2ToLCDA)))
+ (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)))
/* LVDS PLL power on */
xgifb_reg_and(
pVBInfo->Part4Port,
@@ -7229,12 +7229,12 @@ static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
tempah = 0xc0;
if (!(pVBInfo->VBInfo & SetSimuScanMode)) {
- if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
if (pVBInfo->VBInfo &
SetCRT2ToDualEdge) {
tempah = tempah & 0x40;
if (pVBInfo->VBInfo &
- SetCRT2ToLCDA)
+ XGI_SetCRT2ToLCDA)
tempah = tempah ^ 0xC0;
if (pVBInfo->SetFlag &
@@ -7271,7 +7271,7 @@ static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
} /* 301 */
else { /* LVDS */
if (pVBInfo->VBInfo & (SetCRT2ToTV | SetCRT2ToLCD
- | SetCRT2ToLCDA))
+ | XGI_SetCRT2ToLCDA))
/* enable CRT2 */
xgifb_reg_or(pVBInfo->Part1Port, 0x1E, 0x20);
@@ -7311,9 +7311,9 @@ static void XGI_SetCRT1Group(struct xgifb_video_info *xgifb_info,
pVBInfo->SetFlag &= temp;
pVBInfo->SelectCRT2Rate = 0;
- if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
- | VB_XGI302LV | VB_XGI301C)) {
- if (pVBInfo->VBInfo & (SetSimuScanMode | SetCRT2ToLCDA
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
+ | VB_SIS302LV | VB_XGI301C)) {
+ if (pVBInfo->VBInfo & (SetSimuScanMode | XGI_SetCRT2ToLCDA
| SetInSlaveMode)) {
pVBInfo->SetFlag |= ProgrammingCRT2;
}
@@ -7415,11 +7415,11 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
pVBInfo->P3c9 = pVBInfo->BaseAddr + 0x19;
pVBInfo->P3da = pVBInfo->BaseAddr + 0x2A;
pVBInfo->Part0Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_00;
- pVBInfo->Part1Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_04;
- pVBInfo->Part2Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_10;
- pVBInfo->Part3Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_12;
- pVBInfo->Part4Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_14;
- pVBInfo->Part5Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_14 + 2;
+ pVBInfo->Part1Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_04;
+ pVBInfo->Part2Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_10;
+ pVBInfo->Part3Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_12;
+ pVBInfo->Part4Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_14;
+ pVBInfo->Part5Port = pVBInfo->BaseAddr + SIS_CRT2_PORT_14 + 2;
/* for x86 Linux, XG21 LVDS */
if (HwDeviceExtension->jChipType == XG21) {
@@ -7452,20 +7452,20 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
XGI_GetLCDInfo(ModeNo, ModeIdIndex, pVBInfo);
XGI_DisableBridge(xgifb_info, HwDeviceExtension, pVBInfo);
- if (pVBInfo->VBInfo & (SetSimuScanMode | SetCRT2ToLCDA)) {
+ if (pVBInfo->VBInfo & (SetSimuScanMode | XGI_SetCRT2ToLCDA)) {
XGI_SetCRT1Group(xgifb_info, HwDeviceExtension, ModeNo,
ModeIdIndex, pVBInfo);
- if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
XGI_SetLCDAGroup(ModeNo, ModeIdIndex,
HwDeviceExtension, pVBInfo);
}
} else {
- if (!(pVBInfo->VBInfo & SwitchToCRT2)) {
+ if (!(pVBInfo->VBInfo & SwitchCRT2)) {
XGI_SetCRT1Group(xgifb_info,
HwDeviceExtension, ModeNo,
ModeIdIndex, pVBInfo);
- if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
XGI_SetLCDAGroup(ModeNo, ModeIdIndex,
HwDeviceExtension,
pVBInfo);
@@ -7473,7 +7473,7 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
}
}
- if (pVBInfo->VBInfo & (SetSimuScanMode | SwitchToCRT2)) {
+ if (pVBInfo->VBInfo & (SetSimuScanMode | SwitchCRT2)) {
switch (HwDeviceExtension->ujVBChipID) {
case VB_CHIP_301:
XGI_SetCRT2Group301(ModeNo, HwDeviceExtension,
@@ -7504,10 +7504,10 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
if (ModeNo <= 0x13) {
pVBInfo->ModeType = pVBInfo->SModeIDTable[ModeIdIndex].
- St_ModeFlag & ModeInfoFlag;
+ St_ModeFlag & ModeTypeMask;
} else {
pVBInfo->ModeType = pVBInfo->EModeIDTable[ModeIdIndex].
- Ext_ModeFlag & ModeInfoFlag;
+ Ext_ModeFlag & ModeTypeMask;
}
pVBInfo->SetFlag = 0;
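The hunks above are almost entirely a mechanical rename pass that lines xgifb's private constant names up with the ones used by drivers/video/sis, a step toward sharing headers between the two drivers. For orientation, a condensed map of the rename families seen across the xgifb hunks in this patch (the mappings are taken from the hunks themselves; the numeric values behind the names live in vb_def.h and are not shown here):

/* CRT2 routing flags:  SetCRT2ToLCDA -> XGI_SetCRT2ToLCDA, SwitchToCRT2 -> SwitchCRT2   */
/* TV flags:            SetPALTV/SetPALMTV/SetPALNTV -> TVSetPAL/TVSetPALM/TVSetPALN,
 *                      SetYPbPrMode525i/525p/750p -> TVSetYPbPr525i/525p/750p,
 *                      SetYPbPrMode1080i -> TVSetHiVision                                */
/* Bridge types:        VB_XGI301/301B/302B/301LV/302LV -> VB_SIS301/301B/302B/301LV/302LV
 *                      (VB_XGI301C keeps its name)                                       */
/* Panels:              Panel1024x768, Panel1280x1024, ... -> Panel_1024x768, Panel_1280x1024, ... */
/* Modes and ports:     ModeInfoFlag -> ModeTypeMask, Support32Bpp -> Mode32Bpp,
 *                      XGI_CRT2_PORT_xx -> SIS_CRT2_PORT_xx, VCLK65 -> VCLK65_315        */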
diff --git a/drivers/staging/xgifb/vb_struct.h b/drivers/staging/xgifb/vb_struct.h
index 6556a0d6ff82..a5bd56af92b1 100644
--- a/drivers/staging/xgifb/vb_struct.h
+++ b/drivers/staging/xgifb/vb_struct.h
@@ -1,15 +1,6 @@
#ifndef _VB_STRUCT_
#define _VB_STRUCT_
-
-struct XGI_LCDDataStruct {
- unsigned short RVBHCMAX;
- unsigned short RVBHCFACT;
- unsigned short VGAHT;
- unsigned short VGAVT;
- unsigned short LCDHT;
- unsigned short LCDVT;
-};
-
+#include "../../video/sis/vstruct.h"
struct XGI_LVDSCRT1HDataStruct {
unsigned char Reg[8];
@@ -19,22 +10,6 @@ struct XGI_LVDSCRT1VDataStruct {
unsigned char Reg[7];
};
-struct XGI_TVDataStruct {
- unsigned short RVBHCMAX;
- unsigned short RVBHCFACT;
- unsigned short VGAHT;
- unsigned short VGAVT;
- unsigned short TVHDE;
- unsigned short TVVDE;
- unsigned short RVBHRS;
- unsigned char FlickerMode;
- unsigned short HALFRVBHRS;
- unsigned char RY1COE;
- unsigned char RY2COE;
- unsigned char RY3COE;
- unsigned char RY4COE;
-};
-
struct XGI_StStruct {
unsigned char St_ModeID;
unsigned short St_ModeFlag;
@@ -47,18 +22,6 @@ struct XGI_StStruct {
unsigned char VB_StTVYFilterIndex;
};
-struct XGI_StandTableStruct {
- unsigned char CRT_COLS;
- unsigned char ROWS;
- unsigned char CHAR_HEIGHT;
- unsigned short CRT_LEN;
- unsigned char SR[4];
- unsigned char MISC;
- unsigned char CRTC[0x19];
- unsigned char ATTR[0x14];
- unsigned char GRC[9];
-};
-
struct XGI_ExtStruct {
unsigned char Ext_ModeID;
unsigned short Ext_ModeFlag;
@@ -85,39 +48,11 @@ struct XGI_Ext2Struct {
/* unsigned short ROM_OFFSET; */
};
-
-struct XGI_MCLKDataStruct {
- unsigned char SR28, SR29, SR2A;
- unsigned short CLOCK;
-};
-
struct XGI_ECLKDataStruct {
unsigned char SR2E, SR2F, SR30;
unsigned short CLOCK;
};
-struct XGI_VCLKDataStruct {
- unsigned char SR2B, SR2C;
- unsigned short CLOCK;
-};
-
-struct XGI_VBVCLKDataStruct {
- unsigned char Part4_A, Part4_B;
- unsigned short CLOCK;
-};
-
-struct XGI_StResInfoStruct {
- unsigned short HTotal;
- unsigned short VTotal;
-};
-
-struct XGI_ModeResInfoStruct {
- unsigned short HTotal;
- unsigned short VTotal;
- unsigned char XChar;
- unsigned char YChar;
-};
-
/*add for new UNIVGABIOS*/
struct XGI_LCDDesStruct {
unsigned short LCDHDES;
@@ -350,7 +285,7 @@ struct vb_device_info {
unsigned char *pCRT2Data_4_D;
unsigned char *pCRT2Data_4_E;
unsigned char *pCRT2Data_4_10;
- struct XGI_MCLKDataStruct *MCLKData;
+ struct SiS_MCLKData *MCLKData;
struct XGI_ECLKDataStruct *ECLKData;
unsigned char *XGI_TVDelayList;
@@ -380,15 +315,15 @@ struct vb_device_info {
struct XGI_TimingVStruct *TimingV;
struct XGI_StStruct *SModeIDTable;
- struct XGI_StandTableStruct *StandTable;
+ struct SiS_StandTable_S *StandTable;
struct XGI_ExtStruct *EModeIDTable;
struct XGI_Ext2Struct *RefIndex;
/* XGINew_CRT1TableStruct *CRT1Table; */
struct XGI_CRT1TableStruct *XGINEWUB_CRT1Table;
- struct XGI_VCLKDataStruct *VCLKData;
- struct XGI_VBVCLKDataStruct *VBVCLKData;
- struct XGI_StResInfoStruct *StResInfo;
- struct XGI_ModeResInfoStruct *ModeResInfo;
+ struct SiS_VCLKData *VCLKData;
+ struct SiS_VBVCLKData *VBVCLKData;
+ struct SiS_StResInfo_S *StResInfo;
+ struct SiS_ModeResInfo_S *ModeResInfo;
struct XGI_XG21CRT1Struct *UpdateCRT1;
int ram_type;
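vb_struct.h now pulls its common structure definitions from drivers/video/sis/vstruct.h instead of carrying duplicates: XGI_MCLKDataStruct, XGI_StandTableStruct, XGI_VCLKDataStruct, XGI_VBVCLKDataStruct, XGI_StResInfoStruct and XGI_ModeResInfoStruct give way to SiS_MCLKData, SiS_StandTable_S, SiS_VCLKData, SiS_VBVCLKData, SiS_StResInfo_S and SiS_ModeResInfo_S. The initializers in vb_table.h are left untouched, so the sis types are evidently expected to be layout-compatible with the structs removed here. As one example, the assumed shape of SiS_MCLKData (the authoritative definition lives in the sis header, not in this patch):

/* Assumed to match the removed XGI_MCLKDataStruct field for field;
 * see drivers/video/sis/vstruct.h for the real definition. */
struct SiS_MCLKData {
	unsigned char SR28, SR29, SR2A;		/* {0x16, 0x01, 0x01, ...              */
	unsigned short CLOCK;			/*  ..., 166} in XGI340New_MCLKData[]  */
};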
diff --git a/drivers/staging/xgifb/vb_table.h b/drivers/staging/xgifb/vb_table.h
index e7946f1c1143..dddf261ed53d 100644
--- a/drivers/staging/xgifb/vb_table.h
+++ b/drivers/staging/xgifb/vb_table.h
@@ -1,5 +1,5 @@
/* yilin modify for xgi20 */
-static struct XGI_MCLKDataStruct XGI340New_MCLKData[] = {
+static struct SiS_MCLKData XGI340New_MCLKData[] = {
{0x16, 0x01, 0x01, 166},
{0x19, 0x02, 0x01, 124},
{0x7C, 0x08, 0x01, 200},
@@ -10,7 +10,7 @@ static struct XGI_MCLKDataStruct XGI340New_MCLKData[] = {
{0x5c, 0x23, 0x01, 166}
};
-static struct XGI_MCLKDataStruct XGI27New_MCLKData[] = {
+static struct SiS_MCLKData XGI27New_MCLKData[] = {
{0x5c, 0x23, 0x01, 166},
{0x19, 0x02, 0x01, 124},
{0x7C, 0x08, 0x80, 200},
@@ -296,7 +296,7 @@ static struct XGI_ExtStruct XGI330_EModeIDTable[] = {
0x00, 0x00, 0x00, 0x00, 0x00}
};
-static struct XGI_StandTableStruct XGI330_StandTable[] = {
+static struct SiS_StandTable_S XGI330_StandTable[] = {
/* MD_0_200 */
{
0x28, 0x18, 0x08, 0x0800,
@@ -2353,109 +2353,109 @@ static struct XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11280x1024_2_Vx75[] = {
/*add for new UNIVGABIOS*/
static struct XGI330_LCDDataTablStruct XGI_LCDDataTable[] = {
- {Panel1024x768, 0x0019, 0x0001, 0}, /* XGI_ExtLCD1024x768Data */
- {Panel1024x768, 0x0019, 0x0000, 1}, /* XGI_StLCD1024x768Data */
- {Panel1024x768, 0x0018, 0x0010, 2}, /* XGI_CetLCD1024x768Data */
- {Panel1280x1024, 0x0019, 0x0001, 3}, /* XGI_ExtLCD1280x1024Data */
- {Panel1280x1024, 0x0019, 0x0000, 4}, /* XGI_StLCD1280x1024Data */
- {Panel1280x1024, 0x0018, 0x0010, 5}, /* XGI_CetLCD1280x1024Data */
- {Panel1400x1050, 0x0019, 0x0001, 6}, /* XGI_ExtLCD1400x1050Data */
- {Panel1400x1050, 0x0019, 0x0000, 7}, /* XGI_StLCD1400x1050Data */
- {Panel1400x1050, 0x0018, 0x0010, 8}, /* XGI_CetLCD1400x1050Data */
- {Panel1600x1200, 0x0019, 0x0001, 9}, /* XGI_ExtLCD1600x1200Data */
- {Panel1600x1200, 0x0019, 0x0000, 10}, /* XGI_StLCD1600x1200Data */
+ {Panel_1024x768, 0x0019, 0x0001, 0}, /* XGI_ExtLCD1024x768Data */
+ {Panel_1024x768, 0x0019, 0x0000, 1}, /* XGI_StLCD1024x768Data */
+ {Panel_1024x768, 0x0018, 0x0010, 2}, /* XGI_CetLCD1024x768Data */
+ {Panel_1280x1024, 0x0019, 0x0001, 3}, /* XGI_ExtLCD1280x1024Data */
+ {Panel_1280x1024, 0x0019, 0x0000, 4}, /* XGI_StLCD1280x1024Data */
+ {Panel_1280x1024, 0x0018, 0x0010, 5}, /* XGI_CetLCD1280x1024Data */
+ {Panel_1400x1050, 0x0019, 0x0001, 6}, /* XGI_ExtLCD1400x1050Data */
+ {Panel_1400x1050, 0x0019, 0x0000, 7}, /* XGI_StLCD1400x1050Data */
+ {Panel_1400x1050, 0x0018, 0x0010, 8}, /* XGI_CetLCD1400x1050Data */
+ {Panel_1600x1200, 0x0019, 0x0001, 9}, /* XGI_ExtLCD1600x1200Data */
+ {Panel_1600x1200, 0x0019, 0x0000, 10}, /* XGI_StLCD1600x1200Data */
{PanelRef60Hz, 0x0008, 0x0008, 11}, /* XGI_NoScalingData */
- {Panel1024x768x75, 0x0019, 0x0001, 12}, /* XGI_ExtLCD1024x768x75Data */
- {Panel1024x768x75, 0x0019, 0x0000, 13}, /* XGI_StLCD1024x768x75Data */
- {Panel1024x768x75, 0x0018, 0x0010, 14}, /* XGI_CetLCD1024x768x75Data */
- {Panel1280x1024x75, 0x0019, 0x0001, 15}, /* XGI_ExtLCD1280x1024x75Data*/
- {Panel1280x1024x75, 0x0019, 0x0000, 16}, /* XGI_StLCD1280x1024x75Data */
- {Panel1280x1024x75, 0x0018, 0x0010, 17}, /* XGI_CetLCD1280x1024x75Data*/
+ {Panel_1024x768x75, 0x0019, 0x0001, 12}, /* XGI_ExtLCD1024x768x75Data */
+ {Panel_1024x768x75, 0x0019, 0x0000, 13}, /* XGI_StLCD1024x768x75Data */
+ {Panel_1024x768x75, 0x0018, 0x0010, 14}, /* XGI_CetLCD1024x768x75Data */
+ {Panel_1280x1024x75, 0x0019, 0x0001, 15}, /* XGI_ExtLCD1280x1024x75Data*/
+ {Panel_1280x1024x75, 0x0019, 0x0000, 16}, /* XGI_StLCD1280x1024x75Data */
+ {Panel_1280x1024x75, 0x0018, 0x0010, 17}, /* XGI_CetLCD1280x1024x75Data*/
{PanelRef75Hz, 0x0008, 0x0008, 18}, /* XGI_NoScalingDatax75 */
{0xFF, 0x0000, 0x0000, 0} /* End of table */
};
static struct XGI330_LCDDataTablStruct XGI_LCDDesDataTable[] = {
- {Panel1024x768, 0x0019, 0x0001, 0}, /* XGI_ExtLCDDes1024x768Data */
- {Panel1024x768, 0x0019, 0x0000, 1}, /* XGI_StLCDDes1024x768Data */
- {Panel1024x768, 0x0018, 0x0010, 2}, /* XGI_CetLCDDes1024x768Data */
- {Panel1280x1024, 0x0019, 0x0001, 3}, /* XGI_ExtLCDDes1280x1024Data */
- {Panel1280x1024, 0x0019, 0x0000, 4}, /* XGI_StLCDDes1280x1024Data */
- {Panel1280x1024, 0x0018, 0x0010, 5}, /* XGI_CetLCDDes1280x1024Data */
- {Panel1400x1050, 0x0019, 0x0001, 6}, /* XGI_ExtLCDDes1400x1050Data */
- {Panel1400x1050, 0x0019, 0x0000, 7}, /* XGI_StLCDDes1400x1050Data */
- {Panel1400x1050, 0x0418, 0x0010, 8}, /* XGI_CetLCDDes1400x1050Data */
- {Panel1400x1050, 0x0418, 0x0410, 9}, /* XGI_CetLCDDes1400x1050Data2 */
- {Panel1600x1200, 0x0019, 0x0001, 10}, /* XGI_ExtLCDDes1600x1200Data */
- {Panel1600x1200, 0x0019, 0x0000, 11}, /* XGI_StLCDDes1600x1200Data */
+ {Panel_1024x768, 0x0019, 0x0001, 0}, /* XGI_ExtLCDDes1024x768Data */
+ {Panel_1024x768, 0x0019, 0x0000, 1}, /* XGI_StLCDDes1024x768Data */
+ {Panel_1024x768, 0x0018, 0x0010, 2}, /* XGI_CetLCDDes1024x768Data */
+ {Panel_1280x1024, 0x0019, 0x0001, 3}, /* XGI_ExtLCDDes1280x1024Data */
+ {Panel_1280x1024, 0x0019, 0x0000, 4}, /* XGI_StLCDDes1280x1024Data */
+ {Panel_1280x1024, 0x0018, 0x0010, 5}, /* XGI_CetLCDDes1280x1024Data */
+ {Panel_1400x1050, 0x0019, 0x0001, 6}, /* XGI_ExtLCDDes1400x1050Data */
+ {Panel_1400x1050, 0x0019, 0x0000, 7}, /* XGI_StLCDDes1400x1050Data */
+ {Panel_1400x1050, 0x0418, 0x0010, 8}, /* XGI_CetLCDDes1400x1050Data */
+ {Panel_1400x1050, 0x0418, 0x0410, 9}, /* XGI_CetLCDDes1400x1050Data2 */
+ {Panel_1600x1200, 0x0019, 0x0001, 10}, /* XGI_ExtLCDDes1600x1200Data */
+ {Panel_1600x1200, 0x0019, 0x0000, 11}, /* XGI_StLCDDes1600x1200Data */
{PanelRef60Hz, 0x0008, 0x0008, 12}, /* XGI_NoScalingDesData */
- {Panel1024x768x75, 0x0019, 0x0001, 13}, /*XGI_ExtLCDDes1024x768x75Data*/
- {Panel1024x768x75, 0x0019, 0x0000, 14}, /* XGI_StLCDDes1024x768x75Data*/
- {Panel1024x768x75, 0x0018, 0x0010, 15}, /*XGI_CetLCDDes1024x768x75Data*/
+ {Panel_1024x768x75, 0x0019, 0x0001, 13}, /*XGI_ExtLCDDes1024x768x75Data*/
+ {Panel_1024x768x75, 0x0019, 0x0000, 14}, /* XGI_StLCDDes1024x768x75Data*/
+ {Panel_1024x768x75, 0x0018, 0x0010, 15}, /*XGI_CetLCDDes1024x768x75Data*/
/* XGI_ExtLCDDes1280x1024x75Data */
- {Panel1280x1024x75, 0x0019, 0x0001, 16},
+ {Panel_1280x1024x75, 0x0019, 0x0001, 16},
/* XGI_StLCDDes1280x1024x75Data */
- {Panel1280x1024x75, 0x0019, 0x0000, 17},
+ {Panel_1280x1024x75, 0x0019, 0x0000, 17},
/* XGI_CetLCDDes1280x1024x75Data */
- {Panel1280x1024x75, 0x0018, 0x0010, 18},
+ {Panel_1280x1024x75, 0x0018, 0x0010, 18},
{PanelRef75Hz, 0x0008, 0x0008, 19}, /* XGI_NoScalingDesDatax75 */
{0xFF, 0x0000, 0x0000, 0}
};
static struct XGI330_LCDDataTablStruct xgifb_epllcd_crt1[] = {
- {Panel1024x768, 0x0018, 0x0000, 0}, /* XGI_LVDSCRT11024x768_1 */
- {Panel1024x768, 0x0018, 0x0010, 1}, /* XGI_LVDSCRT11024x768_2 */
- {Panel1280x1024, 0x0018, 0x0000, 2}, /* XGI_LVDSCRT11280x1024_1 */
- {Panel1280x1024, 0x0018, 0x0010, 3}, /* XGI_LVDSCRT11280x1024_2 */
- {Panel1400x1050, 0x0018, 0x0000, 4}, /* XGI_LVDSCRT11400x1050_1 */
- {Panel1400x1050, 0x0018, 0x0010, 5}, /* XGI_LVDSCRT11400x1050_2 */
- {Panel1600x1200, 0x0018, 0x0000, 6}, /* XGI_LVDSCRT11600x1200_1 */
- {Panel1024x768x75, 0x0018, 0x0000, 7}, /* XGI_LVDSCRT11024x768_1x75 */
- {Panel1024x768x75, 0x0018, 0x0010, 8}, /* XGI_LVDSCRT11024x768_2x75 */
- {Panel1280x1024x75, 0x0018, 0x0000, 9}, /*XGI_LVDSCRT11280x1024_1x75*/
- {Panel1280x1024x75, 0x0018, 0x0010, 10},/*XGI_LVDSCRT11280x1024_2x75*/
+ {Panel_1024x768, 0x0018, 0x0000, 0}, /* XGI_LVDSCRT11024x768_1 */
+ {Panel_1024x768, 0x0018, 0x0010, 1}, /* XGI_LVDSCRT11024x768_2 */
+ {Panel_1280x1024, 0x0018, 0x0000, 2}, /* XGI_LVDSCRT11280x1024_1 */
+ {Panel_1280x1024, 0x0018, 0x0010, 3}, /* XGI_LVDSCRT11280x1024_2 */
+ {Panel_1400x1050, 0x0018, 0x0000, 4}, /* XGI_LVDSCRT11400x1050_1 */
+ {Panel_1400x1050, 0x0018, 0x0010, 5}, /* XGI_LVDSCRT11400x1050_2 */
+ {Panel_1600x1200, 0x0018, 0x0000, 6}, /* XGI_LVDSCRT11600x1200_1 */
+ {Panel_1024x768x75, 0x0018, 0x0000, 7}, /* XGI_LVDSCRT11024x768_1x75 */
+ {Panel_1024x768x75, 0x0018, 0x0010, 8}, /* XGI_LVDSCRT11024x768_2x75 */
+ {Panel_1280x1024x75, 0x0018, 0x0000, 9}, /*XGI_LVDSCRT11280x1024_1x75*/
+ {Panel_1280x1024x75, 0x0018, 0x0010, 10},/*XGI_LVDSCRT11280x1024_2x75*/
{0xFF, 0x0000, 0x0000, 0}
};
static struct XGI330_LCDDataTablStruct XGI_EPLLCDDataPtr[] = {
- {Panel1024x768, 0x0018, 0x0000, 0}, /* XGI_LVDS1024x768Data_1 */
- {Panel1024x768, 0x0018, 0x0010, 1}, /* XGI_LVDS1024x768Data_2 */
- {Panel1280x1024, 0x0018, 0x0000, 2}, /* XGI_LVDS1280x1024Data_1 */
- {Panel1280x1024, 0x0018, 0x0010, 3}, /* XGI_LVDS1280x1024Data_2 */
- {Panel1400x1050, 0x0018, 0x0000, 4}, /* XGI_LVDS1400x1050Data_1 */
- {Panel1400x1050, 0x0018, 0x0010, 5}, /* XGI_LVDS1400x1050Data_2 */
- {Panel1600x1200, 0x0018, 0x0000, 6}, /* XGI_LVDS1600x1200Data_1 */
+ {Panel_1024x768, 0x0018, 0x0000, 0}, /* XGI_LVDS1024x768Data_1 */
+ {Panel_1024x768, 0x0018, 0x0010, 1}, /* XGI_LVDS1024x768Data_2 */
+ {Panel_1280x1024, 0x0018, 0x0000, 2}, /* XGI_LVDS1280x1024Data_1 */
+ {Panel_1280x1024, 0x0018, 0x0010, 3}, /* XGI_LVDS1280x1024Data_2 */
+ {Panel_1400x1050, 0x0018, 0x0000, 4}, /* XGI_LVDS1400x1050Data_1 */
+ {Panel_1400x1050, 0x0018, 0x0010, 5}, /* XGI_LVDS1400x1050Data_2 */
+ {Panel_1600x1200, 0x0018, 0x0000, 6}, /* XGI_LVDS1600x1200Data_1 */
{PanelRef60Hz, 0x0008, 0x0008, 7}, /* XGI_LVDSNoScalingData */
- {Panel1024x768x75, 0x0018, 0x0000, 8}, /* XGI_LVDS1024x768Data_1x75 */
- {Panel1024x768x75, 0x0018, 0x0010, 9}, /* XGI_LVDS1024x768Data_2x75 */
- {Panel1280x1024x75, 0x0018, 0x0000, 10}, /* XGI_LVDS1280x1024Data_1x75*/
- {Panel1280x1024x75, 0x0018, 0x0010, 11}, /*XGI_LVDS1280x1024Data_2x75*/
+ {Panel_1024x768x75, 0x0018, 0x0000, 8}, /* XGI_LVDS1024x768Data_1x75 */
+ {Panel_1024x768x75, 0x0018, 0x0010, 9}, /* XGI_LVDS1024x768Data_2x75 */
+ {Panel_1280x1024x75, 0x0018, 0x0000, 10}, /* XGI_LVDS1280x1024Data_1x75*/
+ {Panel_1280x1024x75, 0x0018, 0x0010, 11}, /*XGI_LVDS1280x1024Data_2x75*/
{PanelRef75Hz, 0x0008, 0x0008, 12}, /* XGI_LVDSNoScalingDatax75 */
{0xFF, 0x0000, 0x0000, 0}
};
static struct XGI330_LCDDataTablStruct XGI_EPLLCDDesDataPtr[] = {
- {Panel1024x768, 0x0018, 0x0000, 0}, /* XGI_LVDS1024x768Des_1 */
- {Panel1024x768, 0x0618, 0x0410, 1}, /* XGI_LVDS1024x768Des_3 */
- {Panel1024x768, 0x0018, 0x0010, 2}, /* XGI_LVDS1024x768Des_2 */
- {Panel1280x1024, 0x0018, 0x0000, 3}, /* XGI_LVDS1280x1024Des_1 */
- {Panel1280x1024, 0x0018, 0x0010, 4}, /* XGI_LVDS1280x1024Des_2 */
- {Panel1400x1050, 0x0018, 0x0000, 5}, /* XGI_LVDS1400x1050Des_1 */
- {Panel1400x1050, 0x0018, 0x0010, 6}, /* XGI_LVDS1400x1050Des_2 */
- {Panel1600x1200, 0x0018, 0x0000, 7}, /* XGI_LVDS1600x1200Des_1 */
+ {Panel_1024x768, 0x0018, 0x0000, 0}, /* XGI_LVDS1024x768Des_1 */
+ {Panel_1024x768, 0x0618, 0x0410, 1}, /* XGI_LVDS1024x768Des_3 */
+ {Panel_1024x768, 0x0018, 0x0010, 2}, /* XGI_LVDS1024x768Des_2 */
+ {Panel_1280x1024, 0x0018, 0x0000, 3}, /* XGI_LVDS1280x1024Des_1 */
+ {Panel_1280x1024, 0x0018, 0x0010, 4}, /* XGI_LVDS1280x1024Des_2 */
+ {Panel_1400x1050, 0x0018, 0x0000, 5}, /* XGI_LVDS1400x1050Des_1 */
+ {Panel_1400x1050, 0x0018, 0x0010, 6}, /* XGI_LVDS1400x1050Des_2 */
+ {Panel_1600x1200, 0x0018, 0x0000, 7}, /* XGI_LVDS1600x1200Des_1 */
{PanelRef60Hz, 0x0008, 0x0008, 8}, /* XGI_LVDSNoScalingDesData */
- {Panel1024x768x75, 0x0018, 0x0000, 9}, /* XGI_LVDS1024x768Des_1x75 */
- {Panel1024x768x75, 0x0618, 0x0410, 10}, /* XGI_LVDS1024x768Des_3x75 */
- {Panel1024x768x75, 0x0018, 0x0010, 11}, /* XGI_LVDS1024x768Des_2x75 */
- {Panel1280x1024x75, 0x0018, 0x0000, 12}, /* XGI_LVDS1280x1024Des_1x75 */
- {Panel1280x1024x75, 0x0018, 0x0010, 13}, /* XGI_LVDS1280x1024Des_2x75 */
+ {Panel_1024x768x75, 0x0018, 0x0000, 9}, /* XGI_LVDS1024x768Des_1x75 */
+ {Panel_1024x768x75, 0x0618, 0x0410, 10}, /* XGI_LVDS1024x768Des_3x75 */
+ {Panel_1024x768x75, 0x0018, 0x0010, 11}, /* XGI_LVDS1024x768Des_2x75 */
+ {Panel_1280x1024x75, 0x0018, 0x0000, 12}, /* XGI_LVDS1280x1024Des_1x75 */
+ {Panel_1280x1024x75, 0x0018, 0x0010, 13}, /* XGI_LVDS1280x1024Des_2x75 */
{PanelRef75Hz, 0x0008, 0x0008, 14}, /* XGI_LVDSNoScalingDesDatax75 */
{0xFF, 0x0000, 0x0000, 0}
};
static struct XGI330_LCDDataTablStruct XGI_EPLCHLCDRegPtr[] = {
- {Panel1024x768, 0x0000, 0x0000, 0}, /* XGI_CH7017LV1024x768 */
- {Panel1400x1050, 0x0000, 0x0000, 1}, /* XGI_CH7017LV1400x1050 */
+ {Panel_1024x768, 0x0000, 0x0000, 0}, /* XGI_CH7017LV1024x768 */
+ {Panel_1400x1050, 0x0000, 0x0000, 1}, /* XGI_CH7017LV1400x1050 */
{0xFF, 0x0000, 0x0000, 0}
};
@@ -2501,225 +2501,225 @@ static unsigned short LCDLenList[] = {
/* Dual link only */
static struct XGI330_LCDCapStruct XGI_LCDDLCapList[] = {
/* LCDCap1024x768 */
- {Panel1024x768, DefaultLCDCap, 0, 0x012, 0x88, 0x06, VCLK65,
+ {Panel_1024x768, DefaultLCDCap, 0, 0x012, 0x88, 0x06, VCLK65_315,
0x6C, 0xC3, 0x35, 0x62, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10},
/* LCDCap1280x1024 */
- {Panel1280x1024, LCDDualLink+DefaultLCDCap, StLCDBToA,
- 0x012, 0x70, 0x03, VCLK108_2,
+ {Panel_1280x1024, XGI_LCDDualLink+DefaultLCDCap, StLCDBToA,
+ 0x012, 0x70, 0x03, VCLK108_2_315,
0x70, 0x44, 0xF8, 0x2F, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
/* LCDCap1400x1050 */
- {Panel1400x1050, LCDDualLink+DefaultLCDCap, StLCDBToA,
- 0x012, 0x70, 0x03, VCLK108_2,
+ {Panel_1400x1050, XGI_LCDDualLink+DefaultLCDCap, StLCDBToA,
+ 0x012, 0x70, 0x03, VCLK108_2_315,
0x70, 0x44, 0xF8, 0x2F, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
/* LCDCap1600x1200 */
- {Panel1600x1200, LCDDualLink+DefaultLCDCap, LCDToFull,
+ {Panel_1600x1200, XGI_LCDDualLink+DefaultLCDCap, LCDToFull,
0x012, 0xC0, 0x03, VCLK162,
0x43, 0x22, 0x70, 0x24, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
/* LCDCap1024x768x75 */
- {Panel1024x768x75, DefaultLCDCap, 0, 0x012, 0x60, 0, VCLK78_75,
+ {Panel_1024x768x75, DefaultLCDCap, 0, 0x012, 0x60, 0, VCLK78_75,
0x2B, 0x61, 0x2B, 0x61, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10},
/* LCDCap1280x1024x75 */
- {Panel1280x1024x75, LCDDualLink+DefaultLCDCap, StLCDBToA,
+ {Panel_1280x1024x75, XGI_LCDDualLink+DefaultLCDCap, StLCDBToA,
0x012, 0x90, 0x03, VCLK135_5,
0x54, 0x42, 0x4A, 0x61, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
/* LCDCapDefault */
- {0xFF, DefaultLCDCap, 0, 0x012, 0x88, 0x06, VCLK65,
+ {0xFF, DefaultLCDCap, 0, 0x012, 0x88, 0x06, VCLK65_315,
0x6C, 0xC3, 0x35, 0x62, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10}
};
static struct XGI330_LCDCapStruct XGI_LCDCapList[] = {
/* LCDCap1024x768 */
- {Panel1024x768, DefaultLCDCap, 0, 0x012, 0x88, 0x06, VCLK65,
+ {Panel_1024x768, DefaultLCDCap, 0, 0x012, 0x88, 0x06, VCLK65_315,
0x6C, 0xC3, 0x35, 0x62, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10},
/* LCDCap1280x1024 */
- {Panel1280x1024, DefaultLCDCap, StLCDBToA,
- 0x012, 0x70, 0x03, VCLK108_2,
+ {Panel_1280x1024, DefaultLCDCap, StLCDBToA,
+ 0x012, 0x70, 0x03, VCLK108_2_315,
0x70, 0x44, 0xF8, 0x2F, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
/* LCDCap1400x1050 */
- {Panel1400x1050, DefaultLCDCap, StLCDBToA,
- 0x012, 0x70, 0x03, VCLK108_2,
+ {Panel_1400x1050, DefaultLCDCap, StLCDBToA,
+ 0x012, 0x70, 0x03, VCLK108_2_315,
0x70, 0x44, 0xF8, 0x2F, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
/* LCDCap1600x1200 */
- {Panel1600x1200, DefaultLCDCap, LCDToFull,
+ {Panel_1600x1200, DefaultLCDCap, LCDToFull,
0x012, 0xC0, 0x03, VCLK162,
0x5A, 0x23, 0x5A, 0x23, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
/* LCDCap1024x768x75 */
- {Panel1024x768x75, DefaultLCDCap, 0, 0x012, 0x60, 0, VCLK78_75,
+ {Panel_1024x768x75, DefaultLCDCap, 0, 0x012, 0x60, 0, VCLK78_75,
0x2B, 0x61, 0x2B, 0x61, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10},
/* LCDCap1280x1024x75 */
- {Panel1280x1024x75, DefaultLCDCap, StLCDBToA,
+ {Panel_1280x1024x75, DefaultLCDCap, StLCDBToA,
0x012, 0x90, 0x03, VCLK135_5,
0x54, 0x42, 0x4A, 0x61, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x30, 0x10},
/* LCDCapDefault */
- {0xFF, DefaultLCDCap, 0, 0x012, 0x88, 0x06, VCLK65,
+ {0xFF, DefaultLCDCap, 0, 0x012, 0x88, 0x06, VCLK65_315,
0x6C, 0xC3, 0x35, 0x62, 0x02, 0x14, 0x0A, 0x02, 0x00,
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10}
};
static struct XGI_Ext2Struct XGI330_RefIndex[] = {
- {Support32Bpp + SupportAllCRT2 + SyncPN, RES320x200, VCLK25_175,
+ {Mode32Bpp + SupportAllCRT2 + SyncPN, RES320x200, VCLK25_175,
0x00, 0x10, 0x59, 320, 200},/* 00 */
- {Support32Bpp + SupportAllCRT2 + SyncPN, RES320x200, VCLK25_175,
+ {Mode32Bpp + SupportAllCRT2 + SyncPN, RES320x200, VCLK25_175,
0x00, 0x10, 0x00, 320, 400},/* 01 */
- {Support32Bpp + SupportAllCRT2 + SyncNN, RES320x240, VCLK25_175,
+ {Mode32Bpp + SupportAllCRT2 + SyncNN, RES320x240, VCLK25_175,
0x04, 0x20, 0x50, 320, 240},/* 02 */
- {Support32Bpp + SupportAllCRT2 + SyncPP, RES400x300, VCLK40,
+ {Mode32Bpp + SupportAllCRT2 + SyncPP, RES400x300, VCLK40,
0x05, 0x32, 0x51, 400, 300},/* 03 */
- {Support32Bpp + NoSupportTV + SyncNN + SupportTV1024, RES512x384,
- VCLK65, 0x06, 0x43, 0x52, 512, 384},/* 04 */
- {Support32Bpp + SupportAllCRT2 + SyncPN, RES640x400, VCLK25_175,
+ {Mode32Bpp + NoSupportTV + SyncNN + SupportTV1024, RES512x384,
+ VCLK65_315, 0x06, 0x43, 0x52, 512, 384},/* 04 */
+ {Mode32Bpp + SupportAllCRT2 + SyncPN, RES640x400, VCLK25_175,
0x00, 0x14, 0x2f, 640, 400},/* 05 */
- {Support32Bpp + SupportAllCRT2 + SyncNN, RES640x480x60, VCLK25_175,
+ {Mode32Bpp + SupportAllCRT2 + SyncNN, RES640x480x60, VCLK25_175,
0x04, 0x24, 0x2e, 640, 480},/* 06 640x480x60Hz (LCD 640x480x60z) */
- {Support32Bpp + NoSupportHiVisionTV + SyncNN, RES640x480x72, VCLK31_5,
+ {Mode32Bpp + NoSupportHiVisionTV + SyncNN, RES640x480x72, VCLK31_5,
0x04, 0x24, 0x2e, 640, 480},/* 07 640x480x72Hz (LCD 640x480x70Hz) */
- {Support32Bpp + NoSupportHiVisionTV + SyncNN, RES640x480x75, VCLK31_5,
+ {Mode32Bpp + NoSupportHiVisionTV + SyncNN, RES640x480x75, VCLK31_5,
0x47, 0x24, 0x2e, 640, 480},/* 08 640x480x75Hz (LCD 640x480x75Hz) */
- {Support32Bpp + SupportRAMDAC2 + SyncNN, RES640x480x85, VCLK36,
+ {Mode32Bpp + SupportRAMDAC2 + SyncNN, RES640x480x85, VCLK36,
0x8A, 0x24, 0x2e, 640, 480},/* 09 640x480x85Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPN, RES640x480x100, VCLK43_163,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES640x480x100, VCLK43_163,
0x00, 0x24, 0x2e, 640, 480},/* 0a 640x480x100Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPN, RES640x480x120, VCLK52_406,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES640x480x120, VCLK52_406,
0x00, 0x24, 0x2e, 640, 480},/* 0b 640x480x120Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPN, RES640x480x160, VCLK72_852,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES640x480x160, VCLK72_852,
0x00, 0x24, 0x2e, 640, 480},/* 0c 640x480x160Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncNN, RES640x480x200, VCLK86_6,
+ {Mode32Bpp + SupportRAMDAC2 + SyncNN, RES640x480x200, VCLK86_6,
0x00, 0x24, 0x2e, 640, 480},/* 0d 640x480x200Hz */
- {Support32Bpp + NoSupportLCD + SyncPP, RES800x600x56, VCLK36,
+ {Mode32Bpp + NoSupportLCD + SyncPP, RES800x600x56, VCLK36,
0x05, 0x36, 0x6a, 800, 600},/* 0e 800x600x56Hz */
- {Support32Bpp + NoSupportTV + SyncPP, RES800x600x60, VCLK40,
+ {Mode32Bpp + NoSupportTV + SyncPP, RES800x600x60, VCLK40,
0x05, 0x36, 0x6a, 800, 600},/* 0f 800x600x60Hz (LCD 800x600x60Hz) */
- {Support32Bpp + NoSupportHiVisionTV + SyncPP, RES800x600x72, VCLK50,
+ {Mode32Bpp + NoSupportHiVisionTV + SyncPP, RES800x600x72, VCLK50,
0x48, 0x36, 0x6a, 800, 600},/* 10 800x600x72Hz (LCD 800x600x70Hz) */
- {Support32Bpp + NoSupportHiVisionTV + SyncPP, RES800x600x75, VCLK49_5,
+ {Mode32Bpp + NoSupportHiVisionTV + SyncPP, RES800x600x75, VCLK49_5,
0x8B, 0x36, 0x6a, 800, 600},/* 11 800x600x75Hz (LCD 800x600x75Hz) */
- {Support32Bpp + SupportRAMDAC2 + SyncPP, RES800x600x85, VCLK56_25,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES800x600x85, VCLK56_25,
0x00, 0x36, 0x6a, 800, 600},/* 12 800x600x85Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPN, RES800x600x100, VCLK68_179,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES800x600x100, VCLK68_179,
0x00, 0x36, 0x6a, 800, 600},/* 13 800x600x100Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPN, RES800x600x120, VCLK83_95,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES800x600x120, VCLK83_95,
0x00, 0x36, 0x6a, 800, 600},/* 14 800x600x120Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPN, RES800x600x160, VCLK116_406,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES800x600x160, VCLK116_406,
0x00, 0x36, 0x6a, 800, 600},/* 15 800x600x160Hz */
- {Support32Bpp + InterlaceMode + SyncPP, RES1024x768x43, VCLK44_9,
+ {Mode32Bpp + InterlaceMode + SyncPP, RES1024x768x43, VCLK44_9,
0x00, 0x47, 0x37, 1024, 768},/* 16 1024x768x43Hz */
/* 17 1024x768x60Hz (LCD 1024x768x60Hz) */
- {Support32Bpp + NoSupportTV + SyncNN + SupportTV1024, RES1024x768x60,
- VCLK65, 0x06, 0x47, 0x37, 1024, 768},
- {Support32Bpp + NoSupportHiVisionTV + SyncNN, RES1024x768x70, VCLK75,
+ {Mode32Bpp + NoSupportTV + SyncNN + SupportTV1024, RES1024x768x60,
+ VCLK65_315, 0x06, 0x47, 0x37, 1024, 768},
+ {Mode32Bpp + NoSupportHiVisionTV + SyncNN, RES1024x768x70, VCLK75,
0x49, 0x47, 0x37, 1024, 768},/* 18 1024x768x70Hz (LCD 1024x768x70Hz) */
- {Support32Bpp + NoSupportHiVisionTV + SyncPP, RES1024x768x75, VCLK78_75,
+ {Mode32Bpp + NoSupportHiVisionTV + SyncPP, RES1024x768x75, VCLK78_75,
0x00, 0x47, 0x37, 1024, 768},/* 19 1024x768x75Hz (LCD 1024x768x75Hz) */
- {Support32Bpp + SupportRAMDAC2 + SyncPP, RES1024x768x85, VCLK94_5,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES1024x768x85, VCLK94_5,
0x8C, 0x47, 0x37, 1024, 768},/* 1a 1024x768x85Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPN, RES1024x768x100, VCLK113_309,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES1024x768x100, VCLK113_309,
0x00, 0x47, 0x37, 1024, 768},/* 1b 1024x768x100Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPN, RES1024x768x120, VCLK139_054,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES1024x768x120, VCLK139_054,
0x00, 0x47, 0x37, 1024, 768},/* 1c 1024x768x120Hz */
- {Support32Bpp + SupportLCD + SyncPP, RES1280x960x60, VCLK108_2,
+ {Mode32Bpp + SupportLCD + SyncPP, RES1280x960x60, VCLK108_2_315,
0x08, 0x58, 0x7b, 1280, 960},/* 1d 1280x960x60Hz */
- {Support32Bpp + InterlaceMode + SyncPP, RES1280x1024x43, VCLK78_75,
+ {Mode32Bpp + InterlaceMode + SyncPP, RES1280x1024x43, VCLK78_75,
0x00, 0x58, 0x3a, 1280, 1024},/* 1e 1280x1024x43Hz */
- {Support32Bpp + NoSupportTV + SyncPP, RES1280x1024x60, VCLK108_2,
+ {Mode32Bpp + NoSupportTV + SyncPP, RES1280x1024x60, VCLK108_2_315,
0x07, 0x58, 0x3a, 1280, 1024},/*1f 1280x1024x60Hz (LCD 1280x1024x60Hz)*/
- {Support32Bpp + NoSupportTV + SyncPP, RES1280x1024x75, VCLK135_5,
+ {Mode32Bpp + NoSupportTV + SyncPP, RES1280x1024x75, VCLK135_5,
0x00, 0x58, 0x3a, 1280, 1024},/*20 1280x1024x75Hz (LCD 1280x1024x75Hz)*/
- {Support32Bpp + SyncPP, RES1280x1024x85, VCLK157_5,
+ {Mode32Bpp + SyncPP, RES1280x1024x85, VCLK157_5,
0x00, 0x58, 0x3a, 1280, 1024},/* 21 1280x1024x85Hz */
/* 22 1600x1200x60Hz */
- {Support32Bpp + SupportLCD + SyncPP + SupportCRT2in301C,
+ {Mode32Bpp + SupportLCD + SyncPP + SupportCRT2in301C,
RES1600x1200x60, VCLK162, 0x09, 0x7A, 0x3c, 1600, 1200},
- {Support32Bpp + SyncPP + SupportCRT2in301C, RES1600x1200x65, VCLK175,
+ {Mode32Bpp + SyncPP + SupportCRT2in301C, RES1600x1200x65, VCLK175,
0x00, 0x69, 0x3c, 1600, 1200},/* 23 1600x1200x65Hz */
- {Support32Bpp + SyncPP + SupportCRT2in301C, RES1600x1200x70, VCLK189,
+ {Mode32Bpp + SyncPP + SupportCRT2in301C, RES1600x1200x70, VCLK189,
0x00, 0x69, 0x3c, 1600, 1200},/* 24 1600x1200x70Hz */
- {Support32Bpp + SyncPP + SupportCRT2in301C, RES1600x1200x75, VCLK202_5,
+ {Mode32Bpp + SyncPP + SupportCRT2in301C, RES1600x1200x75, VCLK202_5,
0x00, 0x69, 0x3c, 1600, 1200},/* 25 1600x1200x75Hz */
- {Support32Bpp + SyncPP, RES1600x1200x85, VCLK229_5,
+ {Mode32Bpp + SyncPP, RES1600x1200x85, VCLK229_5,
0x00, 0x69, 0x3c, 1600, 1200},/* 26 1600x1200x85Hz */
- {Support32Bpp + SyncPP, RES1600x1200x100, VCLK269_655,
+ {Mode32Bpp + SyncPP, RES1600x1200x100, VCLK269_655,
0x00, 0x69, 0x3c, 1600, 1200},/* 27 1600x1200x100Hz */
- {Support32Bpp + SyncPP, RES1600x1200x120, VCLK323_586,
+ {Mode32Bpp + SyncPP, RES1600x1200x120, VCLK323_586,
0x00, 0x69, 0x3c, 1600, 1200},/* 28 1600x1200x120Hz */
- {Support32Bpp + SupportLCD + SyncNP, RES1920x1440x60, VCLK234,
+ {Mode32Bpp + SupportLCD + SyncNP, RES1920x1440x60, VCLK234,
0x00, 0x00, 0x68, 1920, 1440},/* 29 1920x1440x60Hz */
- {Support32Bpp + SyncPN, RES1920x1440x65, VCLK254_817,
+ {Mode32Bpp + SyncPN, RES1920x1440x65, VCLK254_817,
0x00, 0x00, 0x68, 1920, 1440},/* 2a 1920x1440x65Hz */
- {Support32Bpp + SyncPN, RES1920x1440x70, VCLK277_015,
+ {Mode32Bpp + SyncPN, RES1920x1440x70, VCLK277_015,
0x00, 0x00, 0x68, 1920, 1440},/* 2b 1920x1440x70Hz */
- {Support32Bpp + SyncPN, RES1920x1440x75, VCLK291_132,
+ {Mode32Bpp + SyncPN, RES1920x1440x75, VCLK291_132,
0x00, 0x00, 0x68, 1920, 1440},/* 2c 1920x1440x75Hz */
- {Support32Bpp + SyncPN, RES1920x1440x85, VCLK330_615,
+ {Mode32Bpp + SyncPN, RES1920x1440x85, VCLK330_615,
0x00, 0x00, 0x68, 1920, 1440},/* 2d 1920x1440x85Hz */
- {Support16Bpp + SyncPN, RES1920x1440x100, VCLK388_631,
+ {Mode16Bpp + SyncPN, RES1920x1440x100, VCLK388_631,
0x00, 0x00, 0x68, 1920, 1440},/* 2e 1920x1440x100Hz */
- {Support32Bpp + SupportLCD + SyncPN, RES2048x1536x60, VCLK266_952,
+ {Mode32Bpp + SupportLCD + SyncPN, RES2048x1536x60, VCLK266_952,
0x00, 0x00, 0x6c, 2048, 1536},/* 2f 2048x1536x60Hz */
- {Support32Bpp + SyncPN, RES2048x1536x65, VCLK291_766,
+ {Mode32Bpp + SyncPN, RES2048x1536x65, VCLK291_766,
0x00, 0x00, 0x6c, 2048, 1536},/* 30 2048x1536x65Hz */
- {Support32Bpp + SyncPN, RES2048x1536x70, VCLK315_195,
+ {Mode32Bpp + SyncPN, RES2048x1536x70, VCLK315_195,
0x00, 0x00, 0x6c, 2048, 1536},/* 31 2048x1536x70Hz */
- {Support32Bpp + SyncPN, RES2048x1536x75, VCLK340_477,
+ {Mode32Bpp + SyncPN, RES2048x1536x75, VCLK340_477,
0x00, 0x00, 0x6c, 2048, 1536},/* 32 2048x1536x75Hz */
- {Support16Bpp + SyncPN, RES2048x1536x85, VCLK375_847,
+ {Mode16Bpp + SyncPN, RES2048x1536x85, VCLK375_847,
0x00, 0x00, 0x6c, 2048, 1536},/* 33 2048x1536x85Hz */
- {Support32Bpp + SupportHiVisionTV + SupportRAMDAC2 +
- SyncPP + SupportYPbPr, RES800x480x60, VCLK39_77,
+ {Mode32Bpp + SupportHiVision + SupportRAMDAC2 +
+ SyncPP + SupportYPbPr750p, RES800x480x60, VCLK39_77,
0x08, 0x00, 0x70, 800, 480},/* 34 800x480x60Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPP, RES800x480x75, VCLK49_5,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES800x480x75, VCLK49_5,
0x08, 0x00, 0x70, 800, 480},/* 35 800x480x75Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPP, RES800x480x85, VCLK56_25,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES800x480x85, VCLK56_25,
0x08, 0x00, 0x70, 800, 480},/* 36 800x480x85Hz */
- {Support32Bpp + SupportHiVisionTV + SupportRAMDAC2 +
- SyncPP + SupportYPbPr, RES1024x576x60, VCLK65,
+ {Mode32Bpp + SupportHiVision + SupportRAMDAC2 +
+ SyncPP + SupportYPbPr750p, RES1024x576x60, VCLK65_315,
0x09, 0x00, 0x71, 1024, 576},/* 37 1024x576x60Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPP, RES1024x576x75, VCLK78_75,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES1024x576x75, VCLK78_75,
0x09, 0x00, 0x71, 1024, 576},/* 38 1024x576x75Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPP, RES1024x576x85, VCLK94_5,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES1024x576x85, VCLK94_5,
0x09, 0x00, 0x71, 1024, 576},/* 39 1024x576x85Hz */
- {Support32Bpp + SupportHiVisionTV + SupportRAMDAC2 +
- SyncPP + SupportYPbPr, RES1280x720x60, VCLK108_2,
+ {Mode32Bpp + SupportHiVision + SupportRAMDAC2 +
+ SyncPP + SupportYPbPr750p, RES1280x720x60, VCLK108_2_315,
0x0A, 0x00, 0x75, 1280, 720},/* 3a 1280x720x60Hz*/
- {Support32Bpp + SupportRAMDAC2 + SyncPP, RES1280x720x75, VCLK135_5,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES1280x720x75, VCLK135_5,
0x0A, 0x00, 0x75, 1280, 720},/* 3b 1280x720x75Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPP, RES1280x720x85, VCLK157_5,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES1280x720x85, VCLK157_5,
0x0A, 0x00, 0x75, 1280, 720},/* 3c 1280x720x85Hz */
- {Support32Bpp + SupportTV + SyncNN, RES720x480x60, VCLK28_322,
+ {Mode32Bpp + SupportTV + SyncNN, RES720x480x60, VCLK28_322,
0x06, 0x00, 0x31, 720, 480},/* 3d 720x480x60Hz */
- {Support32Bpp + SupportTV + SyncPP, RES720x576x56, VCLK36,
+ {Mode32Bpp + SupportTV + SyncPP, RES720x576x56, VCLK36,
0x06, 0x00, 0x32, 720, 576},/* 3e 720x576x56Hz */
- {Support32Bpp + InterlaceMode + NoSupportLCD + SyncPP, RES856x480x79I,
+ {Mode32Bpp + InterlaceMode + NoSupportLCD + SyncPP, RES856x480x79I,
VCLK35_2, 0x00, 0x00, 0x00, 856, 480},/* 3f 856x480x79I */
- {Support32Bpp + NoSupportLCD + SyncNN, RES856x480x60, VCLK35_2,
+ {Mode32Bpp + NoSupportLCD + SyncNN, RES856x480x60, VCLK35_2,
0x00, 0x00, 0x00, 856, 480},/* 40 856x480x60Hz */
- {Support32Bpp + NoSupportHiVisionTV + SyncPP, RES1280x768x60,
+ {Mode32Bpp + NoSupportHiVisionTV + SyncPP, RES1280x768x60,
VCLK79_411, 0x08, 0x48, 0x23, 1280, 768},/* 41 1280x768x60Hz */
- {Support32Bpp + NoSupportHiVisionTV + SyncPP, RES1400x1050x60,
+ {Mode32Bpp + NoSupportHiVisionTV + SyncPP, RES1400x1050x60,
VCLK122_61, 0x08, 0x69, 0x26, 1400, 1050},/* 42 1400x1050x60Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPP, RES1152x864x60, VCLK80_350,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES1152x864x60, VCLK80_350,
0x37, 0x00, 0x20, 1152, 864},/* 43 1152x864x60Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPP, RES1152x864x75, VCLK107_385,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES1152x864x75, VCLK107_385,
0x37, 0x00, 0x20, 1152, 864},/* 44 1152x864x75Hz */
- {Support32Bpp + SupportLCD + SupportRAMDAC2 + SyncPP, RES1280x960x75,
+ {Mode32Bpp + SupportLCD + SupportRAMDAC2 + SyncPP, RES1280x960x75,
VCLK125_999, 0x3A, 0x88, 0x7b, 1280, 960},/* 45 1280x960x75Hz */
- {Support32Bpp + SupportLCD + SupportRAMDAC2 + SyncPP, RES1280x960x85,
+ {Mode32Bpp + SupportLCD + SupportRAMDAC2 + SyncPP, RES1280x960x85,
VCLK148_5, 0x0A, 0x88, 0x7b, 1280, 960},/* 46 1280x960x85Hz */
- {Support32Bpp + SupportLCD + SupportRAMDAC2 + SyncPP, RES1280x960x120,
+ {Mode32Bpp + SupportLCD + SupportRAMDAC2 + SyncPP, RES1280x960x120,
VCLK217_325, 0x3A, 0x88, 0x7b, 1280, 960},/* 47 1280x960x120Hz */
- {Support32Bpp + SupportRAMDAC2 + SyncPN, RES1024x768x160, VCLK139_054,
+ {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES1024x768x160, VCLK139_054,
0x30, 0x47, 0x37, 1024, 768},/* 48 1024x768x160Hz */
};
@@ -2729,7 +2729,7 @@ static unsigned char XGI330_ScreenOffset[] = {
0x57, 0x48
};
-static struct XGI_StResInfoStruct XGI330_StResInfo[] = {
+static struct SiS_StResInfo_S XGI330_StResInfo[] = {
{640, 400},
{640, 350},
{720, 400},
@@ -2737,7 +2737,7 @@ static struct XGI_StResInfoStruct XGI330_StResInfo[] = {
{640, 480}
};
-static struct XGI_ModeResInfoStruct XGI330_ModeResInfo[] = {
+static struct SiS_ModeResInfo_S XGI330_ModeResInfo[] = {
{ 320, 200, 8, 8},
{ 320, 240, 8, 8},
{ 320, 400, 8, 8},
diff --git a/drivers/staging/xgifb/vgatypes.h b/drivers/staging/xgifb/vgatypes.h
index 9e166bbb00c4..a7208e315815 100644
--- a/drivers/staging/xgifb/vgatypes.h
+++ b/drivers/staging/xgifb/vgatypes.h
@@ -2,6 +2,9 @@
#define _VGATYPES_
#include <linux/ioctl.h>
+#include <linux/fb.h> /* for struct fb_var_screeninfo for sis.h */
+#include "../../video/sis/vgatypes.h"
+#include "../../video/sis/sis.h" /* for LCD_TYPE */
#ifndef XGI_VB_CHIP_TYPE
enum XGI_VB_CHIP_TYPE {
@@ -19,6 +22,12 @@ enum XGI_VB_CHIP_TYPE {
};
#endif
+
+#define XGI_LCD_TYPE
+/* Since the merge with video/sis the LCD_TYPEs are used from
+ drivers/video/sis/sis.h . Nevertheless we keep this (for the moment) for
+ future reference until the code is merged completely and we are sure
+ nothing of this should be added to the sis.h header */
#ifndef XGI_LCD_TYPE
enum XGI_LCD_TYPE {
LCD_INVALID = 0,
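The #define XGI_LCD_TYPE added just before the existing #ifndef XGI_LCD_TYPE guard is what actually retires the local enum: with the macro defined, the guarded block below is compiled out and the LCD_TYPE values come from the newly included drivers/video/sis/sis.h, exactly as the new comment explains. A minimal sketch of that guard-out pattern (contents abbreviated):

/* Defining the guard macro up front disables the fallback definition
 * that follows, without deleting it from the source yet. */
#define XGI_LCD_TYPE			/* LCD_TYPE now comes from sis.h */

#ifndef XGI_LCD_TYPE			/* never true any more, so this block is skipped */
enum XGI_LCD_TYPE {
	LCD_INVALID = 0,
	/* ... local fallback values, kept only for reference ... */
};
#endif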
diff --git a/drivers/staging/zcache/Kconfig b/drivers/staging/zcache/Kconfig
index 7fabcb2bc80d..94e48aa9f36b 100644
--- a/drivers/staging/zcache/Kconfig
+++ b/drivers/staging/zcache/Kconfig
@@ -1,13 +1,12 @@
config ZCACHE
tristate "Dynamic compression of swap pages and clean pagecache pages"
- depends on CLEANCACHE || FRONTSWAP
- select XVMALLOC
- select LZO_COMPRESS
- select LZO_DECOMPRESS
+ depends on (CLEANCACHE || FRONTSWAP) && CRYPTO
+ select ZSMALLOC
+ select CRYPTO_LZO
default n
help
Zcache doubles RAM efficiency while providing a significant
- performance boosts on many workloads. Zcache uses lzo1x
+ performance boosts on many workloads. Zcache uses
compression and an in-kernel implementation of transcendent
memory to store clean page cache pages and swap in RAM,
providing a noticeable reduction in disk I/O.
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index ef7c52bb1df9..d7020b774039 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -6,9 +6,10 @@
*
* Zcache provides an in-kernel "host implementation" for transcendent memory
* and, thus indirectly, for cleancache and frontswap. Zcache includes two
- * page-accessible memory [1] interfaces, both utilizing lzo1x compression:
+ * page-accessible memory [1] interfaces, both utilizing the crypto compression
+ * API:
* 1) "compression buddies" ("zbud") is used for ephemeral pages
- * 2) xvmalloc is used for persistent pages.
+ * 2) zsmalloc is used for persistent pages.
* Xvmalloc (based on the TLSF allocator) has very low fragmentation
* so maximizes space efficiency, while zbud allows pairs (and potentially,
* in the future, more than a pair of) compressed pages to be closely linked
@@ -23,15 +24,16 @@
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/list.h>
-#include <linux/lzo.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/math64.h>
+#include <linux/crypto.h>
+#include <linux/string.h>
#include "tmem.h"
-#include "../zram/xvmalloc.h" /* if built in drivers/staging */
+#include "../zsmalloc/zsmalloc.h"
#if (!defined(CONFIG_CLEANCACHE) && !defined(CONFIG_FRONTSWAP))
#error "zcache is useless without CONFIG_CLEANCACHE or CONFIG_FRONTSWAP"
@@ -60,7 +62,7 @@ MODULE_LICENSE("GPL");
struct zcache_client {
struct tmem_pool *tmem_pools[MAX_POOLS_PER_CLIENT];
- struct xv_pool *xvpool;
+ struct zs_pool *zspool;
bool allocated;
atomic_t refcount;
};
@@ -81,6 +83,38 @@ static inline bool is_local_client(struct zcache_client *cli)
return cli == &zcache_host;
}
+/* crypto API for zcache */
+#define ZCACHE_COMP_NAME_SZ CRYPTO_MAX_ALG_NAME
+static char zcache_comp_name[ZCACHE_COMP_NAME_SZ];
+static struct crypto_comp * __percpu *zcache_comp_pcpu_tfms;
+
+enum comp_op {
+ ZCACHE_COMPOP_COMPRESS,
+ ZCACHE_COMPOP_DECOMPRESS
+};
+
+static inline int zcache_comp_op(enum comp_op op,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen)
+{
+ struct crypto_comp *tfm;
+ int ret;
+
+ BUG_ON(!zcache_comp_pcpu_tfms);
+ tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, get_cpu());
+ BUG_ON(!tfm);
+ switch (op) {
+ case ZCACHE_COMPOP_COMPRESS:
+ ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
+ break;
+ case ZCACHE_COMPOP_DECOMPRESS:
+ ret = crypto_comp_decompress(tfm, src, slen, dst, dlen);
+ break;
+ }
+ put_cpu();
+ return ret;
+}
+
/**********
* Compression buddies ("zbud") provides for packing two (or, possibly
* in the future, more) compressed ephemeral pages into a single "raw"
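zcache_comp_op() above becomes the single funnel through which both the zbud and zv paths compress and decompress, replacing the former direct lzo1x_* calls. A minimal sketch of the same crypto compression API in isolation ("lzo" is simply the default this patch falls back to; the *dlen convention mirrors the *out_len handling in zcache_compress() further down: destination capacity in, compressed length out):

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/highmem.h>

/* Sketch: compress one page via the crypto comp API, as
 * zcache_comp_op(ZCACHE_COMPOP_COMPRESS, ...) does above.
 * Before the call, *dlen holds the capacity of dst (zcache passes a
 * PAGE_SIZE << ZCACHE_DSTMEM_ORDER buffer); afterwards it holds the
 * compressed length. */
static int example_compress_page(struct page *page, u8 *dst, unsigned int *dlen)
{
	struct crypto_comp *tfm;
	u8 *src;
	int ret;

	tfm = crypto_alloc_comp("lzo", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	src = kmap_atomic(page, KM_USER0);	/* KM_USER0 as in the code above */
	ret = crypto_comp_compress(tfm, src, PAGE_SIZE, dst, dlen);
	kunmap_atomic(src, KM_USER0);

	crypto_free_comp(tfm);
	return ret;				/* 0 on success */
}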
@@ -407,7 +441,7 @@ static int zbud_decompress(struct page *page, struct zbud_hdr *zh)
{
struct zbud_page *zbpg;
unsigned budnum = zbud_budnum(zh);
- size_t out_len = PAGE_SIZE;
+ unsigned int out_len = PAGE_SIZE;
char *to_va, *from_va;
unsigned size;
int ret = 0;
@@ -424,8 +458,9 @@ static int zbud_decompress(struct page *page, struct zbud_hdr *zh)
to_va = kmap_atomic(page, KM_USER0);
size = zh->size;
from_va = zbud_data(zh, size);
- ret = lzo1x_decompress_safe(from_va, size, to_va, &out_len);
- BUG_ON(ret != LZO_E_OK);
+ ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, from_va, size,
+ to_va, &out_len);
+ BUG_ON(ret);
BUG_ON(out_len != PAGE_SIZE);
kunmap_atomic(to_va, KM_USER0);
out:
@@ -622,8 +657,8 @@ static int zbud_show_cumul_chunk_counts(char *buf)
#endif
/**********
- * This "zv" PAM implementation combines the TLSF-based xvMalloc
- * with lzo1x compression to maximize the amount of data that can
+ * This "zv" PAM implementation combines the slab-based zsmalloc
+ * with the crypto compression API to maximize the amount of data that can
* be packed into a physical page.
*
* Zv represents a PAM page with the index and object (plus a "size" value
@@ -636,6 +671,7 @@ struct zv_hdr {
uint32_t pool_id;
struct tmem_oid oid;
uint32_t index;
+ size_t size;
DECL_SENTINEL
};
@@ -657,72 +693,74 @@ static unsigned int zv_max_mean_zsize = (PAGE_SIZE / 8) * 5;
static atomic_t zv_curr_dist_counts[NCHUNKS];
static atomic_t zv_cumul_dist_counts[NCHUNKS];
-static struct zv_hdr *zv_create(struct xv_pool *xvpool, uint32_t pool_id,
+static struct zv_hdr *zv_create(struct zs_pool *pool, uint32_t pool_id,
struct tmem_oid *oid, uint32_t index,
void *cdata, unsigned clen)
{
- struct page *page;
- struct zv_hdr *zv = NULL;
- uint32_t offset;
- int alloc_size = clen + sizeof(struct zv_hdr);
- int chunks = (alloc_size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
- int ret;
+ struct zv_hdr *zv;
+ u32 size = clen + sizeof(struct zv_hdr);
+ int chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
+ void *handle = NULL;
+ char *buf;
BUG_ON(!irqs_disabled());
BUG_ON(chunks >= NCHUNKS);
- ret = xv_malloc(xvpool, alloc_size,
- &page, &offset, ZCACHE_GFP_MASK);
- if (unlikely(ret))
+ handle = zs_malloc(pool, size);
+ if (!handle)
goto out;
atomic_inc(&zv_curr_dist_counts[chunks]);
atomic_inc(&zv_cumul_dist_counts[chunks]);
- zv = kmap_atomic(page, KM_USER0) + offset;
+ zv = (struct zv_hdr *)((char *)cdata - sizeof(*zv));
zv->index = index;
zv->oid = *oid;
zv->pool_id = pool_id;
+ zv->size = clen;
SET_SENTINEL(zv, ZVH);
- memcpy((char *)zv + sizeof(struct zv_hdr), cdata, clen);
- kunmap_atomic(zv, KM_USER0);
+ buf = zs_map_object(pool, handle);
+ memcpy(buf, zv, clen + sizeof(*zv));
+ zs_unmap_object(pool, handle);
out:
- return zv;
+ return handle;
}
-static void zv_free(struct xv_pool *xvpool, struct zv_hdr *zv)
+static void zv_free(struct zs_pool *pool, void *handle)
{
unsigned long flags;
- struct page *page;
- uint32_t offset;
- uint16_t size = xv_get_object_size(zv);
- int chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
+ struct zv_hdr *zv;
+ uint16_t size;
+ int chunks;
+ zv = zs_map_object(pool, handle);
ASSERT_SENTINEL(zv, ZVH);
+ size = zv->size + sizeof(struct zv_hdr);
+ INVERT_SENTINEL(zv, ZVH);
+ zs_unmap_object(pool, handle);
+
+ chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
BUG_ON(chunks >= NCHUNKS);
atomic_dec(&zv_curr_dist_counts[chunks]);
- size -= sizeof(*zv);
- BUG_ON(size == 0);
- INVERT_SENTINEL(zv, ZVH);
- page = virt_to_page(zv);
- offset = (unsigned long)zv & ~PAGE_MASK;
+
local_irq_save(flags);
- xv_free(xvpool, page, offset);
+ zs_free(pool, handle);
local_irq_restore(flags);
}
-static void zv_decompress(struct page *page, struct zv_hdr *zv)
+static void zv_decompress(struct page *page, void *handle)
{
- size_t clen = PAGE_SIZE;
+ unsigned int clen = PAGE_SIZE;
char *to_va;
- unsigned size;
int ret;
+ struct zv_hdr *zv;
+ zv = zs_map_object(zcache_host.zspool, handle);
+ BUG_ON(zv->size == 0);
ASSERT_SENTINEL(zv, ZVH);
- size = xv_get_object_size(zv) - sizeof(*zv);
- BUG_ON(size == 0);
to_va = kmap_atomic(page, KM_USER0);
- ret = lzo1x_decompress_safe((char *)zv + sizeof(*zv),
- size, to_va, &clen);
+ ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, (char *)zv + sizeof(*zv),
+ zv->size, to_va, &clen);
kunmap_atomic(to_va, KM_USER0);
- BUG_ON(ret != LZO_E_OK);
+ zs_unmap_object(zcache_host.zspool, handle);
+ BUG_ON(ret);
BUG_ON(clen != PAGE_SIZE);
}
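The rewritten zv helpers above show the key semantic shift from xvmalloc to zsmalloc: zs_malloc() hands back an opaque handle rather than a page/offset pair, and the object may only be touched between zs_map_object() and zs_unmap_object(). A stripped-down sketch of that round trip (zs_destroy_pool() is assumed from the zsmalloc API; the pool name and gfp mask here are illustrative, zcache itself uses "zcache" and ZCACHE_GFP_MASK):

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include "../zsmalloc/zsmalloc.h"

/* Sketch of the handle-based round trip used by zv_create()/zv_free(). */
static int example_zs_roundtrip(const void *data, size_t len)
{
	struct zs_pool *pool;
	void *handle, *obj;

	pool = zs_create_pool("example", GFP_NOWAIT);
	if (!pool)
		return -ENOMEM;

	handle = zs_malloc(pool, len);
	if (!handle) {
		zs_destroy_pool(pool);
		return -ENOMEM;
	}

	obj = zs_map_object(pool, handle);	/* pointer is valid only while mapped */
	memcpy(obj, data, len);
	zs_unmap_object(pool, handle);

	zs_free(pool, handle);			/* done with the object */
	zs_destroy_pool(pool);
	return 0;
}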
@@ -948,8 +986,8 @@ int zcache_new_client(uint16_t cli_id)
goto out;
cli->allocated = 1;
#ifdef CONFIG_FRONTSWAP
- cli->xvpool = xv_create_pool();
- if (cli->xvpool == NULL)
+ cli->zspool = zs_create_pool("zcache", ZCACHE_GFP_MASK);
+ if (cli->zspool == NULL)
goto out;
#endif
ret = 0;
@@ -1180,7 +1218,7 @@ static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph,
}
/* reject if mean compression is too poor */
if ((clen > zv_max_mean_zsize) && (curr_pers_pampd_count > 0)) {
- total_zsize = xv_get_total_size_bytes(cli->xvpool);
+ total_zsize = zs_get_total_size_bytes(cli->zspool);
zv_mean_zsize = div_u64(total_zsize,
curr_pers_pampd_count);
if (zv_mean_zsize > zv_max_mean_zsize) {
@@ -1188,7 +1226,7 @@ static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph,
goto out;
}
}
- pampd = (void *)zv_create(cli->xvpool, pool->pool_id,
+ pampd = (void *)zv_create(cli->zspool, pool->pool_id,
oid, index, cdata, clen);
if (pampd == NULL)
goto out;
@@ -1246,7 +1284,7 @@ static void zcache_pampd_free(void *pampd, struct tmem_pool *pool,
atomic_dec(&zcache_curr_eph_pampd_count);
BUG_ON(atomic_read(&zcache_curr_eph_pampd_count) < 0);
} else {
- zv_free(cli->xvpool, (struct zv_hdr *)pampd);
+ zv_free(cli->zspool, pampd);
atomic_dec(&zcache_curr_pers_pampd_count);
BUG_ON(atomic_read(&zcache_curr_pers_pampd_count) < 0);
}
@@ -1285,25 +1323,24 @@ static struct tmem_pamops zcache_pamops = {
* zcache compression/decompression and related per-cpu stuff
*/
-#define LZO_WORKMEM_BYTES LZO1X_1_MEM_COMPRESS
-#define LZO_DSTMEM_PAGE_ORDER 1
-static DEFINE_PER_CPU(unsigned char *, zcache_workmem);
static DEFINE_PER_CPU(unsigned char *, zcache_dstmem);
+#define ZCACHE_DSTMEM_ORDER 1
static int zcache_compress(struct page *from, void **out_va, size_t *out_len)
{
int ret = 0;
unsigned char *dmem = __get_cpu_var(zcache_dstmem);
- unsigned char *wmem = __get_cpu_var(zcache_workmem);
char *from_va;
BUG_ON(!irqs_disabled());
- if (unlikely(dmem == NULL || wmem == NULL))
- goto out; /* no buffer, so can't compress */
+ if (unlikely(dmem == NULL))
+ goto out; /* no buffer or no compressor so can't compress */
+ *out_len = PAGE_SIZE << ZCACHE_DSTMEM_ORDER;
from_va = kmap_atomic(from, KM_USER0);
mb();
- ret = lzo1x_1_compress(from_va, PAGE_SIZE, dmem, out_len, wmem);
- BUG_ON(ret != LZO_E_OK);
+ ret = zcache_comp_op(ZCACHE_COMPOP_COMPRESS, from_va, PAGE_SIZE, dmem,
+ (unsigned int *)out_len);
+ BUG_ON(ret);
*out_va = dmem;
kunmap_atomic(from_va, KM_USER0);
ret = 1;
@@ -1311,29 +1348,48 @@ out:
return ret;
}
+static int zcache_comp_cpu_up(int cpu)
+{
+ struct crypto_comp *tfm;
+
+ tfm = crypto_alloc_comp(zcache_comp_name, 0, 0);
+ if (IS_ERR(tfm))
+ return NOTIFY_BAD;
+ *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu) = tfm;
+ return NOTIFY_OK;
+}
+
+static void zcache_comp_cpu_down(int cpu)
+{
+ struct crypto_comp *tfm;
+
+ tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu);
+ crypto_free_comp(tfm);
+ *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu) = NULL;
+}
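The zcache_comp_op() calls in zv_decompress() and zcache_compress() above dispatch to these per-cpu transforms; the wrapper itself is defined earlier in zcache-main.c and is not visible in this hunk. As a hedged sketch only, assuming the wrapper simply selects the current CPU's crypto_comp tfm and calls the generic crypto compression API, it could look roughly like this:

/*
 * Illustrative sketch, not the code from this commit. Assumes enum comp_op
 * and the ZCACHE_COMPOP_* values are defined earlier in the file.
 */
static int zcache_comp_op_sketch(enum comp_op op,
				 const u8 *src, unsigned int slen,
				 u8 *dst, unsigned int *dlen)
{
	struct crypto_comp *tfm;
	int ret = -EINVAL;

	/* per-cpu tfm; get_cpu() keeps us on this CPU while it is in use */
	tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, get_cpu());
	BUG_ON(!tfm);
	switch (op) {
	case ZCACHE_COMPOP_COMPRESS:
		ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
		break;
	case ZCACHE_COMPOP_DECOMPRESS:
		ret = crypto_comp_decompress(tfm, src, slen, dst, dlen);
		break;
	}
	put_cpu();
	return ret;
}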
static int zcache_cpu_notifier(struct notifier_block *nb,
unsigned long action, void *pcpu)
{
- int cpu = (long)pcpu;
+ int ret, cpu = (long)pcpu;
struct zcache_preload *kp;
switch (action) {
case CPU_UP_PREPARE:
+ ret = zcache_comp_cpu_up(cpu);
+ if (ret != NOTIFY_OK) {
+ pr_err("zcache: can't allocate compressor transform\n");
+ return ret;
+ }
per_cpu(zcache_dstmem, cpu) = (void *)__get_free_pages(
- GFP_KERNEL | __GFP_REPEAT,
- LZO_DSTMEM_PAGE_ORDER),
- per_cpu(zcache_workmem, cpu) =
- kzalloc(LZO1X_MEM_COMPRESS,
- GFP_KERNEL | __GFP_REPEAT);
+ GFP_KERNEL | __GFP_REPEAT, ZCACHE_DSTMEM_ORDER);
break;
case CPU_DEAD:
case CPU_UP_CANCELED:
+ zcache_comp_cpu_down(cpu);
free_pages((unsigned long)per_cpu(zcache_dstmem, cpu),
- LZO_DSTMEM_PAGE_ORDER);
+ ZCACHE_DSTMEM_ORDER);
per_cpu(zcache_dstmem, cpu) = NULL;
- kfree(per_cpu(zcache_workmem, cpu));
- per_cpu(zcache_workmem, cpu) = NULL;
kp = &per_cpu(zcache_preloads, cpu);
while (kp->nr) {
kmem_cache_free(zcache_objnode_cache,
@@ -1918,6 +1974,44 @@ static int __init no_frontswap(char *s)
__setup("nofrontswap", no_frontswap);
+static int __init enable_zcache_compressor(char *s)
+{
+ strncpy(zcache_comp_name, s, ZCACHE_COMP_NAME_SZ);
+ zcache_enabled = 1;
+ return 1;
+}
+__setup("zcache=", enable_zcache_compressor);
+
+
+static int zcache_comp_init(void)
+{
+ int ret = 0;
+
+ /* check crypto algorithm */
+ if (*zcache_comp_name != '\0') {
+ ret = crypto_has_comp(zcache_comp_name, 0, 0);
+ if (!ret)
+ pr_info("zcache: %s not supported\n",
+ zcache_comp_name);
+ }
+ if (!ret)
+ strcpy(zcache_comp_name, "lzo");
+ ret = crypto_has_comp(zcache_comp_name, 0, 0);
+ if (!ret) {
+ ret = 1;
+ goto out;
+ }
+ pr_info("zcache: using %s compressor\n", zcache_comp_name);
+
+ /* alloc percpu transforms */
+ ret = 0;
+ zcache_comp_pcpu_tfms = alloc_percpu(struct crypto_comp *);
+ if (!zcache_comp_pcpu_tfms)
+ ret = 1;
+out:
+ return ret;
+}
+
static int __init zcache_init(void)
{
int ret = 0;
@@ -1940,6 +2034,11 @@ static int __init zcache_init(void)
pr_err("zcache: can't register cpu notifier\n");
goto out;
}
+ ret = zcache_comp_init();
+ if (ret) {
+ pr_err("zcache: compressor initialization failed\n");
+ goto out;
+ }
for_each_online_cpu(cpu) {
void *pcpu = (void *)(long)cpu;
zcache_cpu_notifier(&zcache_cpu_notifier_block,
@@ -1975,7 +2074,7 @@ static int __init zcache_init(void)
old_ops = zcache_frontswap_register_ops();
pr_info("zcache: frontswap enabled using kernel "
- "transcendent memory and xvmalloc\n");
+ "transcendent memory and zsmalloc\n");
if (old_ops.init != NULL)
pr_warning("zcache: frontswap_ops overridden");
}
diff --git a/drivers/staging/zram/Kconfig b/drivers/staging/zram/Kconfig
index 3bec4dba3fe5..ee23a86ea7d5 100644
--- a/drivers/staging/zram/Kconfig
+++ b/drivers/staging/zram/Kconfig
@@ -1,11 +1,7 @@
-config XVMALLOC
- bool
- default n
-
config ZRAM
tristate "Compressed RAM block device support"
depends on BLOCK && SYSFS
- select XVMALLOC
+ select ZSMALLOC
select LZO_COMPRESS
select LZO_DECOMPRESS
default n
diff --git a/drivers/staging/zram/Makefile b/drivers/staging/zram/Makefile
index 2a6d3213a756..7f4a3019e9c4 100644
--- a/drivers/staging/zram/Makefile
+++ b/drivers/staging/zram/Makefile
@@ -1,4 +1,3 @@
zram-y := zram_drv.o zram_sysfs.o
obj-$(CONFIG_ZRAM) += zram.o
-obj-$(CONFIG_XVMALLOC) += xvmalloc.o
\ No newline at end of file
diff --git a/drivers/staging/zram/xvmalloc.c b/drivers/staging/zram/xvmalloc.c
deleted file mode 100644
index 1f9c5082b6d5..000000000000
--- a/drivers/staging/zram/xvmalloc.c
+++ /dev/null
@@ -1,510 +0,0 @@
-/*
- * xvmalloc memory allocator
- *
- * Copyright (C) 2008, 2009, 2010 Nitin Gupta
- *
- * This code is released using a dual license strategy: BSD/GPL
- * You can choose the licence that better fits your requirements.
- *
- * Released under the terms of 3-clause BSD License
- * Released under the terms of GNU General Public License Version 2.0
- */
-
-#ifdef CONFIG_ZRAM_DEBUG
-#define DEBUG
-#endif
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/bitops.h>
-#include <linux/errno.h>
-#include <linux/highmem.h>
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-
-#include "xvmalloc.h"
-#include "xvmalloc_int.h"
-
-static void stat_inc(u64 *value)
-{
- *value = *value + 1;
-}
-
-static void stat_dec(u64 *value)
-{
- *value = *value - 1;
-}
-
-static int test_flag(struct block_header *block, enum blockflags flag)
-{
- return block->prev & BIT(flag);
-}
-
-static void set_flag(struct block_header *block, enum blockflags flag)
-{
- block->prev |= BIT(flag);
-}
-
-static void clear_flag(struct block_header *block, enum blockflags flag)
-{
- block->prev &= ~BIT(flag);
-}
-
-/*
- * Given <page, offset> pair, provide a dereferencable pointer.
- * This is called from xv_malloc/xv_free path, so it
- * needs to be fast.
- */
-static void *get_ptr_atomic(struct page *page, u16 offset, enum km_type type)
-{
- unsigned char *base;
-
- base = kmap_atomic(page, type);
- return base + offset;
-}
-
-static void put_ptr_atomic(void *ptr, enum km_type type)
-{
- kunmap_atomic(ptr, type);
-}
-
-static u32 get_blockprev(struct block_header *block)
-{
- return block->prev & PREV_MASK;
-}
-
-static void set_blockprev(struct block_header *block, u16 new_offset)
-{
- block->prev = new_offset | (block->prev & FLAGS_MASK);
-}
-
-static struct block_header *BLOCK_NEXT(struct block_header *block)
-{
- return (struct block_header *)
- ((char *)block + block->size + XV_ALIGN);
-}
-
-/*
- * Get index of free list containing blocks of maximum size
- * which is less than or equal to given size.
- */
-static u32 get_index_for_insert(u32 size)
-{
- if (unlikely(size > XV_MAX_ALLOC_SIZE))
- size = XV_MAX_ALLOC_SIZE;
- size &= ~FL_DELTA_MASK;
- return (size - XV_MIN_ALLOC_SIZE) >> FL_DELTA_SHIFT;
-}
-
-/*
- * Get index of free list having blocks of size greater than
- * or equal to requested size.
- */
-static u32 get_index(u32 size)
-{
- if (unlikely(size < XV_MIN_ALLOC_SIZE))
- size = XV_MIN_ALLOC_SIZE;
- size = ALIGN(size, FL_DELTA);
- return (size - XV_MIN_ALLOC_SIZE) >> FL_DELTA_SHIFT;
-}
-
-/**
- * find_block - find block of at least given size
- * @pool: memory pool to search from
- * @size: size of block required
- * @page: page containing required block
- * @offset: offset within the page where block is located.
- *
- * Searches two level bitmap to locate block of at least
- * the given size. If such a block is found, it provides
- * <page, offset> to identify this block and returns index
- * in freelist where we found this block.
- * Otherwise, returns 0 and <page, offset> params are not touched.
- */
-static u32 find_block(struct xv_pool *pool, u32 size,
- struct page **page, u32 *offset)
-{
- ulong flbitmap, slbitmap;
- u32 flindex, slindex, slbitstart;
-
- /* There are no free blocks in this pool */
- if (!pool->flbitmap)
- return 0;
-
- /* Get freelist index correspoding to this size */
- slindex = get_index(size);
- slbitmap = pool->slbitmap[slindex / BITS_PER_LONG];
- slbitstart = slindex % BITS_PER_LONG;
-
- /*
- * If freelist is not empty at this index, we found the
- * block - head of this list. This is approximate best-fit match.
- */
- if (test_bit(slbitstart, &slbitmap)) {
- *page = pool->freelist[slindex].page;
- *offset = pool->freelist[slindex].offset;
- return slindex;
- }
-
- /*
- * No best-fit found. Search a bit further in bitmap for a free block.
- * Second level bitmap consists of series of 32-bit chunks. Search
- * further in the chunk where we expected a best-fit, starting from
- * index location found above.
- */
- slbitstart++;
- slbitmap >>= slbitstart;
-
- /* Skip this search if we were already at end of this bitmap chunk */
- if ((slbitstart != BITS_PER_LONG) && slbitmap) {
- slindex += __ffs(slbitmap) + 1;
- *page = pool->freelist[slindex].page;
- *offset = pool->freelist[slindex].offset;
- return slindex;
- }
-
- /* Now do a full two-level bitmap search to find next nearest fit */
- flindex = slindex / BITS_PER_LONG;
-
- flbitmap = (pool->flbitmap) >> (flindex + 1);
- if (!flbitmap)
- return 0;
-
- flindex += __ffs(flbitmap) + 1;
- slbitmap = pool->slbitmap[flindex];
- slindex = (flindex * BITS_PER_LONG) + __ffs(slbitmap);
- *page = pool->freelist[slindex].page;
- *offset = pool->freelist[slindex].offset;
-
- return slindex;
-}
-
-/*
- * Insert block at <page, offset> in freelist of given pool.
- * freelist used depends on block size.
- */
-static void insert_block(struct xv_pool *pool, struct page *page, u32 offset,
- struct block_header *block)
-{
- u32 flindex, slindex;
- struct block_header *nextblock;
-
- slindex = get_index_for_insert(block->size);
- flindex = slindex / BITS_PER_LONG;
-
- block->link.prev_page = NULL;
- block->link.prev_offset = 0;
- block->link.next_page = pool->freelist[slindex].page;
- block->link.next_offset = pool->freelist[slindex].offset;
- pool->freelist[slindex].page = page;
- pool->freelist[slindex].offset = offset;
-
- if (block->link.next_page) {
- nextblock = get_ptr_atomic(block->link.next_page,
- block->link.next_offset, KM_USER1);
- nextblock->link.prev_page = page;
- nextblock->link.prev_offset = offset;
- put_ptr_atomic(nextblock, KM_USER1);
- /* If there was a next page then the free bits are set. */
- return;
- }
-
- __set_bit(slindex % BITS_PER_LONG, &pool->slbitmap[flindex]);
- __set_bit(flindex, &pool->flbitmap);
-}
-
-/*
- * Remove block from freelist. Index 'slindex' identifies the freelist.
- */
-static void remove_block(struct xv_pool *pool, struct page *page, u32 offset,
- struct block_header *block, u32 slindex)
-{
- u32 flindex = slindex / BITS_PER_LONG;
- struct block_header *tmpblock;
-
- if (block->link.prev_page) {
- tmpblock = get_ptr_atomic(block->link.prev_page,
- block->link.prev_offset, KM_USER1);
- tmpblock->link.next_page = block->link.next_page;
- tmpblock->link.next_offset = block->link.next_offset;
- put_ptr_atomic(tmpblock, KM_USER1);
- }
-
- if (block->link.next_page) {
- tmpblock = get_ptr_atomic(block->link.next_page,
- block->link.next_offset, KM_USER1);
- tmpblock->link.prev_page = block->link.prev_page;
- tmpblock->link.prev_offset = block->link.prev_offset;
- put_ptr_atomic(tmpblock, KM_USER1);
- }
-
- /* Is this block is at the head of the freelist? */
- if (pool->freelist[slindex].page == page
- && pool->freelist[slindex].offset == offset) {
-
- pool->freelist[slindex].page = block->link.next_page;
- pool->freelist[slindex].offset = block->link.next_offset;
-
- if (pool->freelist[slindex].page) {
- struct block_header *tmpblock;
- tmpblock = get_ptr_atomic(pool->freelist[slindex].page,
- pool->freelist[slindex].offset,
- KM_USER1);
- tmpblock->link.prev_page = NULL;
- tmpblock->link.prev_offset = 0;
- put_ptr_atomic(tmpblock, KM_USER1);
- } else {
- /* This freelist bucket is empty */
- __clear_bit(slindex % BITS_PER_LONG,
- &pool->slbitmap[flindex]);
- if (!pool->slbitmap[flindex])
- __clear_bit(flindex, &pool->flbitmap);
- }
- }
-
- block->link.prev_page = NULL;
- block->link.prev_offset = 0;
- block->link.next_page = NULL;
- block->link.next_offset = 0;
-}
-
-/*
- * Allocate a page and add it to freelist of given pool.
- */
-static int grow_pool(struct xv_pool *pool, gfp_t flags)
-{
- struct page *page;
- struct block_header *block;
-
- page = alloc_page(flags);
- if (unlikely(!page))
- return -ENOMEM;
-
- stat_inc(&pool->total_pages);
-
- spin_lock(&pool->lock);
- block = get_ptr_atomic(page, 0, KM_USER0);
-
- block->size = PAGE_SIZE - XV_ALIGN;
- set_flag(block, BLOCK_FREE);
- clear_flag(block, PREV_FREE);
- set_blockprev(block, 0);
-
- insert_block(pool, page, 0, block);
-
- put_ptr_atomic(block, KM_USER0);
- spin_unlock(&pool->lock);
-
- return 0;
-}
-
-/*
- * Create a memory pool. Allocates freelist, bitmaps and other
- * per-pool metadata.
- */
-struct xv_pool *xv_create_pool(void)
-{
- u32 ovhd_size;
- struct xv_pool *pool;
-
- ovhd_size = roundup(sizeof(*pool), PAGE_SIZE);
- pool = kzalloc(ovhd_size, GFP_KERNEL);
- if (!pool)
- return NULL;
-
- spin_lock_init(&pool->lock);
-
- return pool;
-}
-EXPORT_SYMBOL_GPL(xv_create_pool);
-
-void xv_destroy_pool(struct xv_pool *pool)
-{
- kfree(pool);
-}
-EXPORT_SYMBOL_GPL(xv_destroy_pool);
-
-/**
- * xv_malloc - Allocate block of given size from pool.
- * @pool: pool to allocate from
- * @size: size of block to allocate
- * @page: page no. that holds the object
- * @offset: location of object within page
- *
- * On success, <page, offset> identifies block allocated
- * and 0 is returned. On failure, <page, offset> is set to
- * 0 and -ENOMEM is returned.
- *
- * Allocation requests with size > XV_MAX_ALLOC_SIZE will fail.
- */
-int xv_malloc(struct xv_pool *pool, u32 size, struct page **page,
- u32 *offset, gfp_t flags)
-{
- int error;
- u32 index, tmpsize, origsize, tmpoffset;
- struct block_header *block, *tmpblock;
-
- *page = NULL;
- *offset = 0;
- origsize = size;
-
- if (unlikely(!size || size > XV_MAX_ALLOC_SIZE))
- return -ENOMEM;
-
- size = ALIGN(size, XV_ALIGN);
-
- spin_lock(&pool->lock);
-
- index = find_block(pool, size, page, offset);
-
- if (!*page) {
- spin_unlock(&pool->lock);
- if (flags & GFP_NOWAIT)
- return -ENOMEM;
- error = grow_pool(pool, flags);
- if (unlikely(error))
- return error;
-
- spin_lock(&pool->lock);
- index = find_block(pool, size, page, offset);
- }
-
- if (!*page) {
- spin_unlock(&pool->lock);
- return -ENOMEM;
- }
-
- block = get_ptr_atomic(*page, *offset, KM_USER0);
-
- remove_block(pool, *page, *offset, block, index);
-
- /* Split the block if required */
- tmpoffset = *offset + size + XV_ALIGN;
- tmpsize = block->size - size;
- tmpblock = (struct block_header *)((char *)block + size + XV_ALIGN);
- if (tmpsize) {
- tmpblock->size = tmpsize - XV_ALIGN;
- set_flag(tmpblock, BLOCK_FREE);
- clear_flag(tmpblock, PREV_FREE);
-
- set_blockprev(tmpblock, *offset);
- if (tmpblock->size >= XV_MIN_ALLOC_SIZE)
- insert_block(pool, *page, tmpoffset, tmpblock);
-
- if (tmpoffset + XV_ALIGN + tmpblock->size != PAGE_SIZE) {
- tmpblock = BLOCK_NEXT(tmpblock);
- set_blockprev(tmpblock, tmpoffset);
- }
- } else {
- /* This block is exact fit */
- if (tmpoffset != PAGE_SIZE)
- clear_flag(tmpblock, PREV_FREE);
- }
-
- block->size = origsize;
- clear_flag(block, BLOCK_FREE);
-
- put_ptr_atomic(block, KM_USER0);
- spin_unlock(&pool->lock);
-
- *offset += XV_ALIGN;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(xv_malloc);
-
-/*
- * Free block identified with <page, offset>
- */
-void xv_free(struct xv_pool *pool, struct page *page, u32 offset)
-{
- void *page_start;
- struct block_header *block, *tmpblock;
-
- offset -= XV_ALIGN;
-
- spin_lock(&pool->lock);
-
- page_start = get_ptr_atomic(page, 0, KM_USER0);
- block = (struct block_header *)((char *)page_start + offset);
-
- /* Catch double free bugs */
- BUG_ON(test_flag(block, BLOCK_FREE));
-
- block->size = ALIGN(block->size, XV_ALIGN);
-
- tmpblock = BLOCK_NEXT(block);
- if (offset + block->size + XV_ALIGN == PAGE_SIZE)
- tmpblock = NULL;
-
- /* Merge next block if its free */
- if (tmpblock && test_flag(tmpblock, BLOCK_FREE)) {
- /*
- * Blocks smaller than XV_MIN_ALLOC_SIZE
- * are not inserted in any free list.
- */
- if (tmpblock->size >= XV_MIN_ALLOC_SIZE) {
- remove_block(pool, page,
- offset + block->size + XV_ALIGN, tmpblock,
- get_index_for_insert(tmpblock->size));
- }
- block->size += tmpblock->size + XV_ALIGN;
- }
-
- /* Merge previous block if its free */
- if (test_flag(block, PREV_FREE)) {
- tmpblock = (struct block_header *)((char *)(page_start) +
- get_blockprev(block));
- offset = offset - tmpblock->size - XV_ALIGN;
-
- if (tmpblock->size >= XV_MIN_ALLOC_SIZE)
- remove_block(pool, page, offset, tmpblock,
- get_index_for_insert(tmpblock->size));
-
- tmpblock->size += block->size + XV_ALIGN;
- block = tmpblock;
- }
-
- /* No used objects in this page. Free it. */
- if (block->size == PAGE_SIZE - XV_ALIGN) {
- put_ptr_atomic(page_start, KM_USER0);
- spin_unlock(&pool->lock);
-
- __free_page(page);
- stat_dec(&pool->total_pages);
- return;
- }
-
- set_flag(block, BLOCK_FREE);
- if (block->size >= XV_MIN_ALLOC_SIZE)
- insert_block(pool, page, offset, block);
-
- if (offset + block->size + XV_ALIGN != PAGE_SIZE) {
- tmpblock = BLOCK_NEXT(block);
- set_flag(tmpblock, PREV_FREE);
- set_blockprev(tmpblock, offset);
- }
-
- put_ptr_atomic(page_start, KM_USER0);
- spin_unlock(&pool->lock);
-}
-EXPORT_SYMBOL_GPL(xv_free);
-
-u32 xv_get_object_size(void *obj)
-{
- struct block_header *blk;
-
- blk = (struct block_header *)((char *)(obj) - XV_ALIGN);
- return blk->size;
-}
-EXPORT_SYMBOL_GPL(xv_get_object_size);
-
-/*
- * Returns total memory used by allocator (userdata + metadata)
- */
-u64 xv_get_total_size_bytes(struct xv_pool *pool)
-{
- return pool->total_pages << PAGE_SHIFT;
-}
-EXPORT_SYMBOL_GPL(xv_get_total_size_bytes);
diff --git a/drivers/staging/zram/xvmalloc.h b/drivers/staging/zram/xvmalloc.h
deleted file mode 100644
index 5b1a81aa5faf..000000000000
--- a/drivers/staging/zram/xvmalloc.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * xvmalloc memory allocator
- *
- * Copyright (C) 2008, 2009, 2010 Nitin Gupta
- *
- * This code is released using a dual license strategy: BSD/GPL
- * You can choose the licence that better fits your requirements.
- *
- * Released under the terms of 3-clause BSD License
- * Released under the terms of GNU General Public License Version 2.0
- */
-
-#ifndef _XV_MALLOC_H_
-#define _XV_MALLOC_H_
-
-#include <linux/types.h>
-
-struct xv_pool;
-
-struct xv_pool *xv_create_pool(void);
-void xv_destroy_pool(struct xv_pool *pool);
-
-int xv_malloc(struct xv_pool *pool, u32 size, struct page **page,
- u32 *offset, gfp_t flags);
-void xv_free(struct xv_pool *pool, struct page *page, u32 offset);
-
-u32 xv_get_object_size(void *obj);
-u64 xv_get_total_size_bytes(struct xv_pool *pool);
-
-#endif
diff --git a/drivers/staging/zram/xvmalloc_int.h b/drivers/staging/zram/xvmalloc_int.h
deleted file mode 100644
index b5f1f7febcf6..000000000000
--- a/drivers/staging/zram/xvmalloc_int.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * xvmalloc memory allocator
- *
- * Copyright (C) 2008, 2009, 2010 Nitin Gupta
- *
- * This code is released using a dual license strategy: BSD/GPL
- * You can choose the licence that better fits your requirements.
- *
- * Released under the terms of 3-clause BSD License
- * Released under the terms of GNU General Public License Version 2.0
- */
-
-#ifndef _XV_MALLOC_INT_H_
-#define _XV_MALLOC_INT_H_
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-/* User configurable params */
-
-/* Must be power of two */
-#ifdef CONFIG_64BIT
-#define XV_ALIGN_SHIFT 3
-#else
-#define XV_ALIGN_SHIFT 2
-#endif
-#define XV_ALIGN (1 << XV_ALIGN_SHIFT)
-#define XV_ALIGN_MASK (XV_ALIGN - 1)
-
-/* This must be greater than sizeof(link_free) */
-#define XV_MIN_ALLOC_SIZE 32
-#define XV_MAX_ALLOC_SIZE (PAGE_SIZE - XV_ALIGN)
-
-/*
- * Free lists are separated by FL_DELTA bytes
- * This value is 3 for 4k pages and 4 for 64k pages, for any
- * other page size, a conservative (PAGE_SHIFT - 9) is used.
- */
-#if PAGE_SHIFT == 16
-#define FL_DELTA_SHIFT 4
-#else
-#define FL_DELTA_SHIFT (PAGE_SHIFT - 9)
-#endif
-#define FL_DELTA (1 << FL_DELTA_SHIFT)
-#define FL_DELTA_MASK (FL_DELTA - 1)
-#define NUM_FREE_LISTS ((XV_MAX_ALLOC_SIZE - XV_MIN_ALLOC_SIZE) \
- / FL_DELTA + 1)
-
-#define MAX_FLI DIV_ROUND_UP(NUM_FREE_LISTS, BITS_PER_LONG)
-
-/* End of user params */
-
-enum blockflags {
- BLOCK_FREE,
- PREV_FREE,
- __NR_BLOCKFLAGS,
-};
-
-#define FLAGS_MASK XV_ALIGN_MASK
-#define PREV_MASK (~FLAGS_MASK)
-
-struct freelist_entry {
- struct page *page;
- u16 offset;
- u16 pad;
-};
-
-struct link_free {
- struct page *prev_page;
- struct page *next_page;
- u16 prev_offset;
- u16 next_offset;
-};
-
-struct block_header {
- union {
- /* This common header must be XV_ALIGN bytes */
- u8 common[XV_ALIGN];
- struct {
- u16 size;
- u16 prev;
- };
- };
- struct link_free link;
-};
-
-struct xv_pool {
- ulong flbitmap;
- ulong slbitmap[MAX_FLI];
- u64 total_pages; /* stats */
- struct freelist_entry freelist[NUM_FREE_LISTS];
- spinlock_t lock;
-};
-
-#endif
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 2a2a92d389e6..5833156d2282 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -135,13 +135,9 @@ static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
static void zram_free_page(struct zram *zram, size_t index)
{
- u32 clen;
- void *obj;
+ void *handle = zram->table[index].handle;
- struct page *page = zram->table[index].page;
- u32 offset = zram->table[index].offset;
-
- if (unlikely(!page)) {
+ if (unlikely(!handle)) {
/*
* No memory is allocated for zero filled pages.
* Simply clear zero page flag.
@@ -154,27 +150,24 @@ static void zram_free_page(struct zram *zram, size_t index)
}
if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
- clen = PAGE_SIZE;
- __free_page(page);
+ __free_page(handle);
zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED);
zram_stat_dec(&zram->stats.pages_expand);
goto out;
}
- obj = kmap_atomic(page, KM_USER0) + offset;
- clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
- kunmap_atomic(obj, KM_USER0);
+ zs_free(zram->mem_pool, handle);
- xv_free(zram->mem_pool, page, offset);
- if (clen <= PAGE_SIZE / 2)
+ if (zram->table[index].size <= PAGE_SIZE / 2)
zram_stat_dec(&zram->stats.good_compress);
out:
- zram_stat64_sub(zram, &zram->stats.compr_size, clen);
+ zram_stat64_sub(zram, &zram->stats.compr_size,
+ zram->table[index].size);
zram_stat_dec(&zram->stats.pages_stored);
- zram->table[index].page = NULL;
- zram->table[index].offset = 0;
+ zram->table[index].handle = NULL;
+ zram->table[index].size = 0;
}
static void handle_zero_page(struct bio_vec *bvec)
@@ -196,7 +189,7 @@ static void handle_uncompressed_page(struct zram *zram, struct bio_vec *bvec,
unsigned char *user_mem, *cmem;
user_mem = kmap_atomic(page, KM_USER0);
- cmem = kmap_atomic(zram->table[index].page, KM_USER1);
+ cmem = kmap_atomic(zram->table[index].handle, KM_USER1);
memcpy(user_mem + bvec->bv_offset, cmem + offset, bvec->bv_len);
kunmap_atomic(cmem, KM_USER1);
@@ -227,7 +220,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
}
/* Requested page is not present in compressed area */
- if (unlikely(!zram->table[index].page)) {
+ if (unlikely(!zram->table[index].handle)) {
pr_debug("Read before write: sector=%lu, size=%u",
(ulong)(bio->bi_sector), bio->bi_size);
handle_zero_page(bvec);
@@ -254,11 +247,10 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
uncmem = user_mem;
clen = PAGE_SIZE;
- cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
- zram->table[index].offset;
+ cmem = zs_map_object(zram->mem_pool, zram->table[index].handle);
ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
- xv_get_object_size(cmem) - sizeof(*zheader),
+ zram->table[index].size,
uncmem, &clen);
if (is_partial_io(bvec)) {
@@ -267,7 +259,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
kfree(uncmem);
}
- kunmap_atomic(cmem, KM_USER1);
+ zs_unmap_object(zram->mem_pool, zram->table[index].handle);
kunmap_atomic(user_mem, KM_USER0);
/* Should NEVER happen. Return bio error if it does. */
@@ -290,13 +282,12 @@ static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
unsigned char *cmem;
if (zram_test_flag(zram, index, ZRAM_ZERO) ||
- !zram->table[index].page) {
+ !zram->table[index].handle) {
memset(mem, 0, PAGE_SIZE);
return 0;
}
- cmem = kmap_atomic(zram->table[index].page, KM_USER0) +
- zram->table[index].offset;
+ cmem = zs_map_object(zram->mem_pool, zram->table[index].handle);
/* Page is stored uncompressed since it's incompressible */
if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
@@ -306,9 +297,9 @@ static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
}
ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
- xv_get_object_size(cmem) - sizeof(*zheader),
+ zram->table[index].size,
mem, &clen);
- kunmap_atomic(cmem, KM_USER0);
+ zs_unmap_object(zram->mem_pool, zram->table[index].handle);
/* Should NEVER happen. Return bio error if it does. */
if (unlikely(ret != LZO_E_OK)) {
@@ -326,6 +317,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
int ret;
u32 store_offset;
size_t clen;
+ void *handle;
struct zobj_header *zheader;
struct page *page, *page_store;
unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
@@ -355,7 +347,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
* System overwrites unused sectors. Free memory associated
* with this sector now.
*/
- if (zram->table[index].page ||
+ if (zram->table[index].handle ||
zram_test_flag(zram, index, ZRAM_ZERO))
zram_free_page(zram, index);
@@ -407,26 +399,22 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
store_offset = 0;
zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
zram_stat_inc(&zram->stats.pages_expand);
- zram->table[index].page = page_store;
+ handle = page_store;
src = kmap_atomic(page, KM_USER0);
+ cmem = kmap_atomic(page_store, KM_USER1);
goto memstore;
}
- if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader),
- &zram->table[index].page, &store_offset,
- GFP_NOIO | __GFP_HIGHMEM)) {
+ handle = zs_malloc(zram->mem_pool, clen + sizeof(*zheader));
+ if (!handle) {
pr_info("Error allocating memory for compressed "
"page: %u, size=%zu\n", index, clen);
ret = -ENOMEM;
goto out;
}
+ cmem = zs_map_object(zram->mem_pool, handle);
memstore:
- zram->table[index].offset = store_offset;
-
- cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
- zram->table[index].offset;
-
#if 0
/* Back-reference needed for memory defragmentation */
if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
@@ -438,9 +426,15 @@ memstore:
memcpy(cmem, src, clen);
- kunmap_atomic(cmem, KM_USER1);
- if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
+ if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
+ kunmap_atomic(cmem, KM_USER1);
kunmap_atomic(src, KM_USER0);
+ } else {
+ zs_unmap_object(zram->mem_pool, handle);
+ }
+
+ zram->table[index].handle = handle;
+ zram->table[index].size = clen;
/* Update stats */
zram_stat64_add(zram, &zram->stats.compr_size, clen);
@@ -598,25 +592,20 @@ void __zram_reset_device(struct zram *zram)
/* Free all pages that are still in this zram device */
for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
- struct page *page;
- u16 offset;
-
- page = zram->table[index].page;
- offset = zram->table[index].offset;
-
- if (!page)
+ void *handle = zram->table[index].handle;
+ if (!handle)
continue;
if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
- __free_page(page);
+ __free_page(handle);
else
- xv_free(zram->mem_pool, page, offset);
+ zs_free(zram->mem_pool, handle);
}
vfree(zram->table);
zram->table = NULL;
- xv_destroy_pool(zram->mem_pool);
+ zs_destroy_pool(zram->mem_pool);
zram->mem_pool = NULL;
/* Reset stats */
@@ -674,7 +663,7 @@ int zram_init_device(struct zram *zram)
/* zram devices sort of resembles non-rotational disks */
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
- zram->mem_pool = xv_create_pool();
+ zram->mem_pool = zs_create_pool("zram", GFP_NOIO | __GFP_HIGHMEM);
if (!zram->mem_pool) {
pr_err("Error creating memory pool\n");
ret = -ENOMEM;
diff --git a/drivers/staging/zram/zram_drv.h b/drivers/staging/zram/zram_drv.h
index e5cd2469b6a0..572faa8762bb 100644
--- a/drivers/staging/zram/zram_drv.h
+++ b/drivers/staging/zram/zram_drv.h
@@ -18,7 +18,7 @@
#include <linux/spinlock.h>
#include <linux/mutex.h>
-#include "xvmalloc.h"
+#include "../zsmalloc/zsmalloc.h"
/*
* Some arbitrary value. This is just to catch
@@ -51,7 +51,7 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
/*
* NOTE: max_zpage_size must be less than or equal to:
- * XV_MAX_ALLOC_SIZE - sizeof(struct zobj_header)
+ * ZS_MAX_ALLOC_SIZE - sizeof(struct zobj_header)
* otherwise, xv_malloc() would always return failure.
*/
@@ -81,8 +81,8 @@ enum zram_pageflags {
/* Allocated for each disk page */
struct table {
- struct page *page;
- u16 offset;
+ void *handle;
+ u16 size; /* object size (excluding header) */
u8 count; /* object ref count (not yet used) */
u8 flags;
} __attribute__((aligned(4)));
@@ -102,7 +102,7 @@ struct zram_stats {
};
struct zram {
- struct xv_pool *mem_pool;
+ struct zs_pool *mem_pool;
void *compress_workmem;
void *compress_buffer;
struct table *table;
diff --git a/drivers/staging/zram/zram_sysfs.c b/drivers/staging/zram/zram_sysfs.c
index d521122826f6..d2875c5690e8 100644
--- a/drivers/staging/zram/zram_sysfs.c
+++ b/drivers/staging/zram/zram_sysfs.c
@@ -187,7 +187,7 @@ static ssize_t mem_used_total_show(struct device *dev,
struct zram *zram = dev_to_zram(dev);
if (zram->init_done) {
- val = xv_get_total_size_bytes(zram->mem_pool) +
+ val = zs_get_total_size_bytes(zram->mem_pool) +
((u64)(zram->stats.pages_expand) << PAGE_SHIFT);
}
diff --git a/drivers/staging/zsmalloc/Kconfig b/drivers/staging/zsmalloc/Kconfig
new file mode 100644
index 000000000000..3e7a8d4d2172
--- /dev/null
+++ b/drivers/staging/zsmalloc/Kconfig
@@ -0,0 +1,11 @@
+config ZSMALLOC
+ tristate "Memory allocator for compressed pages"
+ depends on SPARSEMEM
+ default n
+ help
+ zsmalloc is a slab-based memory allocator designed to store
+ compressed RAM pages. zsmalloc uses virtual memory mapping
+ in order to reduce fragmentation. However, this results in a
+ non-standard allocator interface where a handle, not a pointer, is
+ returned by an alloc(). This handle must be mapped in order to
+ access the allocated space.
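The handle-based interface described above corresponds to the API declared in zsmalloc.h later in this patch. A minimal usage sketch follows; the pool name and payload are made up for illustration and error handling is kept to the essentials:

#include <linux/gfp.h>
#include <linux/string.h>
#include "zsmalloc.h"

static int zsmalloc_usage_sketch(void)
{
	static const char payload[] = "compressed page data";
	struct zs_pool *pool;
	void *handle, *mem;

	pool = zs_create_pool("example", GFP_KERNEL);
	if (!pool)
		return -ENOMEM;

	/* zs_malloc() hands back an opaque handle, not a pointer */
	handle = zs_malloc(pool, sizeof(payload));
	if (!handle) {
		zs_destroy_pool(pool);
		return -ENOMEM;
	}

	/* the handle must be mapped before the memory can be touched ... */
	mem = zs_map_object(pool, handle);
	memcpy(mem, payload, sizeof(payload));
	/* ... and unmapped promptly, since the mapping pins the current CPU */
	zs_unmap_object(pool, handle);

	zs_free(pool, handle);
	zs_destroy_pool(pool);
	return 0;
}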
diff --git a/drivers/staging/zsmalloc/Makefile b/drivers/staging/zsmalloc/Makefile
new file mode 100644
index 000000000000..b134848a590d
--- /dev/null
+++ b/drivers/staging/zsmalloc/Makefile
@@ -0,0 +1,3 @@
+zsmalloc-y := zsmalloc-main.o
+
+obj-$(CONFIG_ZSMALLOC) += zsmalloc.o
diff --git a/drivers/staging/zsmalloc/zsmalloc-main.c b/drivers/staging/zsmalloc/zsmalloc-main.c
new file mode 100644
index 000000000000..189fb42313bb
--- /dev/null
+++ b/drivers/staging/zsmalloc/zsmalloc-main.c
@@ -0,0 +1,756 @@
+/*
+ * zsmalloc memory allocator
+ *
+ * Copyright (C) 2011 Nitin Gupta
+ *
+ * This code is released using a dual license strategy: BSD/GPL
+ * You can choose the license that better fits your requirements.
+ *
+ * Released under the terms of 3-clause BSD License
+ * Released under the terms of GNU General Public License Version 2.0
+ */
+
+#ifdef CONFIG_ZSMALLOC_DEBUG
+#define DEBUG
+#endif
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/highmem.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <asm/tlbflush.h>
+#include <asm/pgtable.h>
+#include <linux/cpumask.h>
+#include <linux/cpu.h>
+
+#include "zsmalloc.h"
+#include "zsmalloc_int.h"
+
+/*
+ * A zspage's class index and fullness group
+ * are encoded in its (first)page->mapping
+ */
+#define CLASS_IDX_BITS 28
+#define FULLNESS_BITS 4
+#define CLASS_IDX_MASK ((1 << CLASS_IDX_BITS) - 1)
+#define FULLNESS_MASK ((1 << FULLNESS_BITS) - 1)
+
+/*
+ * Object location (<PFN>, <obj_idx>) is encoded
+ * as a single (void *) handle value.
+ *
+ * Note that object index <obj_idx> is relative to system
+ * page <PFN> it is stored in, so for each sub-page belonging
+ * to a zspage, obj_idx starts with 0.
+ */
+#define _PFN_BITS (MAX_PHYSMEM_BITS - PAGE_SHIFT)
+#define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS)
+#define OBJ_INDEX_MASK ((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
+
+/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
+static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
+
+static int is_first_page(struct page *page)
+{
+ return test_bit(PG_private, &page->flags);
+}
+
+static int is_last_page(struct page *page)
+{
+ return test_bit(PG_private_2, &page->flags);
+}
+
+static void get_zspage_mapping(struct page *page, unsigned int *class_idx,
+ enum fullness_group *fullness)
+{
+ unsigned long m;
+ BUG_ON(!is_first_page(page));
+
+ m = (unsigned long)page->mapping;
+ *fullness = m & FULLNESS_MASK;
+ *class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK;
+}
+
+static void set_zspage_mapping(struct page *page, unsigned int class_idx,
+ enum fullness_group fullness)
+{
+ unsigned long m;
+ BUG_ON(!is_first_page(page));
+
+ m = ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) |
+ (fullness & FULLNESS_MASK);
+ page->mapping = (struct address_space *)m;
+}
+
+static int get_size_class_index(int size)
+{
+ int idx = 0;
+
+ if (likely(size > ZS_MIN_ALLOC_SIZE))
+ idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
+ ZS_SIZE_CLASS_DELTA);
+
+ return idx;
+}
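As a worked illustration (not part of the patch): a 100-byte allocation gives idx = DIV_ROUND_UP(100 - 32, 16) = 5, and zs_create_pool() sizes class 5 at 32 + 5 * 16 = 112 bytes, the smallest class that can hold the request.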
+
+static enum fullness_group get_fullness_group(struct page *page)
+{
+ int inuse, max_objects;
+ enum fullness_group fg;
+ BUG_ON(!is_first_page(page));
+
+ inuse = page->inuse;
+ max_objects = page->objects;
+
+ if (inuse == 0)
+ fg = ZS_EMPTY;
+ else if (inuse == max_objects)
+ fg = ZS_FULL;
+ else if (inuse <= max_objects / fullness_threshold_frac)
+ fg = ZS_ALMOST_EMPTY;
+ else
+ fg = ZS_ALMOST_FULL;
+
+ return fg;
+}
+
+static void insert_zspage(struct page *page, struct size_class *class,
+ enum fullness_group fullness)
+{
+ struct page **head;
+
+ BUG_ON(!is_first_page(page));
+
+ if (fullness >= _ZS_NR_FULLNESS_GROUPS)
+ return;
+
+ head = &class->fullness_list[fullness];
+ if (*head)
+ list_add_tail(&page->lru, &(*head)->lru);
+
+ *head = page;
+}
+
+static void remove_zspage(struct page *page, struct size_class *class,
+ enum fullness_group fullness)
+{
+ struct page **head;
+
+ BUG_ON(!is_first_page(page));
+
+ if (fullness >= _ZS_NR_FULLNESS_GROUPS)
+ return;
+
+ head = &class->fullness_list[fullness];
+ BUG_ON(!*head);
+ if (list_empty(&(*head)->lru))
+ *head = NULL;
+ else if (*head == page)
+ *head = (struct page *)list_entry((*head)->lru.next,
+ struct page, lru);
+
+ list_del_init(&page->lru);
+}
+
+static enum fullness_group fix_fullness_group(struct zs_pool *pool,
+ struct page *page)
+{
+ int class_idx;
+ struct size_class *class;
+ enum fullness_group currfg, newfg;
+
+ BUG_ON(!is_first_page(page));
+
+ get_zspage_mapping(page, &class_idx, &currfg);
+ newfg = get_fullness_group(page);
+ if (newfg == currfg)
+ goto out;
+
+ class = &pool->size_class[class_idx];
+ remove_zspage(page, class, currfg);
+ insert_zspage(page, class, newfg);
+ set_zspage_mapping(page, class_idx, newfg);
+
+out:
+ return newfg;
+}
+
+/*
+ * We have to decide on how many pages to link together
+ * to form a zspage for each size class. This is important
+ * to reduce wastage due to unusable space left at end of
+ * each zspage, which is given as:
+ * wastage = Zp % class_size
+ * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ...
+ *
+ * For example, for size class of 3/8 * PAGE_SIZE, we should
+ * link together 3 PAGE_SIZE sized pages to form a zspage
+ * since then we can perfectly fit in 8 such objects.
+ */
+static int get_zspage_order(int class_size)
+{
+ int i, max_usedpc = 0;
+ /* zspage order which gives maximum used size per KB */
+ int max_usedpc_order = 1;
+
+ for (i = 1; i <= max_zspage_order; i++) {
+ int zspage_size;
+ int waste, usedpc;
+
+ zspage_size = i * PAGE_SIZE;
+ waste = zspage_size % class_size;
+ usedpc = (zspage_size - waste) * 100 / zspage_size;
+
+ if (usedpc > max_usedpc) {
+ max_usedpc = usedpc;
+ max_usedpc_order = i;
+ }
+ }
+
+ return max_usedpc_order;
+}
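Working the 3/8 * PAGE_SIZE example from the comment with 4 KiB pages (class_size = 1536): order 1 wastes 4096 % 1536 = 1024 bytes (75% used), order 2 wastes 512 (93% used), order 3 wastes 0 (100% used) and order 4 wastes 1024 again, so get_zspage_order() settles on 3 pages per zspage, the perfect 8-object fit mentioned above.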
+
+/*
+ * A single 'zspage' is composed of many system pages which are
+ * linked together using fields in struct page. This function finds
+ * the first/head page, given any component page of a zspage.
+ */
+static struct page *get_first_page(struct page *page)
+{
+ if (is_first_page(page))
+ return page;
+ else
+ return page->first_page;
+}
+
+static struct page *get_next_page(struct page *page)
+{
+ struct page *next;
+
+ if (is_last_page(page))
+ next = NULL;
+ else if (is_first_page(page))
+ next = (struct page *)page->private;
+ else
+ next = list_entry(page->lru.next, struct page, lru);
+
+ return next;
+}
+
+/* Encode <page, obj_idx> as a single handle value */
+static void *obj_location_to_handle(struct page *page, unsigned long obj_idx)
+{
+ unsigned long handle;
+
+ if (!page) {
+ BUG_ON(obj_idx);
+ return NULL;
+ }
+
+ handle = page_to_pfn(page) << OBJ_INDEX_BITS;
+ handle |= (obj_idx & OBJ_INDEX_MASK);
+
+ return (void *)handle;
+}
+
+/* Decode <page, obj_idx> pair from the given object handle */
+static void obj_handle_to_location(void *handle, struct page **page,
+ unsigned long *obj_idx)
+{
+ unsigned long hval = (unsigned long)handle;
+
+ *page = pfn_to_page(hval >> OBJ_INDEX_BITS);
+ *obj_idx = hval & OBJ_INDEX_MASK;
+}
+
+static unsigned long obj_idx_to_offset(struct page *page,
+ unsigned long obj_idx, int class_size)
+{
+ unsigned long off = 0;
+
+ if (!is_first_page(page))
+ off = page->index;
+
+ return off + obj_idx * class_size;
+}
+
+static void free_zspage(struct page *first_page)
+{
+ struct page *nextp, *tmp;
+
+ BUG_ON(!is_first_page(first_page));
+ BUG_ON(first_page->inuse);
+
+ nextp = (struct page *)page_private(first_page);
+
+ clear_bit(PG_private, &first_page->flags);
+ clear_bit(PG_private_2, &first_page->flags);
+ set_page_private(first_page, 0);
+ first_page->mapping = NULL;
+ first_page->freelist = NULL;
+ reset_page_mapcount(first_page);
+ __free_page(first_page);
+
+ /* zspage with only 1 system page */
+ if (!nextp)
+ return;
+
+ list_for_each_entry_safe(nextp, tmp, &nextp->lru, lru) {
+ list_del(&nextp->lru);
+ clear_bit(PG_private_2, &nextp->flags);
+ nextp->index = 0;
+ __free_page(nextp);
+ }
+}
+
+/* Initialize a newly allocated zspage */
+static void init_zspage(struct page *first_page, struct size_class *class)
+{
+ unsigned long off = 0;
+ struct page *page = first_page;
+
+ BUG_ON(!is_first_page(first_page));
+ while (page) {
+ struct page *next_page;
+ struct link_free *link;
+ unsigned int i, objs_on_page;
+
+ /*
+ * page->index stores offset of first object starting
+ * in the page. For the first page, this is always 0,
+ * so we use first_page->index (aka ->freelist) to store
+ * head of corresponding zspage's freelist.
+ */
+ if (page != first_page)
+ page->index = off;
+
+ link = (struct link_free *)kmap_atomic(page) +
+ off / sizeof(*link);
+ objs_on_page = (PAGE_SIZE - off) / class->size;
+
+ for (i = 1; i <= objs_on_page; i++) {
+ off += class->size;
+ if (off < PAGE_SIZE) {
+ link->next = obj_location_to_handle(page, i);
+ link += class->size / sizeof(*link);
+ }
+ }
+
+ /*
+ * We now come to the last (full or partial) object on this
+ * page, which must point to the first object on the next
+ * page (if present)
+ */
+ next_page = get_next_page(page);
+ link->next = obj_location_to_handle(next_page, 0);
+ kunmap_atomic(link);
+ page = next_page;
+ off = (off + class->size) % PAGE_SIZE;
+ }
+}
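A worked pass for that same 1536-byte class on 4 KiB pages: objs_on_page is 2, so the loop points the link at offset 0 to obj_location_to_handle(page, 1) and the link at offset 1536 to obj_location_to_handle(page, 2); after the loop, the link of the object at offset 3072, which spills into the next page, is pointed at obj_location_to_handle(next_page, 0), and the next page records (3072 + 1536) % PAGE_SIZE = 512 in page->index as the offset of its first whole object.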
+
+/*
+ * Allocate a zspage for the given size class
+ */
+static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
+{
+ int i, error;
+ struct page *first_page = NULL;
+
+ /*
+ * Allocate individual pages and link them together as:
+ * 1. first page->private = first sub-page
+ * 2. all sub-pages are linked together using page->lru
+ * 3. each sub-page is linked to the first page using page->first_page
+ *
+ * For each size class, First/Head pages are linked together using
+ * page->lru. Also, we set PG_private to identify the first page
+ * (i.e. no other sub-page has this flag set) and PG_private_2 to
+ * identify the last page.
+ */
+ error = -ENOMEM;
+ for (i = 0; i < class->zspage_order; i++) {
+ struct page *page, *prev_page;
+
+ page = alloc_page(flags);
+ if (!page)
+ goto cleanup;
+
+ INIT_LIST_HEAD(&page->lru);
+ if (i == 0) { /* first page */
+ set_bit(PG_private, &page->flags);
+ set_page_private(page, 0);
+ first_page = page;
+ first_page->inuse = 0;
+ }
+ if (i == 1)
+ first_page->private = (unsigned long)page;
+ if (i >= 1)
+ page->first_page = first_page;
+ if (i >= 2)
+ list_add(&page->lru, &prev_page->lru);
+ if (i == class->zspage_order - 1) /* last page */
+ set_bit(PG_private_2, &page->flags);
+
+ prev_page = page;
+ }
+
+ init_zspage(first_page, class);
+
+ first_page->freelist = obj_location_to_handle(first_page, 0);
+ /* Maximum number of objects we can store in this zspage */
+ first_page->objects = class->zspage_order * PAGE_SIZE / class->size;
+
+ error = 0; /* Success */
+
+cleanup:
+ if (unlikely(error) && first_page) {
+ free_zspage(first_page);
+ first_page = NULL;
+ }
+
+ return first_page;
+}
+
+static struct page *find_get_zspage(struct size_class *class)
+{
+ int i;
+ struct page *page;
+
+ for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) {
+ page = class->fullness_list[i];
+ if (page)
+ break;
+ }
+
+ return page;
+}
+
+
+/*
+ * If this becomes a separate module, register zs_init() with
+ * module_init(), zs_exit() with module_exit(), and remove zs_initialized
+ */
+static int zs_initialized;
+
+static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action,
+ void *pcpu)
+{
+ int cpu = (long)pcpu;
+ struct mapping_area *area;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ area = &per_cpu(zs_map_area, cpu);
+ if (area->vm)
+ break;
+ area->vm = alloc_vm_area(2 * PAGE_SIZE, area->vm_ptes);
+ if (!area->vm)
+ return notifier_from_errno(-ENOMEM);
+ break;
+ case CPU_DEAD:
+ case CPU_UP_CANCELED:
+ area = &per_cpu(zs_map_area, cpu);
+ if (area->vm)
+ free_vm_area(area->vm);
+ area->vm = NULL;
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block zs_cpu_nb = {
+ .notifier_call = zs_cpu_notifier
+};
+
+static void zs_exit(void)
+{
+ int cpu;
+
+ for_each_online_cpu(cpu)
+ zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
+ unregister_cpu_notifier(&zs_cpu_nb);
+}
+
+static int zs_init(void)
+{
+ int cpu, ret;
+
+ register_cpu_notifier(&zs_cpu_nb);
+ for_each_online_cpu(cpu) {
+ ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
+ if (notifier_to_errno(ret))
+ goto fail;
+ }
+ return 0;
+fail:
+ zs_exit();
+ return notifier_to_errno(ret);
+}
+
+struct zs_pool *zs_create_pool(const char *name, gfp_t flags)
+{
+ int i, error, ovhd_size;
+ struct zs_pool *pool;
+
+ if (!name)
+ return NULL;
+
+ ovhd_size = roundup(sizeof(*pool), PAGE_SIZE);
+ pool = kzalloc(ovhd_size, GFP_KERNEL);
+ if (!pool)
+ return NULL;
+
+ for (i = 0; i < ZS_SIZE_CLASSES; i++) {
+ int size;
+ struct size_class *class;
+
+ size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
+ if (size > ZS_MAX_ALLOC_SIZE)
+ size = ZS_MAX_ALLOC_SIZE;
+
+ class = &pool->size_class[i];
+ class->size = size;
+ class->index = i;
+ spin_lock_init(&class->lock);
+ class->zspage_order = get_zspage_order(size);
+
+ }
+
+ /*
+ * If this becomes a separate module, register zs_init with
+ * module_init, and remove this block
+ */
+ if (!zs_initialized) {
+ error = zs_init();
+ if (error)
+ goto cleanup;
+ zs_initialized = 1;
+ }
+
+ pool->flags = flags;
+ pool->name = name;
+
+ error = 0; /* Success */
+
+cleanup:
+ if (error) {
+ zs_destroy_pool(pool);
+ pool = NULL;
+ }
+
+ return pool;
+}
+EXPORT_SYMBOL_GPL(zs_create_pool);
+
+void zs_destroy_pool(struct zs_pool *pool)
+{
+ int i;
+
+ for (i = 0; i < ZS_SIZE_CLASSES; i++) {
+ int fg;
+ struct size_class *class = &pool->size_class[i];
+
+ for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
+ if (class->fullness_list[fg]) {
+ pr_info("Freeing non-empty class with size "
+ "%db, fullness group %d\n",
+ class->size, fg);
+ }
+ }
+ }
+ kfree(pool);
+}
+EXPORT_SYMBOL_GPL(zs_destroy_pool);
+
+/**
+ * zs_malloc - Allocate block of given size from pool.
+ * @pool: pool to allocate from
+ * @size: size of block to allocate
+ *
+ * On success, a handle to the allocated object is returned;
+ * on failure, NULL is returned.
+ *
+ * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
+ */
+void *zs_malloc(struct zs_pool *pool, size_t size)
+{
+ void *obj;
+ struct link_free *link;
+ int class_idx;
+ struct size_class *class;
+
+ struct page *first_page, *m_page;
+ unsigned long m_objidx, m_offset;
+
+ if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
+ return NULL;
+
+ class_idx = get_size_class_index(size);
+ class = &pool->size_class[class_idx];
+ BUG_ON(class_idx != class->index);
+
+ spin_lock(&class->lock);
+ first_page = find_get_zspage(class);
+
+ if (!first_page) {
+ spin_unlock(&class->lock);
+ first_page = alloc_zspage(class, pool->flags);
+ if (unlikely(!first_page))
+ return NULL;
+
+ set_zspage_mapping(first_page, class->index, ZS_EMPTY);
+ spin_lock(&class->lock);
+ class->pages_allocated += class->zspage_order;
+ }
+
+ obj = first_page->freelist;
+ obj_handle_to_location(obj, &m_page, &m_objidx);
+ m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);
+
+ link = (struct link_free *)kmap_atomic(m_page) +
+ m_offset / sizeof(*link);
+ first_page->freelist = link->next;
+ memset(link, POISON_INUSE, sizeof(*link));
+ kunmap_atomic(link);
+
+ first_page->inuse++;
+ /* Now move the zspage to another fullness group, if required */
+ fix_fullness_group(pool, first_page);
+ spin_unlock(&class->lock);
+
+ return obj;
+}
+EXPORT_SYMBOL_GPL(zs_malloc);
+
+void zs_free(struct zs_pool *pool, void *obj)
+{
+ struct link_free *link;
+ struct page *first_page, *f_page;
+ unsigned long f_objidx, f_offset;
+
+ int class_idx;
+ struct size_class *class;
+ enum fullness_group fullness;
+
+ if (unlikely(!obj))
+ return;
+
+ obj_handle_to_location(obj, &f_page, &f_objidx);
+ first_page = get_first_page(f_page);
+
+ get_zspage_mapping(first_page, &class_idx, &fullness);
+ class = &pool->size_class[class_idx];
+ f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);
+
+ spin_lock(&class->lock);
+
+ /* Insert this object in containing zspage's freelist */
+ link = (struct link_free *)((unsigned char *)kmap_atomic(f_page)
+ + f_offset);
+ link->next = first_page->freelist;
+ kunmap_atomic(link);
+ first_page->freelist = obj;
+
+ first_page->inuse--;
+ fullness = fix_fullness_group(pool, first_page);
+
+ if (fullness == ZS_EMPTY)
+ class->pages_allocated -= class->zspage_order;
+
+ spin_unlock(&class->lock);
+
+ if (fullness == ZS_EMPTY)
+ free_zspage(first_page);
+}
+EXPORT_SYMBOL_GPL(zs_free);
+
+void *zs_map_object(struct zs_pool *pool, void *handle)
+{
+ struct page *page;
+ unsigned long obj_idx, off;
+
+ unsigned int class_idx;
+ enum fullness_group fg;
+ struct size_class *class;
+ struct mapping_area *area;
+
+ BUG_ON(!handle);
+
+ obj_handle_to_location(handle, &page, &obj_idx);
+ get_zspage_mapping(get_first_page(page), &class_idx, &fg);
+ class = &pool->size_class[class_idx];
+ off = obj_idx_to_offset(page, obj_idx, class->size);
+
+ area = &get_cpu_var(zs_map_area);
+ if (off + class->size <= PAGE_SIZE) {
+ /* this object is contained entirely within a page */
+ area->vm_addr = kmap_atomic(page);
+ } else {
+ /* this object spans two pages */
+ struct page *nextp;
+
+ nextp = get_next_page(page);
+ BUG_ON(!nextp);
+
+
+ set_pte(area->vm_ptes[0], mk_pte(page, PAGE_KERNEL));
+ set_pte(area->vm_ptes[1], mk_pte(nextp, PAGE_KERNEL));
+
+ /* We pre-allocated VM area so mapping can never fail */
+ area->vm_addr = area->vm->addr;
+ }
+
+ return area->vm_addr + off;
+}
+EXPORT_SYMBOL_GPL(zs_map_object);
+
+void zs_unmap_object(struct zs_pool *pool, void *handle)
+{
+ struct page *page;
+ unsigned long obj_idx, off;
+
+ unsigned int class_idx;
+ enum fullness_group fg;
+ struct size_class *class;
+ struct mapping_area *area;
+
+ BUG_ON(!handle);
+
+ obj_handle_to_location(handle, &page, &obj_idx);
+ get_zspage_mapping(get_first_page(page), &class_idx, &fg);
+ class = &pool->size_class[class_idx];
+ off = obj_idx_to_offset(page, obj_idx, class->size);
+
+ area = &__get_cpu_var(zs_map_area);
+ if (off + class->size <= PAGE_SIZE) {
+ kunmap_atomic(area->vm_addr);
+ } else {
+ set_pte(area->vm_ptes[0], __pte(0));
+ set_pte(area->vm_ptes[1], __pte(0));
+ __flush_tlb_one((unsigned long)area->vm_addr);
+ __flush_tlb_one((unsigned long)area->vm_addr + PAGE_SIZE);
+ }
+ put_cpu_var(zs_map_area);
+}
+EXPORT_SYMBOL_GPL(zs_unmap_object);
+
+u64 zs_get_total_size_bytes(struct zs_pool *pool)
+{
+ int i;
+ u64 npages = 0;
+
+ for (i = 0; i < ZS_SIZE_CLASSES; i++)
+ npages += pool->size_class[i].pages_allocated;
+
+ return npages << PAGE_SHIFT;
+}
+EXPORT_SYMBOL_GPL(zs_get_total_size_bytes);
diff --git a/drivers/staging/zsmalloc/zsmalloc.h b/drivers/staging/zsmalloc/zsmalloc.h
new file mode 100644
index 000000000000..949384ee7491
--- /dev/null
+++ b/drivers/staging/zsmalloc/zsmalloc.h
@@ -0,0 +1,31 @@
+/*
+ * zsmalloc memory allocator
+ *
+ * Copyright (C) 2011 Nitin Gupta
+ *
+ * This code is released using a dual license strategy: BSD/GPL
+ * You can choose the license that better fits your requirements.
+ *
+ * Released under the terms of 3-clause BSD License
+ * Released under the terms of GNU General Public License Version 2.0
+ */
+
+#ifndef _ZS_MALLOC_H_
+#define _ZS_MALLOC_H_
+
+#include <linux/types.h>
+
+struct zs_pool;
+
+struct zs_pool *zs_create_pool(const char *name, gfp_t flags);
+void zs_destroy_pool(struct zs_pool *pool);
+
+void *zs_malloc(struct zs_pool *pool, size_t size);
+void zs_free(struct zs_pool *pool, void *obj);
+
+void *zs_map_object(struct zs_pool *pool, void *handle);
+void zs_unmap_object(struct zs_pool *pool, void *handle);
+
+u64 zs_get_total_size_bytes(struct zs_pool *pool);
+
+#endif
diff --git a/drivers/staging/zsmalloc/zsmalloc_int.h b/drivers/staging/zsmalloc/zsmalloc_int.h
new file mode 100644
index 000000000000..354a02001434
--- /dev/null
+++ b/drivers/staging/zsmalloc/zsmalloc_int.h
@@ -0,0 +1,126 @@
+/*
+ * zsmalloc memory allocator
+ *
+ * Copyright (C) 2011 Nitin Gupta
+ *
+ * This code is released using a dual license strategy: BSD/GPL
+ * You can choose the license that better fits your requirements.
+ *
+ * Released under the terms of 3-clause BSD License
+ * Released under the terms of GNU General Public License Version 2.0
+ */
+
+#ifndef _ZS_MALLOC_INT_H_
+#define _ZS_MALLOC_INT_H_
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+/*
+ * This must be a power of 2 and greater than or equal to sizeof(link_free).
+ * These two conditions ensure that any 'struct link_free' itself doesn't
+ * span more than 1 page, which avoids the complex case of mapping 2 pages
+ * simply to restore link_free pointer values.
+ */
+#define ZS_ALIGN 8
+
+/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
+#define ZS_MIN_ALLOC_SIZE 32
+#define ZS_MAX_ALLOC_SIZE PAGE_SIZE
+
+/*
+ * On systems with 4K page size, this gives 254 size classes! There is a
+ * trade-off here:
+ * - A large number of size classes is potentially wasteful as free pages are
+ *   spread across these classes
+ * - A small number of size classes causes large internal fragmentation
+ * - It is probably better to use specific size classes (empirically
+ *   determined). NOTE: all those class sizes must be set as multiples of
+ *   ZS_ALIGN to make sure link_free itself never has to span 2 pages.
+ *
+ * ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
+ * (reason above)
+ */
+#define ZS_SIZE_CLASS_DELTA 16
+#define ZS_SIZE_CLASSES ((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / \
+ ZS_SIZE_CLASS_DELTA + 1)
+
+/*
+ * A single 'zspage' is composed of N discontiguous 0-order (single) pages.
+ * This defines the upper limit on N.
+ */
+static const int max_zspage_order = 4;
+
+/*
+ * We do not maintain any list for completely empty or full pages
+ */
+enum fullness_group {
+ ZS_ALMOST_FULL,
+ ZS_ALMOST_EMPTY,
+ _ZS_NR_FULLNESS_GROUPS,
+
+ ZS_EMPTY,
+ ZS_FULL
+};
+
+/*
+ * We assign a page to ZS_ALMOST_EMPTY fullness group when:
+ * n <= N / f, where
+ * n = number of allocated objects
+ * N = total number of objects zspage can store
+ * f = 1/fullness_threshold_frac
+ *
+ * Similarly, we assign zspage to:
+ * ZS_ALMOST_FULL when n > N / f
+ * ZS_EMPTY when n == 0
+ * ZS_FULL when n == N
+ *
+ * (see: fix_fullness_group())
+ */
+static const int fullness_threshold_frac = 4;
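For example, a zspage that can hold N = 8 objects with f = 4 is classed as ZS_EMPTY at n = 0, ZS_ALMOST_EMPTY for n = 1..2 (n <= 8/4), ZS_ALMOST_FULL for n = 3..7 and ZS_FULL at n = 8, matching the checks in get_fullness_group().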
+
+struct mapping_area {
+ struct vm_struct *vm;
+ pte_t *vm_ptes[2];
+ char *vm_addr;
+};
+
+struct size_class {
+ /*
+ * Size of objects stored in this class. Must be multiple
+ * of ZS_ALIGN.
+ */
+ int size;
+ unsigned int index;
+
+ /* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
+ int zspage_order;
+
+ spinlock_t lock;
+
+ /* stats */
+ u64 pages_allocated;
+
+ struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
+};
+
+/*
+ * Placed within free objects to form a singly linked list.
+ * For every zspage, first_page->freelist gives head of this list.
+ *
+ * This must be power of 2 and less than or equal to ZS_ALIGN
+ */
+struct link_free {
+ /* Handle of next free chunk (encodes <PFN, obj_idx>) */
+ void *next;
+};
+
+struct zs_pool {
+ struct size_class size_class[ZS_SIZE_CLASSES];
+
+ gfp_t flags; /* allocation flags used when growing pool */
+ const char *name;
+};
+
+#endif