Diffstat (limited to 'drivers')
228 files changed, 8271 insertions, 3334 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index dd8729d674e..0ed42d8870c 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -211,6 +211,18 @@ config ACPI_HOTPLUG_CPU select ACPI_CONTAINER default y +config ACPI_PROCESSOR_AGGREGATOR + tristate "Processor Aggregator" + depends on ACPI_PROCESSOR + depends on EXPERIMENTAL + depends on X86 + help + ACPI 4.0 defines processor Aggregator, which enables OS to perform + specfic processor configuration and control that applies to all + processors in the platform. Currently only logical processor idling + is defined, which is to reduce power consumption. This driver + support the new device. + config ACPI_THERMAL tristate "Thermal Zone" depends on ACPI_PROCESSOR diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index 82cd49dc603..7702118509a 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -62,3 +62,5 @@ obj-$(CONFIG_ACPI_POWER_METER) += power_meter.o processor-y := processor_core.o processor_throttling.o processor-y += processor_idle.o processor_thermal.o processor-$(CONFIG_CPU_FREQ) += processor_perflib.o + +obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c new file mode 100644 index 00000000000..0d2cdb86158 --- /dev/null +++ b/drivers/acpi/acpi_pad.c @@ -0,0 +1,514 @@ +/* + * acpi_pad.c ACPI Processor Aggregator Driver + * + * Copyright (c) 2009, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + */ + +#include <linux/kernel.h> +#include <linux/cpumask.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/kthread.h> +#include <linux/freezer.h> +#include <linux/cpu.h> +#include <linux/clockchips.h> +#include <acpi/acpi_bus.h> +#include <acpi/acpi_drivers.h> + +#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator" +#define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator" +#define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80 +static DEFINE_MUTEX(isolated_cpus_lock); + +#define MWAIT_SUBSTATE_MASK (0xf) +#define MWAIT_CSTATE_MASK (0xf) +#define MWAIT_SUBSTATE_SIZE (4) +#define CPUID_MWAIT_LEAF (5) +#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1) +#define CPUID5_ECX_INTERRUPT_BREAK (0x2) +static unsigned long power_saving_mwait_eax; +static void power_saving_mwait_init(void) +{ + unsigned int eax, ebx, ecx, edx; + unsigned int highest_cstate = 0; + unsigned int highest_subcstate = 0; + int i; + + if (!boot_cpu_has(X86_FEATURE_MWAIT)) + return; + if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) + return; + + cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx); + + if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || + !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) + return; + + edx >>= MWAIT_SUBSTATE_SIZE; + for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) { + if (edx & MWAIT_SUBSTATE_MASK) { + highest_cstate = i; + highest_subcstate = edx & MWAIT_SUBSTATE_MASK; + } + } + power_saving_mwait_eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) | + (highest_subcstate - 1); + + for_each_online_cpu(i) + clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &i); + +#if defined(CONFIG_GENERIC_TIME) && defined(CONFIG_X86) + switch (boot_cpu_data.x86_vendor) { + case X86_VENDOR_AMD: + case X86_VENDOR_INTEL: + /* + * AMD Fam10h TSC will tick in all + * C/P/S0/S1 states when this bit is set. + */ + if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) + return; + + /*FALL THROUGH*/ + default: + /* TSC could halt in idle, so notify users */ + mark_tsc_unstable("TSC halts in idle"); + } +#endif +} + +static unsigned long cpu_weight[NR_CPUS]; +static int tsk_in_cpu[NR_CPUS] = {[0 ... 
NR_CPUS-1] = -1}; +static DECLARE_BITMAP(pad_busy_cpus_bits, NR_CPUS); +static void round_robin_cpu(unsigned int tsk_index) +{ + struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits); + cpumask_var_t tmp; + int cpu; + unsigned long min_weight = -1, preferred_cpu; + + if (!alloc_cpumask_var(&tmp, GFP_KERNEL)) + return; + + mutex_lock(&isolated_cpus_lock); + cpumask_clear(tmp); + for_each_cpu(cpu, pad_busy_cpus) + cpumask_or(tmp, tmp, topology_thread_cpumask(cpu)); + cpumask_andnot(tmp, cpu_online_mask, tmp); + /* avoid HT sibilings if possible */ + if (cpumask_empty(tmp)) + cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus); + if (cpumask_empty(tmp)) { + mutex_unlock(&isolated_cpus_lock); + return; + } + for_each_cpu(cpu, tmp) { + if (cpu_weight[cpu] < min_weight) { + min_weight = cpu_weight[cpu]; + preferred_cpu = cpu; + } + } + + if (tsk_in_cpu[tsk_index] != -1) + cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus); + tsk_in_cpu[tsk_index] = preferred_cpu; + cpumask_set_cpu(preferred_cpu, pad_busy_cpus); + cpu_weight[preferred_cpu]++; + mutex_unlock(&isolated_cpus_lock); + + set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu)); +} + +static void exit_round_robin(unsigned int tsk_index) +{ + struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits); + cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus); + tsk_in_cpu[tsk_index] = -1; +} + +static unsigned int idle_pct = 5; /* percentage */ +static unsigned int round_robin_time = 10; /* second */ +static int power_saving_thread(void *data) +{ + struct sched_param param = {.sched_priority = 1}; + int do_sleep; + unsigned int tsk_index = (unsigned long)data; + u64 last_jiffies = 0; + + sched_setscheduler(current, SCHED_RR, ¶m); + + while (!kthread_should_stop()) { + int cpu; + u64 expire_time; + + try_to_freeze(); + + /* round robin to cpus */ + if (last_jiffies + round_robin_time * HZ < jiffies) { + last_jiffies = jiffies; + round_robin_cpu(tsk_index); + } + + do_sleep = 0; + + current_thread_info()->status &= ~TS_POLLING; + /* + * TS_POLLING-cleared state must be visible before we test + * NEED_RESCHED: + */ + smp_mb(); + + expire_time = jiffies + HZ * (100 - idle_pct) / 100; + + while (!need_resched()) { + local_irq_disable(); + cpu = smp_processor_id(); + clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, + &cpu); + stop_critical_timings(); + + __monitor((void *)¤t_thread_info()->flags, 0, 0); + smp_mb(); + if (!need_resched()) + __mwait(power_saving_mwait_eax, 1); + + start_critical_timings(); + clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, + &cpu); + local_irq_enable(); + + if (jiffies > expire_time) { + do_sleep = 1; + break; + } + } + + current_thread_info()->status |= TS_POLLING; + + /* + * current sched_rt has threshold for rt task running time. + * When a rt task uses 95% CPU time, the rt thread will be + * scheduled out for 5% CPU time to not starve other tasks. But + * the mechanism only works when all CPUs have RT task running, + * as if one CPU hasn't RT task, RT task from other CPUs will + * borrow CPU time from this CPU and cause RT task use > 95% + * CPU time. To make 'avoid staration' work, takes a nap here. 
+ */ + if (do_sleep) + schedule_timeout_killable(HZ * idle_pct / 100); + } + + exit_round_robin(tsk_index); + return 0; +} + +static struct task_struct *ps_tsks[NR_CPUS]; +static unsigned int ps_tsk_num; +static int create_power_saving_task(void) +{ + ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread, + (void *)(unsigned long)ps_tsk_num, + "power_saving/%d", ps_tsk_num); + if (ps_tsks[ps_tsk_num]) { + ps_tsk_num++; + return 0; + } + return -EINVAL; +} + +static void destroy_power_saving_task(void) +{ + if (ps_tsk_num > 0) { + ps_tsk_num--; + kthread_stop(ps_tsks[ps_tsk_num]); + } +} + +static void set_power_saving_task_num(unsigned int num) +{ + if (num > ps_tsk_num) { + while (ps_tsk_num < num) { + if (create_power_saving_task()) + return; + } + } else if (num < ps_tsk_num) { + while (ps_tsk_num > num) + destroy_power_saving_task(); + } +} + +static int acpi_pad_idle_cpus(unsigned int num_cpus) +{ + get_online_cpus(); + + num_cpus = min_t(unsigned int, num_cpus, num_online_cpus()); + set_power_saving_task_num(num_cpus); + + put_online_cpus(); + return 0; +} + +static uint32_t acpi_pad_idle_cpus_num(void) +{ + return ps_tsk_num; +} + +static ssize_t acpi_pad_rrtime_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + unsigned long num; + if (strict_strtoul(buf, 0, &num)) + return -EINVAL; + if (num < 1 || num >= 100) + return -EINVAL; + mutex_lock(&isolated_cpus_lock); + round_robin_time = num; + mutex_unlock(&isolated_cpus_lock); + return count; +} + +static ssize_t acpi_pad_rrtime_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d", round_robin_time); +} +static DEVICE_ATTR(rrtime, S_IRUGO|S_IWUSR, + acpi_pad_rrtime_show, + acpi_pad_rrtime_store); + +static ssize_t acpi_pad_idlepct_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + unsigned long num; + if (strict_strtoul(buf, 0, &num)) + return -EINVAL; + if (num < 1 || num >= 100) + return -EINVAL; + mutex_lock(&isolated_cpus_lock); + idle_pct = num; + mutex_unlock(&isolated_cpus_lock); + return count; +} + +static ssize_t acpi_pad_idlepct_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d", idle_pct); +} +static DEVICE_ATTR(idlepct, S_IRUGO|S_IWUSR, + acpi_pad_idlepct_show, + acpi_pad_idlepct_store); + +static ssize_t acpi_pad_idlecpus_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + unsigned long num; + if (strict_strtoul(buf, 0, &num)) + return -EINVAL; + mutex_lock(&isolated_cpus_lock); + acpi_pad_idle_cpus(num); + mutex_unlock(&isolated_cpus_lock); + return count; +} + +static ssize_t acpi_pad_idlecpus_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return cpumask_scnprintf(buf, PAGE_SIZE, + to_cpumask(pad_busy_cpus_bits)); +} +static DEVICE_ATTR(idlecpus, S_IRUGO|S_IWUSR, + acpi_pad_idlecpus_show, + acpi_pad_idlecpus_store); + +static int acpi_pad_add_sysfs(struct acpi_device *device) +{ + int result; + + result = device_create_file(&device->dev, &dev_attr_idlecpus); + if (result) + return -ENODEV; + result = device_create_file(&device->dev, &dev_attr_idlepct); + if (result) { + device_remove_file(&device->dev, &dev_attr_idlecpus); + return -ENODEV; + } + result = device_create_file(&device->dev, &dev_attr_rrtime); + if (result) { + device_remove_file(&device->dev, &dev_attr_idlecpus); + device_remove_file(&device->dev, &dev_attr_idlepct); + return 
-ENODEV; + } + return 0; +} + +static void acpi_pad_remove_sysfs(struct acpi_device *device) +{ + device_remove_file(&device->dev, &dev_attr_idlecpus); + device_remove_file(&device->dev, &dev_attr_idlepct); + device_remove_file(&device->dev, &dev_attr_rrtime); +} + +/* Query firmware how many CPUs should be idle */ +static int acpi_pad_pur(acpi_handle handle, int *num_cpus) +{ + struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; + acpi_status status; + union acpi_object *package; + int rev, num, ret = -EINVAL; + + status = acpi_evaluate_object(handle, "_PUR", NULL, &buffer); + if (ACPI_FAILURE(status)) + return -EINVAL; + package = buffer.pointer; + if (package->type != ACPI_TYPE_PACKAGE || package->package.count != 2) + goto out; + rev = package->package.elements[0].integer.value; + num = package->package.elements[1].integer.value; + if (rev != 1) + goto out; + *num_cpus = num; + ret = 0; +out: + kfree(buffer.pointer); + return ret; +} + +/* Notify firmware how many CPUs are idle */ +static void acpi_pad_ost(acpi_handle handle, int stat, + uint32_t idle_cpus) +{ + union acpi_object params[3] = { + {.type = ACPI_TYPE_INTEGER,}, + {.type = ACPI_TYPE_INTEGER,}, + {.type = ACPI_TYPE_BUFFER,}, + }; + struct acpi_object_list arg_list = {3, params}; + + params[0].integer.value = ACPI_PROCESSOR_AGGREGATOR_NOTIFY; + params[1].integer.value = stat; + params[2].buffer.length = 4; + params[2].buffer.pointer = (void *)&idle_cpus; + acpi_evaluate_object(handle, "_OST", &arg_list, NULL); +} + +static void acpi_pad_handle_notify(acpi_handle handle) +{ + int num_cpus, ret; + uint32_t idle_cpus; + + mutex_lock(&isolated_cpus_lock); + if (acpi_pad_pur(handle, &num_cpus)) { + mutex_unlock(&isolated_cpus_lock); + return; + } + ret = acpi_pad_idle_cpus(num_cpus); + idle_cpus = acpi_pad_idle_cpus_num(); + if (!ret) + acpi_pad_ost(handle, 0, idle_cpus); + else + acpi_pad_ost(handle, 1, 0); + mutex_unlock(&isolated_cpus_lock); +} + +static void acpi_pad_notify(acpi_handle handle, u32 event, + void *data) +{ + struct acpi_device *device = data; + + switch (event) { + case ACPI_PROCESSOR_AGGREGATOR_NOTIFY: + acpi_pad_handle_notify(handle); + acpi_bus_generate_proc_event(device, event, 0); + acpi_bus_generate_netlink_event(device->pnp.device_class, + dev_name(&device->dev), event, 0); + break; + default: + printk(KERN_WARNING"Unsupported event [0x%x]\n", event); + break; + } +} + +static int acpi_pad_add(struct acpi_device *device) +{ + acpi_status status; + + strcpy(acpi_device_name(device), ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME); + strcpy(acpi_device_class(device), ACPI_PROCESSOR_AGGREGATOR_CLASS); + + if (acpi_pad_add_sysfs(device)) + return -ENODEV; + + status = acpi_install_notify_handler(device->handle, + ACPI_DEVICE_NOTIFY, acpi_pad_notify, device); + if (ACPI_FAILURE(status)) { + acpi_pad_remove_sysfs(device); + return -ENODEV; + } + + return 0; +} + +static int acpi_pad_remove(struct acpi_device *device, + int type) +{ + mutex_lock(&isolated_cpus_lock); + acpi_pad_idle_cpus(0); + mutex_unlock(&isolated_cpus_lock); + + acpi_remove_notify_handler(device->handle, + ACPI_DEVICE_NOTIFY, acpi_pad_notify); + acpi_pad_remove_sysfs(device); + return 0; +} + +static const struct acpi_device_id pad_device_ids[] = { + {"ACPI000C", 0}, + {"", 0}, +}; +MODULE_DEVICE_TABLE(acpi, pad_device_ids); + +static struct acpi_driver acpi_pad_driver = { + .name = "processor_aggregator", + .class = ACPI_PROCESSOR_AGGREGATOR_CLASS, + .ids = pad_device_ids, + .ops = { + .add = acpi_pad_add, + .remove = acpi_pad_remove, + }, +}; 
+ +static int __init acpi_pad_init(void) +{ + power_saving_mwait_init(); + if (power_saving_mwait_eax == 0) + return -EINVAL; + + return acpi_bus_register_driver(&acpi_pad_driver); +} + +static void __exit acpi_pad_exit(void) +{ + acpi_bus_unregister_driver(&acpi_pad_driver); +} + +module_init(acpi_pad_init); +module_exit(acpi_pad_exit); +MODULE_AUTHOR("Shaohua Li<shaohua.li@intel.com>"); +MODULE_DESCRIPTION("ACPI Processor Aggregator Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index 3a2cfefc71a..7338b6a3e04 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c @@ -67,7 +67,7 @@ struct dock_station { struct list_head dependent_devices; struct list_head hotplug_devices; - struct list_head sibiling; + struct list_head sibling; struct platform_device *dock_device; }; static LIST_HEAD(dock_stations); @@ -275,7 +275,7 @@ int is_dock_device(acpi_handle handle) if (is_dock(handle)) return 1; - list_for_each_entry(dock_station, &dock_stations, sibiling) { + list_for_each_entry(dock_station, &dock_stations, sibling) { if (find_dock_dependent_device(dock_station, handle)) return 1; } @@ -619,7 +619,7 @@ register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops, * make sure this handle is for a device dependent on the dock, * this would include the dock station itself */ - list_for_each_entry(dock_station, &dock_stations, sibiling) { + list_for_each_entry(dock_station, &dock_stations, sibling) { /* * An ATA bay can be in a dock and itself can be ejected * seperately, so there are two 'dock stations' which need the @@ -651,7 +651,7 @@ void unregister_hotplug_dock_device(acpi_handle handle) if (!dock_station_count) return; - list_for_each_entry(dock_station, &dock_stations, sibiling) { + list_for_each_entry(dock_station, &dock_stations, sibling) { dd = find_dock_dependent_device(dock_station, handle); if (dd) dock_del_hotplug_device(dock_station, dd); @@ -787,7 +787,7 @@ static int acpi_dock_notifier_call(struct notifier_block *this, if (event != ACPI_NOTIFY_BUS_CHECK && event != ACPI_NOTIFY_DEVICE_CHECK && event != ACPI_NOTIFY_EJECT_REQUEST) return 0; - list_for_each_entry(dock_station, &dock_stations, sibiling) { + list_for_each_entry(dock_station, &dock_stations, sibling) { if (dock_station->handle == handle) { struct dock_data *dock_data; @@ -958,7 +958,7 @@ static int dock_add(acpi_handle handle) dock_station->last_dock_time = jiffies - HZ; INIT_LIST_HEAD(&dock_station->dependent_devices); INIT_LIST_HEAD(&dock_station->hotplug_devices); - INIT_LIST_HEAD(&dock_station->sibiling); + INIT_LIST_HEAD(&dock_station->sibling); spin_lock_init(&dock_station->dd_lock); mutex_init(&dock_station->hp_lock); ATOMIC_INIT_NOTIFIER_HEAD(&dock_notifier_list); @@ -1044,7 +1044,7 @@ static int dock_add(acpi_handle handle) add_dock_dependent_device(dock_station, dd); dock_station_count++; - list_add(&dock_station->sibiling, &dock_stations); + list_add(&dock_station->sibling, &dock_stations); return 0; dock_add_err_unregister: @@ -1149,7 +1149,7 @@ static void __exit dock_exit(void) struct dock_station *tmp; unregister_acpi_bus_notifier(&dock_acpi_notifier); - list_for_each_entry_safe(dock_station, tmp, &dock_stations, sibiling) + list_for_each_entry_safe(dock_station, tmp, &dock_stations, sibling) dock_remove(dock_station); } diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index f70796081c4..baef28c1e63 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -119,6 +119,8 @@ static struct acpi_ec { } *boot_ec, *first_ec; static int EC_FLAGS_MSI; 
/* Out-of-spec MSI controller */ +static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */ +static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */ /* -------------------------------------------------------------------------- Transaction Management @@ -232,10 +234,8 @@ static int ec_poll(struct acpi_ec *ec) } advance_transaction(ec, acpi_ec_read_status(ec)); } while (time_before(jiffies, delay)); - if (!ec->curr->irq_count || - (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF)) + if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) break; - /* try restart command if we get any false interrupts */ pr_debug(PREFIX "controller reset, restart transaction\n"); spin_lock_irqsave(&ec->curr_lock, flags); start_transaction(ec); @@ -899,6 +899,44 @@ static const struct acpi_device_id ec_device_ids[] = { {"", 0}, }; +/* Some BIOS do not survive early DSDT scan, skip it */ +static int ec_skip_dsdt_scan(const struct dmi_system_id *id) +{ + EC_FLAGS_SKIP_DSDT_SCAN = 1; + return 0; +} + +/* ASUStek often supplies us with broken ECDT, validate it */ +static int ec_validate_ecdt(const struct dmi_system_id *id) +{ + EC_FLAGS_VALIDATE_ECDT = 1; + return 0; +} + +/* MSI EC needs special treatment, enable it */ +static int ec_flag_msi(const struct dmi_system_id *id) +{ + EC_FLAGS_MSI = 1; + EC_FLAGS_VALIDATE_ECDT = 1; + return 0; +} + +static struct dmi_system_id __initdata ec_dmi_table[] = { + { + ec_skip_dsdt_scan, "Compal JFL92", { + DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"), + DMI_MATCH(DMI_BOARD_NAME, "JFL92") }, NULL}, + { + ec_flag_msi, "MSI hardware", { + DMI_MATCH(DMI_BIOS_VENDOR, "Micro-Star"), + DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star") }, NULL}, + { + ec_validate_ecdt, "ASUS hardware", { + DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL}, + {}, +}; + + int __init acpi_ec_ecdt_probe(void) { acpi_status status; @@ -911,11 +949,7 @@ int __init acpi_ec_ecdt_probe(void) /* * Generate a boot ec context */ - if (dmi_name_in_vendors("Micro-Star") || - dmi_name_in_vendors("Notebook")) { - pr_info(PREFIX "Enabling special treatment for EC from MSI.\n"); - EC_FLAGS_MSI = 1; - } + dmi_check_system(ec_dmi_table); status = acpi_get_table(ACPI_SIG_ECDT, 1, (struct acpi_table_header **)&ecdt_ptr); if (ACPI_SUCCESS(status)) { @@ -926,7 +960,7 @@ int __init acpi_ec_ecdt_probe(void) boot_ec->handle = ACPI_ROOT_OBJECT; acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id, &boot_ec->handle); /* Don't trust ECDT, which comes from ASUSTek */ - if (!dmi_name_in_vendors("ASUS") && EC_FLAGS_MSI == 0) + if (!EC_FLAGS_VALIDATE_ECDT) goto install; saved_ec = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL); if (!saved_ec) @@ -934,6 +968,10 @@ int __init acpi_ec_ecdt_probe(void) memcpy(saved_ec, boot_ec, sizeof(struct acpi_ec)); /* fall through */ } + + if (EC_FLAGS_SKIP_DSDT_SCAN) + return -ENODEV; + /* This workaround is needed only on some broken machines, * which require early EC, but fail to provide ECDT */ printk(KERN_DEBUG PREFIX "Look up EC in DSDT\n"); diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c index d0d550d22a6..f8b6f555ba5 100644 --- a/drivers/acpi/proc.c +++ b/drivers/acpi/proc.c @@ -398,6 +398,8 @@ acpi_system_write_wakeup_device(struct file *file, if (len > 4) len = 4; + if (len < 0) + return -EFAULT; if (copy_from_user(strbuf, buffer, len)) return -EFAULT; diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index c2d4d6e0936..c567b46dfa0 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c @@ -863,13 +863,6 @@ static int 
acpi_processor_add(struct acpi_device *device) goto err_remove_sysfs; } - if (pr->flags.throttling) { - printk(KERN_INFO PREFIX "%s [%s] (supports", - acpi_device_name(device), acpi_device_bid(device)); - printk(" %d throttling states", pr->throttling.state_count); - printk(")\n"); - } - return 0; err_remove_sysfs: diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 468921bed22..14a7481c97d 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1052,6 +1052,8 @@ static void acpi_device_set_id(struct acpi_device *device) device->flags.bus_address = 1; } + kfree(info); + /* * Some devices don't reliably have _HIDs & _CIDs, so add * synthetic HIDs to make sure drivers can find them. @@ -1325,13 +1327,8 @@ static int acpi_bus_scan(acpi_handle handle, struct acpi_bus_ops *ops, struct acpi_device **child) { acpi_status status; - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; void *device = NULL; - acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); - printk(KERN_INFO PREFIX "Enumerating devices from [%s]\n", - (char *) buffer.pointer); - status = acpi_bus_check_add(handle, 0, ops, &device); if (ACPI_SUCCESS(status)) acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX, diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index a4fddb24476..f6e54bf8dd9 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -285,7 +285,7 @@ static int acpi_video_device_brightness_open_fs(struct inode *inode, struct file *file); static ssize_t acpi_video_device_write_brightness(struct file *file, const char __user *buffer, size_t count, loff_t *data); -static struct file_operations acpi_video_device_brightness_fops = { +static const struct file_operations acpi_video_device_brightness_fops = { .owner = THIS_MODULE, .open = acpi_video_device_brightness_open_fs, .read = seq_read, diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c index 703364b5217..66e181345b3 100644 --- a/drivers/atm/ambassador.c +++ b/drivers/atm/ambassador.c @@ -1306,14 +1306,6 @@ static void amb_close (struct atm_vcc * atm_vcc) { return; } -/********** Set socket options for a VC **********/ - -// int amb_getsockopt (struct atm_vcc * atm_vcc, int level, int optname, void * optval, int optlen); - -/********** Set socket options for a VC **********/ - -// int amb_setsockopt (struct atm_vcc * atm_vcc, int level, int optname, void * optval, int optlen); - /********** Send **********/ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) { diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c index 5503bfc8e13..0c302614544 100644 --- a/drivers/atm/eni.c +++ b/drivers/atm/eni.c @@ -2031,7 +2031,7 @@ static int eni_getsockopt(struct atm_vcc *vcc,int level,int optname, static int eni_setsockopt(struct atm_vcc *vcc,int level,int optname, - void __user *optval,int optlen) + void __user *optval,unsigned int optlen) { return -EINVAL; } diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c index b119640e1ee..cd5049af47a 100644 --- a/drivers/atm/firestream.c +++ b/drivers/atm/firestream.c @@ -1244,7 +1244,7 @@ static int fs_getsockopt(struct atm_vcc *vcc,int level,int optname, static int fs_setsockopt(struct atm_vcc *vcc,int level,int optname, - void __user *optval,int optlen) + void __user *optval,unsigned int optlen) { func_enter (); func_exit (); diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c index 10f000dbe44..f766cc46b4c 100644 --- a/drivers/atm/fore200e.c +++ b/drivers/atm/fore200e.c @@ -1795,7 +1795,7 @@ fore200e_getsockopt(struct atm_vcc* vcc, int level, 
int optname, void __user *op static int -fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen) +fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, unsigned int optlen) { /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */ diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c index 01ce241dbea..4e49021e67e 100644 --- a/drivers/atm/horizon.c +++ b/drivers/atm/horizon.c @@ -2590,7 +2590,7 @@ static int hrz_getsockopt (struct atm_vcc * atm_vcc, int level, int optname, } static int hrz_setsockopt (struct atm_vcc * atm_vcc, int level, int optname, - void *optval, int optlen) { + void *optval, unsigned int optlen) { hrz_dev * dev = HRZ_DEV(atm_vcc->dev); PRINTD (DBG_FLOW|DBG_VCC, "hrz_setsockopt"); switch (level) { diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index 78c9736c357..b2c1b37ab2e 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c @@ -2862,7 +2862,7 @@ static int ia_getsockopt(struct atm_vcc *vcc, int level, int optname, } static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname, - void __user *optval, int optlen) + void __user *optval, unsigned int optlen) { IF_EVENT(printk(">ia_setsockopt\n");) return -EINVAL; diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c index 752b1ba81f7..2e9635be048 100644 --- a/drivers/atm/zatm.c +++ b/drivers/atm/zatm.c @@ -1517,7 +1517,7 @@ static int zatm_getsockopt(struct atm_vcc *vcc,int level,int optname, static int zatm_setsockopt(struct atm_vcc *vcc,int level,int optname, - void __user *optval,int optlen) + void __user *optval,unsigned int optlen) { return -EINVAL; } diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c index 6fa7b0fdbdf..eb4fa194394 100644 --- a/drivers/block/DAC960.c +++ b/drivers/block/DAC960.c @@ -38,6 +38,7 @@ #include <linux/slab.h> #include <linux/smp_lock.h> #include <linux/proc_fs.h> +#include <linux/seq_file.h> #include <linux/reboot.h> #include <linux/spinlock.h> #include <linux/timer.h> @@ -6422,16 +6423,10 @@ static bool DAC960_V2_ExecuteUserCommand(DAC960_Controller_T *Controller, return true; } - -/* - DAC960_ProcReadStatus implements reading /proc/rd/status. -*/ - -static int DAC960_ProcReadStatus(char *Page, char **Start, off_t Offset, - int Count, int *EOF, void *Data) +static int dac960_proc_show(struct seq_file *m, void *v) { unsigned char *StatusMessage = "OK\n"; - int ControllerNumber, BytesAvailable; + int ControllerNumber; for (ControllerNumber = 0; ControllerNumber < DAC960_ControllerCount; ControllerNumber++) @@ -6444,52 +6439,49 @@ static int DAC960_ProcReadStatus(char *Page, char **Start, off_t Offset, break; } } - BytesAvailable = strlen(StatusMessage) - Offset; - if (Count >= BytesAvailable) - { - Count = BytesAvailable; - *EOF = true; - } - if (Count <= 0) return 0; - *Start = Page; - memcpy(Page, &StatusMessage[Offset], Count); - return Count; + seq_puts(m, StatusMessage); + return 0; } +static int dac960_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, dac960_proc_show, NULL); +} -/* - DAC960_ProcReadInitialStatus implements reading /proc/rd/cN/initial_status. 
-*/ +static const struct file_operations dac960_proc_fops = { + .owner = THIS_MODULE, + .open = dac960_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; -static int DAC960_ProcReadInitialStatus(char *Page, char **Start, off_t Offset, - int Count, int *EOF, void *Data) +static int dac960_initial_status_proc_show(struct seq_file *m, void *v) { - DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data; - int BytesAvailable = Controller->InitialStatusLength - Offset; - if (Count >= BytesAvailable) - { - Count = BytesAvailable; - *EOF = true; - } - if (Count <= 0) return 0; - *Start = Page; - memcpy(Page, &Controller->CombinedStatusBuffer[Offset], Count); - return Count; + DAC960_Controller_T *Controller = (DAC960_Controller_T *)m->private; + seq_printf(m, "%.*s", Controller->InitialStatusLength, Controller->CombinedStatusBuffer); + return 0; } +static int dac960_initial_status_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, dac960_initial_status_proc_show, PDE(inode)->data); +} -/* - DAC960_ProcReadCurrentStatus implements reading /proc/rd/cN/current_status. -*/ +static const struct file_operations dac960_initial_status_proc_fops = { + .owner = THIS_MODULE, + .open = dac960_initial_status_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; -static int DAC960_ProcReadCurrentStatus(char *Page, char **Start, off_t Offset, - int Count, int *EOF, void *Data) +static int dac960_current_status_proc_show(struct seq_file *m, void *v) { - DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data; + DAC960_Controller_T *Controller = (DAC960_Controller_T *) m->private; unsigned char *StatusMessage = "No Rebuild or Consistency Check in Progress\n"; int ProgressMessageLength = strlen(StatusMessage); - int BytesAvailable; if (jiffies != Controller->LastCurrentStatusTime) { Controller->CurrentStatusLength = 0; @@ -6513,49 +6505,41 @@ static int DAC960_ProcReadCurrentStatus(char *Page, char **Start, off_t Offset, } Controller->LastCurrentStatusTime = jiffies; } - BytesAvailable = Controller->CurrentStatusLength - Offset; - if (Count >= BytesAvailable) - { - Count = BytesAvailable; - *EOF = true; - } - if (Count <= 0) return 0; - *Start = Page; - memcpy(Page, &Controller->CurrentStatusBuffer[Offset], Count); - return Count; + seq_printf(m, "%.*s", Controller->CurrentStatusLength, Controller->CurrentStatusBuffer); + return 0; } +static int dac960_current_status_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, dac960_current_status_proc_show, PDE(inode)->data); +} -/* - DAC960_ProcReadUserCommand implements reading /proc/rd/cN/user_command. 
-*/ +static const struct file_operations dac960_current_status_proc_fops = { + .owner = THIS_MODULE, + .open = dac960_current_status_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; -static int DAC960_ProcReadUserCommand(char *Page, char **Start, off_t Offset, - int Count, int *EOF, void *Data) +static int dac960_user_command_proc_show(struct seq_file *m, void *v) { - DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data; - int BytesAvailable = Controller->UserStatusLength - Offset; - if (Count >= BytesAvailable) - { - Count = BytesAvailable; - *EOF = true; - } - if (Count <= 0) return 0; - *Start = Page; - memcpy(Page, &Controller->UserStatusBuffer[Offset], Count); - return Count; -} + DAC960_Controller_T *Controller = (DAC960_Controller_T *)m->private; + seq_printf(m, "%.*s", Controller->UserStatusLength, Controller->UserStatusBuffer); + return 0; +} -/* - DAC960_ProcWriteUserCommand implements writing /proc/rd/cN/user_command. -*/ +static int dac960_user_command_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, dac960_user_command_proc_show, PDE(inode)->data); +} -static int DAC960_ProcWriteUserCommand(struct file *file, +static ssize_t dac960_user_command_proc_write(struct file *file, const char __user *Buffer, - unsigned long Count, void *Data) + size_t Count, loff_t *pos) { - DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data; + DAC960_Controller_T *Controller = (DAC960_Controller_T *) PDE(file->f_path.dentry->d_inode)->data; unsigned char CommandBuffer[80]; int Length; if (Count > sizeof(CommandBuffer)-1) return -EINVAL; @@ -6572,6 +6556,14 @@ static int DAC960_ProcWriteUserCommand(struct file *file, ? Count : -EBUSY); } +static const struct file_operations dac960_user_command_proc_fops = { + .owner = THIS_MODULE, + .open = dac960_user_command_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = dac960_user_command_proc_write, +}; /* DAC960_CreateProcEntries creates the /proc/rd/... 
entries for the @@ -6586,23 +6578,17 @@ static void DAC960_CreateProcEntries(DAC960_Controller_T *Controller) if (DAC960_ProcDirectoryEntry == NULL) { DAC960_ProcDirectoryEntry = proc_mkdir("rd", NULL); - StatusProcEntry = create_proc_read_entry("status", 0, + StatusProcEntry = proc_create("status", 0, DAC960_ProcDirectoryEntry, - DAC960_ProcReadStatus, NULL); + &dac960_proc_fops); } sprintf(Controller->ControllerName, "c%d", Controller->ControllerNumber); ControllerProcEntry = proc_mkdir(Controller->ControllerName, DAC960_ProcDirectoryEntry); - create_proc_read_entry("initial_status", 0, ControllerProcEntry, - DAC960_ProcReadInitialStatus, Controller); - create_proc_read_entry("current_status", 0, ControllerProcEntry, - DAC960_ProcReadCurrentStatus, Controller); - UserCommandProcEntry = - create_proc_read_entry("user_command", S_IWUSR | S_IRUSR, - ControllerProcEntry, DAC960_ProcReadUserCommand, - Controller); - UserCommandProcEntry->write_proc = DAC960_ProcWriteUserCommand; + proc_create_data("initial_status", 0, ControllerProcEntry, &dac960_initial_status_proc_fops, Controller); + proc_create_data("current_status", 0, ControllerProcEntry, &dac960_current_status_proc_fops, Controller); + UserCommandProcEntry = proc_create_data("user_command", S_IWUSR | S_IRUSR, ControllerProcEntry, &dac960_user_command_proc_fops, Controller); Controller->ControllerProcEntry = ControllerProcEntry; } diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 24c3e21ab26..fb5be2d95d5 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -36,9 +36,11 @@ #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/init.h> +#include <linux/jiffies.h> #include <linux/hdreg.h> #include <linux/spinlock.h> #include <linux/compat.h> +#include <linux/mutex.h> #include <asm/uaccess.h> #include <asm/io.h> @@ -155,6 +157,10 @@ static struct board_type products[] = { static ctlr_info_t *hba[MAX_CTLR]; +static struct task_struct *cciss_scan_thread; +static DEFINE_MUTEX(scan_mutex); +static LIST_HEAD(scan_q); + static void do_cciss_request(struct request_queue *q); static irqreturn_t do_cciss_intr(int irq, void *dev_id); static int cciss_open(struct block_device *bdev, fmode_t mode); @@ -164,9 +170,9 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode, static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo); static int cciss_revalidate(struct gendisk *disk); -static int rebuild_lun_table(ctlr_info_t *h, int first_time); +static int rebuild_lun_table(ctlr_info_t *h, int first_time, int via_ioctl); static int deregister_disk(ctlr_info_t *h, int drv_index, - int clear_all); + int clear_all, int via_ioctl); static void cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size, unsigned int *block_size); @@ -189,8 +195,13 @@ static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c, static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c); static void fail_all_cmds(unsigned long ctlr); +static int add_to_scan_list(struct ctlr_info *h); static int scan_thread(void *data); static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c); +static void cciss_hba_release(struct device *dev); +static void cciss_device_release(struct device *dev); +static void cciss_free_gendisk(ctlr_info_t *h, int drv_index); +static void cciss_free_drive_info(ctlr_info_t *h, int drv_index); #ifdef CONFIG_PROC_FS static void cciss_procinit(int i); @@ -245,7 +256,10 @@ static inline void removeQ(CommandList_struct *c) #include 
"cciss_scsi.c" /* For SCSI tape support */ -#define RAID_UNKNOWN 6 +static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", + "UNKNOWN" +}; +#define RAID_UNKNOWN (sizeof(raid_label) / sizeof(raid_label[0])-1) #ifdef CONFIG_PROC_FS @@ -255,9 +269,6 @@ static inline void removeQ(CommandList_struct *c) #define ENG_GIG 1000000000 #define ENG_GIG_FACTOR (ENG_GIG/512) #define ENGAGE_SCSI "engage scsi" -static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", - "UNKNOWN" -}; static struct proc_dir_entry *proc_cciss; @@ -318,7 +329,7 @@ static int cciss_seq_show(struct seq_file *seq, void *v) ctlr_info_t *h = seq->private; unsigned ctlr = h->ctlr; loff_t *pos = v; - drive_info_struct *drv = &h->drv[*pos]; + drive_info_struct *drv = h->drv[*pos]; if (*pos > h->highest_lun) return 0; @@ -331,7 +342,7 @@ static int cciss_seq_show(struct seq_file *seq, void *v) vol_sz_frac *= 100; sector_div(vol_sz_frac, ENG_GIG_FACTOR); - if (drv->raid_level > 5) + if (drv->raid_level < 0 || drv->raid_level > RAID_UNKNOWN) drv->raid_level = RAID_UNKNOWN; seq_printf(seq, "cciss/c%dd%d:" "\t%4u.%02uGB\tRAID %s\n", @@ -426,7 +437,7 @@ out: return err; } -static struct file_operations cciss_proc_fops = { +static const struct file_operations cciss_proc_fops = { .owner = THIS_MODULE, .open = cciss_seq_open, .read = seq_read, @@ -454,9 +465,19 @@ static void __devinit cciss_procinit(int i) #define to_hba(n) container_of(n, struct ctlr_info, dev) #define to_drv(n) container_of(n, drive_info_struct, dev) -static struct device_type cciss_host_type = { - .name = "cciss_host", -}; +static ssize_t host_store_rescan(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ctlr_info *h = to_hba(dev); + + add_to_scan_list(h); + wake_up_process(cciss_scan_thread); + wait_for_completion_interruptible(&h->scan_wait); + + return count; +} +DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan); static ssize_t dev_show_unique_id(struct device *dev, struct device_attribute *attr, @@ -560,11 +581,101 @@ static ssize_t dev_show_rev(struct device *dev, } DEVICE_ATTR(rev, S_IRUGO, dev_show_rev, NULL); +static ssize_t cciss_show_lunid(struct device *dev, + struct device_attribute *attr, char *buf) +{ + drive_info_struct *drv = to_drv(dev); + struct ctlr_info *h = to_hba(drv->dev.parent); + unsigned long flags; + unsigned char lunid[8]; + + spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); + if (h->busy_configuring) { + spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); + return -EBUSY; + } + if (!drv->heads) { + spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); + return -ENOTTY; + } + memcpy(lunid, drv->LunID, sizeof(lunid)); + spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); + return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", + lunid[0], lunid[1], lunid[2], lunid[3], + lunid[4], lunid[5], lunid[6], lunid[7]); +} +DEVICE_ATTR(lunid, S_IRUGO, cciss_show_lunid, NULL); + +static ssize_t cciss_show_raid_level(struct device *dev, + struct device_attribute *attr, char *buf) +{ + drive_info_struct *drv = to_drv(dev); + struct ctlr_info *h = to_hba(drv->dev.parent); + int raid; + unsigned long flags; + + spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); + if (h->busy_configuring) { + spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); + return -EBUSY; + } + raid = drv->raid_level; + spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); + if (raid < 0 || raid > RAID_UNKNOWN) + raid = RAID_UNKNOWN; + + return snprintf(buf, strlen(raid_label[raid]) + 7, "RAID %s\n", 
+ raid_label[raid]); +} +DEVICE_ATTR(raid_level, S_IRUGO, cciss_show_raid_level, NULL); + +static ssize_t cciss_show_usage_count(struct device *dev, + struct device_attribute *attr, char *buf) +{ + drive_info_struct *drv = to_drv(dev); + struct ctlr_info *h = to_hba(drv->dev.parent); + unsigned long flags; + int count; + + spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); + if (h->busy_configuring) { + spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); + return -EBUSY; + } + count = drv->usage_count; + spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); + return snprintf(buf, 20, "%d\n", count); +} +DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL); + +static struct attribute *cciss_host_attrs[] = { + &dev_attr_rescan.attr, + NULL +}; + +static struct attribute_group cciss_host_attr_group = { + .attrs = cciss_host_attrs, +}; + +static const struct attribute_group *cciss_host_attr_groups[] = { + &cciss_host_attr_group, + NULL +}; + +static struct device_type cciss_host_type = { + .name = "cciss_host", + .groups = cciss_host_attr_groups, + .release = cciss_hba_release, +}; + static struct attribute *cciss_dev_attrs[] = { &dev_attr_unique_id.attr, &dev_attr_model.attr, &dev_attr_vendor.attr, &dev_attr_rev.attr, + &dev_attr_lunid.attr, + &dev_attr_raid_level.attr, + &dev_attr_usage_count.attr, NULL }; @@ -580,12 +691,24 @@ static const struct attribute_group *cciss_dev_attr_groups[] = { static struct device_type cciss_dev_type = { .name = "cciss_device", .groups = cciss_dev_attr_groups, + .release = cciss_device_release, }; static struct bus_type cciss_bus_type = { .name = "cciss", }; +/* + * cciss_hba_release is called when the reference count + * of h->dev goes to zero. + */ +static void cciss_hba_release(struct device *dev) +{ + /* + * nothing to do, but need this to avoid a warning + * about not having a release handler from lib/kref.c. + */ +} /* * Initialize sysfs entry for each controller. This sets up and registers @@ -609,6 +732,16 @@ static int cciss_create_hba_sysfs_entry(struct ctlr_info *h) static void cciss_destroy_hba_sysfs_entry(struct ctlr_info *h) { device_del(&h->dev); + put_device(&h->dev); /* final put. */ +} + +/* cciss_device_release is called when the reference count + * of h->drv[x]dev goes to zero. + */ +static void cciss_device_release(struct device *dev) +{ + drive_info_struct *drv = to_drv(dev); + kfree(drv); } /* @@ -617,24 +750,39 @@ static void cciss_destroy_hba_sysfs_entry(struct ctlr_info *h) * /sys/bus/pci/devices/<dev/ccis#/. We also create a link from * /sys/block/cciss!c#d# to this entry. */ -static int cciss_create_ld_sysfs_entry(struct ctlr_info *h, - drive_info_struct *drv, +static long cciss_create_ld_sysfs_entry(struct ctlr_info *h, int drv_index) { - device_initialize(&drv->dev); - drv->dev.type = &cciss_dev_type; - drv->dev.bus = &cciss_bus_type; - dev_set_name(&drv->dev, "c%dd%d", h->ctlr, drv_index); - drv->dev.parent = &h->dev; - return device_add(&drv->dev); + struct device *dev; + + if (h->drv[drv_index]->device_initialized) + return 0; + + dev = &h->drv[drv_index]->dev; + device_initialize(dev); + dev->type = &cciss_dev_type; + dev->bus = &cciss_bus_type; + dev_set_name(dev, "c%dd%d", h->ctlr, drv_index); + dev->parent = &h->dev; + h->drv[drv_index]->device_initialized = 1; + return device_add(dev); } /* * Remove sysfs entries for a logical drive. 
*/ -static void cciss_destroy_ld_sysfs_entry(drive_info_struct *drv) +static void cciss_destroy_ld_sysfs_entry(struct ctlr_info *h, int drv_index, + int ctlr_exiting) { - device_del(&drv->dev); + struct device *dev = &h->drv[drv_index]->dev; + + /* special case for c*d0, we only destroy it on controller exit */ + if (drv_index == 0 && !ctlr_exiting) + return; + + device_del(dev); + put_device(dev); /* the "final" put. */ + h->drv[drv_index] = NULL; } /* @@ -751,7 +899,7 @@ static int cciss_open(struct block_device *bdev, fmode_t mode) printk(KERN_DEBUG "cciss_open %s\n", bdev->bd_disk->disk_name); #endif /* CCISS_DEBUG */ - if (host->busy_initializing || drv->busy_configuring) + if (drv->busy_configuring) return -EBUSY; /* * Root is allowed to open raw volume zero even if it's not configured @@ -767,7 +915,8 @@ static int cciss_open(struct block_device *bdev, fmode_t mode) if (MINOR(bdev->bd_dev) & 0x0f) { return -ENXIO; /* if it is, make sure we have a LUN ID */ - } else if (drv->LunID == 0) { + } else if (memcmp(drv->LunID, CTLR_LUNID, + sizeof(drv->LunID))) { return -ENXIO; } } @@ -1132,12 +1281,13 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode, case CCISS_DEREGDISK: case CCISS_REGNEWD: case CCISS_REVALIDVOLS: - return rebuild_lun_table(host, 0); + return rebuild_lun_table(host, 0, 1); case CCISS_GETLUNINFO:{ LogvolInfo_struct luninfo; - luninfo.LunID = drv->LunID; + memcpy(&luninfo.LunID, drv->LunID, + sizeof(luninfo.LunID)); luninfo.num_opens = drv->usage_count; luninfo.num_parts = 0; if (copy_to_user(argp, &luninfo, @@ -1475,7 +1625,10 @@ static void cciss_check_queues(ctlr_info_t *h) /* make sure the disk has been added and the drive is real * because this can be called from the middle of init_one. */ - if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads)) + if (!h->drv[curr_queue]) + continue; + if (!(h->drv[curr_queue]->queue) || + !(h->drv[curr_queue]->heads)) continue; blk_start_queue(h->gendisk[curr_queue]->queue); @@ -1532,13 +1685,11 @@ static void cciss_softirq_done(struct request *rq) spin_unlock_irqrestore(&h->lock, flags); } -static void log_unit_to_scsi3addr(ctlr_info_t *h, unsigned char scsi3addr[], - uint32_t log_unit) +static inline void log_unit_to_scsi3addr(ctlr_info_t *h, + unsigned char scsi3addr[], uint32_t log_unit) { - log_unit = h->drv[log_unit].LunID & 0x03fff; - memset(&scsi3addr[4], 0, 4); - memcpy(&scsi3addr[0], &log_unit, 4); - scsi3addr[3] |= 0x40; + memcpy(scsi3addr, h->drv[log_unit]->LunID, + sizeof(h->drv[log_unit]->LunID)); } /* This function gets the SCSI vendor, model, and revision of a logical drive @@ -1615,16 +1766,23 @@ static void cciss_get_serial_no(int ctlr, int logvol, int withirq, return; } -static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk, +/* + * cciss_add_disk sets up the block device queue for a logical drive + */ +static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk, int drv_index) { disk->queue = blk_init_queue(do_cciss_request, &h->lock); + if (!disk->queue) + goto init_queue_failure; sprintf(disk->disk_name, "cciss/c%dd%d", h->ctlr, drv_index); disk->major = h->major; disk->first_minor = drv_index << NWD_SHIFT; disk->fops = &cciss_fops; - disk->private_data = &h->drv[drv_index]; - disk->driverfs_dev = &h->drv[drv_index].dev; + if (cciss_create_ld_sysfs_entry(h, drv_index)) + goto cleanup_queue; + disk->private_data = h->drv[drv_index]; + disk->driverfs_dev = &h->drv[drv_index]->dev; /* Set up queue information */ blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask); @@ 
-1642,14 +1800,21 @@ static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk, disk->queue->queuedata = h; blk_queue_logical_block_size(disk->queue, - h->drv[drv_index].block_size); + h->drv[drv_index]->block_size); /* Make sure all queue data is written out before */ - /* setting h->drv[drv_index].queue, as setting this */ + /* setting h->drv[drv_index]->queue, as setting this */ /* allows the interrupt handler to start the queue */ wmb(); - h->drv[drv_index].queue = disk->queue; + h->drv[drv_index]->queue = disk->queue; add_disk(disk); + return 0; + +cleanup_queue: + blk_cleanup_queue(disk->queue); + disk->queue = NULL; +init_queue_failure: + return -1; } /* This function will check the usage_count of the drive to be updated/added. @@ -1662,7 +1827,8 @@ static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk, * is also the controller node. Any changes to disk 0 will show up on * the next reboot. */ -static void cciss_update_drive_info(int ctlr, int drv_index, int first_time) +static void cciss_update_drive_info(int ctlr, int drv_index, int first_time, + int via_ioctl) { ctlr_info_t *h = hba[ctlr]; struct gendisk *disk; @@ -1672,21 +1838,13 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time) unsigned long flags = 0; int ret = 0; drive_info_struct *drvinfo; - int was_only_controller_node; /* Get information about the disk and modify the driver structure */ inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL); - drvinfo = kmalloc(sizeof(*drvinfo), GFP_KERNEL); + drvinfo = kzalloc(sizeof(*drvinfo), GFP_KERNEL); if (inq_buff == NULL || drvinfo == NULL) goto mem_msg; - /* See if we're trying to update the "controller node" - * this will happen the when the first logical drive gets - * created by ACU. - */ - was_only_controller_node = (drv_index == 0 && - h->drv[0].raid_level == -1); - /* testing to see if 16-byte CDBs are already being used */ if (h->cciss_read == CCISS_READ_16) { cciss_read_capacity_16(h->ctlr, drv_index, 1, @@ -1719,16 +1877,19 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time) drvinfo->model, drvinfo->rev); cciss_get_serial_no(ctlr, drv_index, 1, drvinfo->serial_no, sizeof(drvinfo->serial_no)); + /* Save the lunid in case we deregister the disk, below. */ + memcpy(drvinfo->LunID, h->drv[drv_index]->LunID, + sizeof(drvinfo->LunID)); /* Is it the same disk we already know, and nothing's changed? */ - if (h->drv[drv_index].raid_level != -1 && + if (h->drv[drv_index]->raid_level != -1 && ((memcmp(drvinfo->serial_no, - h->drv[drv_index].serial_no, 16) == 0) && - drvinfo->block_size == h->drv[drv_index].block_size && - drvinfo->nr_blocks == h->drv[drv_index].nr_blocks && - drvinfo->heads == h->drv[drv_index].heads && - drvinfo->sectors == h->drv[drv_index].sectors && - drvinfo->cylinders == h->drv[drv_index].cylinders)) + h->drv[drv_index]->serial_no, 16) == 0) && + drvinfo->block_size == h->drv[drv_index]->block_size && + drvinfo->nr_blocks == h->drv[drv_index]->nr_blocks && + drvinfo->heads == h->drv[drv_index]->heads && + drvinfo->sectors == h->drv[drv_index]->sectors && + drvinfo->cylinders == h->drv[drv_index]->cylinders)) /* The disk is unchanged, nothing to update */ goto freeret; @@ -1738,18 +1899,17 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time) * If the disk already exists then deregister it before proceeding * (unless it's the first disk (for the controller node). 
*/ - if (h->drv[drv_index].raid_level != -1 && drv_index != 0) { + if (h->drv[drv_index]->raid_level != -1 && drv_index != 0) { printk(KERN_WARNING "disk %d has changed.\n", drv_index); spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); - h->drv[drv_index].busy_configuring = 1; + h->drv[drv_index]->busy_configuring = 1; spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); - /* deregister_disk sets h->drv[drv_index].queue = NULL + /* deregister_disk sets h->drv[drv_index]->queue = NULL * which keeps the interrupt handler from starting * the queue. */ - ret = deregister_disk(h, drv_index, 0); - h->drv[drv_index].busy_configuring = 0; + ret = deregister_disk(h, drv_index, 0, via_ioctl); } /* If the disk is in use return */ @@ -1757,22 +1917,31 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time) goto freeret; /* Save the new information from cciss_geometry_inquiry - * and serial number inquiry. + * and serial number inquiry. If the disk was deregistered + * above, then h->drv[drv_index] will be NULL. */ - h->drv[drv_index].block_size = drvinfo->block_size; - h->drv[drv_index].nr_blocks = drvinfo->nr_blocks; - h->drv[drv_index].heads = drvinfo->heads; - h->drv[drv_index].sectors = drvinfo->sectors; - h->drv[drv_index].cylinders = drvinfo->cylinders; - h->drv[drv_index].raid_level = drvinfo->raid_level; - memcpy(h->drv[drv_index].serial_no, drvinfo->serial_no, 16); - memcpy(h->drv[drv_index].vendor, drvinfo->vendor, VENDOR_LEN + 1); - memcpy(h->drv[drv_index].model, drvinfo->model, MODEL_LEN + 1); - memcpy(h->drv[drv_index].rev, drvinfo->rev, REV_LEN + 1); + if (h->drv[drv_index] == NULL) { + drvinfo->device_initialized = 0; + h->drv[drv_index] = drvinfo; + drvinfo = NULL; /* so it won't be freed below. */ + } else { + /* special case for cxd0 */ + h->drv[drv_index]->block_size = drvinfo->block_size; + h->drv[drv_index]->nr_blocks = drvinfo->nr_blocks; + h->drv[drv_index]->heads = drvinfo->heads; + h->drv[drv_index]->sectors = drvinfo->sectors; + h->drv[drv_index]->cylinders = drvinfo->cylinders; + h->drv[drv_index]->raid_level = drvinfo->raid_level; + memcpy(h->drv[drv_index]->serial_no, drvinfo->serial_no, 16); + memcpy(h->drv[drv_index]->vendor, drvinfo->vendor, + VENDOR_LEN + 1); + memcpy(h->drv[drv_index]->model, drvinfo->model, MODEL_LEN + 1); + memcpy(h->drv[drv_index]->rev, drvinfo->rev, REV_LEN + 1); + } ++h->num_luns; disk = h->gendisk[drv_index]; - set_capacity(disk, h->drv[drv_index].nr_blocks); + set_capacity(disk, h->drv[drv_index]->nr_blocks); /* If it's not disk 0 (drv_index != 0) * or if it was disk 0, but there was previously @@ -1780,8 +1949,15 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time) * (raid_leve == -1) then we want to update the * logical drive's information. */ - if (drv_index || first_time) - cciss_add_disk(h, disk, drv_index); + if (drv_index || first_time) { + if (cciss_add_disk(h, disk, drv_index) != 0) { + cciss_free_gendisk(h, drv_index); + cciss_free_drive_info(h, drv_index); + printk(KERN_WARNING "cciss:%d could not update " + "disk %d\n", h->ctlr, drv_index); + --h->num_luns; + } + } freeret: kfree(inq_buff); @@ -1793,28 +1969,70 @@ mem_msg: } /* This function will find the first index of the controllers drive array - * that has a -1 for the raid_level and will return that index. This is - * where new drives will be added. If the index to be returned is greater - * than the highest_lun index for the controller then highest_lun is set - * to this new index. 
If there are no available indexes then -1 is returned. - * "controller_node" is used to know if this is a real logical drive, or just - * the controller node, which determines if this counts towards highest_lun. + * that has a null drv pointer and allocate the drive info struct and + * will return that index This is where new drives will be added. + * If the index to be returned is greater than the highest_lun index for + * the controller then highest_lun is set * to this new index. + * If there are no available indexes or if tha allocation fails, then -1 + * is returned. * "controller_node" is used to know if this is a real + * logical drive, or just the controller node, which determines if this + * counts towards highest_lun. */ -static int cciss_find_free_drive_index(int ctlr, int controller_node) +static int cciss_alloc_drive_info(ctlr_info_t *h, int controller_node) { int i; + drive_info_struct *drv; + /* Search for an empty slot for our drive info */ for (i = 0; i < CISS_MAX_LUN; i++) { - if (hba[ctlr]->drv[i].raid_level == -1) { - if (i > hba[ctlr]->highest_lun) - if (!controller_node) - hba[ctlr]->highest_lun = i; + + /* if not cxd0 case, and it's occupied, skip it. */ + if (h->drv[i] && i != 0) + continue; + /* + * If it's cxd0 case, and drv is alloc'ed already, and a + * disk is configured there, skip it. + */ + if (i == 0 && h->drv[i] && h->drv[i]->raid_level != -1) + continue; + + /* + * We've found an empty slot. Update highest_lun + * provided this isn't just the fake cxd0 controller node. + */ + if (i > h->highest_lun && !controller_node) + h->highest_lun = i; + + /* If adding a real disk at cxd0, and it's already alloc'ed */ + if (i == 0 && h->drv[i] != NULL) return i; - } + + /* + * Found an empty slot, not already alloc'ed. Allocate it. + * Mark it with raid_level == -1, so we know it's new later on. + */ + drv = kzalloc(sizeof(*drv), GFP_KERNEL); + if (!drv) + return -1; + drv->raid_level = -1; /* so we know it's new */ + h->drv[i] = drv; + return i; } return -1; } +static void cciss_free_drive_info(ctlr_info_t *h, int drv_index) +{ + kfree(h->drv[drv_index]); + h->drv[drv_index] = NULL; +} + +static void cciss_free_gendisk(ctlr_info_t *h, int drv_index) +{ + put_disk(h->gendisk[drv_index]); + h->gendisk[drv_index] = NULL; +} + /* cciss_add_gendisk finds a free hba[]->drv structure * and allocates a gendisk if needed, and sets the lunid * in the drvinfo structure. It returns the index into @@ -1824,13 +2042,15 @@ static int cciss_find_free_drive_index(int ctlr, int controller_node) * a means to talk to the controller in case no logical * drives have yet been configured. 
*/ -static int cciss_add_gendisk(ctlr_info_t *h, __u32 lunid, int controller_node) +static int cciss_add_gendisk(ctlr_info_t *h, unsigned char lunid[], + int controller_node) { int drv_index; - drv_index = cciss_find_free_drive_index(h->ctlr, controller_node); + drv_index = cciss_alloc_drive_info(h, controller_node); if (drv_index == -1) return -1; + /*Check if the gendisk needs to be allocated */ if (!h->gendisk[drv_index]) { h->gendisk[drv_index] = @@ -1839,23 +2059,24 @@ static int cciss_add_gendisk(ctlr_info_t *h, __u32 lunid, int controller_node) printk(KERN_ERR "cciss%d: could not " "allocate a new disk %d\n", h->ctlr, drv_index); - return -1; + goto err_free_drive_info; } } - h->drv[drv_index].LunID = lunid; - if (cciss_create_ld_sysfs_entry(h, &h->drv[drv_index], drv_index)) + memcpy(h->drv[drv_index]->LunID, lunid, + sizeof(h->drv[drv_index]->LunID)); + if (cciss_create_ld_sysfs_entry(h, drv_index)) goto err_free_disk; - /* Don't need to mark this busy because nobody */ /* else knows about this disk yet to contend */ /* for access to it. */ - h->drv[drv_index].busy_configuring = 0; + h->drv[drv_index]->busy_configuring = 0; wmb(); return drv_index; err_free_disk: - put_disk(h->gendisk[drv_index]); - h->gendisk[drv_index] = NULL; + cciss_free_gendisk(h, drv_index); +err_free_drive_info: + cciss_free_drive_info(h, drv_index); return -1; } @@ -1872,21 +2093,25 @@ static void cciss_add_controller_node(ctlr_info_t *h) if (h->gendisk[0] != NULL) /* already did this? Then bail. */ return; - drv_index = cciss_add_gendisk(h, 0, 1); - if (drv_index == -1) { - printk(KERN_WARNING "cciss%d: could not " - "add disk 0.\n", h->ctlr); - return; - } - h->drv[drv_index].block_size = 512; - h->drv[drv_index].nr_blocks = 0; - h->drv[drv_index].heads = 0; - h->drv[drv_index].sectors = 0; - h->drv[drv_index].cylinders = 0; - h->drv[drv_index].raid_level = -1; - memset(h->drv[drv_index].serial_no, 0, 16); + drv_index = cciss_add_gendisk(h, CTLR_LUNID, 1); + if (drv_index == -1) + goto error; + h->drv[drv_index]->block_size = 512; + h->drv[drv_index]->nr_blocks = 0; + h->drv[drv_index]->heads = 0; + h->drv[drv_index]->sectors = 0; + h->drv[drv_index]->cylinders = 0; + h->drv[drv_index]->raid_level = -1; + memset(h->drv[drv_index]->serial_no, 0, 16); disk = h->gendisk[drv_index]; - cciss_add_disk(h, disk, drv_index); + if (cciss_add_disk(h, disk, drv_index) == 0) + return; + cciss_free_gendisk(h, drv_index); + cciss_free_drive_info(h, drv_index); +error: + printk(KERN_WARNING "cciss%d: could not " + "add disk 0.\n", h->ctlr); + return; } /* This function will add and remove logical drives from the Logical @@ -1897,7 +2122,8 @@ static void cciss_add_controller_node(ctlr_info_t *h) * INPUT * h = The controller to perform the operations on */ -static int rebuild_lun_table(ctlr_info_t *h, int first_time) +static int rebuild_lun_table(ctlr_info_t *h, int first_time, + int via_ioctl) { int ctlr = h->ctlr; int num_luns; @@ -1907,7 +2133,7 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time) int i; int drv_found; int drv_index = 0; - __u32 lunid = 0; + unsigned char lunid[8] = CTLR_LUNID; unsigned long flags; if (!capable(CAP_SYS_RAWIO)) @@ -1960,13 +2186,13 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time) drv_found = 0; /* skip holes in the array from already deleted drives */ - if (h->drv[i].raid_level == -1) + if (h->drv[i] == NULL) continue; for (j = 0; j < num_luns; j++) { - memcpy(&lunid, &ld_buff->LUN[j][0], 4); - lunid = le32_to_cpu(lunid); - if (h->drv[i].LunID == lunid) { + 
memcpy(lunid, &ld_buff->LUN[j][0], sizeof(lunid)); + if (memcmp(h->drv[i]->LunID, lunid, + sizeof(lunid)) == 0) { drv_found = 1; break; } @@ -1974,11 +2200,11 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time) if (!drv_found) { /* Deregister it from the OS, it's gone. */ spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); - h->drv[i].busy_configuring = 1; + h->drv[i]->busy_configuring = 1; spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); - return_code = deregister_disk(h, i, 1); - cciss_destroy_ld_sysfs_entry(&h->drv[i]); - h->drv[i].busy_configuring = 0; + return_code = deregister_disk(h, i, 1, via_ioctl); + if (h->drv[i] != NULL) + h->drv[i]->busy_configuring = 0; } } @@ -1992,17 +2218,16 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time) drv_found = 0; - memcpy(&lunid, &ld_buff->LUN[i][0], 4); - lunid = le32_to_cpu(lunid); - + memcpy(lunid, &ld_buff->LUN[i][0], sizeof(lunid)); /* Find if the LUN is already in the drive array * of the driver. If so then update its info * if not in use. If it does not exist then find * the first free index and add it. */ for (j = 0; j <= h->highest_lun; j++) { - if (h->drv[j].raid_level != -1 && - h->drv[j].LunID == lunid) { + if (h->drv[j] != NULL && + memcmp(h->drv[j]->LunID, lunid, + sizeof(h->drv[j]->LunID)) == 0) { drv_index = j; drv_found = 1; break; @@ -2015,7 +2240,8 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time) if (drv_index == -1) goto freeret; } - cciss_update_drive_info(ctlr, drv_index, first_time); + cciss_update_drive_info(ctlr, drv_index, first_time, + via_ioctl); } /* end for */ freeret: @@ -2032,6 +2258,25 @@ mem_msg: goto freeret; } +static void cciss_clear_drive_info(drive_info_struct *drive_info) +{ + /* zero out the disk size info */ + drive_info->nr_blocks = 0; + drive_info->block_size = 0; + drive_info->heads = 0; + drive_info->sectors = 0; + drive_info->cylinders = 0; + drive_info->raid_level = -1; + memset(drive_info->serial_no, 0, sizeof(drive_info->serial_no)); + memset(drive_info->model, 0, sizeof(drive_info->model)); + memset(drive_info->rev, 0, sizeof(drive_info->rev)); + memset(drive_info->vendor, 0, sizeof(drive_info->vendor)); + /* + * don't clear the LUNID though, we need to remember which + * one this one is. + */ +} + /* This function will deregister the disk and it's queue from the * kernel. It must be called with the controller lock held and the * drv structures busy_configuring flag set. It's parameters are: @@ -2046,43 +2291,48 @@ mem_msg: * the disk in preparation for re-adding it. In this case * the highest_lun should be left unchanged and the LunID * should not be cleared. + * via_ioctl + * This indicates whether we've reached this path via ioctl. + * This affects the maximum usage count allowed for c0d0 to be messed with. + * If this path is reached via ioctl(), then the max_usage_count will + * be 1, as the process calling ioctl() has got to have the device open. + * If we get here via sysfs, then the max usage count will be zero. 
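/*
 * Illustrative sketch only: with LunID widened from a __u32 to an opaque
 * 8-byte array (see the cciss.h hunk later in this diff), the equality tests
 * in the hunks above become memcmp() and assignment becomes memcpy().  The
 * helper names below are hypothetical:
 */
#include <linux/string.h>

static inline int ex_lunid_equal(const unsigned char a[8],
				 const unsigned char b[8])
{
	return memcmp(a, b, 8) == 0;
}

static inline void ex_lunid_copy(unsigned char dst[8],
				 const unsigned char src[8])
{
	memcpy(dst, src, 8);
}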
*/ static int deregister_disk(ctlr_info_t *h, int drv_index, - int clear_all) + int clear_all, int via_ioctl) { int i; struct gendisk *disk; drive_info_struct *drv; + int recalculate_highest_lun; if (!capable(CAP_SYS_RAWIO)) return -EPERM; - drv = &h->drv[drv_index]; + drv = h->drv[drv_index]; disk = h->gendisk[drv_index]; /* make sure logical volume is NOT is use */ if (clear_all || (h->gendisk[0] == disk)) { - if (drv->usage_count > 1) + if (drv->usage_count > via_ioctl) return -EBUSY; } else if (drv->usage_count > 0) return -EBUSY; + recalculate_highest_lun = (drv == h->drv[h->highest_lun]); + /* invalidate the devices and deregister the disk. If it is disk * zero do not deregister it but just zero out it's values. This * allows us to delete disk zero but keep the controller registered. */ if (h->gendisk[0] != disk) { struct request_queue *q = disk->queue; - if (disk->flags & GENHD_FL_UP) + if (disk->flags & GENHD_FL_UP) { + cciss_destroy_ld_sysfs_entry(h, drv_index, 0); del_gendisk(disk); - if (q) { - blk_cleanup_queue(q); - /* Set drv->queue to NULL so that we do not try - * to call blk_start_queue on this queue in the - * interrupt handler - */ - drv->queue = NULL; } + if (q) + blk_cleanup_queue(q); /* If clear_all is set then we are deleting the logical * drive, not just refreshing its info. For drives * other than disk 0 we will call put_disk. We do not @@ -2105,34 +2355,20 @@ static int deregister_disk(ctlr_info_t *h, int drv_index, } } else { set_capacity(disk, 0); + cciss_clear_drive_info(drv); } --h->num_luns; - /* zero out the disk size info */ - drv->nr_blocks = 0; - drv->block_size = 0; - drv->heads = 0; - drv->sectors = 0; - drv->cylinders = 0; - drv->raid_level = -1; /* This can be used as a flag variable to - * indicate that this element of the drive - * array is free. - */ - - if (clear_all) { - /* check to see if it was the last disk */ - if (drv == h->drv + h->highest_lun) { - /* if so, find the new hightest lun */ - int i, newhighest = -1; - for (i = 0; i <= h->highest_lun; i++) { - /* if the disk has size > 0, it is available */ - if (h->drv[i].heads) - newhighest = i; - } - h->highest_lun = newhighest; - } - drv->LunID = 0; + /* if it was the last disk, find the new hightest lun */ + if (clear_all && recalculate_highest_lun) { + int i, newhighest = -1; + for (i = 0; i <= h->highest_lun; i++) { + /* if the disk has size > 0, it is available */ + if (h->drv[i] && h->drv[i]->heads) + newhighest = i; + } + h->highest_lun = newhighest; } return 0; } @@ -2479,8 +2715,6 @@ static void cciss_geometry_inquiry(int ctlr, int logvol, } else { /* Get geometry failed */ printk(KERN_WARNING "cciss: reading geometry failed\n"); } - printk(KERN_INFO " heads=%d, sectors=%d, cylinders=%d\n\n", - drv->heads, drv->sectors, drv->cylinders); } static void @@ -2514,9 +2748,6 @@ cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size, *total_size = 0; *block_size = BLOCK_SIZE; } - if (*total_size != 0) - printk(KERN_INFO " blocks= %llu block_size= %d\n", - (unsigned long long)*total_size+1, *block_size); kfree(buf); } @@ -2568,7 +2799,8 @@ static int cciss_revalidate(struct gendisk *disk) InquiryData_struct *inq_buff = NULL; for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) { - if (h->drv[logvol].LunID == drv->LunID) { + if (memcmp(h->drv[logvol]->LunID, drv->LunID, + sizeof(drv->LunID)) == 0) { FOUND = 1; break; } @@ -3053,8 +3285,7 @@ static void do_cciss_request(struct request_queue *q) /* The first 2 bits are reserved for controller error reporting. 
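/*
 * Illustrative sketch only: the recalculate_highest_lun path above reduces to
 * a scan for the largest index that still holds a configured drive
 * (heads != 0 meaning "has a size").  Types are simplified stand-ins:
 */
struct ex_drv {
	unsigned int heads;			/* stand-in for drive_info_struct */
};

static int ex_recompute_highest(struct ex_drv **drv, int old_highest)
{
	int i, newhighest = -1;

	for (i = 0; i <= old_highest; i++)
		if (drv[i] && drv[i]->heads)
			newhighest = i;
	return newhighest;
}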
*/ c->Header.Tag.lower = (c->cmdindex << 3); c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */ - c->Header.LUN.LogDev.VolId = drv->LunID; - c->Header.LUN.LogDev.Mode = 1; + memcpy(&c->Header.LUN, drv->LunID, sizeof(drv->LunID)); c->Request.CDBLen = 10; // 12 byte commands not in FW yet; c->Request.Type.Type = TYPE_CMD; // It is a command. c->Request.Type.Attribute = ATTR_SIMPLE; @@ -3232,20 +3463,121 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id) return IRQ_HANDLED; } +/** + * add_to_scan_list() - add controller to rescan queue + * @h: Pointer to the controller. + * + * Adds the controller to the rescan queue if not already on the queue. + * + * returns 1 if added to the queue, 0 if skipped (could be on the + * queue already, or the controller could be initializing or shutting + * down). + **/ +static int add_to_scan_list(struct ctlr_info *h) +{ + struct ctlr_info *test_h; + int found = 0; + int ret = 0; + + if (h->busy_initializing) + return 0; + + if (!mutex_trylock(&h->busy_shutting_down)) + return 0; + + mutex_lock(&scan_mutex); + list_for_each_entry(test_h, &scan_q, scan_list) { + if (test_h == h) { + found = 1; + break; + } + } + if (!found && !h->busy_scanning) { + INIT_COMPLETION(h->scan_wait); + list_add_tail(&h->scan_list, &scan_q); + ret = 1; + } + mutex_unlock(&scan_mutex); + mutex_unlock(&h->busy_shutting_down); + + return ret; +} + +/** + * remove_from_scan_list() - remove controller from rescan queue + * @h: Pointer to the controller. + * + * Removes the controller from the rescan queue if present. Blocks if + * the controller is currently conducting a rescan. + **/ +static void remove_from_scan_list(struct ctlr_info *h) +{ + struct ctlr_info *test_h, *tmp_h; + int scanning = 0; + + mutex_lock(&scan_mutex); + list_for_each_entry_safe(test_h, tmp_h, &scan_q, scan_list) { + if (test_h == h) { + list_del(&h->scan_list); + complete_all(&h->scan_wait); + mutex_unlock(&scan_mutex); + return; + } + } + if (&h->busy_scanning) + scanning = 0; + mutex_unlock(&scan_mutex); + + if (scanning) + wait_for_completion(&h->scan_wait); +} + +/** + * scan_thread() - kernel thread used to rescan controllers + * @data: Ignored. + * + * A kernel thread used scan for drive topology changes on + * controllers. The thread processes only one controller at a time + * using a queue. Controllers are added to the queue using + * add_to_scan_list() and removed from the queue either after done + * processing or using remove_from_scan_list(). + * + * returns 0. 
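/*
 * Illustrative sketch only: the rescan machinery documented above replaces a
 * per-controller completion with one global queue drained by a single
 * kthread.  Producers add a controller to the queue and wake the thread; the
 * thread drains the queue and rescans each entry.  Names below are
 * simplified stand-ins, not the driver's own:
 */
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/sched.h>

struct ex_ctlr {
	struct list_head scan_list;		/* INIT_LIST_HEAD() at probe time */
};

static LIST_HEAD(ex_scan_q);
static DEFINE_MUTEX(ex_scan_mutex);
static struct task_struct *ex_scan_task;	/* from kthread_run() at init (not shown) */

static void ex_queue_rescan(struct ex_ctlr *h)	/* producer */
{
	mutex_lock(&ex_scan_mutex);
	if (list_empty(&h->scan_list))		/* not already queued */
		list_add_tail(&h->scan_list, &ex_scan_q);
	mutex_unlock(&ex_scan_mutex);
	wake_up_process(ex_scan_task);
}

static int ex_scan_fn(void *unused)		/* consumer kthread */
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&ex_scan_q))	/* sleep until a producer wakes us */
			schedule();
		__set_current_state(TASK_RUNNING);

		mutex_lock(&ex_scan_mutex);
		while (!list_empty(&ex_scan_q)) {
			struct ex_ctlr *h = list_first_entry(&ex_scan_q,
						struct ex_ctlr, scan_list);
			list_del_init(&h->scan_list);
			mutex_unlock(&ex_scan_mutex);
			/* ... rescan h, e.g. rebuild its LUN table ... */
			mutex_lock(&ex_scan_mutex);
		}
		mutex_unlock(&ex_scan_mutex);
	}
	return 0;
}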
+ **/ static int scan_thread(void *data) { - ctlr_info_t *h = data; - int rc; - DECLARE_COMPLETION_ONSTACK(wait); - h->rescan_wait = &wait; + struct ctlr_info *h; - for (;;) { - rc = wait_for_completion_interruptible(&wait); + while (1) { + set_current_state(TASK_INTERRUPTIBLE); + schedule(); if (kthread_should_stop()) break; - if (!rc) - rebuild_lun_table(h, 0); + + while (1) { + mutex_lock(&scan_mutex); + if (list_empty(&scan_q)) { + mutex_unlock(&scan_mutex); + break; + } + + h = list_entry(scan_q.next, + struct ctlr_info, + scan_list); + list_del(&h->scan_list); + h->busy_scanning = 1; + mutex_unlock(&scan_mutex); + + if (h) { + rebuild_lun_table(h, 0, 0); + complete_all(&h->scan_wait); + mutex_lock(&scan_mutex); + h->busy_scanning = 0; + mutex_unlock(&scan_mutex); + } + } } + return 0; } @@ -3268,8 +3600,8 @@ static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c) case REPORT_LUNS_CHANGED: printk(KERN_WARNING "cciss%d: report LUN data " "changed\n", h->ctlr); - if (h->rescan_wait) - complete(h->rescan_wait); + add_to_scan_list(h); + wake_up_process(cciss_scan_thread); return 1; break; case POWER_OR_RESET: @@ -3489,7 +3821,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev) if (scratchpad == CCISS_FIRMWARE_READY) break; set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(HZ / 10); /* wait 100ms */ + schedule_timeout(msecs_to_jiffies(100)); /* wait 100ms */ } if (scratchpad != CCISS_FIRMWARE_READY) { printk(KERN_WARNING "cciss: Board not ready. Timed out.\n"); @@ -3615,7 +3947,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev) break; /* delay and try again */ set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(10); + schedule_timeout(msecs_to_jiffies(1)); } #ifdef CCISS_DEBUG @@ -3669,15 +4001,16 @@ Enomem: return -1; } -static void free_hba(int i) +static void free_hba(int n) { - ctlr_info_t *p = hba[i]; - int n; + ctlr_info_t *h = hba[n]; + int i; - hba[i] = NULL; - for (n = 0; n < CISS_MAX_LUN; n++) - put_disk(p->gendisk[n]); - kfree(p); + hba[n] = NULL; + for (i = 0; i < h->highest_lun + 1; i++) + if (h->gendisk[i] != NULL) + put_disk(h->gendisk[i]); + kfree(h); } /* Send a message CDB to the firmware. 
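/*
 * Illustrative sketch only: the two polling loops above drop raw jiffy
 * arithmetic (HZ / 10 and a bare 10) in favour of msecs_to_jiffies(), so the
 * delay no longer depends on CONFIG_HZ.  The general idiom, with a
 * hypothetical ready check and retry bound:
 */
#include <linux/jiffies.h>
#include <linux/sched.h>

#define EX_MAX_POLLS 300			/* hypothetical retry bound */

static int ex_wait_for_ready(int (*ready)(void))
{
	int i;

	for (i = 0; i < EX_MAX_POLLS; i++) {
		if (ready())
			return 0;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(100));	/* ~100 ms per poll */
	}
	return -1;				/* timed out */
}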
*/ @@ -3918,6 +4251,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, hba[i]->busy_initializing = 1; INIT_HLIST_HEAD(&hba[i]->cmpQ); INIT_HLIST_HEAD(&hba[i]->reqQ); + mutex_init(&hba[i]->busy_shutting_down); if (cciss_pci_init(hba[i], pdev) != 0) goto clean0; @@ -3926,6 +4260,8 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, hba[i]->ctlr = i; hba[i]->pdev = pdev; + init_completion(&hba[i]->scan_wait); + if (cciss_create_hba_sysfs_entry(hba[i])) goto clean0; @@ -4001,8 +4337,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, hba[i]->num_luns = 0; hba[i]->highest_lun = -1; for (j = 0; j < CISS_MAX_LUN; j++) { - hba[i]->drv[j].raid_level = -1; - hba[i]->drv[j].queue = NULL; + hba[i]->drv[j] = NULL; hba[i]->gendisk[j] = NULL; } @@ -4035,14 +4370,8 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, hba[i]->cciss_max_sectors = 2048; + rebuild_lun_table(hba[i], 1, 0); hba[i]->busy_initializing = 0; - - rebuild_lun_table(hba[i], 1); - hba[i]->cciss_scan_thread = kthread_run(scan_thread, hba[i], - "cciss_scan%02d", i); - if (IS_ERR(hba[i]->cciss_scan_thread)) - return PTR_ERR(hba[i]->cciss_scan_thread); - return 1; clean4: @@ -4063,12 +4392,7 @@ clean1: cciss_destroy_hba_sysfs_entry(hba[i]); clean0: hba[i]->busy_initializing = 0; - /* cleanup any queues that may have been initialized */ - for (j=0; j <= hba[i]->highest_lun; j++){ - drive_info_struct *drv = &(hba[i]->drv[j]); - if (drv->queue) - blk_cleanup_queue(drv->queue); - } + /* * Deliberately omit pci_disable_device(): it does something nasty to * Smart Array controllers that pci_enable_device does not undo @@ -4125,8 +4449,9 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev) return; } - kthread_stop(hba[i]->cciss_scan_thread); + mutex_lock(&hba[i]->busy_shutting_down); + remove_from_scan_list(hba[i]); remove_proc_entry(hba[i]->devname, proc_cciss); unregister_blkdev(hba[i]->major, hba[i]->devname); @@ -4136,8 +4461,10 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev) if (disk) { struct request_queue *q = disk->queue; - if (disk->flags & GENHD_FL_UP) + if (disk->flags & GENHD_FL_UP) { + cciss_destroy_ld_sysfs_entry(hba[i], j, 1); del_gendisk(disk); + } if (q) blk_cleanup_queue(q); } @@ -4170,6 +4497,7 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev) pci_release_regions(pdev); pci_set_drvdata(pdev, NULL); cciss_destroy_hba_sysfs_entry(hba[i]); + mutex_unlock(&hba[i]->busy_shutting_down); free_hba(i); } @@ -4202,15 +4530,25 @@ static int __init cciss_init(void) if (err) return err; + /* Start the scan thread */ + cciss_scan_thread = kthread_run(scan_thread, NULL, "cciss_scan"); + if (IS_ERR(cciss_scan_thread)) { + err = PTR_ERR(cciss_scan_thread); + goto err_bus_unregister; + } + /* Register for our PCI devices */ err = pci_register_driver(&cciss_pci_driver); if (err) - goto err_bus_register; + goto err_thread_stop; - return 0; + return err; -err_bus_register: +err_thread_stop: + kthread_stop(cciss_scan_thread); +err_bus_unregister: bus_unregister(&cciss_bus_type); + return err; } @@ -4227,6 +4565,7 @@ static void __exit cciss_cleanup(void) cciss_remove_one(hba[i]->pdev); } } + kthread_stop(cciss_scan_thread); remove_proc_entry("driver/cciss", NULL); bus_unregister(&cciss_bus_type); } diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h index 06a5db25b29..31524cf42c7 100644 --- a/drivers/block/cciss.h +++ b/drivers/block/cciss.h @@ -2,6 +2,7 @@ #define CCISS_H #include <linux/genhd.h> +#include <linux/mutex.h> #include "cciss_cmd.h" @@ -29,7 +30,7 @@ 
struct access_method { }; typedef struct _drive_info_struct { - __u32 LunID; + unsigned char LunID[8]; int usage_count; struct request_queue *queue; sector_t nr_blocks; @@ -51,6 +52,7 @@ typedef struct _drive_info_struct char vendor[VENDOR_LEN + 1]; /* SCSI vendor string */ char model[MODEL_LEN + 1]; /* SCSI model string */ char rev[REV_LEN + 1]; /* SCSI revision string */ + char device_initialized; /* indicates whether dev is initialized */ } drive_info_struct; struct ctlr_info @@ -86,7 +88,7 @@ struct ctlr_info BYTE cciss_read_capacity; // information about each logical volume - drive_info_struct drv[CISS_MAX_LUN]; + drive_info_struct *drv[CISS_MAX_LUN]; struct access_method access; @@ -108,6 +110,8 @@ struct ctlr_info int nr_frees; int busy_configuring; int busy_initializing; + int busy_scanning; + struct mutex busy_shutting_down; /* This element holds the zero based queue number of the last * queue to be started. It is used for fairness. @@ -122,8 +126,8 @@ struct ctlr_info /* and saved for later processing */ #endif unsigned char alive; - struct completion *rescan_wait; - struct task_struct *cciss_scan_thread; + struct list_head scan_list; + struct completion scan_wait; struct device dev; }; diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c index b82d438e260..6422651ec36 100644 --- a/drivers/block/cpqarray.c +++ b/drivers/block/cpqarray.c @@ -32,6 +32,7 @@ #include <linux/blkpg.h> #include <linux/timer.h> #include <linux/proc_fs.h> +#include <linux/seq_file.h> #include <linux/init.h> #include <linux/hdreg.h> #include <linux/spinlock.h> @@ -177,7 +178,6 @@ static int cpqarray_register_ctlr(int ctlr, struct pci_dev *pdev); #ifdef CONFIG_PROC_FS static void ida_procinit(int i); -static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data); #else static void ida_procinit(int i) {} #endif @@ -206,6 +206,7 @@ static const struct block_device_operations ida_fops = { #ifdef CONFIG_PROC_FS static struct proc_dir_entry *proc_array; +static const struct file_operations ida_proc_fops; /* * Get us a file in /proc/array that says something about each controller. @@ -218,19 +219,16 @@ static void __init ida_procinit(int i) if (!proc_array) return; } - create_proc_read_entry(hba[i]->devname, 0, proc_array, - ida_proc_get_info, hba[i]); + proc_create_data(hba[i]->devname, 0, proc_array, &ida_proc_fops, hba[i]); } /* * Report information about this controller. 
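/*
 * Illustrative sketch only: the cpqarray /proc rework above registers the
 * entry with proc_create_data() and, in the hunks that follow, emits output
 * through seq_file instead of hand-managed buffer/offset bookkeeping.  The
 * canonical shape of such a conversion (types and names are hypothetical):
 */
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

struct ex_ctlr_info {
	const char *name;
};

static int ex_proc_show(struct seq_file *m, void *v)
{
	struct ex_ctlr_info *h = m->private;	/* stored by single_open() below */

	seq_printf(m, "%s: example controller\n", h->name);
	return 0;
}

static int ex_proc_open(struct inode *inode, struct file *file)
{
	/* PDE(inode)->data is the pointer handed to proc_create_data() */
	return single_open(file, ex_proc_show, PDE(inode)->data);
}

static const struct file_operations ex_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= ex_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* registration: proc_create_data("example", 0, parent_dir, &ex_proc_fops, h); */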
*/ -static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data) +static int ida_proc_show(struct seq_file *m, void *v) { - off_t pos = 0; - off_t len = 0; - int size, i, ctlr; - ctlr_info_t *h = (ctlr_info_t*)data; + int i, ctlr; + ctlr_info_t *h = (ctlr_info_t*)m->private; drv_info_t *drv; #ifdef CPQ_PROC_PRINT_QUEUES cmdlist_t *c; @@ -238,7 +236,7 @@ static int ida_proc_get_info(char *buffer, char **start, off_t offset, int lengt #endif ctlr = h->ctlr; - size = sprintf(buffer, "%s: Compaq %s Controller\n" + seq_printf(m, "%s: Compaq %s Controller\n" " Board ID: 0x%08lx\n" " Firmware Revision: %c%c%c%c\n" " Controller Sig: 0x%08lx\n" @@ -258,55 +256,54 @@ static int ida_proc_get_info(char *buffer, char **start, off_t offset, int lengt h->log_drives, h->phys_drives, h->Qdepth, h->maxQsinceinit); - pos += size; len += size; - - size = sprintf(buffer+len, "Logical Drive Info:\n"); - pos += size; len += size; + seq_puts(m, "Logical Drive Info:\n"); for(i=0; i<h->log_drives; i++) { drv = &h->drv[i]; - size = sprintf(buffer+len, "ida/c%dd%d: blksz=%d nr_blks=%d\n", + seq_printf(m, "ida/c%dd%d: blksz=%d nr_blks=%d\n", ctlr, i, drv->blk_size, drv->nr_blks); - pos += size; len += size; } #ifdef CPQ_PROC_PRINT_QUEUES spin_lock_irqsave(IDA_LOCK(h->ctlr), flags); - size = sprintf(buffer+len, "\nCurrent Queues:\n"); - pos += size; len += size; + seq_puts(m, "\nCurrent Queues:\n"); c = h->reqQ; - size = sprintf(buffer+len, "reqQ = %p", c); pos += size; len += size; + seq_printf(m, "reqQ = %p", c); if (c) c=c->next; while(c && c != h->reqQ) { - size = sprintf(buffer+len, "->%p", c); - pos += size; len += size; + seq_printf(m, "->%p", c); c=c->next; } c = h->cmpQ; - size = sprintf(buffer+len, "\ncmpQ = %p", c); pos += size; len += size; + seq_printf(m, "\ncmpQ = %p", c); if (c) c=c->next; while(c && c != h->cmpQ) { - size = sprintf(buffer+len, "->%p", c); - pos += size; len += size; + seq_printf(m, "->%p", c); c=c->next; } - size = sprintf(buffer+len, "\n"); pos += size; len += size; + seq_putc(m, '\n'); spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags); #endif - size = sprintf(buffer+len, "nr_allocs = %d\nnr_frees = %d\n", + seq_printf(m, "nr_allocs = %d\nnr_frees = %d\n", h->nr_allocs, h->nr_frees); - pos += size; len += size; - - *eof = 1; - *start = buffer+offset; - len -= offset; - if (len>length) - len = length; - return len; + return 0; +} + +static int ida_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, ida_proc_show, PDE(inode)->data); } + +static const struct file_operations ida_proc_fops = { + .owner = THIS_MODULE, + .open = ida_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; #endif /* CONFIG_PROC_FS */ module_param_array(eisa, int, NULL, 0); diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c index 60ab75104da..1c129211302 100644 --- a/drivers/char/agp/parisc-agp.c +++ b/drivers/char/agp/parisc-agp.c @@ -217,7 +217,7 @@ static const struct agp_bridge_driver parisc_agp_driver = { .configure = parisc_agp_configure, .fetch_size = parisc_agp_fetch_size, .tlb_flush = parisc_agp_tlbflush, - .mask_memory = parisc_agp_page_mask_memory, + .mask_memory = parisc_agp_mask_memory, .masks = parisc_agp_masks, .agp_enable = parisc_agp_enable, .cache_flush = global_cache_flush, diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c index aaca40283be..4f568cb9af3 100644 --- a/drivers/char/apm-emulation.c +++ b/drivers/char/apm-emulation.c @@ -393,7 +393,7 
@@ static int apm_open(struct inode * inode, struct file * filp) return as ? 0 : -ENOMEM; } -static struct file_operations apm_bios_fops = { +static const struct file_operations apm_bios_fops = { .owner = THIS_MODULE, .read = apm_read, .poll = apm_poll, diff --git a/drivers/char/bfin-otp.c b/drivers/char/bfin-otp.c index e3dd24bff51..836d4f0a876 100644 --- a/drivers/char/bfin-otp.c +++ b/drivers/char/bfin-otp.c @@ -217,7 +217,7 @@ static long bfin_otp_ioctl(struct file *filp, unsigned cmd, unsigned long arg) # define bfin_otp_ioctl NULL #endif -static struct file_operations bfin_otp_fops = { +static const struct file_operations bfin_otp_fops = { .owner = THIS_MODULE, .unlocked_ioctl = bfin_otp_ioctl, .read = bfin_otp_read, diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c index df5038bbcbc..4254457d391 100644 --- a/drivers/char/cyclades.c +++ b/drivers/char/cyclades.c @@ -3354,7 +3354,7 @@ static int __init cy_detect_isa(void) continue; } #ifdef MODULE - if (isparam && irq[i]) + if (isparam && i < NR_CARDS && irq[i]) cy_isa_irq = irq[i]; else #endif diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c index 52e06589821..045c930e632 100644 --- a/drivers/char/dtlk.c +++ b/drivers/char/dtlk.c @@ -56,6 +56,7 @@ #include <linux/errno.h> /* for -EBUSY */ #include <linux/ioport.h> /* for request_region */ #include <linux/delay.h> /* for loops_per_jiffy */ +#include <linux/sched.h> #include <linux/smp_lock.h> /* cycle_kernel_lock() */ #include <asm/io.h> /* for inb_p, outb_p, inb, outb, etc. */ #include <asm/uaccess.h> /* for get_user, etc. */ diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c index 41fc11dc921..65545de3dbf 100644 --- a/drivers/char/ipmi/ipmi_devintf.c +++ b/drivers/char/ipmi/ipmi_devintf.c @@ -36,6 +36,7 @@ #include <linux/errno.h> #include <asm/system.h> #include <linux/poll.h> +#include <linux/sched.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/ipmi.h> diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 09050797c76..ec5e3f8df64 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -35,6 +35,7 @@ #include <linux/errno.h> #include <asm/system.h> #include <linux/poll.h> +#include <linux/sched.h> #include <linux/spinlock.h> #include <linux/mutex.h> #include <linux/slab.h> diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c index 5942a9d674c..452370af95d 100644 --- a/drivers/char/serial167.c +++ b/drivers/char/serial167.c @@ -220,8 +220,7 @@ static inline int serial_paranoia_check(struct cyclades_port *info, char *name, return 1; } - if ((long)info < (long)(&cy_port[0]) - || (long)(&cy_port[NR_PORTS]) < (long)info) { + if (info < &cy_port[0] || info >= &cy_port[NR_PORTS]) { printk("Warning: cyclades_port out of range for (%s) in %s\n", name, routine); return 1; @@ -520,15 +519,13 @@ static irqreturn_t cd2401_tx_interrupt(int irq, void *dev_id) panic("TxInt on debug port!!!"); } #endif - - info = &cy_port[channel]; - /* validate the port number (as configured and open) */ if ((channel < 0) || (NR_PORTS <= channel)) { base_addr[CyIER] &= ~(CyTxMpty | CyTxRdy); base_addr[CyTEOIR] = CyNOTRANS; return IRQ_HANDLED; } + info = &cy_port[channel]; info->last_active = jiffies; if (info->tty == 0) { base_addr[CyIER] &= ~(CyTxMpty | CyTxRdy); diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c index aafdbaebc16..feb55075819 100644 --- a/drivers/char/tty_ldisc.c +++ b/drivers/char/tty_ldisc.c @@ -518,7 +518,7 @@ static 
void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old) static int tty_ldisc_halt(struct tty_struct *tty) { clear_bit(TTY_LDISC, &tty->flags); - return cancel_delayed_work(&tty->buf.work); + return cancel_delayed_work_sync(&tty->buf.work); } /** @@ -756,12 +756,9 @@ void tty_ldisc_hangup(struct tty_struct *tty) * N_TTY. */ if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) { - /* Make sure the old ldisc is quiescent */ - tty_ldisc_halt(tty); - flush_scheduled_work(); - /* Avoid racing set_ldisc or tty_ldisc_release */ mutex_lock(&tty->ldisc_mutex); + tty_ldisc_halt(tty); if (tty->ldisc) { /* Not yet closed */ /* Switch back to N_TTY */ tty_ldisc_reinit(tty); diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c index 29c651ab0d7..6b36ee56e6f 100644 --- a/drivers/char/vt_ioctl.c +++ b/drivers/char/vt_ioctl.c @@ -981,8 +981,10 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, goto eperm; if (copy_from_user(&vsa, (struct vt_setactivate __user *)arg, - sizeof(struct vt_setactivate))) - return -EFAULT; + sizeof(struct vt_setactivate))) { + ret = -EFAULT; + goto out; + } if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES) ret = -ENXIO; else { diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c index f40ab699860..4846d50199f 100644 --- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c +++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c @@ -559,7 +559,7 @@ static int hwicap_release(struct inode *inode, struct file *file) return status; } -static struct file_operations hwicap_fops = { +static const struct file_operations hwicap_fops = { .owner = THIS_MODULE, .write = hwicap_write, .read = hwicap_read, diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c index abf4a2529f8..60697909ebd 100644 --- a/drivers/connector/cn_proc.c +++ b/drivers/connector/cn_proc.c @@ -227,7 +227,8 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack) * cn_proc_mcast_ctl * @data: message sent from userspace via the connector */ -static void cn_proc_mcast_ctl(struct cn_msg *msg) +static void cn_proc_mcast_ctl(struct cn_msg *msg, + struct netlink_skb_parms *nsp) { enum proc_cn_mcast_op *mc_op = NULL; int err = 0; diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c index 4a1dfe1f4ba..210338ea222 100644 --- a/drivers/connector/cn_queue.c +++ b/drivers/connector/cn_queue.c @@ -78,18 +78,20 @@ void cn_queue_wrapper(struct work_struct *work) struct cn_callback_entry *cbq = container_of(work, struct cn_callback_entry, work); struct cn_callback_data *d = &cbq->data; + struct cn_msg *msg = NLMSG_DATA(nlmsg_hdr(d->skb)); + struct netlink_skb_parms *nsp = &NETLINK_CB(d->skb); - d->callback(d->callback_priv); + d->callback(msg, nsp); - d->destruct_data(d->ddata); - d->ddata = NULL; + kfree_skb(d->skb); + d->skb = NULL; kfree(d->free); } static struct cn_callback_entry * cn_queue_alloc_callback_entry(char *name, struct cb_id *id, - void (*callback)(struct cn_msg *)) + void (*callback)(struct cn_msg *, struct netlink_skb_parms *)) { struct cn_callback_entry *cbq; @@ -123,7 +125,7 @@ int cn_cb_equal(struct cb_id *i1, struct cb_id *i2) } int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, - void (*callback)(struct cn_msg *)) + void (*callback)(struct cn_msg *, struct netlink_skb_parms *)) { struct cn_callback_entry *cbq, *__cbq; int found = 0; diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c index 74f52af7956..f06024668f9 100644 --- 
a/drivers/connector/connector.c +++ b/drivers/connector/connector.c @@ -129,21 +129,19 @@ EXPORT_SYMBOL_GPL(cn_netlink_send); /* * Callback helper - queues work and setup destructor for given data. */ -static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), void *data) +static int cn_call_callback(struct sk_buff *skb) { struct cn_callback_entry *__cbq, *__new_cbq; struct cn_dev *dev = &cdev; + struct cn_msg *msg = NLMSG_DATA(nlmsg_hdr(skb)); int err = -ENODEV; spin_lock_bh(&dev->cbdev->queue_lock); list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) { if (cn_cb_equal(&__cbq->id.id, &msg->id)) { if (likely(!work_pending(&__cbq->work) && - __cbq->data.ddata == NULL)) { - __cbq->data.callback_priv = msg; - - __cbq->data.ddata = data; - __cbq->data.destruct_data = destruct_data; + __cbq->data.skb == NULL)) { + __cbq->data.skb = skb; if (queue_cn_work(__cbq, &__cbq->work)) err = 0; @@ -156,10 +154,8 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v __new_cbq = kzalloc(sizeof(struct cn_callback_entry), GFP_ATOMIC); if (__new_cbq) { d = &__new_cbq->data; - d->callback_priv = msg; + d->skb = skb; d->callback = __cbq->data.callback; - d->ddata = data; - d->destruct_data = destruct_data; d->free = __new_cbq; __new_cbq->pdev = __cbq->pdev; @@ -191,7 +187,6 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v */ static void cn_rx_skb(struct sk_buff *__skb) { - struct cn_msg *msg; struct nlmsghdr *nlh; int err; struct sk_buff *skb; @@ -208,8 +203,7 @@ static void cn_rx_skb(struct sk_buff *__skb) return; } - msg = NLMSG_DATA(nlh); - err = cn_call_callback(msg, (void (*)(void *))kfree_skb, skb); + err = cn_call_callback(skb); if (err < 0) kfree_skb(skb); } @@ -270,7 +264,7 @@ static void cn_notify(struct cb_id *id, u32 notify_event) * May sleep. */ int cn_add_callback(struct cb_id *id, char *name, - void (*callback)(struct cn_msg *)) + void (*callback)(struct cn_msg *, struct netlink_skb_parms *)) { int err; struct cn_dev *dev = &cdev; @@ -352,7 +346,7 @@ static int cn_ctl_msg_equals(struct cn_ctl_msg *m1, struct cn_ctl_msg *m2) * * Used for notification of a request's processing. */ -static void cn_callback(struct cn_msg *msg) +static void cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp) { struct cn_ctl_msg *ctl; struct cn_ctl_entry *ent; diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 4e551e63b6d..4f4ac82382f 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -15,8 +15,8 @@ module_param(ecc_enable_override, int, 0644); /* Lookup table for all possible MC control instances */ struct amd64_pvt; -static struct mem_ctl_info *mci_lookup[MAX_NUMNODES]; -static struct amd64_pvt *pvt_lookup[MAX_NUMNODES]; +static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES]; +static struct amd64_pvt *pvt_lookup[EDAC_MAX_NUMNODES]; /* * See F2x80 for K8 and F2x[1,0]80 for Fam10 and later. 
The table below is only @@ -189,7 +189,10 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw) /* Map from a CSROW entry to the mask entry that operates on it */ static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow) { - return csrow >> (pvt->num_dcsm >> 3); + if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_F) + return csrow; + else + return csrow >> 1; } /* return the 'base' address the i'th CS entry of the 'dct' DRAM controller */ @@ -279,29 +282,26 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci, intlv_en = pvt->dram_IntlvEn[0]; if (intlv_en == 0) { - for (node_id = 0; ; ) { + for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++) { if (amd64_base_limit_match(pvt, sys_addr, node_id)) - break; - - if (++node_id >= DRAM_REG_COUNT) - goto err_no_match; + goto found; } - goto found; + goto err_no_match; } - if (unlikely((intlv_en != (0x01 << 8)) && - (intlv_en != (0x03 << 8)) && - (intlv_en != (0x07 << 8)))) { + if (unlikely((intlv_en != 0x01) && + (intlv_en != 0x03) && + (intlv_en != 0x07))) { amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from " "IntlvEn field of DRAM Base Register for node 0: " - "This probably indicates a BIOS bug.\n", intlv_en); + "this probably indicates a BIOS bug.\n", intlv_en); return NULL; } bits = (((u32) sys_addr) >> 12) & intlv_en; for (node_id = 0; ; ) { - if ((pvt->dram_limit[node_id] & intlv_en) == bits) + if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits) break; /* intlv_sel field matches */ if (++node_id >= DRAM_REG_COUNT) @@ -311,10 +311,10 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci, /* sanity test for sys_addr */ if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) { amd64_printk(KERN_WARNING, - "%s(): sys_addr 0x%lx falls outside base/limit " - "address range for node %d with node interleaving " - "enabled.\n", __func__, (unsigned long)sys_addr, - node_id); + "%s(): sys_addr 0x%llx falls outside base/limit " + "address range for node %d with node interleaving " + "enabled.\n", + __func__, sys_addr, node_id); return NULL; } @@ -377,7 +377,7 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr) * base/mask register pair, test the condition shown near the start of * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E). 
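/*
 * Illustrative sketch only: the node-interleave matching above now compares
 * DramIntlvSel (rather than the limit register) against the low address
 * bits.  As the code reads, DramIntlvEn == 0x1/0x3/0x7 selects on address
 * bits 12, 13:12 or 14:12 respectively, and the node whose IntlvSel matches
 * those bits owns the address:
 */
#include <linux/types.h>

static int ex_pick_interleaved_node(u64 sys_addr, u32 intlv_en,
				    const u32 *intlv_sel, int nr_nodes)
{
	u32 bits = ((u32)sys_addr >> 12) & intlv_en;
	int node;

	for (node = 0; node < nr_nodes; node++)
		if ((intlv_sel[node] & intlv_en) == bits)
			return node;	/* IntlvSel matches the selector bits */
	return -1;
}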
*/ - for (csrow = 0; csrow < CHIPSELECT_COUNT; csrow++) { + for (csrow = 0; csrow < pvt->cs_count; csrow++) { /* This DRAM chip select is disabled on this node */ if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0) @@ -734,7 +734,7 @@ static void find_csrow_limits(struct mem_ctl_info *mci, int csrow, u64 base, mask; pvt = mci->pvt_info; - BUG_ON((csrow < 0) || (csrow >= CHIPSELECT_COUNT)); + BUG_ON((csrow < 0) || (csrow >= pvt->cs_count)); base = base_from_dct_base(pvt, csrow); mask = mask_from_dct_mask(pvt, csrow); @@ -962,35 +962,27 @@ err_reg: */ static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt) { - if (pvt->ext_model >= OPTERON_CPU_REV_F) { + + if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_F) { + pvt->dcsb_base = REV_E_DCSB_BASE_BITS; + pvt->dcsm_mask = REV_E_DCSM_MASK_BITS; + pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS; + pvt->dcs_shift = REV_E_DCS_SHIFT; + pvt->cs_count = 8; + pvt->num_dcsm = 8; + } else { pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS; pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS; pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS; pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT; - switch (boot_cpu_data.x86) { - case 0xf: - pvt->num_dcsm = REV_F_DCSM_COUNT; - break; - - case 0x10: - pvt->num_dcsm = F10_DCSM_COUNT; - break; - - case 0x11: - pvt->num_dcsm = F11_DCSM_COUNT; - break; - - default: - amd64_printk(KERN_ERR, "Unsupported family!\n"); - break; + if (boot_cpu_data.x86 == 0x11) { + pvt->cs_count = 4; + pvt->num_dcsm = 2; + } else { + pvt->cs_count = 8; + pvt->num_dcsm = 4; } - } else { - pvt->dcsb_base = REV_E_DCSB_BASE_BITS; - pvt->dcsm_mask = REV_E_DCSM_MASK_BITS; - pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS; - pvt->dcs_shift = REV_E_DCS_SHIFT; - pvt->num_dcsm = REV_E_DCSM_COUNT; } } @@ -1003,7 +995,7 @@ static void amd64_read_dct_base_mask(struct amd64_pvt *pvt) amd64_set_dct_base_and_mask(pvt); - for (cs = 0; cs < CHIPSELECT_COUNT; cs++) { + for (cs = 0; cs < pvt->cs_count; cs++) { reg = K8_DCSB0 + (cs * 4); err = pci_read_config_dword(pvt->dram_f2_ctl, reg, &pvt->dcsb0[cs]); @@ -1130,7 +1122,7 @@ static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram) debugf0("Reading K8_DRAM_BASE_LOW failed\n"); /* Extract parts into separate data entries */ - pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8; + pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 24; pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7; pvt->dram_rw_en[dram] = (low & 0x3); @@ -1143,7 +1135,7 @@ static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram) * Extract parts into separate data entries. Limit is the HIGHEST memory * location of the region, so lower 24 bits need to be all ones */ - pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF; + pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 24) | 0x00FFFFFF; pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7; pvt->dram_DstNode[dram] = (low & 0x7); } @@ -1193,7 +1185,7 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, * different from the node that detected the error. 
*/ src_mci = find_mc_by_sys_addr(mci, SystemAddress); - if (src_mci) { + if (!src_mci) { amd64_mc_printk(mci, KERN_ERR, "failed to map error address 0x%lx to a node\n", (unsigned long)SystemAddress); @@ -1376,8 +1368,8 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram) pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7; - pvt->dram_base[dram] = (((((u64) high_base & 0x000000FF) << 32) | - ((u64) low_base & 0xFFFF0000))) << 8; + pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) | + (((u64)low_base & 0xFFFF0000) << 24); low_offset = K8_DRAM_LIMIT_LOW + (dram << 3); high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3); @@ -1398,9 +1390,9 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram) * Extract address values and form a LIMIT address. Limit is the HIGHEST * memory location of the region, so low 24 bits need to be all ones. */ - low_limit |= 0x0000FFFF; - pvt->dram_limit[dram] = - ((((u64) high_limit << 32) + (u64) low_limit) << 8) | (0xFF); + pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) | + (((u64) low_limit & 0xFFFF0000) << 24) | + 0x00FFFFFF; } static void f10_read_dram_ctl_register(struct amd64_pvt *pvt) @@ -1566,7 +1558,7 @@ static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs) debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs); - for (csrow = 0; csrow < CHIPSELECT_COUNT; csrow++) { + for (csrow = 0; csrow < pvt->cs_count; csrow++) { cs_base = amd64_get_dct_base(pvt, cs, csrow); if (!(cs_base & K8_DCSB_CS_ENABLE)) @@ -2497,7 +2489,7 @@ err_reg: * NOTE: CPU Revision Dependent code * * Input: - * @csrow_nr ChipSelect Row Number (0..CHIPSELECT_COUNT-1) + * @csrow_nr ChipSelect Row Number (0..pvt->cs_count-1) * k8 private pointer to --> * DRAM Bank Address mapping register * node_id @@ -2577,7 +2569,7 @@ static int amd64_init_csrows(struct mem_ctl_info *mci) (pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ? 
"Enabled" : "Disabled" ); - for (i = 0; i < CHIPSELECT_COUNT; i++) { + for (i = 0; i < pvt->cs_count; i++) { csrow = &mci->csrows[i]; if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) { @@ -2988,7 +2980,7 @@ static int amd64_init_2nd_stage(struct amd64_pvt *pvt) goto err_exit; ret = -ENOMEM; - mci = edac_mc_alloc(0, CHIPSELECT_COUNT, pvt->channel_count, node_id); + mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, node_id); if (!mci) goto err_exit; diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h index 8ea07e2715d..c6f359a8520 100644 --- a/drivers/edac/amd64_edac.h +++ b/drivers/edac/amd64_edac.h @@ -132,6 +132,8 @@ #define EDAC_AMD64_VERSION " Ver: 3.2.0 " __DATE__ #define EDAC_MOD_STR "amd64_edac" +#define EDAC_MAX_NUMNODES 8 + /* Extended Model from CPUID, for CPU Revision numbers */ #define OPTERON_CPU_LE_REV_C 0 #define OPTERON_CPU_REV_D 1 @@ -142,7 +144,7 @@ #define OPTERON_CPU_REV_FA 5 /* Hardware limit on ChipSelect rows per MC and processors per system */ -#define CHIPSELECT_COUNT 8 +#define MAX_CS_COUNT 8 #define DRAM_REG_COUNT 8 @@ -193,7 +195,6 @@ */ #define REV_E_DCSB_BASE_BITS (0xFFE0FE00ULL) #define REV_E_DCS_SHIFT 4 -#define REV_E_DCSM_COUNT 8 #define REV_F_F1Xh_DCSB_BASE_BITS (0x1FF83FE0ULL) #define REV_F_F1Xh_DCS_SHIFT 8 @@ -204,9 +205,6 @@ */ #define REV_F_DCSB_BASE_BITS (0x1FF83FE0ULL) #define REV_F_DCS_SHIFT 8 -#define REV_F_DCSM_COUNT 4 -#define F10_DCSM_COUNT 4 -#define F11_DCSM_COUNT 2 /* DRAM CS Mask Registers */ #define K8_DCSM0 0x60 @@ -374,13 +372,11 @@ enum { #define SET_NB_DRAM_INJECTION_WRITE(word, bits) \ (BIT(((word) & 0xF) + 20) | \ - BIT(17) | \ - ((bits) & 0xF)) + BIT(17) | bits) #define SET_NB_DRAM_INJECTION_READ(word, bits) \ (BIT(((word) & 0xF) + 20) | \ - BIT(16) | \ - ((bits) & 0xF)) + BIT(16) | bits) #define K8_NBCAP 0xE8 #define K8_NBCAP_CORES (BIT(12)|BIT(13)) @@ -445,12 +441,12 @@ struct amd64_pvt { u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */ /* DRAM CS Base Address Registers F2x[1,0][5C:40] */ - u32 dcsb0[CHIPSELECT_COUNT]; - u32 dcsb1[CHIPSELECT_COUNT]; + u32 dcsb0[MAX_CS_COUNT]; + u32 dcsb1[MAX_CS_COUNT]; /* DRAM CS Mask Registers F2x[1,0][6C:60] */ - u32 dcsm0[CHIPSELECT_COUNT]; - u32 dcsm1[CHIPSELECT_COUNT]; + u32 dcsm0[MAX_CS_COUNT]; + u32 dcsm1[MAX_CS_COUNT]; /* * Decoded parts of DRAM BASE and LIMIT Registers @@ -470,6 +466,7 @@ struct amd64_pvt { */ u32 dcsb_base; /* DCSB base bits */ u32 dcsm_mask; /* DCSM mask bits */ + u32 cs_count; /* num chip selects (== num DCSB registers) */ u32 num_dcsm; /* Number of DCSM registers */ u32 dcs_mask_notused; /* DCSM notused mask bits */ u32 dcs_shift; /* DCSB and DCSM shift value */ diff --git a/drivers/edac/amd64_edac_inj.c b/drivers/edac/amd64_edac_inj.c index d3675b76b3a..29f1f7a612d 100644 --- a/drivers/edac/amd64_edac_inj.c +++ b/drivers/edac/amd64_edac_inj.c @@ -1,5 +1,11 @@ #include "amd64_edac.h" +static ssize_t amd64_inject_section_show(struct mem_ctl_info *mci, char *buf) +{ + struct amd64_pvt *pvt = mci->pvt_info; + return sprintf(buf, "0x%x\n", pvt->injection.section); +} + /* * store error injection section value which refers to one of 4 16-byte sections * within a 64-byte cacheline @@ -15,12 +21,26 @@ static ssize_t amd64_inject_section_store(struct mem_ctl_info *mci, ret = strict_strtoul(data, 10, &value); if (ret != -EINVAL) { + + if (value > 3) { + amd64_printk(KERN_WARNING, + "%s: invalid section 0x%lx\n", + __func__, value); + return -EINVAL; + } + pvt->injection.section = (u32) value; return count; } return ret; } +static ssize_t 
amd64_inject_word_show(struct mem_ctl_info *mci, char *buf) +{ + struct amd64_pvt *pvt = mci->pvt_info; + return sprintf(buf, "0x%x\n", pvt->injection.word); +} + /* * store error injection word value which refers to one of 9 16-bit word of the * 16-byte (128-bit + ECC bits) section @@ -37,14 +57,25 @@ static ssize_t amd64_inject_word_store(struct mem_ctl_info *mci, ret = strict_strtoul(data, 10, &value); if (ret != -EINVAL) { - value = (value <= 8) ? value : 0; - pvt->injection.word = (u32) value; + if (value > 8) { + amd64_printk(KERN_WARNING, + "%s: invalid word 0x%lx\n", + __func__, value); + return -EINVAL; + } + pvt->injection.word = (u32) value; return count; } return ret; } +static ssize_t amd64_inject_ecc_vector_show(struct mem_ctl_info *mci, char *buf) +{ + struct amd64_pvt *pvt = mci->pvt_info; + return sprintf(buf, "0x%x\n", pvt->injection.bit_map); +} + /* * store 16 bit error injection vector which enables injecting errors to the * corresponding bit within the error injection word above. When used during a @@ -60,8 +91,14 @@ static ssize_t amd64_inject_ecc_vector_store(struct mem_ctl_info *mci, ret = strict_strtoul(data, 16, &value); if (ret != -EINVAL) { - pvt->injection.bit_map = (u32) value & 0xFFFF; + if (value & 0xFFFF0000) { + amd64_printk(KERN_WARNING, + "%s: invalid EccVector: 0x%lx\n", + __func__, value); + return -EINVAL; + } + pvt->injection.bit_map = (u32) value; return count; } return ret; @@ -147,7 +184,7 @@ struct mcidev_sysfs_attribute amd64_inj_attrs[] = { .name = "inject_section", .mode = (S_IRUGO | S_IWUSR) }, - .show = NULL, + .show = amd64_inject_section_show, .store = amd64_inject_section_store, }, { @@ -155,7 +192,7 @@ struct mcidev_sysfs_attribute amd64_inj_attrs[] = { .name = "inject_word", .mode = (S_IRUGO | S_IWUSR) }, - .show = NULL, + .show = amd64_inject_word_show, .store = amd64_inject_word_store, }, { @@ -163,7 +200,7 @@ struct mcidev_sysfs_attribute amd64_inj_attrs[] = { .name = "inject_ecc_vector", .mode = (S_IRUGO | S_IWUSR) }, - .show = NULL, + .show = amd64_inject_ecc_vector_show, .store = amd64_inject_ecc_vector_store, }, { diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index ced186d7e9a..5089331544e 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c @@ -33,6 +33,7 @@ #include <linux/mutex.h> #include <linux/poll.h> #include <linux/preempt.h> +#include <linux/sched.h> #include <linux/spinlock.h> #include <linux/time.h> #include <linux/uaccess.h> diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c index 420a96e7f2d..051d1ebbd28 100644 --- a/drivers/firmware/iscsi_ibft.c +++ b/drivers/firmware/iscsi_ibft.c @@ -939,7 +939,7 @@ static int __init ibft_init(void) if (ibft_addr) { printk(KERN_INFO "iBFT detected at 0x%llx.\n", - (u64)virt_to_phys((void *)ibft_addr)); + (u64)isa_virt_to_bus(ibft_addr)); rc = ibft_check_device(); if (rc) diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c index d53fbbfefa3..dfb15c06c88 100644 --- a/drivers/firmware/iscsi_ibft_find.c +++ b/drivers/firmware/iscsi_ibft_find.c @@ -65,10 +65,10 @@ void __init reserve_ibft_region(void) * so skip that area */ if (pos == VGA_MEM) pos += VGA_SIZE; - virt = phys_to_virt(pos); + virt = isa_bus_to_virt(pos); if (memcmp(virt, IBFT_SIGN, IBFT_SIGN_LEN) == 0) { unsigned long *addr = - (unsigned long *)phys_to_virt(pos + 4); + (unsigned long *)isa_bus_to_virt(pos + 4); len = *addr; /* if the length of the table extends past 1M, * the table cannot be valid. 
*/ diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index bb11a429394..662ed923d9e 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -1487,7 +1487,7 @@ static int gpiolib_open(struct inode *inode, struct file *file) return single_open(file, gpiolib_show, NULL); } -static struct file_operations gpiolib_operations = { +static const struct file_operations gpiolib_operations = { .open = gpiolib_open, .read = seq_read, .llseek = seq_lseek, diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 8e7b0ebece0..5cae0b3eee9 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -1556,8 +1556,6 @@ int drm_mode_cursor_ioctl(struct drm_device *dev, struct drm_crtc *crtc; int ret = 0; - DRM_DEBUG_KMS("\n"); - if (!req->flags) { DRM_ERROR("no operation set\n"); return -EINVAL; diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 819ddcbfcce..23dc9c115fd 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -454,6 +454,96 @@ out_free: } EXPORT_SYMBOL(drm_fb_helper_init_crtc_count); +static void setcolreg(struct drm_crtc *crtc, u16 red, u16 green, + u16 blue, u16 regno, struct fb_info *info) +{ + struct drm_fb_helper *fb_helper = info->par; + struct drm_framebuffer *fb = fb_helper->fb; + int pindex; + + pindex = regno; + + if (fb->bits_per_pixel == 16) { + pindex = regno << 3; + + if (fb->depth == 16 && regno > 63) + return; + if (fb->depth == 15 && regno > 31) + return; + + if (fb->depth == 16) { + u16 r, g, b; + int i; + if (regno < 32) { + for (i = 0; i < 8; i++) + fb_helper->funcs->gamma_set(crtc, red, + green, blue, pindex + i); + } + + fb_helper->funcs->gamma_get(crtc, &r, + &g, &b, + pindex >> 1); + + for (i = 0; i < 4; i++) + fb_helper->funcs->gamma_set(crtc, r, + green, b, + (pindex >> 1) + i); + } + } + + if (fb->depth != 16) + fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex); + + if (regno < 16 && info->fix.visual == FB_VISUAL_DIRECTCOLOR) { + ((u32 *) fb->pseudo_palette)[regno] = + (regno << info->var.red.offset) | + (regno << info->var.green.offset) | + (regno << info->var.blue.offset); + } +} + +int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) +{ + struct drm_fb_helper *fb_helper = info->par; + struct drm_device *dev = fb_helper->dev; + u16 *red, *green, *blue, *transp; + struct drm_crtc *crtc; + int i, rc = 0; + int start; + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; + for (i = 0; i < fb_helper->crtc_count; i++) { + if (crtc->base.id == fb_helper->crtc_info[i].crtc_id) + break; + } + if (i == fb_helper->crtc_count) + continue; + + red = cmap->red; + green = cmap->green; + blue = cmap->blue; + transp = cmap->transp; + start = cmap->start; + + for (i = 0; i < cmap->len; i++) { + u16 hred, hgreen, hblue, htransp = 0xffff; + + hred = *red++; + hgreen = *green++; + hblue = *blue++; + + if (transp) + htransp = *transp++; + + setcolreg(crtc, hred, hgreen, hblue, start++, info); + } + crtc_funcs->load_lut(crtc); + } + return rc; +} +EXPORT_SYMBOL(drm_fb_helper_setcmap); + int drm_fb_helper_setcolreg(unsigned regno, unsigned red, unsigned green, @@ -466,9 +556,11 @@ int drm_fb_helper_setcolreg(unsigned regno, struct drm_crtc *crtc; int i; - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct drm_framebuffer *fb = fb_helper->fb; + if (regno > 255) + return 1; + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct 
drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; for (i = 0; i < fb_helper->crtc_count; i++) { if (crtc->base.id == fb_helper->crtc_info[i].crtc_id) break; @@ -476,35 +568,9 @@ int drm_fb_helper_setcolreg(unsigned regno, if (i == fb_helper->crtc_count) continue; - if (regno > 255) - return 1; - - if (fb->depth == 8) { - fb_helper->funcs->gamma_set(crtc, red, green, blue, regno); - return 0; - } - if (regno < 16) { - switch (fb->depth) { - case 15: - fb->pseudo_palette[regno] = ((red & 0xf800) >> 1) | - ((green & 0xf800) >> 6) | - ((blue & 0xf800) >> 11); - break; - case 16: - fb->pseudo_palette[regno] = (red & 0xf800) | - ((green & 0xfc00) >> 5) | - ((blue & 0xf800) >> 11); - break; - case 24: - case 32: - fb->pseudo_palette[regno] = - (((red >> 8) & 0xff) << info->var.red.offset) | - (((green >> 8) & 0xff) << info->var.green.offset) | - (((blue >> 8) & 0xff) << info->var.blue.offset); - break; - } - } + setcolreg(crtc, red, green, blue, regno, info); + crtc_funcs->load_lut(crtc); } return 0; } @@ -674,6 +740,7 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, EXPORT_SYMBOL(drm_fb_helper_pan_display); int drm_fb_helper_single_fb_probe(struct drm_device *dev, + int preferred_bpp, int (*fb_create)(struct drm_device *dev, uint32_t fb_width, uint32_t fb_height, @@ -696,6 +763,11 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev, struct drm_fb_helper *fb_helper; uint32_t surface_depth = 24, surface_bpp = 32; + /* if driver picks 8 or 16 by default use that + for both depth/bpp */ + if (preferred_bpp != surface_bpp) { + surface_depth = surface_bpp = preferred_bpp; + } /* first up get a count of crtcs now in use and new min/maxes width/heights */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private; @@ -851,10 +923,12 @@ void drm_fb_helper_free(struct drm_fb_helper *helper) } EXPORT_SYMBOL(drm_fb_helper_free); -void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch) +void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, + uint32_t depth) { info->fix.type = FB_TYPE_PACKED_PIXELS; - info->fix.visual = FB_VISUAL_TRUECOLOR; + info->fix.visual = depth == 8 ? 
FB_VISUAL_PSEUDOCOLOR : + FB_VISUAL_DIRECTCOLOR; info->fix.type_aux = 0; info->fix.xpanstep = 1; /* doing it in hw */ info->fix.ypanstep = 1; /* doing it in hw */ diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 93ff6c03733..ffa39671751 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3244,6 +3244,16 @@ void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, intel_crtc->lut_b[regno] = blue >> 8; } +void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, int regno) +{ + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + + *red = intel_crtc->lut_r[regno] << 8; + *green = intel_crtc->lut_g[regno] << 8; + *blue = intel_crtc->lut_b[regno] << 8; +} + static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, uint32_t size) { @@ -3835,6 +3845,7 @@ static const struct drm_crtc_helper_funcs intel_helper_funcs = { .mode_set_base = intel_pipe_set_base, .prepare = intel_crtc_prepare, .commit = intel_crtc_commit, + .load_lut = intel_crtc_load_lut, }; static const struct drm_crtc_funcs intel_crtc_funcs = { diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 8aa4b7f30da..ef61fe9507e 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -175,6 +175,8 @@ extern int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc); extern void intelfb_restore(void); extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, int regno); +extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, int regno); extern int intel_framebuffer_create(struct drm_device *dev, struct drm_mode_fb_cmd *mode_cmd, diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index e85d7e9eed7..2b0fe54cd92 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -60,10 +60,12 @@ static struct fb_ops intelfb_ops = { .fb_imageblit = cfb_imageblit, .fb_pan_display = drm_fb_helper_pan_display, .fb_blank = drm_fb_helper_blank, + .fb_setcmap = drm_fb_helper_setcmap, }; static struct drm_fb_helper_funcs intel_fb_helper_funcs = { .gamma_set = intel_crtc_fb_gamma_set, + .gamma_get = intel_crtc_fb_gamma_get, }; @@ -123,6 +125,10 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, struct device *device = &dev->pdev->dev; int size, ret, mmio_bar = IS_I9XX(dev) ? 
0 : 1; + /* we don't do packed 24bpp */ + if (surface_bpp == 24) + surface_bpp = 32; + mode_cmd.width = surface_width; mode_cmd.height = surface_height; @@ -206,7 +212,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, // memset(info->screen_base, 0, size); - drm_fb_helper_fill_fix(info, fb->pitch); + drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); drm_fb_helper_fill_var(info, fb, fb_width, fb_height); /* FIXME: we really shouldn't expose mmio space at all */ @@ -244,7 +250,7 @@ int intelfb_probe(struct drm_device *dev) int ret; DRM_DEBUG("\n"); - ret = drm_fb_helper_single_fb_probe(dev, intelfb_create); + ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create); return ret; } EXPORT_SYMBOL(intelfb_probe); diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 6a015929dee..14fa9701aeb 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -733,6 +733,7 @@ static const struct drm_crtc_helper_funcs atombios_helper_funcs = { .mode_set_base = atombios_crtc_set_base, .prepare = atombios_crtc_prepare, .commit = atombios_crtc_commit, + .load_lut = radeon_crtc_load_lut, }; void radeon_atombios_init_crtc(struct drm_device *dev, diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index e6cce24de80..161094c07d9 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -32,6 +32,9 @@ #include "radeon_reg.h" #include "radeon.h" #include "r100d.h" +#include "rs100d.h" +#include "rv200d.h" +#include "rv250d.h" #include <linux/firmware.h> #include <linux/platform_device.h> @@ -60,18 +63,7 @@ MODULE_FIRMWARE(FIRMWARE_R520); /* This files gather functions specifics to: * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 - * - * Some of these functions might be used by newer ASICs. */ -int r200_init(struct radeon_device *rdev); -void r100_hdp_reset(struct radeon_device *rdev); -void r100_gpu_init(struct radeon_device *rdev); -int r100_gui_wait_for_idle(struct radeon_device *rdev); -int r100_mc_wait_for_idle(struct radeon_device *rdev); -void r100_gpu_wait_for_vsync(struct radeon_device *rdev); -void r100_gpu_wait_for_vsync2(struct radeon_device *rdev); -int r100_debugfs_mc_info_init(struct radeon_device *rdev); - /* * PCI GART @@ -152,136 +144,6 @@ void r100_pci_gart_fini(struct radeon_device *rdev) radeon_gart_fini(rdev); } - -/* - * MC - */ -void r100_mc_disable_clients(struct radeon_device *rdev) -{ - uint32_t ov0_scale_cntl, crtc_ext_cntl, crtc_gen_cntl, crtc2_gen_cntl; - - /* FIXME: is this function correct for rs100,rs200,rs300 ? */ - if (r100_gui_wait_for_idle(rdev)) { - printk(KERN_WARNING "Failed to wait GUI idle while " - "programming pipes. 
Bad things might happen.\n"); - } - - /* stop display and memory access */ - ov0_scale_cntl = RREG32(RADEON_OV0_SCALE_CNTL); - WREG32(RADEON_OV0_SCALE_CNTL, ov0_scale_cntl & ~RADEON_SCALER_ENABLE); - crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL); - WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl | RADEON_CRTC_DISPLAY_DIS); - crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL); - - r100_gpu_wait_for_vsync(rdev); - - WREG32(RADEON_CRTC_GEN_CNTL, - (crtc_gen_cntl & ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_ICON_EN)) | - RADEON_CRTC_DISP_REQ_EN_B | RADEON_CRTC_EXT_DISP_EN); - - if (!(rdev->flags & RADEON_SINGLE_CRTC)) { - crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL); - - r100_gpu_wait_for_vsync2(rdev); - WREG32(RADEON_CRTC2_GEN_CNTL, - (crtc2_gen_cntl & - ~(RADEON_CRTC2_CUR_EN | RADEON_CRTC2_ICON_EN)) | - RADEON_CRTC2_DISP_REQ_EN_B); - } - - udelay(500); -} - -void r100_mc_setup(struct radeon_device *rdev) -{ - uint32_t tmp; - int r; - - r = r100_debugfs_mc_info_init(rdev); - if (r) { - DRM_ERROR("Failed to register debugfs file for R100 MC !\n"); - } - /* Write VRAM size in case we are limiting it */ - WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); - /* Novell bug 204882 for RN50/M6/M7 with 8/16/32MB VRAM, - * if the aperture is 64MB but we have 32MB VRAM - * we report only 32MB VRAM but we have to set MC_FB_LOCATION - * to 64MB, otherwise the gpu accidentially dies */ - tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; - tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16); - tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16); - WREG32(RADEON_MC_FB_LOCATION, tmp); - - /* Enable bus mastering */ - tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; - WREG32(RADEON_BUS_CNTL, tmp); - - if (rdev->flags & RADEON_IS_AGP) { - tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; - tmp = REG_SET(RADEON_MC_AGP_TOP, tmp >> 16); - tmp |= REG_SET(RADEON_MC_AGP_START, rdev->mc.gtt_location >> 16); - WREG32(RADEON_MC_AGP_LOCATION, tmp); - WREG32(RADEON_AGP_BASE, rdev->mc.agp_base); - } else { - WREG32(RADEON_MC_AGP_LOCATION, 0x0FFFFFFF); - WREG32(RADEON_AGP_BASE, 0); - } - - tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL; - tmp |= (7 << 28); - WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE); - (void)RREG32(RADEON_HOST_PATH_CNTL); - WREG32(RADEON_HOST_PATH_CNTL, tmp); - (void)RREG32(RADEON_HOST_PATH_CNTL); -} - -int r100_mc_init(struct radeon_device *rdev) -{ - int r; - - if (r100_debugfs_rbbm_init(rdev)) { - DRM_ERROR("Failed to register debugfs file for RBBM !\n"); - } - - r100_gpu_init(rdev); - /* Disable gart which also disable out of gart access */ - r100_pci_gart_disable(rdev); - - /* Setup GPU memory space */ - rdev->mc.gtt_location = 0xFFFFFFFFUL; - if (rdev->flags & RADEON_IS_AGP) { - r = radeon_agp_init(rdev); - if (r) { - printk(KERN_WARNING "[drm] Disabling AGP\n"); - rdev->flags &= ~RADEON_IS_AGP; - rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; - } else { - rdev->mc.gtt_location = rdev->mc.agp_base; - } - } - r = radeon_mc_setup(rdev); - if (r) { - return r; - } - - r100_mc_disable_clients(rdev); - if (r100_mc_wait_for_idle(rdev)) { - printk(KERN_WARNING "Failed to wait MC idle while " - "programming pipes. 
Bad things might happen.\n"); - } - - r100_mc_setup(rdev); - return 0; -} - -void r100_mc_fini(struct radeon_device *rdev) -{ -} - - -/* - * Interrupts - */ int r100_irq_set(struct radeon_device *rdev) { uint32_t tmp = 0; @@ -358,10 +220,6 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc) return RREG32(RADEON_CRTC2_CRNT_FRAME); } - -/* - * Fence emission - */ void r100_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence) { @@ -377,10 +235,6 @@ void r100_fence_ring_emit(struct radeon_device *rdev, radeon_ring_write(rdev, RADEON_SW_INT_FIRE); } - -/* - * Writeback - */ int r100_wb_init(struct radeon_device *rdev) { int r; @@ -504,10 +358,6 @@ int r100_copy_blit(struct radeon_device *rdev, return r; } - -/* - * CP - */ static int r100_cp_wait_for_idle(struct radeon_device *rdev) { unsigned i; @@ -612,6 +462,7 @@ static int r100_cp_init_microcode(struct radeon_device *rdev) } return err; } + static void r100_cp_load_microcode(struct radeon_device *rdev) { const __be32 *fw_data; @@ -978,7 +829,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) header = radeon_get_ib_value(p, h_idx); crtc_id = radeon_get_ib_value(p, h_idx + 5); - reg = header >> 2; + reg = CP_PACKET0_GET_REG(header); mutex_lock(&p->rdev->ddev->mode_config.mutex); obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); if (!obj) { @@ -1990,7 +1841,7 @@ void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) r100_pll_errata_after_data(rdev); } -int r100_init(struct radeon_device *rdev) +void r100_set_safe_registers(struct radeon_device *rdev) { if (ASIC_IS_RN50(rdev)) { rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm; @@ -1999,9 +1850,8 @@ int r100_init(struct radeon_device *rdev) rdev->config.r100.reg_safe_bm = r100_reg_safe_bm; rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm); } else { - return r200_init(rdev); + r200_set_safe_registers(rdev); } - return 0; } /* @@ -2299,9 +2149,11 @@ void r100_bandwidth_update(struct radeon_device *rdev) mode1 = &rdev->mode_info.crtcs[0]->base.mode; pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8; } - if (rdev->mode_info.crtcs[1]->base.enabled) { - mode2 = &rdev->mode_info.crtcs[1]->base.mode; - pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8; + if (!(rdev->flags & RADEON_SINGLE_CRTC)) { + if (rdev->mode_info.crtcs[1]->base.enabled) { + mode2 = &rdev->mode_info.crtcs[1]->base.mode; + pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8; + } } min_mem_eff.full = rfixed_const_8(0); @@ -3114,7 +2966,7 @@ void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save) WREG32(R_000740_CP_CSQ_CNTL, 0); /* Save few CRTC registers */ - save->GENMO_WT = RREG32(R_0003C0_GENMO_WT); + save->GENMO_WT = RREG8(R_0003C2_GENMO_WT); save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL); save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL); save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET); @@ -3124,7 +2976,7 @@ void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save) } /* Disable VGA aperture access */ - WREG32(R_0003C0_GENMO_WT, C_0003C0_VGA_RAM_EN & save->GENMO_WT); + WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & save->GENMO_WT); /* Disable cursor, overlay, crtc */ WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1)); WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL | @@ -3156,10 +3008,264 @@ void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save) rdev->mc.vram_location); } /* Restore CRTC 
registers */ - WREG32(R_0003C0_GENMO_WT, save->GENMO_WT); + WREG8(R_0003C2_GENMO_WT, save->GENMO_WT); WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL); WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL); if (!(rdev->flags & RADEON_SINGLE_CRTC)) { WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL); } } + +void r100_vga_render_disable(struct radeon_device *rdev) +{ + u32 tmp; + + tmp = RREG8(R_0003C2_GENMO_WT); + WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & tmp); +} + +static void r100_debugfs(struct radeon_device *rdev) +{ + int r; + + r = r100_debugfs_mc_info_init(rdev); + if (r) + dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n"); +} + +static void r100_mc_program(struct radeon_device *rdev) +{ + struct r100_mc_save save; + + /* Stops all mc clients */ + r100_mc_stop(rdev, &save); + if (rdev->flags & RADEON_IS_AGP) { + WREG32(R_00014C_MC_AGP_LOCATION, + S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) | + S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16)); + WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base)); + if (rdev->family > CHIP_RV200) + WREG32(R_00015C_AGP_BASE_2, + upper_32_bits(rdev->mc.agp_base) & 0xff); + } else { + WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF); + WREG32(R_000170_AGP_BASE, 0); + if (rdev->family > CHIP_RV200) + WREG32(R_00015C_AGP_BASE_2, 0); + } + /* Wait for mc idle */ + if (r100_mc_wait_for_idle(rdev)) + dev_warn(rdev->dev, "Wait for MC idle timeout.\n"); + /* Program MC, should be a 32bits limited address space */ + WREG32(R_000148_MC_FB_LOCATION, + S_000148_MC_FB_START(rdev->mc.vram_start >> 16) | + S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); + r100_mc_resume(rdev, &save); +} + +void r100_clock_startup(struct radeon_device *rdev) +{ + u32 tmp; + + if (radeon_dynclks != -1 && radeon_dynclks) + radeon_legacy_set_clock_gating(rdev, 1); + /* We need to force on some of the block */ + tmp = RREG32_PLL(R_00000D_SCLK_CNTL); + tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1); + if ((rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280)) + tmp |= S_00000D_FORCE_DISP1(1) | S_00000D_FORCE_DISP2(1); + WREG32_PLL(R_00000D_SCLK_CNTL, tmp); +} + +static int r100_startup(struct radeon_device *rdev) +{ + int r; + + r100_mc_program(rdev); + /* Resume clock */ + r100_clock_startup(rdev); + /* Initialize GPU configuration (# pipes, ...) */ + r100_gpu_init(rdev); + /* Initialize GART (initialize after TTM so we can allocate + * memory through TTM but finalize after TTM) */ + if (rdev->flags & RADEON_IS_PCI) { + r = r100_pci_gart_enable(rdev); + if (r) + return r; + } + /* Enable IRQ */ + rdev->irq.sw_int = true; + r100_irq_set(rdev); + /* 1M ring buffer */ + r = r100_cp_init(rdev, 1024 * 1024); + if (r) { + dev_err(rdev->dev, "failled initializing CP (%d).\n", r); + return r; + } + r = r100_wb_init(rdev); + if (r) + dev_err(rdev->dev, "failled initializing WB (%d).\n", r); + r = r100_ib_init(rdev); + if (r) { + dev_err(rdev->dev, "failled initializing IB (%d).\n", r); + return r; + } + return 0; +} + +int r100_resume(struct radeon_device *rdev) +{ + /* Make sur GART are not working */ + if (rdev->flags & RADEON_IS_PCI) + r100_pci_gart_disable(rdev); + /* Resume clock before doing reset */ + r100_clock_startup(rdev); + /* Reset gpu before posting otherwise ATOM will enter infinite loop */ + if (radeon_gpu_reset(rdev)) { + dev_warn(rdev->dev, "GPU reset failed ! 
(0xE40=0x%08X, 0x7C0=0x%08X)\n", + RREG32(R_000E40_RBBM_STATUS), + RREG32(R_0007C0_CP_STAT)); + } + /* post */ + radeon_combios_asic_init(rdev->ddev); + /* Resume clock after posting */ + r100_clock_startup(rdev); + return r100_startup(rdev); +} + +int r100_suspend(struct radeon_device *rdev) +{ + r100_cp_disable(rdev); + r100_wb_disable(rdev); + r100_irq_disable(rdev); + if (rdev->flags & RADEON_IS_PCI) + r100_pci_gart_disable(rdev); + return 0; +} + +void r100_fini(struct radeon_device *rdev) +{ + r100_suspend(rdev); + r100_cp_fini(rdev); + r100_wb_fini(rdev); + r100_ib_fini(rdev); + radeon_gem_fini(rdev); + if (rdev->flags & RADEON_IS_PCI) + r100_pci_gart_fini(rdev); + radeon_irq_kms_fini(rdev); + radeon_fence_driver_fini(rdev); + radeon_object_fini(rdev); + radeon_atombios_fini(rdev); + kfree(rdev->bios); + rdev->bios = NULL; +} + +int r100_mc_init(struct radeon_device *rdev) +{ + int r; + u32 tmp; + + /* Setup GPU memory space */ + rdev->mc.vram_location = 0xFFFFFFFFUL; + rdev->mc.gtt_location = 0xFFFFFFFFUL; + if (rdev->flags & RADEON_IS_IGP) { + tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM)); + rdev->mc.vram_location = tmp << 16; + } + if (rdev->flags & RADEON_IS_AGP) { + r = radeon_agp_init(rdev); + if (r) { + printk(KERN_WARNING "[drm] Disabling AGP\n"); + rdev->flags &= ~RADEON_IS_AGP; + rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; + } else { + rdev->mc.gtt_location = rdev->mc.agp_base; + } + } + r = radeon_mc_setup(rdev); + if (r) + return r; + return 0; +} + +int r100_init(struct radeon_device *rdev) +{ + int r; + + /* Register debugfs file specific to this group of asics */ + r100_debugfs(rdev); + /* Disable VGA */ + r100_vga_render_disable(rdev); + /* Initialize scratch registers */ + radeon_scratch_init(rdev); + /* Initialize surface registers */ + radeon_surface_init(rdev); + /* TODO: disable VGA need to use VGA request */ + /* BIOS*/ + if (!radeon_get_bios(rdev)) { + if (ASIC_IS_AVIVO(rdev)) + return -EINVAL; + } + if (rdev->is_atom_bios) { + dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n"); + return -EINVAL; + } else { + r = radeon_combios_init(rdev); + if (r) + return r; + } + /* Reset gpu before posting otherwise ATOM will enter infinite loop */ + if (radeon_gpu_reset(rdev)) { + dev_warn(rdev->dev, + "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", + RREG32(R_000E40_RBBM_STATUS), + RREG32(R_0007C0_CP_STAT)); + } + /* check if cards are posted or not */ + if (!radeon_card_posted(rdev) && rdev->bios) { + DRM_INFO("GPU not posted. 
posting now...\n"); + radeon_combios_asic_init(rdev->ddev); + } + /* Set asic errata */ + r100_errata(rdev); + /* Initialize clocks */ + radeon_get_clock_info(rdev->ddev); + /* Get vram informations */ + r100_vram_info(rdev); + /* Initialize memory controller (also test AGP) */ + r = r100_mc_init(rdev); + if (r) + return r; + /* Fence driver */ + r = radeon_fence_driver_init(rdev); + if (r) + return r; + r = radeon_irq_kms_init(rdev); + if (r) + return r; + /* Memory manager */ + r = radeon_object_init(rdev); + if (r) + return r; + if (rdev->flags & RADEON_IS_PCI) { + r = r100_pci_gart_init(rdev); + if (r) + return r; + } + r100_set_safe_registers(rdev); + rdev->accel_working = true; + r = r100_startup(rdev); + if (r) { + /* Somethings want wront with the accel init stop accel */ + dev_err(rdev->dev, "Disabling GPU acceleration\n"); + r100_suspend(rdev); + r100_cp_fini(rdev); + r100_wb_fini(rdev); + r100_ib_fini(rdev); + if (rdev->flags & RADEON_IS_PCI) + r100_pci_gart_fini(rdev); + radeon_irq_kms_fini(rdev); + rdev->accel_working = false; + } + return 0; +} diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h index c4b257ec920..df29a630c46 100644 --- a/drivers/gpu/drm/radeon/r100d.h +++ b/drivers/gpu/drm/radeon/r100d.h @@ -381,6 +381,24 @@ #define S_000054_VCRTC_IDX_MASTER(x) (((x) & 0x7F) << 24) #define G_000054_VCRTC_IDX_MASTER(x) (((x) >> 24) & 0x7F) #define C_000054_VCRTC_IDX_MASTER 0x80FFFFFF +#define R_000148_MC_FB_LOCATION 0x000148 +#define S_000148_MC_FB_START(x) (((x) & 0xFFFF) << 0) +#define G_000148_MC_FB_START(x) (((x) >> 0) & 0xFFFF) +#define C_000148_MC_FB_START 0xFFFF0000 +#define S_000148_MC_FB_TOP(x) (((x) & 0xFFFF) << 16) +#define G_000148_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF) +#define C_000148_MC_FB_TOP 0x0000FFFF +#define R_00014C_MC_AGP_LOCATION 0x00014C +#define S_00014C_MC_AGP_START(x) (((x) & 0xFFFF) << 0) +#define G_00014C_MC_AGP_START(x) (((x) >> 0) & 0xFFFF) +#define C_00014C_MC_AGP_START 0xFFFF0000 +#define S_00014C_MC_AGP_TOP(x) (((x) & 0xFFFF) << 16) +#define G_00014C_MC_AGP_TOP(x) (((x) >> 16) & 0xFFFF) +#define C_00014C_MC_AGP_TOP 0x0000FFFF +#define R_000170_AGP_BASE 0x000170 +#define S_000170_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0) +#define G_000170_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF) +#define C_000170_AGP_BASE_ADDR 0x00000000 #define R_00023C_DISPLAY_BASE_ADDR 0x00023C #define S_00023C_DISPLAY_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0) #define G_00023C_DISPLAY_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF) @@ -403,25 +421,25 @@ #define S_000360_CUR2_LOCK(x) (((x) & 0x1) << 31) #define G_000360_CUR2_LOCK(x) (((x) >> 31) & 0x1) #define C_000360_CUR2_LOCK 0x7FFFFFFF -#define R_0003C0_GENMO_WT 0x0003C0 -#define S_0003C0_GENMO_MONO_ADDRESS_B(x) (((x) & 0x1) << 0) -#define G_0003C0_GENMO_MONO_ADDRESS_B(x) (((x) >> 0) & 0x1) -#define C_0003C0_GENMO_MONO_ADDRESS_B 0xFFFFFFFE -#define S_0003C0_VGA_RAM_EN(x) (((x) & 0x1) << 1) -#define G_0003C0_VGA_RAM_EN(x) (((x) >> 1) & 0x1) -#define C_0003C0_VGA_RAM_EN 0xFFFFFFFD -#define S_0003C0_VGA_CKSEL(x) (((x) & 0x3) << 2) -#define G_0003C0_VGA_CKSEL(x) (((x) >> 2) & 0x3) -#define C_0003C0_VGA_CKSEL 0xFFFFFFF3 -#define S_0003C0_ODD_EVEN_MD_PGSEL(x) (((x) & 0x1) << 5) -#define G_0003C0_ODD_EVEN_MD_PGSEL(x) (((x) >> 5) & 0x1) -#define C_0003C0_ODD_EVEN_MD_PGSEL 0xFFFFFFDF -#define S_0003C0_VGA_HSYNC_POL(x) (((x) & 0x1) << 6) -#define G_0003C0_VGA_HSYNC_POL(x) (((x) >> 6) & 0x1) -#define C_0003C0_VGA_HSYNC_POL 0xFFFFFFBF -#define S_0003C0_VGA_VSYNC_POL(x) (((x) & 0x1) << 7) -#define 
G_0003C0_VGA_VSYNC_POL(x) (((x) >> 7) & 0x1) -#define C_0003C0_VGA_VSYNC_POL 0xFFFFFF7F +#define R_0003C2_GENMO_WT 0x0003C0 +#define S_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) & 0x1) << 0) +#define G_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) >> 0) & 0x1) +#define C_0003C2_GENMO_MONO_ADDRESS_B 0xFE +#define S_0003C2_VGA_RAM_EN(x) (((x) & 0x1) << 1) +#define G_0003C2_VGA_RAM_EN(x) (((x) >> 1) & 0x1) +#define C_0003C2_VGA_RAM_EN 0xFD +#define S_0003C2_VGA_CKSEL(x) (((x) & 0x3) << 2) +#define G_0003C2_VGA_CKSEL(x) (((x) >> 2) & 0x3) +#define C_0003C2_VGA_CKSEL 0xF3 +#define S_0003C2_ODD_EVEN_MD_PGSEL(x) (((x) & 0x1) << 5) +#define G_0003C2_ODD_EVEN_MD_PGSEL(x) (((x) >> 5) & 0x1) +#define C_0003C2_ODD_EVEN_MD_PGSEL 0xDF +#define S_0003C2_VGA_HSYNC_POL(x) (((x) & 0x1) << 6) +#define G_0003C2_VGA_HSYNC_POL(x) (((x) >> 6) & 0x1) +#define C_0003C2_VGA_HSYNC_POL 0xBF +#define S_0003C2_VGA_VSYNC_POL(x) (((x) & 0x1) << 7) +#define G_0003C2_VGA_VSYNC_POL(x) (((x) >> 7) & 0x1) +#define C_0003C2_VGA_VSYNC_POL 0x7F #define R_0003F8_CRTC2_GEN_CNTL 0x0003F8 #define S_0003F8_CRTC2_DBL_SCAN_EN(x) (((x) & 0x1) << 0) #define G_0003F8_CRTC2_DBL_SCAN_EN(x) (((x) >> 0) & 0x1) @@ -545,6 +563,46 @@ #define S_000774_SCRATCH_ADDR(x) (((x) & 0x7FFFFFF) << 5) #define G_000774_SCRATCH_ADDR(x) (((x) >> 5) & 0x7FFFFFF) #define C_000774_SCRATCH_ADDR 0x0000001F +#define R_0007C0_CP_STAT 0x0007C0 +#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0) +#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1) +#define C_0007C0_MRU_BUSY 0xFFFFFFFE +#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1) +#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1) +#define C_0007C0_MWU_BUSY 0xFFFFFFFD +#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2) +#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1) +#define C_0007C0_RSIU_BUSY 0xFFFFFFFB +#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3) +#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1) +#define C_0007C0_RCIU_BUSY 0xFFFFFFF7 +#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9) +#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1) +#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF +#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10) +#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1) +#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF +#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11) +#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1) +#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF +#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12) +#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1) +#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF +#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13) +#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1) +#define C_0007C0_CSI_BUSY 0xFFFFDFFF +#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28) +#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1) +#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF +#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29) +#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1) +#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF +#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30) +#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1) +#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF +#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31) +#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1) +#define C_0007C0_CP_BUSY 0x7FFFFFFF #define R_000E40_RBBM_STATUS 0x000E40 #define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0) #define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F) @@ -604,4 +662,53 @@ #define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) #define C_000E40_GUI_ACTIVE 0x7FFFFFFF + 
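
[Editorial aside, not part of the patch] The new register headers in this series follow the radeon convention of three generated macros per bit-field: S_<reg>_<field>(x) shifts a value into the field's position, G_<reg>_<field>(x) extracts the field from a register value, and C_<reg>_<field> is the AND-mask that clears it. The standalone sketch below is only an illustration of how those macros compose; the macro definitions are copied verbatim from the R_000148_MC_FB_LOCATION entries added earlier in this r100d.h hunk, and a plain variable stands in for the MMIO register that the driver would access with RREG32()/WREG32(). The example addresses are made up.

```c
/* Illustrative sketch only -- not part of the patch. Macros copied verbatim
 * from the r100d.h hunk above; "fb_location" stands in for the MMIO register
 * normally accessed with RREG32()/WREG32(). */
#include <stdint.h>
#include <stdio.h>

#define S_000148_MC_FB_START(x)   (((x) & 0xFFFF) << 0)
#define G_000148_MC_FB_START(x)   (((x) >> 0) & 0xFFFF)
#define C_000148_MC_FB_START      0xFFFF0000
#define S_000148_MC_FB_TOP(x)     (((x) & 0xFFFF) << 16)
#define G_000148_MC_FB_TOP(x)     (((x) >> 16) & 0xFFFF)
#define C_000148_MC_FB_TOP        0x0000FFFF

int main(void)
{
	uint32_t fb_location;
	uint32_t vram_start = 0x00000000;  /* example values only */
	uint32_t vram_end   = 0x07FFFFFF;  /* inclusive end, as in the new *_end convention */

	/* Pack both fields in one go, as r100_mc_program() does for MC_FB_LOCATION. */
	fb_location = S_000148_MC_FB_START(vram_start >> 16) |
		      S_000148_MC_FB_TOP(vram_end >> 16);

	/* Read-modify-write of a single field: clear with C_*, then set with S_*. */
	fb_location &= C_000148_MC_FB_TOP;
	fb_location |= S_000148_MC_FB_TOP(0x0FFF);

	/* G_* pulls a field back out of the packed register value. */
	printf("FB_START=0x%04x FB_TOP=0x%04x\n",
	       G_000148_MC_FB_START(fb_location),
	       G_000148_MC_FB_TOP(fb_location));
	return 0;
}
```

The same S_/G_/C_ pattern recurs throughout the headers touched by this patch (CP_STAT, RBBM_STATUS, SCLK_CNTL, GENMO_WT), which is why the per-ASIC *d.h files are mostly mechanical: each register gets its address define plus one macro triple per field.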
+#define R_00000D_SCLK_CNTL 0x00000D +#define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0) +#define G_00000D_SCLK_SRC_SEL(x) (((x) >> 0) & 0x7) +#define C_00000D_SCLK_SRC_SEL 0xFFFFFFF8 +#define S_00000D_TCLK_SRC_SEL(x) (((x) & 0x7) << 8) +#define G_00000D_TCLK_SRC_SEL(x) (((x) >> 8) & 0x7) +#define C_00000D_TCLK_SRC_SEL 0xFFFFF8FF +#define S_00000D_FORCE_CP(x) (((x) & 0x1) << 16) +#define G_00000D_FORCE_CP(x) (((x) >> 16) & 0x1) +#define C_00000D_FORCE_CP 0xFFFEFFFF +#define S_00000D_FORCE_HDP(x) (((x) & 0x1) << 17) +#define G_00000D_FORCE_HDP(x) (((x) >> 17) & 0x1) +#define C_00000D_FORCE_HDP 0xFFFDFFFF +#define S_00000D_FORCE_DISP(x) (((x) & 0x1) << 18) +#define G_00000D_FORCE_DISP(x) (((x) >> 18) & 0x1) +#define C_00000D_FORCE_DISP 0xFFFBFFFF +#define S_00000D_FORCE_TOP(x) (((x) & 0x1) << 19) +#define G_00000D_FORCE_TOP(x) (((x) >> 19) & 0x1) +#define C_00000D_FORCE_TOP 0xFFF7FFFF +#define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20) +#define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1) +#define C_00000D_FORCE_E2 0xFFEFFFFF +#define S_00000D_FORCE_SE(x) (((x) & 0x1) << 21) +#define G_00000D_FORCE_SE(x) (((x) >> 21) & 0x1) +#define C_00000D_FORCE_SE 0xFFDFFFFF +#define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22) +#define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1) +#define C_00000D_FORCE_IDCT 0xFFBFFFFF +#define S_00000D_FORCE_VIP(x) (((x) & 0x1) << 23) +#define G_00000D_FORCE_VIP(x) (((x) >> 23) & 0x1) +#define C_00000D_FORCE_VIP 0xFF7FFFFF +#define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24) +#define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1) +#define C_00000D_FORCE_RE 0xFEFFFFFF +#define S_00000D_FORCE_PB(x) (((x) & 0x1) << 25) +#define G_00000D_FORCE_PB(x) (((x) >> 25) & 0x1) +#define C_00000D_FORCE_PB 0xFDFFFFFF +#define S_00000D_FORCE_TAM(x) (((x) & 0x1) << 26) +#define G_00000D_FORCE_TAM(x) (((x) >> 26) & 0x1) +#define C_00000D_FORCE_TAM 0xFBFFFFFF +#define S_00000D_FORCE_TDM(x) (((x) & 0x1) << 27) +#define G_00000D_FORCE_TDM(x) (((x) >> 27) & 0x1) +#define C_00000D_FORCE_TDM 0xF7FFFFFF +#define S_00000D_FORCE_RB(x) (((x) & 0x1) << 28) +#define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1) +#define C_00000D_FORCE_RB 0xEFFFFFFF + + #endif diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index cf7fea5ff2e..eb740fc3549 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c @@ -447,9 +447,8 @@ int r200_packet0_check(struct radeon_cs_parser *p, return 0; } -int r200_init(struct radeon_device *rdev) +void r200_set_safe_registers(struct radeon_device *rdev) { rdev->config.r100.reg_safe_bm = r200_reg_safe_bm; rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r200_reg_safe_bm); - return 0; } diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 1ebea8cc8c9..e08c4a8974c 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -33,43 +33,16 @@ #include "radeon_drm.h" #include "r100_track.h" #include "r300d.h" - +#include "rv350d.h" #include "r300_reg_safe.h" -/* r300,r350,rv350,rv370,rv380 depends on : */ -void r100_hdp_reset(struct radeon_device *rdev); -int r100_cp_reset(struct radeon_device *rdev); -int r100_rb2d_reset(struct radeon_device *rdev); -int r100_cp_init(struct radeon_device *rdev, unsigned ring_size); -int r100_pci_gart_enable(struct radeon_device *rdev); -void r100_mc_setup(struct radeon_device *rdev); -void r100_mc_disable_clients(struct radeon_device *rdev); -int r100_gui_wait_for_idle(struct radeon_device *rdev); -int r100_cs_packet_parse(struct radeon_cs_parser *p, - struct radeon_cs_packet 
*pkt, - unsigned idx); -int r100_cs_packet_parse_vline(struct radeon_cs_parser *p); -int r100_cs_parse_packet0(struct radeon_cs_parser *p, - struct radeon_cs_packet *pkt, - const unsigned *auth, unsigned n, - radeon_packet0_check_t check); -int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, - struct radeon_cs_packet *pkt, - struct radeon_object *robj); - -/* This files gather functions specifics to: - * r300,r350,rv350,rv370,rv380 - * - * Some of these functions might be used by newer ASICs. - */ -void r300_gpu_init(struct radeon_device *rdev); -int r300_mc_wait_for_idle(struct radeon_device *rdev); -int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev); - +/* This files gather functions specifics to: r300,r350,rv350,rv370,rv380 */ /* * rv370,rv380 PCIE GART */ +static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev); + void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev) { uint32_t tmp; @@ -182,59 +155,6 @@ void rv370_pcie_gart_fini(struct radeon_device *rdev) radeon_gart_fini(rdev); } -/* - * MC - */ -int r300_mc_init(struct radeon_device *rdev) -{ - int r; - - if (r100_debugfs_rbbm_init(rdev)) { - DRM_ERROR("Failed to register debugfs file for RBBM !\n"); - } - - r300_gpu_init(rdev); - r100_pci_gart_disable(rdev); - if (rdev->flags & RADEON_IS_PCIE) { - rv370_pcie_gart_disable(rdev); - } - - /* Setup GPU memory space */ - rdev->mc.vram_location = 0xFFFFFFFFUL; - rdev->mc.gtt_location = 0xFFFFFFFFUL; - if (rdev->flags & RADEON_IS_AGP) { - r = radeon_agp_init(rdev); - if (r) { - printk(KERN_WARNING "[drm] Disabling AGP\n"); - rdev->flags &= ~RADEON_IS_AGP; - rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; - } else { - rdev->mc.gtt_location = rdev->mc.agp_base; - } - } - r = radeon_mc_setup(rdev); - if (r) { - return r; - } - - /* Program GPU memory space */ - r100_mc_disable_clients(rdev); - if (r300_mc_wait_for_idle(rdev)) { - printk(KERN_WARNING "Failed to wait MC idle while " - "programming pipes. 
Bad things might happen.\n"); - } - r100_mc_setup(rdev); - return 0; -} - -void r300_mc_fini(struct radeon_device *rdev) -{ -} - - -/* - * Fence emission - */ void r300_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence) { @@ -260,10 +180,6 @@ void r300_fence_ring_emit(struct radeon_device *rdev, radeon_ring_write(rdev, RADEON_SW_INT_FIRE); } - -/* - * Global GPU functions - */ int r300_copy_dma(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, @@ -582,11 +498,6 @@ void r300_vram_info(struct radeon_device *rdev) r100_vram_init_sizes(rdev); } - -/* - * PCIE Lanes - */ - void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes) { uint32_t link_width_cntl, mask; @@ -646,10 +557,6 @@ void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes) } - -/* - * Debugfs info - */ #if defined(CONFIG_DEBUG_FS) static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data) { @@ -680,7 +587,7 @@ static struct drm_info_list rv370_pcie_gart_info_list[] = { }; #endif -int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev) +static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev) { #if defined(CONFIG_DEBUG_FS) return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1); @@ -689,10 +596,6 @@ int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev) #endif } - -/* - * CS functions - */ static int r300_packet0_check(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt, unsigned idx, unsigned reg) @@ -1226,12 +1129,6 @@ void r300_set_reg_safe(struct radeon_device *rdev) rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm); } -int r300_init(struct radeon_device *rdev) -{ - r300_set_reg_safe(rdev); - return 0; -} - void r300_mc_program(struct radeon_device *rdev) { struct r100_mc_save save; @@ -1265,3 +1162,198 @@ void r300_mc_program(struct radeon_device *rdev) S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); r100_mc_resume(rdev, &save); } + +void r300_clock_startup(struct radeon_device *rdev) +{ + u32 tmp; + + if (radeon_dynclks != -1 && radeon_dynclks) + radeon_legacy_set_clock_gating(rdev, 1); + /* We need to force on some of the block */ + tmp = RREG32_PLL(R_00000D_SCLK_CNTL); + tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1); + if ((rdev->family == CHIP_RV350) || (rdev->family == CHIP_RV380)) + tmp |= S_00000D_FORCE_VAP(1); + WREG32_PLL(R_00000D_SCLK_CNTL, tmp); +} + +static int r300_startup(struct radeon_device *rdev) +{ + int r; + + r300_mc_program(rdev); + /* Resume clock */ + r300_clock_startup(rdev); + /* Initialize GPU configuration (# pipes, ...) 
*/ + r300_gpu_init(rdev); + /* Initialize GART (initialize after TTM so we can allocate + * memory through TTM but finalize after TTM) */ + if (rdev->flags & RADEON_IS_PCIE) { + r = rv370_pcie_gart_enable(rdev); + if (r) + return r; + } + if (rdev->flags & RADEON_IS_PCI) { + r = r100_pci_gart_enable(rdev); + if (r) + return r; + } + /* Enable IRQ */ + rdev->irq.sw_int = true; + r100_irq_set(rdev); + /* 1M ring buffer */ + r = r100_cp_init(rdev, 1024 * 1024); + if (r) { + dev_err(rdev->dev, "failled initializing CP (%d).\n", r); + return r; + } + r = r100_wb_init(rdev); + if (r) + dev_err(rdev->dev, "failled initializing WB (%d).\n", r); + r = r100_ib_init(rdev); + if (r) { + dev_err(rdev->dev, "failled initializing IB (%d).\n", r); + return r; + } + return 0; +} + +int r300_resume(struct radeon_device *rdev) +{ + /* Make sur GART are not working */ + if (rdev->flags & RADEON_IS_PCIE) + rv370_pcie_gart_disable(rdev); + if (rdev->flags & RADEON_IS_PCI) + r100_pci_gart_disable(rdev); + /* Resume clock before doing reset */ + r300_clock_startup(rdev); + /* Reset gpu before posting otherwise ATOM will enter infinite loop */ + if (radeon_gpu_reset(rdev)) { + dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", + RREG32(R_000E40_RBBM_STATUS), + RREG32(R_0007C0_CP_STAT)); + } + /* post */ + radeon_combios_asic_init(rdev->ddev); + /* Resume clock after posting */ + r300_clock_startup(rdev); + return r300_startup(rdev); +} + +int r300_suspend(struct radeon_device *rdev) +{ + r100_cp_disable(rdev); + r100_wb_disable(rdev); + r100_irq_disable(rdev); + if (rdev->flags & RADEON_IS_PCIE) + rv370_pcie_gart_disable(rdev); + if (rdev->flags & RADEON_IS_PCI) + r100_pci_gart_disable(rdev); + return 0; +} + +void r300_fini(struct radeon_device *rdev) +{ + r300_suspend(rdev); + r100_cp_fini(rdev); + r100_wb_fini(rdev); + r100_ib_fini(rdev); + radeon_gem_fini(rdev); + if (rdev->flags & RADEON_IS_PCIE) + rv370_pcie_gart_fini(rdev); + if (rdev->flags & RADEON_IS_PCI) + r100_pci_gart_fini(rdev); + radeon_irq_kms_fini(rdev); + radeon_fence_driver_fini(rdev); + radeon_object_fini(rdev); + radeon_atombios_fini(rdev); + kfree(rdev->bios); + rdev->bios = NULL; +} + +int r300_init(struct radeon_device *rdev) +{ + int r; + + /* Disable VGA */ + r100_vga_render_disable(rdev); + /* Initialize scratch registers */ + radeon_scratch_init(rdev); + /* Initialize surface registers */ + radeon_surface_init(rdev); + /* TODO: disable VGA need to use VGA request */ + /* BIOS*/ + if (!radeon_get_bios(rdev)) { + if (ASIC_IS_AVIVO(rdev)) + return -EINVAL; + } + if (rdev->is_atom_bios) { + dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n"); + return -EINVAL; + } else { + r = radeon_combios_init(rdev); + if (r) + return r; + } + /* Reset gpu before posting otherwise ATOM will enter infinite loop */ + if (radeon_gpu_reset(rdev)) { + dev_warn(rdev->dev, + "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", + RREG32(R_000E40_RBBM_STATUS), + RREG32(R_0007C0_CP_STAT)); + } + /* check if cards are posted or not */ + if (!radeon_card_posted(rdev) && rdev->bios) { + DRM_INFO("GPU not posted. 
posting now...\n"); + radeon_combios_asic_init(rdev->ddev); + } + /* Set asic errata */ + r300_errata(rdev); + /* Initialize clocks */ + radeon_get_clock_info(rdev->ddev); + /* Get vram informations */ + r300_vram_info(rdev); + /* Initialize memory controller (also test AGP) */ + r = r420_mc_init(rdev); + if (r) + return r; + /* Fence driver */ + r = radeon_fence_driver_init(rdev); + if (r) + return r; + r = radeon_irq_kms_init(rdev); + if (r) + return r; + /* Memory manager */ + r = radeon_object_init(rdev); + if (r) + return r; + if (rdev->flags & RADEON_IS_PCIE) { + r = rv370_pcie_gart_init(rdev); + if (r) + return r; + } + if (rdev->flags & RADEON_IS_PCI) { + r = r100_pci_gart_init(rdev); + if (r) + return r; + } + r300_set_reg_safe(rdev); + rdev->accel_working = true; + r = r300_startup(rdev); + if (r) { + /* Somethings want wront with the accel init stop accel */ + dev_err(rdev->dev, "Disabling GPU acceleration\n"); + r300_suspend(rdev); + r100_cp_fini(rdev); + r100_wb_fini(rdev); + r100_ib_fini(rdev); + if (rdev->flags & RADEON_IS_PCIE) + rv370_pcie_gart_fini(rdev); + if (rdev->flags & RADEON_IS_PCI) + r100_pci_gart_fini(rdev); + radeon_irq_kms_fini(rdev); + rdev->accel_working = false; + } + return 0; +} diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h index d4fa3eb1074..4c73114f0de 100644 --- a/drivers/gpu/drm/radeon/r300d.h +++ b/drivers/gpu/drm/radeon/r300d.h @@ -96,6 +96,211 @@ #define S_000170_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0) #define G_000170_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF) #define C_000170_AGP_BASE_ADDR 0x00000000 +#define R_0007C0_CP_STAT 0x0007C0 +#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0) +#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1) +#define C_0007C0_MRU_BUSY 0xFFFFFFFE +#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1) +#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1) +#define C_0007C0_MWU_BUSY 0xFFFFFFFD +#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2) +#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1) +#define C_0007C0_RSIU_BUSY 0xFFFFFFFB +#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3) +#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1) +#define C_0007C0_RCIU_BUSY 0xFFFFFFF7 +#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9) +#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1) +#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF +#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10) +#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1) +#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF +#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11) +#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1) +#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF +#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12) +#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1) +#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF +#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13) +#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1) +#define C_0007C0_CSI_BUSY 0xFFFFDFFF +#define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14) +#define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1) +#define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF +#define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15) +#define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1) +#define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF +#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28) +#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1) +#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF +#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29) +#define G_0007C0_VIDDMA_BUSY(x) (((x) 
>> 29) & 0x1) +#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF +#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30) +#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1) +#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF +#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31) +#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1) +#define C_0007C0_CP_BUSY 0x7FFFFFFF +#define R_000E40_RBBM_STATUS 0x000E40 +#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0) +#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F) +#define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80 +#define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8) +#define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1) +#define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF +#define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9) +#define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1) +#define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF +#define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10) +#define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1) +#define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF +#define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11) +#define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1) +#define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF +#define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12) +#define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1) +#define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF +#define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13) +#define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1) +#define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF +#define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14) +#define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1) +#define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF +#define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15) +#define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1) +#define C_000E40_ENG_EV_BUSY 0xFFFF7FFF +#define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16) +#define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1) +#define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF +#define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17) +#define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1) +#define C_000E40_E2_BUSY 0xFFFDFFFF +#define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18) +#define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1) +#define C_000E40_RB2D_BUSY 0xFFFBFFFF +#define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19) +#define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1) +#define C_000E40_RB3D_BUSY 0xFFF7FFFF +#define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20) +#define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1) +#define C_000E40_VAP_BUSY 0xFFEFFFFF +#define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21) +#define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1) +#define C_000E40_RE_BUSY 0xFFDFFFFF +#define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22) +#define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1) +#define C_000E40_TAM_BUSY 0xFFBFFFFF +#define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23) +#define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1) +#define C_000E40_TDM_BUSY 0xFF7FFFFF +#define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24) +#define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1) +#define C_000E40_PB_BUSY 0xFEFFFFFF +#define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25) +#define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1) +#define C_000E40_TIM_BUSY 0xFDFFFFFF +#define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26) +#define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1) +#define C_000E40_GA_BUSY 0xFBFFFFFF +#define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27) +#define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1) +#define C_000E40_CBA2D_BUSY 0xF7FFFFFF +#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31) +#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) +#define C_000E40_GUI_ACTIVE 0x7FFFFFFF +#define 
R_00000D_SCLK_CNTL 0x00000D +#define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0) +#define G_00000D_SCLK_SRC_SEL(x) (((x) >> 0) & 0x7) +#define C_00000D_SCLK_SRC_SEL 0xFFFFFFF8 +#define S_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 3) +#define G_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) >> 3) & 0x1) +#define C_00000D_CP_MAX_DYN_STOP_LAT 0xFFFFFFF7 +#define S_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 4) +#define G_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) >> 4) & 0x1) +#define C_00000D_HDP_MAX_DYN_STOP_LAT 0xFFFFFFEF +#define S_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 5) +#define G_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) >> 5) & 0x1) +#define C_00000D_TV_MAX_DYN_STOP_LAT 0xFFFFFFDF +#define S_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 6) +#define G_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) >> 6) & 0x1) +#define C_00000D_E2_MAX_DYN_STOP_LAT 0xFFFFFFBF +#define S_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 7) +#define G_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) >> 7) & 0x1) +#define C_00000D_SE_MAX_DYN_STOP_LAT 0xFFFFFF7F +#define S_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 8) +#define G_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) >> 8) & 0x1) +#define C_00000D_IDCT_MAX_DYN_STOP_LAT 0xFFFFFEFF +#define S_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 9) +#define G_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) >> 9) & 0x1) +#define C_00000D_VIP_MAX_DYN_STOP_LAT 0xFFFFFDFF +#define S_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 10) +#define G_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) >> 10) & 0x1) +#define C_00000D_RE_MAX_DYN_STOP_LAT 0xFFFFFBFF +#define S_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 11) +#define G_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) >> 11) & 0x1) +#define C_00000D_PB_MAX_DYN_STOP_LAT 0xFFFFF7FF +#define S_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 12) +#define G_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) >> 12) & 0x1) +#define C_00000D_TAM_MAX_DYN_STOP_LAT 0xFFFFEFFF +#define S_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 13) +#define G_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) >> 13) & 0x1) +#define C_00000D_TDM_MAX_DYN_STOP_LAT 0xFFFFDFFF +#define S_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 14) +#define G_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) >> 14) & 0x1) +#define C_00000D_RB_MAX_DYN_STOP_LAT 0xFFFFBFFF +#define S_00000D_FORCE_DISP2(x) (((x) & 0x1) << 15) +#define G_00000D_FORCE_DISP2(x) (((x) >> 15) & 0x1) +#define C_00000D_FORCE_DISP2 0xFFFF7FFF +#define S_00000D_FORCE_CP(x) (((x) & 0x1) << 16) +#define G_00000D_FORCE_CP(x) (((x) >> 16) & 0x1) +#define C_00000D_FORCE_CP 0xFFFEFFFF +#define S_00000D_FORCE_HDP(x) (((x) & 0x1) << 17) +#define G_00000D_FORCE_HDP(x) (((x) >> 17) & 0x1) +#define C_00000D_FORCE_HDP 0xFFFDFFFF +#define S_00000D_FORCE_DISP1(x) (((x) & 0x1) << 18) +#define G_00000D_FORCE_DISP1(x) (((x) >> 18) & 0x1) +#define C_00000D_FORCE_DISP1 0xFFFBFFFF +#define S_00000D_FORCE_TOP(x) (((x) & 0x1) << 19) +#define G_00000D_FORCE_TOP(x) (((x) >> 19) & 0x1) +#define C_00000D_FORCE_TOP 0xFFF7FFFF +#define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20) +#define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1) +#define C_00000D_FORCE_E2 0xFFEFFFFF +#define S_00000D_FORCE_SE(x) (((x) & 0x1) << 21) +#define G_00000D_FORCE_SE(x) (((x) >> 21) & 0x1) +#define C_00000D_FORCE_SE 0xFFDFFFFF +#define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22) +#define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1) +#define C_00000D_FORCE_IDCT 0xFFBFFFFF +#define S_00000D_FORCE_VIP(x) (((x) & 0x1) << 23) +#define G_00000D_FORCE_VIP(x) (((x) >> 23) & 0x1) +#define C_00000D_FORCE_VIP 0xFF7FFFFF +#define S_00000D_FORCE_RE(x) (((x) & 0x1) << 
24) +#define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1) +#define C_00000D_FORCE_RE 0xFEFFFFFF +#define S_00000D_FORCE_PB(x) (((x) & 0x1) << 25) +#define G_00000D_FORCE_PB(x) (((x) >> 25) & 0x1) +#define C_00000D_FORCE_PB 0xFDFFFFFF +#define S_00000D_FORCE_TAM(x) (((x) & 0x1) << 26) +#define G_00000D_FORCE_TAM(x) (((x) >> 26) & 0x1) +#define C_00000D_FORCE_TAM 0xFBFFFFFF +#define S_00000D_FORCE_TDM(x) (((x) & 0x1) << 27) +#define G_00000D_FORCE_TDM(x) (((x) >> 27) & 0x1) +#define C_00000D_FORCE_TDM 0xF7FFFFFF +#define S_00000D_FORCE_RB(x) (((x) & 0x1) << 28) +#define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1) +#define C_00000D_FORCE_RB 0xEFFFFFFF +#define S_00000D_FORCE_TV_SCLK(x) (((x) & 0x1) << 29) +#define G_00000D_FORCE_TV_SCLK(x) (((x) >> 29) & 0x1) +#define C_00000D_FORCE_TV_SCLK 0xDFFFFFFF +#define S_00000D_FORCE_SUBPIC(x) (((x) & 0x1) << 30) +#define G_00000D_FORCE_SUBPIC(x) (((x) >> 30) & 0x1) +#define C_00000D_FORCE_SUBPIC 0xBFFFFFFF +#define S_00000D_FORCE_OV0(x) (((x) & 0x1) << 31) +#define G_00000D_FORCE_OV0(x) (((x) >> 31) & 0x1) +#define C_00000D_FORCE_OV0 0x7FFFFFFF + #endif diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 49a2fdc57d2..5c7fe52de30 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c @@ -155,6 +155,9 @@ static void r420_debugfs(struct radeon_device *rdev) static void r420_clock_resume(struct radeon_device *rdev) { u32 sclk_cntl; + + if (radeon_dynclks != -1 && radeon_dynclks) + radeon_atom_set_clock_gating(rdev, 1); sclk_cntl = RREG32_PLL(R_00000D_SCLK_CNTL); sclk_cntl |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1); if (rdev->family == CHIP_R420) @@ -167,6 +170,8 @@ static int r420_startup(struct radeon_device *rdev) int r; r300_mc_program(rdev); + /* Resume clock */ + r420_clock_resume(rdev); /* Initialize GART (initialize after TTM so we can allocate * memory through TTM but finalize after TTM) */ if (rdev->flags & RADEON_IS_PCIE) { @@ -267,7 +272,6 @@ int r420_init(struct radeon_device *rdev) { int r; - rdev->new_init_path = true; /* Initialize scratch registers */ radeon_scratch_init(rdev); /* Initialize surface registers */ diff --git a/drivers/gpu/drm/radeon/r420d.h b/drivers/gpu/drm/radeon/r420d.h index a48a7db1e2a..fc78d31a0b4 100644 --- a/drivers/gpu/drm/radeon/r420d.h +++ b/drivers/gpu/drm/radeon/r420d.h @@ -212,9 +212,9 @@ #define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20) #define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1) #define C_00000D_FORCE_E2 0xFFEFFFFF -#define S_00000D_FORCE_SE(x) (((x) & 0x1) << 21) -#define G_00000D_FORCE_SE(x) (((x) >> 21) & 0x1) -#define C_00000D_FORCE_SE 0xFFDFFFFF +#define S_00000D_FORCE_VAP(x) (((x) & 0x1) << 21) +#define G_00000D_FORCE_VAP(x) (((x) >> 21) & 0x1) +#define C_00000D_FORCE_VAP 0xFFDFFFFF #define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22) #define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1) #define C_00000D_FORCE_IDCT 0xFFBFFFFF @@ -224,24 +224,24 @@ #define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24) #define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1) #define C_00000D_FORCE_RE 0xFEFFFFFF -#define S_00000D_FORCE_PB(x) (((x) & 0x1) << 25) -#define G_00000D_FORCE_PB(x) (((x) >> 25) & 0x1) -#define C_00000D_FORCE_PB 0xFDFFFFFF +#define S_00000D_FORCE_SR(x) (((x) & 0x1) << 25) +#define G_00000D_FORCE_SR(x) (((x) >> 25) & 0x1) +#define C_00000D_FORCE_SR 0xFDFFFFFF #define S_00000D_FORCE_PX(x) (((x) & 0x1) << 26) #define G_00000D_FORCE_PX(x) (((x) >> 26) & 0x1) #define C_00000D_FORCE_PX 0xFBFFFFFF #define S_00000D_FORCE_TX(x) (((x) & 0x1) << 27) #define 
G_00000D_FORCE_TX(x) (((x) >> 27) & 0x1) #define C_00000D_FORCE_TX 0xF7FFFFFF -#define S_00000D_FORCE_RB(x) (((x) & 0x1) << 28) -#define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1) -#define C_00000D_FORCE_RB 0xEFFFFFFF +#define S_00000D_FORCE_US(x) (((x) & 0x1) << 28) +#define G_00000D_FORCE_US(x) (((x) >> 28) & 0x1) +#define C_00000D_FORCE_US 0xEFFFFFFF #define S_00000D_FORCE_TV_SCLK(x) (((x) & 0x1) << 29) #define G_00000D_FORCE_TV_SCLK(x) (((x) >> 29) & 0x1) #define C_00000D_FORCE_TV_SCLK 0xDFFFFFFF -#define S_00000D_FORCE_SUBPIC(x) (((x) & 0x1) << 30) -#define G_00000D_FORCE_SUBPIC(x) (((x) >> 30) & 0x1) -#define C_00000D_FORCE_SUBPIC 0xBFFFFFFF +#define S_00000D_FORCE_SU(x) (((x) & 0x1) << 30) +#define G_00000D_FORCE_SU(x) (((x) >> 30) & 0x1) +#define C_00000D_FORCE_SU 0xBFFFFFFF #define S_00000D_FORCE_OV0(x) (((x) & 0x1) << 31) #define G_00000D_FORCE_OV0(x) (((x) >> 31) & 0x1) #define C_00000D_FORCE_OV0 0x7FFFFFFF diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index 0bf13fccdaf..a555b7b19b4 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c @@ -186,7 +186,7 @@ static int r520_startup(struct radeon_device *rdev) } /* Enable IRQ */ rdev->irq.sw_int = true; - r100_irq_set(rdev); + rs600_irq_set(rdev); /* 1M ring buffer */ r = r100_cp_init(rdev, 1024 * 1024); if (r) { @@ -228,7 +228,6 @@ int r520_init(struct radeon_device *rdev) { int r; - rdev->new_init_path = true; /* Initialize scratch registers */ radeon_scratch_init(rdev); /* Initialize surface registers */ diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 2e4e60edbff..609719490ec 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -65,16 +65,11 @@ MODULE_FIRMWARE("radeon/RV710_me.bin"); int r600_debugfs_mc_info_init(struct radeon_device *rdev); -/* This files gather functions specifics to: - * r600,rv610,rv630,rv620,rv635,rv670 - * - * Some of these functions might be used by newer ASICs. 
- */ +/* r600,rv610,rv630,rv620,rv635,rv670 */ int r600_mc_wait_for_idle(struct radeon_device *rdev); void r600_gpu_init(struct radeon_device *rdev); void r600_fini(struct radeon_device *rdev); - /* * R600 PCIE GART */ @@ -168,7 +163,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev) WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); - WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end - 1) >> 12); + WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); @@ -225,6 +220,40 @@ void r600_pcie_gart_fini(struct radeon_device *rdev) radeon_gart_fini(rdev); } +void r600_agp_enable(struct radeon_device *rdev) +{ + u32 tmp; + int i; + + /* Setup L2 cache */ + WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | + ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | + EFFECTIVE_L2_QUEUE_SIZE(7)); + WREG32(VM_L2_CNTL2, 0); + WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1)); + /* Setup TLB control */ + tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING | + SYSTEM_ACCESS_MODE_NOT_IN_SYS | + EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) | + ENABLE_WAIT_L2_QUERY; + WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp); + WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp); + WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING); + WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp); + WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp); + WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp); + WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp); + WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp); + WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp); + WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp); + WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp); + WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp); + WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); + WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); + for (i = 0; i < 7; i++) + WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); +} + int r600_mc_wait_for_idle(struct radeon_device *rdev) { unsigned i; @@ -240,14 +269,9 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev) return -1; } -static void r600_mc_resume(struct radeon_device *rdev) +static void r600_mc_program(struct radeon_device *rdev) { - u32 d1vga_control, d2vga_control; - u32 vga_render_control, vga_hdp_control; - u32 d1crtc_control, d2crtc_control; - u32 new_d1grph_primary, new_d1grph_secondary; - u32 new_d2grph_primary, new_d2grph_secondary; - u64 old_vram_start; + struct rv515_mc_save save; u32 tmp; int i, j; @@ -261,85 +285,51 @@ static void r600_mc_resume(struct radeon_device *rdev) } WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0); - d1vga_control = RREG32(D1VGA_CONTROL); - d2vga_control = RREG32(D2VGA_CONTROL); - vga_render_control = RREG32(VGA_RENDER_CONTROL); - vga_hdp_control = RREG32(VGA_HDP_CONTROL); - d1crtc_control = RREG32(D1CRTC_CONTROL); - d2crtc_control = RREG32(D2CRTC_CONTROL); - old_vram_start = (u64)(RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24; - new_d1grph_primary = RREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS); - new_d1grph_secondary = RREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS); - new_d1grph_primary += rdev->mc.vram_start - old_vram_start; - new_d1grph_secondary += rdev->mc.vram_start - old_vram_start; - new_d2grph_primary = RREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS); - 
new_d2grph_secondary = RREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS); - new_d2grph_primary += rdev->mc.vram_start - old_vram_start; - new_d2grph_secondary += rdev->mc.vram_start - old_vram_start; - - /* Stop all video */ - WREG32(D1VGA_CONTROL, 0); - WREG32(D2VGA_CONTROL, 0); - WREG32(VGA_RENDER_CONTROL, 0); - WREG32(D1CRTC_UPDATE_LOCK, 1); - WREG32(D2CRTC_UPDATE_LOCK, 1); - WREG32(D1CRTC_CONTROL, 0); - WREG32(D2CRTC_CONTROL, 0); - WREG32(D1CRTC_UPDATE_LOCK, 0); - WREG32(D2CRTC_UPDATE_LOCK, 0); - - mdelay(1); + rv515_mc_stop(rdev, &save); if (r600_mc_wait_for_idle(rdev)) { - printk(KERN_WARNING "[drm] MC not idle !\n"); + dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); } - - /* Lockout access through VGA aperture*/ + /* Lockout access through VGA aperture (doesn't exist before R600) */ WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE); - /* Update configuration */ - WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12); - WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, (rdev->mc.vram_end - 1) >> 12); + if (rdev->flags & RADEON_IS_AGP) { + if (rdev->mc.vram_start < rdev->mc.gtt_start) { + /* VRAM before AGP */ + WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, + rdev->mc.vram_start >> 12); + WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, + rdev->mc.gtt_end >> 12); + } else { + /* VRAM after AGP */ + WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, + rdev->mc.gtt_start >> 12); + WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, + rdev->mc.vram_end >> 12); + } + } else { + WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12); + WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12); + } WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); - tmp = (((rdev->mc.vram_end - 1) >> 24) & 0xFFFF) << 16; + tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16; tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); WREG32(MC_VM_FB_LOCATION, tmp); WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); WREG32(HDP_NONSURFACE_INFO, (2 << 7)); - WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF); + WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF); if (rdev->flags & RADEON_IS_AGP) { - WREG32(MC_VM_AGP_TOP, (rdev->mc.gtt_end - 1) >> 16); - WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); + WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22); + WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22); WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22); } else { WREG32(MC_VM_AGP_BASE, 0); WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF); WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF); } - WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS, new_d1grph_primary); - WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS, new_d1grph_secondary); - WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS, new_d2grph_primary); - WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS, new_d2grph_secondary); - WREG32(VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start); - - /* Unlock host access */ - WREG32(VGA_HDP_CONTROL, vga_hdp_control); - - mdelay(1); if (r600_mc_wait_for_idle(rdev)) { - printk(KERN_WARNING "[drm] MC not idle !\n"); + dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); } - - /* Restore video state */ - WREG32(D1CRTC_UPDATE_LOCK, 1); - WREG32(D2CRTC_UPDATE_LOCK, 1); - WREG32(D1CRTC_CONTROL, d1crtc_control); - WREG32(D2CRTC_CONTROL, d2crtc_control); - WREG32(D1CRTC_UPDATE_LOCK, 0); - WREG32(D2CRTC_UPDATE_LOCK, 0); - WREG32(D1VGA_CONTROL, d1vga_control); - WREG32(D2VGA_CONTROL, d2vga_control); - WREG32(VGA_RENDER_CONTROL, vga_render_control); - + rv515_mc_resume(rdev, &save); /* we need to own VRAM, so turn off the VGA renderer here * to stop it overwriting our objects */ rv515_vga_render_disable(rdev); @@ -445,9 +435,9 
@@ int r600_mc_init(struct radeon_device *rdev) } } rdev->mc.vram_start = rdev->mc.vram_location; - rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size; + rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; rdev->mc.gtt_start = rdev->mc.gtt_location; - rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size; + rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; /* FIXME: we should enforce default clock in case GPU is not in * default setup */ @@ -463,6 +453,7 @@ int r600_mc_init(struct radeon_device *rdev) */ int r600_gpu_soft_reset(struct radeon_device *rdev) { + struct rv515_mc_save save; u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) | S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) | S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) | @@ -480,13 +471,25 @@ int r600_gpu_soft_reset(struct radeon_device *rdev) S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) | S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1); u32 srbm_reset = 0; + u32 tmp; + dev_info(rdev->dev, "GPU softreset \n"); + dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n", + RREG32(R_008010_GRBM_STATUS)); + dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n", + RREG32(R_008014_GRBM_STATUS2)); + dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n", + RREG32(R_000E50_SRBM_STATUS)); + rv515_mc_stop(rdev, &save); + if (r600_mc_wait_for_idle(rdev)) { + dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); + } /* Disable CP parsing/prefetching */ WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff)); /* Check if any of the rendering block is busy and reset it */ if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) || (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) { - WREG32(R_008020_GRBM_SOFT_RESET, S_008020_SOFT_RESET_CR(1) | + tmp = S_008020_SOFT_RESET_CR(1) | S_008020_SOFT_RESET_DB(1) | S_008020_SOFT_RESET_CB(1) | S_008020_SOFT_RESET_PA(1) | @@ -498,14 +501,18 @@ int r600_gpu_soft_reset(struct radeon_device *rdev) S_008020_SOFT_RESET_TC(1) | S_008020_SOFT_RESET_TA(1) | S_008020_SOFT_RESET_VC(1) | - S_008020_SOFT_RESET_VGT(1)); + S_008020_SOFT_RESET_VGT(1); + dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(R_008020_GRBM_SOFT_RESET, tmp); (void)RREG32(R_008020_GRBM_SOFT_RESET); udelay(50); WREG32(R_008020_GRBM_SOFT_RESET, 0); (void)RREG32(R_008020_GRBM_SOFT_RESET); } /* Reset CP (we always reset CP) */ - WREG32(R_008020_GRBM_SOFT_RESET, S_008020_SOFT_RESET_CP(1)); + tmp = S_008020_SOFT_RESET_CP(1); + dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(R_008020_GRBM_SOFT_RESET, tmp); (void)RREG32(R_008020_GRBM_SOFT_RESET); udelay(50); WREG32(R_008020_GRBM_SOFT_RESET, 0); @@ -533,6 +540,14 @@ int r600_gpu_soft_reset(struct radeon_device *rdev) srbm_reset |= S_000E60_SOFT_RESET_RLC(1); if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS))) srbm_reset |= S_000E60_SOFT_RESET_SEM(1); + if (G_000E50_BIF_BUSY(RREG32(R_000E50_SRBM_STATUS))) + srbm_reset |= S_000E60_SOFT_RESET_BIF(1); + dev_info(rdev->dev, " R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset); + WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset); + (void)RREG32(R_000E60_SRBM_SOFT_RESET); + udelay(50); + WREG32(R_000E60_SRBM_SOFT_RESET, 0); + (void)RREG32(R_000E60_SRBM_SOFT_RESET); WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset); (void)RREG32(R_000E60_SRBM_SOFT_RESET); udelay(50); @@ -540,6 +555,17 @@ int r600_gpu_soft_reset(struct radeon_device *rdev) (void)RREG32(R_000E60_SRBM_SOFT_RESET); /* Wait a little for things to settle down */ udelay(50); + dev_info(rdev->dev, " 
R_008010_GRBM_STATUS=0x%08X\n", + RREG32(R_008010_GRBM_STATUS)); + dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n", + RREG32(R_008014_GRBM_STATUS2)); + dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n", + RREG32(R_000E50_SRBM_STATUS)); + /* After reset we need to reinit the asic as GPU often endup in an + * incoherent state. + */ + atom_asic_init(rdev->mode_info.atom_context); + rv515_mc_resume(rdev, &save); return 0; } @@ -1350,32 +1376,47 @@ int r600_ring_test(struct radeon_device *rdev) return r; } -/* - * Writeback - */ -int r600_wb_init(struct radeon_device *rdev) +void r600_wb_disable(struct radeon_device *rdev) +{ + WREG32(SCRATCH_UMSK, 0); + if (rdev->wb.wb_obj) { + radeon_object_kunmap(rdev->wb.wb_obj); + radeon_object_unpin(rdev->wb.wb_obj); + } +} + +void r600_wb_fini(struct radeon_device *rdev) +{ + r600_wb_disable(rdev); + if (rdev->wb.wb_obj) { + radeon_object_unref(&rdev->wb.wb_obj); + rdev->wb.wb = NULL; + rdev->wb.wb_obj = NULL; + } +} + +int r600_wb_enable(struct radeon_device *rdev) { int r; if (rdev->wb.wb_obj == NULL) { - r = radeon_object_create(rdev, NULL, 4096, - true, - RADEON_GEM_DOMAIN_GTT, - false, &rdev->wb.wb_obj); + r = radeon_object_create(rdev, NULL, 4096, true, + RADEON_GEM_DOMAIN_GTT, false, &rdev->wb.wb_obj); if (r) { - DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r); + dev_warn(rdev->dev, "failed to create WB buffer (%d).\n", r); return r; } - r = radeon_object_pin(rdev->wb.wb_obj, - RADEON_GEM_DOMAIN_GTT, - &rdev->wb.gpu_addr); + r = radeon_object_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, + &rdev->wb.gpu_addr); if (r) { - DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r); + dev_warn(rdev->dev, "failed to pin WB buffer (%d).\n", r); + r600_wb_fini(rdev); return r; } r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); if (r) { - DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r); + dev_warn(rdev->dev, "failed to map WB buffer (%d).\n", r); + r600_wb_fini(rdev); return r; } } @@ -1386,21 +1427,6 @@ int r600_wb_init(struct radeon_device *rdev) return 0; } -void r600_wb_fini(struct radeon_device *rdev) -{ - if (rdev->wb.wb_obj) { - radeon_object_kunmap(rdev->wb.wb_obj); - radeon_object_unpin(rdev->wb.wb_obj); - radeon_object_unref(&rdev->wb.wb_obj); - rdev->wb.wb = NULL; - rdev->wb.wb_obj = NULL; - } -} - - -/* - * CS - */ void r600_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence) { @@ -1477,11 +1503,14 @@ int r600_startup(struct radeon_device *rdev) { int r; - r600_gpu_reset(rdev); - r600_mc_resume(rdev); - r = r600_pcie_gart_enable(rdev); - if (r) - return r; + r600_mc_program(rdev); + if (rdev->flags & RADEON_IS_AGP) { + r600_agp_enable(rdev); + } else { + r = r600_pcie_gart_enable(rdev); + if (r) + return r; + } r600_gpu_init(rdev); r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, @@ -1500,9 +1529,8 @@ int r600_startup(struct radeon_device *rdev) r = r600_cp_resume(rdev); if (r) return r; - r = r600_wb_init(rdev); - if (r) - return r; + /* write back buffer are not vital so don't worry about failure */ + r600_wb_enable(rdev); return 0; } @@ -1524,15 +1552,12 @@ int r600_resume(struct radeon_device *rdev) { int r; - if (radeon_gpu_reset(rdev)) { - /* FIXME: what do we want to do here ? */ - } + /* Do not reset GPU before posting, on r600 hw unlike on r500 hw, + * posting will perform necessary task to bring back GPU into good + * shape. 
+ */ /* post card */ - if (rdev->is_atom_bios) { - atom_asic_init(rdev->mode_info.atom_context); - } else { - radeon_combios_asic_init(rdev->ddev); - } + atom_asic_init(rdev->mode_info.atom_context); /* Initialize clocks */ r = radeon_clocks_init(rdev); if (r) { @@ -1545,7 +1570,7 @@ int r600_resume(struct radeon_device *rdev) return r; } - r = radeon_ib_test(rdev); + r = r600_ib_test(rdev); if (r) { DRM_ERROR("radeon: failled testing IB (%d).\n", r); return r; @@ -1553,13 +1578,12 @@ int r600_resume(struct radeon_device *rdev) return r; } - int r600_suspend(struct radeon_device *rdev) { /* FIXME: we should wait for ring to be empty */ r600_cp_stop(rdev); rdev->cp.ready = false; - + r600_wb_disable(rdev); r600_pcie_gart_disable(rdev); /* unpin shaders bo */ radeon_object_unpin(rdev->r600_blit.shader_obj); @@ -1576,7 +1600,6 @@ int r600_init(struct radeon_device *rdev) { int r; - rdev->new_init_path = true; r = radeon_dummy_page_init(rdev); if (r) return r; @@ -1593,8 +1616,10 @@ int r600_init(struct radeon_device *rdev) return -EINVAL; } /* Must be an ATOMBIOS */ - if (!rdev->is_atom_bios) + if (!rdev->is_atom_bios) { + dev_err(rdev->dev, "Expecting atombios for R600 GPU\n"); return -EINVAL; + } r = radeon_atombios_init(rdev); if (r) return r; @@ -1616,15 +1641,8 @@ int r600_init(struct radeon_device *rdev) if (r) return r; r = r600_mc_init(rdev); - if (r) { - if (rdev->flags & RADEON_IS_AGP) { - /* Retry with disabling AGP */ - r600_fini(rdev); - rdev->flags &= ~RADEON_IS_AGP; - return r600_init(rdev); - } + if (r) return r; - } /* Memory manager */ r = radeon_object_init(rdev); if (r) @@ -1653,12 +1671,10 @@ int r600_init(struct radeon_device *rdev) r = r600_startup(rdev); if (r) { - if (rdev->flags & RADEON_IS_AGP) { - /* Retry with disabling AGP */ - r600_fini(rdev); - rdev->flags &= ~RADEON_IS_AGP; - return r600_init(rdev); - } + r600_suspend(rdev); + r600_wb_fini(rdev); + radeon_ring_fini(rdev); + r600_pcie_gart_fini(rdev); rdev->accel_working = false; } if (rdev->accel_working) { @@ -1667,7 +1683,7 @@ int r600_init(struct radeon_device *rdev) DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r); rdev->accel_working = false; } - r = radeon_ib_test(rdev); + r = r600_ib_test(rdev); if (r) { DRM_ERROR("radeon: failled testing IB (%d).\n", r); rdev->accel_working = false; @@ -1683,19 +1699,15 @@ void r600_fini(struct radeon_device *rdev) r600_blit_fini(rdev); radeon_ring_fini(rdev); + r600_wb_fini(rdev); r600_pcie_gart_fini(rdev); radeon_gem_fini(rdev); radeon_fence_driver_fini(rdev); radeon_clocks_fini(rdev); -#if __OS_HAS_AGP if (rdev->flags & RADEON_IS_AGP) radeon_agp_fini(rdev); -#endif radeon_object_fini(rdev); - if (rdev->is_atom_bios) - radeon_atombios_fini(rdev); - else - radeon_combios_fini(rdev); + radeon_atombios_fini(rdev); kfree(rdev->bios); rdev->bios = NULL; radeon_dummy_page_fini(rdev); diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c index d988eece018..dec50108160 100644 --- a/drivers/gpu/drm/radeon/r600_blit.c +++ b/drivers/gpu/drm/radeon/r600_blit.c @@ -582,8 +582,6 @@ r600_blit_copy(struct drm_device *dev, u64 vb_addr; u32 *vb; - vb = r600_nomm_get_vb_ptr(dev); - if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) { max_bytes = 8192; @@ -619,8 +617,8 @@ r600_blit_copy(struct drm_device *dev, if (!dev_priv->blit_vb) return; set_shaders(dev); - vb = r600_nomm_get_vb_ptr(dev); } + vb = r600_nomm_get_vb_ptr(dev); vb[0] = i2f(dst_x); vb[1] = 0; @@ -708,8 +706,8 @@ r600_blit_copy(struct drm_device *dev, return; 
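/* In the r600_blit_copy()/r600_blit_swap() hunks around this point, the early
 * vb = r600_nomm_get_vb_ptr(dev) fetch at the top of each function is removed
 * and the call is instead made unconditionally after the "vertex buffer full"
 * branch, so vb is always re-read after a possible r600_nomm_put_vb() /
 * set_shaders() cycle rather than only when that branch was taken. This looks
 * like a fix for writing vertices through a stale pointer once the buffer has
 * just been flushed; the same pattern is applied in r600_blit_kms.c below.
 */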
set_shaders(dev); - vb = r600_nomm_get_vb_ptr(dev); } + vb = r600_nomm_get_vb_ptr(dev); vb[0] = i2f(dst_x / 4); vb[1] = 0; @@ -777,8 +775,6 @@ r600_blit_swap(struct drm_device *dev, u64 vb_addr; u32 *vb; - vb = r600_nomm_get_vb_ptr(dev); - if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) { r600_nomm_put_vb(dev); @@ -787,8 +783,8 @@ r600_blit_swap(struct drm_device *dev, return; set_shaders(dev); - vb = r600_nomm_get_vb_ptr(dev); } + vb = r600_nomm_get_vb_ptr(dev); if (cpp == 4) { cb_format = COLOR_8_8_8_8; diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index acae33e2ad5..93108bb31d1 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c @@ -610,7 +610,6 @@ void r600_kms_blit_copy(struct radeon_device *rdev, DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr, size_bytes, rdev->r600_blit.vb_used); - vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used); if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) { max_bytes = 8192; @@ -653,6 +652,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev, vb = r600_nomm_get_vb_ptr(dev); #endif } + vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used); vb[0] = i2f(dst_x); vb[1] = 0; @@ -747,6 +747,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev, vb = r600_nomm_get_vb_ptr(dev); } #endif + vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used); vb[0] = i2f(dst_x / 4); vb[1] = 0; diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index d28970db6a2..17e42195c63 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -252,7 +252,7 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p) header = radeon_get_ib_value(p, h_idx); crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); - reg = header >> 2; + reg = CP_PACKET0_GET_REG(header); mutex_lock(&p->rdev->ddev->mode_config.mutex); obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); if (!obj) { diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 4a9028a85c9..9b64d47f1f8 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h @@ -643,6 +643,7 @@ #define G_000E50_MCDW_BUSY(x) (((x) >> 13) & 1) #define G_000E50_SEM_BUSY(x) (((x) >> 14) & 1) #define G_000E50_RLC_BUSY(x) (((x) >> 15) & 1) +#define G_000E50_BIF_BUSY(x) (((x) >> 29) & 1) #define R_000E60_SRBM_SOFT_RESET 0x0E60 #define S_000E60_SOFT_RESET_BIF(x) (((x) & 1) << 1) #define S_000E60_SOFT_RESET_CG(x) (((x) & 1) << 2) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 950b346e343..5ab35b81c86 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -590,18 +590,8 @@ struct radeon_asic { void (*fini)(struct radeon_device *rdev); int (*resume)(struct radeon_device *rdev); int (*suspend)(struct radeon_device *rdev); - void (*errata)(struct radeon_device *rdev); - void (*vram_info)(struct radeon_device *rdev); void (*vga_set_state)(struct radeon_device *rdev, bool state); int (*gpu_reset)(struct radeon_device *rdev); - int (*mc_init)(struct radeon_device *rdev); - void (*mc_fini)(struct radeon_device *rdev); - int (*wb_init)(struct radeon_device *rdev); - void (*wb_fini)(struct radeon_device *rdev); - int (*gart_init)(struct radeon_device *rdev); - void (*gart_fini)(struct radeon_device *rdev); - int (*gart_enable)(struct radeon_device *rdev); - void 
(*gart_disable)(struct radeon_device *rdev); void (*gart_tlb_flush)(struct radeon_device *rdev); int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr); int (*cp_init)(struct radeon_device *rdev, unsigned ring_size); @@ -611,7 +601,6 @@ struct radeon_asic { void (*ring_start)(struct radeon_device *rdev); int (*ring_test)(struct radeon_device *rdev); void (*ring_ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib); - int (*ib_test)(struct radeon_device *rdev); int (*irq_set)(struct radeon_device *rdev); int (*irq_process)(struct radeon_device *rdev); u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc); @@ -789,7 +778,6 @@ struct radeon_device { bool shutdown; bool suspend; bool need_dma32; - bool new_init_path; bool accel_working; struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES]; const struct firmware *me_fw; /* all family ME firmware */ @@ -949,28 +937,14 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) #define radeon_resume(rdev) (rdev)->asic->resume((rdev)) #define radeon_suspend(rdev) (rdev)->asic->suspend((rdev)) #define radeon_cs_parse(p) rdev->asic->cs_parse((p)) -#define radeon_errata(rdev) (rdev)->asic->errata((rdev)) -#define radeon_vram_info(rdev) (rdev)->asic->vram_info((rdev)) #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) #define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev)) -#define radeon_mc_init(rdev) (rdev)->asic->mc_init((rdev)) -#define radeon_mc_fini(rdev) (rdev)->asic->mc_fini((rdev)) -#define radeon_wb_init(rdev) (rdev)->asic->wb_init((rdev)) -#define radeon_wb_fini(rdev) (rdev)->asic->wb_fini((rdev)) -#define radeon_gpu_gart_init(rdev) (rdev)->asic->gart_init((rdev)) -#define radeon_gpu_gart_fini(rdev) (rdev)->asic->gart_fini((rdev)) -#define radeon_gart_enable(rdev) (rdev)->asic->gart_enable((rdev)) -#define radeon_gart_disable(rdev) (rdev)->asic->gart_disable((rdev)) #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev)) #define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p)) -#define radeon_cp_init(rdev,rsize) (rdev)->asic->cp_init((rdev), (rsize)) -#define radeon_cp_fini(rdev) (rdev)->asic->cp_fini((rdev)) -#define radeon_cp_disable(rdev) (rdev)->asic->cp_disable((rdev)) #define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev)) #define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev)) #define radeon_ring_test(rdev) (rdev)->asic->ring_test((rdev)) #define radeon_ring_ib_execute(rdev, ib) (rdev)->asic->ring_ib_execute((rdev), (ib)) -#define radeon_ib_test(rdev) (rdev)->asic->ib_test((rdev)) #define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev)) #define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev)) #define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc)) @@ -996,6 +970,7 @@ extern void radeon_clocks_fini(struct radeon_device *rdev); extern void radeon_scratch_init(struct radeon_device *rdev); extern void radeon_surface_init(struct radeon_device *rdev); extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); +extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable); extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); /* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ @@ -1031,11 +1006,27 @@ extern int r100_wb_init(struct radeon_device *rdev); extern void r100_hdp_reset(struct radeon_device *rdev); extern int r100_rb2d_reset(struct radeon_device 
*rdev); extern int r100_cp_reset(struct radeon_device *rdev); +extern void r100_vga_render_disable(struct radeon_device *rdev); +extern int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, + struct radeon_cs_packet *pkt, + struct radeon_object *robj); +extern int r100_cs_parse_packet0(struct radeon_cs_parser *p, + struct radeon_cs_packet *pkt, + const unsigned *auth, unsigned n, + radeon_packet0_check_t check); +extern int r100_cs_packet_parse(struct radeon_cs_parser *p, + struct radeon_cs_packet *pkt, + unsigned idx); + +/* rv200,rv250,rv280 */ +extern void r200_set_safe_registers(struct radeon_device *rdev); /* r300,r350,rv350,rv370,rv380 */ extern void r300_set_reg_safe(struct radeon_device *rdev); extern void r300_mc_program(struct radeon_device *rdev); extern void r300_vram_info(struct radeon_device *rdev); +extern void r300_clock_startup(struct radeon_device *rdev); +extern int r300_mc_wait_for_idle(struct radeon_device *rdev); extern int rv370_pcie_gart_init(struct radeon_device *rdev); extern void rv370_pcie_gart_fini(struct radeon_device *rdev); extern int rv370_pcie_gart_enable(struct radeon_device *rdev); @@ -1066,6 +1057,18 @@ extern void rv515_clock_startup(struct radeon_device *rdev); extern void rv515_debugfs(struct radeon_device *rdev); extern int rv515_suspend(struct radeon_device *rdev); +/* rs400 */ +extern int rs400_gart_init(struct radeon_device *rdev); +extern int rs400_gart_enable(struct radeon_device *rdev); +extern void rs400_gart_adjust_size(struct radeon_device *rdev); +extern void rs400_gart_disable(struct radeon_device *rdev); +extern void rs400_gart_fini(struct radeon_device *rdev); + +/* rs600 */ +extern void rs600_set_safe_registers(struct radeon_device *rdev); +extern int rs600_irq_set(struct radeon_device *rdev); +extern void rs600_irq_disable(struct radeon_device *rdev); + /* rs690, rs740 */ extern void rs690_line_buffer_adjust(struct radeon_device *rdev, struct drm_display_mode *mode1, @@ -1083,8 +1086,9 @@ extern int r600_pcie_gart_init(struct radeon_device *rdev); extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev); extern int r600_ib_test(struct radeon_device *rdev); extern int r600_ring_test(struct radeon_device *rdev); -extern int r600_wb_init(struct radeon_device *rdev); extern void r600_wb_fini(struct radeon_device *rdev); +extern int r600_wb_enable(struct radeon_device *rdev); +extern void r600_wb_disable(struct radeon_device *rdev); extern void r600_scratch_init(struct radeon_device *rdev); extern int r600_blit_init(struct radeon_device *rdev); extern void r600_blit_fini(struct radeon_device *rdev); diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index c8a4e7b5663..c3532c7a6f3 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -41,28 +41,17 @@ void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); /* * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ -int r100_init(struct radeon_device *rdev); -int r200_init(struct radeon_device *rdev); +extern int r100_init(struct radeon_device *rdev); +extern void r100_fini(struct radeon_device *rdev); +extern int r100_suspend(struct radeon_device *rdev); +extern int r100_resume(struct radeon_device *rdev); uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg); void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); -void r100_errata(struct radeon_device *rdev); -void r100_vram_info(struct radeon_device *rdev); void r100_vga_set_state(struct 
radeon_device *rdev, bool state); int r100_gpu_reset(struct radeon_device *rdev); -int r100_mc_init(struct radeon_device *rdev); -void r100_mc_fini(struct radeon_device *rdev); u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); -int r100_wb_init(struct radeon_device *rdev); -void r100_wb_fini(struct radeon_device *rdev); -int r100_pci_gart_init(struct radeon_device *rdev); -void r100_pci_gart_fini(struct radeon_device *rdev); -int r100_pci_gart_enable(struct radeon_device *rdev); -void r100_pci_gart_disable(struct radeon_device *rdev); void r100_pci_gart_tlb_flush(struct radeon_device *rdev); int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); -int r100_cp_init(struct radeon_device *rdev, unsigned ring_size); -void r100_cp_fini(struct radeon_device *rdev); -void r100_cp_disable(struct radeon_device *rdev); void r100_cp_commit(struct radeon_device *rdev); void r100_ring_start(struct radeon_device *rdev); int r100_irq_set(struct radeon_device *rdev); @@ -83,33 +72,21 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg, int r100_clear_surface_reg(struct radeon_device *rdev, int reg); void r100_bandwidth_update(struct radeon_device *rdev); void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); -int r100_ib_test(struct radeon_device *rdev); int r100_ring_test(struct radeon_device *rdev); static struct radeon_asic r100_asic = { .init = &r100_init, - .errata = &r100_errata, - .vram_info = &r100_vram_info, + .fini = &r100_fini, + .suspend = &r100_suspend, + .resume = &r100_resume, .vga_set_state = &r100_vga_set_state, .gpu_reset = &r100_gpu_reset, - .mc_init = &r100_mc_init, - .mc_fini = &r100_mc_fini, - .wb_init = &r100_wb_init, - .wb_fini = &r100_wb_fini, - .gart_init = &r100_pci_gart_init, - .gart_fini = &r100_pci_gart_fini, - .gart_enable = &r100_pci_gart_enable, - .gart_disable = &r100_pci_gart_disable, .gart_tlb_flush = &r100_pci_gart_tlb_flush, .gart_set_page = &r100_pci_gart_set_page, - .cp_init = &r100_cp_init, - .cp_fini = &r100_cp_fini, - .cp_disable = &r100_cp_disable, .cp_commit = &r100_cp_commit, .ring_start = &r100_ring_start, .ring_test = &r100_ring_test, .ring_ib_execute = &r100_ring_ib_execute, - .ib_test = &r100_ib_test, .irq_set = &r100_irq_set, .irq_process = &r100_irq_process, .get_vblank_counter = &r100_get_vblank_counter, @@ -131,55 +108,38 @@ static struct radeon_asic r100_asic = { /* * r300,r350,rv350,rv380 */ -int r300_init(struct radeon_device *rdev); -void r300_errata(struct radeon_device *rdev); -void r300_vram_info(struct radeon_device *rdev); -int r300_gpu_reset(struct radeon_device *rdev); -int r300_mc_init(struct radeon_device *rdev); -void r300_mc_fini(struct radeon_device *rdev); -void r300_ring_start(struct radeon_device *rdev); -void r300_fence_ring_emit(struct radeon_device *rdev, - struct radeon_fence *fence); -int r300_cs_parse(struct radeon_cs_parser *p); -int rv370_pcie_gart_init(struct radeon_device *rdev); -void rv370_pcie_gart_fini(struct radeon_device *rdev); -int rv370_pcie_gart_enable(struct radeon_device *rdev); -void rv370_pcie_gart_disable(struct radeon_device *rdev); -void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); -int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); -uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg); -void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); -void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); -int r300_copy_dma(struct radeon_device *rdev, - 
uint64_t src_offset, - uint64_t dst_offset, - unsigned num_pages, - struct radeon_fence *fence); - +extern int r300_init(struct radeon_device *rdev); +extern void r300_fini(struct radeon_device *rdev); +extern int r300_suspend(struct radeon_device *rdev); +extern int r300_resume(struct radeon_device *rdev); +extern int r300_gpu_reset(struct radeon_device *rdev); +extern void r300_ring_start(struct radeon_device *rdev); +extern void r300_fence_ring_emit(struct radeon_device *rdev, + struct radeon_fence *fence); +extern int r300_cs_parse(struct radeon_cs_parser *p); +extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); +extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); +extern uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg); +extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); +extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); +extern int r300_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, + uint64_t dst_offset, + unsigned num_pages, + struct radeon_fence *fence); static struct radeon_asic r300_asic = { .init = &r300_init, - .errata = &r300_errata, - .vram_info = &r300_vram_info, + .fini = &r300_fini, + .suspend = &r300_suspend, + .resume = &r300_resume, .vga_set_state = &r100_vga_set_state, .gpu_reset = &r300_gpu_reset, - .mc_init = &r300_mc_init, - .mc_fini = &r300_mc_fini, - .wb_init = &r100_wb_init, - .wb_fini = &r100_wb_fini, - .gart_init = &r100_pci_gart_init, - .gart_fini = &r100_pci_gart_fini, - .gart_enable = &r100_pci_gart_enable, - .gart_disable = &r100_pci_gart_disable, .gart_tlb_flush = &r100_pci_gart_tlb_flush, .gart_set_page = &r100_pci_gart_set_page, - .cp_init = &r100_cp_init, - .cp_fini = &r100_cp_fini, - .cp_disable = &r100_cp_disable, .cp_commit = &r100_cp_commit, .ring_start = &r300_ring_start, .ring_test = &r100_ring_test, .ring_ib_execute = &r100_ring_ib_execute, - .ib_test = &r100_ib_test, .irq_set = &r100_irq_set, .irq_process = &r100_irq_process, .get_vblank_counter = &r100_get_vblank_counter, @@ -209,26 +169,14 @@ static struct radeon_asic r420_asic = { .fini = &r420_fini, .suspend = &r420_suspend, .resume = &r420_resume, - .errata = NULL, - .vram_info = NULL, .vga_set_state = &r100_vga_set_state, .gpu_reset = &r300_gpu_reset, - .mc_init = NULL, - .mc_fini = NULL, - .wb_init = NULL, - .wb_fini = NULL, - .gart_enable = NULL, - .gart_disable = NULL, .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, .gart_set_page = &rv370_pcie_gart_set_page, - .cp_init = NULL, - .cp_fini = NULL, - .cp_disable = NULL, .cp_commit = &r100_cp_commit, .ring_start = &r300_ring_start, .ring_test = &r100_ring_test, .ring_ib_execute = &r100_ring_ib_execute, - .ib_test = NULL, .irq_set = &r100_irq_set, .irq_process = &r100_irq_process, .get_vblank_counter = &r100_get_vblank_counter, @@ -250,42 +198,27 @@ static struct radeon_asic r420_asic = { /* * rs400,rs480 */ -void rs400_errata(struct radeon_device *rdev); -void rs400_vram_info(struct radeon_device *rdev); -int rs400_mc_init(struct radeon_device *rdev); -void rs400_mc_fini(struct radeon_device *rdev); -int rs400_gart_init(struct radeon_device *rdev); -void rs400_gart_fini(struct radeon_device *rdev); -int rs400_gart_enable(struct radeon_device *rdev); -void rs400_gart_disable(struct radeon_device *rdev); +extern int rs400_init(struct radeon_device *rdev); +extern void rs400_fini(struct radeon_device *rdev); +extern int rs400_suspend(struct radeon_device *rdev); +extern int rs400_resume(struct radeon_device 
*rdev); void rs400_gart_tlb_flush(struct radeon_device *rdev); int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); static struct radeon_asic rs400_asic = { - .init = &r300_init, - .errata = &rs400_errata, - .vram_info = &rs400_vram_info, + .init = &rs400_init, + .fini = &rs400_fini, + .suspend = &rs400_suspend, + .resume = &rs400_resume, .vga_set_state = &r100_vga_set_state, .gpu_reset = &r300_gpu_reset, - .mc_init = &rs400_mc_init, - .mc_fini = &rs400_mc_fini, - .wb_init = &r100_wb_init, - .wb_fini = &r100_wb_fini, - .gart_init = &rs400_gart_init, - .gart_fini = &rs400_gart_fini, - .gart_enable = &rs400_gart_enable, - .gart_disable = &rs400_gart_disable, .gart_tlb_flush = &rs400_gart_tlb_flush, .gart_set_page = &rs400_gart_set_page, - .cp_init = &r100_cp_init, - .cp_fini = &r100_cp_fini, - .cp_disable = &r100_cp_disable, .cp_commit = &r100_cp_commit, .ring_start = &r300_ring_start, .ring_test = &r100_ring_test, .ring_ib_execute = &r100_ring_ib_execute, - .ib_test = &r100_ib_test, .irq_set = &r100_irq_set, .irq_process = &r100_irq_process, .get_vblank_counter = &r100_get_vblank_counter, @@ -307,18 +240,13 @@ static struct radeon_asic rs400_asic = { /* * rs600. */ -int rs600_init(struct radeon_device *rdev); -void rs600_errata(struct radeon_device *rdev); -void rs600_vram_info(struct radeon_device *rdev); -int rs600_mc_init(struct radeon_device *rdev); -void rs600_mc_fini(struct radeon_device *rdev); +extern int rs600_init(struct radeon_device *rdev); +extern void rs600_fini(struct radeon_device *rdev); +extern int rs600_suspend(struct radeon_device *rdev); +extern int rs600_resume(struct radeon_device *rdev); int rs600_irq_set(struct radeon_device *rdev); int rs600_irq_process(struct radeon_device *rdev); u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); -int rs600_gart_init(struct radeon_device *rdev); -void rs600_gart_fini(struct radeon_device *rdev); -int rs600_gart_enable(struct radeon_device *rdev); -void rs600_gart_disable(struct radeon_device *rdev); void rs600_gart_tlb_flush(struct radeon_device *rdev); int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); @@ -326,28 +254,17 @@ void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); void rs600_bandwidth_update(struct radeon_device *rdev); static struct radeon_asic rs600_asic = { .init = &rs600_init, - .errata = &rs600_errata, - .vram_info = &rs600_vram_info, + .fini = &rs600_fini, + .suspend = &rs600_suspend, + .resume = &rs600_resume, .vga_set_state = &r100_vga_set_state, .gpu_reset = &r300_gpu_reset, - .mc_init = &rs600_mc_init, - .mc_fini = &rs600_mc_fini, - .wb_init = &r100_wb_init, - .wb_fini = &r100_wb_fini, - .gart_init = &rs600_gart_init, - .gart_fini = &rs600_gart_fini, - .gart_enable = &rs600_gart_enable, - .gart_disable = &rs600_gart_disable, .gart_tlb_flush = &rs600_gart_tlb_flush, .gart_set_page = &rs600_gart_set_page, - .cp_init = &r100_cp_init, - .cp_fini = &r100_cp_fini, - .cp_disable = &r100_cp_disable, .cp_commit = &r100_cp_commit, .ring_start = &r300_ring_start, .ring_test = &r100_ring_test, .ring_ib_execute = &r100_ring_ib_execute, - .ib_test = &r100_ib_test, .irq_set = &rs600_irq_set, .irq_process = &rs600_irq_process, .get_vblank_counter = &rs600_get_vblank_counter, @@ -367,37 +284,26 @@ static struct radeon_asic rs600_asic = { 
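/* The rs690/rs740 table that follows gets the same treatment as the r100,
 * r300, rs400 and rs600 tables above: the errata/vram_info/mc_init/mc_fini,
 * wb_*, gart_* and cp_* hooks are dropped from struct radeon_asic, and the
 * table now points at dedicated rs690_init/fini/suspend/resume entry points
 * instead of borrowing rs600_init.
 */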
/* * rs690,rs740 */ -void rs690_errata(struct radeon_device *rdev); -void rs690_vram_info(struct radeon_device *rdev); -int rs690_mc_init(struct radeon_device *rdev); -void rs690_mc_fini(struct radeon_device *rdev); +int rs690_init(struct radeon_device *rdev); +void rs690_fini(struct radeon_device *rdev); +int rs690_resume(struct radeon_device *rdev); +int rs690_suspend(struct radeon_device *rdev); uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg); void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); void rs690_bandwidth_update(struct radeon_device *rdev); static struct radeon_asic rs690_asic = { - .init = &rs600_init, - .errata = &rs690_errata, - .vram_info = &rs690_vram_info, + .init = &rs690_init, + .fini = &rs690_fini, + .suspend = &rs690_suspend, + .resume = &rs690_resume, .vga_set_state = &r100_vga_set_state, .gpu_reset = &r300_gpu_reset, - .mc_init = &rs690_mc_init, - .mc_fini = &rs690_mc_fini, - .wb_init = &r100_wb_init, - .wb_fini = &r100_wb_fini, - .gart_init = &rs400_gart_init, - .gart_fini = &rs400_gart_fini, - .gart_enable = &rs400_gart_enable, - .gart_disable = &rs400_gart_disable, .gart_tlb_flush = &rs400_gart_tlb_flush, .gart_set_page = &rs400_gart_set_page, - .cp_init = &r100_cp_init, - .cp_fini = &r100_cp_fini, - .cp_disable = &r100_cp_disable, .cp_commit = &r100_cp_commit, .ring_start = &r300_ring_start, .ring_test = &r100_ring_test, .ring_ib_execute = &r100_ring_ib_execute, - .ib_test = &r100_ib_test, .irq_set = &rs600_irq_set, .irq_process = &rs600_irq_process, .get_vblank_counter = &rs600_get_vblank_counter, @@ -435,28 +341,14 @@ static struct radeon_asic rv515_asic = { .fini = &rv515_fini, .suspend = &rv515_suspend, .resume = &rv515_resume, - .errata = NULL, - .vram_info = NULL, .vga_set_state = &r100_vga_set_state, .gpu_reset = &rv515_gpu_reset, - .mc_init = NULL, - .mc_fini = NULL, - .wb_init = NULL, - .wb_fini = NULL, - .gart_init = &rv370_pcie_gart_init, - .gart_fini = &rv370_pcie_gart_fini, - .gart_enable = NULL, - .gart_disable = NULL, .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, .gart_set_page = &rv370_pcie_gart_set_page, - .cp_init = NULL, - .cp_fini = NULL, - .cp_disable = NULL, .cp_commit = &r100_cp_commit, .ring_start = &rv515_ring_start, .ring_test = &r100_ring_test, .ring_ib_execute = &r100_ring_ib_execute, - .ib_test = NULL, .irq_set = &rs600_irq_set, .irq_process = &rs600_irq_process, .get_vblank_counter = &rs600_get_vblank_counter, @@ -485,28 +377,14 @@ static struct radeon_asic r520_asic = { .fini = &rv515_fini, .suspend = &rv515_suspend, .resume = &r520_resume, - .errata = NULL, - .vram_info = NULL, .vga_set_state = &r100_vga_set_state, .gpu_reset = &rv515_gpu_reset, - .mc_init = NULL, - .mc_fini = NULL, - .wb_init = NULL, - .wb_fini = NULL, - .gart_init = NULL, - .gart_fini = NULL, - .gart_enable = NULL, - .gart_disable = NULL, .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, .gart_set_page = &rv370_pcie_gart_set_page, - .cp_init = NULL, - .cp_fini = NULL, - .cp_disable = NULL, .cp_commit = &r100_cp_commit, .ring_start = &rv515_ring_start, .ring_test = &r100_ring_test, .ring_ib_execute = &r100_ring_ib_execute, - .ib_test = NULL, .irq_set = &rs600_irq_set, .irq_process = &rs600_irq_process, .get_vblank_counter = &rs600_get_vblank_counter, @@ -554,37 +432,23 @@ int r600_set_surface_reg(struct radeon_device *rdev, int reg, uint32_t offset, uint32_t obj_size); int r600_clear_surface_reg(struct radeon_device *rdev, int reg); void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); -int 
r600_ib_test(struct radeon_device *rdev); int r600_ring_test(struct radeon_device *rdev); int r600_copy_blit(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_pages, struct radeon_fence *fence); static struct radeon_asic r600_asic = { - .errata = NULL, .init = &r600_init, .fini = &r600_fini, .suspend = &r600_suspend, .resume = &r600_resume, .cp_commit = &r600_cp_commit, - .vram_info = NULL, .vga_set_state = &r600_vga_set_state, .gpu_reset = &r600_gpu_reset, - .mc_init = NULL, - .mc_fini = NULL, - .wb_init = &r600_wb_init, - .wb_fini = &r600_wb_fini, - .gart_enable = NULL, - .gart_disable = NULL, .gart_tlb_flush = &r600_pcie_gart_tlb_flush, .gart_set_page = &rs600_gart_set_page, - .cp_init = NULL, - .cp_fini = NULL, - .cp_disable = NULL, - .ring_start = NULL, .ring_test = &r600_ring_test, .ring_ib_execute = &r600_ring_ib_execute, - .ib_test = &r600_ib_test, .irq_set = &r600_irq_set, .irq_process = &r600_irq_process, .fence_ring_emit = &r600_fence_ring_emit, @@ -611,30 +475,17 @@ int rv770_resume(struct radeon_device *rdev); int rv770_gpu_reset(struct radeon_device *rdev); static struct radeon_asic rv770_asic = { - .errata = NULL, .init = &rv770_init, .fini = &rv770_fini, .suspend = &rv770_suspend, .resume = &rv770_resume, .cp_commit = &r600_cp_commit, - .vram_info = NULL, .gpu_reset = &rv770_gpu_reset, .vga_set_state = &r600_vga_set_state, - .mc_init = NULL, - .mc_fini = NULL, - .wb_init = &r600_wb_init, - .wb_fini = &r600_wb_fini, - .gart_enable = NULL, - .gart_disable = NULL, .gart_tlb_flush = &r600_pcie_gart_tlb_flush, .gart_set_page = &rs600_gart_set_page, - .cp_init = NULL, - .cp_fini = NULL, - .cp_disable = NULL, - .ring_start = NULL, .ring_test = &r600_ring_test, .ring_ib_execute = &r600_ring_ib_execute, - .ib_test = &r600_ib_test, .irq_set = &r600_irq_set, .irq_process = &r600_irq_process, .fence_ring_emit = &r600_fence_ring_emit, diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index 96e37a6e7ce..34a9b911951 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -33,12 +33,50 @@ /* * BIOS. */ + +/* If you boot an IGP board with a discrete card as the primary, + * the IGP rom is not accessible via the rom bar as the IGP rom is + * part of the system bios. On boot, the system bios puts a + * copy of the igp rom at the start of vram if a discrete card is + * present. + */ +static bool igp_read_bios_from_vram(struct radeon_device *rdev) +{ + uint8_t __iomem *bios; + resource_size_t vram_base; + resource_size_t size = 256 * 1024; /* ??? */ + + rdev->bios = NULL; + vram_base = drm_get_resource_start(rdev->ddev, 0); + bios = ioremap(vram_base, size); + if (!bios) { + DRM_ERROR("Unable to mmap vram\n"); + return false; + } + + if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { + iounmap(bios); + DRM_ERROR("bad rom signature\n"); + return false; + } + rdev->bios = kmalloc(size, GFP_KERNEL); + if (rdev->bios == NULL) { + iounmap(bios); + DRM_ERROR("kmalloc failed\n"); + return false; + } + memcpy(rdev->bios, bios, size); + iounmap(bios); + return true; +} + static bool radeon_read_bios(struct radeon_device *rdev) { uint8_t __iomem *bios; size_t size; rdev->bios = NULL; + /* XXX: some cards may return 0 for rom size? 
ddx has a workaround */ bios = pci_map_rom(rdev->pdev, &size); if (!bios) { return false; @@ -341,7 +379,9 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev) static bool radeon_read_disabled_bios(struct radeon_device *rdev) { - if (rdev->family >= CHIP_RV770) + if (rdev->flags & RADEON_IS_IGP) + return igp_read_bios_from_vram(rdev); + else if (rdev->family >= CHIP_RV770) return r700_read_disabled_bios(rdev); else if (rdev->family >= CHIP_R600) return r600_read_disabled_bios(rdev); @@ -356,7 +396,12 @@ bool radeon_get_bios(struct radeon_device *rdev) bool r; uint16_t tmp; - r = radeon_read_bios(rdev); + if (rdev->flags & RADEON_IS_IGP) { + r = igp_read_bios_from_vram(rdev); + if (r == false) + r = radeon_read_bios(rdev); + } else + r = radeon_read_bios(rdev); if (r == false) { r = radeon_read_disabled_bios(rdev); } diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c index 152eef13197..f5c32a766b1 100644 --- a/drivers/gpu/drm/radeon/radeon_clocks.c +++ b/drivers/gpu/drm/radeon/radeon_clocks.c @@ -411,7 +411,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable) R300_PIXCLK_TRANS_ALWAYS_ONb | R300_PIXCLK_TVO_ALWAYS_ONb | R300_P2G2CLK_ALWAYS_ONb | - R300_P2G2CLK_ALWAYS_ONb); + R300_P2G2CLK_DAC_ALWAYS_ONb); WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); } else if (rdev->family >= CHIP_RV350) { tmp = RREG32_PLL(R300_SCLK_CNTL2); @@ -464,7 +464,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable) R300_PIXCLK_TRANS_ALWAYS_ONb | R300_PIXCLK_TVO_ALWAYS_ONb | R300_P2G2CLK_ALWAYS_ONb | - R300_P2G2CLK_ALWAYS_ONb); + R300_P2G2CLK_DAC_ALWAYS_ONb); WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); tmp = RREG32_PLL(RADEON_MCLK_MISC); @@ -654,7 +654,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable) R300_PIXCLK_TRANS_ALWAYS_ONb | R300_PIXCLK_TVO_ALWAYS_ONb | R300_P2G2CLK_ALWAYS_ONb | - R300_P2G2CLK_ALWAYS_ONb | + R300_P2G2CLK_DAC_ALWAYS_ONb | R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF); WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); } else if (rdev->family >= CHIP_RV350) { @@ -705,7 +705,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable) R300_PIXCLK_TRANS_ALWAYS_ONb | R300_PIXCLK_TVO_ALWAYS_ONb | R300_P2G2CLK_ALWAYS_ONb | - R300_P2G2CLK_ALWAYS_ONb | + R300_P2G2CLK_DAC_ALWAYS_ONb | R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF); WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); } else { diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index ec835d56d30..3d667031de6 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -322,10 +322,6 @@ int radeon_asic_init(struct radeon_device *rdev) case CHIP_RV380: rdev->asic = &r300_asic; if (rdev->flags & RADEON_IS_PCIE) { - rdev->asic->gart_init = &rv370_pcie_gart_init; - rdev->asic->gart_fini = &rv370_pcie_gart_fini; - rdev->asic->gart_enable = &rv370_pcie_gart_enable; - rdev->asic->gart_disable = &rv370_pcie_gart_disable; rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; } @@ -485,7 +481,6 @@ void radeon_combios_fini(struct radeon_device *rdev) static unsigned int radeon_vga_set_decode(void *cookie, bool state) { struct radeon_device *rdev = cookie; - radeon_vga_set_state(rdev, state); if (state) return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | @@ -493,6 +488,29 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state) else return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; } + +void 
radeon_agp_disable(struct radeon_device *rdev) +{ + rdev->flags &= ~RADEON_IS_AGP; + if (rdev->family >= CHIP_R600) { + DRM_INFO("Forcing AGP to PCIE mode\n"); + rdev->flags |= RADEON_IS_PCIE; + } else if (rdev->family >= CHIP_RV515 || + rdev->family == CHIP_RV380 || + rdev->family == CHIP_RV410 || + rdev->family == CHIP_R423) { + DRM_INFO("Forcing AGP to PCIE mode\n"); + rdev->flags |= RADEON_IS_PCIE; + rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; + rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; + } else { + DRM_INFO("Forcing AGP to PCI mode\n"); + rdev->flags |= RADEON_IS_PCI; + rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush; + rdev->asic->gart_set_page = &r100_pci_gart_set_page; + } +} + /* * Radeon device. */ @@ -531,32 +549,7 @@ int radeon_device_init(struct radeon_device *rdev, } if (radeon_agpmode == -1) { - rdev->flags &= ~RADEON_IS_AGP; - if (rdev->family >= CHIP_R600) { - DRM_INFO("Forcing AGP to PCIE mode\n"); - rdev->flags |= RADEON_IS_PCIE; - } else if (rdev->family >= CHIP_RV515 || - rdev->family == CHIP_RV380 || - rdev->family == CHIP_RV410 || - rdev->family == CHIP_R423) { - DRM_INFO("Forcing AGP to PCIE mode\n"); - rdev->flags |= RADEON_IS_PCIE; - rdev->asic->gart_init = &rv370_pcie_gart_init; - rdev->asic->gart_fini = &rv370_pcie_gart_fini; - rdev->asic->gart_enable = &rv370_pcie_gart_enable; - rdev->asic->gart_disable = &rv370_pcie_gart_disable; - rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; - rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; - } else { - DRM_INFO("Forcing AGP to PCI mode\n"); - rdev->flags |= RADEON_IS_PCI; - rdev->asic->gart_init = &r100_pci_gart_init; - rdev->asic->gart_fini = &r100_pci_gart_fini; - rdev->asic->gart_enable = &r100_pci_gart_enable; - rdev->asic->gart_disable = &r100_pci_gart_disable; - rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush; - rdev->asic->gart_set_page = &r100_pci_gart_set_page; - } + radeon_agp_disable(rdev); } /* set DMA mask + need_dma32 flags. @@ -588,111 +581,27 @@ int radeon_device_init(struct radeon_device *rdev, DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base); DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size); - rdev->new_init_path = false; - r = radeon_init(rdev); - if (r) { - return r; - } - /* if we have > 1 VGA cards, then disable the radeon VGA resources */ r = vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); if (r) { return -EINVAL; } - if (!rdev->new_init_path) { - /* Setup errata flags */ - radeon_errata(rdev); - /* Initialize scratch registers */ - radeon_scratch_init(rdev); - /* Initialize surface registers */ - radeon_surface_init(rdev); - - /* BIOS*/ - if (!radeon_get_bios(rdev)) { - if (ASIC_IS_AVIVO(rdev)) - return -EINVAL; - } - if (rdev->is_atom_bios) { - r = radeon_atombios_init(rdev); - if (r) { - return r; - } - } else { - r = radeon_combios_init(rdev); - if (r) { - return r; - } - } - /* Reset gpu before posting otherwise ATOM will enter infinite loop */ - if (radeon_gpu_reset(rdev)) { - /* FIXME: what do we want to do here ? */ - } - /* check if cards are posted or not */ - if (!radeon_card_posted(rdev) && rdev->bios) { - DRM_INFO("GPU not posted. 
posting now...\n"); - if (rdev->is_atom_bios) { - atom_asic_init(rdev->mode_info.atom_context); - } else { - radeon_combios_asic_init(rdev->ddev); - } - } - /* Get clock & vram information */ - radeon_get_clock_info(rdev->ddev); - radeon_vram_info(rdev); - /* Initialize clocks */ - r = radeon_clocks_init(rdev); - if (r) { - return r; - } + r = radeon_init(rdev); + if (r) + return r; - /* Initialize memory controller (also test AGP) */ - r = radeon_mc_init(rdev); - if (r) { - return r; - } - /* Fence driver */ - r = radeon_fence_driver_init(rdev); - if (r) { - return r; - } - r = radeon_irq_kms_init(rdev); - if (r) { - return r; - } - /* Memory manager */ - r = radeon_object_init(rdev); - if (r) { - return r; - } - r = radeon_gpu_gart_init(rdev); + if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) { + /* Acceleration not working on AGP card try again + * with fallback to PCI or PCIE GART + */ + radeon_gpu_reset(rdev); + radeon_fini(rdev); + radeon_agp_disable(rdev); + r = radeon_init(rdev); if (r) return r; - /* Initialize GART (initialize after TTM so we can allocate - * memory through TTM but finalize after TTM) */ - r = radeon_gart_enable(rdev); - if (r) - return 0; - r = radeon_gem_init(rdev); - if (r) - return 0; - - /* 1M ring buffer */ - r = radeon_cp_init(rdev, 1024 * 1024); - if (r) - return 0; - r = radeon_wb_init(rdev); - if (r) - DRM_ERROR("radeon: failled initializing WB (%d).\n", r); - r = radeon_ib_pool_init(rdev); - if (r) - return 0; - r = radeon_ib_test(rdev); - if (r) - return 0; - rdev->accel_working = true; } - DRM_INFO("radeon: kernel modesetting successfully initialized.\n"); if (radeon_testing) { radeon_test_moves(rdev); } @@ -706,32 +615,8 @@ void radeon_device_fini(struct radeon_device *rdev) { DRM_INFO("radeon: finishing device.\n"); rdev->shutdown = true; - /* Order matter so becarefull if you rearrange anythings */ - if (!rdev->new_init_path) { - radeon_ib_pool_fini(rdev); - radeon_cp_fini(rdev); - radeon_wb_fini(rdev); - radeon_gpu_gart_fini(rdev); - radeon_gem_fini(rdev); - radeon_mc_fini(rdev); -#if __OS_HAS_AGP - radeon_agp_fini(rdev); -#endif - radeon_irq_kms_fini(rdev); - vga_client_register(rdev->pdev, NULL, NULL, NULL); - radeon_fence_driver_fini(rdev); - radeon_clocks_fini(rdev); - radeon_object_fini(rdev); - if (rdev->is_atom_bios) { - radeon_atombios_fini(rdev); - } else { - radeon_combios_fini(rdev); - } - kfree(rdev->bios); - rdev->bios = NULL; - } else { - radeon_fini(rdev); - } + radeon_fini(rdev); + vga_client_register(rdev->pdev, NULL, NULL, NULL); iounmap(rdev->rmmio); rdev->rmmio = NULL; } @@ -771,14 +656,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) radeon_save_bios_scratch_regs(rdev); - if (!rdev->new_init_path) { - radeon_cp_disable(rdev); - radeon_gart_disable(rdev); - rdev->irq.sw_int = false; - radeon_irq_set(rdev); - } else { - radeon_suspend(rdev); - } + radeon_suspend(rdev); /* evict remaining vram memory */ radeon_object_evict_vram(rdev); @@ -797,7 +675,6 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) int radeon_resume_kms(struct drm_device *dev) { struct radeon_device *rdev = dev->dev_private; - int r; acquire_console_sem(); pci_set_power_state(dev->pdev, PCI_D0); @@ -807,43 +684,7 @@ int radeon_resume_kms(struct drm_device *dev) return -1; } pci_set_master(dev->pdev); - /* Reset gpu before posting otherwise ATOM will enter infinite loop */ - if (!rdev->new_init_path) { - if (radeon_gpu_reset(rdev)) { - /* FIXME: what do we want to do here ? 
*/ - } - /* post card */ - if (rdev->is_atom_bios) { - atom_asic_init(rdev->mode_info.atom_context); - } else { - radeon_combios_asic_init(rdev->ddev); - } - /* Initialize clocks */ - r = radeon_clocks_init(rdev); - if (r) { - release_console_sem(); - return r; - } - /* Enable IRQ */ - rdev->irq.sw_int = true; - radeon_irq_set(rdev); - /* Initialize GPU Memory Controller */ - r = radeon_mc_init(rdev); - if (r) { - goto out; - } - r = radeon_gart_enable(rdev); - if (r) { - goto out; - } - r = radeon_cp_init(rdev, rdev->cp.ring_size); - if (r) { - goto out; - } - } else { - radeon_resume(rdev); - } -out: + radeon_resume(rdev); radeon_restore_bios_scratch_regs(rdev); fb_set_suspend(rdev->fbdev_info, 0); release_console_sem(); diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 5d8141b1376..3655d91993a 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -106,24 +106,33 @@ void radeon_crtc_load_lut(struct drm_crtc *crtc) legacy_crtc_load_lut(crtc); } -/** Sets the color ramps on behalf of RandR */ +/** Sets the color ramps on behalf of fbcon */ void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, int regno) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); - if (regno == 0) - DRM_DEBUG("gamma set %d\n", radeon_crtc->crtc_id); radeon_crtc->lut_r[regno] = red >> 6; radeon_crtc->lut_g[regno] = green >> 6; radeon_crtc->lut_b[regno] = blue >> 6; } +/** Gets the color ramps on behalf of fbcon */ +void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, int regno) +{ + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + + *red = radeon_crtc->lut_r[regno] << 6; + *green = radeon_crtc->lut_g[regno] << 6; + *blue = radeon_crtc->lut_b[regno] << 6; +} + static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, uint32_t size) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); - int i, j; + int i; if (size != 256) { return; @@ -132,23 +141,11 @@ static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, return; } - if (crtc->fb->depth == 16) { - for (i = 0; i < 64; i++) { - if (i <= 31) { - for (j = 0; j < 8; j++) { - radeon_crtc->lut_r[i * 8 + j] = red[i] >> 6; - radeon_crtc->lut_b[i * 8 + j] = blue[i] >> 6; - } - } - for (j = 0; j < 4; j++) - radeon_crtc->lut_g[i * 4 + j] = green[i] >> 6; - } - } else { - for (i = 0; i < 256; i++) { - radeon_crtc->lut_r[i] = red[i] >> 6; - radeon_crtc->lut_g[i] = green[i] >> 6; - radeon_crtc->lut_b[i] = blue[i] >> 6; - } + /* userspace palettes are always correct as is */ + for (i = 0; i < 256; i++) { + radeon_crtc->lut_r[i] = red[i] >> 6; + radeon_crtc->lut_g[i] = green[i] >> 6; + radeon_crtc->lut_b[i] = blue[i] >> 6; } radeon_crtc_load_lut(crtc); @@ -724,7 +721,11 @@ int radeon_modeset_init(struct radeon_device *rdev) if (ret) { return ret; } - /* allocate crtcs - TODO single crtc */ + + if (rdev->flags & RADEON_SINGLE_CRTC) + num_crtc = 1; + + /* allocate crtcs */ for (i = 0; i < num_crtc; i++) { radeon_crtc_init(rdev->ddev, i); } diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 621646752cd..a65ab1a0dad 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -1345,6 +1345,7 @@ radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder) void radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t 
supported_device) { + struct radeon_device *rdev = dev->dev_private; struct drm_encoder *encoder; struct radeon_encoder *radeon_encoder; @@ -1364,7 +1365,10 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su return; encoder = &radeon_encoder->base; - encoder->possible_crtcs = 0x3; + if (rdev->flags & RADEON_SINGLE_CRTC) + encoder->possible_crtcs = 0x1; + else + encoder->possible_crtcs = 0x3; encoder->possible_clones = 0; radeon_encoder->enc_priv = NULL; diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 1ba704eedef..b38c4c8e2c6 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -55,6 +55,7 @@ static struct fb_ops radeonfb_ops = { .fb_imageblit = cfb_imageblit, .fb_pan_display = drm_fb_helper_pan_display, .fb_blank = drm_fb_helper_blank, + .fb_setcmap = drm_fb_helper_setcmap, }; /** @@ -123,6 +124,7 @@ static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bo static struct drm_fb_helper_funcs radeon_fb_helper_funcs = { .gamma_set = radeon_crtc_fb_gamma_set, + .gamma_get = radeon_crtc_fb_gamma_get, }; int radeonfb_create(struct drm_device *dev, @@ -146,9 +148,15 @@ int radeonfb_create(struct drm_device *dev, unsigned long tmp; bool fb_tiled = false; /* useful for testing */ u32 tiling_flags = 0; + int crtc_count; mode_cmd.width = surface_width; mode_cmd.height = surface_height; + + /* avivo can't scanout real 24bpp */ + if ((surface_bpp == 24) && ASIC_IS_AVIVO(rdev)) + surface_bpp = 32; + mode_cmd.bpp = surface_bpp; /* need to align pitch with crtc limits */ mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8); @@ -217,7 +225,11 @@ int radeonfb_create(struct drm_device *dev, rfbdev = info->par; rfbdev->helper.funcs = &radeon_fb_helper_funcs; rfbdev->helper.dev = dev; - ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, 2, + if (rdev->flags & RADEON_SINGLE_CRTC) + crtc_count = 1; + else + crtc_count = 2; + ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, crtc_count, RADEONFB_CONN_LIMIT); if (ret) goto out_unref; @@ -234,7 +246,7 @@ int radeonfb_create(struct drm_device *dev, strcpy(info->fix.id, "radeondrmfb"); - drm_fb_helper_fill_fix(info, fb->pitch); + drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); info->flags = FBINFO_DEFAULT; info->fbops = &radeonfb_ops; @@ -309,7 +321,7 @@ int radeon_parse_options(char *options) int radeonfb_probe(struct drm_device *dev) { - return drm_fb_helper_single_fb_probe(dev, &radeonfb_create); + return drm_fb_helper_single_fb_probe(dev, 32, &radeonfb_create); } int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 1841145a7c4..8e0a8759e42 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -83,8 +83,12 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev) int radeon_irq_kms_init(struct radeon_device *rdev) { int r = 0; + int num_crtc = 2; - r = drm_vblank_init(rdev->ddev, 2); + if (rdev->flags & RADEON_SINGLE_CRTC) + num_crtc = 1; + + r = drm_vblank_init(rdev->ddev, num_crtc); if (r) { return r; } diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 2b997a15fb1..36410f85d70 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -1053,6 +1053,7 @@ static const struct 
drm_crtc_helper_funcs legacy_helper_funcs = { .mode_set_base = radeon_crtc_set_base, .prepare = radeon_crtc_prepare, .commit = radeon_crtc_commit, + .load_lut = radeon_crtc_load_lut, }; diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index b1547f700d7..6ceb958fd19 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c @@ -881,7 +881,7 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder, R420_TV_DAC_DACADJ_MASK | R420_TV_DAC_RDACPD | R420_TV_DAC_GDACPD | - R420_TV_DAC_GDACPD | + R420_TV_DAC_BDACPD | R420_TV_DAC_TVENABLE); } else { tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK | @@ -889,7 +889,7 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder, RADEON_TV_DAC_DACADJ_MASK | RADEON_TV_DAC_RDACPD | RADEON_TV_DAC_GDACPD | - RADEON_TV_DAC_GDACPD); + RADEON_TV_DAC_BDACPD); } /* FIXME TV */ @@ -1318,7 +1318,10 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t return; encoder = &radeon_encoder->base; - encoder->possible_crtcs = 0x3; + if (rdev->flags & RADEON_SINGLE_CRTC) + encoder->possible_crtcs = 0x1; + else + encoder->possible_crtcs = 0x3; encoder->possible_clones = 0; radeon_encoder->enc_priv = NULL; diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 570a58729da..e61226817cc 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -407,6 +407,8 @@ extern void radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on); extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, int regno); +extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, int regno); struct drm_framebuffer *radeon_framebuffer_create(struct drm_device *dev, struct drm_mode_fb_cmd *mode_cmd, struct drm_gem_object *obj); diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 73af463b7a5..1f056dadc5c 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -400,11 +400,9 @@ void radeon_object_list_add_object(struct radeon_object_list *lobj, int radeon_object_list_reserve(struct list_head *head) { struct radeon_object_list *lobj; - struct list_head *i; int r; - list_for_each(i, head) { - lobj = list_entry(i, struct radeon_object_list, list); + list_for_each_entry(lobj, head, list){ if (!lobj->robj->pin_count) { r = radeon_object_reserve(lobj->robj, true); if (unlikely(r != 0)) { @@ -420,13 +418,10 @@ int radeon_object_list_reserve(struct list_head *head) void radeon_object_list_unreserve(struct list_head *head) { struct radeon_object_list *lobj; - struct list_head *i; - list_for_each(i, head) { - lobj = list_entry(i, struct radeon_object_list, list); + list_for_each_entry(lobj, head, list) { if (!lobj->robj->pin_count) { radeon_object_unreserve(lobj->robj); - } else { } } } @@ -436,7 +431,6 @@ int radeon_object_list_validate(struct list_head *head, void *fence) struct radeon_object_list *lobj; struct radeon_object *robj; struct radeon_fence *old_fence = NULL; - struct list_head *i; int r; r = radeon_object_list_reserve(head); @@ -444,8 +438,7 @@ int radeon_object_list_validate(struct list_head *head, void *fence) radeon_object_list_unreserve(head); return r; } - list_for_each(i, head) { - lobj = list_entry(i, struct radeon_object_list, list); + list_for_each_entry(lobj, head, list) 
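/* Throughout radeon_object.c the open-coded iterator pattern
 *     struct list_head *i;
 *     list_for_each(i, head)
 *         lobj = list_entry(i, struct radeon_object_list, list);
 * is replaced with list_for_each_entry(lobj, head, list), which walks the
 * list and performs the container_of() conversion in one step; the temporary
 * struct list_head *i and the empty else branch in the unreserve path go
 * away with it.
 */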
{ robj = lobj->robj; if (!robj->pin_count) { if (lobj->wdomain) { @@ -482,10 +475,8 @@ void radeon_object_list_unvalidate(struct list_head *head) { struct radeon_object_list *lobj; struct radeon_fence *old_fence = NULL; - struct list_head *i; - list_for_each(i, head) { - lobj = list_entry(i, struct radeon_object_list, list); + list_for_each_entry(lobj, head, list) { old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj; lobj->robj->tobj.sync_obj = NULL; if (old_fence) { diff --git a/drivers/gpu/drm/radeon/rs100d.h b/drivers/gpu/drm/radeon/rs100d.h new file mode 100644 index 00000000000..48a913a06cf --- /dev/null +++ b/drivers/gpu/drm/radeon/rs100d.h @@ -0,0 +1,40 @@ +/* + * Copyright 2008 Advanced Micro Devices, Inc. + * Copyright 2008 Red Hat Inc. + * Copyright 2009 Jerome Glisse. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Dave Airlie + * Alex Deucher + * Jerome Glisse + */ +#ifndef __RS100D_H__ +#define __RS100D_H__ + +/* Registers */ +#define R_00015C_NB_TOM 0x00015C +#define S_00015C_MC_FB_START(x) (((x) & 0xFFFF) << 0) +#define G_00015C_MC_FB_START(x) (((x) >> 0) & 0xFFFF) +#define C_00015C_MC_FB_START 0xFFFF0000 +#define S_00015C_MC_FB_TOP(x) (((x) & 0xFFFF) << 16) +#define G_00015C_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF) +#define C_00015C_MC_FB_TOP 0x0000FFFF + +#endif diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index a3fbdad938c..a769c296f6a 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c @@ -27,27 +27,12 @@ */ #include <linux/seq_file.h> #include <drm/drmP.h> -#include "radeon_reg.h" #include "radeon.h" +#include "rs400d.h" -/* rs400,rs480 depends on : */ -void r100_hdp_reset(struct radeon_device *rdev); -void r100_mc_disable_clients(struct radeon_device *rdev); -int r300_mc_wait_for_idle(struct radeon_device *rdev); -void r420_pipes_init(struct radeon_device *rdev); +/* This files gather functions specifics to : rs400,rs480 */ +static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev); -/* This files gather functions specifics to : - * rs400,rs480 - * - * Some of these functions might be used by newer ASICs. - */ -void rs400_gpu_init(struct radeon_device *rdev); -int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev); - - -/* - * GART functions. 
- */ void rs400_gart_adjust_size(struct radeon_device *rdev) { /* Check gart size */ @@ -238,61 +223,6 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) return 0; } - -/* - * MC functions. - */ -int rs400_mc_init(struct radeon_device *rdev) -{ - uint32_t tmp; - int r; - - if (r100_debugfs_rbbm_init(rdev)) { - DRM_ERROR("Failed to register debugfs file for RBBM !\n"); - } - - rs400_gpu_init(rdev); - rs400_gart_disable(rdev); - rdev->mc.gtt_location = rdev->mc.mc_vram_size; - rdev->mc.gtt_location += (rdev->mc.gtt_size - 1); - rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1); - r = radeon_mc_setup(rdev); - if (r) { - return r; - } - - r100_mc_disable_clients(rdev); - if (r300_mc_wait_for_idle(rdev)) { - printk(KERN_WARNING "Failed to wait MC idle while " - "programming pipes. Bad things might happen.\n"); - } - - tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; - tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16); - tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16); - WREG32(RADEON_MC_FB_LOCATION, tmp); - tmp = RREG32(RADEON_HOST_PATH_CNTL) | RADEON_HP_LIN_RD_CACHE_DIS; - WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE); - (void)RREG32(RADEON_HOST_PATH_CNTL); - WREG32(RADEON_HOST_PATH_CNTL, tmp); - (void)RREG32(RADEON_HOST_PATH_CNTL); - - return 0; -} - -void rs400_mc_fini(struct radeon_device *rdev) -{ -} - - -/* - * Global GPU functions - */ -void rs400_errata(struct radeon_device *rdev) -{ - rdev->pll_errata = 0; -} - void rs400_gpu_init(struct radeon_device *rdev) { /* FIXME: HDP same place on rs400 ? */ @@ -305,10 +235,6 @@ void rs400_gpu_init(struct radeon_device *rdev) } } - -/* - * VRAM info. - */ void rs400_vram_info(struct radeon_device *rdev) { rs400_gart_adjust_size(rdev); @@ -319,10 +245,6 @@ void rs400_vram_info(struct radeon_device *rdev) r100_vram_init_sizes(rdev); } - -/* - * Indirect registers accessor - */ uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg) { uint32_t r; @@ -340,10 +262,6 @@ void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) WREG32(RS480_NB_MC_INDEX, 0xff); } - -/* - * Debugfs info - */ #if defined(CONFIG_DEBUG_FS) static int rs400_debugfs_gart_info(struct seq_file *m, void *data) { @@ -419,7 +337,7 @@ static struct drm_info_list rs400_gart_info_list[] = { }; #endif -int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev) +static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev) { #if defined(CONFIG_DEBUG_FS) return radeon_debugfs_add_files(rdev, rs400_gart_info_list, 1); @@ -427,3 +345,188 @@ int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev) return 0; #endif } + +static int rs400_mc_init(struct radeon_device *rdev) +{ + int r; + u32 tmp; + + /* Setup GPU memory space */ + tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM)); + rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16; + rdev->mc.gtt_location = 0xFFFFFFFFUL; + r = radeon_mc_setup(rdev); + if (r) + return r; + return 0; +} + +void rs400_mc_program(struct radeon_device *rdev) +{ + struct r100_mc_save save; + + /* Stops all mc clients */ + r100_mc_stop(rdev, &save); + + /* Wait for mc idle */ + if (r300_mc_wait_for_idle(rdev)) + dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); + WREG32(R_000148_MC_FB_LOCATION, + S_000148_MC_FB_START(rdev->mc.vram_start >> 16) | + S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); + + r100_mc_resume(rdev, &save); +} + +static int rs400_startup(struct radeon_device *rdev) +{ + int r; + 
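/* rs400_startup() below brings the chip up in the order used by the other
 * converted ASICs in this series: program the memory controller, restart
 * clocks, run rs400_gpu_init(), enable the GART, enable the software
 * interrupt, then bring up the 1MB CP ring, writeback and the IB pool.
 * A writeback failure is only reported, while CP or IB failures abort
 * startup.
 */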
+ rs400_mc_program(rdev); + /* Resume clock */ + r300_clock_startup(rdev); + /* Initialize GPU configuration (# pipes, ...) */ + rs400_gpu_init(rdev); + /* Initialize GART (initialize after TTM so we can allocate + * memory through TTM but finalize after TTM) */ + r = rs400_gart_enable(rdev); + if (r) + return r; + /* Enable IRQ */ + rdev->irq.sw_int = true; + r100_irq_set(rdev); + /* 1M ring buffer */ + r = r100_cp_init(rdev, 1024 * 1024); + if (r) { + dev_err(rdev->dev, "failled initializing CP (%d).\n", r); + return r; + } + r = r100_wb_init(rdev); + if (r) + dev_err(rdev->dev, "failled initializing WB (%d).\n", r); + r = r100_ib_init(rdev); + if (r) { + dev_err(rdev->dev, "failled initializing IB (%d).\n", r); + return r; + } + return 0; +} + +int rs400_resume(struct radeon_device *rdev) +{ + /* Make sur GART are not working */ + rs400_gart_disable(rdev); + /* Resume clock before doing reset */ + r300_clock_startup(rdev); + /* Reset gpu before posting otherwise ATOM will enter infinite loop */ + if (radeon_gpu_reset(rdev)) { + dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", + RREG32(R_000E40_RBBM_STATUS), + RREG32(R_0007C0_CP_STAT)); + } + /* post */ + radeon_combios_asic_init(rdev->ddev); + /* Resume clock after posting */ + r300_clock_startup(rdev); + return rs400_startup(rdev); +} + +int rs400_suspend(struct radeon_device *rdev) +{ + r100_cp_disable(rdev); + r100_wb_disable(rdev); + r100_irq_disable(rdev); + rs400_gart_disable(rdev); + return 0; +} + +void rs400_fini(struct radeon_device *rdev) +{ + rs400_suspend(rdev); + r100_cp_fini(rdev); + r100_wb_fini(rdev); + r100_ib_fini(rdev); + radeon_gem_fini(rdev); + rs400_gart_fini(rdev); + radeon_irq_kms_fini(rdev); + radeon_fence_driver_fini(rdev); + radeon_object_fini(rdev); + radeon_atombios_fini(rdev); + kfree(rdev->bios); + rdev->bios = NULL; +} + +int rs400_init(struct radeon_device *rdev) +{ + int r; + + /* Disable VGA */ + r100_vga_render_disable(rdev); + /* Initialize scratch registers */ + radeon_scratch_init(rdev); + /* Initialize surface registers */ + radeon_surface_init(rdev); + /* TODO: disable VGA need to use VGA request */ + /* BIOS*/ + if (!radeon_get_bios(rdev)) { + if (ASIC_IS_AVIVO(rdev)) + return -EINVAL; + } + if (rdev->is_atom_bios) { + dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n"); + return -EINVAL; + } else { + r = radeon_combios_init(rdev); + if (r) + return r; + } + /* Reset gpu before posting otherwise ATOM will enter infinite loop */ + if (radeon_gpu_reset(rdev)) { + dev_warn(rdev->dev, + "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", + RREG32(R_000E40_RBBM_STATUS), + RREG32(R_0007C0_CP_STAT)); + } + /* check if cards are posted or not */ + if (!radeon_card_posted(rdev) && rdev->bios) { + DRM_INFO("GPU not posted. 
posting now...\n"); + radeon_combios_asic_init(rdev->ddev); + } + /* Initialize clocks */ + radeon_get_clock_info(rdev->ddev); + /* Get vram informations */ + rs400_vram_info(rdev); + /* Initialize memory controller (also test AGP) */ + r = rs400_mc_init(rdev); + if (r) + return r; + /* Fence driver */ + r = radeon_fence_driver_init(rdev); + if (r) + return r; + r = radeon_irq_kms_init(rdev); + if (r) + return r; + /* Memory manager */ + r = radeon_object_init(rdev); + if (r) + return r; + r = rs400_gart_init(rdev); + if (r) + return r; + r300_set_reg_safe(rdev); + rdev->accel_working = true; + r = rs400_startup(rdev); + if (r) { + /* Somethings want wront with the accel init stop accel */ + dev_err(rdev->dev, "Disabling GPU acceleration\n"); + rs400_suspend(rdev); + r100_cp_fini(rdev); + r100_wb_fini(rdev); + r100_ib_fini(rdev); + rs400_gart_fini(rdev); + radeon_irq_kms_fini(rdev); + rdev->accel_working = false; + } + return 0; +} diff --git a/drivers/gpu/drm/radeon/rs400d.h b/drivers/gpu/drm/radeon/rs400d.h new file mode 100644 index 00000000000..6d8bac58ced --- /dev/null +++ b/drivers/gpu/drm/radeon/rs400d.h @@ -0,0 +1,160 @@ +/* + * Copyright 2008 Advanced Micro Devices, Inc. + * Copyright 2008 Red Hat Inc. + * Copyright 2009 Jerome Glisse. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Dave Airlie + * Alex Deucher + * Jerome Glisse + */ +#ifndef __RS400D_H__ +#define __RS400D_H__ + +/* Registers */ +#define R_000148_MC_FB_LOCATION 0x000148 +#define S_000148_MC_FB_START(x) (((x) & 0xFFFF) << 0) +#define G_000148_MC_FB_START(x) (((x) >> 0) & 0xFFFF) +#define C_000148_MC_FB_START 0xFFFF0000 +#define S_000148_MC_FB_TOP(x) (((x) & 0xFFFF) << 16) +#define G_000148_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF) +#define C_000148_MC_FB_TOP 0x0000FFFF +#define R_00015C_NB_TOM 0x00015C +#define S_00015C_MC_FB_START(x) (((x) & 0xFFFF) << 0) +#define G_00015C_MC_FB_START(x) (((x) >> 0) & 0xFFFF) +#define C_00015C_MC_FB_START 0xFFFF0000 +#define S_00015C_MC_FB_TOP(x) (((x) & 0xFFFF) << 16) +#define G_00015C_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF) +#define C_00015C_MC_FB_TOP 0x0000FFFF +#define R_0007C0_CP_STAT 0x0007C0 +#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0) +#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1) +#define C_0007C0_MRU_BUSY 0xFFFFFFFE +#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1) +#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1) +#define C_0007C0_MWU_BUSY 0xFFFFFFFD +#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2) +#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1) +#define C_0007C0_RSIU_BUSY 0xFFFFFFFB +#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3) +#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1) +#define C_0007C0_RCIU_BUSY 0xFFFFFFF7 +#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9) +#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1) +#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF +#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10) +#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1) +#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF +#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11) +#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1) +#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF +#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12) +#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1) +#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF +#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13) +#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1) +#define C_0007C0_CSI_BUSY 0xFFFFDFFF +#define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14) +#define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1) +#define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF +#define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15) +#define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1) +#define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF +#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28) +#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1) +#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF +#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29) +#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1) +#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF +#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30) +#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1) +#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF +#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31) +#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1) +#define C_0007C0_CP_BUSY 0x7FFFFFFF +#define R_000E40_RBBM_STATUS 0x000E40 +#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0) +#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F) +#define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80 +#define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8) +#define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1) +#define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF +#define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9) +#define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1) +#define 
C_000E40_CPRQ_ON_RBB 0xFFFFFDFF +#define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10) +#define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1) +#define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF +#define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11) +#define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1) +#define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF +#define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12) +#define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1) +#define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF +#define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13) +#define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1) +#define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF +#define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14) +#define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1) +#define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF +#define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15) +#define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1) +#define C_000E40_ENG_EV_BUSY 0xFFFF7FFF +#define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16) +#define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1) +#define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF +#define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17) +#define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1) +#define C_000E40_E2_BUSY 0xFFFDFFFF +#define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18) +#define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1) +#define C_000E40_RB2D_BUSY 0xFFFBFFFF +#define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19) +#define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1) +#define C_000E40_RB3D_BUSY 0xFFF7FFFF +#define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20) +#define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1) +#define C_000E40_VAP_BUSY 0xFFEFFFFF +#define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21) +#define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1) +#define C_000E40_RE_BUSY 0xFFDFFFFF +#define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22) +#define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1) +#define C_000E40_TAM_BUSY 0xFFBFFFFF +#define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23) +#define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1) +#define C_000E40_TDM_BUSY 0xFF7FFFFF +#define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24) +#define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1) +#define C_000E40_PB_BUSY 0xFEFFFFFF +#define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25) +#define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1) +#define C_000E40_TIM_BUSY 0xFDFFFFFF +#define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26) +#define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1) +#define C_000E40_GA_BUSY 0xFBFFFFFF +#define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27) +#define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1) +#define C_000E40_CBA2D_BUSY 0xF7FFFFFF +#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31) +#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) +#define C_000E40_GUI_ACTIVE 0x7FFFFFFF + +#endif diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 4a4fe1cb131..10dfa78762d 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -25,27 +25,26 @@ * Alex Deucher * Jerome Glisse */ +/* RS600 / Radeon X1250/X1270 integrated GPU + * + * This file gather function specific to RS600 which is the IGP of + * the X1250/X1270 family supporting intel CPU (while RS690/RS740 + * is the X1250/X1270 supporting AMD CPU). The display engine are + * the avivo one, bios is an atombios, 3D block are the one of the + * R4XX family. The GART is different from the RS400 one and is very + * close to the one of the R600 family (R600 likely being an evolution + * of the RS600 GART block). 
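The register headers introduced by this patch (rs400d.h above, rs600d.h and rs690d.h below) share one naming convention: S_<reg>_<field>(x) shifts a value into its field, G_<reg>_<field>(x) extracts the field from a register word, and C_<reg>_<field> is the AND-mask that clears the field for read-modify-write updates (as rs600_gart_tlb_flush does further down with `tmp &= C_000100_INVALIDATE_L2_CACHE`). A minimal user-space sketch of the pattern, with the macro bodies copied from the rs400d.h hunk above and a made-up aperture used purely for illustration:

#include <stdio.h>
#include <stdint.h>

/* Copied from the rs400d.h hunk above */
#define S_000148_MC_FB_START(x)   (((x) & 0xFFFF) << 0)
#define G_000148_MC_FB_START(x)   (((x) >> 0) & 0xFFFF)
#define S_000148_MC_FB_TOP(x)     (((x) & 0xFFFF) << 16)
#define G_000148_MC_FB_TOP(x)     (((x) >> 16) & 0xFFFF)

int main(void)
{
        /* Hypothetical aperture: 256MB of VRAM starting at address 0 */
        uint32_t vram_start = 0x00000000;
        uint32_t vram_end   = 0x0FFFFFFF;

        /* Pack the two 16-bit fields the same way rs400_mc_program does */
        uint32_t fb_location = S_000148_MC_FB_START(vram_start >> 16) |
                               S_000148_MC_FB_TOP(vram_end >> 16);

        /* Decode them back with the G_ accessors */
        printf("MC_FB_LOCATION = 0x%08x (start 0x%04x, top 0x%04x)\n",
               fb_location,
               G_000148_MC_FB_START(fb_location),
               G_000148_MC_FB_TOP(fb_location));
        return 0;
}

rs400_mc_program performs exactly this composition before writing R_000148_MC_FB_LOCATION.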
+ */ #include "drmP.h" -#include "radeon_reg.h" #include "radeon.h" +#include "atom.h" +#include "rs600d.h" #include "rs600_reg_safe.h" -/* rs600 depends on : */ -void r100_hdp_reset(struct radeon_device *rdev); -int r100_gui_wait_for_idle(struct radeon_device *rdev); -int r300_mc_wait_for_idle(struct radeon_device *rdev); -void r420_pipes_init(struct radeon_device *rdev); - -/* This files gather functions specifics to : - * rs600 - * - * Some of these functions might be used by newer ASICs. - */ void rs600_gpu_init(struct radeon_device *rdev); int rs600_mc_wait_for_idle(struct radeon_device *rdev); - /* * GART. */ @@ -53,18 +52,18 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev) { uint32_t tmp; - tmp = RREG32_MC(RS600_MC_PT0_CNTL); - tmp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE); - WREG32_MC(RS600_MC_PT0_CNTL, tmp); + tmp = RREG32_MC(R_000100_MC_PT0_CNTL); + tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE; + WREG32_MC(R_000100_MC_PT0_CNTL, tmp); - tmp = RREG32_MC(RS600_MC_PT0_CNTL); - tmp |= RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE; - WREG32_MC(RS600_MC_PT0_CNTL, tmp); + tmp = RREG32_MC(R_000100_MC_PT0_CNTL); + tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) & S_000100_INVALIDATE_L2_CACHE(1); + WREG32_MC(R_000100_MC_PT0_CNTL, tmp); - tmp = RREG32_MC(RS600_MC_PT0_CNTL); - tmp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE); - WREG32_MC(RS600_MC_PT0_CNTL, tmp); - tmp = RREG32_MC(RS600_MC_PT0_CNTL); + tmp = RREG32_MC(R_000100_MC_PT0_CNTL); + tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE; + WREG32_MC(R_000100_MC_PT0_CNTL, tmp); + tmp = RREG32_MC(R_000100_MC_PT0_CNTL); } int rs600_gart_init(struct radeon_device *rdev) @@ -86,7 +85,7 @@ int rs600_gart_init(struct radeon_device *rdev) int rs600_gart_enable(struct radeon_device *rdev) { - uint32_t tmp; + u32 tmp; int r, i; if (rdev->gart.table.vram.robj == NULL) { @@ -96,46 +95,50 @@ int rs600_gart_enable(struct radeon_device *rdev) r = radeon_gart_table_vram_pin(rdev); if (r) return r; + /* Enable bus master */ + tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS; + WREG32(R_00004C_BUS_CNTL, tmp); /* FIXME: setup default page */ - WREG32_MC(RS600_MC_PT0_CNTL, - (RS600_EFFECTIVE_L2_CACHE_SIZE(6) | - RS600_EFFECTIVE_L2_QUEUE_SIZE(6))); + WREG32_MC(R_000100_MC_PT0_CNTL, + (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) | + S_000100_EFFECTIVE_L2_QUEUE_SIZE(6))); for (i = 0; i < 19; i++) { - WREG32_MC(RS600_MC_PT0_CLIENT0_CNTL + i, - (RS600_ENABLE_TRANSLATION_MODE_OVERRIDE | - RS600_SYSTEM_ACCESS_MODE_IN_SYS | - RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE | - RS600_EFFECTIVE_L1_CACHE_SIZE(3) | - RS600_ENABLE_FRAGMENT_PROCESSING | - RS600_EFFECTIVE_L1_QUEUE_SIZE(3))); + WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i, + S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) | + S_00016C_SYSTEM_ACCESS_MODE_MASK( + V_00016C_SYSTEM_ACCESS_MODE_IN_SYS) | + S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS( + V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE) | + S_00016C_EFFECTIVE_L1_CACHE_SIZE(1) | + S_00016C_ENABLE_FRAGMENT_PROCESSING(1) | + S_00016C_EFFECTIVE_L1_QUEUE_SIZE(1)); } /* System context map to GART space */ - WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_location); - tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; - WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, tmp); + WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_start); + WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.gtt_end); /* enable first context */ - 
WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_location); - tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; - WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR, tmp); - WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL, - (RS600_ENABLE_PAGE_TABLE | RS600_PAGE_TABLE_TYPE_FLAT)); + WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start); + WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end); + WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL, + S_000102_ENABLE_PAGE_TABLE(1) | + S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT)); /* disable all other contexts */ for (i = 1; i < 8; i++) { - WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL + i, 0); + WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0); } /* setup the page table */ - WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR, - rdev->gart.table_addr); - WREG32_MC(RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0); + WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR, + rdev->gart.table_addr); + WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0); /* enable page tables */ - tmp = RREG32_MC(RS600_MC_PT0_CNTL); - WREG32_MC(RS600_MC_PT0_CNTL, (tmp | RS600_ENABLE_PT)); - tmp = RREG32_MC(RS600_MC_CNTL1); - WREG32_MC(RS600_MC_CNTL1, (tmp | RS600_ENABLE_PAGE_TABLES)); + tmp = RREG32_MC(R_000100_MC_PT0_CNTL); + WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1))); + tmp = RREG32_MC(R_000009_MC_CNTL1); + WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1))); rs600_gart_tlb_flush(rdev); rdev->gart.ready = true; return 0; @@ -146,10 +149,9 @@ void rs600_gart_disable(struct radeon_device *rdev) uint32_t tmp; /* FIXME: disable out of gart access */ - WREG32_MC(RS600_MC_PT0_CNTL, 0); - tmp = RREG32_MC(RS600_MC_CNTL1); - tmp &= ~RS600_ENABLE_PAGE_TABLES; - WREG32_MC(RS600_MC_CNTL1, tmp); + WREG32_MC(R_000100_MC_PT0_CNTL, 0); + tmp = RREG32_MC(R_000009_MC_CNTL1); + WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES); if (rdev->gart.table.vram.robj) { radeon_object_kunmap(rdev->gart.table.vram.robj); radeon_object_unpin(rdev->gart.table.vram.robj); @@ -183,129 +185,61 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) return 0; } - -/* - * MC. - */ -void rs600_mc_disable_clients(struct radeon_device *rdev) -{ - unsigned tmp; - - if (r100_gui_wait_for_idle(rdev)) { - printk(KERN_WARNING "Failed to wait GUI idle while " - "programming pipes. 
Bad things might happen.\n"); - } - - rv515_vga_render_disable(rdev); - - tmp = RREG32(AVIVO_D1VGA_CONTROL); - WREG32(AVIVO_D1VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE); - tmp = RREG32(AVIVO_D2VGA_CONTROL); - WREG32(AVIVO_D2VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE); - - tmp = RREG32(AVIVO_D1CRTC_CONTROL); - WREG32(AVIVO_D1CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN); - tmp = RREG32(AVIVO_D2CRTC_CONTROL); - WREG32(AVIVO_D2CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN); - - /* make sure all previous write got through */ - tmp = RREG32(AVIVO_D2CRTC_CONTROL); - - mdelay(1); -} - -int rs600_mc_init(struct radeon_device *rdev) -{ - uint32_t tmp; - int r; - - if (r100_debugfs_rbbm_init(rdev)) { - DRM_ERROR("Failed to register debugfs file for RBBM !\n"); - } - - rs600_gpu_init(rdev); - rs600_gart_disable(rdev); - - /* Setup GPU memory space */ - rdev->mc.vram_location = 0xFFFFFFFFUL; - rdev->mc.gtt_location = 0xFFFFFFFFUL; - r = radeon_mc_setup(rdev); - if (r) { - return r; - } - - /* Program GPU memory space */ - /* Enable bus master */ - tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS; - WREG32(RADEON_BUS_CNTL, tmp); - /* FIXME: What does AGP means for such chipset ? */ - WREG32_MC(RS600_MC_AGP_LOCATION, 0x0FFFFFFF); - /* FIXME: are this AGP reg in indirect MC range ? */ - WREG32_MC(RS600_MC_AGP_BASE, 0); - WREG32_MC(RS600_MC_AGP_BASE_2, 0); - rs600_mc_disable_clients(rdev); - if (rs600_mc_wait_for_idle(rdev)) { - printk(KERN_WARNING "Failed to wait MC idle while " - "programming pipes. Bad things might happen.\n"); - } - tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; - tmp = REG_SET(RS600_MC_FB_TOP, tmp >> 16); - tmp |= REG_SET(RS600_MC_FB_START, rdev->mc.vram_location >> 16); - WREG32_MC(RS600_MC_FB_LOCATION, tmp); - WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16); - return 0; -} - -void rs600_mc_fini(struct radeon_device *rdev) -{ -} - - -/* - * Interrupts - */ int rs600_irq_set(struct radeon_device *rdev) { uint32_t tmp = 0; uint32_t mode_int = 0; if (rdev->irq.sw_int) { - tmp |= RADEON_SW_INT_ENABLE; + tmp |= S_000040_SW_INT_EN(1); } if (rdev->irq.crtc_vblank_int[0]) { - mode_int |= AVIVO_D1MODE_INT_MASK; + mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1); } if (rdev->irq.crtc_vblank_int[1]) { - mode_int |= AVIVO_D2MODE_INT_MASK; + mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1); } - WREG32(RADEON_GEN_INT_CNTL, tmp); - WREG32(AVIVO_DxMODE_INT_MASK, mode_int); + WREG32(R_000040_GEN_INT_CNTL, tmp); + WREG32(R_006540_DxMODE_INT_MASK, mode_int); return 0; } static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int) { - uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS); - uint32_t irq_mask = RADEON_SW_INT_TEST; - - if (irqs & AVIVO_DISPLAY_INT_STATUS) { - *r500_disp_int = RREG32(AVIVO_DISP_INTERRUPT_STATUS); - if (*r500_disp_int & AVIVO_D1_VBLANK_INTERRUPT) { - WREG32(AVIVO_D1MODE_VBLANK_STATUS, AVIVO_VBLANK_ACK); + uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS); + uint32_t irq_mask = ~C_000044_SW_INT; + + if (G_000044_DISPLAY_INT_STAT(irqs)) { + *r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS); + if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) { + WREG32(R_006534_D1MODE_VBLANK_STATUS, + S_006534_D1MODE_VBLANK_ACK(1)); } - if (*r500_disp_int & AVIVO_D2_VBLANK_INTERRUPT) { - WREG32(AVIVO_D2MODE_VBLANK_STATUS, AVIVO_VBLANK_ACK); + if (G_007EDC_LB_D2_VBLANK_INTERRUPT(*r500_disp_int)) { + WREG32(R_006D34_D2MODE_VBLANK_STATUS, + S_006D34_D2MODE_VBLANK_ACK(1)); } } else { *r500_disp_int = 0; } if (irqs) { - WREG32(RADEON_GEN_INT_STATUS, 
irqs); + WREG32(R_000044_GEN_INT_STATUS, irqs); } return irqs & irq_mask; } +void rs600_irq_disable(struct radeon_device *rdev) +{ + u32 tmp; + + WREG32(R_000040_GEN_INT_CNTL, 0); + WREG32(R_006540_DxMODE_INT_MASK, 0); + /* Wait and acknowledge irq */ + mdelay(1); + rs600_irq_ack(rdev, &tmp); +} + int rs600_irq_process(struct radeon_device *rdev) { uint32_t status; @@ -317,16 +251,13 @@ int rs600_irq_process(struct radeon_device *rdev) } while (status || r500_disp_int) { /* SW interrupt */ - if (status & RADEON_SW_INT_TEST) { + if (G_000040_SW_INT_EN(status)) radeon_fence_process(rdev); - } /* Vertical blank interrupts */ - if (r500_disp_int & AVIVO_D1_VBLANK_INTERRUPT) { + if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) drm_handle_vblank(rdev->ddev, 0); - } - if (r500_disp_int & AVIVO_D2_VBLANK_INTERRUPT) { + if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) drm_handle_vblank(rdev->ddev, 1); - } status = rs600_irq_ack(rdev, &r500_disp_int); } return IRQ_HANDLED; @@ -335,53 +266,34 @@ int rs600_irq_process(struct radeon_device *rdev) u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc) { if (crtc == 0) - return RREG32(AVIVO_D1CRTC_FRAME_COUNT); + return RREG32(R_0060A4_D1CRTC_STATUS_FRAME_COUNT); else - return RREG32(AVIVO_D2CRTC_FRAME_COUNT); + return RREG32(R_0068A4_D2CRTC_STATUS_FRAME_COUNT); } - -/* - * Global GPU functions - */ int rs600_mc_wait_for_idle(struct radeon_device *rdev) { unsigned i; - uint32_t tmp; for (i = 0; i < rdev->usec_timeout; i++) { - /* read MC_STATUS */ - tmp = RREG32_MC(RS600_MC_STATUS); - if (tmp & RS600_MC_STATUS_IDLE) { + if (G_000000_MC_IDLE(RREG32_MC(R_000000_MC_STATUS))) return 0; - } - DRM_UDELAY(1); + udelay(1); } return -1; } -void rs600_errata(struct radeon_device *rdev) -{ - rdev->pll_errata = 0; -} - void rs600_gpu_init(struct radeon_device *rdev) { /* FIXME: HDP same place on rs600 ? */ r100_hdp_reset(rdev); - rv515_vga_render_disable(rdev); /* FIXME: is this correct ? */ r420_pipes_init(rdev); - if (rs600_mc_wait_for_idle(rdev)) { - printk(KERN_WARNING "Failed to wait MC idle while " - "programming pipes. Bad things might happen.\n"); - } + /* Wait for mc idle */ + if (rs600_mc_wait_for_idle(rdev)) + dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); } - -/* - * VRAM info. - */ void rs600_vram_info(struct radeon_device *rdev) { /* FIXME: to do or is these values sane ? */ @@ -394,31 +306,206 @@ void rs600_bandwidth_update(struct radeon_device *rdev) /* FIXME: implement, should this be like rs690 ? 
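rs600_mc_wait_for_idle above, like r300_mc_wait_for_idle and the rs690 variant later in this patch, is a bounded busy-wait: it polls the MC status register once per microsecond for at most rdev->usec_timeout iterations and returns -1 so the caller can print a dev_warn on timeout. A self-contained sketch of that shape, with the register read and the idle bit simulated and all names chosen for illustration rather than taken from the driver:

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins; the driver uses RREG32_MC()/udelay() and the
 * timeout comes from rdev->usec_timeout. */
static uint32_t fake_polls;
static uint32_t read_mc_status(void)
{
        /* Pretend the MC reports idle after a few polls */
        return (++fake_polls > 3) ? 0x1 : 0x0;
}
#define MC_IDLE(x)      ((x) & 0x1)     /* mirrors G_000000_MC_IDLE() */

static int mc_wait_for_idle(unsigned usec_timeout)
{
        unsigned i;

        for (i = 0; i < usec_timeout; i++) {
                if (MC_IDLE(read_mc_status()))
                        return 0;       /* idle within the budget */
                /* udelay(1) in the real driver */
        }
        return -1;                      /* caller emits the warning */
}

int main(void)
{
        printf("mc_wait_for_idle: %d\n", mc_wait_for_idle(200000));
        return 0;
}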
*/ } - -/* - * Indirect registers accessor - */ uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg) { - uint32_t r; - - WREG32(RS600_MC_INDEX, - ((reg & RS600_MC_ADDR_MASK) | RS600_MC_IND_CITF_ARB0)); - r = RREG32(RS600_MC_DATA); - return r; + WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) | + S_000070_MC_IND_CITF_ARB0(1)); + return RREG32(R_000074_MC_IND_DATA); } void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) { - WREG32(RS600_MC_INDEX, - RS600_MC_IND_WR_EN | RS600_MC_IND_CITF_ARB0 | - ((reg) & RS600_MC_ADDR_MASK)); - WREG32(RS600_MC_DATA, v); + WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) | + S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1)); + WREG32(R_000074_MC_IND_DATA, v); } -int rs600_init(struct radeon_device *rdev) +void rs600_debugfs(struct radeon_device *rdev) +{ + if (r100_debugfs_rbbm_init(rdev)) + DRM_ERROR("Failed to register debugfs file for RBBM !\n"); +} + +void rs600_set_safe_registers(struct radeon_device *rdev) { rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm; rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm); +} + +static void rs600_mc_program(struct radeon_device *rdev) +{ + struct rv515_mc_save save; + + /* Stops all mc clients */ + rv515_mc_stop(rdev, &save); + + /* Wait for mc idle */ + if (rs600_mc_wait_for_idle(rdev)) + dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); + + /* FIXME: What does AGP means for such chipset ? */ + WREG32_MC(R_000005_MC_AGP_LOCATION, 0x0FFFFFFF); + WREG32_MC(R_000006_AGP_BASE, 0); + WREG32_MC(R_000007_AGP_BASE_2, 0); + /* Program MC */ + WREG32_MC(R_000004_MC_FB_LOCATION, + S_000004_MC_FB_START(rdev->mc.vram_start >> 16) | + S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16)); + WREG32(R_000134_HDP_FB_LOCATION, + S_000134_HDP_FB_START(rdev->mc.vram_start >> 16)); + + rv515_mc_resume(rdev, &save); +} + +static int rs600_startup(struct radeon_device *rdev) +{ + int r; + + rs600_mc_program(rdev); + /* Resume clock */ + rv515_clock_startup(rdev); + /* Initialize GPU configuration (# pipes, ...) */ + rs600_gpu_init(rdev); + /* Initialize GART (initialize after TTM so we can allocate + * memory through TTM but finalize after TTM) */ + r = rs600_gart_enable(rdev); + if (r) + return r; + /* Enable IRQ */ + rdev->irq.sw_int = true; + rs600_irq_set(rdev); + /* 1M ring buffer */ + r = r100_cp_init(rdev, 1024 * 1024); + if (r) { + dev_err(rdev->dev, "failled initializing CP (%d).\n", r); + return r; + } + r = r100_wb_init(rdev); + if (r) + dev_err(rdev->dev, "failled initializing WB (%d).\n", r); + r = r100_ib_init(rdev); + if (r) { + dev_err(rdev->dev, "failled initializing IB (%d).\n", r); + return r; + } + return 0; +} + +int rs600_resume(struct radeon_device *rdev) +{ + /* Make sur GART are not working */ + rs600_gart_disable(rdev); + /* Resume clock before doing reset */ + rv515_clock_startup(rdev); + /* Reset gpu before posting otherwise ATOM will enter infinite loop */ + if (radeon_gpu_reset(rdev)) { + dev_warn(rdev->dev, "GPU reset failed ! 
(0xE40=0x%08X, 0x7C0=0x%08X)\n", + RREG32(R_000E40_RBBM_STATUS), + RREG32(R_0007C0_CP_STAT)); + } + /* post */ + atom_asic_init(rdev->mode_info.atom_context); + /* Resume clock after posting */ + rv515_clock_startup(rdev); + return rs600_startup(rdev); +} + +int rs600_suspend(struct radeon_device *rdev) +{ + r100_cp_disable(rdev); + r100_wb_disable(rdev); + rs600_irq_disable(rdev); + rs600_gart_disable(rdev); + return 0; +} + +void rs600_fini(struct radeon_device *rdev) +{ + rs600_suspend(rdev); + r100_cp_fini(rdev); + r100_wb_fini(rdev); + r100_ib_fini(rdev); + radeon_gem_fini(rdev); + rs600_gart_fini(rdev); + radeon_irq_kms_fini(rdev); + radeon_fence_driver_fini(rdev); + radeon_object_fini(rdev); + radeon_atombios_fini(rdev); + kfree(rdev->bios); + rdev->bios = NULL; +} + +int rs600_init(struct radeon_device *rdev) +{ + int r; + + /* Disable VGA */ + rv515_vga_render_disable(rdev); + /* Initialize scratch registers */ + radeon_scratch_init(rdev); + /* Initialize surface registers */ + radeon_surface_init(rdev); + /* BIOS */ + if (!radeon_get_bios(rdev)) { + if (ASIC_IS_AVIVO(rdev)) + return -EINVAL; + } + if (rdev->is_atom_bios) { + r = radeon_atombios_init(rdev); + if (r) + return r; + } else { + dev_err(rdev->dev, "Expecting atombios for RS600 GPU\n"); + return -EINVAL; + } + /* Reset gpu before posting otherwise ATOM will enter infinite loop */ + if (radeon_gpu_reset(rdev)) { + dev_warn(rdev->dev, + "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", + RREG32(R_000E40_RBBM_STATUS), + RREG32(R_0007C0_CP_STAT)); + } + /* check if cards are posted or not */ + if (!radeon_card_posted(rdev) && rdev->bios) { + DRM_INFO("GPU not posted. posting now...\n"); + atom_asic_init(rdev->mode_info.atom_context); + } + /* Initialize clocks */ + radeon_get_clock_info(rdev->ddev); + /* Get vram informations */ + rs600_vram_info(rdev); + /* Initialize memory controller (also test AGP) */ + r = r420_mc_init(rdev); + if (r) + return r; + rs600_debugfs(rdev); + /* Fence driver */ + r = radeon_fence_driver_init(rdev); + if (r) + return r; + r = radeon_irq_kms_init(rdev); + if (r) + return r; + /* Memory manager */ + r = radeon_object_init(rdev); + if (r) + return r; + r = rs600_gart_init(rdev); + if (r) + return r; + rs600_set_safe_registers(rdev); + rdev->accel_working = true; + r = rs600_startup(rdev); + if (r) { + /* Somethings want wront with the accel init stop accel */ + dev_err(rdev->dev, "Disabling GPU acceleration\n"); + rs600_suspend(rdev); + r100_cp_fini(rdev); + r100_wb_fini(rdev); + r100_ib_fini(rdev); + rs600_gart_fini(rdev); + radeon_irq_kms_fini(rdev); + rdev->accel_working = false; + } return 0; } diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h new file mode 100644 index 00000000000..81308924859 --- /dev/null +++ b/drivers/gpu/drm/radeon/rs600d.h @@ -0,0 +1,470 @@ +/* + * Copyright 2008 Advanced Micro Devices, Inc. + * Copyright 2008 Red Hat Inc. + * Copyright 2009 Jerome Glisse. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Dave Airlie + * Alex Deucher + * Jerome Glisse + */ +#ifndef __RS600D_H__ +#define __RS600D_H__ + +/* Registers */ +#define R_000040_GEN_INT_CNTL 0x000040 +#define S_000040_DISPLAY_INT_STATUS(x) (((x) & 0x1) << 0) +#define G_000040_DISPLAY_INT_STATUS(x) (((x) >> 0) & 0x1) +#define C_000040_DISPLAY_INT_STATUS 0xFFFFFFFE +#define S_000040_DMA_VIPH0_INT_EN(x) (((x) & 0x1) << 12) +#define G_000040_DMA_VIPH0_INT_EN(x) (((x) >> 12) & 0x1) +#define C_000040_DMA_VIPH0_INT_EN 0xFFFFEFFF +#define S_000040_CRTC2_VSYNC(x) (((x) & 0x1) << 6) +#define G_000040_CRTC2_VSYNC(x) (((x) >> 6) & 0x1) +#define C_000040_CRTC2_VSYNC 0xFFFFFFBF +#define S_000040_SNAPSHOT2(x) (((x) & 0x1) << 7) +#define G_000040_SNAPSHOT2(x) (((x) >> 7) & 0x1) +#define C_000040_SNAPSHOT2 0xFFFFFF7F +#define S_000040_CRTC2_VBLANK(x) (((x) & 0x1) << 9) +#define G_000040_CRTC2_VBLANK(x) (((x) >> 9) & 0x1) +#define C_000040_CRTC2_VBLANK 0xFFFFFDFF +#define S_000040_FP2_DETECT(x) (((x) & 0x1) << 10) +#define G_000040_FP2_DETECT(x) (((x) >> 10) & 0x1) +#define C_000040_FP2_DETECT 0xFFFFFBFF +#define S_000040_VSYNC_DIFF_OVER_LIMIT(x) (((x) & 0x1) << 11) +#define G_000040_VSYNC_DIFF_OVER_LIMIT(x) (((x) >> 11) & 0x1) +#define C_000040_VSYNC_DIFF_OVER_LIMIT 0xFFFFF7FF +#define S_000040_DMA_VIPH1_INT_EN(x) (((x) & 0x1) << 13) +#define G_000040_DMA_VIPH1_INT_EN(x) (((x) >> 13) & 0x1) +#define C_000040_DMA_VIPH1_INT_EN 0xFFFFDFFF +#define S_000040_DMA_VIPH2_INT_EN(x) (((x) & 0x1) << 14) +#define G_000040_DMA_VIPH2_INT_EN(x) (((x) >> 14) & 0x1) +#define C_000040_DMA_VIPH2_INT_EN 0xFFFFBFFF +#define S_000040_DMA_VIPH3_INT_EN(x) (((x) & 0x1) << 15) +#define G_000040_DMA_VIPH3_INT_EN(x) (((x) >> 15) & 0x1) +#define C_000040_DMA_VIPH3_INT_EN 0xFFFF7FFF +#define S_000040_I2C_INT_EN(x) (((x) & 0x1) << 17) +#define G_000040_I2C_INT_EN(x) (((x) >> 17) & 0x1) +#define C_000040_I2C_INT_EN 0xFFFDFFFF +#define S_000040_GUI_IDLE(x) (((x) & 0x1) << 19) +#define G_000040_GUI_IDLE(x) (((x) >> 19) & 0x1) +#define C_000040_GUI_IDLE 0xFFF7FFFF +#define S_000040_VIPH_INT_EN(x) (((x) & 0x1) << 24) +#define G_000040_VIPH_INT_EN(x) (((x) >> 24) & 0x1) +#define C_000040_VIPH_INT_EN 0xFEFFFFFF +#define S_000040_SW_INT_EN(x) (((x) & 0x1) << 25) +#define G_000040_SW_INT_EN(x) (((x) >> 25) & 0x1) +#define C_000040_SW_INT_EN 0xFDFFFFFF +#define S_000040_GEYSERVILLE(x) (((x) & 0x1) << 27) +#define G_000040_GEYSERVILLE(x) (((x) >> 27) & 0x1) +#define C_000040_GEYSERVILLE 0xF7FFFFFF 
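These single-bit GEN_INT_CNTL helpers are what rs600_irq_set, earlier in this patch, ORs together before writing R_000040_GEN_INT_CNTL and R_006540_DxMODE_INT_MASK. A runnable sketch of that composition, with the macro bodies copied from this header and the enable flags hard-coded as stand-ins for the rdev->irq state:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Copied from the rs600d.h hunk */
#define S_000040_SW_INT_EN(x)              (((x) & 0x1) << 25)
#define S_006540_D1MODE_VBLANK_INT_MASK(x) (((x) & 0x1) << 0)
#define S_006540_D2MODE_VBLANK_INT_MASK(x) (((x) & 0x1) << 8)

int main(void)
{
        /* Stand-ins for rdev->irq.sw_int / rdev->irq.crtc_vblank_int[] */
        bool sw_int = true, d1_vblank = true, d2_vblank = false;
        uint32_t tmp = 0, mode_int = 0;

        if (sw_int)
                tmp |= S_000040_SW_INT_EN(1);
        if (d1_vblank)
                mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
        if (d2_vblank)
                mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);

        /* In the driver these become WREG32(R_000040_GEN_INT_CNTL, tmp)
         * and WREG32(R_006540_DxMODE_INT_MASK, mode_int). */
        printf("GEN_INT_CNTL=0x%08x DxMODE_INT_MASK=0x%08x\n", tmp, mode_int);
        return 0;
}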
+#define S_000040_HDCP_AUTHORIZED_INT(x) (((x) & 0x1) << 28) +#define G_000040_HDCP_AUTHORIZED_INT(x) (((x) >> 28) & 0x1) +#define C_000040_HDCP_AUTHORIZED_INT 0xEFFFFFFF +#define S_000040_DVI_I2C_INT(x) (((x) & 0x1) << 29) +#define G_000040_DVI_I2C_INT(x) (((x) >> 29) & 0x1) +#define C_000040_DVI_I2C_INT 0xDFFFFFFF +#define S_000040_GUIDMA(x) (((x) & 0x1) << 30) +#define G_000040_GUIDMA(x) (((x) >> 30) & 0x1) +#define C_000040_GUIDMA 0xBFFFFFFF +#define S_000040_VIDDMA(x) (((x) & 0x1) << 31) +#define G_000040_VIDDMA(x) (((x) >> 31) & 0x1) +#define C_000040_VIDDMA 0x7FFFFFFF +#define R_000044_GEN_INT_STATUS 0x000044 +#define S_000044_DISPLAY_INT_STAT(x) (((x) & 0x1) << 0) +#define G_000044_DISPLAY_INT_STAT(x) (((x) >> 0) & 0x1) +#define C_000044_DISPLAY_INT_STAT 0xFFFFFFFE +#define S_000044_VGA_INT_STAT(x) (((x) & 0x1) << 1) +#define G_000044_VGA_INT_STAT(x) (((x) >> 1) & 0x1) +#define C_000044_VGA_INT_STAT 0xFFFFFFFD +#define S_000044_CAP0_INT_ACTIVE(x) (((x) & 0x1) << 8) +#define G_000044_CAP0_INT_ACTIVE(x) (((x) >> 8) & 0x1) +#define C_000044_CAP0_INT_ACTIVE 0xFFFFFEFF +#define S_000044_DMA_VIPH0_INT(x) (((x) & 0x1) << 12) +#define G_000044_DMA_VIPH0_INT(x) (((x) >> 12) & 0x1) +#define C_000044_DMA_VIPH0_INT 0xFFFFEFFF +#define S_000044_DMA_VIPH1_INT(x) (((x) & 0x1) << 13) +#define G_000044_DMA_VIPH1_INT(x) (((x) >> 13) & 0x1) +#define C_000044_DMA_VIPH1_INT 0xFFFFDFFF +#define S_000044_DMA_VIPH2_INT(x) (((x) & 0x1) << 14) +#define G_000044_DMA_VIPH2_INT(x) (((x) >> 14) & 0x1) +#define C_000044_DMA_VIPH2_INT 0xFFFFBFFF +#define S_000044_DMA_VIPH3_INT(x) (((x) & 0x1) << 15) +#define G_000044_DMA_VIPH3_INT(x) (((x) >> 15) & 0x1) +#define C_000044_DMA_VIPH3_INT 0xFFFF7FFF +#define S_000044_MC_PROBE_FAULT_STAT(x) (((x) & 0x1) << 16) +#define G_000044_MC_PROBE_FAULT_STAT(x) (((x) >> 16) & 0x1) +#define C_000044_MC_PROBE_FAULT_STAT 0xFFFEFFFF +#define S_000044_I2C_INT(x) (((x) & 0x1) << 17) +#define G_000044_I2C_INT(x) (((x) >> 17) & 0x1) +#define C_000044_I2C_INT 0xFFFDFFFF +#define S_000044_SCRATCH_INT_STAT(x) (((x) & 0x1) << 18) +#define G_000044_SCRATCH_INT_STAT(x) (((x) >> 18) & 0x1) +#define C_000044_SCRATCH_INT_STAT 0xFFFBFFFF +#define S_000044_GUI_IDLE_STAT(x) (((x) & 0x1) << 19) +#define G_000044_GUI_IDLE_STAT(x) (((x) >> 19) & 0x1) +#define C_000044_GUI_IDLE_STAT 0xFFF7FFFF +#define S_000044_ATI_OVERDRIVE_INT_STAT(x) (((x) & 0x1) << 20) +#define G_000044_ATI_OVERDRIVE_INT_STAT(x) (((x) >> 20) & 0x1) +#define C_000044_ATI_OVERDRIVE_INT_STAT 0xFFEFFFFF +#define S_000044_MC_PROTECTION_FAULT_STAT(x) (((x) & 0x1) << 21) +#define G_000044_MC_PROTECTION_FAULT_STAT(x) (((x) >> 21) & 0x1) +#define C_000044_MC_PROTECTION_FAULT_STAT 0xFFDFFFFF +#define S_000044_RBBM_READ_INT_STAT(x) (((x) & 0x1) << 22) +#define G_000044_RBBM_READ_INT_STAT(x) (((x) >> 22) & 0x1) +#define C_000044_RBBM_READ_INT_STAT 0xFFBFFFFF +#define S_000044_CB_CONTEXT_SWITCH_STAT(x) (((x) & 0x1) << 23) +#define G_000044_CB_CONTEXT_SWITCH_STAT(x) (((x) >> 23) & 0x1) +#define C_000044_CB_CONTEXT_SWITCH_STAT 0xFF7FFFFF +#define S_000044_VIPH_INT(x) (((x) & 0x1) << 24) +#define G_000044_VIPH_INT(x) (((x) >> 24) & 0x1) +#define C_000044_VIPH_INT 0xFEFFFFFF +#define S_000044_SW_INT(x) (((x) & 0x1) << 25) +#define G_000044_SW_INT(x) (((x) >> 25) & 0x1) +#define C_000044_SW_INT 0xFDFFFFFF +#define S_000044_SW_INT_SET(x) (((x) & 0x1) << 26) +#define G_000044_SW_INT_SET(x) (((x) >> 26) & 0x1) +#define C_000044_SW_INT_SET 0xFBFFFFFF +#define S_000044_IDCT_INT_STAT(x) (((x) & 0x1) << 27) +#define G_000044_IDCT_INT_STAT(x) (((x) >> 27) & 
0x1) +#define C_000044_IDCT_INT_STAT 0xF7FFFFFF +#define S_000044_GUIDMA_STAT(x) (((x) & 0x1) << 30) +#define G_000044_GUIDMA_STAT(x) (((x) >> 30) & 0x1) +#define C_000044_GUIDMA_STAT 0xBFFFFFFF +#define S_000044_VIDDMA_STAT(x) (((x) & 0x1) << 31) +#define G_000044_VIDDMA_STAT(x) (((x) >> 31) & 0x1) +#define C_000044_VIDDMA_STAT 0x7FFFFFFF +#define R_00004C_BUS_CNTL 0x00004C +#define S_00004C_BUS_MASTER_DIS(x) (((x) & 0x1) << 14) +#define G_00004C_BUS_MASTER_DIS(x) (((x) >> 14) & 0x1) +#define C_00004C_BUS_MASTER_DIS 0xFFFFBFFF +#define S_00004C_BUS_MSI_REARM(x) (((x) & 0x1) << 20) +#define G_00004C_BUS_MSI_REARM(x) (((x) >> 20) & 0x1) +#define C_00004C_BUS_MSI_REARM 0xFFEFFFFF +#define R_000070_MC_IND_INDEX 0x000070 +#define S_000070_MC_IND_ADDR(x) (((x) & 0xFFFF) << 0) +#define G_000070_MC_IND_ADDR(x) (((x) >> 0) & 0xFFFF) +#define C_000070_MC_IND_ADDR 0xFFFF0000 +#define S_000070_MC_IND_SEQ_RBS_0(x) (((x) & 0x1) << 16) +#define G_000070_MC_IND_SEQ_RBS_0(x) (((x) >> 16) & 0x1) +#define C_000070_MC_IND_SEQ_RBS_0 0xFFFEFFFF +#define S_000070_MC_IND_SEQ_RBS_1(x) (((x) & 0x1) << 17) +#define G_000070_MC_IND_SEQ_RBS_1(x) (((x) >> 17) & 0x1) +#define C_000070_MC_IND_SEQ_RBS_1 0xFFFDFFFF +#define S_000070_MC_IND_SEQ_RBS_2(x) (((x) & 0x1) << 18) +#define G_000070_MC_IND_SEQ_RBS_2(x) (((x) >> 18) & 0x1) +#define C_000070_MC_IND_SEQ_RBS_2 0xFFFBFFFF +#define S_000070_MC_IND_SEQ_RBS_3(x) (((x) & 0x1) << 19) +#define G_000070_MC_IND_SEQ_RBS_3(x) (((x) >> 19) & 0x1) +#define C_000070_MC_IND_SEQ_RBS_3 0xFFF7FFFF +#define S_000070_MC_IND_AIC_RBS(x) (((x) & 0x1) << 20) +#define G_000070_MC_IND_AIC_RBS(x) (((x) >> 20) & 0x1) +#define C_000070_MC_IND_AIC_RBS 0xFFEFFFFF +#define S_000070_MC_IND_CITF_ARB0(x) (((x) & 0x1) << 21) +#define G_000070_MC_IND_CITF_ARB0(x) (((x) >> 21) & 0x1) +#define C_000070_MC_IND_CITF_ARB0 0xFFDFFFFF +#define S_000070_MC_IND_CITF_ARB1(x) (((x) & 0x1) << 22) +#define G_000070_MC_IND_CITF_ARB1(x) (((x) >> 22) & 0x1) +#define C_000070_MC_IND_CITF_ARB1 0xFFBFFFFF +#define S_000070_MC_IND_WR_EN(x) (((x) & 0x1) << 23) +#define G_000070_MC_IND_WR_EN(x) (((x) >> 23) & 0x1) +#define C_000070_MC_IND_WR_EN 0xFF7FFFFF +#define S_000070_MC_IND_RD_INV(x) (((x) & 0x1) << 24) +#define G_000070_MC_IND_RD_INV(x) (((x) >> 24) & 0x1) +#define C_000070_MC_IND_RD_INV 0xFEFFFFFF +#define R_000074_MC_IND_DATA 0x000074 +#define S_000074_MC_IND_DATA(x) (((x) & 0xFFFFFFFF) << 0) +#define G_000074_MC_IND_DATA(x) (((x) >> 0) & 0xFFFFFFFF) +#define C_000074_MC_IND_DATA 0x00000000 +#define R_000134_HDP_FB_LOCATION 0x000134 +#define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0) +#define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF) +#define C_000134_HDP_FB_START 0xFFFF0000 +#define R_0007C0_CP_STAT 0x0007C0 +#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0) +#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1) +#define C_0007C0_MRU_BUSY 0xFFFFFFFE +#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1) +#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1) +#define C_0007C0_MWU_BUSY 0xFFFFFFFD +#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2) +#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1) +#define C_0007C0_RSIU_BUSY 0xFFFFFFFB +#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3) +#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1) +#define C_0007C0_RCIU_BUSY 0xFFFFFFF7 +#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9) +#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1) +#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF +#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10) +#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) 
>> 10) & 0x1) +#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF +#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11) +#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1) +#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF +#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12) +#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1) +#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF +#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13) +#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1) +#define C_0007C0_CSI_BUSY 0xFFFFDFFF +#define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14) +#define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1) +#define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF +#define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15) +#define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1) +#define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF +#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28) +#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1) +#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF +#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29) +#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1) +#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF +#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30) +#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1) +#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF +#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31) +#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1) +#define C_0007C0_CP_BUSY 0x7FFFFFFF +#define R_000E40_RBBM_STATUS 0x000E40 +#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0) +#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F) +#define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80 +#define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8) +#define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1) +#define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF +#define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9) +#define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1) +#define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF +#define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10) +#define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1) +#define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF +#define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11) +#define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1) +#define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF +#define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12) +#define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1) +#define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF +#define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13) +#define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1) +#define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF +#define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14) +#define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1) +#define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF +#define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15) +#define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1) +#define C_000E40_ENG_EV_BUSY 0xFFFF7FFF +#define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16) +#define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1) +#define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF +#define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17) +#define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1) +#define C_000E40_E2_BUSY 0xFFFDFFFF +#define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18) +#define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1) +#define C_000E40_RB2D_BUSY 0xFFFBFFFF +#define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19) +#define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1) +#define C_000E40_RB3D_BUSY 0xFFF7FFFF +#define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20) +#define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1) +#define C_000E40_VAP_BUSY 0xFFEFFFFF +#define 
S_000E40_RE_BUSY(x) (((x) & 0x1) << 21) +#define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1) +#define C_000E40_RE_BUSY 0xFFDFFFFF +#define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22) +#define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1) +#define C_000E40_TAM_BUSY 0xFFBFFFFF +#define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23) +#define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1) +#define C_000E40_TDM_BUSY 0xFF7FFFFF +#define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24) +#define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1) +#define C_000E40_PB_BUSY 0xFEFFFFFF +#define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25) +#define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1) +#define C_000E40_TIM_BUSY 0xFDFFFFFF +#define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26) +#define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1) +#define C_000E40_GA_BUSY 0xFBFFFFFF +#define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27) +#define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1) +#define C_000E40_CBA2D_BUSY 0xF7FFFFFF +#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31) +#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) +#define C_000E40_GUI_ACTIVE 0x7FFFFFFF +#define R_0060A4_D1CRTC_STATUS_FRAME_COUNT 0x0060A4 +#define S_0060A4_D1CRTC_FRAME_COUNT(x) (((x) & 0xFFFFFF) << 0) +#define G_0060A4_D1CRTC_FRAME_COUNT(x) (((x) >> 0) & 0xFFFFFF) +#define C_0060A4_D1CRTC_FRAME_COUNT 0xFF000000 +#define R_006534_D1MODE_VBLANK_STATUS 0x006534 +#define S_006534_D1MODE_VBLANK_OCCURRED(x) (((x) & 0x1) << 0) +#define G_006534_D1MODE_VBLANK_OCCURRED(x) (((x) >> 0) & 0x1) +#define C_006534_D1MODE_VBLANK_OCCURRED 0xFFFFFFFE +#define S_006534_D1MODE_VBLANK_ACK(x) (((x) & 0x1) << 4) +#define G_006534_D1MODE_VBLANK_ACK(x) (((x) >> 4) & 0x1) +#define C_006534_D1MODE_VBLANK_ACK 0xFFFFFFEF +#define S_006534_D1MODE_VBLANK_STAT(x) (((x) & 0x1) << 12) +#define G_006534_D1MODE_VBLANK_STAT(x) (((x) >> 12) & 0x1) +#define C_006534_D1MODE_VBLANK_STAT 0xFFFFEFFF +#define S_006534_D1MODE_VBLANK_INTERRUPT(x) (((x) & 0x1) << 16) +#define G_006534_D1MODE_VBLANK_INTERRUPT(x) (((x) >> 16) & 0x1) +#define C_006534_D1MODE_VBLANK_INTERRUPT 0xFFFEFFFF +#define R_006540_DxMODE_INT_MASK 0x006540 +#define S_006540_D1MODE_VBLANK_INT_MASK(x) (((x) & 0x1) << 0) +#define G_006540_D1MODE_VBLANK_INT_MASK(x) (((x) >> 0) & 0x1) +#define C_006540_D1MODE_VBLANK_INT_MASK 0xFFFFFFFE +#define S_006540_D1MODE_VLINE_INT_MASK(x) (((x) & 0x1) << 4) +#define G_006540_D1MODE_VLINE_INT_MASK(x) (((x) >> 4) & 0x1) +#define C_006540_D1MODE_VLINE_INT_MASK 0xFFFFFFEF +#define S_006540_D2MODE_VBLANK_INT_MASK(x) (((x) & 0x1) << 8) +#define G_006540_D2MODE_VBLANK_INT_MASK(x) (((x) >> 8) & 0x1) +#define C_006540_D2MODE_VBLANK_INT_MASK 0xFFFFFEFF +#define S_006540_D2MODE_VLINE_INT_MASK(x) (((x) & 0x1) << 12) +#define G_006540_D2MODE_VLINE_INT_MASK(x) (((x) >> 12) & 0x1) +#define C_006540_D2MODE_VLINE_INT_MASK 0xFFFFEFFF +#define S_006540_D1MODE_VBLANK_CP_SEL(x) (((x) & 0x1) << 30) +#define G_006540_D1MODE_VBLANK_CP_SEL(x) (((x) >> 30) & 0x1) +#define C_006540_D1MODE_VBLANK_CP_SEL 0xBFFFFFFF +#define S_006540_D2MODE_VBLANK_CP_SEL(x) (((x) & 0x1) << 31) +#define G_006540_D2MODE_VBLANK_CP_SEL(x) (((x) >> 31) & 0x1) +#define C_006540_D2MODE_VBLANK_CP_SEL 0x7FFFFFFF +#define R_0068A4_D2CRTC_STATUS_FRAME_COUNT 0x0068A4 +#define S_0068A4_D2CRTC_FRAME_COUNT(x) (((x) & 0xFFFFFF) << 0) +#define G_0068A4_D2CRTC_FRAME_COUNT(x) (((x) >> 0) & 0xFFFFFF) +#define C_0068A4_D2CRTC_FRAME_COUNT 0xFF000000 +#define R_006D34_D2MODE_VBLANK_STATUS 0x006D34 +#define S_006D34_D2MODE_VBLANK_OCCURRED(x) (((x) & 0x1) << 0) +#define G_006D34_D2MODE_VBLANK_OCCURRED(x) 
(((x) >> 0) & 0x1) +#define C_006D34_D2MODE_VBLANK_OCCURRED 0xFFFFFFFE +#define S_006D34_D2MODE_VBLANK_ACK(x) (((x) & 0x1) << 4) +#define G_006D34_D2MODE_VBLANK_ACK(x) (((x) >> 4) & 0x1) +#define C_006D34_D2MODE_VBLANK_ACK 0xFFFFFFEF +#define S_006D34_D2MODE_VBLANK_STAT(x) (((x) & 0x1) << 12) +#define G_006D34_D2MODE_VBLANK_STAT(x) (((x) >> 12) & 0x1) +#define C_006D34_D2MODE_VBLANK_STAT 0xFFFFEFFF +#define S_006D34_D2MODE_VBLANK_INTERRUPT(x) (((x) & 0x1) << 16) +#define G_006D34_D2MODE_VBLANK_INTERRUPT(x) (((x) >> 16) & 0x1) +#define C_006D34_D2MODE_VBLANK_INTERRUPT 0xFFFEFFFF +#define R_007EDC_DISP_INTERRUPT_STATUS 0x007EDC +#define S_007EDC_LB_D1_VBLANK_INTERRUPT(x) (((x) & 0x1) << 4) +#define G_007EDC_LB_D1_VBLANK_INTERRUPT(x) (((x) >> 4) & 0x1) +#define C_007EDC_LB_D1_VBLANK_INTERRUPT 0xFFFFFFEF +#define S_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) & 0x1) << 5) +#define G_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) >> 5) & 0x1) +#define C_007EDC_LB_D2_VBLANK_INTERRUPT 0xFFFFFFDF + + +/* MC registers */ +#define R_000000_MC_STATUS 0x000000 +#define S_000000_MC_IDLE(x) (((x) & 0x1) << 0) +#define G_000000_MC_IDLE(x) (((x) >> 0) & 0x1) +#define C_000000_MC_IDLE 0xFFFFFFFE +#define R_000004_MC_FB_LOCATION 0x000004 +#define S_000004_MC_FB_START(x) (((x) & 0xFFFF) << 0) +#define G_000004_MC_FB_START(x) (((x) >> 0) & 0xFFFF) +#define C_000004_MC_FB_START 0xFFFF0000 +#define S_000004_MC_FB_TOP(x) (((x) & 0xFFFF) << 16) +#define G_000004_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF) +#define C_000004_MC_FB_TOP 0x0000FFFF +#define R_000005_MC_AGP_LOCATION 0x000005 +#define S_000005_MC_AGP_START(x) (((x) & 0xFFFF) << 0) +#define G_000005_MC_AGP_START(x) (((x) >> 0) & 0xFFFF) +#define C_000005_MC_AGP_START 0xFFFF0000 +#define S_000005_MC_AGP_TOP(x) (((x) & 0xFFFF) << 16) +#define G_000005_MC_AGP_TOP(x) (((x) >> 16) & 0xFFFF) +#define C_000005_MC_AGP_TOP 0x0000FFFF +#define R_000006_AGP_BASE 0x000006 +#define S_000006_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0) +#define G_000006_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF) +#define C_000006_AGP_BASE_ADDR 0x00000000 +#define R_000007_AGP_BASE_2 0x000007 +#define S_000007_AGP_BASE_ADDR_2(x) (((x) & 0xF) << 0) +#define G_000007_AGP_BASE_ADDR_2(x) (((x) >> 0) & 0xF) +#define C_000007_AGP_BASE_ADDR_2 0xFFFFFFF0 +#define R_000009_MC_CNTL1 0x000009 +#define S_000009_ENABLE_PAGE_TABLES(x) (((x) & 0x1) << 26) +#define G_000009_ENABLE_PAGE_TABLES(x) (((x) >> 26) & 0x1) +#define C_000009_ENABLE_PAGE_TABLES 0xFBFFFFFF +/* FIXME don't know the various field size need feedback from AMD */ +#define R_000100_MC_PT0_CNTL 0x000100 +#define S_000100_ENABLE_PT(x) (((x) & 0x1) << 0) +#define G_000100_ENABLE_PT(x) (((x) >> 0) & 0x1) +#define C_000100_ENABLE_PT 0xFFFFFFFE +#define S_000100_EFFECTIVE_L2_CACHE_SIZE(x) (((x) & 0x7) << 15) +#define G_000100_EFFECTIVE_L2_CACHE_SIZE(x) (((x) >> 15) & 0x7) +#define C_000100_EFFECTIVE_L2_CACHE_SIZE 0xFFFC7FFF +#define S_000100_EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 0x7) << 21) +#define G_000100_EFFECTIVE_L2_QUEUE_SIZE(x) (((x) >> 21) & 0x7) +#define C_000100_EFFECTIVE_L2_QUEUE_SIZE 0xFF1FFFFF +#define S_000100_INVALIDATE_ALL_L1_TLBS(x) (((x) & 0x1) << 28) +#define G_000100_INVALIDATE_ALL_L1_TLBS(x) (((x) >> 28) & 0x1) +#define C_000100_INVALIDATE_ALL_L1_TLBS 0xEFFFFFFF +#define S_000100_INVALIDATE_L2_CACHE(x) (((x) & 0x1) << 29) +#define G_000100_INVALIDATE_L2_CACHE(x) (((x) >> 29) & 0x1) +#define C_000100_INVALIDATE_L2_CACHE 0xDFFFFFFF +#define R_000102_MC_PT0_CONTEXT0_CNTL 0x000102 +#define S_000102_ENABLE_PAGE_TABLE(x) (((x) & 0x1) << 0) +#define 
G_000102_ENABLE_PAGE_TABLE(x) (((x) >> 0) & 0x1) +#define C_000102_ENABLE_PAGE_TABLE 0xFFFFFFFE +#define S_000102_PAGE_TABLE_DEPTH(x) (((x) & 0x3) << 1) +#define G_000102_PAGE_TABLE_DEPTH(x) (((x) >> 1) & 0x3) +#define C_000102_PAGE_TABLE_DEPTH 0xFFFFFFF9 +#define V_000102_PAGE_TABLE_FLAT 0 +/* R600 documentation suggest that this should be a number of pages */ +#define R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR 0x000112 +#define R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR 0x000114 +#define R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR 0x00011C +#define R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR 0x00012C +#define R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR 0x00013C +#define R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR 0x00014C +#define R_00016C_MC_PT0_CLIENT0_CNTL 0x00016C +#define S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(x) (((x) & 0x1) << 0) +#define G_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(x) (((x) >> 0) & 0x1) +#define C_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE 0xFFFFFFFE +#define S_00016C_TRANSLATION_MODE_OVERRIDE(x) (((x) & 0x1) << 1) +#define G_00016C_TRANSLATION_MODE_OVERRIDE(x) (((x) >> 1) & 0x1) +#define C_00016C_TRANSLATION_MODE_OVERRIDE 0xFFFFFFFD +#define S_00016C_SYSTEM_ACCESS_MODE_MASK(x) (((x) & 0x3) << 8) +#define G_00016C_SYSTEM_ACCESS_MODE_MASK(x) (((x) >> 8) & 0x3) +#define C_00016C_SYSTEM_ACCESS_MODE_MASK 0xFFFFFCFF +#define V_00016C_SYSTEM_ACCESS_MODE_PA_ONLY 0 +#define V_00016C_SYSTEM_ACCESS_MODE_USE_SYS_MAP 1 +#define V_00016C_SYSTEM_ACCESS_MODE_IN_SYS 2 +#define V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS 3 +#define S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(x) (((x) & 0x1) << 10) +#define G_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(x) (((x) >> 10) & 0x1) +#define C_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS 0xFFFFFBFF +#define V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH 0 +#define V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE 1 +#define S_00016C_EFFECTIVE_L1_CACHE_SIZE(x) (((x) & 0x7) << 11) +#define G_00016C_EFFECTIVE_L1_CACHE_SIZE(x) (((x) >> 11) & 0x7) +#define C_00016C_EFFECTIVE_L1_CACHE_SIZE 0xFFFFC7FF +#define S_00016C_ENABLE_FRAGMENT_PROCESSING(x) (((x) & 0x1) << 14) +#define G_00016C_ENABLE_FRAGMENT_PROCESSING(x) (((x) >> 14) & 0x1) +#define C_00016C_ENABLE_FRAGMENT_PROCESSING 0xFFFFBFFF +#define S_00016C_EFFECTIVE_L1_QUEUE_SIZE(x) (((x) & 0x7) << 15) +#define G_00016C_EFFECTIVE_L1_QUEUE_SIZE(x) (((x) >> 15) & 0x7) +#define C_00016C_EFFECTIVE_L1_QUEUE_SIZE 0xFFFC7FFF +#define S_00016C_INVALIDATE_L1_TLB(x) (((x) & 0x1) << 20) +#define G_00016C_INVALIDATE_L1_TLB(x) (((x) >> 20) & 0x1) +#define C_00016C_INVALIDATE_L1_TLB 0xFFEFFFFF + +#endif diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 7a0098ddf97..025e3225346 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c @@ -26,105 +26,29 @@ * Jerome Glisse */ #include "drmP.h" -#include "radeon_reg.h" #include "radeon.h" -#include "rs690r.h" #include "atom.h" -#include "atom-bits.h" - -/* rs690,rs740 depends on : */ -void r100_hdp_reset(struct radeon_device *rdev); -int r300_mc_wait_for_idle(struct radeon_device *rdev); -void r420_pipes_init(struct radeon_device *rdev); -void rs400_gart_disable(struct radeon_device *rdev); -int rs400_gart_enable(struct radeon_device *rdev); -void rs400_gart_adjust_size(struct radeon_device *rdev); -void rs600_mc_disable_clients(struct radeon_device *rdev); - -/* This files gather functions specifics to : - * rs690,rs740 - * - * Some of these functions might be used by newer ASICs. 
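The MC_PT0_CLIENT0_CNTL helpers defined just above are consumed by rs600_gart_enable earlier in this patch, which writes the same composed value into all nineteen GART clients. A user-space sketch that rebuilds and decodes that value; the macro bodies are copied from the rs600d.h hunk and the decode exists only for illustration:

#include <stdio.h>
#include <stdint.h>

/* Copied from the rs600d.h hunk above */
#define S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(x)   (((x) & 0x1) << 0)
#define S_00016C_SYSTEM_ACCESS_MODE_MASK(x)            (((x) & 0x3) << 8)
#define G_00016C_SYSTEM_ACCESS_MODE_MASK(x)            (((x) >> 8) & 0x3)
#define V_00016C_SYSTEM_ACCESS_MODE_IN_SYS             2
#define S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(x)    (((x) & 0x1) << 10)
#define V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE 1
#define S_00016C_EFFECTIVE_L1_CACHE_SIZE(x)            (((x) & 0x7) << 11)
#define S_00016C_ENABLE_FRAGMENT_PROCESSING(x)         (((x) & 0x1) << 14)
#define S_00016C_EFFECTIVE_L1_QUEUE_SIZE(x)            (((x) & 0x7) << 15)

int main(void)
{
        /* Same composition rs600_gart_enable writes to each client */
        uint32_t cntl = S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
                        S_00016C_SYSTEM_ACCESS_MODE_MASK(
                                V_00016C_SYSTEM_ACCESS_MODE_IN_SYS) |
                        S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
                                V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE) |
                        S_00016C_EFFECTIVE_L1_CACHE_SIZE(1) |
                        S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
                        S_00016C_EFFECTIVE_L1_QUEUE_SIZE(1);

        printf("MC_PT0_CLIENTx_CNTL = 0x%08x, access mode = %u\n",
               cntl, G_00016C_SYSTEM_ACCESS_MODE_MASK(cntl));
        return 0;
}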
- */ -void rs690_gpu_init(struct radeon_device *rdev); -int rs690_mc_wait_for_idle(struct radeon_device *rdev); - - -/* - * MC functions. - */ -int rs690_mc_init(struct radeon_device *rdev) -{ - uint32_t tmp; - int r; - - if (r100_debugfs_rbbm_init(rdev)) { - DRM_ERROR("Failed to register debugfs file for RBBM !\n"); - } - - rs690_gpu_init(rdev); - rs400_gart_disable(rdev); - - /* Setup GPU memory space */ - rdev->mc.gtt_location = rdev->mc.mc_vram_size; - rdev->mc.gtt_location += (rdev->mc.gtt_size - 1); - rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1); - rdev->mc.vram_location = 0xFFFFFFFFUL; - r = radeon_mc_setup(rdev); - if (r) { - return r; - } - - /* Program GPU memory space */ - rs600_mc_disable_clients(rdev); - if (rs690_mc_wait_for_idle(rdev)) { - printk(KERN_WARNING "Failed to wait MC idle while " - "programming pipes. Bad things might happen.\n"); - } - tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; - tmp = REG_SET(RS690_MC_FB_TOP, tmp >> 16); - tmp |= REG_SET(RS690_MC_FB_START, rdev->mc.vram_location >> 16); - WREG32_MC(RS690_MCCFG_FB_LOCATION, tmp); - /* FIXME: Does this reg exist on RS480,RS740 ? */ - WREG32(0x310, rdev->mc.vram_location); - WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16); - return 0; -} - -void rs690_mc_fini(struct radeon_device *rdev) -{ -} - +#include "rs690d.h" -/* - * Global GPU functions - */ -int rs690_mc_wait_for_idle(struct radeon_device *rdev) +static int rs690_mc_wait_for_idle(struct radeon_device *rdev) { unsigned i; uint32_t tmp; for (i = 0; i < rdev->usec_timeout; i++) { /* read MC_STATUS */ - tmp = RREG32_MC(RS690_MC_STATUS); - if (tmp & RS690_MC_STATUS_IDLE) { + tmp = RREG32_MC(R_000090_MC_SYSTEM_STATUS); + if (G_000090_MC_SYSTEM_IDLE(tmp)) return 0; - } - DRM_UDELAY(1); + udelay(1); } return -1; } -void rs690_errata(struct radeon_device *rdev) -{ - rdev->pll_errata = 0; -} - -void rs690_gpu_init(struct radeon_device *rdev) +static void rs690_gpu_init(struct radeon_device *rdev) { /* FIXME: HDP same place on rs690 ? */ r100_hdp_reset(rdev); - rv515_vga_render_disable(rdev); /* FIXME: is this correct ? */ r420_pipes_init(rdev); if (rs690_mc_wait_for_idle(rdev)) { @@ -133,10 +57,6 @@ void rs690_gpu_init(struct radeon_device *rdev) } } - -/* - * VRAM info. - */ void rs690_pm_info(struct radeon_device *rdev) { int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); @@ -250,39 +170,39 @@ void rs690_line_buffer_adjust(struct radeon_device *rdev, /* * Line Buffer Setup * There is a single line buffer shared by both display controllers. - * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between + * R_006520_DC_LB_MEMORY_SPLIT controls how that line buffer is shared between * the display controllers. The paritioning can either be done * manually or via one of four preset allocations specified in bits 1:0: * 0 - line buffer is divided in half and shared between crtc * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4 * 2 - D1 gets the whole buffer * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4 - * Setting bit 2 of DC_LB_MEMORY_SPLIT controls switches to manual + * Setting bit 2 of R_006520_DC_LB_MEMORY_SPLIT controls switches to manual * allocation mode. In manual allocation mode, D1 always starts at 0, * D1 end/2 is specified in bits 14:4; D2 allocation follows D1. 
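Following the bit layout described in the comment above (bits 1:0 select a preset split, bit 2 switches to manual allocation, and bits 14:4 hold D1's end address divided by two), here is a hedged sketch of how a manual split could be encoded. The macro names and the chosen buffer size are illustrative assumptions, not values taken from rs690d.h:

#include <stdio.h>
#include <stdint.h>

/* Bit layout per the rs690_line_buffer_adjust() comment:
 * bits 1:0 preset allocation, bit 2 manual mode, bits 14:4 D1 end / 2.
 * These names and numbers are made up for illustration. */
#define LB_SPLIT_MANUAL_MODE       (1 << 2)
#define LB_SPLIT_D1_END_HALF(x)    (((x) & 0x7FF) << 4)

int main(void)
{
        /* Hypothetical: give D1 the first 0x300 entries of the line buffer */
        uint32_t d1_end = 0x300;
        uint32_t split = LB_SPLIT_MANUAL_MODE |
                         LB_SPLIT_D1_END_HALF(d1_end / 2);

        /* D2's allocation implicitly starts right after D1's end */
        printf("DC_LB_MEMORY_SPLIT (manual) = 0x%08x\n", split);
        return 0;
}

The driver itself only uses the preset modes (the V_006520_* values in the hunk below); manual mode is described here only because the comment documents it.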
*/ - tmp = RREG32(DC_LB_MEMORY_SPLIT) & ~DC_LB_MEMORY_SPLIT_MASK; - tmp &= ~DC_LB_MEMORY_SPLIT_SHIFT_MODE; + tmp = RREG32(R_006520_DC_LB_MEMORY_SPLIT) & C_006520_DC_LB_MEMORY_SPLIT; + tmp &= ~C_006520_DC_LB_MEMORY_SPLIT_MODE; /* auto */ if (mode1 && mode2) { if (mode1->hdisplay > mode2->hdisplay) { if (mode1->hdisplay > 2560) - tmp |= DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q; + tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q; else - tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; + tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; } else if (mode2->hdisplay > mode1->hdisplay) { if (mode2->hdisplay > 2560) - tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; + tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; else - tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; + tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; } else - tmp |= AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; + tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; } else if (mode1) { - tmp |= DC_LB_MEMORY_SPLIT_D1_ONLY; + tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY; } else if (mode2) { - tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; + tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; } - WREG32(DC_LB_MEMORY_SPLIT, tmp); + WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp); } struct rs690_watermark { @@ -487,28 +407,28 @@ void rs690_bandwidth_update(struct radeon_device *rdev) * option. */ if (rdev->disp_priority == 2) { - tmp = RREG32_MC(MC_INIT_MISC_LAT_TIMER); - tmp &= ~MC_DISP1R_INIT_LAT_MASK; - tmp &= ~MC_DISP0R_INIT_LAT_MASK; - if (mode1) - tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT); + tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER); + tmp &= C_000104_MC_DISP0R_INIT_LAT; + tmp &= C_000104_MC_DISP1R_INIT_LAT; if (mode0) - tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT); - WREG32_MC(MC_INIT_MISC_LAT_TIMER, tmp); + tmp |= S_000104_MC_DISP0R_INIT_LAT(1); + if (mode1) + tmp |= S_000104_MC_DISP1R_INIT_LAT(1); + WREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER, tmp); } rs690_line_buffer_adjust(rdev, mode0, mode1); if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) - WREG32(DCP_CONTROL, 0); + WREG32(R_006C9C_DCP_CONTROL, 0); if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) - WREG32(DCP_CONTROL, 2); + WREG32(R_006C9C_DCP_CONTROL, 2); rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0); rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1); tmp = (wm0.lb_request_fifo_depth - 1); tmp |= (wm1.lb_request_fifo_depth - 1) << 16; - WREG32(LB_MAX_REQ_OUTSTANDING, tmp); + WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp); if (mode0 && mode1) { if (rfixed_trunc(wm0.dbpp) > 64) @@ -561,10 +481,10 @@ void rs690_bandwidth_update(struct radeon_device *rdev) priority_mark12.full = 0; if (wm1.priority_mark_max.full > priority_mark12.full) priority_mark12.full = wm1.priority_mark_max.full; - WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); - WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); - WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); - WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); + WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); + WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); + WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); + WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); } else if (mode0) { if (rfixed_trunc(wm0.dbpp) > 64) a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair); @@ -591,10 +511,12 @@ void rs690_bandwidth_update(struct radeon_device *rdev) priority_mark02.full = 0; if (wm0.priority_mark_max.full > 
priority_mark02.full) priority_mark02.full = wm0.priority_mark_max.full; - WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); - WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); - WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); - WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); + WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); + WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); + WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, + S_006D48_D2MODE_PRIORITY_A_OFF(1)); + WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, + S_006D4C_D2MODE_PRIORITY_B_OFF(1)); } else { if (rfixed_trunc(wm1.dbpp) > 64) a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair); @@ -621,30 +543,203 @@ void rs690_bandwidth_update(struct radeon_device *rdev) priority_mark12.full = 0; if (wm1.priority_mark_max.full > priority_mark12.full) priority_mark12.full = wm1.priority_mark_max.full; - WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); - WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); - WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); - WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); + WREG32(R_006548_D1MODE_PRIORITY_A_CNT, + S_006548_D1MODE_PRIORITY_A_OFF(1)); + WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, + S_00654C_D1MODE_PRIORITY_B_OFF(1)); + WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); + WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); } } -/* - * Indirect registers accessor - */ uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg) { uint32_t r; - WREG32(RS690_MC_INDEX, (reg & RS690_MC_INDEX_MASK)); - r = RREG32(RS690_MC_DATA); - WREG32(RS690_MC_INDEX, RS690_MC_INDEX_MASK); + WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg)); + r = RREG32(R_00007C_MC_DATA); + WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR); return r; } void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) { - WREG32(RS690_MC_INDEX, - RS690_MC_INDEX_WR_EN | ((reg) & RS690_MC_INDEX_MASK)); - WREG32(RS690_MC_DATA, v); - WREG32(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK); + WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) | + S_000078_MC_IND_WR_EN(1)); + WREG32(R_00007C_MC_DATA, v); + WREG32(R_000078_MC_INDEX, 0x7F); +} + +void rs690_mc_program(struct radeon_device *rdev) +{ + struct rv515_mc_save save; + + /* Stops all mc clients */ + rv515_mc_stop(rdev, &save); + + /* Wait for mc idle */ + if (rs690_mc_wait_for_idle(rdev)) + dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); + /* Program MC, should be a 32bits limited address space */ + WREG32_MC(R_000100_MCCFG_FB_LOCATION, + S_000100_MC_FB_START(rdev->mc.vram_start >> 16) | + S_000100_MC_FB_TOP(rdev->mc.vram_end >> 16)); + WREG32(R_000134_HDP_FB_LOCATION, + S_000134_HDP_FB_START(rdev->mc.vram_start >> 16)); + + rv515_mc_resume(rdev, &save); +} + +static int rs690_startup(struct radeon_device *rdev) +{ + int r; + + rs690_mc_program(rdev); + /* Resume clock */ + rv515_clock_startup(rdev); + /* Initialize GPU configuration (# pipes, ...) 
*/ + rs690_gpu_init(rdev); + /* Initialize GART (initialize after TTM so we can allocate + * memory through TTM but finalize after TTM) */ + r = rs400_gart_enable(rdev); + if (r) + return r; + /* Enable IRQ */ + rdev->irq.sw_int = true; + rs600_irq_set(rdev); + /* 1M ring buffer */ + r = r100_cp_init(rdev, 1024 * 1024); + if (r) { + dev_err(rdev->dev, "failled initializing CP (%d).\n", r); + return r; + } + r = r100_wb_init(rdev); + if (r) + dev_err(rdev->dev, "failled initializing WB (%d).\n", r); + r = r100_ib_init(rdev); + if (r) { + dev_err(rdev->dev, "failled initializing IB (%d).\n", r); + return r; + } + return 0; +} + +int rs690_resume(struct radeon_device *rdev) +{ + /* Make sur GART are not working */ + rs400_gart_disable(rdev); + /* Resume clock before doing reset */ + rv515_clock_startup(rdev); + /* Reset gpu before posting otherwise ATOM will enter infinite loop */ + if (radeon_gpu_reset(rdev)) { + dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", + RREG32(R_000E40_RBBM_STATUS), + RREG32(R_0007C0_CP_STAT)); + } + /* post */ + atom_asic_init(rdev->mode_info.atom_context); + /* Resume clock after posting */ + rv515_clock_startup(rdev); + return rs690_startup(rdev); +} + +int rs690_suspend(struct radeon_device *rdev) +{ + r100_cp_disable(rdev); + r100_wb_disable(rdev); + rs600_irq_disable(rdev); + rs400_gart_disable(rdev); + return 0; +} + +void rs690_fini(struct radeon_device *rdev) +{ + rs690_suspend(rdev); + r100_cp_fini(rdev); + r100_wb_fini(rdev); + r100_ib_fini(rdev); + radeon_gem_fini(rdev); + rs400_gart_fini(rdev); + radeon_irq_kms_fini(rdev); + radeon_fence_driver_fini(rdev); + radeon_object_fini(rdev); + radeon_atombios_fini(rdev); + kfree(rdev->bios); + rdev->bios = NULL; +} + +int rs690_init(struct radeon_device *rdev) +{ + int r; + + /* Disable VGA */ + rv515_vga_render_disable(rdev); + /* Initialize scratch registers */ + radeon_scratch_init(rdev); + /* Initialize surface registers */ + radeon_surface_init(rdev); + /* TODO: disable VGA need to use VGA request */ + /* BIOS*/ + if (!radeon_get_bios(rdev)) { + if (ASIC_IS_AVIVO(rdev)) + return -EINVAL; + } + if (rdev->is_atom_bios) { + r = radeon_atombios_init(rdev); + if (r) + return r; + } else { + dev_err(rdev->dev, "Expecting atombios for RV515 GPU\n"); + return -EINVAL; + } + /* Reset gpu before posting otherwise ATOM will enter infinite loop */ + if (radeon_gpu_reset(rdev)) { + dev_warn(rdev->dev, + "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", + RREG32(R_000E40_RBBM_STATUS), + RREG32(R_0007C0_CP_STAT)); + } + /* check if cards are posted or not */ + if (!radeon_card_posted(rdev) && rdev->bios) { + DRM_INFO("GPU not posted. 
posting now...\n"); + atom_asic_init(rdev->mode_info.atom_context); + } + /* Initialize clocks */ + radeon_get_clock_info(rdev->ddev); + /* Get vram informations */ + rs690_vram_info(rdev); + /* Initialize memory controller (also test AGP) */ + r = r420_mc_init(rdev); + if (r) + return r; + rv515_debugfs(rdev); + /* Fence driver */ + r = radeon_fence_driver_init(rdev); + if (r) + return r; + r = radeon_irq_kms_init(rdev); + if (r) + return r; + /* Memory manager */ + r = radeon_object_init(rdev); + if (r) + return r; + r = rs400_gart_init(rdev); + if (r) + return r; + rs600_set_safe_registers(rdev); + rdev->accel_working = true; + r = rs690_startup(rdev); + if (r) { + /* Somethings want wront with the accel init stop accel */ + dev_err(rdev->dev, "Disabling GPU acceleration\n"); + rs690_suspend(rdev); + r100_cp_fini(rdev); + r100_wb_fini(rdev); + r100_ib_fini(rdev); + rs400_gart_fini(rdev); + radeon_irq_kms_fini(rdev); + rdev->accel_working = false; + } + return 0; } diff --git a/drivers/gpu/drm/radeon/rs690d.h b/drivers/gpu/drm/radeon/rs690d.h new file mode 100644 index 00000000000..62d31e7a897 --- /dev/null +++ b/drivers/gpu/drm/radeon/rs690d.h @@ -0,0 +1,307 @@ +/* + * Copyright 2008 Advanced Micro Devices, Inc. + * Copyright 2008 Red Hat Inc. + * Copyright 2009 Jerome Glisse. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Dave Airlie + * Alex Deucher + * Jerome Glisse + */ +#ifndef __RS690D_H__ +#define __RS690D_H__ + +/* Registers */ +#define R_000078_MC_INDEX 0x000078 +#define S_000078_MC_IND_ADDR(x) (((x) & 0x1FF) << 0) +#define G_000078_MC_IND_ADDR(x) (((x) >> 0) & 0x1FF) +#define C_000078_MC_IND_ADDR 0xFFFFFE00 +#define S_000078_MC_IND_WR_EN(x) (((x) & 0x1) << 9) +#define G_000078_MC_IND_WR_EN(x) (((x) >> 9) & 0x1) +#define C_000078_MC_IND_WR_EN 0xFFFFFDFF +#define R_00007C_MC_DATA 0x00007C +#define S_00007C_MC_DATA(x) (((x) & 0xFFFFFFFF) << 0) +#define G_00007C_MC_DATA(x) (((x) >> 0) & 0xFFFFFFFF) +#define C_00007C_MC_DATA 0x00000000 +#define R_0000F8_CONFIG_MEMSIZE 0x0000F8 +#define S_0000F8_CONFIG_MEMSIZE(x) (((x) & 0xFFFFFFFF) << 0) +#define G_0000F8_CONFIG_MEMSIZE(x) (((x) >> 0) & 0xFFFFFFFF) +#define C_0000F8_CONFIG_MEMSIZE 0x00000000 +#define R_000134_HDP_FB_LOCATION 0x000134 +#define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0) +#define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF) +#define C_000134_HDP_FB_START 0xFFFF0000 +#define R_0007C0_CP_STAT 0x0007C0 +#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0) +#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1) +#define C_0007C0_MRU_BUSY 0xFFFFFFFE +#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1) +#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1) +#define C_0007C0_MWU_BUSY 0xFFFFFFFD +#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2) +#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1) +#define C_0007C0_RSIU_BUSY 0xFFFFFFFB +#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3) +#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1) +#define C_0007C0_RCIU_BUSY 0xFFFFFFF7 +#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9) +#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1) +#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF +#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10) +#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1) +#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF +#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11) +#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1) +#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF +#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12) +#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1) +#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF +#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13) +#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1) +#define C_0007C0_CSI_BUSY 0xFFFFDFFF +#define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14) +#define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1) +#define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF +#define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15) +#define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1) +#define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF +#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28) +#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1) +#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF +#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29) +#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1) +#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF +#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30) +#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1) +#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF +#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31) +#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1) +#define C_0007C0_CP_BUSY 0x7FFFFFFF +#define R_000E40_RBBM_STATUS 0x000E40 +#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0) +#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F) +#define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80 +#define 
S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8) +#define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1) +#define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF +#define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9) +#define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1) +#define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF +#define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10) +#define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1) +#define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF +#define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11) +#define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1) +#define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF +#define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12) +#define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1) +#define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF +#define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13) +#define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1) +#define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF +#define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14) +#define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1) +#define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF +#define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15) +#define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1) +#define C_000E40_ENG_EV_BUSY 0xFFFF7FFF +#define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16) +#define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1) +#define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF +#define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17) +#define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1) +#define C_000E40_E2_BUSY 0xFFFDFFFF +#define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18) +#define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1) +#define C_000E40_RB2D_BUSY 0xFFFBFFFF +#define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19) +#define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1) +#define C_000E40_RB3D_BUSY 0xFFF7FFFF +#define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20) +#define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1) +#define C_000E40_VAP_BUSY 0xFFEFFFFF +#define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21) +#define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1) +#define C_000E40_RE_BUSY 0xFFDFFFFF +#define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22) +#define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1) +#define C_000E40_TAM_BUSY 0xFFBFFFFF +#define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23) +#define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1) +#define C_000E40_TDM_BUSY 0xFF7FFFFF +#define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24) +#define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1) +#define C_000E40_PB_BUSY 0xFEFFFFFF +#define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25) +#define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1) +#define C_000E40_TIM_BUSY 0xFDFFFFFF +#define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26) +#define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1) +#define C_000E40_GA_BUSY 0xFBFFFFFF +#define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27) +#define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1) +#define C_000E40_CBA2D_BUSY 0xF7FFFFFF +#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31) +#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) +#define C_000E40_GUI_ACTIVE 0x7FFFFFFF +#define R_006520_DC_LB_MEMORY_SPLIT 0x006520 +#define S_006520_DC_LB_MEMORY_SPLIT(x) (((x) & 0x3) << 0) +#define G_006520_DC_LB_MEMORY_SPLIT(x) (((x) >> 0) & 0x3) +#define C_006520_DC_LB_MEMORY_SPLIT 0xFFFFFFFC +#define S_006520_DC_LB_MEMORY_SPLIT_MODE(x) (((x) & 0x1) << 2) +#define G_006520_DC_LB_MEMORY_SPLIT_MODE(x) (((x) >> 2) & 0x1) +#define C_006520_DC_LB_MEMORY_SPLIT_MODE 0xFFFFFFFB +#define V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0 +#define V_006520_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1 +#define V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY 2 
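[Editorial aside, not part of the patch: rs690d.h and the other new *d.h headers in this series share one naming convention: for each field, S_<reg>_<field>(x) shifts a value into place, G_<reg>_<field>(x) extracts it, and C_<reg>_<field> is the AND-mask that clears it. A small self-contained sketch of the usual read-modify-write idiom built from those three, with the MC_INDEX macros copied from above; the register accessors are stubbed out here, whereas the real driver uses RREG32()/WREG32().]

#include <stdint.h>
#include <stdio.h>

/* Field macros copied verbatim from rs690d.h */
#define S_000078_MC_IND_ADDR(x)		(((x) & 0x1FF) << 0)
#define G_000078_MC_IND_ADDR(x)		(((x) >> 0) & 0x1FF)
#define C_000078_MC_IND_ADDR		0xFFFFFE00
#define S_000078_MC_IND_WR_EN(x)	(((x) & 0x1) << 9)

/* Stand-ins for the MMIO accessors, for illustration only */
static uint32_t fake_mc_index;
static uint32_t reg_read(void) { return fake_mc_index; }
static void reg_write(uint32_t v) { fake_mc_index = v; }

int main(void)
{
	uint32_t tmp;

	/* clear the field with C_, then OR in the new value with S_ */
	tmp = reg_read() & C_000078_MC_IND_ADDR;
	tmp |= S_000078_MC_IND_ADDR(0x100) | S_000078_MC_IND_WR_EN(1);
	reg_write(tmp);

	/* G_ pulls the field back out of the register value */
	printf("MC_IND_ADDR = 0x%03x\n", G_000078_MC_IND_ADDR(reg_read()));
	return 0;
}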
+#define V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3 +#define S_006520_DC_LB_DISP1_END_ADR(x) (((x) & 0x7FF) << 4) +#define G_006520_DC_LB_DISP1_END_ADR(x) (((x) >> 4) & 0x7FF) +#define C_006520_DC_LB_DISP1_END_ADR 0xFFFF800F +#define R_006548_D1MODE_PRIORITY_A_CNT 0x006548 +#define S_006548_D1MODE_PRIORITY_MARK_A(x) (((x) & 0x7FFF) << 0) +#define G_006548_D1MODE_PRIORITY_MARK_A(x) (((x) >> 0) & 0x7FFF) +#define C_006548_D1MODE_PRIORITY_MARK_A 0xFFFF8000 +#define S_006548_D1MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16) +#define G_006548_D1MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1) +#define C_006548_D1MODE_PRIORITY_A_OFF 0xFFFEFFFF +#define S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24) +#define G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1) +#define C_006548_D1MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF +#define R_00654C_D1MODE_PRIORITY_B_CNT 0x00654C +#define S_00654C_D1MODE_PRIORITY_MARK_B(x) (((x) & 0x7FFF) << 0) +#define G_00654C_D1MODE_PRIORITY_MARK_B(x) (((x) >> 0) & 0x7FFF) +#define C_00654C_D1MODE_PRIORITY_MARK_B 0xFFFF8000 +#define S_00654C_D1MODE_PRIORITY_B_OFF(x) (((x) & 0x1) << 16) +#define G_00654C_D1MODE_PRIORITY_B_OFF(x) (((x) >> 16) & 0x1) +#define C_00654C_D1MODE_PRIORITY_B_OFF 0xFFFEFFFF +#define S_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x) (((x) & 0x1) << 20) +#define G_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x) (((x) >> 20) & 0x1) +#define C_00654C_D1MODE_PRIORITY_B_ALWAYS_ON 0xFFEFFFFF +#define S_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x) (((x) & 0x1) << 24) +#define G_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1) +#define C_00654C_D1MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF +#define R_006C9C_DCP_CONTROL 0x006C9C +#define R_006D48_D2MODE_PRIORITY_A_CNT 0x006D48 +#define S_006D48_D2MODE_PRIORITY_MARK_A(x) (((x) & 0x7FFF) << 0) +#define G_006D48_D2MODE_PRIORITY_MARK_A(x) (((x) >> 0) & 0x7FFF) +#define C_006D48_D2MODE_PRIORITY_MARK_A 0xFFFF8000 +#define S_006D48_D2MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16) +#define G_006D48_D2MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1) +#define C_006D48_D2MODE_PRIORITY_A_OFF 0xFFFEFFFF +#define S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20) +#define G_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1) +#define C_006D48_D2MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF +#define S_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24) +#define G_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1) +#define C_006D48_D2MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF +#define R_006D4C_D2MODE_PRIORITY_B_CNT 0x006D4C +#define S_006D4C_D2MODE_PRIORITY_MARK_B(x) (((x) & 0x7FFF) << 0) +#define G_006D4C_D2MODE_PRIORITY_MARK_B(x) (((x) >> 0) & 0x7FFF) +#define C_006D4C_D2MODE_PRIORITY_MARK_B 0xFFFF8000 +#define S_006D4C_D2MODE_PRIORITY_B_OFF(x) (((x) & 0x1) << 16) +#define G_006D4C_D2MODE_PRIORITY_B_OFF(x) (((x) >> 16) & 0x1) +#define C_006D4C_D2MODE_PRIORITY_B_OFF 0xFFFEFFFF +#define S_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x) (((x) & 0x1) << 20) +#define G_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x) (((x) >> 20) & 0x1) +#define C_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON 0xFFEFFFFF +#define S_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) & 0x1) << 24) +#define G_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1) +#define C_006D4C_D2MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF +#define R_006D58_LB_MAX_REQ_OUTSTANDING 0x006D58 +#define S_006D58_LB_D1_MAX_REQ_OUTSTANDING(x) (((x) & 0xF) << 0) +#define G_006D58_LB_D1_MAX_REQ_OUTSTANDING(x) (((x) >> 0) & 0xF) +#define C_006D58_LB_D1_MAX_REQ_OUTSTANDING 0xFFFFFFF0 +#define 
S_006D58_LB_D2_MAX_REQ_OUTSTANDING(x) (((x) & 0xF) << 16) +#define G_006D58_LB_D2_MAX_REQ_OUTSTANDING(x) (((x) >> 16) & 0xF) +#define C_006D58_LB_D2_MAX_REQ_OUTSTANDING 0xFFF0FFFF + + +#define R_000090_MC_SYSTEM_STATUS 0x000090 +#define S_000090_MC_SYSTEM_IDLE(x) (((x) & 0x1) << 0) +#define G_000090_MC_SYSTEM_IDLE(x) (((x) >> 0) & 0x1) +#define C_000090_MC_SYSTEM_IDLE 0xFFFFFFFE +#define S_000090_MC_SEQUENCER_IDLE(x) (((x) & 0x1) << 1) +#define G_000090_MC_SEQUENCER_IDLE(x) (((x) >> 1) & 0x1) +#define C_000090_MC_SEQUENCER_IDLE 0xFFFFFFFD +#define S_000090_MC_ARBITER_IDLE(x) (((x) & 0x1) << 2) +#define G_000090_MC_ARBITER_IDLE(x) (((x) >> 2) & 0x1) +#define C_000090_MC_ARBITER_IDLE 0xFFFFFFFB +#define S_000090_MC_SELECT_PM(x) (((x) & 0x1) << 3) +#define G_000090_MC_SELECT_PM(x) (((x) >> 3) & 0x1) +#define C_000090_MC_SELECT_PM 0xFFFFFFF7 +#define S_000090_RESERVED4(x) (((x) & 0xF) << 4) +#define G_000090_RESERVED4(x) (((x) >> 4) & 0xF) +#define C_000090_RESERVED4 0xFFFFFF0F +#define S_000090_RESERVED8(x) (((x) & 0xF) << 8) +#define G_000090_RESERVED8(x) (((x) >> 8) & 0xF) +#define C_000090_RESERVED8 0xFFFFF0FF +#define S_000090_RESERVED12(x) (((x) & 0xF) << 12) +#define G_000090_RESERVED12(x) (((x) >> 12) & 0xF) +#define C_000090_RESERVED12 0xFFFF0FFF +#define S_000090_MCA_INIT_EXECUTED(x) (((x) & 0x1) << 16) +#define G_000090_MCA_INIT_EXECUTED(x) (((x) >> 16) & 0x1) +#define C_000090_MCA_INIT_EXECUTED 0xFFFEFFFF +#define S_000090_MCA_IDLE(x) (((x) & 0x1) << 17) +#define G_000090_MCA_IDLE(x) (((x) >> 17) & 0x1) +#define C_000090_MCA_IDLE 0xFFFDFFFF +#define S_000090_MCA_SEQ_IDLE(x) (((x) & 0x1) << 18) +#define G_000090_MCA_SEQ_IDLE(x) (((x) >> 18) & 0x1) +#define C_000090_MCA_SEQ_IDLE 0xFFFBFFFF +#define S_000090_MCA_ARB_IDLE(x) (((x) & 0x1) << 19) +#define G_000090_MCA_ARB_IDLE(x) (((x) >> 19) & 0x1) +#define C_000090_MCA_ARB_IDLE 0xFFF7FFFF +#define S_000090_RESERVED20(x) (((x) & 0xFFF) << 20) +#define G_000090_RESERVED20(x) (((x) >> 20) & 0xFFF) +#define C_000090_RESERVED20 0x000FFFFF +#define R_000100_MCCFG_FB_LOCATION 0x000100 +#define S_000100_MC_FB_START(x) (((x) & 0xFFFF) << 0) +#define G_000100_MC_FB_START(x) (((x) >> 0) & 0xFFFF) +#define C_000100_MC_FB_START 0xFFFF0000 +#define S_000100_MC_FB_TOP(x) (((x) & 0xFFFF) << 16) +#define G_000100_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF) +#define C_000100_MC_FB_TOP 0x0000FFFF +#define R_000104_MC_INIT_MISC_LAT_TIMER 0x000104 +#define S_000104_MC_CPR_INIT_LAT(x) (((x) & 0xF) << 0) +#define G_000104_MC_CPR_INIT_LAT(x) (((x) >> 0) & 0xF) +#define C_000104_MC_CPR_INIT_LAT 0xFFFFFFF0 +#define S_000104_MC_VF_INIT_LAT(x) (((x) & 0xF) << 4) +#define G_000104_MC_VF_INIT_LAT(x) (((x) >> 4) & 0xF) +#define C_000104_MC_VF_INIT_LAT 0xFFFFFF0F +#define S_000104_MC_DISP0R_INIT_LAT(x) (((x) & 0xF) << 8) +#define G_000104_MC_DISP0R_INIT_LAT(x) (((x) >> 8) & 0xF) +#define C_000104_MC_DISP0R_INIT_LAT 0xFFFFF0FF +#define S_000104_MC_DISP1R_INIT_LAT(x) (((x) & 0xF) << 12) +#define G_000104_MC_DISP1R_INIT_LAT(x) (((x) >> 12) & 0xF) +#define C_000104_MC_DISP1R_INIT_LAT 0xFFFF0FFF +#define S_000104_MC_FIXED_INIT_LAT(x) (((x) & 0xF) << 16) +#define G_000104_MC_FIXED_INIT_LAT(x) (((x) >> 16) & 0xF) +#define C_000104_MC_FIXED_INIT_LAT 0xFFF0FFFF +#define S_000104_MC_E2R_INIT_LAT(x) (((x) & 0xF) << 20) +#define G_000104_MC_E2R_INIT_LAT(x) (((x) >> 20) & 0xF) +#define C_000104_MC_E2R_INIT_LAT 0xFF0FFFFF +#define S_000104_SAME_PAGE_PRIO(x) (((x) & 0xF) << 24) +#define G_000104_SAME_PAGE_PRIO(x) (((x) >> 24) & 0xF) +#define C_000104_SAME_PAGE_PRIO 0xF0FFFFFF +#define 
S_000104_MC_GLOBW_INIT_LAT(x) (((x) & 0xF) << 28) +#define G_000104_MC_GLOBW_INIT_LAT(x) (((x) >> 28) & 0xF) +#define C_000104_MC_GLOBW_INIT_LAT 0x0FFFFFFF + +#endif diff --git a/drivers/gpu/drm/radeon/rs690r.h b/drivers/gpu/drm/radeon/rs690r.h deleted file mode 100644 index c0d9faa2175..00000000000 --- a/drivers/gpu/drm/radeon/rs690r.h +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright 2008 Advanced Micro Devices, Inc. - * Copyright 2008 Red Hat Inc. - * Copyright 2009 Jerome Glisse. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Dave Airlie - * Alex Deucher - * Jerome Glisse - */ -#ifndef RS690R_H -#define RS690R_H - -/* RS690/RS740 registers */ -#define MC_INDEX 0x0078 -# define MC_INDEX_MASK 0x1FF -# define MC_INDEX_WR_EN (1 << 9) -# define MC_INDEX_WR_ACK 0x7F -#define MC_DATA 0x007C -#define HDP_FB_LOCATION 0x0134 -#define DC_LB_MEMORY_SPLIT 0x6520 -#define DC_LB_MEMORY_SPLIT_MASK 0x00000003 -#define DC_LB_MEMORY_SPLIT_SHIFT 0 -#define DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0 -#define DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1 -#define DC_LB_MEMORY_SPLIT_D1_ONLY 2 -#define DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3 -#define DC_LB_MEMORY_SPLIT_SHIFT_MODE (1 << 2) -#define DC_LB_DISP1_END_ADR_SHIFT 4 -#define DC_LB_DISP1_END_ADR_MASK 0x00007FF0 -#define D1MODE_PRIORITY_A_CNT 0x6548 -#define MODE_PRIORITY_MARK_MASK 0x00007FFF -#define MODE_PRIORITY_OFF (1 << 16) -#define MODE_PRIORITY_ALWAYS_ON (1 << 20) -#define MODE_PRIORITY_FORCE_MASK (1 << 24) -#define D1MODE_PRIORITY_B_CNT 0x654C -#define LB_MAX_REQ_OUTSTANDING 0x6D58 -#define LB_D1_MAX_REQ_OUTSTANDING_MASK 0x0000000F -#define LB_D1_MAX_REQ_OUTSTANDING_SHIFT 0 -#define LB_D2_MAX_REQ_OUTSTANDING_MASK 0x000F0000 -#define LB_D2_MAX_REQ_OUTSTANDING_SHIFT 16 -#define DCP_CONTROL 0x6C9C -#define D2MODE_PRIORITY_A_CNT 0x6D48 -#define D2MODE_PRIORITY_B_CNT 0x6D4C - -/* MC indirect registers */ -#define MC_STATUS_IDLE (1 << 0) -#define MC_MISC_CNTL 0x18 -#define DISABLE_GTW (1 << 1) -#define GART_INDEX_REG_EN (1 << 12) -#define BLOCK_GFX_D3_EN (1 << 14) -#define GART_FEATURE_ID 0x2B -#define HANG_EN (1 << 11) -#define TLB_ENABLE (1 << 18) -#define P2P_ENABLE (1 << 19) -#define GTW_LAC_EN (1 << 25) -#define LEVEL2_GART (0 << 30) -#define LEVEL1_GART (1 << 30) -#define PDC_EN (1 << 31) -#define GART_BASE 0x2C -#define GART_CACHE_CNTRL 0x2E -# define GART_CACHE_INVALIDATE (1 << 0) -#define MC_STATUS 0x90 -#define MCCFG_FB_LOCATION 0x100 -#define MC_FB_START_MASK 0x0000FFFF -#define MC_FB_START_SHIFT 0 -#define MC_FB_TOP_MASK 
0xFFFF0000 -#define MC_FB_TOP_SHIFT 16 -#define MCCFG_AGP_LOCATION 0x101 -#define MC_AGP_START_MASK 0x0000FFFF -#define MC_AGP_START_SHIFT 0 -#define MC_AGP_TOP_MASK 0xFFFF0000 -#define MC_AGP_TOP_SHIFT 16 -#define MCCFG_AGP_BASE 0x102 -#define MCCFG_AGP_BASE_2 0x103 -#define MC_INIT_MISC_LAT_TIMER 0x104 -#define MC_DISP0R_INIT_LAT_SHIFT 8 -#define MC_DISP0R_INIT_LAT_MASK 0x00000F00 -#define MC_DISP1R_INIT_LAT_SHIFT 12 -#define MC_DISP1R_INIT_LAT_MASK 0x0000F000 - -#endif diff --git a/drivers/gpu/drm/radeon/rv200d.h b/drivers/gpu/drm/radeon/rv200d.h new file mode 100644 index 00000000000..c5b398330c2 --- /dev/null +++ b/drivers/gpu/drm/radeon/rv200d.h @@ -0,0 +1,36 @@ +/* + * Copyright 2008 Advanced Micro Devices, Inc. + * Copyright 2008 Red Hat Inc. + * Copyright 2009 Jerome Glisse. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Dave Airlie + * Alex Deucher + * Jerome Glisse + */ +#ifndef __RV200D_H__ +#define __RV200D_H__ + +#define R_00015C_AGP_BASE_2 0x00015C +#define S_00015C_AGP_BASE_ADDR_2(x) (((x) & 0xF) << 0) +#define G_00015C_AGP_BASE_ADDR_2(x) (((x) >> 0) & 0xF) +#define C_00015C_AGP_BASE_ADDR_2 0xFFFFFFF0 + +#endif diff --git a/drivers/gpu/drm/radeon/rv250d.h b/drivers/gpu/drm/radeon/rv250d.h new file mode 100644 index 00000000000..e5a70b06fe1 --- /dev/null +++ b/drivers/gpu/drm/radeon/rv250d.h @@ -0,0 +1,123 @@ +/* + * Copyright 2008 Advanced Micro Devices, Inc. + * Copyright 2008 Red Hat Inc. + * Copyright 2009 Jerome Glisse. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Dave Airlie + * Alex Deucher + * Jerome Glisse + */ +#ifndef __RV250D_H__ +#define __RV250D_H__ + +#define R_00000D_SCLK_CNTL_M6 0x00000D +#define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0) +#define G_00000D_SCLK_SRC_SEL(x) (((x) >> 0) & 0x7) +#define C_00000D_SCLK_SRC_SEL 0xFFFFFFF8 +#define S_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 3) +#define G_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) >> 3) & 0x1) +#define C_00000D_CP_MAX_DYN_STOP_LAT 0xFFFFFFF7 +#define S_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 4) +#define G_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) >> 4) & 0x1) +#define C_00000D_HDP_MAX_DYN_STOP_LAT 0xFFFFFFEF +#define S_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 5) +#define G_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) >> 5) & 0x1) +#define C_00000D_TV_MAX_DYN_STOP_LAT 0xFFFFFFDF +#define S_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 6) +#define G_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) >> 6) & 0x1) +#define C_00000D_E2_MAX_DYN_STOP_LAT 0xFFFFFFBF +#define S_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 7) +#define G_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) >> 7) & 0x1) +#define C_00000D_SE_MAX_DYN_STOP_LAT 0xFFFFFF7F +#define S_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 8) +#define G_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) >> 8) & 0x1) +#define C_00000D_IDCT_MAX_DYN_STOP_LAT 0xFFFFFEFF +#define S_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 9) +#define G_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) >> 9) & 0x1) +#define C_00000D_VIP_MAX_DYN_STOP_LAT 0xFFFFFDFF +#define S_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 10) +#define G_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) >> 10) & 0x1) +#define C_00000D_RE_MAX_DYN_STOP_LAT 0xFFFFFBFF +#define S_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 11) +#define G_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) >> 11) & 0x1) +#define C_00000D_PB_MAX_DYN_STOP_LAT 0xFFFFF7FF +#define S_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 12) +#define G_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) >> 12) & 0x1) +#define C_00000D_TAM_MAX_DYN_STOP_LAT 0xFFFFEFFF +#define S_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 13) +#define G_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) >> 13) & 0x1) +#define C_00000D_TDM_MAX_DYN_STOP_LAT 0xFFFFDFFF +#define S_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 14) +#define G_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) >> 14) & 0x1) +#define C_00000D_RB_MAX_DYN_STOP_LAT 0xFFFFBFFF +#define S_00000D_FORCE_DISP2(x) (((x) & 0x1) << 15) +#define G_00000D_FORCE_DISP2(x) (((x) >> 15) & 0x1) +#define C_00000D_FORCE_DISP2 0xFFFF7FFF +#define S_00000D_FORCE_CP(x) (((x) & 0x1) << 16) +#define G_00000D_FORCE_CP(x) (((x) >> 16) & 0x1) +#define C_00000D_FORCE_CP 0xFFFEFFFF +#define S_00000D_FORCE_HDP(x) (((x) & 0x1) << 17) +#define G_00000D_FORCE_HDP(x) (((x) >> 17) & 0x1) +#define C_00000D_FORCE_HDP 0xFFFDFFFF +#define S_00000D_FORCE_DISP1(x) (((x) & 0x1) << 18) +#define G_00000D_FORCE_DISP1(x) (((x) >> 18) & 0x1) +#define C_00000D_FORCE_DISP1 0xFFFBFFFF +#define S_00000D_FORCE_TOP(x) (((x) & 0x1) << 19) +#define G_00000D_FORCE_TOP(x) (((x) >> 19) & 0x1) +#define C_00000D_FORCE_TOP 0xFFF7FFFF +#define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20) +#define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1) +#define C_00000D_FORCE_E2 0xFFEFFFFF +#define S_00000D_FORCE_SE(x) (((x) & 0x1) << 21) +#define 
G_00000D_FORCE_SE(x) (((x) >> 21) & 0x1) +#define C_00000D_FORCE_SE 0xFFDFFFFF +#define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22) +#define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1) +#define C_00000D_FORCE_IDCT 0xFFBFFFFF +#define S_00000D_FORCE_VIP(x) (((x) & 0x1) << 23) +#define G_00000D_FORCE_VIP(x) (((x) >> 23) & 0x1) +#define C_00000D_FORCE_VIP 0xFF7FFFFF +#define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24) +#define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1) +#define C_00000D_FORCE_RE 0xFEFFFFFF +#define S_00000D_FORCE_PB(x) (((x) & 0x1) << 25) +#define G_00000D_FORCE_PB(x) (((x) >> 25) & 0x1) +#define C_00000D_FORCE_PB 0xFDFFFFFF +#define S_00000D_FORCE_TAM(x) (((x) & 0x1) << 26) +#define G_00000D_FORCE_TAM(x) (((x) >> 26) & 0x1) +#define C_00000D_FORCE_TAM 0xFBFFFFFF +#define S_00000D_FORCE_TDM(x) (((x) & 0x1) << 27) +#define G_00000D_FORCE_TDM(x) (((x) >> 27) & 0x1) +#define C_00000D_FORCE_TDM 0xF7FFFFFF +#define S_00000D_FORCE_RB(x) (((x) & 0x1) << 28) +#define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1) +#define C_00000D_FORCE_RB 0xEFFFFFFF +#define S_00000D_FORCE_TV_SCLK(x) (((x) & 0x1) << 29) +#define G_00000D_FORCE_TV_SCLK(x) (((x) >> 29) & 0x1) +#define C_00000D_FORCE_TV_SCLK 0xDFFFFFFF +#define S_00000D_FORCE_SUBPIC(x) (((x) & 0x1) << 30) +#define G_00000D_FORCE_SUBPIC(x) (((x) >> 30) & 0x1) +#define C_00000D_FORCE_SUBPIC 0xBFFFFFFF +#define S_00000D_FORCE_OV0(x) (((x) & 0x1) << 31) +#define G_00000D_FORCE_OV0(x) (((x) >> 31) & 0x1) +#define C_00000D_FORCE_OV0 0x7FFFFFFF + +#endif diff --git a/drivers/gpu/drm/radeon/rv350d.h b/drivers/gpu/drm/radeon/rv350d.h new file mode 100644 index 00000000000..c75c5ed9e65 --- /dev/null +++ b/drivers/gpu/drm/radeon/rv350d.h @@ -0,0 +1,52 @@ +/* + * Copyright 2008 Advanced Micro Devices, Inc. + * Copyright 2008 Red Hat Inc. + * Copyright 2009 Jerome Glisse. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Dave Airlie + * Alex Deucher + * Jerome Glisse + */ +#ifndef __RV350D_H__ +#define __RV350D_H__ + +/* RV350, RV380 registers */ +/* #define R_00000D_SCLK_CNTL 0x00000D */ +#define S_00000D_FORCE_VAP(x) (((x) & 0x1) << 21) +#define G_00000D_FORCE_VAP(x) (((x) >> 21) & 0x1) +#define C_00000D_FORCE_VAP 0xFFDFFFFF +#define S_00000D_FORCE_SR(x) (((x) & 0x1) << 25) +#define G_00000D_FORCE_SR(x) (((x) >> 25) & 0x1) +#define C_00000D_FORCE_SR 0xFDFFFFFF +#define S_00000D_FORCE_PX(x) (((x) & 0x1) << 26) +#define G_00000D_FORCE_PX(x) (((x) >> 26) & 0x1) +#define C_00000D_FORCE_PX 0xFBFFFFFF +#define S_00000D_FORCE_TX(x) (((x) & 0x1) << 27) +#define G_00000D_FORCE_TX(x) (((x) >> 27) & 0x1) +#define C_00000D_FORCE_TX 0xF7FFFFFF +#define S_00000D_FORCE_US(x) (((x) & 0x1) << 28) +#define G_00000D_FORCE_US(x) (((x) >> 28) & 0x1) +#define C_00000D_FORCE_US 0xEFFFFFFF +#define S_00000D_FORCE_SU(x) (((x) & 0x1) << 30) +#define G_00000D_FORCE_SU(x) (((x) >> 30) & 0x1) +#define C_00000D_FORCE_SU 0xBFFFFFFF + +#endif diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index e53b5ca7a25..41a34c23e6d 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -478,7 +478,7 @@ static int rv515_startup(struct radeon_device *rdev) } /* Enable IRQ */ rdev->irq.sw_int = true; - r100_irq_set(rdev); + rs600_irq_set(rdev); /* 1M ring buffer */ r = r100_cp_init(rdev, 1024 * 1024); if (r) { @@ -520,7 +520,7 @@ int rv515_suspend(struct radeon_device *rdev) { r100_cp_disable(rdev); r100_wb_disable(rdev); - r100_irq_disable(rdev); + rs600_irq_disable(rdev); if (rdev->flags & RADEON_IS_PCIE) rv370_pcie_gart_disable(rdev); return 0; @@ -553,7 +553,6 @@ int rv515_init(struct radeon_device *rdev) { int r; - rdev->new_init_path = true; /* Initialize scratch registers */ radeon_scratch_init(rdev); /* Initialize surface registers */ diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index e0b97d16139..595ac638039 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -75,7 +75,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev) WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); - WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end - 1) >> 12); + WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); @@ -126,17 +126,36 @@ void rv770_pcie_gart_fini(struct radeon_device *rdev) } -/* - * MC - */ -static void rv770_mc_resume(struct radeon_device *rdev) +void rv770_agp_enable(struct radeon_device *rdev) { - u32 d1vga_control, d2vga_control; - u32 vga_render_control, vga_hdp_control; - u32 d1crtc_control, d2crtc_control; - u32 new_d1grph_primary, new_d1grph_secondary; - u32 new_d2grph_primary, new_d2grph_secondary; - u64 old_vram_start; + u32 tmp; + int i; + + /* Setup L2 cache */ + WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | + ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | + EFFECTIVE_L2_QUEUE_SIZE(7)); + WREG32(VM_L2_CNTL2, 0); + WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2)); + /* Setup TLB control */ + tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING | + SYSTEM_ACCESS_MODE_NOT_IN_SYS | + SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU | + EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5); + 
WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp); + WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp); + WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp); + WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp); + WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); + WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); + WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); + for (i = 0; i < 7; i++) + WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); +} + +static void rv770_mc_program(struct radeon_device *rdev) +{ + struct rv515_mc_save save; u32 tmp; int i, j; @@ -150,53 +169,42 @@ static void rv770_mc_resume(struct radeon_device *rdev) } WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0); - d1vga_control = RREG32(D1VGA_CONTROL); - d2vga_control = RREG32(D2VGA_CONTROL); - vga_render_control = RREG32(VGA_RENDER_CONTROL); - vga_hdp_control = RREG32(VGA_HDP_CONTROL); - d1crtc_control = RREG32(D1CRTC_CONTROL); - d2crtc_control = RREG32(D2CRTC_CONTROL); - old_vram_start = (u64)(RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24; - new_d1grph_primary = RREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS); - new_d1grph_secondary = RREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS); - new_d1grph_primary += rdev->mc.vram_start - old_vram_start; - new_d1grph_secondary += rdev->mc.vram_start - old_vram_start; - new_d2grph_primary = RREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS); - new_d2grph_secondary = RREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS); - new_d2grph_primary += rdev->mc.vram_start - old_vram_start; - new_d2grph_secondary += rdev->mc.vram_start - old_vram_start; - - /* Stop all video */ - WREG32(D1VGA_CONTROL, 0); - WREG32(D2VGA_CONTROL, 0); - WREG32(VGA_RENDER_CONTROL, 0); - WREG32(D1CRTC_UPDATE_LOCK, 1); - WREG32(D2CRTC_UPDATE_LOCK, 1); - WREG32(D1CRTC_CONTROL, 0); - WREG32(D2CRTC_CONTROL, 0); - WREG32(D1CRTC_UPDATE_LOCK, 0); - WREG32(D2CRTC_UPDATE_LOCK, 0); - - mdelay(1); + rv515_mc_stop(rdev, &save); if (r600_mc_wait_for_idle(rdev)) { - printk(KERN_WARNING "[drm] MC not idle !\n"); + dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); } - /* Lockout access through VGA aperture*/ WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE); - /* Update configuration */ - WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12); - WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, (rdev->mc.vram_end - 1) >> 12); + if (rdev->flags & RADEON_IS_AGP) { + if (rdev->mc.vram_start < rdev->mc.gtt_start) { + /* VRAM before AGP */ + WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, + rdev->mc.vram_start >> 12); + WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, + rdev->mc.gtt_end >> 12); + } else { + /* VRAM after AGP */ + WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, + rdev->mc.gtt_start >> 12); + WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, + rdev->mc.vram_end >> 12); + } + } else { + WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, + rdev->mc.vram_start >> 12); + WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, + rdev->mc.vram_end >> 12); + } WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); - tmp = (((rdev->mc.vram_end - 1) >> 24) & 0xFFFF) << 16; + tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16; tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); WREG32(MC_VM_FB_LOCATION, tmp); WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); WREG32(HDP_NONSURFACE_INFO, (2 << 7)); WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF); if (rdev->flags & RADEON_IS_AGP) { - WREG32(MC_VM_AGP_TOP, (rdev->mc.gtt_end - 1) >> 16); + WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16); WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22); } else { @@ -204,31 +212,10 @@ static void rv770_mc_resume(struct radeon_device *rdev) WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF); WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF); } - 
WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS, new_d1grph_primary); - WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS, new_d1grph_secondary); - WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS, new_d2grph_primary); - WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS, new_d2grph_secondary); - WREG32(VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start); - - /* Unlock host access */ - WREG32(VGA_HDP_CONTROL, vga_hdp_control); - - mdelay(1); if (r600_mc_wait_for_idle(rdev)) { - printk(KERN_WARNING "[drm] MC not idle !\n"); + dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); } - - /* Restore video state */ - WREG32(D1CRTC_UPDATE_LOCK, 1); - WREG32(D2CRTC_UPDATE_LOCK, 1); - WREG32(D1CRTC_CONTROL, d1crtc_control); - WREG32(D2CRTC_CONTROL, d2crtc_control); - WREG32(D1CRTC_UPDATE_LOCK, 0); - WREG32(D2CRTC_UPDATE_LOCK, 0); - WREG32(D1VGA_CONTROL, d1vga_control); - WREG32(D2VGA_CONTROL, d2vga_control); - WREG32(VGA_RENDER_CONTROL, vga_render_control); - + rv515_mc_resume(rdev, &save); /* we need to own VRAM, so turn off the VGA renderer here * to stop it overwriting our objects */ rv515_vga_render_disable(rdev); @@ -840,9 +827,9 @@ int rv770_mc_init(struct radeon_device *rdev) rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; } rdev->mc.vram_start = rdev->mc.vram_location; - rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size; + rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; rdev->mc.gtt_start = rdev->mc.gtt_location; - rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size; + rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; /* FIXME: we should enforce default clock in case GPU is not in * default setup */ @@ -861,11 +848,14 @@ static int rv770_startup(struct radeon_device *rdev) { int r; - radeon_gpu_reset(rdev); - rv770_mc_resume(rdev); - r = rv770_pcie_gart_enable(rdev); - if (r) - return r; + rv770_mc_program(rdev); + if (rdev->flags & RADEON_IS_AGP) { + rv770_agp_enable(rdev); + } else { + r = rv770_pcie_gart_enable(rdev); + if (r) + return r; + } rv770_gpu_init(rdev); r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, @@ -884,9 +874,8 @@ static int rv770_startup(struct radeon_device *rdev) r = r600_cp_resume(rdev); if (r) return r; - r = r600_wb_init(rdev); - if (r) - return r; + /* write back buffer are not vital so don't worry about failure */ + r600_wb_enable(rdev); return 0; } @@ -894,15 +883,12 @@ int rv770_resume(struct radeon_device *rdev) { int r; - if (radeon_gpu_reset(rdev)) { - /* FIXME: what do we want to do here ? */ - } + /* Do not reset GPU before posting, on rv770 hw unlike on r500 hw, + * posting will perform necessary task to bring back GPU into good + * shape. 
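[Editorial aside, not part of the patch: the rv770_mc_init hunk above changes vram_end/gtt_end from one-past-the-end to the address of the last byte, which is why the aperture, FB-location and AGP-top writes earlier in the file drop their explicit "- 1". A tiny arithmetic check, under the assumption of a 256MB VRAM aperture starting at 0, showing that the programmed value is unchanged:]

#include <stdint.h>
#include <assert.h>
#include <stdio.h>

int main(void)
{
	uint64_t vram_location = 0;
	uint64_t mc_vram_size  = 256ULL << 20;	/* assumed 256MB, for illustration */

	/* old convention: exclusive end, "- 1" applied at each register write */
	uint64_t old_end  = vram_location + mc_vram_size;
	uint32_t old_high = (uint32_t)((old_end - 1) >> 12);

	/* new convention: inclusive end, "- 1" applied once in rv770_mc_init() */
	uint64_t new_end  = vram_location + mc_vram_size - 1;
	uint32_t new_high = (uint32_t)(new_end >> 12);

	assert(old_high == new_high);
	printf("MC_VM_SYSTEM_APERTURE_HIGH_ADDR = 0x%08x either way\n", new_high);
	return 0;
}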
+ */ /* post card */ - if (rdev->is_atom_bios) { - atom_asic_init(rdev->mode_info.atom_context); - } else { - radeon_combios_asic_init(rdev->ddev); - } + atom_asic_init(rdev->mode_info.atom_context); /* Initialize clocks */ r = radeon_clocks_init(rdev); if (r) { @@ -915,7 +901,7 @@ int rv770_resume(struct radeon_device *rdev) return r; } - r = radeon_ib_test(rdev); + r = r600_ib_test(rdev); if (r) { DRM_ERROR("radeon: failled testing IB (%d).\n", r); return r; @@ -929,8 +915,8 @@ int rv770_suspend(struct radeon_device *rdev) /* FIXME: we should wait for ring to be empty */ r700_cp_stop(rdev); rdev->cp.ready = false; + r600_wb_disable(rdev); rv770_pcie_gart_disable(rdev); - /* unpin shaders bo */ radeon_object_unpin(rdev->r600_blit.shader_obj); return 0; @@ -946,7 +932,6 @@ int rv770_init(struct radeon_device *rdev) { int r; - rdev->new_init_path = true; r = radeon_dummy_page_init(rdev); if (r) return r; @@ -960,8 +945,10 @@ int rv770_init(struct radeon_device *rdev) return -EINVAL; } /* Must be an ATOMBIOS */ - if (!rdev->is_atom_bios) + if (!rdev->is_atom_bios) { + dev_err(rdev->dev, "Expecting atombios for R600 GPU\n"); return -EINVAL; + } r = radeon_atombios_init(rdev); if (r) return r; @@ -983,15 +970,8 @@ int rv770_init(struct radeon_device *rdev) if (r) return r; r = rv770_mc_init(rdev); - if (r) { - if (rdev->flags & RADEON_IS_AGP) { - /* Retry with disabling AGP */ - rv770_fini(rdev); - rdev->flags &= ~RADEON_IS_AGP; - return rv770_init(rdev); - } + if (r) return r; - } /* Memory manager */ r = radeon_object_init(rdev); if (r) @@ -1020,12 +1000,10 @@ int rv770_init(struct radeon_device *rdev) r = rv770_startup(rdev); if (r) { - if (rdev->flags & RADEON_IS_AGP) { - /* Retry with disabling AGP */ - rv770_fini(rdev); - rdev->flags &= ~RADEON_IS_AGP; - return rv770_init(rdev); - } + rv770_suspend(rdev); + r600_wb_fini(rdev); + radeon_ring_fini(rdev); + rv770_pcie_gart_fini(rdev); rdev->accel_working = false; } if (rdev->accel_working) { @@ -1034,7 +1012,7 @@ int rv770_init(struct radeon_device *rdev) DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r); rdev->accel_working = false; } - r = radeon_ib_test(rdev); + r = r600_ib_test(rdev); if (r) { DRM_ERROR("radeon: failled testing IB (%d).\n", r); rdev->accel_working = false; @@ -1049,20 +1027,15 @@ void rv770_fini(struct radeon_device *rdev) r600_blit_fini(rdev); radeon_ring_fini(rdev); + r600_wb_fini(rdev); rv770_pcie_gart_fini(rdev); radeon_gem_fini(rdev); radeon_fence_driver_fini(rdev); radeon_clocks_fini(rdev); -#if __OS_HAS_AGP if (rdev->flags & RADEON_IS_AGP) radeon_agp_fini(rdev); -#endif radeon_object_fini(rdev); - if (rdev->is_atom_bios) { - radeon_atombios_fini(rdev); - } else { - radeon_combios_fini(rdev); - } + radeon_atombios_fini(rdev); kfree(rdev->bios); rdev->bios = NULL; radeon_dummy_page_fini(rdev); diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c index 541744d00d3..b17007178a3 100644 --- a/drivers/gpu/drm/ttm/ttm_global.c +++ b/drivers/gpu/drm/ttm/ttm_global.c @@ -82,8 +82,8 @@ int ttm_global_item_ref(struct ttm_global_reference *ref) if (unlikely(ret != 0)) goto out_err; - ++item->refcount; } + ++item->refcount; ref->object = item->object; object = item->object; mutex_unlock(&item->mutex); diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index 0c6639ea03d..ba05275e510 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c @@ -30,6 +30,7 @@ #include <linux/major.h> #include <linux/hid.h> #include <linux/mutex.h> +#include <linux/sched.h> #include 
<linux/smp_lock.h> #include <linux/hidraw.h> diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c index ea955edde87..2a7a85a6dc3 100644 --- a/drivers/hwmon/fschmd.c +++ b/drivers/hwmon/fschmd.c @@ -915,7 +915,7 @@ static int watchdog_ioctl(struct inode *inode, struct file *filp, return ret; } -static struct file_operations watchdog_fops = { +static const struct file_operations watchdog_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .open = watchdog_open, diff --git a/drivers/hwmon/lis3lv02d_spi.c b/drivers/hwmon/lis3lv02d_spi.c index ecd739534f6..82b16808a27 100644 --- a/drivers/hwmon/lis3lv02d_spi.c +++ b/drivers/hwmon/lis3lv02d_spi.c @@ -83,7 +83,8 @@ static int __devexit lis302dl_spi_remove(struct spi_device *spi) struct lis3lv02d *lis3 = spi_get_drvdata(spi); lis3lv02d_joystick_disable(); lis3lv02d_poweroff(lis3); - return 0; + + return lis3lv02d_remove_fs(&lis3_dev); } #ifdef CONFIG_PM diff --git a/drivers/hwmon/ltc4215.c b/drivers/hwmon/ltc4215.c index 6c9a04136e0..00d975eb5b8 100644 --- a/drivers/hwmon/ltc4215.c +++ b/drivers/hwmon/ltc4215.c @@ -20,11 +20,6 @@ #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> -static const unsigned short normal_i2c[] = { I2C_CLIENT_END }; - -/* Insmod parameters */ -I2C_CLIENT_INSMOD_1(ltc4215); - /* Here are names of the chip's registers (a.k.a. commands) */ enum ltc4215_cmd { LTC4215_CONTROL = 0x00, /* rw */ @@ -246,9 +241,13 @@ static const struct attribute_group ltc4215_group = { static int ltc4215_probe(struct i2c_client *client, const struct i2c_device_id *id) { + struct i2c_adapter *adapter = client->adapter; struct ltc4215_data *data; int ret; + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) + return -ENODEV; + data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) { ret = -ENOMEM; @@ -294,56 +293,20 @@ static int ltc4215_remove(struct i2c_client *client) return 0; } -static int ltc4215_detect(struct i2c_client *client, - int kind, - struct i2c_board_info *info) -{ - struct i2c_adapter *adapter = client->adapter; - - if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) - return -ENODEV; - - if (kind < 0) { /* probed detection - check the chip type */ - s32 v; /* 8 bits from the chip, or -ERRNO */ - - /* - * Register 0x01 bit b7 is reserved, expect 0 - * Register 0x03 bit b6 and b7 are reserved, expect 0 - */ - v = i2c_smbus_read_byte_data(client, LTC4215_ALERT); - if (v < 0 || (v & (1 << 7)) != 0) - return -ENODEV; - - v = i2c_smbus_read_byte_data(client, LTC4215_FAULT); - if (v < 0 || (v & ((1 << 6) | (1 << 7))) != 0) - return -ENODEV; - } - - strlcpy(info->type, "ltc4215", I2C_NAME_SIZE); - dev_info(&adapter->dev, "ltc4215 %s at address 0x%02x\n", - kind < 0 ? 
"probed" : "forced", - client->addr); - - return 0; -} - static const struct i2c_device_id ltc4215_id[] = { - { "ltc4215", ltc4215 }, + { "ltc4215", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, ltc4215_id); /* This is the driver that will be inserted */ static struct i2c_driver ltc4215_driver = { - .class = I2C_CLASS_HWMON, .driver = { .name = "ltc4215", }, .probe = ltc4215_probe, .remove = ltc4215_remove, .id_table = ltc4215_id, - .detect = ltc4215_detect, - .address_data = &addr_data, }; static int __init ltc4215_init(void) diff --git a/drivers/hwmon/ltc4245.c b/drivers/hwmon/ltc4245.c index e3896433361..65c232a9d0c 100644 --- a/drivers/hwmon/ltc4245.c +++ b/drivers/hwmon/ltc4245.c @@ -22,15 +22,6 @@ #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> -/* Valid addresses are 0x20 - 0x3f - * - * For now, we do not probe, since some of these addresses - * are known to be unfriendly to probing */ -static const unsigned short normal_i2c[] = { I2C_CLIENT_END }; - -/* Insmod parameters */ -I2C_CLIENT_INSMOD_1(ltc4245); - /* Here are names of the chip's registers (a.k.a. commands) */ enum ltc4245_cmd { LTC4245_STATUS = 0x00, /* readonly */ @@ -369,9 +360,13 @@ static const struct attribute_group ltc4245_group = { static int ltc4245_probe(struct i2c_client *client, const struct i2c_device_id *id) { + struct i2c_adapter *adapter = client->adapter; struct ltc4245_data *data; int ret; + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) + return -ENODEV; + data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) { ret = -ENOMEM; @@ -418,136 +413,20 @@ static int ltc4245_remove(struct i2c_client *client) return 0; } -/* Check that some bits in a control register appear at all possible - * locations without changing value - * - * @client: the i2c client to use - * @reg: the register to read - * @bits: the bits to check (0xff checks all bits, - * 0x03 checks only the last two bits) - * - * return -ERRNO if the register read failed - * return -ENODEV if the register value doesn't stay constant at all - * possible addresses - * - * return 0 for success - */ -static int ltc4245_check_control_reg(struct i2c_client *client, u8 reg, u8 bits) -{ - int i; - s32 v, voff1, voff2; - - /* Read register and check for error */ - v = i2c_smbus_read_byte_data(client, reg); - if (v < 0) - return v; - - v &= bits; - - for (i = 0x00; i < 0xff; i += 0x20) { - - voff1 = i2c_smbus_read_byte_data(client, reg + i); - if (voff1 < 0) - return voff1; - - voff2 = i2c_smbus_read_byte_data(client, reg + i + 0x08); - if (voff2 < 0) - return voff2; - - voff1 &= bits; - voff2 &= bits; - - if (v != voff1 || v != voff2) - return -ENODEV; - } - - return 0; -} - -static int ltc4245_detect(struct i2c_client *client, - int kind, - struct i2c_board_info *info) -{ - struct i2c_adapter *adapter = client->adapter; - - if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) - return -ENODEV; - - if (kind < 0) { /* probed detection - check the chip type */ - s32 v; /* 8 bits from the chip, or -ERRNO */ - - /* Chip registers 0x00-0x07 are control registers - * Chip registers 0x10-0x1f are data registers - * - * Address bits b7-b5 are ignored. This makes the chip "repeat" - * in steps of 0x20. Any control registers should appear with - * the same values across all duplicated addresses. - * - * Register 0x02 bit b2 is reserved, expect 0 - * Register 0x07 bits b7 to b4 are reserved, expect 0 - * - * Registers 0x01, 0x02 are control registers and should not - * change on their own. 
- * - * Register 0x06 bits b6 and b7 are control bits, and should - * not change on their own. - * - * Register 0x07 bits b3 to b0 are control bits, and should - * not change on their own. - */ - - /* read register 0x02 reserved bit, expect 0 */ - v = i2c_smbus_read_byte_data(client, LTC4245_CONTROL); - if (v < 0 || (v & 0x04) != 0) - return -ENODEV; - - /* read register 0x07 reserved bits, expect 0 */ - v = i2c_smbus_read_byte_data(client, LTC4245_ADCADR); - if (v < 0 || (v & 0xf0) != 0) - return -ENODEV; - - /* check that the alert register appears at all locations */ - if (ltc4245_check_control_reg(client, LTC4245_ALERT, 0xff)) - return -ENODEV; - - /* check that the control register appears at all locations */ - if (ltc4245_check_control_reg(client, LTC4245_CONTROL, 0xff)) - return -ENODEV; - - /* check that register 0x06 bits b6 and b7 stay constant */ - if (ltc4245_check_control_reg(client, LTC4245_GPIO, 0xc0)) - return -ENODEV; - - /* check that register 0x07 bits b3-b0 stay constant */ - if (ltc4245_check_control_reg(client, LTC4245_ADCADR, 0x0f)) - return -ENODEV; - } - - strlcpy(info->type, "ltc4245", I2C_NAME_SIZE); - dev_info(&adapter->dev, "ltc4245 %s at address 0x%02x\n", - kind < 0 ? "probed" : "forced", - client->addr); - - return 0; -} - static const struct i2c_device_id ltc4245_id[] = { - { "ltc4245", ltc4245 }, + { "ltc4245", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, ltc4245_id); /* This is the driver that will be inserted */ static struct i2c_driver ltc4245_driver = { - .class = I2C_CLASS_HWMON, .driver = { .name = "ltc4245", }, .probe = ltc4245_probe, .remove = ltc4245_remove, .id_table = ltc4245_id, - .detect = ltc4245_detect, - .address_data = &addr_data, }; static int __init ltc4245_init(void) diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c index f7d6fe9c49b..8f0b90ef8c7 100644 --- a/drivers/i2c/busses/i2c-amd756.c +++ b/drivers/i2c/busses/i2c-amd756.c @@ -364,7 +364,7 @@ static int __devinit amd756_probe(struct pci_dev *pdev, error = acpi_check_region(amd756_ioport, SMB_IOSIZE, amd756_driver.name); if (error) - return error; + return -ENODEV; if (!request_region(amd756_ioport, SMB_IOSIZE, amd756_driver.name)) { dev_err(&pdev->dev, "SMB region 0x%x already in use!\n", diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c index a7c59908c45..5b4ad86ca16 100644 --- a/drivers/i2c/busses/i2c-amd8111.c +++ b/drivers/i2c/busses/i2c-amd8111.c @@ -376,8 +376,10 @@ static int __devinit amd8111_probe(struct pci_dev *dev, smbus->size = pci_resource_len(dev, 0); error = acpi_check_resource_conflict(&dev->resource[0]); - if (error) + if (error) { + error = -ENODEV; goto out_kfree; + } if (!request_region(smbus->base, smbus->size, amd8111_driver.name)) { error = -EBUSY; diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 9d2c5adf5d4..55edcfe5b85 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -732,8 +732,10 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id } err = acpi_check_resource_conflict(&dev->resource[SMBBAR]); - if (err) + if (err) { + err = -ENODEV; goto exit; + } err = pci_request_region(dev, SMBBAR, i801_driver.name); if (err) { diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c index 9f6b8e0f863..dba6eb053e2 100644 --- a/drivers/i2c/busses/i2c-isch.c +++ b/drivers/i2c/busses/i2c-isch.c @@ -281,7 +281,7 @@ static int __devinit sch_probe(struct pci_dev *dev, return -ENODEV; } if 
(acpi_check_region(sch_smba, SMBIOSIZE, sch_driver.name)) - return -EBUSY; + return -ENODEV; if (!request_region(sch_smba, SMBIOSIZE, sch_driver.name)) { dev_err(&dev->dev, "SMBus region 0x%x already in use!\n", sch_smba); diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index a782c7a08f9..d26a972aaca 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -169,7 +169,7 @@ static int __devinit piix4_setup(struct pci_dev *PIIX4_dev, } if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) - return -EBUSY; + return -ENODEV; if (!request_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) { dev_err(&PIIX4_dev->dev, "SMBus region 0x%x already in use!\n", @@ -260,7 +260,7 @@ static int __devinit piix4_setup_sb800(struct pci_dev *PIIX4_dev, piix4_smba = ((smba_en_hi << 8) | smba_en_lo) & 0xffe0; if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) - return -EBUSY; + return -ENODEV; if (!request_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) { dev_err(&PIIX4_dev->dev, "SMBus region 0x%x already in use!\n", diff --git a/drivers/i2c/busses/i2c-sis96x.c b/drivers/i2c/busses/i2c-sis96x.c index 8295885b2fd..1649963b00d 100644 --- a/drivers/i2c/busses/i2c-sis96x.c +++ b/drivers/i2c/busses/i2c-sis96x.c @@ -280,7 +280,7 @@ static int __devinit sis96x_probe(struct pci_dev *dev, retval = acpi_check_resource_conflict(&dev->resource[SIS96x_BAR]); if (retval) - return retval; + return -ENODEV; /* Everything is happy, let's grab the memory and set things up. */ if (!request_region(sis96x_smbus_base, SMB_IOSIZE, diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c index 54d810a4d00..e4b1543015a 100644 --- a/drivers/i2c/busses/i2c-viapro.c +++ b/drivers/i2c/busses/i2c-viapro.c @@ -365,7 +365,7 @@ static int __devinit vt596_probe(struct pci_dev *pdev, found: error = acpi_check_region(vt596_smba, 8, vt596_driver.name); if (error) - return error; + return -ENODEV; if (!request_region(vt596_smba, 8, vt596_driver.name)) { dev_err(&pdev->dev, "SMBus region 0x%x already in use!\n", diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c index 28d09a5d845..017c09540c2 100644 --- a/drivers/ide/ide-proc.c +++ b/drivers/ide/ide-proc.c @@ -273,14 +273,8 @@ static const struct ide_proc_devset ide_generic_settings[] = { static void proc_ide_settings_warn(void) { - static int warned; - - if (warned) - return; - - printk(KERN_WARNING "Warning: /proc/ide/hd?/settings interface is " + printk_once(KERN_WARNING "Warning: /proc/ide/hd?/settings interface is " "obsolete, and will be removed soon!\n"); - warned = 1; } static int ide_settings_proc_show(struct seq_file *m, void *v) diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c index afca22beaad..3b88eba04c9 100644 --- a/drivers/ide/sis5513.c +++ b/drivers/ide/sis5513.c @@ -2,7 +2,7 @@ * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org> * Copyright (C) 2002 Lionel Bouton <Lionel.Bouton@inet6.fr>, Maintainer * Copyright (C) 2003 Vojtech Pavlik <vojtech@suse.cz> - * Copyright (C) 2007 Bartlomiej Zolnierkiewicz + * Copyright (C) 2007-2009 Bartlomiej Zolnierkiewicz * * May be copied or modified under the terms of the GNU General Public License * @@ -281,11 +281,13 @@ static void config_drive_art_rwp(ide_drive_t *drive) pci_read_config_byte(dev, 0x4b, ®4bh); + rw_prefetch = reg4bh & ~(0x11 << drive->dn); + if (drive->media == ide_disk) - rw_prefetch = 0x11 << drive->dn; + rw_prefetch |= 0x11 << drive->dn; - if ((reg4bh & (0x11 << drive->dn)) != rw_prefetch) - 
pci_write_config_byte(dev, 0x4b, reg4bh|rw_prefetch); + if (reg4bh != rw_prefetch) + pci_write_config_byte(dev, 0x4b, rw_prefetch); } static void sis_set_pio_mode(ide_drive_t *drive, const u8 pio) diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c index 51bd9669cb1..f504c9b00c1 100644 --- a/drivers/infiniband/core/ucm.c +++ b/drivers/infiniband/core/ucm.c @@ -38,6 +38,7 @@ #include <linux/device.h> #include <linux/err.h> #include <linux/poll.h> +#include <linux/sched.h> #include <linux/file.h> #include <linux/mount.h> #include <linux/cdev.h> diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index 8c46f225709..7de02969ed7 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -44,6 +44,7 @@ #include <linux/mutex.h> #include <linux/kref.h> #include <linux/compat.h> +#include <linux/sched.h> #include <linux/semaphore.h> #include <asm/uaccess.h> diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index d3fff9e008a..aec0fbdfe7f 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -40,6 +40,7 @@ #include <linux/err.h> #include <linux/fs.h> #include <linux/poll.h> +#include <linux/sched.h> #include <linux/file.h> #include <linux/mount.h> #include <linux/cdev.h> diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index 1148140d08a..dee6706038a 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c @@ -13,6 +13,7 @@ #define EVDEV_BUFFER_SIZE 64 #include <linux/poll.h> +#include <linux/sched.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/init.h> diff --git a/drivers/input/input.c b/drivers/input/input.c index e828aab7dac..c6f88ebb40c 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -17,6 +17,7 @@ #include <linux/random.h> #include <linux/major.h> #include <linux/proc_fs.h> +#include <linux/sched.h> #include <linux/seq_file.h> #include <linux/poll.h> #include <linux/device.h> @@ -1273,6 +1274,7 @@ static int input_dev_uevent(struct device *device, struct kobj_uevent_env *env) } \ } while (0) +#ifdef CONFIG_PM static void input_dev_reset(struct input_dev *dev, bool activate) { if (!dev->event) @@ -1287,7 +1289,6 @@ static void input_dev_reset(struct input_dev *dev, bool activate) } } -#ifdef CONFIG_PM static int input_dev_suspend(struct device *dev) { struct input_dev *input_dev = to_input_dev(dev); diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c index 901b2525993..b1bd6dd3228 100644 --- a/drivers/input/joydev.c +++ b/drivers/input/joydev.c @@ -18,6 +18,7 @@ #include <linux/input.h> #include <linux/kernel.h> #include <linux/major.h> +#include <linux/sched.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/miscdevice.h> diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index c5a49aba418..d3f57245420 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c @@ -30,6 +30,7 @@ * - first public version */ #include <linux/poll.h> +#include <linux/sched.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/init.h> diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c index 966b8868f79..a13d80f7da1 100644 --- a/drivers/input/mousedev.c +++ b/drivers/input/mousedev.c @@ -13,6 +13,7 @@ #define MOUSEDEV_MINORS 32 #define MOUSEDEV_MIX 31 +#include <linux/sched.h> #include <linux/slab.h> #include <linux/smp_lock.h> #include <linux/poll.h> diff --git a/drivers/isdn/capi/capi.c 
b/drivers/isdn/capi/capi.c index 2d8352419c0..65bf91e16a4 100644 --- a/drivers/isdn/capi/capi.c +++ b/drivers/isdn/capi/capi.c @@ -603,7 +603,7 @@ static void capi_recv_message(struct capi20_appl *ap, struct sk_buff *skb) if (CAPIMSG_CMD(skb->data) == CAPI_CONNECT_B3_CONF) { u16 info = CAPIMSG_U16(skb->data, 12); // Info field - if (info == 0) { + if ((info & 0xff00) == 0) { mutex_lock(&cdev->ncci_list_mtx); capincci_alloc(cdev, CAPIMSG_NCCI(skb->data)); mutex_unlock(&cdev->ncci_list_mtx); diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c index 650120261ab..3e6d17f42a9 100644 --- a/drivers/isdn/capi/capidrv.c +++ b/drivers/isdn/capi/capidrv.c @@ -40,7 +40,7 @@ static int debugmode = 0; MODULE_DESCRIPTION("CAPI4Linux: Interface to ISDN4Linux"); MODULE_AUTHOR("Carsten Paeth"); MODULE_LICENSE("GPL"); -module_param(debugmode, uint, 0); +module_param(debugmode, uint, S_IRUGO|S_IWUSR); /* -------- type definitions ----------------------------------------- */ @@ -671,8 +671,8 @@ static void n0(capidrv_contr * card, capidrv_ncci * ncci) NULL, /* Useruserdata */ /* $$$$ */ NULL /* Facilitydataarray */ ); - send_message(card, &cmsg); plci_change_state(card, ncci->plcip, EV_PLCI_DISCONNECT_REQ); + send_message(card, &cmsg); cmd.command = ISDN_STAT_BHUP; cmd.driver = card->myid; @@ -924,8 +924,8 @@ static void handle_incoming_call(capidrv_contr * card, _cmsg * cmsg) */ capi_cmsg_answer(cmsg); cmsg->Reject = 1; /* ignore */ - send_message(card, cmsg); plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT); + send_message(card, cmsg); printk(KERN_INFO "capidrv-%d: incoming call %s,%d,%d,%s ignored\n", card->contrnr, cmd.parm.setup.phone, @@ -974,8 +974,8 @@ static void handle_incoming_call(capidrv_contr * card, _cmsg * cmsg) case 2: /* Call will be rejected. 
*/ capi_cmsg_answer(cmsg); cmsg->Reject = 2; /* reject call, normal call clearing */ - send_message(card, cmsg); plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT); + send_message(card, cmsg); break; default: @@ -983,8 +983,8 @@ static void handle_incoming_call(capidrv_contr * card, _cmsg * cmsg) capi_cmsg_answer(cmsg); cmsg->Reject = 8; /* reject call, destination out of order */ - send_message(card, cmsg); plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT); + send_message(card, cmsg); break; } return; @@ -1020,8 +1020,8 @@ static void handle_plci(_cmsg * cmsg) card->bchans[plcip->chan].disconnecting = 1; plci_change_state(card, plcip, EV_PLCI_DISCONNECT_IND); capi_cmsg_answer(cmsg); - send_message(card, cmsg); plci_change_state(card, plcip, EV_PLCI_DISCONNECT_RESP); + send_message(card, cmsg); break; case CAPI_DISCONNECT_CONF: /* plci */ @@ -1078,8 +1078,8 @@ static void handle_plci(_cmsg * cmsg) if (card->bchans[plcip->chan].incoming) { capi_cmsg_answer(cmsg); - send_message(card, cmsg); plci_change_state(card, plcip, EV_PLCI_CONNECT_ACTIVE_IND); + send_message(card, cmsg); } else { capidrv_ncci *nccip; capi_cmsg_answer(cmsg); @@ -1098,13 +1098,14 @@ static void handle_plci(_cmsg * cmsg) NULL /* NCPI */ ); nccip->msgid = cmsg->Messagenumber; + plci_change_state(card, plcip, + EV_PLCI_CONNECT_ACTIVE_IND); + ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_REQ); send_message(card, cmsg); cmd.command = ISDN_STAT_DCONN; cmd.driver = card->myid; cmd.arg = plcip->chan; card->interface.statcallb(&cmd); - plci_change_state(card, plcip, EV_PLCI_CONNECT_ACTIVE_IND); - ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_REQ); } break; @@ -1193,8 +1194,8 @@ static void handle_ncci(_cmsg * cmsg) goto notfound; capi_cmsg_answer(cmsg); - send_message(card, cmsg); ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_ACTIVE_IND); + send_message(card, cmsg); cmd.command = ISDN_STAT_BCONN; cmd.driver = card->myid; @@ -1222,8 +1223,8 @@ static void handle_ncci(_cmsg * cmsg) 0, /* Reject */ NULL /* NCPI */ ); - send_message(card, cmsg); ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_RESP); + send_message(card, cmsg); break; } printk(KERN_ERR "capidrv-%d: no mem for ncci, sorry\n", card->contrnr); @@ -1299,8 +1300,8 @@ static void handle_ncci(_cmsg * cmsg) card->bchans[nccip->chan].disconnecting = 1; ncci_change_state(card, nccip, EV_NCCI_DISCONNECT_B3_IND); capi_cmsg_answer(cmsg); - send_message(card, cmsg); ncci_change_state(card, nccip, EV_NCCI_DISCONNECT_B3_RESP); + send_message(card, cmsg); break; case CAPI_DISCONNECT_B3_CONF: /* ncci */ @@ -2014,8 +2015,8 @@ static void send_listen(capidrv_contr *card) card->cipmask, card->cipmask2, NULL, NULL); - send_message(card, &cmdcmsg); listen_change_state(card, EV_LISTEN_REQ); + send_message(card, &cmdcmsg); } static void listentimerfunc(unsigned long x) diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c index 8b256a617c8..3697c409bec 100644 --- a/drivers/isdn/divert/divert_procfs.c +++ b/drivers/isdn/divert/divert_procfs.c @@ -16,6 +16,7 @@ #else #include <linux/fs.h> #endif +#include <linux/sched.h> #include <linux/isdnif.h> #include <net/net_namespace.h> #include "isdn_divert.h" diff --git a/drivers/isdn/gigaset/asyncdata.c b/drivers/isdn/gigaset/asyncdata.c index 234cc5d5331..44a58e6f8f6 100644 --- a/drivers/isdn/gigaset/asyncdata.c +++ b/drivers/isdn/gigaset/asyncdata.c @@ -334,7 +334,14 @@ static inline int iraw_loop(unsigned char c, unsigned char *src, int numbytes, return startbytes - numbytes; } -/* process a 
block of data received from the device +/** + * gigaset_m10x_input() - process a block of data received from the device + * @inbuf: received data and device descriptor structure. + * + * Called by hardware module {ser,usb}_gigaset with a block of received + * bytes. Separates the bytes received over the serial data channel into + * user data and command replies (locked/unlocked) according to the + * current state of the interface. */ void gigaset_m10x_input(struct inbuf_t *inbuf) { @@ -543,16 +550,17 @@ static struct sk_buff *iraw_encode(struct sk_buff *skb, int head, int tail) return iraw_skb; } -/* gigaset_send_skb - * called by common.c to queue an skb for sending - * and start transmission if necessary - * parameters: - * B Channel control structure - * skb +/** + * gigaset_m10x_send_skb() - queue an skb for sending + * @bcs: B channel descriptor structure. + * @skb: data to send. + * + * Called by i4l.c to encode and queue an skb for sending, and start + * transmission if necessary. + * * Return value: - * number of bytes accepted for sending - * (skb->len if ok, 0 if out of buffer space) - * or error code (< 0, eg. -EINVAL) + * number of bytes accepted for sending (skb->len) if ok, + * error code < 0 (eg. -ENOMEM) on error */ int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb) { diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c index 781c4041f7b..5ed1d99eb9f 100644 --- a/drivers/isdn/gigaset/bas-gigaset.c +++ b/drivers/isdn/gigaset/bas-gigaset.c @@ -134,6 +134,7 @@ struct bas_cardstate { #define BS_ATRDPEND 0x040 /* urb_cmd_in in use */ #define BS_ATWRPEND 0x080 /* urb_cmd_out in use */ #define BS_SUSPEND 0x100 /* USB port suspended */ +#define BS_RESETTING 0x200 /* waiting for HD_RESET_INTERRUPT_PIPE_ACK */ static struct gigaset_driver *driver = NULL; @@ -319,6 +320,21 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag) return -EINVAL; } +/* set/clear bits in base connection state, return previous state + */ +static inline int update_basstate(struct bas_cardstate *ucs, + int set, int clear) +{ + unsigned long flags; + int state; + + spin_lock_irqsave(&ucs->lock, flags); + state = ucs->basstate; + ucs->basstate = (state & ~clear) | set; + spin_unlock_irqrestore(&ucs->lock, flags); + return state; +} + /* error_hangup * hang up any existing connection because of an unrecoverable error * This function may be called from any context and takes care of scheduling @@ -350,12 +366,9 @@ static inline void error_hangup(struct bc_state *bcs) */ static inline void error_reset(struct cardstate *cs) { - /* close AT command channel to recover (ignore errors) */ - req_submit(cs->bcs, HD_CLOSE_ATCHANNEL, 0, BAS_TIMEOUT); - - //FIXME try to recover without bothering the user - dev_err(cs->dev, - "unrecoverable error - please disconnect Gigaset base to reset\n"); + /* reset interrupt pipe to recover (ignore errors) */ + update_basstate(cs->hw.bas, BS_RESETTING, 0); + req_submit(cs->bcs, HD_RESET_INTERRUPT_PIPE, 0, BAS_TIMEOUT); } /* check_pending @@ -398,8 +411,13 @@ static void check_pending(struct bas_cardstate *ucs) case HD_DEVICE_INIT_ACK: /* no reply expected */ ucs->pending = 0; break; - /* HD_READ_ATMESSAGE, HD_WRITE_ATMESSAGE, HD_RESET_INTERRUPTPIPE - * are handled separately and should never end up here + case HD_RESET_INTERRUPT_PIPE: + if (!(ucs->basstate & BS_RESETTING)) + ucs->pending = 0; + break; + /* + * HD_READ_ATMESSAGE and HD_WRITE_ATMESSAGE are handled separately + * and should never end up here */ default: 
dev_warn(&ucs->interface->dev, @@ -449,21 +467,6 @@ static void cmd_in_timeout(unsigned long data) error_reset(cs); } -/* set/clear bits in base connection state, return previous state - */ -inline static int update_basstate(struct bas_cardstate *ucs, - int set, int clear) -{ - unsigned long flags; - int state; - - spin_lock_irqsave(&ucs->lock, flags); - state = ucs->basstate; - ucs->basstate = (state & ~clear) | set; - spin_unlock_irqrestore(&ucs->lock, flags); - return state; -} - /* read_ctrl_callback * USB completion handler for control pipe input * called by the USB subsystem in interrupt context @@ -762,7 +765,8 @@ static void read_int_callback(struct urb *urb) break; case HD_RESET_INTERRUPT_PIPE_ACK: - gig_dbg(DEBUG_USBREQ, "HD_RESET_INTERRUPT_PIPE_ACK"); + update_basstate(ucs, 0, BS_RESETTING); + dev_notice(cs->dev, "interrupt pipe reset\n"); break; case HD_SUSPEND_END: @@ -1331,28 +1335,24 @@ static void read_iso_tasklet(unsigned long data) rcvbuf = urb->transfer_buffer; totleft = urb->actual_length; for (frame = 0; totleft > 0 && frame < BAS_NUMFRAMES; frame++) { - if (unlikely(urb->iso_frame_desc[frame].status)) { + numbytes = urb->iso_frame_desc[frame].actual_length; + if (unlikely(urb->iso_frame_desc[frame].status)) dev_warn(cs->dev, - "isochronous read: frame %d: %s\n", - frame, + "isochronous read: frame %d[%d]: %s\n", + frame, numbytes, get_usb_statmsg( urb->iso_frame_desc[frame].status)); - break; - } - numbytes = urb->iso_frame_desc[frame].actual_length; - if (unlikely(numbytes > BAS_MAXFRAME)) { + if (unlikely(numbytes > BAS_MAXFRAME)) dev_warn(cs->dev, "isochronous read: frame %d: " "numbytes (%d) > BAS_MAXFRAME\n", frame, numbytes); - break; - } if (unlikely(numbytes > totleft)) { dev_warn(cs->dev, "isochronous read: frame %d: " "numbytes (%d) > totleft (%d)\n", frame, numbytes, totleft); - break; + numbytes = totleft; } offset = urb->iso_frame_desc[frame].offset; if (unlikely(offset + numbytes > BAS_INBUFSIZE)) { @@ -1361,7 +1361,7 @@ static void read_iso_tasklet(unsigned long data) "offset (%d) + numbytes (%d) " "> BAS_INBUFSIZE\n", frame, offset, numbytes); - break; + numbytes = BAS_INBUFSIZE - offset; } gigaset_isoc_receive(rcvbuf + offset, numbytes, bcs); totleft -= numbytes; @@ -1433,6 +1433,7 @@ static void req_timeout(unsigned long data) case HD_CLOSE_ATCHANNEL: dev_err(bcs->cs->dev, "timeout closing AT channel\n"); + error_reset(bcs->cs); break; case HD_CLOSE_B2CHANNEL: @@ -1442,6 +1443,13 @@ static void req_timeout(unsigned long data) error_reset(bcs->cs); break; + case HD_RESET_INTERRUPT_PIPE: + /* error recovery escalation */ + dev_err(bcs->cs->dev, + "reset interrupt pipe timeout, attempting USB reset\n"); + usb_queue_reset_device(bcs->cs->hw.bas->interface); + break; + default: dev_warn(bcs->cs->dev, "request 0x%02x timed out, clearing\n", pending); @@ -1934,6 +1942,15 @@ static int gigaset_write_cmd(struct cardstate *cs, goto notqueued; } + /* translate "+++" escape sequence sent as a single separate command + * into "close AT channel" command for error recovery + * The next command will reopen the AT channel automatically. 
+ */ + if (len == 3 && !memcmp(buf, "+++", 3)) { + rc = req_submit(cs->bcs, HD_CLOSE_ATCHANNEL, 0, BAS_TIMEOUT); + goto notqueued; + } + if (len > IF_WRITEBUF) len = IF_WRITEBUF; if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) { diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c index e4141bf8b2f..33dcd8d72b7 100644 --- a/drivers/isdn/gigaset/common.c +++ b/drivers/isdn/gigaset/common.c @@ -22,6 +22,12 @@ #define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Tilman Schmidt <tilman@imap.cc>, Stefan Eilers" #define DRIVER_DESC "Driver for Gigaset 307x" +#ifdef CONFIG_GIGASET_DEBUG +#define DRIVER_DESC_DEBUG " (debug build)" +#else +#define DRIVER_DESC_DEBUG "" +#endif + /* Module parameters */ int gigaset_debuglevel = DEBUG_DEFAULT; EXPORT_SYMBOL_GPL(gigaset_debuglevel); @@ -32,6 +38,17 @@ MODULE_PARM_DESC(debug, "debug level"); #define VALID_MINOR 0x01 #define VALID_ID 0x02 +/** + * gigaset_dbg_buffer() - dump data in ASCII and hex for debugging + * @level: debugging level. + * @msg: message prefix. + * @len: number of bytes to dump. + * @buf: data to dump. + * + * If the current debugging level includes one of the bits set in @level, + * @len bytes starting at @buf are logged to dmesg at KERN_DEBUG prio, + * prefixed by the text @msg. + */ void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg, size_t len, const unsigned char *buf) { @@ -274,6 +291,20 @@ static void clear_events(struct cardstate *cs) spin_unlock_irqrestore(&cs->ev_lock, flags); } +/** + * gigaset_add_event() - add event to device event queue + * @cs: device descriptor structure. + * @at_state: connection state structure. + * @type: event type. + * @ptr: pointer parameter for event. + * @parameter: integer parameter for event. + * @arg: pointer parameter for event. + * + * Allocate an event queue entry from the device's event queue, and set it up + * with the parameters given. + * + * Return value: added event + */ struct event_t *gigaset_add_event(struct cardstate *cs, struct at_state_t *at_state, int type, void *ptr, int parameter, void *arg) @@ -398,6 +429,15 @@ static void make_invalid(struct cardstate *cs, unsigned mask) spin_unlock_irqrestore(&drv->lock, flags); } +/** + * gigaset_freecs() - free all associated ressources of a device + * @cs: device descriptor structure. + * + * Stops all tasklets and timers, unregisters the device from all + * subsystems it was registered to, deallocates the device structure + * @cs and all structures referenced from it. + * Operations on the device should be stopped before calling this. + */ void gigaset_freecs(struct cardstate *cs) { int i; @@ -506,7 +546,12 @@ static void gigaset_inbuf_init(struct inbuf_t *inbuf, struct bc_state *bcs, inbuf->inputstate = inputstate; } -/* append received bytes to inbuf */ +/** + * gigaset_fill_inbuf() - append received data to input buffer + * @inbuf: buffer structure. + * @src: received data. + * @numbytes: number of bytes received. 
+ */ int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src, unsigned numbytes) { @@ -606,20 +651,22 @@ static struct bc_state *gigaset_initbcs(struct bc_state *bcs, return NULL; } -/* gigaset_initcs +/** + * gigaset_initcs() - initialize device structure + * @drv: hardware driver the device belongs to + * @channels: number of B channels supported by device + * @onechannel: !=0 if B channel data and AT commands share one + * communication channel (M10x), + * ==0 if B channels have separate communication channels (base) + * @ignoreframes: number of frames to ignore after setting up B channel + * @cidmode: !=0: start in CallID mode + * @modulename: name of driver module for LL registration + * * Allocate and initialize cardstate structure for Gigaset driver * Calls hardware dependent gigaset_initcshw() function * Calls B channel initialization function gigaset_initbcs() for each B channel - * parameters: - * drv hardware driver the device belongs to - * channels number of B channels supported by device - * onechannel !=0: B channel data and AT commands share one - * communication channel - * ==0: B channels have separate communication channels - * ignoreframes number of frames to ignore after setting up B channel - * cidmode !=0: start in CallID mode - * modulename name of driver module (used for I4L registration) - * return value: + * + * Return value: * pointer to cardstate structure */ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels, @@ -837,6 +884,17 @@ static void cleanup_cs(struct cardstate *cs) } +/** + * gigaset_start() - start device operations + * @cs: device descriptor structure. + * + * Prepares the device for use by setting up communication parameters, + * scheduling an EV_START event to initiate device initialization, and + * waiting for completion of the initialization. + * + * Return value: + * 1 - success, 0 - error + */ int gigaset_start(struct cardstate *cs) { unsigned long flags; @@ -879,9 +937,15 @@ error: } EXPORT_SYMBOL_GPL(gigaset_start); -/* gigaset_shutdown - * check if a device is associated to the cardstate structure and stop it - * return value: 0 if ok, -1 if no device was associated +/** + * gigaset_shutdown() - shut down device operations + * @cs: device descriptor structure. + * + * Deactivates the device by scheduling an EV_SHUTDOWN event and + * waiting for completion of the shutdown. + * + * Return value: + * 0 - success, -1 - error (no device associated) */ int gigaset_shutdown(struct cardstate *cs) { @@ -912,6 +976,13 @@ exit: } EXPORT_SYMBOL_GPL(gigaset_shutdown); +/** + * gigaset_stop() - stop device operations + * @cs: device descriptor structure. + * + * Stops operations on the device by scheduling an EV_STOP event and + * waiting for completion of the shutdown. + */ void gigaset_stop(struct cardstate *cs) { mutex_lock(&cs->mutex); @@ -1020,6 +1091,14 @@ struct cardstate *gigaset_get_cs_by_tty(struct tty_struct *tty) return gigaset_get_cs_by_minor(tty->index + tty->driver->minor_start); } +/** + * gigaset_freedriver() - free all associated ressources of a driver + * @drv: driver descriptor structure. + * + * Unregisters the driver from the system and deallocates the driver + * structure @drv and all structures referenced from it. + * All devices should be shut down before calling this. 
+ */ void gigaset_freedriver(struct gigaset_driver *drv) { unsigned long flags; @@ -1035,14 +1114,16 @@ void gigaset_freedriver(struct gigaset_driver *drv) } EXPORT_SYMBOL_GPL(gigaset_freedriver); -/* gigaset_initdriver +/** + * gigaset_initdriver() - initialize driver structure + * @minor: First minor number + * @minors: Number of minors this driver can handle + * @procname: Name of the driver + * @devname: Name of the device files (prefix without minor number) + * * Allocate and initialize gigaset_driver structure. Initialize interface. - * parameters: - * minor First minor number - * minors Number of minors this driver can handle - * procname Name of the driver - * devname Name of the device files (prefix without minor number) - * return value: + * + * Return value: * Pointer to the gigaset_driver structure on success, NULL on failure. */ struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors, @@ -1095,6 +1176,13 @@ error: } EXPORT_SYMBOL_GPL(gigaset_initdriver); +/** + * gigaset_blockdriver() - block driver + * @drv: driver descriptor structure. + * + * Prevents the driver from attaching new devices, in preparation for + * deregistration. + */ void gigaset_blockdriver(struct gigaset_driver *drv) { drv->blocked = 1; @@ -1110,7 +1198,7 @@ static int __init gigaset_init_module(void) if (gigaset_debuglevel == 1) gigaset_debuglevel = DEBUG_DEFAULT; - pr_info(DRIVER_DESC "\n"); + pr_info(DRIVER_DESC DRIVER_DESC_DEBUG "\n"); return 0; } diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c index 2d91049571a..cc768caa38f 100644 --- a/drivers/isdn/gigaset/ev-layer.c +++ b/drivers/isdn/gigaset/ev-layer.c @@ -207,7 +207,6 @@ struct reply_t gigaset_tab_nocid[] = /* leave dle mode */ {RSP_INIT, 0, 0,SEQ_DLE0, 201, 5, {0}, "^SDLE=0\r"}, {RSP_OK, 201,201, -1, 202,-1}, - //{RSP_ZDLE, 202,202, 0, 202, 0, {ACT_ERROR}},//DELETE {RSP_ZDLE, 202,202, 0, 0, 0, {ACT_DLE0}}, {RSP_NODEV, 200,249, -1, 0, 0, {ACT_FAKEDLE0}}, {RSP_ERROR, 200,249, -1, 0, 0, {ACT_FAILDLE0}}, @@ -265,6 +264,7 @@ struct reply_t gigaset_tab_nocid[] = {EV_SHUTDOWN, -1, -1, -1, -1,-1, {ACT_SHUTDOWN}}, //FIXME /* misc. */ + {RSP_ERROR, -1, -1, -1, -1, -1, {ACT_ERROR} }, {RSP_EMPTY, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME {RSP_ZCFGT, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME {RSP_ZCFG, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME @@ -328,10 +328,9 @@ struct reply_t gigaset_tab_cid[] = {RSP_INIT, -1, -1,SEQ_HUP, 401, 5, {0}, "+VLS=0\r"}, /* hang up */ //-1,-1? {RSP_OK, 401,401, -1, 402, 5}, {RSP_ZVLS, 402,402, 0, 403, 5}, - {RSP_ZSAU, 403,403,ZSAU_DISCONNECT_REQ, -1,-1, {ACT_DEBUG}}, /* if not remote hup */ - //{RSP_ZSAU, 403,403,ZSAU_NULL, 401, 0, {ACT_ERROR}}, //DELETE//FIXME -> DLE0 // should we do this _before_ hanging up for base driver? - {RSP_ZSAU, 403,403,ZSAU_NULL, 0, 0, {ACT_DISCONNECT}}, //FIXME -> DLE0 // should we do this _before_ hanging up for base driver? - {RSP_NODEV, 401,403, -1, 0, 0, {ACT_FAKEHUP}}, //FIXME -> DLE0 // should we do this _before_ hanging up for base driver? + {RSP_ZSAU, 403, 403, ZSAU_DISCONNECT_REQ, -1, -1, {ACT_DEBUG} }, + {RSP_ZSAU, 403, 403, ZSAU_NULL, 0, 0, {ACT_DISCONNECT} }, + {RSP_NODEV, 401, 403, -1, 0, 0, {ACT_FAKEHUP} }, {RSP_ERROR, 401,401, -1, 0, 0, {ACT_ABORTHUP}}, {EV_TIMEOUT, 401,403, -1, 0, 0, {ACT_ABORTHUP}}, @@ -474,8 +473,13 @@ static int cid_of_response(char *s) //FIXME is ;<digit>+ at end of non-CID response really impossible? } -/* This function will be called via task queue from the callback handler. 
- * We received a modem response and have to handle it.. +/** + * gigaset_handle_modem_response() - process received modem response + * @cs: device descriptor structure. + * + * Called by asyncdata/isocdata if a block of data received from the + * device must be processed as a modem command response. The data is + * already in the cs structure. */ void gigaset_handle_modem_response(struct cardstate *cs) { @@ -707,6 +711,11 @@ static void disconnect(struct at_state_t **at_state_p) if (bcs) { /* B channel assigned: invoke hardware specific handler */ cs->ops->close_bchannel(bcs); + /* notify LL */ + if (bcs->chstate & (CHS_D_UP | CHS_NOTIFY_LL)) { + bcs->chstate &= ~(CHS_D_UP | CHS_NOTIFY_LL); + gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DHUP); + } } else { /* no B channel assigned: just deallocate */ spin_lock_irqsave(&cs->lock, flags); @@ -1429,11 +1438,12 @@ static void do_action(int action, struct cardstate *cs, cs->gotfwver = -1; dev_err(cs->dev, "could not read firmware version.\n"); break; -#ifdef CONFIG_GIGASET_DEBUG case ACT_ERROR: - *p_genresp = 1; - *p_resp_code = RSP_ERROR; + gig_dbg(DEBUG_ANY, "%s: ERROR response in ConState %d", + __func__, at_state->ConState); + cs->cur_at_seq = SEQ_NONE; break; +#ifdef CONFIG_GIGASET_DEBUG case ACT_TEST: { static int count = 3; //2; //1; diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c index 9b22f9cf2f3..654489d836c 100644 --- a/drivers/isdn/gigaset/i4l.c +++ b/drivers/isdn/gigaset/i4l.c @@ -51,6 +51,12 @@ static int writebuf_from_LL(int driverID, int channel, int ack, return -ENODEV; } bcs = &cs->bcs[channel]; + + /* can only handle linear sk_buffs */ + if (skb_linearize(skb) < 0) { + dev_err(cs->dev, "%s: skb_linearize failed\n", __func__); + return -ENOMEM; + } len = skb->len; gig_dbg(DEBUG_LLDATA, @@ -79,6 +85,14 @@ static int writebuf_from_LL(int driverID, int channel, int ack, return cs->ops->send_skb(bcs, skb); } +/** + * gigaset_skb_sent() - acknowledge sending an skb + * @bcs: B channel descriptor structure. + * @skb: sent data. + * + * Called by hardware module {bas,ser,usb}_gigaset when the data in a + * skb has been successfully sent, for signalling completion to the LL. + */ void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb) { unsigned len; @@ -455,6 +469,15 @@ int gigaset_isdn_setup_accept(struct at_state_t *at_state) return 0; } +/** + * gigaset_isdn_icall() - signal incoming call + * @at_state: connection state structure. + * + * Called by main module to notify the LL that an incoming call has been + * received. @at_state contains the parameters of the call. + * + * Return value: call disposition (ICALL_*) + */ int gigaset_isdn_icall(struct at_state_t *at_state) { struct cardstate *cs = at_state->cs; diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c index f33ac27de64..6a8e1384e7b 100644 --- a/drivers/isdn/gigaset/interface.c +++ b/drivers/isdn/gigaset/interface.c @@ -616,6 +616,15 @@ void gigaset_if_free(struct cardstate *cs) tty_unregister_device(drv->tty, cs->minor_index); } +/** + * gigaset_if_receive() - pass a received block of data to the tty device + * @cs: device descriptor structure. + * @buffer: received data. + * @len: number of bytes received. + * + * Called by asyncdata/isocdata if a block of data received from the + * device must be sent to userspace through the ttyG* device. 
+ */ void gigaset_if_receive(struct cardstate *cs, unsigned char *buffer, size_t len) { diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c index bed38fcc432..9f3ef7b4248 100644 --- a/drivers/isdn/gigaset/isocdata.c +++ b/drivers/isdn/gigaset/isocdata.c @@ -429,7 +429,7 @@ static inline int hdlc_buildframe(struct isowbuf_t *iwb, return -EAGAIN; } - dump_bytes(DEBUG_STREAM, "snd data", in, count); + dump_bytes(DEBUG_STREAM_DUMP, "snd data", in, count); /* bitstuff and checksum input data */ fcs = PPP_INITFCS; @@ -448,7 +448,6 @@ static inline int hdlc_buildframe(struct isowbuf_t *iwb, /* put closing flag and repeat byte for flag idle */ isowbuf_putflag(iwb); end = isowbuf_donewrite(iwb); - dump_bytes(DEBUG_STREAM_DUMP, "isowbuf", iwb->data, end + 1); return end; } @@ -482,6 +481,8 @@ static inline int trans_buildframe(struct isowbuf_t *iwb, } gig_dbg(DEBUG_STREAM, "put %d bytes", count); + dump_bytes(DEBUG_STREAM_DUMP, "snd data", in, count); + write = iwb->write; do { c = bitrev8(*in++); @@ -583,7 +584,7 @@ static inline void hdlc_done(struct bc_state *bcs) procskb->tail -= 2; gig_dbg(DEBUG_ISO, "%s: good frame (%d octets)", __func__, procskb->len); - dump_bytes(DEBUG_STREAM, + dump_bytes(DEBUG_STREAM_DUMP, "rcv data", procskb->data, procskb->len); bcs->hw.bas->goodbytes += procskb->len; gigaset_rcv_skb(procskb, bcs->cs, bcs); @@ -878,6 +879,8 @@ static inline void trans_receive(unsigned char *src, unsigned count, dobytes--; } if (dobytes == 0) { + dump_bytes(DEBUG_STREAM_DUMP, + "rcv data", skb->data, skb->len); gigaset_rcv_skb(skb, bcs->cs, bcs); bcs->skb = skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN); if (!skb) { @@ -973,16 +976,17 @@ void gigaset_isoc_input(struct inbuf_t *inbuf) /* == data output ========================================================== */ -/* gigaset_send_skb - * called by common.c to queue an skb for sending - * and start transmission if necessary - * parameters: - * B Channel control structure - * skb - * return value: - * number of bytes accepted for sending - * (skb->len if ok, 0 if out of buffer space) - * or error code (< 0, eg. -EINVAL) +/** + * gigaset_isoc_send_skb() - queue an skb for sending + * @bcs: B channel descriptor structure. + * @skb: data to send. + * + * Called by i4l.c to queue an skb for sending, and start transmission if + * necessary. + * + * Return value: + * number of bytes accepted for sending (skb->len) if ok, + * error code < 0 (eg. 
-ENODEV) on error */ int gigaset_isoc_send_skb(struct bc_state *bcs, struct sk_buff *skb) { diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c index c36f5213745..feb0fa45b66 100644 --- a/drivers/isdn/mISDN/socket.c +++ b/drivers/isdn/mISDN/socket.c @@ -415,7 +415,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) } static int data_sock_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, int len) + char __user *optval, unsigned int len) { struct sock *sk = sock->sk; int err = 0, opt = 0; diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c index 708a8017c21..adc561eb59d 100644 --- a/drivers/leds/leds-pca9532.c +++ b/drivers/leds/leds-pca9532.c @@ -19,9 +19,6 @@ #include <linux/workqueue.h> #include <linux/leds-pca9532.h> -static const unsigned short normal_i2c[] = { /*0x60,*/ I2C_CLIENT_END}; -I2C_CLIENT_INSMOD_1(pca9532); - #define PCA9532_REG_PSC(i) (0x2+(i)*2) #define PCA9532_REG_PWM(i) (0x3+(i)*2) #define PCA9532_REG_LS0 0x6 diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c index b4d3f7ca554..bd1632388e4 100644 --- a/drivers/lguest/lguest_user.c +++ b/drivers/lguest/lguest_user.c @@ -508,7 +508,7 @@ static int close(struct inode *inode, struct file *file) * uses: reading and writing a character device called /dev/lguest. All the * work happens in the read(), write() and close() routines: */ -static struct file_operations lguest_fops = { +static const struct file_operations lguest_fops = { .owner = THIS_MODULE, .release = close, .write = write, diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c index fde377c60cc..556f0feaa4d 100644 --- a/drivers/macintosh/therm_adt746x.c +++ b/drivers/macintosh/therm_adt746x.c @@ -124,6 +124,8 @@ read_reg(struct thermostat* th, int reg) return data; } +static struct i2c_driver thermostat_driver; + static int attach_thermostat(struct i2c_adapter *adapter) { @@ -148,7 +150,7 @@ attach_thermostat(struct i2c_adapter *adapter) * Let i2c-core delete that device on driver removal. * This is safe because i2c-core holds the core_lock mutex for us. */ - list_add_tail(&client->detected, &client->driver->clients); + list_add_tail(&client->detected, &thermostat_driver.clients); return 0; } diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c index a028598af2d..ea32c7e5a9a 100644 --- a/drivers/macintosh/therm_pm72.c +++ b/drivers/macintosh/therm_pm72.c @@ -286,6 +286,8 @@ struct fcu_fan_table fcu_fans[] = { }, }; +static struct i2c_driver therm_pm72_driver; + /* * Utility function to create an i2c_client structure and * attach it to one of u3 adapters @@ -318,7 +320,7 @@ static struct i2c_client *attach_i2c_chip(int id, const char *name) * Let i2c-core delete that device on driver removal. * This is safe because i2c-core holds the core_lock mutex for us. 
*/ - list_add_tail(&clt->detected, &clt->driver->clients); + list_add_tail(&clt->detected, &therm_pm72_driver.clients); return clt; } diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c index 529886c7a82..ed6426a1077 100644 --- a/drivers/macintosh/windfarm_lm75_sensor.c +++ b/drivers/macintosh/windfarm_lm75_sensor.c @@ -115,6 +115,8 @@ static int wf_lm75_probe(struct i2c_client *client, return rc; } +static struct i2c_driver wf_lm75_driver; + static struct i2c_client *wf_lm75_create(struct i2c_adapter *adapter, u8 addr, int ds1775, const char *loc) @@ -157,7 +159,7 @@ static struct i2c_client *wf_lm75_create(struct i2c_adapter *adapter, * Let i2c-core delete that device on driver removal. * This is safe because i2c-core holds the core_lock mutex for us. */ - list_add_tail(&client->detected, &client->driver->clients); + list_add_tail(&client->detected, &wf_lm75_driver.clients); return client; fail: return NULL; diff --git a/drivers/macintosh/windfarm_max6690_sensor.c b/drivers/macintosh/windfarm_max6690_sensor.c index e2a55ecda2b..a67b349319e 100644 --- a/drivers/macintosh/windfarm_max6690_sensor.c +++ b/drivers/macintosh/windfarm_max6690_sensor.c @@ -88,6 +88,8 @@ static int wf_max6690_probe(struct i2c_client *client, return rc; } +static struct i2c_driver wf_max6690_driver; + static struct i2c_client *wf_max6690_create(struct i2c_adapter *adapter, u8 addr, const char *loc) { @@ -119,7 +121,7 @@ static struct i2c_client *wf_max6690_create(struct i2c_adapter *adapter, * Let i2c-core delete that device on driver removal. * This is safe because i2c-core holds the core_lock mutex for us. */ - list_add_tail(&client->detected, &client->driver->clients); + list_add_tail(&client->detected, &wf_max6690_driver.clients); return client; fail: diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c index 5da729e58f9..e20330a2895 100644 --- a/drivers/macintosh/windfarm_smu_sat.c +++ b/drivers/macintosh/windfarm_smu_sat.c @@ -194,6 +194,8 @@ static struct wf_sensor_ops wf_sat_ops = { .owner = THIS_MODULE, }; +static struct i2c_driver wf_sat_driver; + static void wf_sat_create(struct i2c_adapter *adapter, struct device_node *dev) { struct i2c_board_info info; @@ -222,7 +224,7 @@ static void wf_sat_create(struct i2c_adapter *adapter, struct device_node *dev) * Let i2c-core delete that device on driver removal. * This is safe because i2c-core holds the core_lock mutex for us. */ - list_add_tail(&client->detected, &client->driver->clients); + list_add_tail(&client->detected, &wf_sat_driver.clients); } static int wf_sat_probe(struct i2c_client *client, diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c index ba0edad2d04..54abf9e303b 100644 --- a/drivers/md/dm-log-userspace-transfer.c +++ b/drivers/md/dm-log-userspace-transfer.c @@ -129,11 +129,13 @@ static int fill_pkg(struct cn_msg *msg, struct dm_ulog_request *tfr) * This is the connector callback that delivers data * that was sent from userspace. 
*/ -static void cn_ulog_callback(void *data) +static void cn_ulog_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp) { - struct cn_msg *msg = (struct cn_msg *)data; struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1); + if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) + return; + spin_lock(&receiving_list_lock); if (msg->len == 0) fill_pkg(msg, NULL); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 376f1ab48a2..23e76fe0d35 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -130,7 +130,7 @@ struct mapped_device { /* * A list of ios that arrived while we were suspended. */ - atomic_t pending[2]; + atomic_t pending; wait_queue_head_t wait; struct work_struct work; struct bio_list deferred; @@ -453,14 +453,13 @@ static void start_io_acct(struct dm_io *io) { struct mapped_device *md = io->md; int cpu; - int rw = bio_data_dir(io->bio); io->start_time = jiffies; cpu = part_stat_lock(); part_round_stats(cpu, &dm_disk(md)->part0); part_stat_unlock(); - dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]); + dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending); } static void end_io_acct(struct dm_io *io) @@ -480,9 +479,8 @@ static void end_io_acct(struct dm_io *io) * After this is decremented the bio must not be touched if it is * a barrier. */ - dm_disk(md)->part0.in_flight[rw] = pending = - atomic_dec_return(&md->pending[rw]); - pending += atomic_read(&md->pending[rw^0x1]); + dm_disk(md)->part0.in_flight = pending = + atomic_dec_return(&md->pending); /* nudge anyone waiting on suspend queue */ if (!pending) @@ -1787,8 +1785,7 @@ static struct mapped_device *alloc_dev(int minor) if (!md->disk) goto bad_disk; - atomic_set(&md->pending[0], 0); - atomic_set(&md->pending[1], 0); + atomic_set(&md->pending, 0); init_waitqueue_head(&md->wait); INIT_WORK(&md->work, dm_wq_work); init_waitqueue_head(&md->eventq); @@ -2091,8 +2088,7 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible) break; } spin_unlock_irqrestore(q->queue_lock, flags); - } else if (!atomic_read(&md->pending[0]) && - !atomic_read(&md->pending[1])) + } else if (!atomic_read(&md->pending)) break; if (interruptible == TASK_INTERRUPTIBLE && diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c index 3750ff48cba..c37790ad92d 100644 --- a/drivers/media/dvb/dvb-core/dmxdev.c +++ b/drivers/media/dvb/dvb-core/dmxdev.c @@ -20,6 +20,7 @@ * */ +#include <linux/sched.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/vmalloc.h> @@ -1203,7 +1204,7 @@ static unsigned int dvb_dvr_poll(struct file *file, poll_table *wait) return mask; } -static struct file_operations dvb_dvr_fops = { +static const struct file_operations dvb_dvr_fops = { .owner = THIS_MODULE, .read = dvb_dvr_read, .write = dvb_dvr_write, diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c index eef6d361662..91c537bca8a 100644 --- a/drivers/media/dvb/dvb-core/dvb_demux.c +++ b/drivers/media/dvb/dvb-core/dvb_demux.c @@ -21,6 +21,7 @@ * */ +#include <linux/sched.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/vmalloc.h> diff --git a/drivers/media/dvb/firewire/firedtv-ci.c b/drivers/media/dvb/firewire/firedtv-ci.c index eeb80d0ea3f..853e04b7cb3 100644 --- a/drivers/media/dvb/firewire/firedtv-ci.c +++ b/drivers/media/dvb/firewire/firedtv-ci.c @@ -215,7 +215,7 @@ static unsigned int fdtv_ca_io_poll(struct file *file, poll_table *wait) return POLLIN; } -static struct file_operations fdtv_ca_fops = { 
+static const struct file_operations fdtv_ca_fops = { .owner = THIS_MODULE, .ioctl = dvb_generic_ioctl, .open = dvb_generic_open, diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c index 8b1440136c4..482d0f3be5f 100644 --- a/drivers/media/radio/radio-cadet.c +++ b/drivers/media/radio/radio-cadet.c @@ -38,6 +38,7 @@ #include <linux/videodev2.h> /* V4L2 API defs */ #include <linux/param.h> #include <linux/pnp.h> +#include <linux/sched.h> #include <linux/io.h> /* outb, outb_p */ #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> diff --git a/drivers/media/video/cpia.c b/drivers/media/video/cpia.c index 43ab0adf3b6..2377313c041 100644 --- a/drivers/media/video/cpia.c +++ b/drivers/media/video/cpia.c @@ -31,6 +31,7 @@ #include <linux/init.h> #include <linux/fs.h> #include <linux/vmalloc.h> +#include <linux/sched.h> #include <linux/slab.h> #include <linux/proc_fs.h> #include <linux/ctype.h> diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c index 5447da16a17..61348102827 100644 --- a/drivers/mfd/ab3100-core.c +++ b/drivers/mfd/ab3100-core.c @@ -57,8 +57,6 @@ * The AB3100 is usually assigned address 0x48 (7-bit) * The chip is defined in the platform i2c_board_data section. */ -static unsigned short normal_i2c[] = { 0x48, I2C_CLIENT_END }; -I2C_CLIENT_INSMOD_1(ab3100); u8 ab3100_get_chip_type(struct ab3100 *ab3100) { @@ -966,7 +964,7 @@ static int __exit ab3100_remove(struct i2c_client *client) } static const struct i2c_device_id ab3100_id[] = { - { "ab3100", ab3100 }, + { "ab3100", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, ab3100_id); diff --git a/drivers/mfd/ucb1400_core.c b/drivers/mfd/ucb1400_core.c index 2afc08006e6..fa294b6d600 100644 --- a/drivers/mfd/ucb1400_core.c +++ b/drivers/mfd/ucb1400_core.c @@ -21,6 +21,7 @@ */ #include <linux/module.h> +#include <linux/sched.h> #include <linux/ucb1400.h> unsigned int ucb1400_adc_read(struct snd_ac97 *ac97, u16 adc_channel, diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c index 3c0c58eed34..5a6b2bce8ad 100644 --- a/drivers/misc/eeprom/max6875.c +++ b/drivers/misc/eeprom/max6875.c @@ -33,12 +33,6 @@ #include <linux/i2c.h> #include <linux/mutex.h> -/* Do not scan - the MAX6875 access method will write to some EEPROM chips */ -static const unsigned short normal_i2c[] = { I2C_CLIENT_END }; - -/* Insmod parameters */ -I2C_CLIENT_INSMOD_1(max6875); - /* The MAX6875 can only read/write 16 bytes at a time */ #define SLICE_SIZE 16 #define SLICE_BITS 4 @@ -146,31 +140,21 @@ static struct bin_attribute user_eeprom_attr = { .read = max6875_read, }; -/* Return 0 if detection is successful, -ENODEV otherwise */ -static int max6875_detect(struct i2c_client *client, int kind, - struct i2c_board_info *info) +static int max6875_probe(struct i2c_client *client, + const struct i2c_device_id *id) { struct i2c_adapter *adapter = client->adapter; + struct max6875_data *data; + int err; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_BYTE_DATA | I2C_FUNC_SMBUS_READ_BYTE)) return -ENODEV; - /* Only check even addresses */ + /* Only bind to even addresses */ if (client->addr & 1) return -ENODEV; - strlcpy(info->type, "max6875", I2C_NAME_SIZE); - - return 0; -} - -static int max6875_probe(struct i2c_client *client, - const struct i2c_device_id *id) -{ - struct max6875_data *data; - int err; - if (!(data = kzalloc(sizeof(struct max6875_data), GFP_KERNEL))) return -ENOMEM; @@ -222,9 +206,6 @@ static struct i2c_driver max6875_driver = { .probe = max6875_probe, .remove = max6875_remove, 
.id_table = max6875_id, - - .detect = max6875_detect, - .address_data = &addr_data, }; static int __init max6875_init(void) diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c index fa57b67593a..90a95ce8dc3 100644 --- a/drivers/misc/phantom.c +++ b/drivers/misc/phantom.c @@ -271,7 +271,7 @@ static unsigned int phantom_poll(struct file *file, poll_table *wait) return mask; } -static struct file_operations phantom_file_ops = { +static const struct file_operations phantom_file_ops = { .open = phantom_open, .release = phantom_release, .unlocked_ioctl = phantom_ioctl, diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c index 300e7ba391a..41c8fe2a928 100644 --- a/drivers/misc/sgi-gru/grufile.c +++ b/drivers/misc/sgi-gru/grufile.c @@ -53,7 +53,6 @@ struct gru_stats_s gru_stats; /* Guaranteed user available resources on each node */ static int max_user_cbrs, max_user_dsr_bytes; -static struct file_operations gru_fops; static struct miscdevice gru_miscdev; @@ -426,7 +425,7 @@ static void __exit gru_exit(void) gru_proc_exit(); } -static struct file_operations gru_fops = { +static const struct file_operations gru_fops = { .owner = THIS_MODULE, .unlocked_ioctl = gru_file_unlocked_ioctl, .mmap = gru_file_mmap, diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c index 610dbd1fcc8..96d10f40fb2 100644 --- a/drivers/mmc/core/debugfs.c +++ b/drivers/mmc/core/debugfs.c @@ -240,7 +240,7 @@ static int mmc_ext_csd_release(struct inode *inode, struct file *file) return 0; } -static struct file_operations mmc_dbg_ext_csd_fops = { +static const struct file_operations mmc_dbg_ext_csd_fops = { .open = mmc_ext_csd_open, .read = mmc_ext_csd_read, .release = mmc_ext_csd_release, diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c index 6636354b48c..f85dcd53650 100644 --- a/drivers/mmc/core/sdio_cis.c +++ b/drivers/mmc/core/sdio_cis.c @@ -29,6 +29,8 @@ static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func, unsigned i, nr_strings; char **buffer, *string; + /* Find all null-terminated (including zero length) strings in + the TPLLV1_INFO field. Trailing garbage is ignored. */ buf += 2; size -= 2; @@ -39,11 +41,8 @@ static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func, if (buf[i] == 0) nr_strings++; } - - if (nr_strings < 4) { - printk(KERN_WARNING "SDIO: ignoring broken CISTPL_VERS_1\n"); + if (nr_strings == 0) return 0; - } size = i; @@ -98,6 +97,22 @@ static const unsigned char speed_val[16] = static const unsigned int speed_unit[8] = { 10000, 100000, 1000000, 10000000, 0, 0, 0, 0 }; +/* FUNCE tuples with these types get passed to SDIO drivers */ +static const unsigned char funce_type_whitelist[] = { + 4 /* CISTPL_FUNCE_LAN_NODE_ID used in Broadcom cards */ +}; + +static int cistpl_funce_whitelisted(unsigned char type) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(funce_type_whitelist); i++) { + if (funce_type_whitelist[i] == type) + return 1; + } + return 0; +} + static int cistpl_funce_common(struct mmc_card *card, const unsigned char *buf, unsigned size) { @@ -120,6 +135,10 @@ static int cistpl_funce_func(struct sdio_func *func, unsigned vsn; unsigned min_size; + /* let SDIO drivers take care of whitelisted FUNCE tuples */ + if (cistpl_funce_whitelisted(buf[0])) + return -EILSEQ; + vsn = func->card->cccr.sdio_vsn; min_size = (vsn == SDIO_SDIO_REV_1_00) ? 
28 : 42; @@ -154,13 +173,12 @@ static int cistpl_funce(struct mmc_card *card, struct sdio_func *func, else ret = cistpl_funce_common(card, buf, size); - if (ret) { + if (ret && ret != -EILSEQ) { printk(KERN_ERR "%s: bad CISTPL_FUNCE size %u " "type %u\n", mmc_hostname(card->host), size, buf[0]); - return ret; } - return 0; + return ret; } typedef int (tpl_parse_t)(struct mmc_card *, struct sdio_func *, @@ -253,21 +271,12 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func) for (i = 0; i < ARRAY_SIZE(cis_tpl_list); i++) if (cis_tpl_list[i].code == tpl_code) break; - if (i >= ARRAY_SIZE(cis_tpl_list)) { - /* this tuple is unknown to the core */ - this->next = NULL; - this->code = tpl_code; - this->size = tpl_link; - *prev = this; - prev = &this->next; - printk(KERN_DEBUG - "%s: queuing CIS tuple 0x%02x length %u\n", - mmc_hostname(card->host), tpl_code, tpl_link); - } else { + if (i < ARRAY_SIZE(cis_tpl_list)) { const struct cis_tpl *tpl = cis_tpl_list + i; if (tpl_link < tpl->min_size) { printk(KERN_ERR - "%s: bad CIS tuple 0x%02x (length = %u, expected >= %u)\n", + "%s: bad CIS tuple 0x%02x" + " (length = %u, expected >= %u)\n", mmc_hostname(card->host), tpl_code, tpl_link, tpl->min_size); ret = -EINVAL; @@ -275,7 +284,30 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func) ret = tpl->parse(card, func, this->data, tpl_link); } - kfree(this); + /* + * We don't need the tuple anymore if it was + * successfully parsed by the SDIO core or if it is + * not going to be parsed by SDIO drivers. + */ + if (!ret || ret != -EILSEQ) + kfree(this); + } else { + /* unknown tuple */ + ret = -EILSEQ; + } + + if (ret == -EILSEQ) { + /* this tuple is unknown to the core or whitelisted */ + this->next = NULL; + this->code = tpl_code; + this->size = tpl_link; + *prev = this; + prev = &this->next; + printk(KERN_DEBUG + "%s: queuing CIS tuple 0x%02x length %u\n", + mmc_hostname(card->host), tpl_code, tpl_link); + /* keep on analyzing tuples */ + ret = 0; } ptr += tpl_link; diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 7cb057f3f88..432ae8358c8 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -276,6 +276,47 @@ config MMC_S3C If unsure, say N. +config MMC_S3C_HW_SDIO_IRQ + bool "Hardware support for SDIO IRQ" + depends on MMC_S3C + help + Enable the hardware support for SDIO interrupts instead of using + the generic polling code. + +choice + prompt "Samsung S3C SD/MMC transfer code" + depends on MMC_S3C + +config MMC_S3C_PIO + bool "Use PIO transfers only" + help + Use PIO to transfer data between memory and the hardware. + + PIO is slower than DMA as it requires CPU instructions to + move the data. This has been the traditional default for + the S3C MCI driver. + +config MMC_S3C_DMA + bool "Use DMA transfers only (EXPERIMENTAL)" + depends on EXPERIMENTAL + help + Use DMA to transfer data between memory and the hardare. + + Currently, the DMA support in this driver seems to not be + working properly and needs to be debugged before this + option is useful. + +config MMC_S3C_PIODMA + bool "Support for both PIO and DMA (EXPERIMENTAL)" + help + Compile both the PIO and DMA transfer routines into the + driver and let the platform select at run-time which one + is best. + + See notes for the DMA option. 
+ +endchoice + config MMC_SDRICOH_CS tristate "MMC/SD driver for Ricoh Bay1Controllers (EXPERIMENTAL)" depends on EXPERIMENTAL && PCI && PCMCIA diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 3d1e5329da1..705a5894a6b 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -678,7 +678,6 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id) writel(0, host->base + MMCIMASK1); writel(0xfff, host->base + MMCICLEAR); -#ifdef CONFIG_GPIOLIB if (gpio_is_valid(plat->gpio_cd)) { ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)"); if (ret == 0) @@ -697,7 +696,6 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id) else if (ret != -ENOSYS) goto err_gpio_wp; } -#endif ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host); if (ret) diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c index 8c08cd7efa7..99b74a35102 100644 --- a/drivers/mmc/host/s3cmci.c +++ b/drivers/mmc/host/s3cmci.c @@ -17,6 +17,8 @@ #include <linux/mmc/host.h> #include <linux/platform_device.h> #include <linux/cpufreq.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> #include <linux/gpio.h> #include <linux/irq.h> #include <linux/io.h> @@ -58,8 +60,6 @@ static const int dbgmap_debug = dbg_err | dbg_debug; dev_dbg(&host->pdev->dev, args); \ } while (0) -#define RESSIZE(ressource) (((ressource)->end - (ressource)->start)+1) - static struct s3c2410_dma_client s3cmci_dma_client = { .name = "s3c-mci", }; @@ -164,6 +164,40 @@ static void dbg_dumpregs(struct s3cmci_host *host, char *prefix) { } #endif /* CONFIG_MMC_DEBUG */ +/** + * s3cmci_host_usedma - return whether the host is using dma or pio + * @host: The host state + * + * Return true if the host is using DMA to transfer data, else false + * to use PIO mode. Will return static data depending on the driver + * configuration. + */ +static inline bool s3cmci_host_usedma(struct s3cmci_host *host) +{ +#ifdef CONFIG_MMC_S3C_PIO + return false; +#elif defined(CONFIG_MMC_S3C_DMA) + return true; +#else + return host->dodma; +#endif +} + +/** + * s3cmci_host_canpio - return true if host has pio code available + * + * Return true if the driver has been compiled with the PIO support code + * available. + */ +static inline bool s3cmci_host_canpio(void) +{ +#ifdef CONFIG_MMC_S3C_PIO + return true; +#else + return false; +#endif +} + static inline u32 enable_imask(struct s3cmci_host *host, u32 imask) { u32 newmask; @@ -190,7 +224,33 @@ static inline u32 disable_imask(struct s3cmci_host *host, u32 imask) static inline void clear_imask(struct s3cmci_host *host) { - writel(0, host->base + host->sdiimsk); + u32 mask = readl(host->base + host->sdiimsk); + + /* preserve the SDIO IRQ mask state */ + mask &= S3C2410_SDIIMSK_SDIOIRQ; + writel(mask, host->base + host->sdiimsk); +} + +/** + * s3cmci_check_sdio_irq - test whether the SDIO IRQ is being signalled + * @host: The host to check. + * + * Test to see if the SDIO interrupt is being signalled in case the + * controller has failed to re-detect a card interrupt. Read GPE8 and + * see if it is low and if so, signal a SDIO interrupt. + * + * This is currently called if a request is finished (we assume that the + * bus is now idle) and when the SDIO IRQ is enabled in case the IRQ is + * already being indicated. 
+*/ +static void s3cmci_check_sdio_irq(struct s3cmci_host *host) +{ + if (host->sdio_irqen) { + if (gpio_get_value(S3C2410_GPE(8)) == 0) { + printk(KERN_DEBUG "%s: signalling irq\n", __func__); + mmc_signal_sdio_irq(host->mmc); + } + } } static inline int get_data_buffer(struct s3cmci_host *host, @@ -238,6 +298,64 @@ static inline u32 fifo_free(struct s3cmci_host *host) return 63 - fifostat; } +/** + * s3cmci_enable_irq - enable IRQ, after having disabled it. + * @host: The device state. + * @more: True if more IRQs are expected from transfer. + * + * Enable the main IRQ if needed after it has been disabled. + * + * The IRQ can be one of the following states: + * - disabled during IDLE + * - disabled whilst processing data + * - enabled during transfer + * - enabled whilst awaiting SDIO interrupt detection + */ +static void s3cmci_enable_irq(struct s3cmci_host *host, bool more) +{ + unsigned long flags; + bool enable = false; + + local_irq_save(flags); + + host->irq_enabled = more; + host->irq_disabled = false; + + enable = more | host->sdio_irqen; + + if (host->irq_state != enable) { + host->irq_state = enable; + + if (enable) + enable_irq(host->irq); + else + disable_irq(host->irq); + } + + local_irq_restore(flags); +} + +/** + * + */ +static void s3cmci_disable_irq(struct s3cmci_host *host, bool transfer) +{ + unsigned long flags; + + local_irq_save(flags); + + //printk(KERN_DEBUG "%s: transfer %d\n", __func__, transfer); + + host->irq_disabled = transfer; + + if (transfer && host->irq_state) { + host->irq_state = false; + disable_irq(host->irq); + } + + local_irq_restore(flags); +} + static void do_pio_read(struct s3cmci_host *host) { int res; @@ -374,8 +492,7 @@ static void pio_tasklet(unsigned long data) { struct s3cmci_host *host = (struct s3cmci_host *) data; - - disable_irq(host->irq); + s3cmci_disable_irq(host, true); if (host->pio_active == XFER_WRITE) do_pio_write(host); @@ -395,9 +512,10 @@ static void pio_tasklet(unsigned long data) host->mrq->data->error = -EINVAL; } + s3cmci_enable_irq(host, false); finalize_request(host); } else - enable_irq(host->irq); + s3cmci_enable_irq(host, true); } /* @@ -432,17 +550,27 @@ static irqreturn_t s3cmci_irq(int irq, void *dev_id) struct s3cmci_host *host = dev_id; struct mmc_command *cmd; u32 mci_csta, mci_dsta, mci_fsta, mci_dcnt, mci_imsk; - u32 mci_cclear, mci_dclear; + u32 mci_cclear = 0, mci_dclear; unsigned long iflags; + mci_dsta = readl(host->base + S3C2410_SDIDSTA); + mci_imsk = readl(host->base + host->sdiimsk); + + if (mci_dsta & S3C2410_SDIDSTA_SDIOIRQDETECT) { + if (mci_imsk & S3C2410_SDIIMSK_SDIOIRQ) { + mci_dclear = S3C2410_SDIDSTA_SDIOIRQDETECT; + writel(mci_dclear, host->base + S3C2410_SDIDSTA); + + mmc_signal_sdio_irq(host->mmc); + return IRQ_HANDLED; + } + } + spin_lock_irqsave(&host->complete_lock, iflags); mci_csta = readl(host->base + S3C2410_SDICMDSTAT); - mci_dsta = readl(host->base + S3C2410_SDIDSTA); mci_dcnt = readl(host->base + S3C2410_SDIDCNT); mci_fsta = readl(host->base + S3C2410_SDIFSTA); - mci_imsk = readl(host->base + host->sdiimsk); - mci_cclear = 0; mci_dclear = 0; if ((host->complete_what == COMPLETION_NONE) || @@ -466,7 +594,7 @@ static irqreturn_t s3cmci_irq(int irq, void *dev_id) goto irq_out; } - if (!host->dodma) { + if (!s3cmci_host_usedma(host)) { if ((host->pio_active == XFER_WRITE) && (mci_fsta & S3C2410_SDIFSTA_TFDET)) { @@ -673,6 +801,7 @@ static void s3cmci_dma_done_callback(struct s3c2410_dma_chan *dma_ch, dbg(host, dbg_dma, "DMA FINISHED Size:%i DSTA:%08x DCNT:%08x\n", size, mci_dsta, 
mci_dcnt); + host->dma_complete = 1; host->complete_what = COMPLETION_FINALIZE; out: @@ -683,9 +812,9 @@ out: fail_request: host->mrq->data->error = -EINVAL; host->complete_what = COMPLETION_FINALIZE; - writel(0, host->base + host->sdiimsk); - goto out; + clear_imask(host); + goto out; } static void finalize_request(struct s3cmci_host *host) @@ -702,8 +831,9 @@ static void finalize_request(struct s3cmci_host *host) if (cmd->data && (cmd->error == 0) && (cmd->data->error == 0)) { - if (host->dodma && (!host->dma_complete)) { - dbg(host, dbg_dma, "DMA Missing!\n"); + if (s3cmci_host_usedma(host) && (!host->dma_complete)) { + dbg(host, dbg_dma, "DMA Missing (%d)!\n", + host->dma_complete); return; } } @@ -728,7 +858,7 @@ static void finalize_request(struct s3cmci_host *host) writel(0, host->base + S3C2410_SDICMDARG); writel(S3C2410_SDIDCON_STOP, host->base + S3C2410_SDIDCON); writel(0, host->base + S3C2410_SDICMDCON); - writel(0, host->base + host->sdiimsk); + clear_imask(host); if (cmd->data && cmd->error) cmd->data->error = cmd->error; @@ -754,7 +884,7 @@ static void finalize_request(struct s3cmci_host *host) /* If we had an error while transfering data we flush the * DMA channel and the fifo to clear out any garbage. */ if (mrq->data->error != 0) { - if (host->dodma) + if (s3cmci_host_usedma(host)) s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH); if (host->is2440) { @@ -776,6 +906,8 @@ static void finalize_request(struct s3cmci_host *host) request_done: host->complete_what = COMPLETION_NONE; host->mrq = NULL; + + s3cmci_check_sdio_irq(host); mmc_request_done(host->mmc, mrq); } @@ -872,7 +1004,7 @@ static int s3cmci_setup_data(struct s3cmci_host *host, struct mmc_data *data) dcon = data->blocks & S3C2410_SDIDCON_BLKNUM_MASK; - if (host->dodma) + if (s3cmci_host_usedma(host)) dcon |= S3C2410_SDIDCON_DMAEN; if (host->bus_width == MMC_BUS_WIDTH_4) @@ -950,7 +1082,7 @@ static int s3cmci_prepare_pio(struct s3cmci_host *host, struct mmc_data *data) static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data) { int dma_len, i; - int rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0; + int rw = data->flags & MMC_DATA_WRITE; BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR); @@ -958,7 +1090,7 @@ static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data) s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH); dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, - (rw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); + rw ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE); if (dma_len == 0) return -ENOMEM; @@ -969,11 +1101,11 @@ static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data) for (i = 0; i < dma_len; i++) { int res; - dbg(host, dbg_dma, "enqueue %i:%u@%u\n", i, - sg_dma_address(&data->sg[i]), - sg_dma_len(&data->sg[i])); + dbg(host, dbg_dma, "enqueue %i: %08x@%u\n", i, + sg_dma_address(&data->sg[i]), + sg_dma_len(&data->sg[i])); - res = s3c2410_dma_enqueue(host->dma, (void *) host, + res = s3c2410_dma_enqueue(host->dma, host, sg_dma_address(&data->sg[i]), sg_dma_len(&data->sg[i])); @@ -1018,7 +1150,7 @@ static void s3cmci_send_request(struct mmc_host *mmc) return; } - if (host->dodma) + if (s3cmci_host_usedma(host)) res = s3cmci_prepare_dma(host, cmd->data); else res = s3cmci_prepare_pio(host, cmd->data); @@ -1037,7 +1169,7 @@ static void s3cmci_send_request(struct mmc_host *mmc) s3cmci_send_command(host, cmd); /* Enable Interrupt */ - enable_irq(host->irq); + s3cmci_enable_irq(host, true); } static int s3cmci_card_present(struct mmc_host *mmc) @@ -1049,7 +1181,7 @@ static int s3cmci_card_present(struct mmc_host *mmc) if (pdata->gpio_detect == 0) return -ENOSYS; - ret = s3c2410_gpio_getpin(pdata->gpio_detect) ? 0 : 1; + ret = gpio_get_value(pdata->gpio_detect) ? 0 : 1; return ret ^ pdata->detect_invert; } @@ -1104,12 +1236,12 @@ static void s3cmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) switch (ios->power_mode) { case MMC_POWER_ON: case MMC_POWER_UP: - s3c2410_gpio_cfgpin(S3C2410_GPE5, S3C2410_GPE5_SDCLK); - s3c2410_gpio_cfgpin(S3C2410_GPE6, S3C2410_GPE6_SDCMD); - s3c2410_gpio_cfgpin(S3C2410_GPE7, S3C2410_GPE7_SDDAT0); - s3c2410_gpio_cfgpin(S3C2410_GPE8, S3C2410_GPE8_SDDAT1); - s3c2410_gpio_cfgpin(S3C2410_GPE9, S3C2410_GPE9_SDDAT2); - s3c2410_gpio_cfgpin(S3C2410_GPE10, S3C2410_GPE10_SDDAT3); + s3c2410_gpio_cfgpin(S3C2410_GPE(5), S3C2410_GPE5_SDCLK); + s3c2410_gpio_cfgpin(S3C2410_GPE(6), S3C2410_GPE6_SDCMD); + s3c2410_gpio_cfgpin(S3C2410_GPE(7), S3C2410_GPE7_SDDAT0); + s3c2410_gpio_cfgpin(S3C2410_GPE(8), S3C2410_GPE8_SDDAT1); + s3c2410_gpio_cfgpin(S3C2410_GPE(9), S3C2410_GPE9_SDDAT2); + s3c2410_gpio_cfgpin(S3C2410_GPE(10), S3C2410_GPE10_SDDAT3); if (host->pdata->set_power) host->pdata->set_power(ios->power_mode, ios->vdd); @@ -1121,8 +1253,7 @@ static void s3cmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) case MMC_POWER_OFF: default: - s3c2410_gpio_setpin(S3C2410_GPE5, 0); - s3c2410_gpio_cfgpin(S3C2410_GPE5, S3C2410_GPIO_OUTPUT); + gpio_direction_output(S3C2410_GPE(5), 0); if (host->is2440) mci_con |= S3C2440_SDICON_SDRESET; @@ -1168,7 +1299,7 @@ static int s3cmci_get_ro(struct mmc_host *mmc) struct s3c24xx_mci_pdata *pdata = host->pdata; int ret; - if (pdata->gpio_wprotect == 0) + if (pdata->no_wprotect) return 0; ret = s3c2410_gpio_getpin(pdata->gpio_wprotect); @@ -1179,11 +1310,52 @@ static int s3cmci_get_ro(struct mmc_host *mmc) return ret; } +static void s3cmci_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ + struct s3cmci_host *host = mmc_priv(mmc); + unsigned long flags; + u32 con; + + local_irq_save(flags); + + con = readl(host->base + S3C2410_SDICON); + host->sdio_irqen = enable; + + if (enable == host->sdio_irqen) + goto same_state; + + if (enable) { + con |= S3C2410_SDICON_SDIOIRQ; + enable_imask(host, S3C2410_SDIIMSK_SDIOIRQ); + + if (!host->irq_state && !host->irq_disabled) { + host->irq_state = true; + enable_irq(host->irq); + } + } else { + disable_imask(host, S3C2410_SDIIMSK_SDIOIRQ); + con &= ~S3C2410_SDICON_SDIOIRQ; + + if (!host->irq_enabled && 
host->irq_state) { + disable_irq_nosync(host->irq); + host->irq_state = false; + } + } + + writel(con, host->base + S3C2410_SDICON); + + same_state: + local_irq_restore(flags); + + s3cmci_check_sdio_irq(host); +} + static struct mmc_host_ops s3cmci_ops = { .request = s3cmci_request, .set_ios = s3cmci_set_ios, .get_ro = s3cmci_get_ro, .get_cd = s3cmci_card_present, + .enable_sdio_irq = s3cmci_enable_sdio_irq, }; static struct s3c24xx_mci_pdata s3cmci_def_pdata = { @@ -1246,11 +1418,140 @@ static inline void s3cmci_cpufreq_deregister(struct s3cmci_host *host) } #endif -static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440) + +#ifdef CONFIG_DEBUG_FS + +static int s3cmci_state_show(struct seq_file *seq, void *v) +{ + struct s3cmci_host *host = seq->private; + + seq_printf(seq, "Register base = 0x%08x\n", (u32)host->base); + seq_printf(seq, "Clock rate = %ld\n", host->clk_rate); + seq_printf(seq, "Prescale = %d\n", host->prescaler); + seq_printf(seq, "is2440 = %d\n", host->is2440); + seq_printf(seq, "IRQ = %d\n", host->irq); + seq_printf(seq, "IRQ enabled = %d\n", host->irq_enabled); + seq_printf(seq, "IRQ disabled = %d\n", host->irq_disabled); + seq_printf(seq, "IRQ state = %d\n", host->irq_state); + seq_printf(seq, "CD IRQ = %d\n", host->irq_cd); + seq_printf(seq, "Do DMA = %d\n", s3cmci_host_usedma(host)); + seq_printf(seq, "SDIIMSK at %d\n", host->sdiimsk); + seq_printf(seq, "SDIDATA at %d\n", host->sdidata); + + return 0; +} + +static int s3cmci_state_open(struct inode *inode, struct file *file) +{ + return single_open(file, s3cmci_state_show, inode->i_private); +} + +static const struct file_operations s3cmci_fops_state = { + .owner = THIS_MODULE, + .open = s3cmci_state_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +#define DBG_REG(_r) { .addr = S3C2410_SDI##_r, .name = #_r } + +struct s3cmci_reg { + unsigned short addr; + unsigned char *name; +} debug_regs[] = { + DBG_REG(CON), + DBG_REG(PRE), + DBG_REG(CMDARG), + DBG_REG(CMDCON), + DBG_REG(CMDSTAT), + DBG_REG(RSP0), + DBG_REG(RSP1), + DBG_REG(RSP2), + DBG_REG(RSP3), + DBG_REG(TIMER), + DBG_REG(BSIZE), + DBG_REG(DCON), + DBG_REG(DCNT), + DBG_REG(DSTA), + DBG_REG(FSTA), + {} +}; + +static int s3cmci_regs_show(struct seq_file *seq, void *v) +{ + struct s3cmci_host *host = seq->private; + struct s3cmci_reg *rptr = debug_regs; + + for (; rptr->name; rptr++) + seq_printf(seq, "SDI%s\t=0x%08x\n", rptr->name, + readl(host->base + rptr->addr)); + + seq_printf(seq, "SDIIMSK\t=0x%08x\n", readl(host->base + host->sdiimsk)); + + return 0; +} + +static int s3cmci_regs_open(struct inode *inode, struct file *file) +{ + return single_open(file, s3cmci_regs_show, inode->i_private); +} + +static const struct file_operations s3cmci_fops_regs = { + .owner = THIS_MODULE, + .open = s3cmci_regs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static void s3cmci_debugfs_attach(struct s3cmci_host *host) +{ + struct device *dev = &host->pdev->dev; + + host->debug_root = debugfs_create_dir(dev_name(dev), NULL); + if (IS_ERR(host->debug_root)) { + dev_err(dev, "failed to create debugfs root\n"); + return; + } + + host->debug_state = debugfs_create_file("state", 0444, + host->debug_root, host, + &s3cmci_fops_state); + + if (IS_ERR(host->debug_state)) + dev_err(dev, "failed to create debug state file\n"); + + host->debug_regs = debugfs_create_file("regs", 0444, + host->debug_root, host, + &s3cmci_fops_regs); + + if (IS_ERR(host->debug_regs)) + dev_err(dev, "failed to create 
debug regs file\n"); +} + +static void s3cmci_debugfs_remove(struct s3cmci_host *host) +{ + debugfs_remove(host->debug_regs); + debugfs_remove(host->debug_state); + debugfs_remove(host->debug_root); +} + +#else +static inline void s3cmci_debugfs_attach(struct s3cmci_host *host) { } +static inline void s3cmci_debugfs_remove(struct s3cmci_host *host) { } + +#endif /* CONFIG_DEBUG_FS */ + +static int __devinit s3cmci_probe(struct platform_device *pdev) { struct s3cmci_host *host; struct mmc_host *mmc; int ret; + int is2440; + int i; + + is2440 = platform_get_device_id(pdev)->driver_data; mmc = mmc_alloc_host(sizeof(struct s3cmci_host), &pdev->dev); if (!mmc) { @@ -1258,6 +1559,18 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440) goto probe_out; } + for (i = S3C2410_GPE(5); i <= S3C2410_GPE(10); i++) { + ret = gpio_request(i, dev_name(&pdev->dev)); + if (ret) { + dev_err(&pdev->dev, "failed to get gpio %d\n", i); + + for (i--; i >= S3C2410_GPE(5); i--) + gpio_free(i); + + goto probe_free_host; + } + } + host = mmc_priv(mmc); host->mmc = mmc; host->pdev = pdev; @@ -1282,11 +1595,12 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440) host->clk_div = 2; } - host->dodma = 0; host->complete_what = COMPLETION_NONE; host->pio_active = XFER_NONE; - host->dma = S3CMCI_DMA; +#ifdef CONFIG_MMC_S3C_PIODMA + host->dodma = host->pdata->dma; +#endif host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!host->mem) { @@ -1294,19 +1608,19 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440) "failed to get io memory region resouce.\n"); ret = -ENOENT; - goto probe_free_host; + goto probe_free_gpio; } host->mem = request_mem_region(host->mem->start, - RESSIZE(host->mem), pdev->name); + resource_size(host->mem), pdev->name); if (!host->mem) { dev_err(&pdev->dev, "failed to request io memory region.\n"); ret = -ENOENT; - goto probe_free_host; + goto probe_free_gpio; } - host->base = ioremap(host->mem->start, RESSIZE(host->mem)); + host->base = ioremap(host->mem->start, resource_size(host->mem)); if (!host->base) { dev_err(&pdev->dev, "failed to ioremap() io memory region.\n"); ret = -EINVAL; @@ -1331,31 +1645,60 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440) * ensure we don't lock the system with un-serviceable requests. 
*/ disable_irq(host->irq); + host->irq_state = false; - host->irq_cd = s3c2410_gpio_getirq(host->pdata->gpio_detect); - - if (host->irq_cd >= 0) { - if (request_irq(host->irq_cd, s3cmci_irq_cd, - IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, - DRIVER_NAME, host)) { - dev_err(&pdev->dev, "can't get card detect irq.\n"); - ret = -ENOENT; + if (!host->pdata->no_detect) { + ret = gpio_request(host->pdata->gpio_detect, "s3cmci detect"); + if (ret) { + dev_err(&pdev->dev, "failed to get detect gpio\n"); goto probe_free_irq; } - } else { - dev_warn(&pdev->dev, "host detect has no irq available\n"); - s3c2410_gpio_cfgpin(host->pdata->gpio_detect, - S3C2410_GPIO_INPUT); + + host->irq_cd = s3c2410_gpio_getirq(host->pdata->gpio_detect); + + if (host->irq_cd >= 0) { + if (request_irq(host->irq_cd, s3cmci_irq_cd, + IRQF_TRIGGER_RISING | + IRQF_TRIGGER_FALLING, + DRIVER_NAME, host)) { + dev_err(&pdev->dev, + "can't get card detect irq.\n"); + ret = -ENOENT; + goto probe_free_gpio_cd; + } + } else { + dev_warn(&pdev->dev, + "host detect has no irq available\n"); + gpio_direction_input(host->pdata->gpio_detect); + } + } else + host->irq_cd = -1; + + if (!host->pdata->no_wprotect) { + ret = gpio_request(host->pdata->gpio_wprotect, "s3cmci wp"); + if (ret) { + dev_err(&pdev->dev, "failed to get writeprotect\n"); + goto probe_free_irq_cd; + } + + gpio_direction_input(host->pdata->gpio_wprotect); } - if (host->pdata->gpio_wprotect) - s3c2410_gpio_cfgpin(host->pdata->gpio_wprotect, - S3C2410_GPIO_INPUT); + /* depending on the dma state, get a dma channel to use. */ - if (s3c2410_dma_request(S3CMCI_DMA, &s3cmci_dma_client, NULL) < 0) { - dev_err(&pdev->dev, "unable to get DMA channel.\n"); - ret = -EBUSY; - goto probe_free_irq_cd; + if (s3cmci_host_usedma(host)) { + host->dma = s3c2410_dma_request(DMACH_SDI, &s3cmci_dma_client, + host); + if (host->dma < 0) { + dev_err(&pdev->dev, "cannot get DMA channel.\n"); + if (!s3cmci_host_canpio()) { + ret = -EBUSY; + goto probe_free_gpio_wp; + } else { + dev_warn(&pdev->dev, "falling back to PIO.\n"); + host->dodma = 0; + } + } } host->clk = clk_get(&pdev->dev, "sdi"); @@ -1363,7 +1706,7 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440) dev_err(&pdev->dev, "failed to find clock source.\n"); ret = PTR_ERR(host->clk); host->clk = NULL; - goto probe_free_host; + goto probe_free_dma; } ret = clk_enable(host->clk); @@ -1376,7 +1719,11 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440) mmc->ops = &s3cmci_ops; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; +#ifdef CONFIG_MMC_S3C_HW_SDIO_IRQ + mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; +#else mmc->caps = MMC_CAP_4_BIT_DATA; +#endif mmc->f_min = host->clk_rate / (host->clk_div * 256); mmc->f_max = host->clk_rate / host->clk_div; @@ -1408,8 +1755,12 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440) goto free_cpufreq; } + s3cmci_debugfs_attach(host); + platform_set_drvdata(pdev, mmc); - dev_info(&pdev->dev, "initialisation done.\n"); + dev_info(&pdev->dev, "%s - using %s, %s SDIO IRQ\n", mmc_hostname(mmc), + s3cmci_host_usedma(host) ? "dma" : "pio", + mmc->caps & MMC_CAP_SDIO_IRQ ? 
"hw" : "sw"); return 0; @@ -1422,6 +1773,18 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440) clk_free: clk_put(host->clk); + probe_free_dma: + if (s3cmci_host_usedma(host)) + s3c2410_dma_free(host->dma, &s3cmci_dma_client); + + probe_free_gpio_wp: + if (!host->pdata->no_wprotect) + gpio_free(host->pdata->gpio_wprotect); + + probe_free_gpio_cd: + if (!host->pdata->no_detect) + gpio_free(host->pdata->gpio_detect); + probe_free_irq_cd: if (host->irq_cd >= 0) free_irq(host->irq_cd, host); @@ -1433,10 +1796,15 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440) iounmap(host->base); probe_free_mem_region: - release_mem_region(host->mem->start, RESSIZE(host->mem)); + release_mem_region(host->mem->start, resource_size(host->mem)); + + probe_free_gpio: + for (i = S3C2410_GPE(5); i <= S3C2410_GPE(10); i++) + gpio_free(i); probe_free_host: mmc_free_host(mmc); + probe_out: return ret; } @@ -1449,6 +1817,7 @@ static void s3cmci_shutdown(struct platform_device *pdev) if (host->irq_cd >= 0) free_irq(host->irq_cd, host); + s3cmci_debugfs_remove(host); s3cmci_cpufreq_deregister(host); mmc_remove_host(mmc); clk_disable(host->clk); @@ -1458,104 +1827,102 @@ static int __devexit s3cmci_remove(struct platform_device *pdev) { struct mmc_host *mmc = platform_get_drvdata(pdev); struct s3cmci_host *host = mmc_priv(mmc); + struct s3c24xx_mci_pdata *pd = host->pdata; + int i; s3cmci_shutdown(pdev); clk_put(host->clk); tasklet_disable(&host->pio_tasklet); - s3c2410_dma_free(S3CMCI_DMA, &s3cmci_dma_client); + + if (s3cmci_host_usedma(host)) + s3c2410_dma_free(host->dma, &s3cmci_dma_client); free_irq(host->irq, host); + if (!pd->no_wprotect) + gpio_free(pd->gpio_wprotect); + + if (!pd->no_detect) + gpio_free(pd->gpio_detect); + + for (i = S3C2410_GPE(5); i <= S3C2410_GPE(10); i++) + gpio_free(i); + + iounmap(host->base); - release_mem_region(host->mem->start, RESSIZE(host->mem)); + release_mem_region(host->mem->start, resource_size(host->mem)); mmc_free_host(mmc); return 0; } -static int __devinit s3cmci_2410_probe(struct platform_device *dev) -{ - return s3cmci_probe(dev, 0); -} +static struct platform_device_id s3cmci_driver_ids[] = { + { + .name = "s3c2410-sdi", + .driver_data = 0, + }, { + .name = "s3c2412-sdi", + .driver_data = 1, + }, { + .name = "s3c2440-sdi", + .driver_data = 1, + }, + { } +}; -static int __devinit s3cmci_2412_probe(struct platform_device *dev) -{ - return s3cmci_probe(dev, 1); -} +MODULE_DEVICE_TABLE(platform, s3cmci_driver_ids); -static int __devinit s3cmci_2440_probe(struct platform_device *dev) -{ - return s3cmci_probe(dev, 1); -} #ifdef CONFIG_PM -static int s3cmci_suspend(struct platform_device *dev, pm_message_t state) +static int s3cmci_suspend(struct device *dev) { - struct mmc_host *mmc = platform_get_drvdata(dev); + struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev)); + struct pm_message event = { PM_EVENT_SUSPEND }; - return mmc_suspend_host(mmc, state); + return mmc_suspend_host(mmc, event); } -static int s3cmci_resume(struct platform_device *dev) +static int s3cmci_resume(struct device *dev) { - struct mmc_host *mmc = platform_get_drvdata(dev); + struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev)); return mmc_resume_host(mmc); } -#else /* CONFIG_PM */ -#define s3cmci_suspend NULL -#define s3cmci_resume NULL -#endif /* CONFIG_PM */ - - -static struct platform_driver s3cmci_2410_driver = { - .driver.name = "s3c2410-sdi", - .driver.owner = THIS_MODULE, - .probe = s3cmci_2410_probe, - .remove = 
__devexit_p(s3cmci_remove), - .shutdown = s3cmci_shutdown, +static struct dev_pm_ops s3cmci_pm = { .suspend = s3cmci_suspend, .resume = s3cmci_resume, }; -static struct platform_driver s3cmci_2412_driver = { - .driver.name = "s3c2412-sdi", - .driver.owner = THIS_MODULE, - .probe = s3cmci_2412_probe, - .remove = __devexit_p(s3cmci_remove), - .shutdown = s3cmci_shutdown, - .suspend = s3cmci_suspend, - .resume = s3cmci_resume, -}; +#define s3cmci_pm_ops &s3cmci_pm +#else /* CONFIG_PM */ +#define s3cmci_pm_ops NULL +#endif /* CONFIG_PM */ -static struct platform_driver s3cmci_2440_driver = { - .driver.name = "s3c2440-sdi", - .driver.owner = THIS_MODULE, - .probe = s3cmci_2440_probe, + +static struct platform_driver s3cmci_driver = { + .driver = { + .name = "s3c-sdi", + .owner = THIS_MODULE, + .pm = s3cmci_pm_ops, + }, + .id_table = s3cmci_driver_ids, + .probe = s3cmci_probe, .remove = __devexit_p(s3cmci_remove), .shutdown = s3cmci_shutdown, - .suspend = s3cmci_suspend, - .resume = s3cmci_resume, }; - static int __init s3cmci_init(void) { - platform_driver_register(&s3cmci_2410_driver); - platform_driver_register(&s3cmci_2412_driver); - platform_driver_register(&s3cmci_2440_driver); - return 0; + return platform_driver_register(&s3cmci_driver); } static void __exit s3cmci_exit(void) { - platform_driver_unregister(&s3cmci_2410_driver); - platform_driver_unregister(&s3cmci_2412_driver); - platform_driver_unregister(&s3cmci_2440_driver); + platform_driver_unregister(&s3cmci_driver); } module_init(s3cmci_init); @@ -1564,6 +1931,3 @@ module_exit(s3cmci_exit); MODULE_DESCRIPTION("Samsung S3C MMC/SD Card Interface driver"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Thomas Kleffel <tk@maintech.de>, Ben Dooks <ben-linux@fluff.org>"); -MODULE_ALIAS("platform:s3c2410-sdi"); -MODULE_ALIAS("platform:s3c2412-sdi"); -MODULE_ALIAS("platform:s3c2440-sdi"); diff --git a/drivers/mmc/host/s3cmci.h b/drivers/mmc/host/s3cmci.h index ca1ba3d58cf..c76b53dbeb6 100644 --- a/drivers/mmc/host/s3cmci.h +++ b/drivers/mmc/host/s3cmci.h @@ -8,9 +8,6 @@ * published by the Free Software Foundation. */ -/* FIXME: DMA Resource management ?! 
*/ -#define S3CMCI_DMA 0 - enum s3cmci_waitfor { COMPLETION_NONE, COMPLETION_FINALIZE, @@ -42,6 +39,11 @@ struct s3cmci_host { int dodma; int dmatogo; + bool irq_disabled; + bool irq_enabled; + bool irq_state; + int sdio_irqen; + struct mmc_request *mrq; int cmd_is_stop; @@ -68,6 +70,12 @@ struct s3cmci_host { unsigned int ccnt, dcnt; struct tasklet_struct pio_tasklet; +#ifdef CONFIG_DEBUG_FS + struct dentry *debug_root; + struct dentry *debug_state; + struct dentry *debug_regs; +#endif + #ifdef CONFIG_CPU_FREQ struct notifier_block freq_transition; #endif diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index 0acbf4f5be5..8ca17a3e96e 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c @@ -32,14 +32,6 @@ struct mtd_blkcore_priv { spinlock_t queue_lock; }; -static int blktrans_discard_request(struct request_queue *q, - struct request *req) -{ - req->cmd_type = REQ_TYPE_LINUX_BLOCK; - req->cmd[0] = REQ_LB_OP_DISCARD; - return 0; -} - static int do_blktrans_request(struct mtd_blktrans_ops *tr, struct mtd_blktrans_dev *dev, struct request *req) @@ -52,10 +44,6 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr, buf = req->buffer; - if (req->cmd_type == REQ_TYPE_LINUX_BLOCK && - req->cmd[0] == REQ_LB_OP_DISCARD) - return tr->discard(dev, block, nsect); - if (!blk_fs_request(req)) return -EIO; @@ -63,6 +51,9 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr, get_capacity(req->rq_disk)) return -EIO; + if (blk_discard_rq(req)) + return tr->discard(dev, block, nsect); + switch(rq_data_dir(req)) { case READ: for (; nsect > 0; nsect--, block++, buf += tr->blksize) @@ -380,8 +371,8 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr) tr->blkcore_priv->rq->queuedata = tr; blk_queue_logical_block_size(tr->blkcore_priv->rq, tr->blksize); if (tr->discard) - blk_queue_set_discard(tr->blkcore_priv->rq, - blktrans_discard_request); + queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, + tr->blkcore_priv->rq); tr->blkshift = ffs(tr->blksize) - 1; diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index b9eeadf01b7..975e25b19eb 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c @@ -805,52 +805,54 @@ static void poll_vortex(struct net_device *dev) #ifdef CONFIG_PM -static int vortex_suspend(struct pci_dev *pdev, pm_message_t state) +static int vortex_suspend(struct device *dev) { - struct net_device *dev = pci_get_drvdata(pdev); + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *ndev = pci_get_drvdata(pdev); + + if (!ndev || !netif_running(ndev)) + return 0; + + netif_device_detach(ndev); + vortex_down(ndev, 1); - if (dev && netdev_priv(dev)) { - if (netif_running(dev)) { - netif_device_detach(dev); - vortex_down(dev, 1); - disable_irq(dev->irq); - } - pci_save_state(pdev); - pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); - pci_disable_device(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); - } return 0; } -static int vortex_resume(struct pci_dev *pdev) +static int vortex_resume(struct device *dev) { - struct net_device *dev = pci_get_drvdata(pdev); - struct vortex_private *vp = netdev_priv(dev); + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *ndev = pci_get_drvdata(pdev); int err; - if (dev && vp) { - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - err = pci_enable_device(pdev); - if (err) { - pr_warning("%s: Could not enable device\n", - dev->name); - return err; - } - pci_set_master(pdev); - if (netif_running(dev)) { - err = vortex_up(dev); - if (err) - return err; - 
enable_irq(dev->irq); - netif_device_attach(dev); - } - } + if (!ndev || !netif_running(ndev)) + return 0; + + err = vortex_up(ndev); + if (err) + return err; + + netif_device_attach(ndev); + return 0; } -#endif /* CONFIG_PM */ +static struct dev_pm_ops vortex_pm_ops = { + .suspend = vortex_suspend, + .resume = vortex_resume, + .freeze = vortex_suspend, + .thaw = vortex_resume, + .poweroff = vortex_suspend, + .restore = vortex_resume, +}; + +#define VORTEX_PM_OPS (&vortex_pm_ops) + +#else /* !CONFIG_PM */ + +#define VORTEX_PM_OPS NULL + +#endif /* !CONFIG_PM */ #ifdef CONFIG_EISA static struct eisa_device_id vortex_eisa_ids[] = { @@ -3199,10 +3201,7 @@ static struct pci_driver vortex_driver = { .probe = vortex_init_one, .remove = __devexit_p(vortex_remove_one), .id_table = vortex_pci_tbl, -#ifdef CONFIG_PM - .suspend = vortex_suspend, - .resume = vortex_resume, -#endif + .driver.pm = VORTEX_PM_OPS, }; diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 2bea67c134f..712776089b4 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -1738,6 +1738,13 @@ config KS8851 help SPI driver for Micrel KS8851 SPI attached network chip. +config KS8851_MLL + tristate "Micrel KS8851 MLL" + depends on HAS_IOMEM + help + This platform driver is for Micrel KS8851 Address/data bus + multiplexed network chip. + config VIA_RHINE tristate "VIA Rhine support" depends on NET_PCI && PCI diff --git a/drivers/net/Makefile b/drivers/net/Makefile index ae8cd30f13d..d866b8cf65d 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -89,6 +89,7 @@ obj-$(CONFIG_SKY2) += sky2.o obj-$(CONFIG_SKFP) += skfp/ obj-$(CONFIG_KS8842) += ks8842.o obj-$(CONFIG_KS8851) += ks8851.o +obj-$(CONFIG_KS8851_MLL) += ks8851_mll.o obj-$(CONFIG_VIA_RHINE) += via-rhine.o obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c index fdf5937233f..04f63c77071 100644 --- a/drivers/net/au1000_eth.c +++ b/drivers/net/au1000_eth.c @@ -721,7 +721,7 @@ static inline void update_rx_stats(struct net_device *dev, u32 status) ps->rx_errors++; if (status & RX_MISSED_FRAME) ps->rx_missed_errors++; - if (status & (RX_OVERLEN | RX_OVERLEN | RX_LEN_ERROR)) + if (status & (RX_OVERLEN | RX_RUNT | RX_LEN_ERROR)) ps->rx_length_errors++; if (status & RX_CRC_ERROR) ps->rx_crc_errors++; @@ -794,8 +794,6 @@ static int au1000_rx(struct net_device *dev) printk("rx len error\n"); if (status & RX_U_CNTRL_FRAME) printk("rx u control frame\n"); - if (status & RX_MISSED_FRAME) - printk("rx miss\n"); } } prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE); diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c index 09d270913c5..ba29dc319b3 100644 --- a/drivers/net/bcm63xx_enet.c +++ b/drivers/net/bcm63xx_enet.c @@ -90,7 +90,7 @@ static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data) if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII) break; udelay(1); - } while (limit-- >= 0); + } while (limit-- > 0); return (limit < 0) ? 
1 : 0; } diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h index 684c6fe24c8..a80da0e14a5 100644 --- a/drivers/net/benet/be.h +++ b/drivers/net/benet/be.h @@ -258,6 +258,7 @@ struct be_adapter { bool link_up; u32 port_num; bool promiscuous; + u32 cap; }; extern const struct ethtool_ops be_ethtool_ops; diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c index 3dd76c4170b..89876ade5e3 100644 --- a/drivers/net/benet/be_cmds.c +++ b/drivers/net/benet/be_cmds.c @@ -1068,7 +1068,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc) } /* Uses mbox */ -int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num) +int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap) { struct be_mcc_wrb *wrb; struct be_cmd_req_query_fw_cfg *req; @@ -1088,6 +1088,7 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num) if (!status) { struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb); *port_num = le32_to_cpu(resp->phys_port); + *cap = le32_to_cpu(resp->function_cap); } spin_unlock(&adapter->mbox_lock); @@ -1128,7 +1129,6 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); - req = embedded_payload(wrb); sge = nonembedded_sgl(wrb); be_wrb_hdr_prepare(wrb, cmd->size, false, 1); diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h index 93e432f3d92..a86f917f85f 100644 --- a/drivers/net/benet/be_cmds.h +++ b/drivers/net/benet/be_cmds.h @@ -62,7 +62,7 @@ enum { MCC_STATUS_QUEUE_FLUSHING = 0x4, /* The command is completing with a DMA error */ MCC_STATUS_DMA_FAILED = 0x5, - MCC_STATUS_NOT_SUPPORTED = 0x66 + MCC_STATUS_NOT_SUPPORTED = 66 }; #define CQE_STATUS_COMPL_MASK 0xFFFF @@ -760,7 +760,8 @@ extern int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc); extern int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc); -extern int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num); +extern int be_cmd_query_fw_cfg(struct be_adapter *adapter, + u32 *port_num, u32 *cap); extern int be_cmd_reset_function(struct be_adapter *adapter); extern int be_process_mcc(struct be_adapter *adapter); extern int be_cmd_write_flashrom(struct be_adapter *adapter, diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c index 11445df3dbc..cda5bf2fc50 100644 --- a/drivers/net/benet/be_ethtool.c +++ b/drivers/net/benet/be_ethtool.c @@ -358,7 +358,7 @@ const struct ethtool_ops be_ethtool_ops = { .get_rx_csum = be_get_rx_csum, .set_rx_csum = be_set_rx_csum, .get_tx_csum = ethtool_op_get_tx_csum, - .set_tx_csum = ethtool_op_set_tx_csum, + .set_tx_csum = ethtool_op_set_tx_hw_csum, .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, .get_tso = ethtool_op_get_tso, diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index 409cf059590..6d5e81f7046 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c @@ -197,7 +197,7 @@ void netdev_stats_update(struct be_adapter *adapter) /* no space available in linux */ dev_stats->tx_dropped = 0; - dev_stats->multicast = port_stats->tx_multicastframes; + dev_stats->multicast = port_stats->rx_multicast_frames; dev_stats->collisions = 0; /* detailed tx_errors */ @@ -747,9 +747,16 @@ static void be_rx_compl_process(struct be_adapter *adapter, struct be_eth_rx_compl *rxcp) { struct sk_buff *skb; - u32 vtp, vid; + u32 vlanf, vid; + u8 vtm; - vtp = AMAP_GET_BITS(struct 
amap_eth_rx_compl, vtp, rxcp); + vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); + vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp); + + /* vlanf could be wrongly set in some cards. + * ignore if vtm is not set */ + if ((adapter->cap == 0x400) && !vtm) + vlanf = 0; skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN); if (!skb) { @@ -772,7 +779,7 @@ static void be_rx_compl_process(struct be_adapter *adapter, skb->protocol = eth_type_trans(skb, adapter->netdev); skb->dev = adapter->netdev; - if (vtp) { + if (vlanf) { if (!adapter->vlan_grp || adapter->num_vlans == 0) { kfree_skb(skb); return; @@ -797,11 +804,18 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter, struct be_eq_obj *eq_obj = &adapter->rx_eq; u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len; u16 i, rxq_idx = 0, vid, j; + u8 vtm; num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); + vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp); + + /* vlanf could be wrongly set in some cards. + * ignore if vtm is not set */ + if ((adapter->cap == 0x400) && !vtm) + vlanf = 0; skb = napi_get_frags(&eq_obj->napi); if (!skb) { @@ -1885,8 +1899,8 @@ static void be_netdev_init(struct net_device *netdev) struct be_adapter *adapter = netdev_priv(netdev); netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO | - NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | NETIF_F_GRO; + NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM | + NETIF_F_GRO; netdev->flags |= IFF_MULTICAST; @@ -2045,7 +2059,8 @@ static int be_hw_up(struct be_adapter *adapter) if (status) return status; - status = be_cmd_query_fw_cfg(adapter, &adapter->port_num); + status = be_cmd_query_fw_cfg(adapter, + &adapter->port_num, &adapter->cap); return status; } diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 6044e12ff9f..ff449de6f3c 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -1182,6 +1182,7 @@ static ssize_t bonding_store_primary(struct device *d, ": %s: Setting %s as primary slave.\n", bond->dev->name, slave->dev->name); bond->primary_slave = slave; + strcpy(bond->params.primary, slave->dev->name); bond_select_active_slave(bond); goto out; } diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c index 211c8e9182f..46c87ec7960 100644 --- a/drivers/net/cnic.c +++ b/drivers/net/cnic.c @@ -2733,7 +2733,8 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event, cnic_ulp_init(dev); else if (event == NETDEV_UNREGISTER) cnic_ulp_exit(dev); - else if (event == NETDEV_UP) { + + if (event == NETDEV_UP) { if (cnic_register_netdev(dev) != 0) { cnic_put(dev); goto done; diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h index a49235739ee..d8b09efdcb5 100644 --- a/drivers/net/cnic_if.h +++ b/drivers/net/cnic_if.h @@ -12,8 +12,8 @@ #ifndef CNIC_IF_H #define CNIC_IF_H -#define CNIC_MODULE_VERSION "2.0.0" -#define CNIC_MODULE_RELDATE "May 21, 2009" +#define CNIC_MODULE_VERSION "2.0.1" +#define CNIC_MODULE_RELDATE "Oct 01, 2009" #define CNIC_ULP_RDMA 0 #define CNIC_ULP_ISCSI 1 diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c index b53b40ba88a..d1e0563a67d 100644 --- a/drivers/net/e1000e/82571.c +++ b/drivers/net/e1000e/82571.c @@ -1803,7 +1803,7 @@ 
struct e1000_info e1000_82574_info = { | FLAG_HAS_AMT | FLAG_HAS_CTRLEXT_ON_LOAD, .pba = 20, - .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, + .max_hw_frame_size = DEFAULT_JUMBO, .get_variants = e1000_get_variants_82571, .mac_ops = &e82571_mac_ops, .phy_ops = &e82_phy_ops_bm, @@ -1820,7 +1820,7 @@ struct e1000_info e1000_82583_info = { | FLAG_HAS_AMT | FLAG_HAS_CTRLEXT_ON_LOAD, .pba = 20, - .max_hw_frame_size = DEFAULT_JUMBO, + .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, .get_variants = e1000_get_variants_82571, .mac_ops = &e82571_mac_ops, .phy_ops = &e82_phy_ops_bm, diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 16c193a6c95..0687c6aa4e4 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -4982,12 +4982,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, goto err_pci_reg; /* AER (Advanced Error Reporting) hooks */ - err = pci_enable_pcie_error_reporting(pdev); - if (err) { - dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed " - "0x%x\n", err); - /* non-fatal, continue */ - } + pci_enable_pcie_error_reporting(pdev); pci_set_master(pdev); /* PCI config space info */ @@ -5263,7 +5258,6 @@ static void __devexit e1000_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); - int err; /* * flush_scheduled work may reschedule our watchdog task, so @@ -5299,10 +5293,7 @@ static void __devexit e1000_remove(struct pci_dev *pdev) free_netdev(netdev); /* AER disable */ - err = pci_disable_pcie_error_reporting(pdev); - if (err) - dev_err(&pdev->dev, - "pci_disable_pcie_error_reporting failed 0x%x\n", err); + pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); } diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c index b7311bc0025..34d0c69e67f 100644 --- a/drivers/net/ethoc.c +++ b/drivers/net/ethoc.c @@ -19,6 +19,10 @@ #include <linux/platform_device.h> #include <net/ethoc.h> +static int buffer_size = 0x8000; /* 32 KBytes */ +module_param(buffer_size, int, 0); +MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size"); + /* register offsets */ #define MODER 0x00 #define INT_SOURCE 0x04 @@ -167,6 +171,7 @@ * struct ethoc - driver-private device structure * @iobase: pointer to I/O memory region * @membase: pointer to buffer memory region + * @dma_alloc: dma allocated buffer size * @num_tx: number of send buffers * @cur_tx: last send buffer written * @dty_tx: last buffer actually sent @@ -185,6 +190,7 @@ struct ethoc { void __iomem *iobase; void __iomem *membase; + int dma_alloc; unsigned int num_tx; unsigned int cur_tx; @@ -284,7 +290,7 @@ static int ethoc_init_ring(struct ethoc *dev) dev->cur_rx = 0; /* setup transmission buffers */ - bd.addr = 0; + bd.addr = virt_to_phys(dev->membase); bd.stat = TX_BD_IRQ | TX_BD_CRC; for (i = 0; i < dev->num_tx; i++) { @@ -295,7 +301,6 @@ static int ethoc_init_ring(struct ethoc *dev) bd.addr += ETHOC_BUFSIZ; } - bd.addr = dev->num_tx * ETHOC_BUFSIZ; bd.stat = RX_BD_EMPTY | RX_BD_IRQ; for (i = 0; i < dev->num_rx; i++) { @@ -400,8 +405,12 @@ static int ethoc_rx(struct net_device *dev, int limit) if (ethoc_update_rx_stats(priv, &bd) == 0) { int size = bd.stat >> 16; struct sk_buff *skb = netdev_alloc_skb(dev, size); + + size -= 4; /* strip the CRC */ + skb_reserve(skb, 2); /* align TCP/IP header */ + if (likely(skb)) { - void *src = priv->membase + bd.addr; + void *src = phys_to_virt(bd.addr); memcpy_fromio(skb_put(skb, size), src, size); skb->protocol = eth_type_trans(skb, dev); 
priv->stats.rx_packets++; @@ -653,9 +662,9 @@ static int ethoc_open(struct net_device *dev) if (ret) return ret; - /* calculate the number of TX/RX buffers */ - num_bd = (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ; - priv->num_tx = min(min_tx, num_bd / 4); + /* calculate the number of TX/RX buffers, maximum 128 supported */ + num_bd = min(128, (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ); + priv->num_tx = max(min_tx, num_bd / 4); priv->num_rx = num_bd - priv->num_tx; ethoc_write(priv, TX_BD_NUM, priv->num_tx); @@ -823,7 +832,7 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev) else bd.stat &= ~TX_BD_PAD; - dest = priv->membase + bd.addr; + dest = phys_to_virt(bd.addr); memcpy_toio(dest, skb->data, skb->len); bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK); @@ -903,22 +912,19 @@ static int ethoc_probe(struct platform_device *pdev) /* obtain buffer memory space */ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (!res) { - dev_err(&pdev->dev, "cannot obtain memory space\n"); - ret = -ENXIO; - goto free; - } - - mem = devm_request_mem_region(&pdev->dev, res->start, + if (res) { + mem = devm_request_mem_region(&pdev->dev, res->start, res->end - res->start + 1, res->name); - if (!mem) { - dev_err(&pdev->dev, "cannot request memory space\n"); - ret = -ENXIO; - goto free; + if (!mem) { + dev_err(&pdev->dev, "cannot request memory space\n"); + ret = -ENXIO; + goto free; + } + + netdev->mem_start = mem->start; + netdev->mem_end = mem->end; } - netdev->mem_start = mem->start; - netdev->mem_end = mem->end; /* obtain device IRQ number */ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); @@ -933,6 +939,7 @@ static int ethoc_probe(struct platform_device *pdev) /* setup driver-private data */ priv = netdev_priv(netdev); priv->netdev = netdev; + priv->dma_alloc = 0; priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr, mmio->end - mmio->start + 1); @@ -942,12 +949,27 @@ static int ethoc_probe(struct platform_device *pdev) goto error; } - priv->membase = devm_ioremap_nocache(&pdev->dev, netdev->mem_start, - mem->end - mem->start + 1); - if (!priv->membase) { - dev_err(&pdev->dev, "cannot remap memory space\n"); - ret = -ENXIO; - goto error; + if (netdev->mem_end) { + priv->membase = devm_ioremap_nocache(&pdev->dev, + netdev->mem_start, mem->end - mem->start + 1); + if (!priv->membase) { + dev_err(&pdev->dev, "cannot remap memory space\n"); + ret = -ENXIO; + goto error; + } + } else { + /* Allocate buffer memory */ + priv->membase = dma_alloc_coherent(NULL, + buffer_size, (void *)&netdev->mem_start, + GFP_KERNEL); + if (!priv->membase) { + dev_err(&pdev->dev, "cannot allocate %dB buffer\n", + buffer_size); + ret = -ENOMEM; + goto error; + } + netdev->mem_end = netdev->mem_start + buffer_size; + priv->dma_alloc = buffer_size; } /* Allow the platform setup code to pass in a MAC address. 
*/ @@ -1034,6 +1056,9 @@ free_mdio: kfree(priv->mdio->irq); mdiobus_free(priv->mdio); free: + if (priv->dma_alloc) + dma_free_coherent(NULL, priv->dma_alloc, priv->membase, + netdev->mem_start); free_netdev(netdev); out: return ret; @@ -1059,7 +1084,9 @@ static int ethoc_remove(struct platform_device *pdev) kfree(priv->mdio->irq); mdiobus_free(priv->mdio); } - + if (priv->dma_alloc) + dma_free_coherent(NULL, priv->dma_alloc, priv->membase, + netdev->mem_start); unregister_netdev(netdev); free_netdev(netdev); } diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index 33b55f72974..db4b7f1603f 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -258,7 +258,7 @@ static void ax_bump(struct mkiss *ax) } if (ax->crcmode != CRC_MODE_SMACK && ax->crcauto) { printk(KERN_INFO - "mkiss: %s: Switchting to crc-smack\n", + "mkiss: %s: Switching to crc-smack\n", ax->dev->name); ax->crcmode = CRC_MODE_SMACK; } @@ -272,7 +272,7 @@ static void ax_bump(struct mkiss *ax) } if (ax->crcmode != CRC_MODE_FLEX && ax->crcauto) { printk(KERN_INFO - "mkiss: %s: Switchting to crc-flexnet\n", + "mkiss: %s: Switching to crc-flexnet\n", ax->dev->name); ax->crcmode = CRC_MODE_FLEX; } diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 5d6c1530a8c..714c3a4a44e 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -1246,12 +1246,7 @@ static int __devinit igb_probe(struct pci_dev *pdev, if (err) goto err_pci_reg; - err = pci_enable_pcie_error_reporting(pdev); - if (err) { - dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed " - "0x%x\n", err); - /* non-fatal, continue */ - } + pci_enable_pcie_error_reporting(pdev); pci_set_master(pdev); pci_save_state(pdev); @@ -1628,7 +1623,6 @@ static void __devexit igb_remove(struct pci_dev *pdev) struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - int err; /* flush_scheduled work may reschedule our watchdog task, so * explicitly disable watchdog tasks from being rescheduled */ @@ -1682,10 +1676,7 @@ static void __devexit igb_remove(struct pci_dev *pdev) free_netdev(netdev); - err = pci_disable_pcie_error_reporting(pdev); - if (err) - dev_err(&pdev->dev, - "pci_disable_pcie_error_reporting failed 0x%x\n", err); + pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); } diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c index e36e951cbc6..aa7286bc436 100644 --- a/drivers/net/iseries_veth.c +++ b/drivers/net/iseries_veth.c @@ -495,7 +495,7 @@ static void veth_take_cap_ack(struct veth_lpar_connection *cnx, cnx->remote_lp); } else { memcpy(&cnx->cap_ack_event, event, - sizeof(&cnx->cap_ack_event)); + sizeof(cnx->cap_ack_event)); cnx->state |= VETH_STATE_GOTCAPACK; veth_kick_statemachine(cnx); } diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c index 56b12f3192f..e2d5343f127 100644 --- a/drivers/net/ixgbe/ixgbe_82598.c +++ b/drivers/net/ixgbe/ixgbe_82598.c @@ -425,7 +425,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num) #endif /* CONFIG_DCB */ default: hw_dbg(hw, "Flow control param set incorrectly\n"); - ret_val = -IXGBE_ERR_CONFIG; + ret_val = IXGBE_ERR_CONFIG; goto out; break; } diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c index 2ec58dcdb82..34b04924c8a 100644 --- a/drivers/net/ixgbe/ixgbe_82599.c +++ b/drivers/net/ixgbe/ixgbe_82599.c @@ -330,6 +330,8 @@ static enum ixgbe_media_type 
ixgbe_get_media_type_82599(struct ixgbe_hw *hw) switch (hw->device_id) { case IXGBE_DEV_ID_82599_KX4: + case IXGBE_DEV_ID_82599_KX4_MEZZ: + case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: case IXGBE_DEV_ID_82599_XAUI_LOM: /* Default device ID is mezzanine card KX/KX4 */ media_type = ixgbe_media_type_backplane; diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c index 6621e172df3..40ff120a9ad 100644 --- a/drivers/net/ixgbe/ixgbe_common.c +++ b/drivers/net/ixgbe/ixgbe_common.c @@ -1355,9 +1355,7 @@ static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) /** * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses * @hw: pointer to hardware structure - * @addr_list: the list of new addresses - * @addr_count: number of addresses - * @next: iterator function to walk the address list + * @uc_list: the list of new addresses * * The given list replaces any existing list. Clears the secondary addrs from * receive address registers. Uses unused receive address registers for the @@ -1663,7 +1661,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num) #endif /* CONFIG_DCB */ default: hw_dbg(hw, "Flow control param set incorrectly\n"); - ret_val = -IXGBE_ERR_CONFIG; + ret_val = IXGBE_ERR_CONFIG; goto out; break; } @@ -1734,75 +1732,140 @@ s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw) s32 ret_val = 0; ixgbe_link_speed speed; u32 pcs_anadv_reg, pcs_lpab_reg, linkstat; + u32 links2, anlp1_reg, autoc_reg, links; bool link_up; /* * AN should have completed when the cable was plugged in. * Look for reasons to bail out. Bail out if: * - FC autoneg is disabled, or if - * - we don't have multispeed fiber, or if - * - we're not running at 1G, or if - * - link is not up, or if - * - link is up but AN did not complete, or if - * - link is up and AN completed but timed out + * - link is not up. * - * Since we're being called from an LSC, link is already know to be up. + * Since we're being called from an LSC, link is already known to be up. * So use link_up_wait_to_complete=false. 
*/ hw->mac.ops.check_link(hw, &speed, &link_up, false); - linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); - - if (hw->fc.disable_fc_autoneg || - !hw->phy.multispeed_fiber || - (speed != IXGBE_LINK_SPEED_1GB_FULL) || - !link_up || - ((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) || - ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) { + + if (hw->fc.disable_fc_autoneg || (!link_up)) { hw->fc.fc_was_autonegged = false; hw->fc.current_mode = hw->fc.requested_mode; - hw_dbg(hw, "Autoneg FC was skipped.\n"); goto out; } /* + * On backplane, bail out if + * - backplane autoneg was not completed, or if + * - link partner is not AN enabled + */ + if (hw->phy.media_type == ixgbe_media_type_backplane) { + links = IXGBE_READ_REG(hw, IXGBE_LINKS); + links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2); + if (((links & IXGBE_LINKS_KX_AN_COMP) == 0) || + ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)) { + hw->fc.fc_was_autonegged = false; + hw->fc.current_mode = hw->fc.requested_mode; + goto out; + } + } + + /* + * On multispeed fiber at 1g, bail out if + * - link is up but AN did not complete, or if + * - link is up and AN completed but timed out + */ + if (hw->phy.multispeed_fiber && (speed == IXGBE_LINK_SPEED_1GB_FULL)) { + linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); + if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) || + ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) { + hw->fc.fc_was_autonegged = false; + hw->fc.current_mode = hw->fc.requested_mode; + goto out; + } + } + + /* * Read the AN advertisement and LP ability registers and resolve * local flow control settings accordingly */ - pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); - pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); - if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) && - (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) { + if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && + (hw->phy.media_type != ixgbe_media_type_backplane)) { + pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); + pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); + if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) && + (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) { + /* + * Now we need to check if the user selected Rx ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise RX + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == ixgbe_fc_full) { + hw->fc.current_mode = ixgbe_fc_full; + hw_dbg(hw, "Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = ixgbe_fc_rx_pause; + hw_dbg(hw, "Flow Control=RX PAUSE only\n"); + } + } else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) && + (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) && + (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) && + (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) { + hw->fc.current_mode = ixgbe_fc_tx_pause; + hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n"); + } else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) && + (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) && + !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) && + (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) { + hw->fc.current_mode = ixgbe_fc_rx_pause; + hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n"); + } else { + hw->fc.current_mode = ixgbe_fc_none; + hw_dbg(hw, "Flow Control = NONE.\n"); + } + } + + if (hw->phy.media_type == ixgbe_media_type_backplane) { /* - * Now we need to check if the user selected Rx ONLY - * of pause frames. 
In this case, we had to advertise - * FULL flow control because we could not advertise RX - * ONLY. Hence, we must now check to see if we need to - * turn OFF the TRANSMISSION of PAUSE frames. + * Read the 10g AN autoc and LP ability registers and resolve + * local flow control settings accordingly */ - if (hw->fc.requested_mode == ixgbe_fc_full) { - hw->fc.current_mode = ixgbe_fc_full; - hw_dbg(hw, "Flow Control = FULL.\n"); - } else { + autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); + + if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) && + (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE)) { + /* + * Now we need to check if the user selected Rx ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise RX + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == ixgbe_fc_full) { + hw->fc.current_mode = ixgbe_fc_full; + hw_dbg(hw, "Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = ixgbe_fc_rx_pause; + hw_dbg(hw, "Flow Control=RX PAUSE only\n"); + } + } else if (!(autoc_reg & IXGBE_AUTOC_SYM_PAUSE) && + (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) && + (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) && + (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) { + hw->fc.current_mode = ixgbe_fc_tx_pause; + hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n"); + } else if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) && + (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) && + !(anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) && + (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) { hw->fc.current_mode = ixgbe_fc_rx_pause; hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n"); + } else { + hw->fc.current_mode = ixgbe_fc_none; + hw_dbg(hw, "Flow Control = NONE.\n"); } - } else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) && - (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) && - (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) && - (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) { - hw->fc.current_mode = ixgbe_fc_tx_pause; - hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n"); - } else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) && - (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) && - !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) && - (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) { - hw->fc.current_mode = ixgbe_fc_rx_pause; - hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n"); - } else { - hw->fc.current_mode = ixgbe_fc_none; - hw_dbg(hw, "Flow Control = NONE.\n"); } - /* Record that current_mode is the result of a successful autoneg */ hw->fc.fc_was_autonegged = true; @@ -1919,7 +1982,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num) #endif /* CONFIG_DCB */ default: hw_dbg(hw, "Flow control param set incorrectly\n"); - ret_val = -IXGBE_ERR_CONFIG; + ret_val = IXGBE_ERR_CONFIG; goto out; break; } @@ -1927,9 +1990,6 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num) IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg); reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); - /* Enable and restart autoneg to inform the link partner */ - reg |= IXGBE_PCS1GLCTL_AN_ENABLE | IXGBE_PCS1GLCTL_AN_RESTART; - /* Disable AN timeout */ if (hw->fc.strict_ieee) reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN; @@ -1937,6 +1997,70 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num) IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg); hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg); + /* + * Set up the 10G flow control advertisement registers so the HW + * can do fc autoneg once the cable is plugged in. 
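+	 * (IXGBE_AUTOC carries the symmetric and asymmetric pause bits
+	 * used for the 10G negotiation.)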
If we end up + * using 1g instead, this is harmless. + */ + reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + + /* + * The possible values of fc.requested_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: Invalid. + */ + switch (hw->fc.requested_mode) { + case ixgbe_fc_none: + /* Flow control completely disabled by software override. */ + reg &= ~(IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE); + break; + case ixgbe_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE); + break; + case ixgbe_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + reg |= (IXGBE_AUTOC_ASM_PAUSE); + reg &= ~(IXGBE_AUTOC_SYM_PAUSE); + break; + case ixgbe_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. */ + reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE); + break; +#ifdef CONFIG_DCB + case ixgbe_fc_pfc: + goto out; + break; +#endif /* CONFIG_DCB */ + default: + hw_dbg(hw, "Flow control param set incorrectly\n"); + ret_val = IXGBE_ERR_CONFIG; + goto out; + break; + } + /* + * AUTOC restart handles negotiation of 1G and 10G. There is + * no need to set the PCS1GCTL register. 
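+	 * Restarting AN below pushes the updated pause advertisement to
+	 * the link partner for whichever speed is negotiated.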
+ */ + reg |= IXGBE_AUTOC_AN_RESTART; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg); + hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg); + out: return ret_val; } @@ -2000,7 +2124,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) while (timeout) { if (ixgbe_get_eeprom_semaphore(hw)) - return -IXGBE_ERR_SWFW_SYNC; + return IXGBE_ERR_SWFW_SYNC; gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); if (!(gssr & (fwmask | swmask))) @@ -2017,7 +2141,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) if (!timeout) { hw_dbg(hw, "Driver can't access resource, GSSR timeout.\n"); - return -IXGBE_ERR_SWFW_SYNC; + return IXGBE_ERR_SWFW_SYNC; } gssr |= swmask; diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 53b0a668025..fa314cb005a 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -53,6 +53,10 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = { {"tx_packets", IXGBE_STAT(net_stats.tx_packets)}, {"rx_bytes", IXGBE_STAT(net_stats.rx_bytes)}, {"tx_bytes", IXGBE_STAT(net_stats.tx_bytes)}, + {"rx_pkts_nic", IXGBE_STAT(stats.gprc)}, + {"tx_pkts_nic", IXGBE_STAT(stats.gptc)}, + {"rx_bytes_nic", IXGBE_STAT(stats.gorc)}, + {"tx_bytes_nic", IXGBE_STAT(stats.gotc)}, {"lsc_int", IXGBE_STAT(lsc_int)}, {"tx_busy", IXGBE_STAT(tx_busy)}, {"non_eop_descs", IXGBE_STAT(non_eop_descs)}, diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index c407bd9de0d..cbb143ca1eb 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -49,7 +49,7 @@ char ixgbe_driver_name[] = "ixgbe"; static const char ixgbe_driver_string[] = "Intel(R) 10 Gigabit PCI Express Network Driver"; -#define DRV_VERSION "2.0.37-k2" +#define DRV_VERSION "2.0.44-k2" const char ixgbe_driver_version[] = DRV_VERSION; static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation."; @@ -97,8 +97,12 @@ static struct pci_device_id ixgbe_pci_tbl[] = { board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), + board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), + board_82599 }, /* required last entry */ {0, } @@ -1885,12 +1889,29 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0); adapter->tx_ring[i].head = IXGBE_TDH(j); adapter->tx_ring[i].tail = IXGBE_TDT(j); - /* Disable Tx Head Writeback RO bit, since this hoses + /* + * Disable Tx Head Writeback RO bit, since this hoses * bookkeeping if things aren't delivered in order. 
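+		 * The DCA_TXCTRL register sits at a different offset on
+		 * 82599, so the read and the write-back are switched on
+		 * the MAC type.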
*/ - txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j)); + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j)); + break; + case ixgbe_mac_82599EB: + default: + txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j)); + break; + } txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; - IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl); + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl); + break; + case ixgbe_mac_82599EB: + default: + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl); + break; + } } if (hw->mac.type == ixgbe_mac_82599EB) { /* We enable 8 traffic classes, DCB only */ @@ -4432,10 +4453,13 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) /* 82598 hardware only has a 32 bit counter in the high register */ if (hw->mac.type == ixgbe_mac_82599EB) { + u64 tmp; adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); - IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */ + tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF; /* 4 high bits of GORC */ + adapter->stats.gorc += (tmp << 32); adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); - IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */ + tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF; /* 4 high bits of GOTC */ + adapter->stats.gotc += (tmp << 32); adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL); IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); @@ -5071,7 +5095,6 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, /* Right now, we support IPv4 only */ struct ixgbe_atr_input atr_input; struct tcphdr *th; - struct udphdr *uh; struct iphdr *iph = ip_hdr(skb); struct ethhdr *eth = (struct ethhdr *)skb->data; u16 vlan_id, src_port, dst_port, flex_bytes; @@ -5085,12 +5108,6 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, dst_port = th->dest; l4type |= IXGBE_ATR_L4TYPE_TCP; /* l4type IPv4 type is 0, no need to assign */ - } else if(iph->protocol == IPPROTO_UDP) { - uh = udp_hdr(skb); - src_port = uh->source; - dst_port = uh->dest; - l4type |= IXGBE_ATR_L4TYPE_UDP; - /* l4type IPv4 type is 0, no need to assign */ } else { /* Unsupported L4 header, just bail here */ return; @@ -5494,12 +5511,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, goto err_pci_reg; } - err = pci_enable_pcie_error_reporting(pdev); - if (err) { - dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed " - "0x%x\n", err); - /* non-fatal, continue */ - } + pci_enable_pcie_error_reporting(pdev); pci_set_master(pdev); pci_save_state(pdev); @@ -5808,7 +5820,6 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct ixgbe_adapter *adapter = netdev_priv(netdev); - int err; set_bit(__IXGBE_DOWN, &adapter->state); /* clear the module not found bit to make sure the worker won't @@ -5859,10 +5870,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) free_netdev(netdev); - err = pci_disable_pcie_error_reporting(pdev); - if (err) - dev_err(&pdev->dev, - "pci_disable_pcie_error_reporting failed 0x%x\n", err); + pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); } diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index 8761d7899f7..ef4bdd58e01 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h @@ -49,9 +49,11 @@ #define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1 #define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4 #define 
IXGBE_DEV_ID_82599_KX4 0x10F7 +#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514 #define IXGBE_DEV_ID_82599_CX4 0x10F9 #define IXGBE_DEV_ID_82599_SFP 0x10FB #define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC +#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8 /* General Registers */ #define IXGBE_CTRL 0x00000 @@ -1336,6 +1338,8 @@ #define IXGBE_AUTOC_KX4_SUPP 0x80000000 #define IXGBE_AUTOC_KX_SUPP 0x40000000 #define IXGBE_AUTOC_PAUSE 0x30000000 +#define IXGBE_AUTOC_ASM_PAUSE 0x20000000 +#define IXGBE_AUTOC_SYM_PAUSE 0x10000000 #define IXGBE_AUTOC_RF 0x08000000 #define IXGBE_AUTOC_PD_TMR 0x06000000 #define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000 @@ -1404,6 +1408,8 @@ #define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */ #define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */ +#define IXGBE_LINKS2_AN_SUPPORTED 0x00000040 + /* PCS1GLSTA Bit Masks */ #define IXGBE_PCS1GLSTA_LINK_OK 1 #define IXGBE_PCS1GLSTA_SYNK_OK 0x10 @@ -1424,6 +1430,11 @@ #define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000 #define IXGBE_PCS1GLCTL_AN_RESTART 0x20000 +/* ANLP1 Bit Masks */ +#define IXGBE_ANLP1_PAUSE 0x0C00 +#define IXGBE_ANLP1_SYM_PAUSE 0x0400 +#define IXGBE_ANLP1_ASM_PAUSE 0x0800 + /* SW Semaphore Register bitmasks */ #define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ #define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ diff --git a/drivers/net/ks8851_mll.c b/drivers/net/ks8851_mll.c new file mode 100644 index 00000000000..0be14d702be --- /dev/null +++ b/drivers/net/ks8851_mll.c @@ -0,0 +1,1697 @@ +/** + * drivers/net/ks8851_mll.c + * Copyright (c) 2009 Micrel Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/** + * Supports: + * KS8851 16bit MLL chip from Micrel Inc. 
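+ *
+ * The chip is accessed through two memory-mapped windows: a command
+ * window that latches the register address and byte enables, and a
+ * data window for the actual register contents.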
+ */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/cache.h> +#include <linux/crc32.h> +#include <linux/mii.h> +#include <linux/platform_device.h> +#include <linux/delay.h> + +#define DRV_NAME "ks8851_mll" + +static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 }; +#define MAX_RECV_FRAMES 32 +#define MAX_BUF_SIZE 2048 +#define TX_BUF_SIZE 2000 +#define RX_BUF_SIZE 2000 + +#define KS_CCR 0x08 +#define CCR_EEPROM (1 << 9) +#define CCR_SPI (1 << 8) +#define CCR_8BIT (1 << 7) +#define CCR_16BIT (1 << 6) +#define CCR_32BIT (1 << 5) +#define CCR_SHARED (1 << 4) +#define CCR_32PIN (1 << 0) + +/* MAC address registers */ +#define KS_MARL 0x10 +#define KS_MARM 0x12 +#define KS_MARH 0x14 + +#define KS_OBCR 0x20 +#define OBCR_ODS_16MA (1 << 6) + +#define KS_EEPCR 0x22 +#define EEPCR_EESA (1 << 4) +#define EEPCR_EESB (1 << 3) +#define EEPCR_EEDO (1 << 2) +#define EEPCR_EESCK (1 << 1) +#define EEPCR_EECS (1 << 0) + +#define KS_MBIR 0x24 +#define MBIR_TXMBF (1 << 12) +#define MBIR_TXMBFA (1 << 11) +#define MBIR_RXMBF (1 << 4) +#define MBIR_RXMBFA (1 << 3) + +#define KS_GRR 0x26 +#define GRR_QMU (1 << 1) +#define GRR_GSR (1 << 0) + +#define KS_WFCR 0x2A +#define WFCR_MPRXE (1 << 7) +#define WFCR_WF3E (1 << 3) +#define WFCR_WF2E (1 << 2) +#define WFCR_WF1E (1 << 1) +#define WFCR_WF0E (1 << 0) + +#define KS_WF0CRC0 0x30 +#define KS_WF0CRC1 0x32 +#define KS_WF0BM0 0x34 +#define KS_WF0BM1 0x36 +#define KS_WF0BM2 0x38 +#define KS_WF0BM3 0x3A + +#define KS_WF1CRC0 0x40 +#define KS_WF1CRC1 0x42 +#define KS_WF1BM0 0x44 +#define KS_WF1BM1 0x46 +#define KS_WF1BM2 0x48 +#define KS_WF1BM3 0x4A + +#define KS_WF2CRC0 0x50 +#define KS_WF2CRC1 0x52 +#define KS_WF2BM0 0x54 +#define KS_WF2BM1 0x56 +#define KS_WF2BM2 0x58 +#define KS_WF2BM3 0x5A + +#define KS_WF3CRC0 0x60 +#define KS_WF3CRC1 0x62 +#define KS_WF3BM0 0x64 +#define KS_WF3BM1 0x66 +#define KS_WF3BM2 0x68 +#define KS_WF3BM3 0x6A + +#define KS_TXCR 0x70 +#define TXCR_TCGICMP (1 << 8) +#define TXCR_TCGUDP (1 << 7) +#define TXCR_TCGTCP (1 << 6) +#define TXCR_TCGIP (1 << 5) +#define TXCR_FTXQ (1 << 4) +#define TXCR_TXFCE (1 << 3) +#define TXCR_TXPE (1 << 2) +#define TXCR_TXCRC (1 << 1) +#define TXCR_TXE (1 << 0) + +#define KS_TXSR 0x72 +#define TXSR_TXLC (1 << 13) +#define TXSR_TXMC (1 << 12) +#define TXSR_TXFID_MASK (0x3f << 0) +#define TXSR_TXFID_SHIFT (0) +#define TXSR_TXFID_GET(_v) (((_v) >> 0) & 0x3f) + + +#define KS_RXCR1 0x74 +#define RXCR1_FRXQ (1 << 15) +#define RXCR1_RXUDPFCC (1 << 14) +#define RXCR1_RXTCPFCC (1 << 13) +#define RXCR1_RXIPFCC (1 << 12) +#define RXCR1_RXPAFMA (1 << 11) +#define RXCR1_RXFCE (1 << 10) +#define RXCR1_RXEFE (1 << 9) +#define RXCR1_RXMAFMA (1 << 8) +#define RXCR1_RXBE (1 << 7) +#define RXCR1_RXME (1 << 6) +#define RXCR1_RXUE (1 << 5) +#define RXCR1_RXAE (1 << 4) +#define RXCR1_RXINVF (1 << 1) +#define RXCR1_RXE (1 << 0) +#define RXCR1_FILTER_MASK (RXCR1_RXINVF | RXCR1_RXAE | \ + RXCR1_RXMAFMA | RXCR1_RXPAFMA) + +#define KS_RXCR2 0x76 +#define RXCR2_SRDBL_MASK (0x7 << 5) +#define RXCR2_SRDBL_SHIFT (5) +#define RXCR2_SRDBL_4B (0x0 << 5) +#define RXCR2_SRDBL_8B (0x1 << 5) +#define RXCR2_SRDBL_16B (0x2 << 5) +#define RXCR2_SRDBL_32B (0x3 << 5) +/* #define RXCR2_SRDBL_FRAME (0x4 << 5) */ +#define RXCR2_IUFFP (1 << 4) +#define RXCR2_RXIUFCEZ (1 << 3) +#define RXCR2_UDPLFE (1 << 2) +#define RXCR2_RXICMPFCC (1 << 1) +#define RXCR2_RXSAF (1 << 0) + +#define KS_TXMIR 0x78 + +#define KS_RXFHSR 0x7C 
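+/* Receive frame header status (RXFHSR) bit definitions */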
+#define RXFSHR_RXFV (1 << 15) +#define RXFSHR_RXICMPFCS (1 << 13) +#define RXFSHR_RXIPFCS (1 << 12) +#define RXFSHR_RXTCPFCS (1 << 11) +#define RXFSHR_RXUDPFCS (1 << 10) +#define RXFSHR_RXBF (1 << 7) +#define RXFSHR_RXMF (1 << 6) +#define RXFSHR_RXUF (1 << 5) +#define RXFSHR_RXMR (1 << 4) +#define RXFSHR_RXFT (1 << 3) +#define RXFSHR_RXFTL (1 << 2) +#define RXFSHR_RXRF (1 << 1) +#define RXFSHR_RXCE (1 << 0) +#define RXFSHR_ERR (RXFSHR_RXCE | RXFSHR_RXRF |\ + RXFSHR_RXFTL | RXFSHR_RXMR |\ + RXFSHR_RXICMPFCS | RXFSHR_RXIPFCS |\ + RXFSHR_RXTCPFCS) +#define KS_RXFHBCR 0x7E +#define RXFHBCR_CNT_MASK 0x0FFF + +#define KS_TXQCR 0x80 +#define TXQCR_AETFE (1 << 2) +#define TXQCR_TXQMAM (1 << 1) +#define TXQCR_METFE (1 << 0) + +#define KS_RXQCR 0x82 +#define RXQCR_RXDTTS (1 << 12) +#define RXQCR_RXDBCTS (1 << 11) +#define RXQCR_RXFCTS (1 << 10) +#define RXQCR_RXIPHTOE (1 << 9) +#define RXQCR_RXDTTE (1 << 7) +#define RXQCR_RXDBCTE (1 << 6) +#define RXQCR_RXFCTE (1 << 5) +#define RXQCR_ADRFE (1 << 4) +#define RXQCR_SDA (1 << 3) +#define RXQCR_RRXEF (1 << 0) +#define RXQCR_CMD_CNTL (RXQCR_RXFCTE|RXQCR_ADRFE) + +#define KS_TXFDPR 0x84 +#define TXFDPR_TXFPAI (1 << 14) +#define TXFDPR_TXFP_MASK (0x7ff << 0) +#define TXFDPR_TXFP_SHIFT (0) + +#define KS_RXFDPR 0x86 +#define RXFDPR_RXFPAI (1 << 14) + +#define KS_RXDTTR 0x8C +#define KS_RXDBCTR 0x8E + +#define KS_IER 0x90 +#define KS_ISR 0x92 +#define IRQ_LCI (1 << 15) +#define IRQ_TXI (1 << 14) +#define IRQ_RXI (1 << 13) +#define IRQ_RXOI (1 << 11) +#define IRQ_TXPSI (1 << 9) +#define IRQ_RXPSI (1 << 8) +#define IRQ_TXSAI (1 << 6) +#define IRQ_RXWFDI (1 << 5) +#define IRQ_RXMPDI (1 << 4) +#define IRQ_LDI (1 << 3) +#define IRQ_EDI (1 << 2) +#define IRQ_SPIBEI (1 << 1) +#define IRQ_DEDI (1 << 0) + +#define KS_RXFCTR 0x9C +#define RXFCTR_THRESHOLD_MASK 0x00FF + +#define KS_RXFC 0x9D +#define RXFCTR_RXFC_MASK (0xff << 8) +#define RXFCTR_RXFC_SHIFT (8) +#define RXFCTR_RXFC_GET(_v) (((_v) >> 8) & 0xff) +#define RXFCTR_RXFCT_MASK (0xff << 0) +#define RXFCTR_RXFCT_SHIFT (0) + +#define KS_TXNTFSR 0x9E + +#define KS_MAHTR0 0xA0 +#define KS_MAHTR1 0xA2 +#define KS_MAHTR2 0xA4 +#define KS_MAHTR3 0xA6 + +#define KS_FCLWR 0xB0 +#define KS_FCHWR 0xB2 +#define KS_FCOWR 0xB4 + +#define KS_CIDER 0xC0 +#define CIDER_ID 0x8870 +#define CIDER_REV_MASK (0x7 << 1) +#define CIDER_REV_SHIFT (1) +#define CIDER_REV_GET(_v) (((_v) >> 1) & 0x7) + +#define KS_CGCR 0xC6 +#define KS_IACR 0xC8 +#define IACR_RDEN (1 << 12) +#define IACR_TSEL_MASK (0x3 << 10) +#define IACR_TSEL_SHIFT (10) +#define IACR_TSEL_MIB (0x3 << 10) +#define IACR_ADDR_MASK (0x1f << 0) +#define IACR_ADDR_SHIFT (0) + +#define KS_IADLR 0xD0 +#define KS_IAHDR 0xD2 + +#define KS_PMECR 0xD4 +#define PMECR_PME_DELAY (1 << 14) +#define PMECR_PME_POL (1 << 12) +#define PMECR_WOL_WAKEUP (1 << 11) +#define PMECR_WOL_MAGICPKT (1 << 10) +#define PMECR_WOL_LINKUP (1 << 9) +#define PMECR_WOL_ENERGY (1 << 8) +#define PMECR_AUTO_WAKE_EN (1 << 7) +#define PMECR_WAKEUP_NORMAL (1 << 6) +#define PMECR_WKEVT_MASK (0xf << 2) +#define PMECR_WKEVT_SHIFT (2) +#define PMECR_WKEVT_GET(_v) (((_v) >> 2) & 0xf) +#define PMECR_WKEVT_ENERGY (0x1 << 2) +#define PMECR_WKEVT_LINK (0x2 << 2) +#define PMECR_WKEVT_MAGICPKT (0x4 << 2) +#define PMECR_WKEVT_FRAME (0x8 << 2) +#define PMECR_PM_MASK (0x3 << 0) +#define PMECR_PM_SHIFT (0) +#define PMECR_PM_NORMAL (0x0 << 0) +#define PMECR_PM_ENERGY (0x1 << 0) +#define PMECR_PM_SOFTDOWN (0x2 << 0) +#define PMECR_PM_POWERSAVE (0x3 << 0) + +/* Standard MII PHY data */ +#define KS_P1MBCR 0xE4 +#define 
P1MBCR_FORCE_FDX (1 << 8) + +#define KS_P1MBSR 0xE6 +#define P1MBSR_AN_COMPLETE (1 << 5) +#define P1MBSR_AN_CAPABLE (1 << 3) +#define P1MBSR_LINK_UP (1 << 2) + +#define KS_PHY1ILR 0xE8 +#define KS_PHY1IHR 0xEA +#define KS_P1ANAR 0xEC +#define KS_P1ANLPR 0xEE + +#define KS_P1SCLMD 0xF4 +#define P1SCLMD_LEDOFF (1 << 15) +#define P1SCLMD_TXIDS (1 << 14) +#define P1SCLMD_RESTARTAN (1 << 13) +#define P1SCLMD_DISAUTOMDIX (1 << 10) +#define P1SCLMD_FORCEMDIX (1 << 9) +#define P1SCLMD_AUTONEGEN (1 << 7) +#define P1SCLMD_FORCE100 (1 << 6) +#define P1SCLMD_FORCEFDX (1 << 5) +#define P1SCLMD_ADV_FLOW (1 << 4) +#define P1SCLMD_ADV_100BT_FDX (1 << 3) +#define P1SCLMD_ADV_100BT_HDX (1 << 2) +#define P1SCLMD_ADV_10BT_FDX (1 << 1) +#define P1SCLMD_ADV_10BT_HDX (1 << 0) + +#define KS_P1CR 0xF6 +#define P1CR_HP_MDIX (1 << 15) +#define P1CR_REV_POL (1 << 13) +#define P1CR_OP_100M (1 << 10) +#define P1CR_OP_FDX (1 << 9) +#define P1CR_OP_MDI (1 << 7) +#define P1CR_AN_DONE (1 << 6) +#define P1CR_LINK_GOOD (1 << 5) +#define P1CR_PNTR_FLOW (1 << 4) +#define P1CR_PNTR_100BT_FDX (1 << 3) +#define P1CR_PNTR_100BT_HDX (1 << 2) +#define P1CR_PNTR_10BT_FDX (1 << 1) +#define P1CR_PNTR_10BT_HDX (1 << 0) + +/* TX Frame control */ + +#define TXFR_TXIC (1 << 15) +#define TXFR_TXFID_MASK (0x3f << 0) +#define TXFR_TXFID_SHIFT (0) + +#define KS_P1SR 0xF8 +#define P1SR_HP_MDIX (1 << 15) +#define P1SR_REV_POL (1 << 13) +#define P1SR_OP_100M (1 << 10) +#define P1SR_OP_FDX (1 << 9) +#define P1SR_OP_MDI (1 << 7) +#define P1SR_AN_DONE (1 << 6) +#define P1SR_LINK_GOOD (1 << 5) +#define P1SR_PNTR_FLOW (1 << 4) +#define P1SR_PNTR_100BT_FDX (1 << 3) +#define P1SR_PNTR_100BT_HDX (1 << 2) +#define P1SR_PNTR_10BT_FDX (1 << 1) +#define P1SR_PNTR_10BT_HDX (1 << 0) + +#define ENUM_BUS_NONE 0 +#define ENUM_BUS_8BIT 1 +#define ENUM_BUS_16BIT 2 +#define ENUM_BUS_32BIT 3 + +#define MAX_MCAST_LST 32 +#define HW_MCAST_SIZE 8 +#define MAC_ADDR_LEN 6 + +/** + * union ks_tx_hdr - tx header data + * @txb: The header as bytes + * @txw: The header as 16bit, little-endian words + * + * A dual representation of the tx header data to allow + * access to individual bytes, and to allow 16bit accesses + * with 16bit alignment. + */ +union ks_tx_hdr { + u8 txb[4]; + __le16 txw[2]; +}; + +/** + * struct ks_net - KS8851 driver private data + * @net_device : The network device we're bound to + * @hw_addr : start address of data register. + * @hw_addr_cmd : start address of command register. + * @txh : temporaly buffer to save status/length. + * @lock : Lock to ensure that the device is not accessed when busy. + * @pdev : Pointer to platform device. + * @mii : The MII state information for the mii calls. + * @frame_head_info : frame header information for multi-pkt rx. + * @statelock : Lock on this structure for tx list. + * @msg_enable : The message flags controlling driver output (see ethtool). + * @frame_cnt : number of frames received. + * @bus_width : i/o bus width. + * @irq : irq number assigned to this device. + * @rc_rxqcr : Cached copy of KS_RXQCR. + * @rc_txcr : Cached copy of KS_TXCR. + * @rc_ier : Cached copy of KS_IER. + * @sharedbus : Multipex(addr and data bus) mode indicator. + * @cmd_reg_cache : command register cached. + * @cmd_reg_cache_int : command register cached. Used in the irq handler. + * @promiscuous : promiscuous mode indicator. + * @all_mcast : mutlicast indicator. + * @mcast_lst_size : size of multicast list. + * @mcast_lst : multicast list. + * @mcast_bits : multicast enabed. + * @mac_addr : MAC address assigned to this device. 
+ * @fid : frame id. + * @extra_byte : number of extra byte prepended rx pkt. + * @enabled : indicator this device works. + * + * The @lock ensures that the chip is protected when certain operations are + * in progress. When the read or write packet transfer is in progress, most + * of the chip registers are not accessible until the transfer is finished and + * the DMA has been de-asserted. + * + * The @statelock is used to protect information in the structure which may + * need to be accessed via several sources, such as the network driver layer + * or one of the work queues. + * + */ + +/* Receive multiplex framer header info */ +struct type_frame_head { + u16 sts; /* Frame status */ + u16 len; /* Byte count */ +}; + +struct ks_net { + struct net_device *netdev; + void __iomem *hw_addr; + void __iomem *hw_addr_cmd; + union ks_tx_hdr txh ____cacheline_aligned; + struct mutex lock; /* spinlock to be interrupt safe */ + struct platform_device *pdev; + struct mii_if_info mii; + struct type_frame_head *frame_head_info; + spinlock_t statelock; + u32 msg_enable; + u32 frame_cnt; + int bus_width; + int irq; + + u16 rc_rxqcr; + u16 rc_txcr; + u16 rc_ier; + u16 sharedbus; + u16 cmd_reg_cache; + u16 cmd_reg_cache_int; + u16 promiscuous; + u16 all_mcast; + u16 mcast_lst_size; + u8 mcast_lst[MAX_MCAST_LST][MAC_ADDR_LEN]; + u8 mcast_bits[HW_MCAST_SIZE]; + u8 mac_addr[6]; + u8 fid; + u8 extra_byte; + u8 enabled; +}; + +static int msg_enable; + +#define ks_info(_ks, _msg...) dev_info(&(_ks)->pdev->dev, _msg) +#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->pdev->dev, _msg) +#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->pdev->dev, _msg) +#define ks_err(_ks, _msg...) dev_err(&(_ks)->pdev->dev, _msg) + +#define BE3 0x8000 /* Byte Enable 3 */ +#define BE2 0x4000 /* Byte Enable 2 */ +#define BE1 0x2000 /* Byte Enable 1 */ +#define BE0 0x1000 /* Byte Enable 0 */ + +/** + * register read/write calls. + * + * All these calls issue transactions to access the chip's registers. They + * all require that the necessary lock is held to prevent accesses when the + * chip is busy transfering packet data (RX/TX FIFO accesses). 
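+ *
+ * Each access is a two-step sequence: the register offset plus the
+ * byte-enable bits are written to the command window, then the data
+ * window is read or written.  For example, a 16-bit read of KS_ISR
+ * (0x92) issues:
+ *
+ *	iowrite16(0x92 | ((BE1 | BE0) << (0x92 & 0x02)), ks->hw_addr_cmd);
+ *	status = ioread16(ks->hw_addr);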
+ */ + +/** + * ks_rdreg8 - read 8 bit register from device + * @ks : The chip information + * @offset: The register address + * + * Read a 8bit register from the chip, returning the result + */ +static u8 ks_rdreg8(struct ks_net *ks, int offset) +{ + u16 data; + u8 shift_bit = offset & 0x03; + u8 shift_data = (offset & 1) << 3; + ks->cmd_reg_cache = (u16) offset | (u16)(BE0 << shift_bit); + iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd); + data = ioread16(ks->hw_addr); + return (u8)(data >> shift_data); +} + +/** + * ks_rdreg16 - read 16 bit register from device + * @ks : The chip information + * @offset: The register address + * + * Read a 16bit register from the chip, returning the result + */ + +static u16 ks_rdreg16(struct ks_net *ks, int offset) +{ + ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02)); + iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd); + return ioread16(ks->hw_addr); +} + +/** + * ks_wrreg8 - write 8bit register value to chip + * @ks: The chip information + * @offset: The register address + * @value: The value to write + * + */ +static void ks_wrreg8(struct ks_net *ks, int offset, u8 value) +{ + u8 shift_bit = (offset & 0x03); + u16 value_write = (u16)(value << ((offset & 1) << 3)); + ks->cmd_reg_cache = (u16)offset | (BE0 << shift_bit); + iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd); + iowrite16(value_write, ks->hw_addr); +} + +/** + * ks_wrreg16 - write 16bit register value to chip + * @ks: The chip information + * @offset: The register address + * @value: The value to write + * + */ + +static void ks_wrreg16(struct ks_net *ks, int offset, u16 value) +{ + ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02)); + iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd); + iowrite16(value, ks->hw_addr); +} + +/** + * ks_inblk - read a block of data from QMU. This is called after sudo DMA mode enabled. + * @ks: The chip state + * @wptr: buffer address to save data + * @len: length in byte to read + * + */ +static inline void ks_inblk(struct ks_net *ks, u16 *wptr, u32 len) +{ + len >>= 1; + while (len--) + *wptr++ = (u16)ioread16(ks->hw_addr); +} + +/** + * ks_outblk - write data to QMU. This is called after sudo DMA mode enabled. + * @ks: The chip information + * @wptr: buffer address + * @len: length in byte to write + * + */ +static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len) +{ + len >>= 1; + while (len--) + iowrite16(*wptr++, ks->hw_addr); +} + +/** + * ks_tx_fifo_space - return the available hardware buffer size. + * @ks: The chip information + * + */ +static inline u16 ks_tx_fifo_space(struct ks_net *ks) +{ + return ks_rdreg16(ks, KS_TXMIR) & 0x1fff; +} + +/** + * ks_save_cmd_reg - save the command register from the cache. + * @ks: The chip information + * + */ +static inline void ks_save_cmd_reg(struct ks_net *ks) +{ + /*ks8851 MLL has a bug to read back the command register. + * So rely on software to save the content of command register. + */ + ks->cmd_reg_cache_int = ks->cmd_reg_cache; +} + +/** + * ks_restore_cmd_reg - restore the command register from the cache and + * write to hardware register. + * @ks: The chip information + * + */ +static inline void ks_restore_cmd_reg(struct ks_net *ks) +{ + ks->cmd_reg_cache = ks->cmd_reg_cache_int; + iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd); +} + +/** + * ks_set_powermode - set power mode of the device + * @ks: The chip information + * @pwrmode: The power mode value to write to KS_PMECR. + * + * Change the power mode of the chip. 
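+ * Valid modes are the PMECR_PM_* values defined above; only the
+ * power-management field of PMECR is updated.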
+ */ +static void ks_set_powermode(struct ks_net *ks, unsigned pwrmode) +{ + unsigned pmecr; + + if (netif_msg_hw(ks)) + ks_dbg(ks, "setting power mode %d\n", pwrmode); + + ks_rdreg16(ks, KS_GRR); + pmecr = ks_rdreg16(ks, KS_PMECR); + pmecr &= ~PMECR_PM_MASK; + pmecr |= pwrmode; + + ks_wrreg16(ks, KS_PMECR, pmecr); +} + +/** + * ks_read_config - read chip configuration of bus width. + * @ks: The chip information + * + */ +static void ks_read_config(struct ks_net *ks) +{ + u16 reg_data = 0; + + /* Regardless of bus width, 8 bit read should always work.*/ + reg_data = ks_rdreg8(ks, KS_CCR) & 0x00FF; + reg_data |= ks_rdreg8(ks, KS_CCR+1) << 8; + + /* addr/data bus are multiplexed */ + ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED; + + /* There are garbage data when reading data from QMU, + depending on bus-width. + */ + + if (reg_data & CCR_8BIT) { + ks->bus_width = ENUM_BUS_8BIT; + ks->extra_byte = 1; + } else if (reg_data & CCR_16BIT) { + ks->bus_width = ENUM_BUS_16BIT; + ks->extra_byte = 2; + } else { + ks->bus_width = ENUM_BUS_32BIT; + ks->extra_byte = 4; + } +} + +/** + * ks_soft_reset - issue one of the soft reset to the device + * @ks: The device state. + * @op: The bit(s) to set in the GRR + * + * Issue the relevant soft-reset command to the device's GRR register + * specified by @op. + * + * Note, the delays are in there as a caution to ensure that the reset + * has time to take effect and then complete. Since the datasheet does + * not currently specify the exact sequence, we have chosen something + * that seems to work with our device. + */ +static void ks_soft_reset(struct ks_net *ks, unsigned op) +{ + /* Disable interrupt first */ + ks_wrreg16(ks, KS_IER, 0x0000); + ks_wrreg16(ks, KS_GRR, op); + mdelay(10); /* wait a short time to effect reset */ + ks_wrreg16(ks, KS_GRR, 0); + mdelay(1); /* wait for condition to clear */ +} + + +/** + * ks_read_qmu - read 1 pkt data from the QMU. + * @ks: The chip information + * @buf: buffer address to save 1 pkt + * @len: Pkt length + * Here is the sequence to read 1 pkt: + * 1. set sudo DMA mode + * 2. read prepend data + * 3. read pkt data + * 4. reset sudo DMA Mode + */ +static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len) +{ + u32 r = ks->extra_byte & 0x1 ; + u32 w = ks->extra_byte - r; + + /* 1. set sudo DMA mode */ + ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI); + ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff); + + /* 2. read prepend data */ + /** + * read 4 + extra bytes and discard them. + * extra bytes for dummy, 2 for status, 2 for len + */ + + /* use likely(r) for 8 bit access for performance */ + if (unlikely(r)) + ioread8(ks->hw_addr); + ks_inblk(ks, buf, w + 2 + 2); + + /* 3. read pkt data */ + ks_inblk(ks, buf, ALIGN(len, 4)); + + /* 4. reset sudo DMA Mode */ + ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr); +} + +/** + * ks_rcv - read multiple pkts data from the QMU. + * @ks: The chip information + * @netdev: The network device being opened. + * + * Read all of header information before reading pkt content. + * It is not allowed only port of pkts in QMU after issuing + * interrupt ack. 
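+ * (i.e. every frame counted in RXFCTR is drained in one pass rather
+ * than leaving part of the queue in the QMU.)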
+ */ +static void ks_rcv(struct ks_net *ks, struct net_device *netdev) +{ + u32 i; + struct type_frame_head *frame_hdr = ks->frame_head_info; + struct sk_buff *skb; + + ks->frame_cnt = ks_rdreg16(ks, KS_RXFCTR) >> 8; + + /* read all header information */ + for (i = 0; i < ks->frame_cnt; i++) { + /* Checking Received packet status */ + frame_hdr->sts = ks_rdreg16(ks, KS_RXFHSR); + /* Get packet len from hardware */ + frame_hdr->len = ks_rdreg16(ks, KS_RXFHBCR); + frame_hdr++; + } + + frame_hdr = ks->frame_head_info; + while (ks->frame_cnt--) { + skb = dev_alloc_skb(frame_hdr->len + 16); + if (likely(skb && (frame_hdr->sts & RXFSHR_RXFV) && + (frame_hdr->len < RX_BUF_SIZE) && frame_hdr->len)) { + skb_reserve(skb, 2); + /* read data block including CRC 4 bytes */ + ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len + 4); + skb_put(skb, frame_hdr->len); + skb->dev = netdev; + skb->protocol = eth_type_trans(skb, netdev); + netif_rx(skb); + } else { + printk(KERN_ERR "%s: err:skb alloc\n", __func__); + ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF)); + if (skb) + dev_kfree_skb_irq(skb); + } + frame_hdr++; + } +} + +/** + * ks_update_link_status - link status update. + * @netdev: The network device being opened. + * @ks: The chip information + * + */ + +static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks) +{ + /* check the status of the link */ + u32 link_up_status; + if (ks_rdreg16(ks, KS_P1SR) & P1SR_LINK_GOOD) { + netif_carrier_on(netdev); + link_up_status = true; + } else { + netif_carrier_off(netdev); + link_up_status = false; + } + if (netif_msg_link(ks)) + ks_dbg(ks, "%s: %s\n", + __func__, link_up_status ? "UP" : "DOWN"); +} + +/** + * ks_irq - device interrupt handler + * @irq: Interrupt number passed from the IRQ hnalder. + * @pw: The private word passed to register_irq(), our struct ks_net. + * + * This is the handler invoked to find out what happened + * + * Read the interrupt status, work out what needs to be done and then clear + * any of the interrupts that are not needed. + */ + +static irqreturn_t ks_irq(int irq, void *pw) +{ + struct ks_net *ks = pw; + struct net_device *netdev = ks->netdev; + u16 status; + + /*this should be the first in IRQ handler */ + ks_save_cmd_reg(ks); + + status = ks_rdreg16(ks, KS_ISR); + if (unlikely(!status)) { + ks_restore_cmd_reg(ks); + return IRQ_NONE; + } + + ks_wrreg16(ks, KS_ISR, status); + + if (likely(status & IRQ_RXI)) + ks_rcv(ks, netdev); + + if (unlikely(status & IRQ_LCI)) + ks_update_link_status(netdev, ks); + + if (unlikely(status & IRQ_TXI)) + netif_wake_queue(netdev); + + if (unlikely(status & IRQ_LDI)) { + + u16 pmecr = ks_rdreg16(ks, KS_PMECR); + pmecr &= ~PMECR_WKEVT_MASK; + ks_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK); + } + + /* this should be the last in IRQ handler*/ + ks_restore_cmd_reg(ks); + return IRQ_HANDLED; +} + + +/** + * ks_net_open - open network device + * @netdev: The network device being opened. + * + * Called when the network device is marked active, such as a user executing + * 'ifconfig up' on the device. + */ +static int ks_net_open(struct net_device *netdev) +{ + struct ks_net *ks = netdev_priv(netdev); + int err; + +#define KS_INT_FLAGS (IRQF_DISABLED|IRQF_TRIGGER_LOW) + /* lock the card, even if we may not actually do anything + * else at the moment. 
+ */ + + if (netif_msg_ifup(ks)) + ks_dbg(ks, "%s - entry\n", __func__); + + /* reset the HW */ + err = request_irq(ks->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, ks); + + if (err) { + printk(KERN_ERR "Failed to request IRQ: %d: %d\n", + ks->irq, err); + return err; + } + + if (netif_msg_ifup(ks)) + ks_dbg(ks, "network device %s up\n", netdev->name); + + return 0; +} + +/** + * ks_net_stop - close network device + * @netdev: The device being closed. + * + * Called to close down a network device which has been active. Cancell any + * work, shutdown the RX and TX process and then place the chip into a low + * power state whilst it is not being used. + */ +static int ks_net_stop(struct net_device *netdev) +{ + struct ks_net *ks = netdev_priv(netdev); + + if (netif_msg_ifdown(ks)) + ks_info(ks, "%s: shutting down\n", netdev->name); + + netif_stop_queue(netdev); + + kfree(ks->frame_head_info); + + mutex_lock(&ks->lock); + + /* turn off the IRQs and ack any outstanding */ + ks_wrreg16(ks, KS_IER, 0x0000); + ks_wrreg16(ks, KS_ISR, 0xffff); + + /* shutdown RX process */ + ks_wrreg16(ks, KS_RXCR1, 0x0000); + + /* shutdown TX process */ + ks_wrreg16(ks, KS_TXCR, 0x0000); + + /* set powermode to soft power down to save power */ + ks_set_powermode(ks, PMECR_PM_SOFTDOWN); + free_irq(ks->irq, netdev); + mutex_unlock(&ks->lock); + return 0; +} + + +/** + * ks_write_qmu - write 1 pkt data to the QMU. + * @ks: The chip information + * @pdata: buffer address to save 1 pkt + * @len: Pkt length in byte + * Here is the sequence to write 1 pkt: + * 1. set sudo DMA mode + * 2. write status/length + * 3. write pkt data + * 4. reset sudo DMA Mode + * 5. reset sudo DMA mode + * 6. Wait until pkt is out + */ +static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len) +{ + unsigned fid = ks->fid; + + fid = ks->fid; + ks->fid = (ks->fid + 1) & TXFR_TXFID_MASK; + + /* reduce the tx interrupt occurrances. */ + if (!fid) + fid |= TXFR_TXIC; /* irq on completion */ + + /* start header at txb[0] to align txw entries */ + ks->txh.txw[0] = cpu_to_le16(fid); + ks->txh.txw[1] = cpu_to_le16(len); + + /* 1. set sudo-DMA mode */ + ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff); + /* 2. write status/lenth info */ + ks_outblk(ks, ks->txh.txw, 4); + /* 3. write pkt data */ + ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4)); + /* 4. reset sudo-DMA mode */ + ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr); + /* 5. Enqueue Tx(move the pkt from TX buffer into TXQ) */ + ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE); + /* 6. wait until TXQCR_METFE is auto-cleared */ + while (ks_rdreg16(ks, KS_TXQCR) & TXQCR_METFE) + ; +} + +static void ks_disable_int(struct ks_net *ks) +{ + ks_wrreg16(ks, KS_IER, 0x0000); +} /* ks_disable_int */ + +static void ks_enable_int(struct ks_net *ks) +{ + ks_wrreg16(ks, KS_IER, ks->rc_ier); +} /* ks_enable_int */ + +/** + * ks_start_xmit - transmit packet + * @skb : The buffer to transmit + * @netdev : The device used to transmit the packet. + * + * Called by the network layer to transmit the @skb. + * spin_lock_irqsave is required because tx and rx should be mutual exclusive. + * So while tx is in-progress, prevent IRQ interrupt from happenning. 
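+ * The transmit path below disables the device IRQ and takes statelock
+ * around the FIFO write, and requires skb->len + 12 bytes of free TX
+ * FIFO space: 4 for alignment, 4 for status/length and 4 for CRC.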
+ */ +static int ks_start_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + int retv = NETDEV_TX_OK; + struct ks_net *ks = netdev_priv(netdev); + + disable_irq(netdev->irq); + ks_disable_int(ks); + spin_lock(&ks->statelock); + + /* Extra space are required: + * 4 byte for alignment, 4 for status/length, 4 for CRC + */ + + if (likely(ks_tx_fifo_space(ks) >= skb->len + 12)) { + ks_write_qmu(ks, skb->data, skb->len); + dev_kfree_skb(skb); + } else + retv = NETDEV_TX_BUSY; + spin_unlock(&ks->statelock); + ks_enable_int(ks); + enable_irq(netdev->irq); + return retv; +} + +/** + * ks_start_rx - ready to serve pkts + * @ks : The chip information + * + */ +static void ks_start_rx(struct ks_net *ks) +{ + u16 cntl; + + /* Enables QMU Receive (RXCR1). */ + cntl = ks_rdreg16(ks, KS_RXCR1); + cntl |= RXCR1_RXE ; + ks_wrreg16(ks, KS_RXCR1, cntl); +} /* ks_start_rx */ + +/** + * ks_stop_rx - stop to serve pkts + * @ks : The chip information + * + */ +static void ks_stop_rx(struct ks_net *ks) +{ + u16 cntl; + + /* Disables QMU Receive (RXCR1). */ + cntl = ks_rdreg16(ks, KS_RXCR1); + cntl &= ~RXCR1_RXE ; + ks_wrreg16(ks, KS_RXCR1, cntl); + +} /* ks_stop_rx */ + +static unsigned long const ethernet_polynomial = 0x04c11db7U; + +static unsigned long ether_gen_crc(int length, u8 *data) +{ + long crc = -1; + while (--length >= 0) { + u8 current_octet = *data++; + int bit; + + for (bit = 0; bit < 8; bit++, current_octet >>= 1) { + crc = (crc << 1) ^ + ((crc < 0) ^ (current_octet & 1) ? + ethernet_polynomial : 0); + } + } + return (unsigned long)crc; +} /* ether_gen_crc */ + +/** +* ks_set_grpaddr - set multicast information +* @ks : The chip information +*/ + +static void ks_set_grpaddr(struct ks_net *ks) +{ + u8 i; + u32 index, position, value; + + memset(ks->mcast_bits, 0, sizeof(u8) * HW_MCAST_SIZE); + + for (i = 0; i < ks->mcast_lst_size; i++) { + position = (ether_gen_crc(6, ks->mcast_lst[i]) >> 26) & 0x3f; + index = position >> 3; + value = 1 << (position & 7); + ks->mcast_bits[index] |= (u8)value; + } + + for (i = 0; i < HW_MCAST_SIZE; i++) { + if (i & 1) { + ks_wrreg16(ks, (u16)((KS_MAHTR0 + i) & ~1), + (ks->mcast_bits[i] << 8) | + ks->mcast_bits[i - 1]); + } + } +} /* ks_set_grpaddr */ + +/* +* ks_clear_mcast - clear multicast information +* +* @ks : The chip information +* This routine removes all mcast addresses set in the hardware. 
+*/ + +static void ks_clear_mcast(struct ks_net *ks) +{ + u16 i, mcast_size; + for (i = 0; i < HW_MCAST_SIZE; i++) + ks->mcast_bits[i] = 0; + + mcast_size = HW_MCAST_SIZE >> 2; + for (i = 0; i < mcast_size; i++) + ks_wrreg16(ks, KS_MAHTR0 + (2*i), 0); +} + +static void ks_set_promis(struct ks_net *ks, u16 promiscuous_mode) +{ + u16 cntl; + ks->promiscuous = promiscuous_mode; + ks_stop_rx(ks); /* Stop receiving for reconfiguration */ + cntl = ks_rdreg16(ks, KS_RXCR1); + + cntl &= ~RXCR1_FILTER_MASK; + if (promiscuous_mode) + /* Enable Promiscuous mode */ + cntl |= RXCR1_RXAE | RXCR1_RXINVF; + else + /* Disable Promiscuous mode (default normal mode) */ + cntl |= RXCR1_RXPAFMA; + + ks_wrreg16(ks, KS_RXCR1, cntl); + + if (ks->enabled) + ks_start_rx(ks); + +} /* ks_set_promis */ + +static void ks_set_mcast(struct ks_net *ks, u16 mcast) +{ + u16 cntl; + + ks->all_mcast = mcast; + ks_stop_rx(ks); /* Stop receiving for reconfiguration */ + cntl = ks_rdreg16(ks, KS_RXCR1); + cntl &= ~RXCR1_FILTER_MASK; + if (mcast) + /* Enable "Perfect with Multicast address passed mode" */ + cntl |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA); + else + /** + * Disable "Perfect with Multicast address passed + * mode" (normal mode). + */ + cntl |= RXCR1_RXPAFMA; + + ks_wrreg16(ks, KS_RXCR1, cntl); + + if (ks->enabled) + ks_start_rx(ks); +} /* ks_set_mcast */ + +static void ks_set_rx_mode(struct net_device *netdev) +{ + struct ks_net *ks = netdev_priv(netdev); + struct dev_mc_list *ptr; + + /* Turn on/off promiscuous mode. */ + if ((netdev->flags & IFF_PROMISC) == IFF_PROMISC) + ks_set_promis(ks, + (u16)((netdev->flags & IFF_PROMISC) == IFF_PROMISC)); + /* Turn on/off all mcast mode. */ + else if ((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI) + ks_set_mcast(ks, + (u16)((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI)); + else + ks_set_promis(ks, false); + + if ((netdev->flags & IFF_MULTICAST) && netdev->mc_count) { + if (netdev->mc_count <= MAX_MCAST_LST) { + int i = 0; + for (ptr = netdev->mc_list; ptr; ptr = ptr->next) { + if (!(*ptr->dmi_addr & 1)) + continue; + if (i >= MAX_MCAST_LST) + break; + memcpy(ks->mcast_lst[i++], ptr->dmi_addr, + MAC_ADDR_LEN); + } + ks->mcast_lst_size = (u8)i; + ks_set_grpaddr(ks); + } else { + /** + * List too big to support so + * turn on all mcast mode. 
+ */ + ks->mcast_lst_size = MAX_MCAST_LST; + ks_set_mcast(ks, true); + } + } else { + ks->mcast_lst_size = 0; + ks_clear_mcast(ks); + } +} /* ks_set_rx_mode */ + +static void ks_set_mac(struct ks_net *ks, u8 *data) +{ + u16 *pw = (u16 *)data; + u16 w, u; + + ks_stop_rx(ks); /* Stop receiving for reconfiguration */ + + u = *pw++; + w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF); + ks_wrreg16(ks, KS_MARH, w); + + u = *pw++; + w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF); + ks_wrreg16(ks, KS_MARM, w); + + u = *pw; + w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF); + ks_wrreg16(ks, KS_MARL, w); + + memcpy(ks->mac_addr, data, 6); + + if (ks->enabled) + ks_start_rx(ks); +} + +static int ks_set_mac_address(struct net_device *netdev, void *paddr) +{ + struct ks_net *ks = netdev_priv(netdev); + struct sockaddr *addr = paddr; + u8 *da; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + + da = (u8 *)netdev->dev_addr; + + ks_set_mac(ks, da); + return 0; +} + +static int ks_net_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) +{ + struct ks_net *ks = netdev_priv(netdev); + + if (!netif_running(netdev)) + return -EINVAL; + + return generic_mii_ioctl(&ks->mii, if_mii(req), cmd, NULL); +} + +static const struct net_device_ops ks_netdev_ops = { + .ndo_open = ks_net_open, + .ndo_stop = ks_net_stop, + .ndo_do_ioctl = ks_net_ioctl, + .ndo_start_xmit = ks_start_xmit, + .ndo_set_mac_address = ks_set_mac_address, + .ndo_set_rx_mode = ks_set_rx_mode, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, +}; + +/* ethtool support */ + +static void ks_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *di) +{ + strlcpy(di->driver, DRV_NAME, sizeof(di->driver)); + strlcpy(di->version, "1.00", sizeof(di->version)); + strlcpy(di->bus_info, dev_name(netdev->dev.parent), + sizeof(di->bus_info)); +} + +static u32 ks_get_msglevel(struct net_device *netdev) +{ + struct ks_net *ks = netdev_priv(netdev); + return ks->msg_enable; +} + +static void ks_set_msglevel(struct net_device *netdev, u32 to) +{ + struct ks_net *ks = netdev_priv(netdev); + ks->msg_enable = to; +} + +static int ks_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +{ + struct ks_net *ks = netdev_priv(netdev); + return mii_ethtool_gset(&ks->mii, cmd); +} + +static int ks_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +{ + struct ks_net *ks = netdev_priv(netdev); + return mii_ethtool_sset(&ks->mii, cmd); +} + +static u32 ks_get_link(struct net_device *netdev) +{ + struct ks_net *ks = netdev_priv(netdev); + return mii_link_ok(&ks->mii); +} + +static int ks_nway_reset(struct net_device *netdev) +{ + struct ks_net *ks = netdev_priv(netdev); + return mii_nway_restart(&ks->mii); +} + +static const struct ethtool_ops ks_ethtool_ops = { + .get_drvinfo = ks_get_drvinfo, + .get_msglevel = ks_get_msglevel, + .set_msglevel = ks_set_msglevel, + .get_settings = ks_get_settings, + .set_settings = ks_set_settings, + .get_link = ks_get_link, + .nway_reset = ks_nway_reset, +}; + +/* MII interface controls */ + +/** + * ks_phy_reg - convert MII register into a KS8851 register + * @reg: MII register number. + * + * Return the KS8851 register number for the corresponding MII PHY register + * if possible. Return zero if the MII register has no direct mapping to the + * KS8851 register set. 
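+ * Only MII_BMCR, MII_BMSR, MII_PHYSID1/2, MII_ADVERTISE and MII_LPA
+ * have KS8851 equivalents.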
+ */ +static int ks_phy_reg(int reg) +{ + switch (reg) { + case MII_BMCR: + return KS_P1MBCR; + case MII_BMSR: + return KS_P1MBSR; + case MII_PHYSID1: + return KS_PHY1ILR; + case MII_PHYSID2: + return KS_PHY1IHR; + case MII_ADVERTISE: + return KS_P1ANAR; + case MII_LPA: + return KS_P1ANLPR; + } + + return 0x0; +} + +/** + * ks_phy_read - MII interface PHY register read. + * @netdev: The network device the PHY is on. + * @phy_addr: Address of PHY (ignored as we only have one) + * @reg: The register to read. + * + * This call reads data from the PHY register specified in @reg. Since the + * device does not support all the MII registers, the non-existant values + * are always returned as zero. + * + * We return zero for unsupported registers as the MII code does not check + * the value returned for any error status, and simply returns it to the + * caller. The mii-tool that the driver was tested with takes any -ve error + * as real PHY capabilities, thus displaying incorrect data to the user. + */ +static int ks_phy_read(struct net_device *netdev, int phy_addr, int reg) +{ + struct ks_net *ks = netdev_priv(netdev); + int ksreg; + int result; + + ksreg = ks_phy_reg(reg); + if (!ksreg) + return 0x0; /* no error return allowed, so use zero */ + + mutex_lock(&ks->lock); + result = ks_rdreg16(ks, ksreg); + mutex_unlock(&ks->lock); + + return result; +} + +static void ks_phy_write(struct net_device *netdev, + int phy, int reg, int value) +{ + struct ks_net *ks = netdev_priv(netdev); + int ksreg; + + ksreg = ks_phy_reg(reg); + if (ksreg) { + mutex_lock(&ks->lock); + ks_wrreg16(ks, ksreg, value); + mutex_unlock(&ks->lock); + } +} + +/** + * ks_read_selftest - read the selftest memory info. + * @ks: The device state + * + * Read and check the TX/RX memory selftest information. + */ +static int ks_read_selftest(struct ks_net *ks) +{ + unsigned both_done = MBIR_TXMBF | MBIR_RXMBF; + int ret = 0; + unsigned rd; + + rd = ks_rdreg16(ks, KS_MBIR); + + if ((rd & both_done) != both_done) { + ks_warn(ks, "Memory selftest not finished\n"); + return 0; + } + + if (rd & MBIR_TXMBFA) { + ks_err(ks, "TX memory selftest fails\n"); + ret |= 1; + } + + if (rd & MBIR_RXMBFA) { + ks_err(ks, "RX memory selftest fails\n"); + ret |= 2; + } + + ks_info(ks, "the selftest passes\n"); + return ret; +} + +static void ks_disable(struct ks_net *ks) +{ + u16 w; + + w = ks_rdreg16(ks, KS_TXCR); + + /* Disables QMU Transmit (TXCR). */ + w &= ~TXCR_TXE; + ks_wrreg16(ks, KS_TXCR, w); + + /* Disables QMU Receive (RXCR1). */ + w = ks_rdreg16(ks, KS_RXCR1); + w &= ~RXCR1_RXE ; + ks_wrreg16(ks, KS_RXCR1, w); + + ks->enabled = false; + +} /* ks_disable */ + +static void ks_setup(struct ks_net *ks) +{ + u16 w; + + /** + * Configure QMU Transmit + */ + + /* Setup Transmit Frame Data Pointer Auto-Increment (TXFDPR) */ + ks_wrreg16(ks, KS_TXFDPR, TXFDPR_TXFPAI); + + /* Setup Receive Frame Data Pointer Auto-Increment */ + ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI); + + /* Setup Receive Frame Threshold - 1 frame (RXFCTFC) */ + ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_THRESHOLD_MASK); + + /* Setup RxQ Command Control (RXQCR) */ + ks->rc_rxqcr = RXQCR_CMD_CNTL; + ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); + + /** + * set the force mode to half duplex, default is full duplex + * because if the auto-negotiation fails, most switch uses + * half-duplex. 
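+	 * (Only the force-duplex bit is cleared here; the remaining
+	 * P1MBCR auto-negotiation settings are left untouched.)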
+ */ + + w = ks_rdreg16(ks, KS_P1MBCR); + w &= ~P1MBCR_FORCE_FDX; + ks_wrreg16(ks, KS_P1MBCR, w); + + w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP; + ks_wrreg16(ks, KS_TXCR, w); + + w = RXCR1_RXFCE | RXCR1_RXBE | RXCR1_RXUE; + + if (ks->promiscuous) /* bPromiscuous */ + w |= (RXCR1_RXAE | RXCR1_RXINVF); + else if (ks->all_mcast) /* Multicast address passed mode */ + w |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA); + else /* Normal mode */ + w |= RXCR1_RXPAFMA; + + ks_wrreg16(ks, KS_RXCR1, w); +} /*ks_setup */ + + +static void ks_setup_int(struct ks_net *ks) +{ + ks->rc_ier = 0x00; + /* Clear the interrupts status of the hardware. */ + ks_wrreg16(ks, KS_ISR, 0xffff); + + /* Enables the interrupts of the hardware. */ + ks->rc_ier = (IRQ_LCI | IRQ_TXI | IRQ_RXI); +} /* ks_setup_int */ + +void ks_enable(struct ks_net *ks) +{ + u16 w; + + w = ks_rdreg16(ks, KS_TXCR); + /* Enables QMU Transmit (TXCR). */ + ks_wrreg16(ks, KS_TXCR, w | TXCR_TXE); + + /* + * RX Frame Count Threshold Enable and Auto-Dequeue RXQ Frame + * Enable + */ + + w = ks_rdreg16(ks, KS_RXQCR); + ks_wrreg16(ks, KS_RXQCR, w | RXQCR_RXFCTE); + + /* Enables QMU Receive (RXCR1). */ + w = ks_rdreg16(ks, KS_RXCR1); + ks_wrreg16(ks, KS_RXCR1, w | RXCR1_RXE); + ks->enabled = true; +} /* ks_enable */ + +static int ks_hw_init(struct ks_net *ks) +{ +#define MHEADER_SIZE (sizeof(struct type_frame_head) * MAX_RECV_FRAMES) + ks->promiscuous = 0; + ks->all_mcast = 0; + ks->mcast_lst_size = 0; + + ks->frame_head_info = (struct type_frame_head *) \ + kmalloc(MHEADER_SIZE, GFP_KERNEL); + if (!ks->frame_head_info) { + printk(KERN_ERR "Error: Fail to allocate frame memory\n"); + return false; + } + + ks_set_mac(ks, KS_DEFAULT_MAC_ADDRESS); + return true; +} + + +static int __devinit ks8851_probe(struct platform_device *pdev) +{ + int err = -ENOMEM; + struct resource *io_d, *io_c; + struct net_device *netdev; + struct ks_net *ks; + u16 id, data; + + io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0); + io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1); + + if (!request_mem_region(io_d->start, resource_size(io_d), DRV_NAME)) + goto err_mem_region; + + if (!request_mem_region(io_c->start, resource_size(io_c), DRV_NAME)) + goto err_mem_region1; + + netdev = alloc_etherdev(sizeof(struct ks_net)); + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + ks = netdev_priv(netdev); + ks->netdev = netdev; + ks->hw_addr = ioremap(io_d->start, resource_size(io_d)); + + if (!ks->hw_addr) + goto err_ioremap; + + ks->hw_addr_cmd = ioremap(io_c->start, resource_size(io_c)); + if (!ks->hw_addr_cmd) + goto err_ioremap1; + + ks->irq = platform_get_irq(pdev, 0); + + if (ks->irq < 0) { + err = ks->irq; + goto err_get_irq; + } + + ks->pdev = pdev; + + mutex_init(&ks->lock); + spin_lock_init(&ks->statelock); + + netdev->netdev_ops = &ks_netdev_ops; + netdev->ethtool_ops = &ks_ethtool_ops; + + /* setup mii state */ + ks->mii.dev = netdev; + ks->mii.phy_id = 1, + ks->mii.phy_id_mask = 1; + ks->mii.reg_num_mask = 0xf; + ks->mii.mdio_read = ks_phy_read; + ks->mii.mdio_write = ks_phy_write; + + ks_info(ks, "message enable is %d\n", msg_enable); + /* set the default message enable */ + ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV | + NETIF_MSG_PROBE | + NETIF_MSG_LINK)); + ks_read_config(ks); + + /* simple check for a valid chip being connected to the bus */ + if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) { + ks_err(ks, "failed to read device ID\n"); + err = -ENODEV; + goto err_register; + } + + if 
(ks_read_selftest(ks)) { + ks_err(ks, "failed to read device ID\n"); + err = -ENODEV; + goto err_register; + } + + err = register_netdev(netdev); + if (err) + goto err_register; + + platform_set_drvdata(pdev, netdev); + + ks_soft_reset(ks, GRR_GSR); + ks_hw_init(ks); + ks_disable(ks); + ks_setup(ks); + ks_setup_int(ks); + ks_enable_int(ks); + ks_enable(ks); + memcpy(netdev->dev_addr, ks->mac_addr, 6); + + data = ks_rdreg16(ks, KS_OBCR); + ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA); + + /** + * If you want to use the default MAC addr, + * comment out the 2 functions below. + */ + + random_ether_addr(netdev->dev_addr); + ks_set_mac(ks, netdev->dev_addr); + + id = ks_rdreg16(ks, KS_CIDER); + + printk(KERN_INFO DRV_NAME + " Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n", + (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7); + return 0; + +err_register: +err_get_irq: + iounmap(ks->hw_addr_cmd); +err_ioremap1: + iounmap(ks->hw_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + release_mem_region(io_c->start, resource_size(io_c)); +err_mem_region1: + release_mem_region(io_d->start, resource_size(io_d)); +err_mem_region: + return err; +} + +static int __devexit ks8851_remove(struct platform_device *pdev) +{ + struct net_device *netdev = platform_get_drvdata(pdev); + struct ks_net *ks = netdev_priv(netdev); + struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + unregister_netdev(netdev); + iounmap(ks->hw_addr); + free_netdev(netdev); + release_mem_region(iomem->start, resource_size(iomem)); + platform_set_drvdata(pdev, NULL); + return 0; + +} + +static struct platform_driver ks8851_platform_driver = { + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + }, + .probe = ks8851_probe, + .remove = __devexit_p(ks8851_remove), +}; + +static int __init ks8851_init(void) +{ + return platform_driver_register(&ks8851_platform_driver); +} + +static void __exit ks8851_exit(void) +{ + platform_driver_unregister(&ks8851_platform_driver); +} + +module_init(ks8851_init); +module_exit(ks8851_exit); + +MODULE_DESCRIPTION("KS8851 MLL Network driver"); +MODULE_AUTHOR("David Choi <david.choi@micrel.com>"); +MODULE_LICENSE("GPL"); +module_param_named(message, msg_enable, int, 0); +MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)"); + diff --git a/drivers/net/meth.c b/drivers/net/meth.c index 92ceb689b4d..2af81735386 100644 --- a/drivers/net/meth.c +++ b/drivers/net/meth.c @@ -828,7 +828,7 @@ static int __exit meth_remove(struct platform_device *pdev) static struct platform_driver meth_driver = { .probe = meth_probe, - .remove = __devexit_p(meth_remove), + .remove = __exit_p(meth_remove), .driver = { .name = "meth", .owner = THIS_MODULE, diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index b5aa974827e..9b9eab10770 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c @@ -1714,7 +1714,7 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) /* 4 fragments per cmd des */ no_of_desc = (frag_count + 3) >> 2; - if (unlikely(no_of_desc + 2) > netxen_tx_avail(tx_ring)) { + if (unlikely(no_of_desc + 2 > netxen_tx_avail(tx_ring))) { netif_stop_queue(netdev); return NETDEV_TX_BUSY; } diff --git a/drivers/net/pasemi_mac_ethtool.c b/drivers/net/pasemi_mac_ethtool.c index 064a4fe1dd9..28a86224879 100644 --- a/drivers/net/pasemi_mac_ethtool.c +++ b/drivers/net/pasemi_mac_ethtool.c @@ -71,6 +71,9 @@ pasemi_mac_ethtool_get_settings(struct net_device *netdev, struct 
pasemi_mac *mac = netdev_priv(netdev); struct phy_device *phydev = mac->phydev; + if (!phydev) + return -EOPNOTSUPP; + return phy_ethtool_gset(phydev, cmd); } diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c index 474876c879c..bd3447f0490 100644 --- a/drivers/net/pcmcia/pcnet_cs.c +++ b/drivers/net/pcmcia/pcnet_cs.c @@ -1754,14 +1754,14 @@ static struct pcmcia_device_id pcnet_ids[] = { PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"), PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"), PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"), - PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "DP83903.cis"), - PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "DP83903.cis"), - PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "DP83903.cis"), + PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"), + PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"), + PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "cis/DP83903.cis"), PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"), PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "PE520.cis"), - PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "NE2K.cis"), + PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"), PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "PE-200.cis"), - PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "tamarack.cis"), + PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "cis/tamarack.cis"), PCMCIA_DEVICE_PROD_ID12("Ethernet", "CF Size PC Card", 0x00b2e941, 0x43ac239b), PCMCIA_DEVICE_PROD_ID123("Fast Ethernet", "CF Size PC Card", "1.0", 0xb4be14e3, 0x43ac239b, 0x0877b627), diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c index cc394d07375..5910df60c93 100644 --- a/drivers/net/pppol2tp.c +++ b/drivers/net/pppol2tp.c @@ -2179,7 +2179,7 @@ static int pppol2tp_session_setsockopt(struct sock *sk, * session or the special tunnel type. */ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, int optlen) + char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; struct pppol2tp_session *session = sk->sk_user_data; diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h index a9845a2f243..3ec6e85587a 100644 --- a/drivers/net/qlge/qlge.h +++ b/drivers/net/qlge/qlge.h @@ -9,6 +9,7 @@ #include <linux/pci.h> #include <linux/netdevice.h> +#include <linux/rtnetlink.h> /* * General definitions... @@ -135,9 +136,9 @@ enum { RST_FO_TFO = (1 << 0), RST_FO_RR_MASK = 0x00060000, RST_FO_RR_CQ_CAM = 0x00000000, - RST_FO_RR_DROP = 0x00000001, - RST_FO_RR_DQ = 0x00000002, - RST_FO_RR_RCV_FUNC_CQ = 0x00000003, + RST_FO_RR_DROP = 0x00000002, + RST_FO_RR_DQ = 0x00000004, + RST_FO_RR_RCV_FUNC_CQ = 0x00000006, RST_FO_FRB = (1 << 12), RST_FO_MOP = (1 << 13), RST_FO_REG = (1 << 14), @@ -1381,15 +1382,15 @@ struct intr_context { /* adapter flags definitions. */ enum { - QL_ADAPTER_UP = (1 << 0), /* Adapter has been brought up. 
*/ - QL_LEGACY_ENABLED = (1 << 3), - QL_MSI_ENABLED = (1 << 3), - QL_MSIX_ENABLED = (1 << 4), - QL_DMA64 = (1 << 5), - QL_PROMISCUOUS = (1 << 6), - QL_ALLMULTI = (1 << 7), - QL_PORT_CFG = (1 << 8), - QL_CAM_RT_SET = (1 << 9), + QL_ADAPTER_UP = 0, /* Adapter has been brought up. */ + QL_LEGACY_ENABLED = 1, + QL_MSI_ENABLED = 2, + QL_MSIX_ENABLED = 3, + QL_DMA64 = 4, + QL_PROMISCUOUS = 5, + QL_ALLMULTI = 6, + QL_PORT_CFG = 7, + QL_CAM_RT_SET = 8, }; /* link_status bit definitions */ @@ -1477,7 +1478,6 @@ struct ql_adapter { u32 mailbox_in; u32 mailbox_out; struct mbox_params idc_mbc; - struct mutex mpi_mutex; int tx_ring_size; int rx_ring_size; diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c index 68f9bd280f8..52073946bce 100644 --- a/drivers/net/qlge/qlge_ethtool.c +++ b/drivers/net/qlge/qlge_ethtool.c @@ -45,7 +45,6 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev) if (!netif_running(qdev->ndev)) return status; - spin_lock(&qdev->hw_lock); /* Skip the default queue, and update the outbound handler * queues if they changed. */ @@ -92,7 +91,6 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev) } } exit: - spin_unlock(&qdev->hw_lock); return status; } diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c index 7783c5db81d..61680715cde 100644 --- a/drivers/net/qlge/qlge_main.c +++ b/drivers/net/qlge/qlge_main.c @@ -34,7 +34,6 @@ #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/skbuff.h> -#include <linux/rtnetlink.h> #include <linux/if_vlan.h> #include <linux/delay.h> #include <linux/mm.h> @@ -1926,12 +1925,10 @@ static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid) status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); if (status) return; - spin_lock(&qdev->hw_lock); if (ql_set_mac_addr_reg (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n"); } - spin_unlock(&qdev->hw_lock); ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); } @@ -1945,12 +1942,10 @@ static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid) if (status) return; - spin_lock(&qdev->hw_lock); if (ql_set_mac_addr_reg (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n"); } - spin_unlock(&qdev->hw_lock); ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); } @@ -2001,15 +1996,17 @@ static irqreturn_t qlge_isr(int irq, void *dev_id) /* * Check MPI processor activity. */ - if (var & STS_PI) { + if ((var & STS_PI) && + (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) { /* * We've got an async event or mailbox completion. * Handle it and clear the source of the interrupt. */ QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n"); ql_disable_completion_interrupt(qdev, intr_context->intr); - queue_delayed_work_on(smp_processor_id(), qdev->workqueue, - &qdev->mpi_work, 0); + ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); + queue_delayed_work_on(smp_processor_id(), + qdev->workqueue, &qdev->mpi_work, 0); work_done++; } @@ -3142,14 +3139,14 @@ static int ql_route_initialize(struct ql_adapter *qdev) { int status = 0; - status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); + /* Clear all the entries in the routing table. */ + status = ql_clear_routing_entries(qdev); if (status) return status; - /* Clear all the entries in the routing table. 
*/ - status = ql_clear_routing_entries(qdev); + status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); if (status) - goto exit; + return status; status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1); if (status) { @@ -3380,12 +3377,10 @@ static int ql_adapter_down(struct ql_adapter *qdev) ql_free_rx_buffers(qdev); - spin_lock(&qdev->hw_lock); status = ql_adapter_reset(qdev); if (status) QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n", qdev->func); - spin_unlock(&qdev->hw_lock); return status; } @@ -3587,7 +3582,6 @@ static void qlge_set_multicast_list(struct net_device *ndev) status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); if (status) return; - spin_lock(&qdev->hw_lock); /* * Set or clear promiscuous mode if a * transition is taking place. @@ -3664,7 +3658,6 @@ static void qlge_set_multicast_list(struct net_device *ndev) } } exit: - spin_unlock(&qdev->hw_lock); ql_sem_unlock(qdev, SEM_RT_IDX_MASK); } @@ -3684,10 +3677,8 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p) status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); if (status) return status; - spin_lock(&qdev->hw_lock); status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr, MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ); - spin_unlock(&qdev->hw_lock); if (status) QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n"); ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); @@ -3705,7 +3696,7 @@ static void ql_asic_reset_work(struct work_struct *work) struct ql_adapter *qdev = container_of(work, struct ql_adapter, asic_reset_work.work); int status; - + rtnl_lock(); status = ql_adapter_down(qdev); if (status) goto error; @@ -3713,12 +3704,12 @@ static void ql_asic_reset_work(struct work_struct *work) status = ql_adapter_up(qdev); if (status) goto error; - + rtnl_unlock(); return; error: QPRINTK(qdev, IFUP, ALERT, "Driver up/down cycle failed, closing device\n"); - rtnl_lock(); + set_bit(QL_ADAPTER_UP, &qdev->flags); dev_close(qdev->ndev); rtnl_unlock(); @@ -3834,11 +3825,14 @@ static int __devinit ql_init_device(struct pci_dev *pdev, return err; } + qdev->ndev = ndev; + qdev->pdev = pdev; + pci_set_drvdata(pdev, ndev); pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); if (pos <= 0) { dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, " "aborting.\n"); - goto err_out; + return pos; } else { pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16); val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN; @@ -3851,7 +3845,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev, err = pci_request_regions(pdev, DRV_NAME); if (err) { dev_err(&pdev->dev, "PCI region request failed.\n"); - goto err_out; + return err; } pci_set_master(pdev); @@ -3869,7 +3863,6 @@ static int __devinit ql_init_device(struct pci_dev *pdev, goto err_out; } - pci_set_drvdata(pdev, ndev); qdev->reg_base = ioremap_nocache(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1)); @@ -3889,8 +3882,6 @@ static int __devinit ql_init_device(struct pci_dev *pdev, goto err_out; } - qdev->ndev = ndev; - qdev->pdev = pdev; err = ql_get_board_info(qdev); if (err) { dev_err(&pdev->dev, "Register access failed.\n"); @@ -3930,7 +3921,6 @@ static int __devinit ql_init_device(struct pci_dev *pdev, INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work); INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work); INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work); - mutex_init(&qdev->mpi_mutex); init_completion(&qdev->ide_completion); if (!cards_found) { diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c index 6685bd97da9..c2e43073047 100644 --- 
a/drivers/net/qlge/qlge_mpi.c +++ b/drivers/net/qlge/qlge_mpi.c @@ -472,7 +472,6 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp) { int status, count; - mutex_lock(&qdev->mpi_mutex); /* Begin polled mode for MPI */ ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); @@ -541,7 +540,6 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp) status = -EIO; } end: - mutex_unlock(&qdev->mpi_mutex); /* End polled mode for MPI */ ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); return status; @@ -776,7 +774,9 @@ static int ql_idc_wait(struct ql_adapter *qdev) static int ql_set_port_cfg(struct ql_adapter *qdev) { int status; + rtnl_lock(); status = ql_mb_set_port_cfg(qdev); + rtnl_unlock(); if (status) return status; status = ql_idc_wait(qdev); @@ -797,7 +797,9 @@ void ql_mpi_port_cfg_work(struct work_struct *work) container_of(work, struct ql_adapter, mpi_port_cfg_work.work); int status; + rtnl_lock(); status = ql_mb_get_port_cfg(qdev); + rtnl_unlock(); if (status) { QPRINTK(qdev, DRV, ERR, "Bug: Failed to get port config data.\n"); @@ -855,7 +857,9 @@ void ql_mpi_idc_work(struct work_struct *work) * needs to be set. * */ set_bit(QL_CAM_RT_SET, &qdev->flags); + rtnl_lock(); status = ql_mb_idc_ack(qdev); + rtnl_unlock(); if (status) { QPRINTK(qdev, DRV, ERR, "Bug: No pending IDC!\n"); @@ -871,7 +875,7 @@ void ql_mpi_work(struct work_struct *work) struct mbox_params *mbcp = &mbc; int err = 0; - mutex_lock(&qdev->mpi_mutex); + rtnl_lock(); while (ql_read32(qdev, STS) & STS_PI) { memset(mbcp, 0, sizeof(struct mbox_params)); @@ -884,7 +888,7 @@ void ql_mpi_work(struct work_struct *work) break; } - mutex_unlock(&qdev->mpi_mutex); + rtnl_unlock(); ql_enable_completion_interrupt(qdev, 0); } diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c index ecf3279fbef..f4dfd1f679a 100644 --- a/drivers/net/sgiseeq.c +++ b/drivers/net/sgiseeq.c @@ -826,7 +826,7 @@ static int __exit sgiseeq_remove(struct platform_device *pdev) static struct platform_driver sgiseeq_driver = { .probe = sgiseeq_probe, - .remove = __devexit_p(sgiseeq_remove), + .remove = __exit_p(sgiseeq_remove), .driver = { .name = "sgiseeq", .owner = THIS_MODULE, diff --git a/drivers/net/skge.c b/drivers/net/skge.c index 55bad408196..01f6811f132 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c @@ -3935,11 +3935,14 @@ static int __devinit skge_probe(struct pci_dev *pdev, #endif err = -ENOMEM; - hw = kzalloc(sizeof(*hw), GFP_KERNEL); + /* space for skge@pci:0000:04:00.0 */ + hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:" ) + + strlen(pci_name(pdev)) + 1, GFP_KERNEL); if (!hw) { dev_err(&pdev->dev, "cannot allocate hardware struct\n"); goto err_out_free_regions; } + sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev)); hw->pdev = pdev; spin_lock_init(&hw->hw_lock); @@ -3974,7 +3977,7 @@ static int __devinit skge_probe(struct pci_dev *pdev, goto err_out_free_netdev; } - err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, dev->name, hw); + err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, hw->irq_name, hw); if (err) { dev_err(&pdev->dev, "%s: cannot assign irq %d\n", dev->name, pdev->irq); @@ -3982,14 +3985,17 @@ static int __devinit skge_probe(struct pci_dev *pdev, } skge_show_addr(dev); - if (hw->ports > 1 && (dev1 = skge_devinit(hw, 1, using_dac))) { - if (register_netdev(dev1) == 0) + if (hw->ports > 1) { + dev1 = skge_devinit(hw, 1, using_dac); + if (dev1 && register_netdev(dev1) == 0) skge_show_addr(dev1); else { /* Failure to register second 
port need not be fatal */ dev_warn(&pdev->dev, "register of second port failed\n"); hw->dev[1] = NULL; - free_netdev(dev1); + hw->ports = 1; + if (dev1) + free_netdev(dev1); } } pci_set_drvdata(pdev, hw); diff --git a/drivers/net/skge.h b/drivers/net/skge.h index 17caccbb768..831de1b6e96 100644 --- a/drivers/net/skge.h +++ b/drivers/net/skge.h @@ -2423,6 +2423,8 @@ struct skge_hw { u16 phy_addr; spinlock_t phy_lock; struct tasklet_struct phy_task; + + char irq_name[0]; /* skge@pci:000:04:00.0 */ }; enum pause_control { diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index ef1165718dd..2ab5c39f33c 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -4487,13 +4487,16 @@ static int __devinit sky2_probe(struct pci_dev *pdev, wol_default = device_may_wakeup(&pdev->dev) ? WAKE_MAGIC : 0; err = -ENOMEM; - hw = kzalloc(sizeof(*hw), GFP_KERNEL); + + hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:") + + strlen(pci_name(pdev)) + 1, GFP_KERNEL); if (!hw) { dev_err(&pdev->dev, "cannot allocate hardware struct\n"); goto err_out_free_regions; } hw->pdev = pdev; + sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev)); hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); if (!hw->regs) { @@ -4539,7 +4542,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev, err = request_irq(pdev->irq, sky2_intr, (hw->flags & SKY2_HW_USE_MSI) ? 0 : IRQF_SHARED, - dev->name, hw); + hw->irq_name, hw); if (err) { dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq); goto err_out_unregister; diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h index e0f23a10104..ed54129698b 100644 --- a/drivers/net/sky2.h +++ b/drivers/net/sky2.h @@ -2085,6 +2085,8 @@ struct sky2_hw { struct timer_list watchdog_timer; struct work_struct restart_work; wait_queue_head_t msi_wait; + + char irq_name[0]; }; static inline int sky2_is_copper(const struct sky2_hw *hw) diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index f09bc5dfe8b..ba5d3fe753b 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -902,11 +902,12 @@ static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg) struct tg3 *tp = bp->priv; u32 val; - if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED) - return -EAGAIN; + spin_lock_bh(&tp->lock); if (tg3_readphy(tp, reg, &val)) - return -EIO; + val = -EIO; + + spin_unlock_bh(&tp->lock); return val; } @@ -914,14 +915,16 @@ static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg) static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val) { struct tg3 *tp = bp->priv; + u32 ret = 0; - if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED) - return -EAGAIN; + spin_lock_bh(&tp->lock); if (tg3_writephy(tp, reg, val)) - return -EIO; + ret = -EIO; - return 0; + spin_unlock_bh(&tp->lock); + + return ret; } static int tg3_mdio_reset(struct mii_bus *bp) @@ -1011,12 +1014,6 @@ static void tg3_mdio_config_5785(struct tg3 *tp) static void tg3_mdio_start(struct tg3 *tp) { - if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) { - mutex_lock(&tp->mdio_bus->mdio_lock); - tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED; - mutex_unlock(&tp->mdio_bus->mdio_lock); - } - tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL; tw32_f(MAC_MI_MODE, tp->mi_mode); udelay(80); @@ -1041,15 +1038,6 @@ static void tg3_mdio_start(struct tg3 *tp) tg3_mdio_config_5785(tp); } -static void tg3_mdio_stop(struct tg3 *tp) -{ - if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) { - mutex_lock(&tp->mdio_bus->mdio_lock); - tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED; - mutex_unlock(&tp->mdio_bus->mdio_lock); - } -} - static int tg3_mdio_init(struct tg3 
*tp) { int i; @@ -1141,7 +1129,6 @@ static void tg3_mdio_fini(struct tg3 *tp) tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED; mdiobus_unregister(tp->mdio_bus); mdiobus_free(tp->mdio_bus); - tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED; } } @@ -1363,7 +1350,7 @@ static void tg3_adjust_link(struct net_device *dev) struct tg3 *tp = netdev_priv(dev); struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR]; - spin_lock(&tp->lock); + spin_lock_bh(&tp->lock); mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); @@ -1431,7 +1418,7 @@ static void tg3_adjust_link(struct net_device *dev) tp->link_config.active_speed = phydev->speed; tp->link_config.active_duplex = phydev->duplex; - spin_unlock(&tp->lock); + spin_unlock_bh(&tp->lock); if (linkmesg) tg3_link_report(tp); @@ -6392,8 +6379,6 @@ static int tg3_chip_reset(struct tg3 *tp) tg3_nvram_lock(tp); - tg3_mdio_stop(tp); - tg3_ape_lock(tp, TG3_APE_LOCK_GRC); /* No matching tg3_nvram_unlock() after this because @@ -8698,6 +8683,8 @@ static int tg3_close(struct net_device *dev) del_timer_sync(&tp->timer); + tg3_phy_stop(tp); + tg3_full_lock(tp, 1); #if 0 tg3_dump_state(tp); diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h index 82b45d8797b..bab7940158e 100644 --- a/drivers/net/tg3.h +++ b/drivers/net/tg3.h @@ -2412,7 +2412,6 @@ struct ring_info { struct tx_ring_info { struct sk_buff *skb; - u32 prev_vlan_tag; }; struct tg3_config_info { @@ -2749,7 +2748,6 @@ struct tg3 { #define TG3_FLG3_5701_DMA_BUG 0x00000008 #define TG3_FLG3_USE_PHYLIB 0x00000010 #define TG3_FLG3_MDIOBUS_INITED 0x00000020 -#define TG3_FLG3_MDIOBUS_PAUSED 0x00000040 #define TG3_FLG3_PHY_CONNECTED 0x00000080 #define TG3_FLG3_RGMII_STD_IBND_DISABLE 0x00000100 #define TG3_FLG3_RGMII_EXT_IBND_RX_EN 0x00000200 diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index d032bba9bc4..0caa8008c51 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c @@ -418,6 +418,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags) goto halt_fail_and_release; } memcpy(net->dev_addr, bp, ETH_ALEN); + memcpy(net->perm_addr, bp, ETH_ALEN); /* set a nonzero filter to enable data transfers */ memset(u.set, 0, sizeof *u.set); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index d445845f277..8d009760277 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -948,7 +948,7 @@ free: return err; } -static void virtnet_remove(struct virtio_device *vdev) +static void __devexit virtnet_remove(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; struct sk_buff *skb; diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index 49ea9c92b7e..d7a764a2fc1 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -31,13 +31,12 @@ config STRIP ---help--- Say Y if you have a Metricom radio and intend to use Starmode Radio IP. STRIP is a radio protocol developed for the MosquitoNet project - (on the WWW at <http://mosquitonet.stanford.edu/>) to send Internet - traffic using Metricom radios. Metricom radios are small, battery - powered, 100kbit/sec packet radio transceivers, about the size and - weight of a cellular telephone. (You may also have heard them called - "Metricom modems" but we avoid the term "modem" because it misleads - many people into thinking that you can plug a Metricom modem into a - phone line and use it as a modem.) + to send Internet traffic using Metricom radios. 
Metricom radios are + small, battery powered, 100kbit/sec packet radio transceivers, about + the size and weight of a cellular telephone. (You may also have heard + them called "Metricom modems" but we avoid the term "modem" because + it misleads many people into thinking that you can plug a Metricom + modem into a phone line and use it as a modem.) You can use STRIP on any Linux machine with a serial port, although it is obviously most useful for people with laptop computers. If you diff --git a/drivers/net/wireless/ath/ar9170/phy.c b/drivers/net/wireless/ath/ar9170/phy.c index b3e5cf3735b..dbd488da18b 100644 --- a/drivers/net/wireless/ath/ar9170/phy.c +++ b/drivers/net/wireless/ath/ar9170/phy.c @@ -1141,7 +1141,8 @@ static int ar9170_set_freq_cal_data(struct ar9170 *ar, u8 vpds[2][AR5416_PD_GAIN_ICEPTS]; u8 pwrs[2][AR5416_PD_GAIN_ICEPTS]; int chain, idx, i; - u8 f; + u32 phy_data = 0; + u8 f, tmp; switch (channel->band) { case IEEE80211_BAND_2GHZ: @@ -1208,9 +1209,6 @@ static int ar9170_set_freq_cal_data(struct ar9170 *ar, } for (i = 0; i < 76; i++) { - u32 phy_data; - u8 tmp; - if (i < 25) { tmp = ar9170_interpolate_val(i, &pwrs[0][0], &vpds[0][0]); diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c index e96091b3149..9c1397996e0 100644 --- a/drivers/net/wireless/b43/pio.c +++ b/drivers/net/wireless/b43/pio.c @@ -340,10 +340,15 @@ static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q, q->mmio_base + B43_PIO_TXDATA, sizeof(u16)); if (data_len & 1) { + u8 tail[2] = { 0, }; + /* Write the last byte. */ ctl &= ~B43_PIO_TXCTL_WRITEHI; b43_piotx_write16(q, B43_PIO_TXCTL, ctl); - b43_piotx_write16(q, B43_PIO_TXDATA, data[data_len - 1]); + tail[0] = data[data_len - 1]; + ssb_block_write(dev->dev, tail, 2, + q->mmio_base + B43_PIO_TXDATA, + sizeof(u16)); } return ctl; @@ -386,26 +391,31 @@ static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q, q->mmio_base + B43_PIO8_TXDATA, sizeof(u32)); if (data_len & 3) { - u32 value = 0; + u8 tail[4] = { 0, }; /* Write the last few bytes. */ ctl &= ~(B43_PIO8_TXCTL_8_15 | B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_24_31); - data = &(data[data_len - 1]); switch (data_len & 3) { case 3: - ctl |= B43_PIO8_TXCTL_16_23; - value |= (u32)(*data) << 16; - data--; + ctl |= B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_8_15; + tail[0] = data[data_len - 3]; + tail[1] = data[data_len - 2]; + tail[2] = data[data_len - 1]; + break; case 2: ctl |= B43_PIO8_TXCTL_8_15; - value |= (u32)(*data) << 8; - data--; + tail[0] = data[data_len - 2]; + tail[1] = data[data_len - 1]; + break; case 1: - value |= (u32)(*data); + tail[0] = data[data_len - 1]; + break; } b43_piotx_write32(q, B43_PIO8_TXCTL, ctl); - b43_piotx_write32(q, B43_PIO8_TXDATA, value); + ssb_block_write(dev->dev, tail, 4, + q->mmio_base + B43_PIO8_TXDATA, + sizeof(u32)); } return ctl; @@ -693,21 +703,25 @@ data_ready: q->mmio_base + B43_PIO8_RXDATA, sizeof(u32)); if (len & 3) { - u32 value; - char *data; + u8 tail[4] = { 0, }; /* Read the last few bytes. 
*/ - value = b43_piorx_read32(q, B43_PIO8_RXDATA); - data = &(skb->data[len + padding - 1]); + ssb_block_read(dev->dev, tail, 4, + q->mmio_base + B43_PIO8_RXDATA, + sizeof(u32)); switch (len & 3) { case 3: - *data = (value >> 16); - data--; + skb->data[len + padding - 3] = tail[0]; + skb->data[len + padding - 2] = tail[1]; + skb->data[len + padding - 1] = tail[2]; + break; case 2: - *data = (value >> 8); - data--; + skb->data[len + padding - 2] = tail[0]; + skb->data[len + padding - 1] = tail[1]; + break; case 1: - *data = value; + skb->data[len + padding - 1] = tail[0]; + break; } } } else { @@ -715,11 +729,13 @@ data_ready: q->mmio_base + B43_PIO_RXDATA, sizeof(u16)); if (len & 1) { - u16 value; + u8 tail[2] = { 0, }; /* Read the last byte. */ - value = b43_piorx_read16(q, B43_PIO_RXDATA); - skb->data[len + padding - 1] = value; + ssb_block_read(dev->dev, tail, 2, + q->mmio_base + B43_PIO_RXDATA, + sizeof(u16)); + skb->data[len + padding - 1] = tail[0]; } } diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 896f532182f..38cfd79e059 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -631,6 +631,9 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw, data->beacon_int = 1024 * info->beacon_int / 1000 * HZ / 1000; if (WARN_ON(!data->beacon_int)) data->beacon_int = 1; + if (data->started) + mod_timer(&data->beacon_timer, + jiffies + data->beacon_int); } if (changed & BSS_CHANGED_ERP_CTS_PROT) { diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c index 1cbd9b4a3ef..b8f5ee33445 100644 --- a/drivers/net/wireless/rt2x00/rt73usb.c +++ b/drivers/net/wireless/rt2x00/rt73usb.c @@ -2381,6 +2381,7 @@ static struct usb_device_id rt73usb_device_table[] = { /* Huawei-3Com */ { USB_DEVICE(0x1472, 0x0009), USB_DEVICE_DATA(&rt73usb_ops) }, /* Hercules */ + { USB_DEVICE(0x06f8, 0xe002), USB_DEVICE_DATA(&rt73usb_ops) }, { USB_DEVICE(0x06f8, 0xe010), USB_DEVICE_DATA(&rt73usb_ops) }, { USB_DEVICE(0x06f8, 0xe020), USB_DEVICE_DATA(&rt73usb_ops) }, /* Linksys */ diff --git a/drivers/pcmcia/sa1100_assabet.c b/drivers/pcmcia/sa1100_assabet.c index f424146a2bc..ac8aa09ba0d 100644 --- a/drivers/pcmcia/sa1100_assabet.c +++ b/drivers/pcmcia/sa1100_assabet.c @@ -130,7 +130,7 @@ static struct pcmcia_low_level assabet_pcmcia_ops = { .socket_suspend = assabet_pcmcia_socket_suspend, }; -int __init pcmcia_assabet_init(struct device *dev) +int pcmcia_assabet_init(struct device *dev) { int ret = -ENODEV; diff --git a/drivers/pcmcia/sa1100_neponset.c b/drivers/pcmcia/sa1100_neponset.c index 4c41e86ccff..0c76d337815 100644 --- a/drivers/pcmcia/sa1100_neponset.c +++ b/drivers/pcmcia/sa1100_neponset.c @@ -123,7 +123,7 @@ static struct pcmcia_low_level neponset_pcmcia_ops = { .socket_suspend = sa1111_pcmcia_socket_suspend, }; -int __init pcmcia_neponset_init(struct sa1111_dev *sadev) +int pcmcia_neponset_init(struct sa1111_dev *sadev) { int ret = -ENODEV; diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index afdbdaaf80c..a2a742c8ff7 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c @@ -1211,15 +1211,6 @@ static int sony_nc_add(struct acpi_device *device) } } - /* try to _INI the device if such method exists (ACPI spec 3.0-6.5.1 - * should be respected as we already checked for the device presence above */ - if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, METHOD_NAME__INI, &handle))) { - dprintk("Invoking 
_INI\n"); - if (ACPI_FAILURE(acpi_evaluate_object(sony_nc_acpi_handle, METHOD_NAME__INI, - NULL, NULL))) - dprintk("_INI Method failed\n"); - } - if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON", &handle))) { if (acpi_callsetfunc(sony_nc_acpi_handle, "ECON", 1, NULL)) @@ -1399,27 +1390,20 @@ struct sonypi_eventtypes { struct sonypi_event *events; }; -struct device_ctrl { +struct sony_pic_dev { + struct acpi_device *acpi_dev; + struct sony_pic_irq *cur_irq; + struct sony_pic_ioport *cur_ioport; + struct list_head interrupts; + struct list_head ioports; + struct mutex lock; + struct sonypi_eventtypes *event_types; + int (*handle_irq)(const u8, const u8); int model; - int (*handle_irq)(const u8, const u8); u16 evport_offset; - u8 has_camera; - u8 has_bluetooth; - u8 has_wwan; - struct sonypi_eventtypes *event_types; -}; - -struct sony_pic_dev { - struct device_ctrl *control; - struct acpi_device *acpi_dev; - struct sony_pic_irq *cur_irq; - struct sony_pic_ioport *cur_ioport; - struct list_head interrupts; - struct list_head ioports; - struct mutex lock; - u8 camera_power; - u8 bluetooth_power; - u8 wwan_power; + u8 camera_power; + u8 bluetooth_power; + u8 wwan_power; }; static struct sony_pic_dev spic_dev = { @@ -1427,6 +1411,8 @@ static struct sony_pic_dev spic_dev = { .ioports = LIST_HEAD_INIT(spic_dev.ioports), }; +static int spic_drv_registered; + /* Event masks */ #define SONYPI_JOGGER_MASK 0x00000001 #define SONYPI_CAPTURE_MASK 0x00000002 @@ -1724,27 +1710,6 @@ static int type3_handle_irq(const u8 data_mask, const u8 ev) return 1; } -static struct device_ctrl spic_types[] = { - { - .model = SONYPI_DEVICE_TYPE1, - .handle_irq = NULL, - .evport_offset = SONYPI_TYPE1_OFFSET, - .event_types = type1_events, - }, - { - .model = SONYPI_DEVICE_TYPE2, - .handle_irq = NULL, - .evport_offset = SONYPI_TYPE2_OFFSET, - .event_types = type2_events, - }, - { - .model = SONYPI_DEVICE_TYPE3, - .handle_irq = type3_handle_irq, - .evport_offset = SONYPI_TYPE3_OFFSET, - .event_types = type3_events, - }, -}; - static void sony_pic_detect_device_type(struct sony_pic_dev *dev) { struct pci_dev *pcidev; @@ -1752,48 +1717,63 @@ static void sony_pic_detect_device_type(struct sony_pic_dev *dev) pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, NULL); if (pcidev) { - dev->control = &spic_types[0]; + dev->model = SONYPI_DEVICE_TYPE1; + dev->evport_offset = SONYPI_TYPE1_OFFSET; + dev->event_types = type1_events; goto out; } pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, NULL); if (pcidev) { - dev->control = &spic_types[2]; + dev->model = SONYPI_DEVICE_TYPE2; + dev->evport_offset = SONYPI_TYPE2_OFFSET; + dev->event_types = type2_events; goto out; } pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, NULL); if (pcidev) { - dev->control = &spic_types[2]; + dev->model = SONYPI_DEVICE_TYPE3; + dev->handle_irq = type3_handle_irq; + dev->evport_offset = SONYPI_TYPE3_OFFSET; + dev->event_types = type3_events; goto out; } pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4, NULL); if (pcidev) { - dev->control = &spic_types[2]; + dev->model = SONYPI_DEVICE_TYPE3; + dev->handle_irq = type3_handle_irq; + dev->evport_offset = SONYPI_TYPE3_OFFSET; + dev->event_types = type3_events; goto out; } pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_1, NULL); if (pcidev) { - dev->control = &spic_types[2]; + dev->model = SONYPI_DEVICE_TYPE3; + dev->handle_irq = type3_handle_irq; + dev->evport_offset = 
SONYPI_TYPE3_OFFSET; + dev->event_types = type3_events; goto out; } /* default */ - dev->control = &spic_types[1]; + dev->model = SONYPI_DEVICE_TYPE2; + dev->evport_offset = SONYPI_TYPE2_OFFSET; + dev->event_types = type2_events; out: if (pcidev) pci_dev_put(pcidev); printk(KERN_INFO DRV_PFX "detected Type%d model\n", - dev->control->model == SONYPI_DEVICE_TYPE1 ? 1 : - dev->control->model == SONYPI_DEVICE_TYPE2 ? 2 : 3); + dev->model == SONYPI_DEVICE_TYPE1 ? 1 : + dev->model == SONYPI_DEVICE_TYPE2 ? 2 : 3); } /* camera tests and poweron/poweroff */ @@ -2566,7 +2546,7 @@ static int sony_pic_enable(struct acpi_device *device, buffer.pointer = resource; /* setup Type 1 resources */ - if (spic_dev.control->model == SONYPI_DEVICE_TYPE1) { + if (spic_dev.model == SONYPI_DEVICE_TYPE1) { /* setup io resources */ resource->res1.type = ACPI_RESOURCE_TYPE_IO; @@ -2649,29 +2629,28 @@ static irqreturn_t sony_pic_irq(int irq, void *dev_id) data_mask = inb_p(dev->cur_ioport->io2.minimum); else data_mask = inb_p(dev->cur_ioport->io1.minimum + - dev->control->evport_offset); + dev->evport_offset); dprintk("event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n", ev, data_mask, dev->cur_ioport->io1.minimum, - dev->control->evport_offset); + dev->evport_offset); if (ev == 0x00 || ev == 0xff) return IRQ_HANDLED; - for (i = 0; dev->control->event_types[i].mask; i++) { + for (i = 0; dev->event_types[i].mask; i++) { - if ((data_mask & dev->control->event_types[i].data) != - dev->control->event_types[i].data) + if ((data_mask & dev->event_types[i].data) != + dev->event_types[i].data) continue; - if (!(mask & dev->control->event_types[i].mask)) + if (!(mask & dev->event_types[i].mask)) continue; - for (j = 0; dev->control->event_types[i].events[j].event; j++) { - if (ev == dev->control->event_types[i].events[j].data) { + for (j = 0; dev->event_types[i].events[j].event; j++) { + if (ev == dev->event_types[i].events[j].data) { device_event = - dev->control-> - event_types[i].events[j].event; + dev->event_types[i].events[j].event; goto found; } } @@ -2679,13 +2658,12 @@ static irqreturn_t sony_pic_irq(int irq, void *dev_id) /* Still not able to decode the event try to pass * it over to the minidriver */ - if (dev->control->handle_irq && - dev->control->handle_irq(data_mask, ev) == 0) + if (dev->handle_irq && dev->handle_irq(data_mask, ev) == 0) return IRQ_HANDLED; dprintk("unknown event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n", ev, data_mask, dev->cur_ioport->io1.minimum, - dev->control->evport_offset); + dev->evport_offset); return IRQ_HANDLED; found: @@ -2816,7 +2794,7 @@ static int sony_pic_add(struct acpi_device *device) /* request IRQ */ list_for_each_entry_reverse(irq, &spic_dev.interrupts, list) { if (!request_irq(irq->irq.interrupts[0], sony_pic_irq, - IRQF_SHARED, "sony-laptop", &spic_dev)) { + IRQF_DISABLED, "sony-laptop", &spic_dev)) { dprintk("IRQ: %d - triggering: %d - " "polarity: %d - shr: %d\n", irq->irq.interrupts[0], @@ -2949,6 +2927,7 @@ static int __init sony_laptop_init(void) "Unable to register SPIC driver."); goto out; } + spic_drv_registered = 1; } result = acpi_bus_register_driver(&sony_nc_driver); @@ -2960,7 +2939,7 @@ static int __init sony_laptop_init(void) return 0; out_unregister_pic: - if (!no_spic) + if (spic_drv_registered) acpi_bus_unregister_driver(&sony_pic_driver); out: return result; @@ -2969,7 +2948,7 @@ out: static void __exit sony_laptop_exit(void) { acpi_bus_unregister_driver(&sony_nc_driver); - if (!no_spic) + if (spic_drv_registered) 
acpi_bus_unregister_driver(&sony_pic_driver); } diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c index 1b78f639ead..76769978285 100644 --- a/drivers/s390/cio/qdio_debug.c +++ b/drivers/s390/cio/qdio_debug.c @@ -125,7 +125,7 @@ static int qstat_seq_open(struct inode *inode, struct file *filp) filp->f_path.dentry->d_inode->i_private); } -static struct file_operations debugfs_fops = { +static const struct file_operations debugfs_fops = { .owner = THIS_MODULE, .open = qstat_seq_open, .read = seq_read, diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c index eff943923c6..968e3c7c263 100644 --- a/drivers/s390/cio/qdio_perf.c +++ b/drivers/s390/cio/qdio_perf.c @@ -84,7 +84,7 @@ static int qdio_perf_seq_open(struct inode *inode, struct file *filp) return single_open(filp, qdio_perf_proc_show, NULL); } -static struct file_operations qdio_perf_proc_fops = { +static const struct file_operations qdio_perf_proc_fops = { .owner = THIS_MODULE, .open = qdio_perf_seq_open, .read = seq_read, diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 0cb049f5cc5..747a5e5c127 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -1317,7 +1317,7 @@ static void sg_rq_end_io(struct request *rq, int uptodate) } } -static struct file_operations sg_fops = { +static const struct file_operations sg_fops = { .owner = THIS_MODULE, .read = sg_read, .write = sg_write, @@ -2194,9 +2194,11 @@ static int sg_proc_seq_show_int(struct seq_file *s, void *v); static int sg_proc_single_open_adio(struct inode *inode, struct file *file); static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer, size_t count, loff_t *off); -static struct file_operations adio_fops = { - /* .owner, .read and .llseek added in sg_proc_init() */ +static const struct file_operations adio_fops = { + .owner = THIS_MODULE, .open = sg_proc_single_open_adio, + .read = seq_read, + .llseek = seq_lseek, .write = sg_proc_write_adio, .release = single_release, }; @@ -2204,23 +2206,32 @@ static struct file_operations adio_fops = { static int sg_proc_single_open_dressz(struct inode *inode, struct file *file); static ssize_t sg_proc_write_dressz(struct file *filp, const char __user *buffer, size_t count, loff_t *off); -static struct file_operations dressz_fops = { +static const struct file_operations dressz_fops = { + .owner = THIS_MODULE, .open = sg_proc_single_open_dressz, + .read = seq_read, + .llseek = seq_lseek, .write = sg_proc_write_dressz, .release = single_release, }; static int sg_proc_seq_show_version(struct seq_file *s, void *v); static int sg_proc_single_open_version(struct inode *inode, struct file *file); -static struct file_operations version_fops = { +static const struct file_operations version_fops = { + .owner = THIS_MODULE, .open = sg_proc_single_open_version, + .read = seq_read, + .llseek = seq_lseek, .release = single_release, }; static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v); static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file); -static struct file_operations devhdr_fops = { +static const struct file_operations devhdr_fops = { + .owner = THIS_MODULE, .open = sg_proc_single_open_devhdr, + .read = seq_read, + .llseek = seq_lseek, .release = single_release, }; @@ -2229,8 +2240,11 @@ static int sg_proc_open_dev(struct inode *inode, struct file *file); static void * dev_seq_start(struct seq_file *s, loff_t *pos); static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos); static void dev_seq_stop(struct seq_file *s, void *v); 
-static struct file_operations dev_fops = { +static const struct file_operations dev_fops = { + .owner = THIS_MODULE, .open = sg_proc_open_dev, + .read = seq_read, + .llseek = seq_lseek, .release = seq_release, }; static const struct seq_operations dev_seq_ops = { @@ -2242,8 +2256,11 @@ static const struct seq_operations dev_seq_ops = { static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v); static int sg_proc_open_devstrs(struct inode *inode, struct file *file); -static struct file_operations devstrs_fops = { +static const struct file_operations devstrs_fops = { + .owner = THIS_MODULE, .open = sg_proc_open_devstrs, + .read = seq_read, + .llseek = seq_lseek, .release = seq_release, }; static const struct seq_operations devstrs_seq_ops = { @@ -2255,8 +2272,11 @@ static const struct seq_operations devstrs_seq_ops = { static int sg_proc_seq_show_debug(struct seq_file *s, void *v); static int sg_proc_open_debug(struct inode *inode, struct file *file); -static struct file_operations debug_fops = { +static const struct file_operations debug_fops = { + .owner = THIS_MODULE, .open = sg_proc_open_debug, + .read = seq_read, + .llseek = seq_lseek, .release = seq_release, }; static const struct seq_operations debug_seq_ops = { @@ -2269,7 +2289,7 @@ static const struct seq_operations debug_seq_ops = { struct sg_proc_leaf { const char * name; - struct file_operations * fops; + const struct file_operations * fops; }; static struct sg_proc_leaf sg_proc_leaf_arr[] = { @@ -2295,9 +2315,6 @@ sg_proc_init(void) for (k = 0; k < num_leaves; ++k) { leaf = &sg_proc_leaf_arr[k]; mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO; - leaf->fops->owner = THIS_MODULE; - leaf->fops->read = seq_read; - leaf->fops->llseek = seq_lseek; proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops); } return 0; diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c index 2209620d234..b1ae774016f 100644 --- a/drivers/serial/8250.c +++ b/drivers/serial/8250.c @@ -64,6 +64,8 @@ static int serial_index(struct uart_port *port) return (serial8250_reg.minor - 64) + port->line; } +static unsigned int skip_txen_test; /* force skip of txen test at init time */ + /* * Debugging. */ @@ -2108,7 +2110,7 @@ static int serial8250_startup(struct uart_port *port) is variable. So, let's just don't test if we receive TX irq. This way, we'll never enable UART_BUG_TXEN. */ - if (up->port.flags & UPF_NO_TXEN_TEST) + if (skip_txen_test || up->port.flags & UPF_NO_TXEN_TEST) goto dont_test_tx_en; /* @@ -3248,6 +3250,9 @@ MODULE_PARM_DESC(share_irqs, "Share IRQs with other non-8250/16x50 devices" module_param(nr_uarts, uint, 0644); MODULE_PARM_DESC(nr_uarts, "Maximum number of UARTs supported. (1-" __MODULE_STRING(CONFIG_SERIAL_8250_NR_UARTS) ")"); +module_param(skip_txen_test, uint, 0644); +MODULE_PARM_DESC(skip_txen_test, "Skip checking for the TXEN bug at init time"); + #ifdef CONFIG_SERIAL_8250_RSA module_param_array(probe_rsa, ulong, &probe_rsa_count, 0444); MODULE_PARM_DESC(probe_rsa, "Probe I/O ports for RSA"); diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig index e70712044a7..e5225725727 100644 --- a/drivers/serial/Kconfig +++ b/drivers/serial/Kconfig @@ -862,7 +862,7 @@ config SERIAL_IMX_CONSOLE config SERIAL_UARTLITE tristate "Xilinx uartlite serial port support" - depends on PPC32 || MICROBLAZE + depends on PPC32 || MICROBLAZE || MFD_TIMBERDALE select SERIAL_CORE help Say Y here if you want to use the Xilinx uartlite serial controller. 
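The sg.c hunks above switch every /proc interface table to a const struct file_operations, so the fields that sg_proc_init() used to patch in at runtime (.owner, .read, .llseek) now have to appear in the static initializer itself, and proc_create() receives a fully populated table. A minimal sketch of that pattern, using hypothetical example_* names rather than the real sg entries:

    /* sketch only: the const-fops + seq_file pattern used in the sg.c hunks */
    #include <linux/fs.h>
    #include <linux/module.h>
    #include <linux/seq_file.h>

    static int example_show(struct seq_file *s, void *v)
    {
            seq_puts(s, "example\n");       /* placeholder payload */
            return 0;
    }

    static int example_open(struct inode *inode, struct file *file)
    {
            return single_open(file, example_show, NULL);
    }

    static const struct file_operations example_fops = {
            .owner   = THIS_MODULE,         /* formerly assigned at runtime */
            .open    = example_open,
            .read    = seq_read,            /* seq_file helpers set statically */
            .llseek  = seq_lseek,
            .release = single_release,
    };

Because the table is const it can no longer be written after compile time, which is exactly why the runtime assignments were dropped from sg_proc_init() in the hunk above.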
diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c index 2d7feecaf49..0028b6f89ce 100644 --- a/drivers/serial/icom.c +++ b/drivers/serial/icom.c @@ -307,7 +307,7 @@ static void stop_processor(struct icom_port *icom_port) if (port < 4) { temp = readl(stop_proc[port].global_control_reg); temp = - (temp & ~start_proc[port].processor_id) | stop_proc[port].processor_id; + (temp & ~start_proc[port].processor_id) | stop_proc[port].processor_id; writel(temp, stop_proc[port].global_control_reg); /* write flush */ @@ -336,7 +336,7 @@ static void start_processor(struct icom_port *icom_port) if (port < 4) { temp = readl(start_proc[port].global_control_reg); temp = - (temp & ~stop_proc[port].processor_id) | start_proc[port].processor_id; + (temp & ~stop_proc[port].processor_id) | start_proc[port].processor_id; writel(temp, start_proc[port].global_control_reg); /* write flush */ @@ -509,8 +509,8 @@ static void load_code(struct icom_port *icom_port) dev_err(&icom_port->adapter->pci_dev->dev,"Port not opertional\n"); } - if (new_page != NULL) - pci_free_consistent(dev, 4096, new_page, temp_pci); + if (new_page != NULL) + pci_free_consistent(dev, 4096, new_page, temp_pci); } static int startup(struct icom_port *icom_port) @@ -1493,15 +1493,15 @@ static int __devinit icom_probe(struct pci_dev *dev, const struct pci_device_id *ent) { int index; - unsigned int command_reg; - int retval; - struct icom_adapter *icom_adapter; - struct icom_port *icom_port; + unsigned int command_reg; + int retval; + struct icom_adapter *icom_adapter; + struct icom_port *icom_port; - retval = pci_enable_device(dev); - if (retval) { + retval = pci_enable_device(dev); + if (retval) { dev_err(&dev->dev, "Device enable FAILED\n"); - return retval; + return retval; } if ( (retval = pci_request_regions(dev, "icom"))) { @@ -1510,23 +1510,23 @@ static int __devinit icom_probe(struct pci_dev *dev, return retval; } - pci_set_master(dev); + pci_set_master(dev); - if ( (retval = pci_read_config_dword(dev, PCI_COMMAND, &command_reg))) { + if ( (retval = pci_read_config_dword(dev, PCI_COMMAND, &command_reg))) { dev_err(&dev->dev, "PCI Config read FAILED\n"); - return retval; - } + return retval; + } pci_write_config_dword(dev, PCI_COMMAND, command_reg | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_PARITY | PCI_COMMAND_SERR); - if (ent->driver_data == ADAPTER_V1) { + if (ent->driver_data == ADAPTER_V1) { pci_write_config_dword(dev, 0x44, 0x8300830A); - } else { + } else { pci_write_config_dword(dev, 0x44, 0x42004200); pci_write_config_dword(dev, 0x48, 0x42004200); - } + } retval = icom_alloc_adapter(&icom_adapter); @@ -1536,10 +1536,10 @@ static int __devinit icom_probe(struct pci_dev *dev, goto probe_exit0; } - icom_adapter->base_addr_pci = pci_resource_start(dev, 0); - icom_adapter->pci_dev = dev; - icom_adapter->version = ent->driver_data; - icom_adapter->subsystem_id = ent->subdevice; + icom_adapter->base_addr_pci = pci_resource_start(dev, 0); + icom_adapter->pci_dev = dev; + icom_adapter->version = ent->driver_data; + icom_adapter->subsystem_id = ent->subdevice; retval = icom_init_ports(icom_adapter); @@ -1548,7 +1548,7 @@ static int __devinit icom_probe(struct pci_dev *dev, goto probe_exit1; } - icom_adapter->base_addr = pci_ioremap_bar(dev, 0); + icom_adapter->base_addr = pci_ioremap_bar(dev, 0); if (!icom_adapter->base_addr) goto probe_exit1; @@ -1562,7 +1562,7 @@ static int __devinit icom_probe(struct pci_dev *dev, retval = icom_load_ports(icom_adapter); - for (index = 0; index < icom_adapter->numb_ports; index++) { + 
for (index = 0; index < icom_adapter->numb_ports; index++) { icom_port = &icom_adapter->port_info[index]; if (icom_port->status == ICOM_PORT_ACTIVE) { @@ -1579,7 +1579,7 @@ static int __devinit icom_probe(struct pci_dev *dev, icom_port->status = ICOM_PORT_OFF; dev_err(&dev->dev, "Device add failed\n"); } else - dev_info(&dev->dev, "Device added\n"); + dev_info(&dev->dev, "Device added\n"); } } @@ -1595,9 +1595,7 @@ probe_exit0: pci_release_regions(dev); pci_disable_device(dev); - return retval; - - + return retval; } static void __devexit icom_remove(struct pci_dev *dev) diff --git a/drivers/serial/sa1100.c b/drivers/serial/sa1100.c index 7f5e2687322..2199d819a98 100644 --- a/drivers/serial/sa1100.c +++ b/drivers/serial/sa1100.c @@ -638,7 +638,7 @@ static void __init sa1100_init_ports(void) PPSR |= PPC_TXD1 | PPC_TXD3; } -void __init sa1100_register_uart_fns(struct sa1100_port_fns *fns) +void __devinit sa1100_register_uart_fns(struct sa1100_port_fns *fns) { if (fns->get_mctrl) sa1100_pops.get_mctrl = fns->get_mctrl; diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c index a3bb49031a7..ff4617e2142 100644 --- a/drivers/serial/serial_cs.c +++ b/drivers/serial/serial_cs.c @@ -873,10 +873,10 @@ static struct pcmcia_device_id serial_ids[] = { PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"), PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"), PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"), - PCMCIA_MFC_DEVICE_CIS_PROD_ID12(1, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "DP83903.cis"), - PCMCIA_MFC_DEVICE_CIS_PROD_ID4(1, "NSC MF LAN/Modem", 0x58fc6056, "DP83903.cis"), + PCMCIA_MFC_DEVICE_CIS_PROD_ID12(1, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"), + PCMCIA_MFC_DEVICE_CIS_PROD_ID4(1, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"), PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0556, "cis/3CCFEM556.cis"), - PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0175, 0x0000, "DP83903.cis"), + PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0175, 0x0000, "cis/DP83903.cis"), PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0035, "cis/3CXEM556.cis"), PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x003d, "cis/3CXEM556.cis"), PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC850", 0xd85f6206, 0x42a2c018, "SW_8xx_SER.cis"), /* Sierra Wireless AC850 3G Network Adapter R1 */ @@ -884,9 +884,9 @@ static struct pcmcia_device_id serial_ids[] = { PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0xa555, "SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- pre update */ PCMCIA_DEVICE_CIS_MANF_CARD(0x013f, 0xa555, "SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- post update */ PCMCIA_DEVICE_CIS_PROD_ID12("MultiTech", "PCMCIA 56K DataFax", 0x842047ee, 0xc2efcf03, "cis/MT5634ZLX.cis"), - PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-2", 0x96913a85, 0x27ab5437, "COMpad2.cis"), - PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "COMpad4.cis"), - PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "COMpad2.cis"), + PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-2", 0x96913a85, 0x27ab5437, "cis/COMpad2.cis"), + PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "cis/COMpad4.cis"), + 
PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "cis/COMpad2.cis"), PCMCIA_DEVICE_CIS_PROD_ID2("RS-COM 2P", 0xad20b156, "cis/RS-COM-2P.cis"), PCMCIA_DEVICE_CIS_MANF_CARD(0x0013, 0x0000, "GLOBETROTTER.cis"), PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100 1.00.",0x19ca78af,0xf964f42b), diff --git a/drivers/serial/serial_txx9.c b/drivers/serial/serial_txx9.c index 0f7cf4c453e..c50e9fbbf74 100644 --- a/drivers/serial/serial_txx9.c +++ b/drivers/serial/serial_txx9.c @@ -221,21 +221,26 @@ sio_quot_set(struct uart_txx9_port *up, int quot) sio_out(up, TXX9_SIBGR, 0xff | TXX9_SIBGR_BCLK_T6); } +static struct uart_txx9_port *to_uart_txx9_port(struct uart_port *port) +{ + return container_of(port, struct uart_txx9_port, port); +} + static void serial_txx9_stop_tx(struct uart_port *port) { - struct uart_txx9_port *up = (struct uart_txx9_port *)port; + struct uart_txx9_port *up = to_uart_txx9_port(port); sio_mask(up, TXX9_SIDICR, TXX9_SIDICR_TIE); } static void serial_txx9_start_tx(struct uart_port *port) { - struct uart_txx9_port *up = (struct uart_txx9_port *)port; + struct uart_txx9_port *up = to_uart_txx9_port(port); sio_set(up, TXX9_SIDICR, TXX9_SIDICR_TIE); } static void serial_txx9_stop_rx(struct uart_port *port) { - struct uart_txx9_port *up = (struct uart_txx9_port *)port; + struct uart_txx9_port *up = to_uart_txx9_port(port); up->port.read_status_mask &= ~TXX9_SIDISR_RDIS; } @@ -246,7 +251,7 @@ static void serial_txx9_enable_ms(struct uart_port *port) static void serial_txx9_initialize(struct uart_port *port) { - struct uart_txx9_port *up = (struct uart_txx9_port *)port; + struct uart_txx9_port *up = to_uart_txx9_port(port); unsigned int tmout = 10000; sio_out(up, TXX9_SIFCR, TXX9_SIFCR_SWRST); @@ -414,7 +419,7 @@ static irqreturn_t serial_txx9_interrupt(int irq, void *dev_id) static unsigned int serial_txx9_tx_empty(struct uart_port *port) { - struct uart_txx9_port *up = (struct uart_txx9_port *)port; + struct uart_txx9_port *up = to_uart_txx9_port(port); unsigned long flags; unsigned int ret; @@ -427,7 +432,7 @@ static unsigned int serial_txx9_tx_empty(struct uart_port *port) static unsigned int serial_txx9_get_mctrl(struct uart_port *port) { - struct uart_txx9_port *up = (struct uart_txx9_port *)port; + struct uart_txx9_port *up = to_uart_txx9_port(port); unsigned int ret; /* no modem control lines */ @@ -440,7 +445,7 @@ static unsigned int serial_txx9_get_mctrl(struct uart_port *port) static void serial_txx9_set_mctrl(struct uart_port *port, unsigned int mctrl) { - struct uart_txx9_port *up = (struct uart_txx9_port *)port; + struct uart_txx9_port *up = to_uart_txx9_port(port); if (mctrl & TIOCM_RTS) sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_RTSSC); @@ -450,7 +455,7 @@ static void serial_txx9_set_mctrl(struct uart_port *port, unsigned int mctrl) static void serial_txx9_break_ctl(struct uart_port *port, int break_state) { - struct uart_txx9_port *up = (struct uart_txx9_port *)port; + struct uart_txx9_port *up = to_uart_txx9_port(port); unsigned long flags; spin_lock_irqsave(&up->port.lock, flags); @@ -494,7 +499,7 @@ static int serial_txx9_get_poll_char(struct uart_port *port) { unsigned int ier; unsigned char c; - struct uart_txx9_port *up = (struct uart_txx9_port *)port; + struct uart_txx9_port *up = to_uart_txx9_port(port); /* * First save the IER then disable the interrupts @@ -520,7 +525,7 @@ static int serial_txx9_get_poll_char(struct uart_port *port) static void serial_txx9_put_poll_char(struct uart_port 
*port, unsigned char c) { unsigned int ier; - struct uart_txx9_port *up = (struct uart_txx9_port *)port; + struct uart_txx9_port *up = to_uart_txx9_port(port); /* * First save the IER then disable the interrupts @@ -551,7 +556,7 @@ static void serial_txx9_put_poll_char(struct uart_port *port, unsigned char c) static int serial_txx9_startup(struct uart_port *port) { - struct uart_txx9_port *up = (struct uart_txx9_port *)port; + struct uart_txx9_port *up = to_uart_txx9_port(port); unsigned long flags; int retval; @@ -596,7 +601,7 @@ static int serial_txx9_startup(struct uart_port *port) static void serial_txx9_shutdown(struct uart_port *port) { - struct uart_txx9_port *up = (struct uart_txx9_port *)port; + struct uart_txx9_port *up = to_uart_txx9_port(port); unsigned long flags; /* @@ -636,7 +641,7 @@ static void serial_txx9_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) { - struct uart_txx9_port *up = (struct uart_txx9_port *)port; + struct uart_txx9_port *up = to_uart_txx9_port(port); unsigned int cval, fcr = 0; unsigned long flags; unsigned int baud, quot; @@ -814,19 +819,19 @@ static void serial_txx9_release_resource(struct uart_txx9_port *up) static void serial_txx9_release_port(struct uart_port *port) { - struct uart_txx9_port *up = (struct uart_txx9_port *)port; + struct uart_txx9_port *up = to_uart_txx9_port(port); serial_txx9_release_resource(up); } static int serial_txx9_request_port(struct uart_port *port) { - struct uart_txx9_port *up = (struct uart_txx9_port *)port; + struct uart_txx9_port *up = to_uart_txx9_port(port); return serial_txx9_request_resource(up); } static void serial_txx9_config_port(struct uart_port *port, int uflags) { - struct uart_txx9_port *up = (struct uart_txx9_port *)port; + struct uart_txx9_port *up = to_uart_txx9_port(port); int ret; /* @@ -897,7 +902,7 @@ static void __init serial_txx9_register_ports(struct uart_driver *drv, static void serial_txx9_console_putchar(struct uart_port *port, int ch) { - struct uart_txx9_port *up = (struct uart_txx9_port *)port; + struct uart_txx9_port *up = to_uart_txx9_port(port); wait_for_xmitr(up); sio_out(up, TXX9_SITFIFO, ch); diff --git a/drivers/sfi/sfi_core.c b/drivers/sfi/sfi_core.c index d3b49680047..b204a092913 100644 --- a/drivers/sfi/sfi_core.c +++ b/drivers/sfi/sfi_core.c @@ -90,7 +90,11 @@ static struct sfi_table_simple *syst_va __read_mostly; */ static u32 sfi_use_ioremap __read_mostly; -static void __iomem *sfi_map_memory(u64 phys, u32 size) +/* + * sfi_un/map_memory calls early_ioremap/iounmap which is a __init function + * and introduces section mismatch. So use __ref to make it calm. 
+ */ +static void __iomem * __ref sfi_map_memory(u64 phys, u32 size) { if (!phys || !size) return NULL; @@ -101,7 +105,7 @@ static void __iomem *sfi_map_memory(u64 phys, u32 size) return early_ioremap(phys, size); } -static void sfi_unmap_memory(void __iomem *virt, u32 size) +static void __ref sfi_unmap_memory(void __iomem *virt, u32 size) { if (!virt || !size) return; @@ -125,7 +129,7 @@ static void sfi_print_table_header(unsigned long long pa, * sfi_verify_table() * Sanity check table lengh, calculate checksum */ -static __init int sfi_verify_table(struct sfi_table_header *table) +static int sfi_verify_table(struct sfi_table_header *table) { u8 checksum = 0; @@ -213,12 +217,17 @@ static int sfi_table_check_key(struct sfi_table_header *th, * the mapped virt address will be returned, and the virt space * will be released by call sfi_put_table() later * + * This two cases are from two different functions with two different + * sections and causes section mismatch warning. So use __ref to tell + * modpost not to make any noise. + * * Return value: * NULL: when can't find a table matching the key * ERR_PTR(error): error value * virt table address: when a matched table is found */ -struct sfi_table_header *sfi_check_table(u64 pa, struct sfi_table_key *key) +struct sfi_table_header * + __ref sfi_check_table(u64 pa, struct sfi_table_key *key) { struct sfi_table_header *th; void *ret = NULL; diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index 6d7a3f82c54..21a118269ca 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -17,7 +17,7 @@ obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o obj-$(CONFIG_SPI_AU1550) += au1550_spi.o obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o obj-$(CONFIG_SPI_GPIO) += spi_gpio.o -obj-$(CONFIG_SPI_IMX) += mxc_spi.o +obj-$(CONFIG_SPI_IMX) += spi_imx.o obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o obj-$(CONFIG_SPI_OMAP_UWIRE) += omap_uwire.o diff --git a/drivers/spi/mxc_spi.c b/drivers/spi/spi_imx.c index b1447236ae8..89c22efedfb 100644 --- a/drivers/spi/mxc_spi.c +++ b/drivers/spi/spi_imx.c @@ -48,14 +48,14 @@ #define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */ #define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */ -struct mxc_spi_config { +struct spi_imx_config { unsigned int speed_hz; unsigned int bpw; unsigned int mode; int cs; }; -struct mxc_spi_data { +struct spi_imx_data { struct spi_bitbang bitbang; struct completion xfer_done; @@ -66,43 +66,43 @@ struct mxc_spi_data { int *chipselect; unsigned int count; - void (*tx)(struct mxc_spi_data *); - void (*rx)(struct mxc_spi_data *); + void (*tx)(struct spi_imx_data *); + void (*rx)(struct spi_imx_data *); void *rx_buf; const void *tx_buf; unsigned int txfifo; /* number of words pushed in tx FIFO */ /* SoC specific functions */ - void (*intctrl)(struct mxc_spi_data *, int); - int (*config)(struct mxc_spi_data *, struct mxc_spi_config *); - void (*trigger)(struct mxc_spi_data *); - int (*rx_available)(struct mxc_spi_data *); + void (*intctrl)(struct spi_imx_data *, int); + int (*config)(struct spi_imx_data *, struct spi_imx_config *); + void (*trigger)(struct spi_imx_data *); + int (*rx_available)(struct spi_imx_data *); }; #define MXC_SPI_BUF_RX(type) \ -static void mxc_spi_buf_rx_##type(struct mxc_spi_data *mxc_spi) \ +static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx) \ { \ - unsigned int val = readl(mxc_spi->base + MXC_CSPIRXDATA); \ + unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA); \ \ - if (mxc_spi->rx_buf) { \ - *(type 
*)mxc_spi->rx_buf = val; \ - mxc_spi->rx_buf += sizeof(type); \ + if (spi_imx->rx_buf) { \ + *(type *)spi_imx->rx_buf = val; \ + spi_imx->rx_buf += sizeof(type); \ } \ } #define MXC_SPI_BUF_TX(type) \ -static void mxc_spi_buf_tx_##type(struct mxc_spi_data *mxc_spi) \ +static void spi_imx_buf_tx_##type(struct spi_imx_data *spi_imx) \ { \ type val = 0; \ \ - if (mxc_spi->tx_buf) { \ - val = *(type *)mxc_spi->tx_buf; \ - mxc_spi->tx_buf += sizeof(type); \ + if (spi_imx->tx_buf) { \ + val = *(type *)spi_imx->tx_buf; \ + spi_imx->tx_buf += sizeof(type); \ } \ \ - mxc_spi->count -= sizeof(type); \ + spi_imx->count -= sizeof(type); \ \ - writel(val, mxc_spi->base + MXC_CSPITXDATA); \ + writel(val, spi_imx->base + MXC_CSPITXDATA); \ } MXC_SPI_BUF_RX(u8) @@ -119,7 +119,7 @@ static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192, 256, 384, 512, 768, 1024}; /* MX21, MX27 */ -static unsigned int mxc_spi_clkdiv_1(unsigned int fin, +static unsigned int spi_imx_clkdiv_1(unsigned int fin, unsigned int fspi) { int i, max; @@ -137,7 +137,7 @@ static unsigned int mxc_spi_clkdiv_1(unsigned int fin, } /* MX1, MX31, MX35 */ -static unsigned int mxc_spi_clkdiv_2(unsigned int fin, +static unsigned int spi_imx_clkdiv_2(unsigned int fin, unsigned int fspi) { int i, div = 4; @@ -174,7 +174,7 @@ static unsigned int mxc_spi_clkdiv_2(unsigned int fin, * the i.MX35 has a slightly different register layout for bits * we do not use here. */ -static void mx31_intctrl(struct mxc_spi_data *mxc_spi, int enable) +static void mx31_intctrl(struct spi_imx_data *spi_imx, int enable) { unsigned int val = 0; @@ -183,24 +183,24 @@ static void mx31_intctrl(struct mxc_spi_data *mxc_spi, int enable) if (enable & MXC_INT_RR) val |= MX31_INTREG_RREN; - writel(val, mxc_spi->base + MXC_CSPIINT); + writel(val, spi_imx->base + MXC_CSPIINT); } -static void mx31_trigger(struct mxc_spi_data *mxc_spi) +static void mx31_trigger(struct spi_imx_data *spi_imx) { unsigned int reg; - reg = readl(mxc_spi->base + MXC_CSPICTRL); + reg = readl(spi_imx->base + MXC_CSPICTRL); reg |= MX31_CSPICTRL_XCH; - writel(reg, mxc_spi->base + MXC_CSPICTRL); + writel(reg, spi_imx->base + MXC_CSPICTRL); } -static int mx31_config(struct mxc_spi_data *mxc_spi, - struct mxc_spi_config *config) +static int mx31_config(struct spi_imx_data *spi_imx, + struct spi_imx_config *config) { unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER; - reg |= mxc_spi_clkdiv_2(mxc_spi->spi_clk, config->speed_hz) << + reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) << MX31_CSPICTRL_DR_SHIFT; if (cpu_is_mx31()) @@ -223,14 +223,14 @@ static int mx31_config(struct mxc_spi_data *mxc_spi, reg |= (config->cs + 32) << MX35_CSPICTRL_CS_SHIFT; } - writel(reg, mxc_spi->base + MXC_CSPICTRL); + writel(reg, spi_imx->base + MXC_CSPICTRL); return 0; } -static int mx31_rx_available(struct mxc_spi_data *mxc_spi) +static int mx31_rx_available(struct spi_imx_data *spi_imx) { - return readl(mxc_spi->base + MX31_CSPISTATUS) & MX31_STATUS_RR; + return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR; } #define MX27_INTREG_RR (1 << 4) @@ -246,7 +246,7 @@ static int mx31_rx_available(struct mxc_spi_data *mxc_spi) #define MX27_CSPICTRL_DR_SHIFT 14 #define MX27_CSPICTRL_CS_SHIFT 19 -static void mx27_intctrl(struct mxc_spi_data *mxc_spi, int enable) +static void mx27_intctrl(struct spi_imx_data *spi_imx, int enable) { unsigned int val = 0; @@ -255,24 +255,24 @@ static void mx27_intctrl(struct mxc_spi_data *mxc_spi, int enable) if (enable & MXC_INT_RR) val |= 
MX27_INTREG_RREN; - writel(val, mxc_spi->base + MXC_CSPIINT); + writel(val, spi_imx->base + MXC_CSPIINT); } -static void mx27_trigger(struct mxc_spi_data *mxc_spi) +static void mx27_trigger(struct spi_imx_data *spi_imx) { unsigned int reg; - reg = readl(mxc_spi->base + MXC_CSPICTRL); + reg = readl(spi_imx->base + MXC_CSPICTRL); reg |= MX27_CSPICTRL_XCH; - writel(reg, mxc_spi->base + MXC_CSPICTRL); + writel(reg, spi_imx->base + MXC_CSPICTRL); } -static int mx27_config(struct mxc_spi_data *mxc_spi, - struct mxc_spi_config *config) +static int mx27_config(struct spi_imx_data *spi_imx, + struct spi_imx_config *config) { unsigned int reg = MX27_CSPICTRL_ENABLE | MX27_CSPICTRL_MASTER; - reg |= mxc_spi_clkdiv_1(mxc_spi->spi_clk, config->speed_hz) << + reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz) << MX27_CSPICTRL_DR_SHIFT; reg |= config->bpw - 1; @@ -285,14 +285,14 @@ static int mx27_config(struct mxc_spi_data *mxc_spi, if (config->cs < 0) reg |= (config->cs + 32) << MX27_CSPICTRL_CS_SHIFT; - writel(reg, mxc_spi->base + MXC_CSPICTRL); + writel(reg, spi_imx->base + MXC_CSPICTRL); return 0; } -static int mx27_rx_available(struct mxc_spi_data *mxc_spi) +static int mx27_rx_available(struct spi_imx_data *spi_imx) { - return readl(mxc_spi->base + MXC_CSPIINT) & MX27_INTREG_RR; + return readl(spi_imx->base + MXC_CSPIINT) & MX27_INTREG_RR; } #define MX1_INTREG_RR (1 << 3) @@ -306,7 +306,7 @@ static int mx27_rx_available(struct mxc_spi_data *mxc_spi) #define MX1_CSPICTRL_MASTER (1 << 10) #define MX1_CSPICTRL_DR_SHIFT 13 -static void mx1_intctrl(struct mxc_spi_data *mxc_spi, int enable) +static void mx1_intctrl(struct spi_imx_data *spi_imx, int enable) { unsigned int val = 0; @@ -315,24 +315,24 @@ static void mx1_intctrl(struct mxc_spi_data *mxc_spi, int enable) if (enable & MXC_INT_RR) val |= MX1_INTREG_RREN; - writel(val, mxc_spi->base + MXC_CSPIINT); + writel(val, spi_imx->base + MXC_CSPIINT); } -static void mx1_trigger(struct mxc_spi_data *mxc_spi) +static void mx1_trigger(struct spi_imx_data *spi_imx) { unsigned int reg; - reg = readl(mxc_spi->base + MXC_CSPICTRL); + reg = readl(spi_imx->base + MXC_CSPICTRL); reg |= MX1_CSPICTRL_XCH; - writel(reg, mxc_spi->base + MXC_CSPICTRL); + writel(reg, spi_imx->base + MXC_CSPICTRL); } -static int mx1_config(struct mxc_spi_data *mxc_spi, - struct mxc_spi_config *config) +static int mx1_config(struct spi_imx_data *spi_imx, + struct spi_imx_config *config) { unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER; - reg |= mxc_spi_clkdiv_2(mxc_spi->spi_clk, config->speed_hz) << + reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) << MX1_CSPICTRL_DR_SHIFT; reg |= config->bpw - 1; @@ -341,156 +341,151 @@ static int mx1_config(struct mxc_spi_data *mxc_spi, if (config->mode & SPI_CPOL) reg |= MX1_CSPICTRL_POL; - writel(reg, mxc_spi->base + MXC_CSPICTRL); + writel(reg, spi_imx->base + MXC_CSPICTRL); return 0; } -static int mx1_rx_available(struct mxc_spi_data *mxc_spi) +static int mx1_rx_available(struct spi_imx_data *spi_imx) { - return readl(mxc_spi->base + MXC_CSPIINT) & MX1_INTREG_RR; + return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR; } -static void mxc_spi_chipselect(struct spi_device *spi, int is_active) +static void spi_imx_chipselect(struct spi_device *spi, int is_active) { - struct mxc_spi_data *mxc_spi = spi_master_get_devdata(spi->master); - unsigned int cs = 0; - int gpio = mxc_spi->chipselect[spi->chip_select]; - struct mxc_spi_config config; + struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); + int 
gpio = spi_imx->chipselect[spi->chip_select]; + int active = is_active != BITBANG_CS_INACTIVE; + int dev_is_lowactive = !(spi->mode & SPI_CS_HIGH); - if (spi->mode & SPI_CS_HIGH) - cs = 1; - - if (is_active == BITBANG_CS_INACTIVE) { - if (gpio >= 0) - gpio_set_value(gpio, !cs); + if (gpio < 0) return; - } - - config.bpw = spi->bits_per_word; - config.speed_hz = spi->max_speed_hz; - config.mode = spi->mode; - config.cs = mxc_spi->chipselect[spi->chip_select]; - - mxc_spi->config(mxc_spi, &config); - - /* Initialize the functions for transfer */ - if (config.bpw <= 8) { - mxc_spi->rx = mxc_spi_buf_rx_u8; - mxc_spi->tx = mxc_spi_buf_tx_u8; - } else if (config.bpw <= 16) { - mxc_spi->rx = mxc_spi_buf_rx_u16; - mxc_spi->tx = mxc_spi_buf_tx_u16; - } else if (config.bpw <= 32) { - mxc_spi->rx = mxc_spi_buf_rx_u32; - mxc_spi->tx = mxc_spi_buf_tx_u32; - } else - BUG(); - if (gpio >= 0) - gpio_set_value(gpio, cs); - - return; + gpio_set_value(gpio, dev_is_lowactive ^ active); } -static void mxc_spi_push(struct mxc_spi_data *mxc_spi) +static void spi_imx_push(struct spi_imx_data *spi_imx) { - while (mxc_spi->txfifo < 8) { - if (!mxc_spi->count) + while (spi_imx->txfifo < 8) { + if (!spi_imx->count) break; - mxc_spi->tx(mxc_spi); - mxc_spi->txfifo++; + spi_imx->tx(spi_imx); + spi_imx->txfifo++; } - mxc_spi->trigger(mxc_spi); + spi_imx->trigger(spi_imx); } -static irqreturn_t mxc_spi_isr(int irq, void *dev_id) +static irqreturn_t spi_imx_isr(int irq, void *dev_id) { - struct mxc_spi_data *mxc_spi = dev_id; + struct spi_imx_data *spi_imx = dev_id; - while (mxc_spi->rx_available(mxc_spi)) { - mxc_spi->rx(mxc_spi); - mxc_spi->txfifo--; + while (spi_imx->rx_available(spi_imx)) { + spi_imx->rx(spi_imx); + spi_imx->txfifo--; } - if (mxc_spi->count) { - mxc_spi_push(mxc_spi); + if (spi_imx->count) { + spi_imx_push(spi_imx); return IRQ_HANDLED; } - if (mxc_spi->txfifo) { + if (spi_imx->txfifo) { /* No data left to push, but still waiting for rx data, * enable receive data available interrupt. */ - mxc_spi->intctrl(mxc_spi, MXC_INT_RR); + spi_imx->intctrl(spi_imx, MXC_INT_RR); return IRQ_HANDLED; } - mxc_spi->intctrl(mxc_spi, 0); - complete(&mxc_spi->xfer_done); + spi_imx->intctrl(spi_imx, 0); + complete(&spi_imx->xfer_done); return IRQ_HANDLED; } -static int mxc_spi_setupxfer(struct spi_device *spi, +static int spi_imx_setupxfer(struct spi_device *spi, struct spi_transfer *t) { - struct mxc_spi_data *mxc_spi = spi_master_get_devdata(spi->master); - struct mxc_spi_config config; + struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); + struct spi_imx_config config; config.bpw = t ? t->bits_per_word : spi->bits_per_word; config.speed_hz = t ? 
t->speed_hz : spi->max_speed_hz; config.mode = spi->mode; + config.cs = spi_imx->chipselect[spi->chip_select]; + + if (!config.speed_hz) + config.speed_hz = spi->max_speed_hz; + if (!config.bpw) + config.bpw = spi->bits_per_word; + if (!config.speed_hz) + config.speed_hz = spi->max_speed_hz; + + /* Initialize the functions for transfer */ + if (config.bpw <= 8) { + spi_imx->rx = spi_imx_buf_rx_u8; + spi_imx->tx = spi_imx_buf_tx_u8; + } else if (config.bpw <= 16) { + spi_imx->rx = spi_imx_buf_rx_u16; + spi_imx->tx = spi_imx_buf_tx_u16; + } else if (config.bpw <= 32) { + spi_imx->rx = spi_imx_buf_rx_u32; + spi_imx->tx = spi_imx_buf_tx_u32; + } else + BUG(); - mxc_spi->config(mxc_spi, &config); + spi_imx->config(spi_imx, &config); return 0; } -static int mxc_spi_transfer(struct spi_device *spi, +static int spi_imx_transfer(struct spi_device *spi, struct spi_transfer *transfer) { - struct mxc_spi_data *mxc_spi = spi_master_get_devdata(spi->master); + struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); - mxc_spi->tx_buf = transfer->tx_buf; - mxc_spi->rx_buf = transfer->rx_buf; - mxc_spi->count = transfer->len; - mxc_spi->txfifo = 0; + spi_imx->tx_buf = transfer->tx_buf; + spi_imx->rx_buf = transfer->rx_buf; + spi_imx->count = transfer->len; + spi_imx->txfifo = 0; - init_completion(&mxc_spi->xfer_done); + init_completion(&spi_imx->xfer_done); - mxc_spi_push(mxc_spi); + spi_imx_push(spi_imx); - mxc_spi->intctrl(mxc_spi, MXC_INT_TE); + spi_imx->intctrl(spi_imx, MXC_INT_TE); - wait_for_completion(&mxc_spi->xfer_done); + wait_for_completion(&spi_imx->xfer_done); return transfer->len; } -static int mxc_spi_setup(struct spi_device *spi) +static int spi_imx_setup(struct spi_device *spi) { - if (!spi->bits_per_word) - spi->bits_per_word = 8; + struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); + int gpio = spi_imx->chipselect[spi->chip_select]; pr_debug("%s: mode %d, %u bpw, %d hz\n", __func__, spi->mode, spi->bits_per_word, spi->max_speed_hz); - mxc_spi_chipselect(spi, BITBANG_CS_INACTIVE); + if (gpio >= 0) + gpio_direction_output(gpio, spi->mode & SPI_CS_HIGH ? 
0 : 1); + + spi_imx_chipselect(spi, BITBANG_CS_INACTIVE); return 0; } -static void mxc_spi_cleanup(struct spi_device *spi) +static void spi_imx_cleanup(struct spi_device *spi) { } -static int __init mxc_spi_probe(struct platform_device *pdev) +static int __init spi_imx_probe(struct platform_device *pdev) { struct spi_imx_master *mxc_platform_info; struct spi_master *master; - struct mxc_spi_data *mxc_spi; + struct spi_imx_data *spi_imx; struct resource *res; int i, ret; @@ -500,7 +495,7 @@ static int __init mxc_spi_probe(struct platform_device *pdev) return -EINVAL; } - master = spi_alloc_master(&pdev->dev, sizeof(struct mxc_spi_data)); + master = spi_alloc_master(&pdev->dev, sizeof(struct spi_imx_data)); if (!master) return -ENOMEM; @@ -509,32 +504,32 @@ static int __init mxc_spi_probe(struct platform_device *pdev) master->bus_num = pdev->id; master->num_chipselect = mxc_platform_info->num_chipselect; - mxc_spi = spi_master_get_devdata(master); - mxc_spi->bitbang.master = spi_master_get(master); - mxc_spi->chipselect = mxc_platform_info->chipselect; + spi_imx = spi_master_get_devdata(master); + spi_imx->bitbang.master = spi_master_get(master); + spi_imx->chipselect = mxc_platform_info->chipselect; for (i = 0; i < master->num_chipselect; i++) { - if (mxc_spi->chipselect[i] < 0) + if (spi_imx->chipselect[i] < 0) continue; - ret = gpio_request(mxc_spi->chipselect[i], DRIVER_NAME); + ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME); if (ret) { i--; while (i > 0) - if (mxc_spi->chipselect[i] >= 0) - gpio_free(mxc_spi->chipselect[i--]); + if (spi_imx->chipselect[i] >= 0) + gpio_free(spi_imx->chipselect[i--]); dev_err(&pdev->dev, "can't get cs gpios"); goto out_master_put; } - gpio_direction_output(mxc_spi->chipselect[i], 1); } - mxc_spi->bitbang.chipselect = mxc_spi_chipselect; - mxc_spi->bitbang.setup_transfer = mxc_spi_setupxfer; - mxc_spi->bitbang.txrx_bufs = mxc_spi_transfer; - mxc_spi->bitbang.master->setup = mxc_spi_setup; - mxc_spi->bitbang.master->cleanup = mxc_spi_cleanup; + spi_imx->bitbang.chipselect = spi_imx_chipselect; + spi_imx->bitbang.setup_transfer = spi_imx_setupxfer; + spi_imx->bitbang.txrx_bufs = spi_imx_transfer; + spi_imx->bitbang.master->setup = spi_imx_setup; + spi_imx->bitbang.master->cleanup = spi_imx_cleanup; + spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; - init_completion(&mxc_spi->xfer_done); + init_completion(&spi_imx->xfer_done); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { @@ -549,58 +544,58 @@ static int __init mxc_spi_probe(struct platform_device *pdev) goto out_gpio_free; } - mxc_spi->base = ioremap(res->start, resource_size(res)); - if (!mxc_spi->base) { + spi_imx->base = ioremap(res->start, resource_size(res)); + if (!spi_imx->base) { ret = -EINVAL; goto out_release_mem; } - mxc_spi->irq = platform_get_irq(pdev, 0); - if (!mxc_spi->irq) { + spi_imx->irq = platform_get_irq(pdev, 0); + if (!spi_imx->irq) { ret = -EINVAL; goto out_iounmap; } - ret = request_irq(mxc_spi->irq, mxc_spi_isr, 0, DRIVER_NAME, mxc_spi); + ret = request_irq(spi_imx->irq, spi_imx_isr, 0, DRIVER_NAME, spi_imx); if (ret) { - dev_err(&pdev->dev, "can't get irq%d: %d\n", mxc_spi->irq, ret); + dev_err(&pdev->dev, "can't get irq%d: %d\n", spi_imx->irq, ret); goto out_iounmap; } if (cpu_is_mx31() || cpu_is_mx35()) { - mxc_spi->intctrl = mx31_intctrl; - mxc_spi->config = mx31_config; - mxc_spi->trigger = mx31_trigger; - mxc_spi->rx_available = mx31_rx_available; + spi_imx->intctrl = mx31_intctrl; + spi_imx->config = mx31_config; + 
spi_imx->trigger = mx31_trigger; + spi_imx->rx_available = mx31_rx_available; } else if (cpu_is_mx27() || cpu_is_mx21()) { - mxc_spi->intctrl = mx27_intctrl; - mxc_spi->config = mx27_config; - mxc_spi->trigger = mx27_trigger; - mxc_spi->rx_available = mx27_rx_available; + spi_imx->intctrl = mx27_intctrl; + spi_imx->config = mx27_config; + spi_imx->trigger = mx27_trigger; + spi_imx->rx_available = mx27_rx_available; } else if (cpu_is_mx1()) { - mxc_spi->intctrl = mx1_intctrl; - mxc_spi->config = mx1_config; - mxc_spi->trigger = mx1_trigger; - mxc_spi->rx_available = mx1_rx_available; + spi_imx->intctrl = mx1_intctrl; + spi_imx->config = mx1_config; + spi_imx->trigger = mx1_trigger; + spi_imx->rx_available = mx1_rx_available; } else BUG(); - mxc_spi->clk = clk_get(&pdev->dev, NULL); - if (IS_ERR(mxc_spi->clk)) { + spi_imx->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(spi_imx->clk)) { dev_err(&pdev->dev, "unable to get clock\n"); - ret = PTR_ERR(mxc_spi->clk); + ret = PTR_ERR(spi_imx->clk); goto out_free_irq; } - clk_enable(mxc_spi->clk); - mxc_spi->spi_clk = clk_get_rate(mxc_spi->clk); + clk_enable(spi_imx->clk); + spi_imx->spi_clk = clk_get_rate(spi_imx->clk); if (!cpu_is_mx31() || !cpu_is_mx35()) - writel(1, mxc_spi->base + MXC_RESET); + writel(1, spi_imx->base + MXC_RESET); - mxc_spi->intctrl(mxc_spi, 0); + spi_imx->intctrl(spi_imx, 0); - ret = spi_bitbang_start(&mxc_spi->bitbang); + ret = spi_bitbang_start(&spi_imx->bitbang); if (ret) { dev_err(&pdev->dev, "bitbang start failed with %d\n", ret); goto out_clk_put; @@ -611,18 +606,18 @@ static int __init mxc_spi_probe(struct platform_device *pdev) return ret; out_clk_put: - clk_disable(mxc_spi->clk); - clk_put(mxc_spi->clk); + clk_disable(spi_imx->clk); + clk_put(spi_imx->clk); out_free_irq: - free_irq(mxc_spi->irq, mxc_spi); + free_irq(spi_imx->irq, spi_imx); out_iounmap: - iounmap(mxc_spi->base); + iounmap(spi_imx->base); out_release_mem: release_mem_region(res->start, resource_size(res)); out_gpio_free: for (i = 0; i < master->num_chipselect; i++) - if (mxc_spi->chipselect[i] >= 0) - gpio_free(mxc_spi->chipselect[i]); + if (spi_imx->chipselect[i] >= 0) + gpio_free(spi_imx->chipselect[i]); out_master_put: spi_master_put(master); kfree(master); @@ -630,24 +625,24 @@ out_master_put: return ret; } -static int __exit mxc_spi_remove(struct platform_device *pdev) +static int __exit spi_imx_remove(struct platform_device *pdev) { struct spi_master *master = platform_get_drvdata(pdev); struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - struct mxc_spi_data *mxc_spi = spi_master_get_devdata(master); + struct spi_imx_data *spi_imx = spi_master_get_devdata(master); int i; - spi_bitbang_stop(&mxc_spi->bitbang); + spi_bitbang_stop(&spi_imx->bitbang); - writel(0, mxc_spi->base + MXC_CSPICTRL); - clk_disable(mxc_spi->clk); - clk_put(mxc_spi->clk); - free_irq(mxc_spi->irq, mxc_spi); - iounmap(mxc_spi->base); + writel(0, spi_imx->base + MXC_CSPICTRL); + clk_disable(spi_imx->clk); + clk_put(spi_imx->clk); + free_irq(spi_imx->irq, spi_imx); + iounmap(spi_imx->base); for (i = 0; i < master->num_chipselect; i++) - if (mxc_spi->chipselect[i] >= 0) - gpio_free(mxc_spi->chipselect[i]); + if (spi_imx->chipselect[i] >= 0) + gpio_free(spi_imx->chipselect[i]); spi_master_put(master); @@ -658,27 +653,27 @@ static int __exit mxc_spi_remove(struct platform_device *pdev) return 0; } -static struct platform_driver mxc_spi_driver = { +static struct platform_driver spi_imx_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, }, - .probe = 
mxc_spi_probe, - .remove = __exit_p(mxc_spi_remove), + .probe = spi_imx_probe, + .remove = __exit_p(spi_imx_remove), }; -static int __init mxc_spi_init(void) +static int __init spi_imx_init(void) { - return platform_driver_register(&mxc_spi_driver); + return platform_driver_register(&spi_imx_driver); } -static void __exit mxc_spi_exit(void) +static void __exit spi_imx_exit(void) { - platform_driver_unregister(&mxc_spi_driver); + platform_driver_unregister(&spi_imx_driver); } -module_init(mxc_spi_init); -module_exit(mxc_spi_exit); +module_init(spi_imx_init); +module_exit(spi_imx_exit); MODULE_DESCRIPTION("SPI Master Controller driver"); MODULE_AUTHOR("Sascha Hauer, Pengutronix"); diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index f921bd1109e..5d23983f02f 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -537,7 +537,7 @@ static int spidev_release(struct inode *inode, struct file *filp) return status; } -static struct file_operations spidev_fops = { +static const struct file_operations spidev_fops = { .owner = THIS_MODULE, /* REVISIT switch to aio primitives, so that userspace * gets more complete API coverage. It'll simplify things diff --git a/drivers/staging/dst/dcore.c b/drivers/staging/dst/dcore.c index ac8577358ba..c24e4e0367a 100644 --- a/drivers/staging/dst/dcore.c +++ b/drivers/staging/dst/dcore.c @@ -102,7 +102,7 @@ static int dst_request(struct request_queue *q, struct bio *bio) struct dst_node *n = q->queuedata; int err = -EIO; - if (bio_empty_barrier(bio) && !q->prepare_discard_fn) { + if (bio_empty_barrier(bio) && !blk_queue_discard(q)) { /* * This is a dirty^Wnice hack, but if we complete this * operation with -EOPNOTSUPP like intended, XFS @@ -847,7 +847,7 @@ static dst_command_func dst_commands[] = { /* * Configuration parser. 
*/ -static void cn_dst_callback(struct cn_msg *msg) +static void cn_dst_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp) { struct dst_ctl *ctl; int err; @@ -855,6 +855,11 @@ static void cn_dst_callback(struct cn_msg *msg) struct dst_node *n = NULL, *tmp; unsigned int hash; + if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) { + err = -EPERM; + goto out; + } + if (msg->len < sizeof(struct dst_ctl)) { err = -EBADMSG; goto out; diff --git a/drivers/staging/iio/light/tsl2561.c b/drivers/staging/iio/light/tsl2561.c index ea8a5efc19b..fc2107f4c04 100644 --- a/drivers/staging/iio/light/tsl2561.c +++ b/drivers/staging/iio/light/tsl2561.c @@ -239,10 +239,6 @@ static int __devexit tsl2561_remove(struct i2c_client *client) return tsl2561_powerdown(client); } -static unsigned short normal_i2c[] = { 0x29, 0x39, 0x49, I2C_CLIENT_END }; - -I2C_CLIENT_INSMOD; - static const struct i2c_device_id tsl2561_id[] = { { "tsl2561", 0 }, { } diff --git a/drivers/staging/pohmelfs/config.c b/drivers/staging/pohmelfs/config.c index 90f962ee5fd..5d04bf5b021 100644 --- a/drivers/staging/pohmelfs/config.c +++ b/drivers/staging/pohmelfs/config.c @@ -527,10 +527,13 @@ out_unlock: return err; } -static void pohmelfs_cn_callback(struct cn_msg *msg) +static void pohmelfs_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp) { int err; + if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) + return; + switch (msg->flags) { case POHMELFS_FLAGS_ADD: case POHMELFS_FLAGS_DEL: diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index 333ee02e7b2..864f0ba6a34 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c @@ -993,7 +993,7 @@ skip_io_on_zombie: return retval; } -static struct file_operations fops = { +static const struct file_operations fops = { .owner = THIS_MODULE, .read = usbtmc_read, .write = usbtmc_write, diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c index c44367fea18..bf0f6520c6d 100644 --- a/drivers/usb/gadget/inode.c +++ b/drivers/usb/gadget/inode.c @@ -30,6 +30,7 @@ #include <linux/wait.h> #include <linux/compiler.h> #include <asm/uaccess.h> +#include <linux/sched.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/smp_lock.h> diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c index 29500154d00..2d867fd2241 100644 --- a/drivers/usb/gadget/printer.c +++ b/drivers/usb/gadget/printer.c @@ -875,7 +875,7 @@ printer_ioctl(struct file *fd, unsigned int code, unsigned long arg) } /* used after endpoint configuration */ -static struct file_operations printer_io_operations = { +static const struct file_operations printer_io_operations = { .owner = THIS_MODULE, .open = printer_open, .read = printer_read, diff --git a/drivers/usb/host/whci/debug.c b/drivers/usb/host/whci/debug.c index cf2d45946c5..2273c815941 100644 --- a/drivers/usb/host/whci/debug.c +++ b/drivers/usb/host/whci/debug.c @@ -134,7 +134,7 @@ static int pzl_open(struct inode *inode, struct file *file) return single_open(file, pzl_print, inode->i_private); } -static struct file_operations di_fops = { +static const struct file_operations di_fops = { .open = di_open, .read = seq_read, .llseek = seq_lseek, @@ -142,7 +142,7 @@ static struct file_operations di_fops = { .owner = THIS_MODULE, }; -static struct file_operations asl_fops = { +static const struct file_operations asl_fops = { .open = asl_open, .read = seq_read, .llseek = seq_lseek, @@ -150,7 +150,7 @@ static struct file_operations asl_fops = { .owner = THIS_MODULE, }; -static struct file_operations pzl_fops 
= { +static const struct file_operations pzl_fops = { .open = pzl_open, .read = seq_read, .llseek = seq_lseek, diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c index d645f3899fe..32d0199d0c3 100644 --- a/drivers/usb/misc/rio500.c +++ b/drivers/usb/misc/rio500.c @@ -429,8 +429,7 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos) return read_count; } -static struct -file_operations usb_rio_fops = { +static const struct file_operations usb_rio_fops = { .owner = THIS_MODULE, .read = read_rio, .write = write_rio, diff --git a/drivers/uwb/uwb-debug.c b/drivers/uwb/uwb-debug.c index 4a42993700c..2eecec0c13c 100644 --- a/drivers/uwb/uwb-debug.c +++ b/drivers/uwb/uwb-debug.c @@ -205,7 +205,7 @@ static ssize_t command_write(struct file *file, const char __user *buf, return ret < 0 ? ret : len; } -static struct file_operations command_fops = { +static const struct file_operations command_fops = { .open = command_open, .write = command_write, .read = NULL, @@ -255,7 +255,7 @@ static int reservations_open(struct inode *inode, struct file *file) return single_open(file, reservations_print, inode->i_private); } -static struct file_operations reservations_fops = { +static const struct file_operations reservations_fops = { .open = reservations_open, .read = seq_read, .llseek = seq_lseek, @@ -283,7 +283,7 @@ static int drp_avail_open(struct inode *inode, struct file *file) return single_open(file, drp_avail_print, inode->i_private); } -static struct file_operations drp_avail_fops = { +static const struct file_operations drp_avail_fops = { .open = drp_avail_open, .read = seq_read, .llseek = seq_lseek, diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c index 42e1005e291..d065894ce38 100644 --- a/drivers/video/da8xx-fb.c +++ b/drivers/video/da8xx-fb.c @@ -26,7 +26,6 @@ #include <linux/device.h> #include <linux/platform_device.h> #include <linux/uaccess.h> -#include <linux/device.h> #include <linux/interrupt.h> #include <linux/clk.h> #include <video/da8xx-fb.h> diff --git a/drivers/video/msm/mddi.c b/drivers/video/msm/mddi.c index f2de5a1acd6..5c5a1ad1d39 100644 --- a/drivers/video/msm/mddi.c +++ b/drivers/video/msm/mddi.c @@ -27,8 +27,6 @@ #include <mach/msm_iomap.h> #include <mach/irqs.h> #include <mach/board.h> -#include <linux/delay.h> - #include <mach/msm_fb.h> #include "mddi_hw.h" diff --git a/drivers/video/omap/blizzard.c b/drivers/video/omap/blizzard.c index d5e59556f9e..70dadf9d233 100644 --- a/drivers/video/omap/blizzard.c +++ b/drivers/video/omap/blizzard.c @@ -93,7 +93,7 @@ struct blizzard_reg_list { }; /* These need to be saved / restored separately from the rest. 
*/ -static struct blizzard_reg_list blizzard_pll_regs[] = { +static const struct blizzard_reg_list blizzard_pll_regs[] = { { .start = 0x04, /* Don't save PLL ctrl (0x0C) */ .end = 0x0a, @@ -104,7 +104,7 @@ static struct blizzard_reg_list blizzard_pll_regs[] = { }, }; -static struct blizzard_reg_list blizzard_gen_regs[] = { +static const struct blizzard_reg_list blizzard_gen_regs[] = { { .start = 0x18, /* SDRAM control */ .end = 0x20, @@ -191,7 +191,7 @@ struct blizzard_struct { struct omapfb_device *fbdev; struct lcd_ctrl_extif *extif; - struct lcd_ctrl *int_ctrl; + const struct lcd_ctrl *int_ctrl; void (*power_up)(struct device *dev); void (*power_down)(struct device *dev); @@ -1372,7 +1372,7 @@ static void blizzard_get_caps(int plane, struct omapfb_caps *caps) (1 << OMAPFB_COLOR_YUV420); } -static void _save_regs(struct blizzard_reg_list *list, int cnt) +static void _save_regs(const struct blizzard_reg_list *list, int cnt) { int i; @@ -1383,7 +1383,7 @@ static void _save_regs(struct blizzard_reg_list *list, int cnt) } } -static void _restore_regs(struct blizzard_reg_list *list, int cnt) +static void _restore_regs(const struct blizzard_reg_list *list, int cnt) { int i; diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c index 125e605b8c6..0d0c8c8b9b5 100644 --- a/drivers/video/omap/omapfb_main.c +++ b/drivers/video/omap/omapfb_main.c @@ -393,7 +393,7 @@ static void omapfb_sync(struct fb_info *fbi) * Set fb_info.fix fields and also updates fbdev. * When calling this fb_info.var must be set up already. */ -static void set_fb_fix(struct fb_info *fbi) +static void set_fb_fix(struct fb_info *fbi, int from_init) { struct fb_fix_screeninfo *fix = &fbi->fix; struct fb_var_screeninfo *var = &fbi->var; @@ -403,10 +403,16 @@ static void set_fb_fix(struct fb_info *fbi) rg = &plane->fbdev->mem_desc.region[plane->idx]; fbi->screen_base = rg->vaddr; - mutex_lock(&fbi->mm_lock); - fix->smem_start = rg->paddr; - fix->smem_len = rg->size; - mutex_unlock(&fbi->mm_lock); + + if (!from_init) { + mutex_lock(&fbi->mm_lock); + fix->smem_start = rg->paddr; + fix->smem_len = rg->size; + mutex_unlock(&fbi->mm_lock); + } else { + fix->smem_start = rg->paddr; + fix->smem_len = rg->size; + } fix->type = FB_TYPE_PACKED_PIXELS; bpp = var->bits_per_pixel; @@ -704,7 +710,7 @@ static int omapfb_set_par(struct fb_info *fbi) int r = 0; omapfb_rqueue_lock(fbdev); - set_fb_fix(fbi); + set_fb_fix(fbi, 0); r = ctrl_change_mode(fbi); omapfb_rqueue_unlock(fbdev); @@ -904,7 +910,7 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi) if (old_size != size) { if (size) { memcpy(&fbi->var, new_var, sizeof(fbi->var)); - set_fb_fix(fbi); + set_fb_fix(fbi, 0); } else { /* * Set these explicitly to indicate that the @@ -1504,7 +1510,7 @@ static int fbinfo_init(struct omapfb_device *fbdev, struct fb_info *info) var->bits_per_pixel = fbdev->panel->bpp; set_fb_var(info, var); - set_fb_fix(info); + set_fb_fix(info, 1); r = fb_alloc_cmap(&info->cmap, 16, 0); if (r != 0) diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c index e98baf6916b..e35232a1857 100644 --- a/drivers/video/uvesafb.c +++ b/drivers/video/uvesafb.c @@ -67,11 +67,14 @@ static DEFINE_MUTEX(uvfb_lock); * find the kernel part of the task struct, copy the registers and * the buffer contents and then complete the task. 
*/ -static void uvesafb_cn_callback(struct cn_msg *msg) +static void uvesafb_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp) { struct uvesafb_task *utask; struct uvesafb_ktask *task; + if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) + return; + if (msg->seq >= UVESAFB_TASKS_MAX) return; diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c index df52cb355f7..406caa6a71c 100644 --- a/drivers/w1/masters/ds2482.c +++ b/drivers/w1/masters/ds2482.c @@ -24,19 +24,6 @@ #include "../w1_int.h" /** - * Address is selected using 2 pins, resulting in 4 possible addresses. - * 0x18, 0x19, 0x1a, 0x1b - * However, the chip cannot be detected without doing an i2c write, - * so use the force module parameter. - */ -static const unsigned short normal_i2c[] = { I2C_CLIENT_END }; - -/** - * Insmod parameters - */ -I2C_CLIENT_INSMOD_1(ds2482); - -/** * The DS2482 registers - there are 3 registers that are addressed by a read * pointer. The read pointer is set by the last command executed. * @@ -96,8 +83,6 @@ static const u8 ds2482_chan_rd[8] = static int ds2482_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int ds2482_detect(struct i2c_client *client, int kind, - struct i2c_board_info *info); static int ds2482_remove(struct i2c_client *client); @@ -117,8 +102,6 @@ static struct i2c_driver ds2482_driver = { .probe = ds2482_probe, .remove = ds2482_remove, .id_table = ds2482_id, - .detect = ds2482_detect, - .address_data = &addr_data, }; /* @@ -425,19 +408,6 @@ static u8 ds2482_w1_reset_bus(void *data) } -static int ds2482_detect(struct i2c_client *client, int kind, - struct i2c_board_info *info) -{ - if (!i2c_check_functionality(client->adapter, - I2C_FUNC_SMBUS_WRITE_BYTE_DATA | - I2C_FUNC_SMBUS_BYTE)) - return -ENODEV; - - strlcpy(info->type, "ds2482", I2C_NAME_SIZE); - - return 0; -} - static int ds2482_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -446,6 +416,11 @@ static int ds2482_probe(struct i2c_client *client, int temp1; int idx; + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_WRITE_BYTE_DATA | + I2C_FUNC_SMBUS_BYTE)) + return -ENODEV; + if (!(data = kzalloc(sizeof(struct ds2482_data), GFP_KERNEL))) { err = -ENOMEM; goto exit; diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c index 52ccb3d3a96..45c126fea31 100644 --- a/drivers/w1/w1_netlink.c +++ b/drivers/w1/w1_netlink.c @@ -306,7 +306,7 @@ static int w1_netlink_send_error(struct cn_msg *rcmsg, struct w1_netlink_msg *rm return error; } -static void w1_cn_callback(struct cn_msg *msg) +static void w1_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp) { struct w1_netlink_msg *m = (struct w1_netlink_msg *)(msg + 1); struct w1_netlink_cmd *cmd; diff --git a/drivers/xen/xenfs/xenbus.c b/drivers/xen/xenfs/xenbus.c index a9592d981b1..6c4269b836b 100644 --- a/drivers/xen/xenfs/xenbus.c +++ b/drivers/xen/xenfs/xenbus.c @@ -43,6 +43,7 @@ #include <linux/fs.h> #include <linux/poll.h> #include <linux/mutex.h> +#include <linux/sched.h> #include <linux/spinlock.h> #include <linux/mount.h> #include <linux/pagemap.h> |
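A change that recurs across several hunks above (dst/dcore.c, pohmelfs/config.c, video/uvesafb.c, w1/w1_netlink.c) is that connector callbacks now receive the sender's struct netlink_skb_parms and reject privileged requests unless the sender holds CAP_SYS_ADMIN. Below is a minimal, hedged sketch of that pattern for a hypothetical connector user; the example_* names and the cb_id values are invented for illustration, and the registration is written on the assumption that cn_add_callback() accepts the two-argument callback form used in the hunks above.

#include <linux/capability.h>
#include <linux/connector.h>
#include <linux/kernel.h>
#include <linux/module.h>

/* Hypothetical connector id for this sketch only. */
static struct cb_id example_cn_id = { .idx = 0x7fff, .val = 0x1 };

static void example_cn_callback(struct cn_msg *msg,
				struct netlink_skb_parms *nsp)
{
	/*
	 * Refuse configuration requests from unprivileged senders,
	 * mirroring the cap_raised() checks added in the diffs above.
	 */
	if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN))
		return;

	pr_debug("example: seq %u, ack %u, len %u\n",
		 msg->seq, msg->ack, msg->len);
}

static int __init example_cn_init(void)
{
	return cn_add_callback(&example_cn_id, "example", example_cn_callback);
}

static void __exit example_cn_exit(void)
{
	cn_del_callback(&example_cn_id);
}

module_init(example_cn_init);
module_exit(example_cn_exit);
MODULE_LICENSE("GPL");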