Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/compiler.h          6
-rw-r--r--  include/linux/completion.h       18
-rw-r--r--  include/linux/lguest.h            4
-rw-r--r--  include/linux/lguest_launcher.h  24
-rw-r--r--  include/linux/pci_ids.h           4
-rw-r--r--  include/linux/sched.h            37
6 files changed, 51 insertions, 42 deletions
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index c811c8b979a..c68b67b86ef 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -101,6 +101,12 @@ extern void __chk_io_ptr(const volatile void __iomem *);
#undef __must_check
#define __must_check
#endif
+#ifndef CONFIG_ENABLE_WARN_DEPRECATED
+#undef __deprecated
+#undef __deprecated_for_modules
+#define __deprecated
+#define __deprecated_for_modules
+#endif
/*
* Allow us to avoid 'defined but not used' warnings on functions and data,
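The hunk above makes __deprecated and __deprecated_for_modules expand to nothing unless CONFIG_ENABLE_WARN_DEPRECATED is set, so deprecation warnings become opt-in at configure time. A minimal sketch of the effect, with a hypothetical function name:

/* Sketch only: old_api() is made up for illustration.  With
 * CONFIG_ENABLE_WARN_DEPRECATED=y the __deprecated below keeps its
 * compiler-attribute definition and every caller gets a build warning;
 * without it, the redefinition above turns the marker into a no-op. */
extern int old_api(int arg) __deprecated;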
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 268c5a4a2bd..33d6aaf9444 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -42,15 +42,15 @@ static inline void init_completion(struct completion *x)
init_waitqueue_head(&x->wait);
}
-extern void FASTCALL(wait_for_completion(struct completion *));
-extern int FASTCALL(wait_for_completion_interruptible(struct completion *x));
-extern unsigned long FASTCALL(wait_for_completion_timeout(struct completion *x,
- unsigned long timeout));
-extern unsigned long FASTCALL(wait_for_completion_interruptible_timeout(
- struct completion *x, unsigned long timeout));
-
-extern void FASTCALL(complete(struct completion *));
-extern void FASTCALL(complete_all(struct completion *));
+extern void wait_for_completion(struct completion *);
+extern int wait_for_completion_interruptible(struct completion *x);
+extern unsigned long wait_for_completion_timeout(struct completion *x,
+ unsigned long timeout);
+extern unsigned long wait_for_completion_interruptible_timeout(
+ struct completion *x, unsigned long timeout);
+
+extern void complete(struct completion *);
+extern void complete_all(struct completion *);
#define INIT_COMPLETION(x) ((x).done = 0)
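The hunk above only strips the obsolete FASTCALL() wrappers; the completion API itself is unchanged. For reference, typical usage of the declarations shown, as a minimal sketch (the waiter/waker function names are illustrative):

#include <linux/completion.h>

static DECLARE_COMPLETION(setup_done);

/* Waiter: sleeps until the event is signalled. */
static void wait_for_setup(void)
{
	wait_for_completion(&setup_done);
}

/* Waker: wakes one waiter; complete_all() would wake them all. */
static void setup_finished(void)
{
	complete(&setup_done);
}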
diff --git a/include/linux/lguest.h b/include/linux/lguest.h
index 8beb2913462..175e63f4a8c 100644
--- a/include/linux/lguest.h
+++ b/include/linux/lguest.h
@@ -12,8 +12,8 @@
#define LG_CLOCK_MAX_DELTA ULONG_MAX
/*G:032 The second method of communicating with the Host is via "struct
- * lguest_data". The Guest's very first hypercall is to tell the Host where
- * this is, and then the Guest and Host both publish information in it. :*/
+ * lguest_data". Once the Guest's initialization hypercall tells the Host where
+ * this is, the Guest and Host both publish information in it. :*/
struct lguest_data
{
/* 512 == enabled (same as eflags in normal hardware). The Guest
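The reworded comment describes the shared-page model: the Guest registers its lguest_data once, then both sides simply read and write fields in it. On the Guest side that registration is a single hypercall, roughly like this (a sketch; hcall() and LHCALL_LGUEST_INIT are taken from the lguest boot code of this era and are assumptions here, not part of this diff):

/* Sketch: hand the Host the physical address of our lguest_data.
 * After this one call, further Guest<->Host communication happens by
 * publishing values in the structure instead of trapping each time. */
hcall(LHCALL_LGUEST_INIT, __pa(&lguest_data), 0, 0);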
diff --git a/include/linux/lguest_launcher.h b/include/linux/lguest_launcher.h
index 61e1e3e6b1c..697104da91f 100644
--- a/include/linux/lguest_launcher.h
+++ b/include/linux/lguest_launcher.h
@@ -1,17 +1,7 @@
-#ifndef _ASM_LGUEST_USER
-#define _ASM_LGUEST_USER
+#ifndef _LINUX_LGUEST_LAUNCHER
+#define _LINUX_LGUEST_LAUNCHER
/* Everything the "lguest" userspace program needs to know. */
#include <linux/types.h>
-/* They can register up to 32 arrays of lguest_dma. */
-#define LGUEST_MAX_DMA 32
-/* At most we can dma 16 lguest_dma in one op. */
-#define LGUEST_MAX_DMA_SECTIONS 16
-
-/* How many devices? Assume each one wants up to two dma arrays per device. */
-#define LGUEST_MAX_DEVICES (LGUEST_MAX_DMA/2)
-
-/* Where the Host expects the Guest to SEND_DMA console output to. */
-#define LGUEST_CONSOLE_DMA_KEY 0
/*D:010
* Drivers
@@ -20,7 +10,11 @@
* real devices (think of the damage it could do!) we provide virtual devices.
* We could emulate a PCI bus with various devices on it, but that is a fairly
* complex burden for the Host and suboptimal for the Guest, so we have our own
- * "lguest" bus and simple drivers.
+ * simple lguest bus and we use "virtio" drivers. These drivers need a set of
+ * routines from us which will actually do the virtual I/O, but they handle all
+ * the net/block/console stuff themselves. This means that if we want to add
+ * a new device, we simply need to write a new virtio driver and create support
+ * for it in the Launcher: this code won't need to change.
*
* Devices are described by a simplified ID, a status byte, and some "config"
* bytes which describe this device's configuration. This is placed by the
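The expanded comment (truncated here by the diff context) explains the design: a minimal lguest bus carrying virtio drivers, with each device described to the Guest by a type ID, a status byte and a block of config bytes laid out by the Launcher. A hedged sketch of what such a descriptor looks like; the field names and widths below are illustrative, not copied from the header:

/* Sketch: the kind of per-device record the Launcher lays out for the
 * Guest to scan.  Real code uses the descriptor defined in this header. */
struct example_device_desc {
	__u16 type;		/* which virtio device this is */
	__u16 config_len;	/* number of config bytes that follow */
	__u16 status;		/* status byte, written by the Guest */
	__u8  config[];		/* device-specific configuration */
};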
@@ -51,9 +45,9 @@ struct lguest_vqconfig {
/* Write command first word is a request. */
enum lguest_req
{
- LHREQ_INITIALIZE, /* + pfnlimit, pgdir, start, pageoffset */
+ LHREQ_INITIALIZE, /* + base, pfnlimit, pgdir, start */
LHREQ_GETDMA, /* No longer used */
LHREQ_IRQ, /* + irq */
LHREQ_BREAK, /* + on/off flag (on blocks until someone does off) */
};
-#endif /* _ASM_LGUEST_USER */
+#endif /* _LINUX_LGUEST_LAUNCHER */
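The lguest_req values above are the commands the Launcher writes to its /dev/lguest file descriptor; the first word of each write names the request and the rest are its arguments. A minimal sketch of injecting an interrupt with LHREQ_IRQ (the fd setup is assumed, not shown in this diff):

#include <unistd.h>

/* Sketch: queue interrupt 'irq' for the Guest by writing
 * { LHREQ_IRQ, irq } to the /dev/lguest file descriptor. */
static int trigger_irq(int lguest_fd, unsigned long irq)
{
	unsigned long buf[] = { LHREQ_IRQ, irq };

	return write(lguest_fd, buf, sizeof(buf)) == sizeof(buf) ? 0 : -1;
}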
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 4e10a074ca5..e44aac8cf5f 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1236,6 +1236,10 @@
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE 0x0560
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE 0x056C
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE 0x0759
+#define PCI_DEVICE_ID_NVIDIA_NVENET_32 0x0760
+#define PCI_DEVICE_ID_NVIDIA_NVENET_33 0x0761
+#define PCI_DEVICE_ID_NVIDIA_NVENET_34 0x0762
+#define PCI_DEVICE_ID_NVIDIA_NVENET_35 0x0763
#define PCI_VENDOR_ID_IMS 0x10e0
#define PCI_DEVICE_ID_IMS_TT128 0x9128
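The four new NVENET defines simply extend the existing NVIDIA ID list. A driver picks them up through its PCI ID table, roughly like this (a sketch, not a quote from the driver that consumes these IDs):

/* Sketch: matching one of the new NVENET Ethernet IDs so the PCI core
 * will bind this driver to the device. */
static const struct pci_device_id example_pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32), },
	{ 0, },	/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_ids);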
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 13df99fb276..24e08d1d900 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -828,12 +828,17 @@ struct sched_class {
struct task_struct * (*pick_next_task) (struct rq *rq);
void (*put_prev_task) (struct rq *rq, struct task_struct *p);
+#ifdef CONFIG_SMP
unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
- struct rq *busiest,
- unsigned long max_nr_move, unsigned long max_load_move,
+ struct rq *busiest, unsigned long max_load_move,
struct sched_domain *sd, enum cpu_idle_type idle,
int *all_pinned, int *this_best_prio);
+ int (*move_one_task) (struct rq *this_rq, int this_cpu,
+ struct rq *busiest, struct sched_domain *sd,
+ enum cpu_idle_type idle);
+#endif
+
void (*set_curr_task) (struct rq *rq);
void (*task_tick) (struct rq *rq, struct task_struct *p);
void (*task_new) (struct rq *rq, struct task_struct *p);
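The hunk above confines the balancing callbacks to CONFIG_SMP, drops the max_nr_move argument from load_balance(), and adds a separate move_one_task() hook for pulling a single task. The new hook's shape, as a sketch only (a real scheduling class would scan busiest for a migratable task rather than return immediately):

#ifdef CONFIG_SMP
/* Sketch: signature of the new per-class hook.  Returns non-zero if a
 * task was moved from 'busiest' to 'this_rq'. */
static int example_move_one_task(struct rq *this_rq, int this_cpu,
				 struct rq *busiest, struct sched_domain *sd,
				 enum cpu_idle_type idle)
{
	return 0;	/* nothing moved in this illustration */
}
#endif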
@@ -1196,7 +1201,7 @@ static inline int rt_prio(int prio)
return 0;
}
-static inline int rt_task(struct task_struct *p)
+static inline int rt_task(const struct task_struct *p)
{
return rt_prio(p->prio);
}
@@ -1211,22 +1216,22 @@ static inline void set_task_pgrp(struct task_struct *tsk, pid_t pgrp)
tsk->signal->__pgrp = pgrp;
}
-static inline struct pid *task_pid(struct task_struct *task)
+static inline struct pid *task_pid(const struct task_struct *task)
{
return task->pids[PIDTYPE_PID].pid;
}
-static inline struct pid *task_tgid(struct task_struct *task)
+static inline struct pid *task_tgid(const struct task_struct *task)
{
return task->group_leader->pids[PIDTYPE_PID].pid;
}
-static inline struct pid *task_pgrp(struct task_struct *task)
+static inline struct pid *task_pgrp(const struct task_struct *task)
{
return task->group_leader->pids[PIDTYPE_PGID].pid;
}
-static inline struct pid *task_session(struct task_struct *task)
+static inline struct pid *task_session(const struct task_struct *task)
{
return task->group_leader->pids[PIDTYPE_SID].pid;
}
@@ -1255,7 +1260,7 @@ struct pid_namespace;
* see also pid_nr() etc in include/linux/pid.h
*/
-static inline pid_t task_pid_nr(struct task_struct *tsk)
+static inline pid_t task_pid_nr(const struct task_struct *tsk)
{
return tsk->pid;
}
@@ -1268,7 +1273,7 @@ static inline pid_t task_pid_vnr(struct task_struct *tsk)
}
-static inline pid_t task_tgid_nr(struct task_struct *tsk)
+static inline pid_t task_tgid_nr(const struct task_struct *tsk)
{
return tsk->tgid;
}
@@ -1281,7 +1286,7 @@ static inline pid_t task_tgid_vnr(struct task_struct *tsk)
}
-static inline pid_t task_pgrp_nr(struct task_struct *tsk)
+static inline pid_t task_pgrp_nr(const struct task_struct *tsk)
{
return tsk->signal->__pgrp;
}
@@ -1294,7 +1299,7 @@ static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
}
-static inline pid_t task_session_nr(struct task_struct *tsk)
+static inline pid_t task_session_nr(const struct task_struct *tsk)
{
return tsk->signal->__session;
}
@@ -1321,7 +1326,7 @@ static inline pid_t task_ppid_nr_ns(struct task_struct *tsk,
* If pid_alive fails, then pointers within the task structure
* can be stale and must not be dereferenced.
*/
-static inline int pid_alive(struct task_struct *p)
+static inline int pid_alive(const struct task_struct *p)
{
return p->pids[PIDTYPE_PID].pid != NULL;
}
@@ -1332,7 +1337,7 @@ static inline int pid_alive(struct task_struct *p)
*
* Check if a task structure is the first user space task the kernel created.
*/
-static inline int is_global_init(struct task_struct *tsk)
+static inline int is_global_init(const struct task_struct *tsk)
{
return tsk->pid == 1;
}
@@ -1469,7 +1474,7 @@ extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
extern void rt_mutex_adjust_pi(struct task_struct *p);
#else
-static inline int rt_mutex_getprio(struct task_struct *p)
+static inline int rt_mutex_getprio(const struct task_struct *p)
{
return p->normal_prio;
}
@@ -1721,7 +1726,7 @@ extern void wait_task_inactive(struct task_struct * p);
* all we care about is that we have a task with the appropriate
* pid, we don't actually care if we have the right task.
*/
-static inline int has_group_leader_pid(struct task_struct *p)
+static inline int has_group_leader_pid(const struct task_struct *p)
{
return p->pid == p->tgid;
}
@@ -1738,7 +1743,7 @@ static inline struct task_struct *next_thread(const struct task_struct *p)
struct task_struct, thread_group);
}
-static inline int thread_group_empty(struct task_struct *p)
+static inline int thread_group_empty(const struct task_struct *p)
{
return list_empty(&p->thread_group);
}
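The remaining hunks all make the same change: the read-only task_struct helpers now take const pointers. Code that only inspects a task can therefore hold a const pointer end to end; a minimal sketch (the helper below is hypothetical):

/* Sketch: with the const-qualified prototypes above, an inspection-only
 * helper can take a const task pointer and still use the accessors
 * without casting away const. */
static inline int example_is_rt_with_pid(const struct task_struct *p, pid_t nr)
{
	return rt_task(p) && task_pid_nr(p) == nr;
}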