Diffstat (limited to 'include')
32 files changed, 833 insertions, 55 deletions
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h new file mode 100644 index 00000000000..78b236ca04f --- /dev/null +++ b/include/linux/dmaengine.h @@ -0,0 +1,359 @@ +/* + * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * The full GNU General Public License is included in this distribution in the + * file called COPYING. + */ +#ifndef DMAENGINE_H +#define DMAENGINE_H +#include <linux/config.h> +#ifdef CONFIG_DMA_ENGINE + +#include <linux/device.h> +#include <linux/uio.h> +#include <linux/kref.h> +#include <linux/completion.h> +#include <linux/rcupdate.h> + +/** + * enum dma_event - resource PNP/power managment events + * @DMA_RESOURCE_SUSPEND: DMA device going into low power state + * @DMA_RESOURCE_RESUME: DMA device returning to full power + * @DMA_RESOURCE_ADDED: DMA device added to the system + * @DMA_RESOURCE_REMOVED: DMA device removed from the system + */ +enum dma_event { + DMA_RESOURCE_SUSPEND, + DMA_RESOURCE_RESUME, + DMA_RESOURCE_ADDED, + DMA_RESOURCE_REMOVED, +}; + +/** + * typedef dma_cookie_t + * + * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code + */ +typedef s32 dma_cookie_t; + +#define dma_submit_error(cookie) ((cookie) < 0 ? 
1 : 0) + +/** + * enum dma_status - DMA transaction status + * @DMA_SUCCESS: transaction completed successfully + * @DMA_IN_PROGRESS: transaction not yet processed + * @DMA_ERROR: transaction failed + */ +enum dma_status { + DMA_SUCCESS, + DMA_IN_PROGRESS, + DMA_ERROR, +}; + +/** + * struct dma_chan_percpu - the per-CPU part of struct dma_chan + * @refcount: local_t used for open-coded "bigref" counting + * @memcpy_count: transaction counter + * @bytes_transferred: byte counter + */ + +struct dma_chan_percpu { + local_t refcount; + /* stats */ + unsigned long memcpy_count; + unsigned long bytes_transferred; +}; + +/** + * struct dma_chan - devices supply DMA channels, clients use them + * @client: ptr to the client user of this chan, will be NULL when unused + * @device: ptr to the dma device who supplies this channel, always !NULL + * @cookie: last cookie value returned to client + * @chan_id: + * @class_dev: + * @refcount: kref, used in "bigref" slow-mode + * @slow_ref: + * @rcu: + * @client_node: used to add this to the client chan list + * @device_node: used to add this to the device chan list + * @local: per-cpu pointer to a struct dma_chan_percpu + */ +struct dma_chan { + struct dma_client *client; + struct dma_device *device; + dma_cookie_t cookie; + + /* sysfs */ + int chan_id; + struct class_device class_dev; + + struct kref refcount; + int slow_ref; + struct rcu_head rcu; + + struct list_head client_node; + struct list_head device_node; + struct dma_chan_percpu *local; +}; + +void dma_chan_cleanup(struct kref *kref); + +static inline void dma_chan_get(struct dma_chan *chan) +{ + if (unlikely(chan->slow_ref)) + kref_get(&chan->refcount); + else { + local_inc(&(per_cpu_ptr(chan->local, get_cpu())->refcount)); + put_cpu(); + } +} + +static inline void dma_chan_put(struct dma_chan *chan) +{ + if (unlikely(chan->slow_ref)) + kref_put(&chan->refcount, dma_chan_cleanup); + else { + local_dec(&(per_cpu_ptr(chan->local, get_cpu())->refcount)); + put_cpu(); + } +} + +/* + * typedef dma_event_callback - function pointer to a DMA event callback + */ +typedef void (*dma_event_callback) (struct dma_client *client, + struct dma_chan *chan, enum dma_event event); + +/** + * struct dma_client - info on the entity making use of DMA services + * @event_callback: func ptr to call when something happens + * @chan_count: number of chans allocated + * @chans_desired: number of chans requested. 
Can be +/- chan_count + * @lock: protects access to the channels list + * @channels: the list of DMA channels allocated + * @global_node: list_head for global dma_client_list + */ +struct dma_client { + dma_event_callback event_callback; + unsigned int chan_count; + unsigned int chans_desired; + + spinlock_t lock; + struct list_head channels; + struct list_head global_node; +}; + +/** + * struct dma_device - info on the entity supplying DMA services + * @chancnt: how many DMA channels are supported + * @channels: the list of struct dma_chan + * @global_node: list_head for global dma_device_list + * @refcount: + * @done: + * @dev_id: + * Other func ptrs: used to make use of this device's capabilities + */ +struct dma_device { + + unsigned int chancnt; + struct list_head channels; + struct list_head global_node; + + struct kref refcount; + struct completion done; + + int dev_id; + + int (*device_alloc_chan_resources)(struct dma_chan *chan); + void (*device_free_chan_resources)(struct dma_chan *chan); + dma_cookie_t (*device_memcpy_buf_to_buf)(struct dma_chan *chan, + void *dest, void *src, size_t len); + dma_cookie_t (*device_memcpy_buf_to_pg)(struct dma_chan *chan, + struct page *page, unsigned int offset, void *kdata, + size_t len); + dma_cookie_t (*device_memcpy_pg_to_pg)(struct dma_chan *chan, + struct page *dest_pg, unsigned int dest_off, + struct page *src_pg, unsigned int src_off, size_t len); + enum dma_status (*device_memcpy_complete)(struct dma_chan *chan, + dma_cookie_t cookie, dma_cookie_t *last, + dma_cookie_t *used); + void (*device_memcpy_issue_pending)(struct dma_chan *chan); +}; + +/* --- public DMA engine API --- */ + +struct dma_client *dma_async_client_register(dma_event_callback event_callback); +void dma_async_client_unregister(struct dma_client *client); +void dma_async_client_chan_request(struct dma_client *client, + unsigned int number); + +/** + * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses + * @chan: DMA channel to offload copy to + * @dest: destination address (virtual) + * @src: source address (virtual) + * @len: length + * + * Both @dest and @src must be mappable to a bus address according to the + * DMA mapping API rules for streaming mappings. + * Both @dest and @src must stay memory resident (kernel memory or locked + * user space pages) + */ +static inline dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, + void *dest, void *src, size_t len) +{ + int cpu = get_cpu(); + per_cpu_ptr(chan->local, cpu)->bytes_transferred += len; + per_cpu_ptr(chan->local, cpu)->memcpy_count++; + put_cpu(); + + return chan->device->device_memcpy_buf_to_buf(chan, dest, src, len); +} + +/** + * dma_async_memcpy_buf_to_pg - offloaded copy + * @chan: DMA channel to offload copy to + * @page: destination page + * @offset: offset in page to copy to + * @kdata: source address (virtual) + * @len: length + * + * Both @page/@offset and @kdata must be mappable to a bus address according + * to the DMA mapping API rules for streaming mappings. 
+ * Both @page/@offset and @kdata must stay memory resident (kernel memory or + * locked user space pages) + */ +static inline dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan, + struct page *page, unsigned int offset, void *kdata, size_t len) +{ + int cpu = get_cpu(); + per_cpu_ptr(chan->local, cpu)->bytes_transferred += len; + per_cpu_ptr(chan->local, cpu)->memcpy_count++; + put_cpu(); + + return chan->device->device_memcpy_buf_to_pg(chan, page, offset, + kdata, len); +} + +/** + * dma_async_memcpy_buf_to_pg - offloaded copy + * @chan: DMA channel to offload copy to + * @dest_page: destination page + * @dest_off: offset in page to copy to + * @src_page: source page + * @src_off: offset in page to copy from + * @len: length + * + * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus + * address according to the DMA mapping API rules for streaming mappings. + * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident + * (kernel memory or locked user space pages) + */ +static inline dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan, + struct page *dest_pg, unsigned int dest_off, struct page *src_pg, + unsigned int src_off, size_t len) +{ + int cpu = get_cpu(); + per_cpu_ptr(chan->local, cpu)->bytes_transferred += len; + per_cpu_ptr(chan->local, cpu)->memcpy_count++; + put_cpu(); + + return chan->device->device_memcpy_pg_to_pg(chan, dest_pg, dest_off, + src_pg, src_off, len); +} + +/** + * dma_async_memcpy_issue_pending - flush pending copies to HW + * @chan: + * + * This allows drivers to push copies to HW in batches, + * reducing MMIO writes where possible. + */ +static inline void dma_async_memcpy_issue_pending(struct dma_chan *chan) +{ + return chan->device->device_memcpy_issue_pending(chan); +} + +/** + * dma_async_memcpy_complete - poll for transaction completion + * @chan: DMA channel + * @cookie: transaction identifier to check status of + * @last: returns last completed cookie, can be NULL + * @used: returns last issued cookie, can be NULL + * + * If @last and @used are passed in, upon return they reflect the driver + * internal state and can be used with dma_async_is_complete() to check + * the status of multiple cookies without re-checking hardware state. 
+ */ +static inline enum dma_status dma_async_memcpy_complete(struct dma_chan *chan, + dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used) +{ + return chan->device->device_memcpy_complete(chan, cookie, last, used); +} + +/** + * dma_async_is_complete - test a cookie against chan state + * @cookie: transaction identifier to test status of + * @last_complete: last know completed transaction + * @last_used: last cookie value handed out + * + * dma_async_is_complete() is used in dma_async_memcpy_complete() + * the test logic is seperated for lightweight testing of multiple cookies + */ +static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie, + dma_cookie_t last_complete, dma_cookie_t last_used) +{ + if (last_complete <= last_used) { + if ((cookie <= last_complete) || (cookie > last_used)) + return DMA_SUCCESS; + } else { + if ((cookie <= last_complete) && (cookie > last_used)) + return DMA_SUCCESS; + } + return DMA_IN_PROGRESS; +} + + +/* --- DMA device --- */ + +int dma_async_device_register(struct dma_device *device); +void dma_async_device_unregister(struct dma_device *device); + +/* --- Helper iov-locking functions --- */ + +struct dma_page_list { + char *base_address; + int nr_pages; + struct page **pages; +}; + +struct dma_pinned_list { + int nr_iovecs; + struct dma_page_list page_list[0]; +}; + +struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len); +void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list); + +dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov, + struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len); +dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov, + struct dma_pinned_list *pinned_list, struct page *page, + unsigned int offset, size_t len); + +#endif /* CONFIG_DMA_ENGINE */ +#endif /* DMAENGINE_H */ diff --git a/include/linux/igmp.h b/include/linux/igmp.h index 28f4f3b3695..899c3d4776f 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h @@ -169,7 +169,7 @@ struct ip_sf_list struct ip_mc_list { struct in_device *interface; - unsigned long multiaddr; + __be32 multiaddr; struct ip_sf_list *sources; struct ip_sf_list *tomb; unsigned int sfmode; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index f4169bbb60e..e432b743dda 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -37,6 +37,7 @@ #include <linux/config.h> #include <linux/device.h> #include <linux/percpu.h> +#include <linux/dmaengine.h> struct divert_blk; struct vlan_group; @@ -311,6 +312,9 @@ struct net_device #define NETIF_F_LLTX 4096 /* LockLess TX */ #define NETIF_F_UFO 8192 /* Can offload UDP Large Send*/ +#define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM) +#define NETIF_F_ALL_CSUM (NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM) + struct net_device *next_sched; /* Interface index. Unique device identifier */ @@ -406,7 +410,7 @@ struct net_device * One part is mostly used on xmit path (device) */ /* hard_start_xmit synchronizer */ - spinlock_t xmit_lock ____cacheline_aligned_in_smp; + spinlock_t _xmit_lock ____cacheline_aligned_in_smp; /* cpu id of processor entered to hard_start_xmit or -1, if nobody entered there. */ @@ -593,6 +597,9 @@ struct softnet_data struct sk_buff *completion_queue; struct net_device backlog_dev; /* Sorry. 
8) */ +#ifdef CONFIG_NET_DMA + struct dma_chan *net_dma; +#endif }; DECLARE_PER_CPU(struct softnet_data,softnet_data); @@ -889,11 +896,43 @@ static inline void __netif_rx_complete(struct net_device *dev) clear_bit(__LINK_STATE_RX_SCHED, &dev->state); } +static inline void netif_tx_lock(struct net_device *dev) +{ + spin_lock(&dev->_xmit_lock); + dev->xmit_lock_owner = smp_processor_id(); +} + +static inline void netif_tx_lock_bh(struct net_device *dev) +{ + spin_lock_bh(&dev->_xmit_lock); + dev->xmit_lock_owner = smp_processor_id(); +} + +static inline int netif_tx_trylock(struct net_device *dev) +{ + int err = spin_trylock(&dev->_xmit_lock); + if (!err) + dev->xmit_lock_owner = smp_processor_id(); + return err; +} + +static inline void netif_tx_unlock(struct net_device *dev) +{ + dev->xmit_lock_owner = -1; + spin_unlock(&dev->_xmit_lock); +} + +static inline void netif_tx_unlock_bh(struct net_device *dev) +{ + dev->xmit_lock_owner = -1; + spin_unlock_bh(&dev->_xmit_lock); +} + static inline void netif_tx_disable(struct net_device *dev) { - spin_lock_bh(&dev->xmit_lock); + netif_tx_lock_bh(dev); netif_stop_queue(dev); - spin_unlock_bh(&dev->xmit_lock); + netif_tx_unlock_bh(dev); } /* These functions live elsewhere (drivers/net/net_init.c, but related) */ diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h index 3ff88c87830..d2e4bd7a7a1 100644 --- a/include/linux/netfilter/nf_conntrack_common.h +++ b/include/linux/netfilter/nf_conntrack_common.h @@ -69,6 +69,10 @@ enum ip_conntrack_status { /* Connection is dying (removed from lists), can not be unset. */ IPS_DYING_BIT = 9, IPS_DYING = (1 << IPS_DYING_BIT), + + /* Connection has fixed timeout. */ + IPS_FIXED_TIMEOUT_BIT = 10, + IPS_FIXED_TIMEOUT = (1 << IPS_FIXED_TIMEOUT_BIT), }; /* Connection tracking event bits */ diff --git a/include/linux/netfilter/nfnetlink_conntrack.h b/include/linux/netfilter/nfnetlink_conntrack.h index 668ec946c8e..b5883ccee29 100644 --- a/include/linux/netfilter/nfnetlink_conntrack.h +++ b/include/linux/netfilter/nfnetlink_conntrack.h @@ -27,13 +27,15 @@ enum ctattr_type { CTA_STATUS, CTA_PROTOINFO, CTA_HELP, - CTA_NAT, + CTA_NAT_SRC, +#define CTA_NAT CTA_NAT_SRC /* backwards compatibility */ CTA_TIMEOUT, CTA_MARK, CTA_COUNTERS_ORIG, CTA_COUNTERS_REPLY, CTA_USE, CTA_ID, + CTA_NAT_DST, __CTA_MAX }; #define CTA_MAX (__CTA_MAX - 1) diff --git a/include/linux/netfilter/xt_CONNSECMARK.h b/include/linux/netfilter/xt_CONNSECMARK.h new file mode 100644 index 00000000000..c6bd75469ba --- /dev/null +++ b/include/linux/netfilter/xt_CONNSECMARK.h @@ -0,0 +1,13 @@ +#ifndef _XT_CONNSECMARK_H_target +#define _XT_CONNSECMARK_H_target + +enum { + CONNSECMARK_SAVE = 1, + CONNSECMARK_RESTORE, +}; + +struct xt_connsecmark_target_info { + u_int8_t mode; +}; + +#endif /*_XT_CONNSECMARK_H_target */ diff --git a/include/linux/netfilter/xt_SECMARK.h b/include/linux/netfilter/xt_SECMARK.h new file mode 100644 index 00000000000..c53fbffa997 --- /dev/null +++ b/include/linux/netfilter/xt_SECMARK.h @@ -0,0 +1,26 @@ +#ifndef _XT_SECMARK_H_target +#define _XT_SECMARK_H_target + +/* + * This is intended for use by various security subsystems (but not + * at the same time). + * + * 'mode' refers to the specific security subsystem which the + * packets are being marked for. 
+ */ +#define SECMARK_MODE_SEL 0x01 /* SELinux */ +#define SECMARK_SELCTX_MAX 256 + +struct xt_secmark_target_selinux_info { + u_int32_t selsid; + char selctx[SECMARK_SELCTX_MAX]; +}; + +struct xt_secmark_target_info { + u_int8_t mode; + union { + struct xt_secmark_target_selinux_info sel; + } u; +}; + +#endif /*_XT_SECMARK_H_target */ diff --git a/include/linux/netfilter/xt_quota.h b/include/linux/netfilter/xt_quota.h new file mode 100644 index 00000000000..acd7fd77bbe --- /dev/null +++ b/include/linux/netfilter/xt_quota.h @@ -0,0 +1,16 @@ +#ifndef _XT_QUOTA_H +#define _XT_QUOTA_H + +enum xt_quota_flags { + XT_QUOTA_INVERT = 0x1, +}; +#define XT_QUOTA_MASK 0x1 + +struct xt_quota_info { + u_int32_t flags; + u_int32_t pad; + aligned_u64 quota; + struct xt_quota_info *master; +}; + +#endif /* _XT_QUOTA_H */ diff --git a/include/linux/netfilter/xt_statistic.h b/include/linux/netfilter/xt_statistic.h new file mode 100644 index 00000000000..c344e9916e2 --- /dev/null +++ b/include/linux/netfilter/xt_statistic.h @@ -0,0 +1,32 @@ +#ifndef _XT_STATISTIC_H +#define _XT_STATISTIC_H + +enum xt_statistic_mode { + XT_STATISTIC_MODE_RANDOM, + XT_STATISTIC_MODE_NTH, + __XT_STATISTIC_MODE_MAX +}; +#define XT_STATISTIC_MODE_MAX (__XT_STATISTIC_MODE_MAX - 1) + +enum xt_statistic_flags { + XT_STATISTIC_INVERT = 0x1, +}; +#define XT_STATISTIC_MASK 0x1 + +struct xt_statistic_info { + u_int16_t mode; + u_int16_t flags; + union { + struct { + u_int32_t probability; + } random; + struct { + u_int32_t every; + u_int32_t packet; + u_int32_t count; + } nth; + } u; + struct xt_statistic_info *master __attribute__((aligned(8))); +}; + +#endif /* _XT_STATISTIC_H */ diff --git a/include/linux/netfilter_ipv4/ip_conntrack.h b/include/linux/netfilter_ipv4/ip_conntrack.h index d54d7b278e9..e0e9951eb8c 100644 --- a/include/linux/netfilter_ipv4/ip_conntrack.h +++ b/include/linux/netfilter_ipv4/ip_conntrack.h @@ -121,6 +121,10 @@ struct ip_conntrack u_int32_t mark; #endif +#ifdef CONFIG_IP_NF_CONNTRACK_SECMARK + u_int32_t secmark; +#endif + /* Traversed often, so hopefully in different cacheline to top */ /* These are my tuples; original and reply */ struct ip_conntrack_tuple_hash tuplehash[IP_CT_DIR_MAX]; @@ -154,6 +158,7 @@ struct ip_conntrack_expect unsigned int flags; #ifdef CONFIG_IP_NF_NAT_NEEDED + u_int32_t saved_ip; /* This is the original per-proto part, used to map the * expected connection the way the recipient expects. 
*/ union ip_conntrack_manip_proto saved_proto; @@ -293,6 +298,7 @@ static inline int is_dying(struct ip_conntrack *ct) } extern unsigned int ip_conntrack_htable_size; +extern int ip_conntrack_checksum; #define CONNTRACK_STAT_INC(count) (__get_cpu_var(ip_conntrack_stat).count++) diff --git a/include/linux/netfilter_ipv4/ip_conntrack_h323.h b/include/linux/netfilter_ipv4/ip_conntrack_h323.h index eace86bd2ad..3cbff737900 100644 --- a/include/linux/netfilter_ipv4/ip_conntrack_h323.h +++ b/include/linux/netfilter_ipv4/ip_conntrack_h323.h @@ -71,6 +71,13 @@ extern int (*nat_h245_hook) (struct sk_buff ** pskb, struct ip_conntrack * ct, unsigned char **data, int dataoff, TransportAddress * addr, u_int16_t port, struct ip_conntrack_expect * exp); +extern int (*nat_callforwarding_hook) (struct sk_buff ** pskb, + struct ip_conntrack * ct, + enum ip_conntrack_info ctinfo, + unsigned char **data, int dataoff, + TransportAddress * addr, + u_int16_t port, + struct ip_conntrack_expect * exp); extern int (*nat_q931_hook) (struct sk_buff ** pskb, struct ip_conntrack * ct, enum ip_conntrack_info ctinfo, unsigned char **data, TransportAddress * addr, diff --git a/include/linux/netfilter_ipv4/ip_conntrack_helper_h323_types.h b/include/linux/netfilter_ipv4/ip_conntrack_helper_h323_types.h index cc98f7aa5ab..3d4a773799f 100644 --- a/include/linux/netfilter_ipv4/ip_conntrack_helper_h323_types.h +++ b/include/linux/netfilter_ipv4/ip_conntrack_helper_h323_types.h @@ -1,4 +1,4 @@ -/* Generated by Jing Min Zhao's ASN.1 parser, Mar 15 2006 +/* Generated by Jing Min Zhao's ASN.1 parser, Apr 20 2006 * * Copyright (c) 2006 Jing Min Zhao <zhaojingmin@users.sourceforge.net> * @@ -412,6 +412,7 @@ typedef struct Facility_UUIE { /* SEQUENCE */ eFacility_UUIE_destinationInfo = (1 << 14), eFacility_UUIE_h245SecurityMode = (1 << 13), } options; + TransportAddress alternativeAddress; FacilityReason reason; TransportAddress h245Address; Facility_UUIE_fastStart fastStart; diff --git a/include/linux/netfilter_ipv4/ip_conntrack_sip.h b/include/linux/netfilter_ipv4/ip_conntrack_sip.h new file mode 100644 index 00000000000..913dad66c0f --- /dev/null +++ b/include/linux/netfilter_ipv4/ip_conntrack_sip.h @@ -0,0 +1,44 @@ +#ifndef __IP_CONNTRACK_SIP_H__ +#define __IP_CONNTRACK_SIP_H__ +#ifdef __KERNEL__ + +#define SIP_PORT 5060 +#define SIP_TIMEOUT 3600 + +#define POS_VIA 0 +#define POS_CONTACT 1 +#define POS_CONTENT 2 +#define POS_MEDIA 3 +#define POS_OWNER 4 +#define POS_CONNECTION 5 +#define POS_REQ_HEADER 6 +#define POS_SDP_HEADER 7 + +struct sip_header_nfo { + const char *lname; + const char *sname; + const char *ln_str; + size_t lnlen; + size_t snlen; + size_t ln_strlen; + int (*match_len)(const char *, const char *, int *); +}; + +extern unsigned int (*ip_nat_sip_hook)(struct sk_buff **pskb, + enum ip_conntrack_info ctinfo, + struct ip_conntrack *ct, + const char **dptr); +extern unsigned int (*ip_nat_sdp_hook)(struct sk_buff **pskb, + enum ip_conntrack_info ctinfo, + struct ip_conntrack_expect *exp, + const char *dptr); + +extern int ct_sip_get_info(const char *dptr, size_t dlen, + unsigned int *matchoff, + unsigned int *matchlen, + struct sip_header_nfo *hnfo); +extern int ct_sip_lnlen(const char *line, const char *limit); +extern const char *ct_sip_search(const char *needle, const char *haystack, + size_t needle_len, size_t haystack_len); +#endif /* __KERNEL__ */ +#endif /* __IP_CONNTRACK_SIP_H__ */ diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index c3fe769c912..bcfe9d4f56a 100644 --- a/include/linux/pci_ids.h 
+++ b/include/linux/pci_ids.h @@ -1897,6 +1897,7 @@ #define PCI_DEVICE_ID_TIGON3_5751F 0x167e #define PCI_DEVICE_ID_TIGON3_5787M 0x1693 #define PCI_DEVICE_ID_TIGON3_5782 0x1696 +#define PCI_DEVICE_ID_TIGON3_5786 0x169a #define PCI_DEVICE_ID_TIGON3_5787 0x169b #define PCI_DEVICE_ID_TIGON3_5788 0x169c #define PCI_DEVICE_ID_TIGON3_5789 0x169d @@ -2053,6 +2054,7 @@ #define PCI_DEVICE_ID_INTEL_80960_RP 0x1960 #define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21 #define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30 +#define PCI_DEVICE_ID_INTEL_IOAT 0x1a38 #define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410 #define PCI_DEVICE_ID_INTEL_82801AA_1 0x2411 #define PCI_DEVICE_ID_INTEL_82801AA_3 0x2413 diff --git a/include/linux/pfkeyv2.h b/include/linux/pfkeyv2.h index bac0fb389cf..d5dd471da22 100644 --- a/include/linux/pfkeyv2.h +++ b/include/linux/pfkeyv2.h @@ -159,7 +159,7 @@ struct sadb_spirange { struct sadb_x_kmprivate { uint16_t sadb_x_kmprivate_len; uint16_t sadb_x_kmprivate_exttype; - u_int32_t sadb_x_kmprivate_reserved; + uint32_t sadb_x_kmprivate_reserved; } __attribute__((packed)); /* sizeof(struct sadb_x_kmprivate) == 8 */ diff --git a/include/linux/security.h b/include/linux/security.h index 1bab48f6aea..4dfb1b84a9b 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -805,31 +805,37 @@ struct swap_info_struct; * used by the XFRM system. * @sec_ctx contains the security context information being provided by * the user-level policy update program (e.g., setkey). - * Allocate a security structure to the xp->selector.security field. + * Allocate a security structure to the xp->security field. * The security field is initialized to NULL when the xfrm_policy is * allocated. * Return 0 if operation was successful (memory to allocate, legal context) * @xfrm_policy_clone_security: * @old contains an existing xfrm_policy in the SPD. * @new contains a new xfrm_policy being cloned from old. - * Allocate a security structure to the new->selector.security field - * that contains the information from the old->selector.security field. + * Allocate a security structure to the new->security field + * that contains the information from the old->security field. * Return 0 if operation was successful (memory to allocate). * @xfrm_policy_free_security: * @xp contains the xfrm_policy - * Deallocate xp->selector.security. + * Deallocate xp->security. + * @xfrm_policy_delete_security: + * @xp contains the xfrm_policy. + * Authorize deletion of xp->security. * @xfrm_state_alloc_security: * @x contains the xfrm_state being added to the Security Association * Database by the XFRM system. * @sec_ctx contains the security context information being provided by * the user-level SA generation program (e.g., setkey or racoon). - * Allocate a security structure to the x->sel.security field. The + * Allocate a security structure to the x->security field. The * security field is initialized to NULL when the xfrm_state is * allocated. * Return 0 if operation was successful (memory to allocate, legal context). * @xfrm_state_free_security: * @x contains the xfrm_state. - * Deallocate x>sel.security. + * Deallocate x->security. + * @xfrm_state_delete_security: + * @x contains the xfrm_state. + * Authorize deletion of x->security. * @xfrm_policy_lookup: * @xp contains the xfrm_policy for which the access control is being * checked. 
@@ -1298,8 +1304,10 @@ struct security_operations { int (*xfrm_policy_alloc_security) (struct xfrm_policy *xp, struct xfrm_user_sec_ctx *sec_ctx); int (*xfrm_policy_clone_security) (struct xfrm_policy *old, struct xfrm_policy *new); void (*xfrm_policy_free_security) (struct xfrm_policy *xp); + int (*xfrm_policy_delete_security) (struct xfrm_policy *xp); int (*xfrm_state_alloc_security) (struct xfrm_state *x, struct xfrm_user_sec_ctx *sec_ctx); void (*xfrm_state_free_security) (struct xfrm_state *x); + int (*xfrm_state_delete_security) (struct xfrm_state *x); int (*xfrm_policy_lookup)(struct xfrm_policy *xp, u32 sk_sid, u8 dir); #endif /* CONFIG_SECURITY_NETWORK_XFRM */ @@ -2934,11 +2942,21 @@ static inline void security_xfrm_policy_free(struct xfrm_policy *xp) security_ops->xfrm_policy_free_security(xp); } +static inline int security_xfrm_policy_delete(struct xfrm_policy *xp) +{ + return security_ops->xfrm_policy_delete_security(xp); +} + static inline int security_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *sec_ctx) { return security_ops->xfrm_state_alloc_security(x, sec_ctx); } +static inline int security_xfrm_state_delete(struct xfrm_state *x) +{ + return security_ops->xfrm_state_delete_security(x); +} + static inline void security_xfrm_state_free(struct xfrm_state *x) { security_ops->xfrm_state_free_security(x); @@ -2963,6 +2981,11 @@ static inline void security_xfrm_policy_free(struct xfrm_policy *xp) { } +static inline int security_xfrm_policy_delete(struct xfrm_policy *xp) +{ + return 0; +} + static inline int security_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *sec_ctx) { return 0; @@ -2972,6 +2995,11 @@ static inline void security_xfrm_state_free(struct xfrm_state *x) { } +static inline int security_xfrm_state_delete(struct xfrm_state *x) +{ + return 0; +} + static inline int security_xfrm_policy_lookup(struct xfrm_policy *xp, u32 sk_sid, u8 dir) { return 0; diff --git a/include/linux/selinux.h b/include/linux/selinux.h index 4047bcde448..aad4e390d6a 100644 --- a/include/linux/selinux.h +++ b/include/linux/selinux.h @@ -118,6 +118,27 @@ void selinux_get_ipc_sid(const struct kern_ipc_perm *ipcp, u32 *sid); */ void selinux_get_task_sid(struct task_struct *tsk, u32 *sid); +/** + * selinux_string_to_sid - map a security context string to a security ID + * @str: the security context string to be mapped + * @sid: ID value returned via this. + * + * Returns 0 if successful, with the SID stored in sid. A value + * of zero for sid indicates no SID could be determined (but no error + * occurred). + */ +int selinux_string_to_sid(char *str, u32 *sid); + +/** + * selinux_relabel_packet_permission - check permission to relabel a packet + * @sid: ID value to be applied to network packet (via SECMARK, most likely) + * + * Returns 0 if the current task is allowed to label packets with the + * supplied security ID. Note that it is implicit that the packet is always + * being relabeled from the default unlabled value, and that the access + * control decision is made in the AVC. 
+ */ +int selinux_relabel_packet_permission(u32 sid); #else @@ -172,6 +193,17 @@ static inline void selinux_get_task_sid(struct task_struct *tsk, u32 *sid) *sid = 0; } +static inline int selinux_string_to_sid(const char *str, u32 *sid) +{ + *sid = 0; + return 0; +} + +static inline int selinux_relabel_packet_permission(u32 sid) +{ + return 0; +} + #endif /* CONFIG_SECURITY_SELINUX */ #endif /* _LINUX_SELINUX_H */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index f8f234708b9..93e4db22158 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -29,6 +29,7 @@ #include <linux/net.h> #include <linux/textsearch.h> #include <net/checksum.h> +#include <linux/dmaengine.h> #define HAVE_ALLOC_SKB /* For the drivers to know */ #define HAVE_ALIGNABLE_SKB /* Ditto 8) */ @@ -209,6 +210,7 @@ enum { * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c * @tc_index: Traffic control index * @tc_verd: traffic control verdict + * @secmark: security marking */ struct sk_buff { @@ -285,6 +287,12 @@ struct sk_buff { __u16 tc_verd; /* traffic control verdict */ #endif #endif +#ifdef CONFIG_NET_DMA + dma_cookie_t dma_cookie; +#endif +#ifdef CONFIG_NETWORK_SECMARK + __u32 secmark; +#endif /* These elements must be at the end, see alloc_skb() for details. */ @@ -967,15 +975,16 @@ static inline void skb_reserve(struct sk_buff *skb, int len) #define NET_SKB_PAD 16 #endif -extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc); +extern int ___pskb_trim(struct sk_buff *skb, unsigned int len); static inline void __skb_trim(struct sk_buff *skb, unsigned int len) { - if (!skb->data_len) { - skb->len = len; - skb->tail = skb->data + len; - } else - ___pskb_trim(skb, len, 0); + if (unlikely(skb->data_len)) { + WARN_ON(1); + return; + } + skb->len = len; + skb->tail = skb->data + len; } /** @@ -985,6 +994,7 @@ static inline void __skb_trim(struct sk_buff *skb, unsigned int len) * * Cut the length of a buffer down by removing data from the tail. If * the buffer is already under the length specified it is not modified. + * The skb must be linear. */ static inline void skb_trim(struct sk_buff *skb, unsigned int len) { @@ -995,12 +1005,10 @@ static inline void skb_trim(struct sk_buff *skb, unsigned int len) static inline int __pskb_trim(struct sk_buff *skb, unsigned int len) { - if (!skb->data_len) { - skb->len = len; - skb->tail = skb->data+len; - return 0; - } - return ___pskb_trim(skb, len, 1); + if (skb->data_len) + return ___pskb_trim(skb, len); + __skb_trim(skb, len); + return 0; } static inline int pskb_trim(struct sk_buff *skb, unsigned int len) @@ -1161,18 +1169,34 @@ static inline int skb_can_coalesce(struct sk_buff *skb, int i, return 0; } +static inline int __skb_linearize(struct sk_buff *skb) +{ + return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM; +} + /** * skb_linearize - convert paged skb to linear one * @skb: buffer to linarize - * @gfp: allocation mode * * If there is no free memory -ENOMEM is returned, otherwise zero * is returned and the old skb data released. */ -extern int __skb_linearize(struct sk_buff *skb, gfp_t gfp); -static inline int skb_linearize(struct sk_buff *skb, gfp_t gfp) +static inline int skb_linearize(struct sk_buff *skb) +{ + return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0; +} + +/** + * skb_linearize_cow - make sure skb is linear and writable + * @skb: buffer to process + * + * If there is no free memory -ENOMEM is returned, otherwise zero + * is returned and the old skb data released. 
+ */ +static inline int skb_linearize_cow(struct sk_buff *skb) { - return __skb_linearize(skb, gfp); + return skb_is_nonlinear(skb) || skb_cloned(skb) ? + __skb_linearize(skb) : 0; } /** @@ -1396,5 +1420,23 @@ static inline void nf_reset(struct sk_buff *skb) static inline void nf_reset(struct sk_buff *skb) {} #endif /* CONFIG_NETFILTER */ +#ifdef CONFIG_NETWORK_SECMARK +static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from) +{ + to->secmark = from->secmark; +} + +static inline void skb_init_secmark(struct sk_buff *skb) +{ + skb->secmark = 0; +} +#else +static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from) +{ } + +static inline void skb_init_secmark(struct sk_buff *skb) +{ } +#endif + #endif /* __KERNEL__ */ #endif /* _LINUX_SKBUFF_H */ diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 76eaeff76f8..cee944dbdcd 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -313,6 +313,7 @@ enum NET_NF_CONNTRACK_FRAG6_TIMEOUT=29, NET_NF_CONNTRACK_FRAG6_LOW_THRESH=30, NET_NF_CONNTRACK_FRAG6_HIGH_THRESH=31, + NET_NF_CONNTRACK_CHECKSUM=32, }; /* /proc/sys/net/ipv4 */ @@ -403,6 +404,8 @@ enum NET_TCP_MTU_PROBING=113, NET_TCP_BASE_MSS=114, NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS=115, + NET_TCP_DMA_COPYBREAK=116, + NET_TCP_SLOW_START_AFTER_IDLE=117, }; enum { @@ -491,6 +494,7 @@ enum NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD=25, NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT=26, NET_IPV4_NF_CONNTRACK_COUNT=27, + NET_IPV4_NF_CONNTRACK_CHECKSUM=28, }; /* /proc/sys/net/ipv6 */ diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 542d39596bd..c90daa5da6c 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -18,6 +18,7 @@ #define _LINUX_TCP_H #include <linux/types.h> +#include <linux/dmaengine.h> #include <asm/byteorder.h> struct tcphdr { @@ -233,6 +234,13 @@ struct tcp_sock { struct iovec *iov; int memory; int len; +#ifdef CONFIG_NET_DMA + /* members for async copy */ + struct dma_chan *dma_chan; + int wakeup; + struct dma_pinned_list *pinned_list; + dma_cookie_t dma_cookie; +#endif } ucopy; __u32 snd_wl1; /* Sequence for window update */ diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h index 6b42cc474c0..46a15c7a1a1 100644 --- a/include/linux/xfrm.h +++ b/include/linux/xfrm.h @@ -118,6 +118,10 @@ enum XFRM_SHARE_UNIQUE /* Use once */ }; +#define XFRM_MODE_TRANSPORT 0 +#define XFRM_MODE_TUNNEL 1 +#define XFRM_MODE_MAX 2 + /* Netlink configuration messages. */ enum { XFRM_MSG_BASE = 0x10, diff --git a/include/net/ip.h b/include/net/ip.h index 3d2e5ca62a5..ead233c9540 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -147,7 +147,6 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar struct ipv4_config { int log_martians; - int autoconfig; int no_pmtu_disc; }; diff --git a/include/net/llc_if.h b/include/net/llc_if.h index 090eaa0d71f..c608812a8e8 100644 --- a/include/net/llc_if.h +++ b/include/net/llc_if.h @@ -16,6 +16,7 @@ #include <linux/if.h> #include <linux/if_arp.h> #include <linux/llc.h> +#include <linux/etherdevice.h> #include <net/llc.h> #define LLC_DATAUNIT_PRIM 1 @@ -61,8 +62,6 @@ #define LLC_STATUS_CONFLICT 7 /* disconnect conn */ #define LLC_STATUS_RESET_DONE 8 /* */ -extern u8 llc_mac_null_var[IFHWADDRLEN]; - /** * llc_mac_null - determines if a address is a null mac address * @mac: Mac address to test if null. @@ -70,16 +69,20 @@ extern u8 llc_mac_null_var[IFHWADDRLEN]; * Determines if a given address is a null mac address. 
Returns 0 if the * address is not a null mac, 1 if the address is a null mac. */ -static __inline__ int llc_mac_null(u8 *mac) +static inline int llc_mac_null(const u8 *mac) { - return !memcmp(mac, llc_mac_null_var, IFHWADDRLEN); + return is_zero_ether_addr(mac); } -static __inline__ int llc_addrany(struct llc_addr *addr) +static inline int llc_addrany(const struct llc_addr *addr) { return llc_mac_null(addr->mac) && !addr->lsap; } +static inline int llc_mac_multicast(const u8 *mac) +{ + return is_multicast_ether_addr(mac); +} /** * llc_mac_match - determines if two mac addresses are the same * @mac1: First mac address to compare. @@ -89,9 +92,9 @@ static __inline__ int llc_addrany(struct llc_addr *addr) * is not a complete match up to len, 1 if a complete match up to len is * found. */ -static __inline__ int llc_mac_match(u8 *mac1, u8 *mac2) +static inline int llc_mac_match(const u8 *mac1, const u8 *mac2) { - return !memcmp(mac1, mac2, IFHWADDRLEN); + return !compare_ether_addr(mac1, mac2); } extern int llc_establish_connection(struct sock *sk, u8 *lmac, diff --git a/include/net/netdma.h b/include/net/netdma.h new file mode 100644 index 00000000000..19760eb131a --- /dev/null +++ b/include/net/netdma.h @@ -0,0 +1,44 @@ +/* + * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * The full GNU General Public License is included in this distribution in the + * file called COPYING. 
+ */ +#ifndef NETDMA_H +#define NETDMA_H +#include <linux/config.h> +#ifdef CONFIG_NET_DMA +#include <linux/dmaengine.h> +#include <linux/skbuff.h> + +static inline struct dma_chan *get_softnet_dma(void) +{ + struct dma_chan *chan; + rcu_read_lock(); + chan = rcu_dereference(__get_cpu_var(softnet_data.net_dma)); + if (chan) + dma_chan_get(chan); + rcu_read_unlock(); + return chan; +} + +int dma_skb_copy_datagram_iovec(struct dma_chan* chan, + const struct sk_buff *skb, int offset, struct iovec *to, + size_t len, struct dma_pinned_list *pinned_list); + +#endif /* CONFIG_NET_DMA */ +#endif /* NETDMA_H */ diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 916013ca4a5..41111781580 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -114,6 +114,10 @@ struct nf_conn u_int32_t mark; #endif +#ifdef CONFIG_NF_CONNTRACK_SECMARK + u_int32_t secmark; +#endif + /* Storage reserved for other modules: */ union nf_conntrack_proto proto; @@ -285,6 +289,7 @@ static inline int nf_ct_is_dying(struct nf_conn *ct) } extern unsigned int nf_conntrack_htable_size; +extern int nf_conntrack_checksum; #define NF_CT_STAT_INC(count) (__get_cpu_var(nf_conntrack_stat).count++) diff --git a/include/net/netfilter/nf_conntrack_compat.h b/include/net/netfilter/nf_conntrack_compat.h index 3cac19fb364..f1b1482d720 100644 --- a/include/net/netfilter/nf_conntrack_compat.h +++ b/include/net/netfilter/nf_conntrack_compat.h @@ -20,6 +20,19 @@ static inline u_int32_t *nf_ct_get_mark(const struct sk_buff *skb, } #endif /* CONFIG_IP_NF_CONNTRACK_MARK */ +#ifdef CONFIG_IP_NF_CONNTRACK_SECMARK +static inline u_int32_t *nf_ct_get_secmark(const struct sk_buff *skb, + u_int32_t *ctinfo) +{ + struct ip_conntrack *ct = ip_conntrack_get(skb, ctinfo); + + if (ct) + return &ct->secmark; + else + return NULL; +} +#endif /* CONFIG_IP_NF_CONNTRACK_SECMARK */ + #ifdef CONFIG_IP_NF_CT_ACCT static inline struct ip_conntrack_counter * nf_ct_get_counters(const struct sk_buff *skb) @@ -70,6 +83,19 @@ static inline u_int32_t *nf_ct_get_mark(const struct sk_buff *skb, } #endif /* CONFIG_NF_CONNTRACK_MARK */ +#ifdef CONFIG_NF_CONNTRACK_SECMARK +static inline u_int32_t *nf_ct_get_secmark(const struct sk_buff *skb, + u_int32_t *ctinfo) +{ + struct nf_conn *ct = nf_ct_get(skb, ctinfo); + + if (ct) + return &ct->secmark; + else + return NULL; +} +#endif /* CONFIG_NF_CONNTRACK_MARK */ + #ifdef CONFIG_NF_CT_ACCT static inline struct ip_conntrack_counter * nf_ct_get_counters(const struct sk_buff *skb) diff --git a/include/net/raw.h b/include/net/raw.h index e67b28a0248..d83571fe4c6 100644 --- a/include/net/raw.h +++ b/include/net/raw.h @@ -36,7 +36,7 @@ extern rwlock_t raw_v4_lock; extern struct sock *__raw_v4_lookup(struct sock *sk, unsigned short num, - unsigned long raddr, unsigned long laddr, + __be32 raddr, __be32 laddr, int dif); extern int raw_v4_input(struct sk_buff *skb, struct iphdr *iph, int hash); diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index aa6033ca7cd..b2b40f951ae 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -255,7 +255,7 @@ extern int sctp_debug_flag; #define SCTP_DEBUG_PRINTK_IPADDR(whatever...) 
#define SCTP_ENABLE_DEBUG #define SCTP_DISABLE_DEBUG -#define SCTP_ASSERT(expr, str, func) +#define SCTP_ASSERT(expr, str, func) BUG_ON(!(expr)) #endif /* SCTP_DEBUG */ diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 7f4fea173fb..5f69158c100 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -555,7 +555,8 @@ struct sctp_af { int (*to_addr_param) (const union sctp_addr *, union sctp_addr_param *); int (*addr_valid) (union sctp_addr *, - struct sctp_sock *); + struct sctp_sock *, + const struct sk_buff *); sctp_scope_t (*scope) (union sctp_addr *); void (*inaddr_any) (union sctp_addr *, unsigned short); int (*is_any) (const union sctp_addr *); diff --git a/include/net/sock.h b/include/net/sock.h index c9fad6fb629..96565ff0de6 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -132,6 +132,7 @@ struct sock_common { * @sk_receive_queue: incoming packets * @sk_wmem_alloc: transmit queue bytes committed * @sk_write_queue: Packet sending queue + * @sk_async_wait_queue: DMA copied packets * @sk_omem_alloc: "o" is "option" or "other" * @sk_wmem_queued: persistent queue size * @sk_forward_alloc: space allocated forward @@ -205,6 +206,7 @@ struct sock { atomic_t sk_omem_alloc; struct sk_buff_head sk_receive_queue; struct sk_buff_head sk_write_queue; + struct sk_buff_head sk_async_wait_queue; int sk_wmem_queued; int sk_forward_alloc; gfp_t sk_allocation; @@ -871,10 +873,7 @@ static inline int sk_filter(struct sock *sk, struct sk_buff *skb, int needlock) if (filter) { unsigned int pkt_len = sk_run_filter(skb, filter->insns, filter->len); - if (!pkt_len) - err = -EPERM; - else - skb_trim(skb, pkt_len); + err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM; } if (needlock) @@ -1271,11 +1270,22 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) * This routine must be called with interrupts disabled or with the socket * locked so that the sk_buff queue operation is ok. 
*/ -static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb) +#ifdef CONFIG_NET_DMA +static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early) +{ + __skb_unlink(skb, &sk->sk_receive_queue); + if (!copied_early) + __kfree_skb(skb); + else + __skb_queue_tail(&sk->sk_async_wait_queue, skb); +} +#else +static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early) { __skb_unlink(skb, &sk->sk_receive_queue); __kfree_skb(skb); } +#endif extern void sock_enable_timestamp(struct sock *sk); extern int sock_get_timestamp(struct sock *, struct timeval __user *); diff --git a/include/net/tcp.h b/include/net/tcp.h index 3c989db8a7a..bfc71f954bb 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -28,6 +28,7 @@ #include <linux/cache.h> #include <linux/percpu.h> #include <linux/skbuff.h> +#include <linux/dmaengine.h> #include <net/inet_connection_sock.h> #include <net/inet_timewait_sock.h> @@ -218,6 +219,7 @@ extern int sysctl_tcp_adv_win_scale; extern int sysctl_tcp_tw_reuse; extern int sysctl_tcp_frto; extern int sysctl_tcp_low_latency; +extern int sysctl_tcp_dma_copybreak; extern int sysctl_tcp_nometrics_save; extern int sysctl_tcp_moderate_rcvbuf; extern int sysctl_tcp_tso_win_divisor; @@ -225,6 +227,7 @@ extern int sysctl_tcp_abc; extern int sysctl_tcp_mtu_probing; extern int sysctl_tcp_base_mss; extern int sysctl_tcp_workaround_signed_windows; +extern int sysctl_tcp_slow_start_after_idle; extern atomic_t tcp_memory_allocated; extern atomic_t tcp_sockets_allocated; @@ -293,6 +296,8 @@ extern int tcp_rcv_established(struct sock *sk, extern void tcp_rcv_space_adjust(struct sock *sk); +extern void tcp_cleanup_rbuf(struct sock *sk, int copied); + extern int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp); @@ -628,7 +633,7 @@ struct tcp_congestion_ops { /* return slow start threshold (required) */ u32 (*ssthresh)(struct sock *sk); /* lower bound for congestion window (optional) */ - u32 (*min_cwnd)(struct sock *sk); + u32 (*min_cwnd)(const struct sock *sk); /* do new cwnd calculation (required) */ void (*cong_avoid)(struct sock *sk, u32 ack, u32 rtt, u32 in_flight, int good_ack); @@ -663,7 +668,7 @@ extern struct tcp_congestion_ops tcp_init_congestion_ops; extern u32 tcp_reno_ssthresh(struct sock *sk); extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 rtt, u32 in_flight, int flag); -extern u32 tcp_reno_min_cwnd(struct sock *sk); +extern u32 tcp_reno_min_cwnd(const struct sock *sk); extern struct tcp_congestion_ops tcp_reno; static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state) @@ -817,6 +822,12 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp) tp->ucopy.len = 0; tp->ucopy.memory = 0; skb_queue_head_init(&tp->ucopy.prequeue); +#ifdef CONFIG_NET_DMA + tp->ucopy.dma_chan = NULL; + tp->ucopy.wakeup = 0; + tp->ucopy.pinned_list = NULL; + tp->ucopy.dma_cookie = 0; +#endif } /* Packet is added to VJ-style prequeue for processing in process diff --git a/include/net/xfrm.h b/include/net/xfrm.h index afa508d92c9..9c5ee9f20b6 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -20,6 +20,8 @@ #include <net/ip6_fib.h> #define XFRM_ALIGN8(len) (((len) + 7) & ~7) +#define MODULE_ALIAS_XFRM_MODE(family, encap) \ + MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap)) extern struct sock *xfrm_nl; extern u32 sysctl_xfrm_aevent_etime; @@ -164,6 +166,7 @@ struct xfrm_state /* Reference to data common to all the instances of this * transformer. 
*/ struct xfrm_type *type; + struct xfrm_mode *mode; /* Security context */ struct xfrm_sec_ctx *security; @@ -204,8 +207,8 @@ struct xfrm_type; struct xfrm_dst; struct xfrm_policy_afinfo { unsigned short family; - rwlock_t lock; - struct xfrm_type_map *type_map; + struct xfrm_type *type_map[IPPROTO_MAX]; + struct xfrm_mode *mode_map[XFRM_MODE_MAX]; struct dst_ops *dst_ops; void (*garbage_collect)(void); int (*dst_lookup)(struct xfrm_dst **dst, struct flowi *fl); @@ -232,7 +235,6 @@ extern int __xfrm_state_delete(struct xfrm_state *x); struct xfrm_state_afinfo { unsigned short family; - rwlock_t lock; struct list_head *state_bydst; struct list_head *state_byspi; int (*init_flags)(struct xfrm_state *x); @@ -264,16 +266,24 @@ struct xfrm_type u32 (*get_max_size)(struct xfrm_state *, int size); }; -struct xfrm_type_map { - rwlock_t lock; - struct xfrm_type *map[256]; -}; - extern int xfrm_register_type(struct xfrm_type *type, unsigned short family); extern int xfrm_unregister_type(struct xfrm_type *type, unsigned short family); extern struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family); extern void xfrm_put_type(struct xfrm_type *type); +struct xfrm_mode { + int (*input)(struct xfrm_state *x, struct sk_buff *skb); + int (*output)(struct sk_buff *skb); + + struct module *owner; + unsigned int encap; +}; + +extern int xfrm_register_mode(struct xfrm_mode *mode, int family); +extern int xfrm_unregister_mode(struct xfrm_mode *mode, int family); +extern struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family); +extern void xfrm_put_mode(struct xfrm_mode *mode); + struct xfrm_tmpl { /* id in template is interpreted as: |
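Usage sketch (not part of the patch; names such as example_event(), example_copy() and example_init() are hypothetical): the client API declared in the new include/linux/dmaengine.h is driven by registering an event callback that receives channels, requesting channels, submitting copies, flushing them with dma_async_memcpy_issue_pending(), and polling the returned cookie with dma_async_memcpy_complete(). A minimal sketch, assuming CONFIG_DMA_ENGINE and built only from the declarations shown above:

/* Hypothetical dmaengine client, sketched from the header above only. */
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/init.h>

static struct dma_client *example_client;
static struct dma_chan *example_chan;	/* channel handed to us by the core */

static void example_event(struct dma_client *client, struct dma_chan *chan,
			  enum dma_event event)
{
	switch (event) {
	case DMA_RESOURCE_ADDED:
		example_chan = chan;
		break;
	case DMA_RESOURCE_REMOVED:
		example_chan = NULL;
		break;
	default:
		break;
	}
}

static int example_copy(void *dest, void *src, size_t len)
{
	dma_cookie_t cookie;
	enum dma_status status;

	if (!example_chan)
		return -ENODEV;

	/* queue the copy, then flush pending descriptors to hardware */
	cookie = dma_async_memcpy_buf_to_buf(example_chan, dest, src, len);
	if (dma_submit_error(cookie))
		return -EIO;
	dma_async_memcpy_issue_pending(example_chan);

	/* busy-poll the cookie; a real client would do useful work meanwhile */
	do {
		status = dma_async_memcpy_complete(example_chan, cookie,
						   NULL, NULL);
	} while (status == DMA_IN_PROGRESS);

	return status == DMA_SUCCESS ? 0 : -EIO;
}

static int __init example_init(void)
{
	example_client = dma_async_client_register(example_event);
	if (!example_client)
		return -ENOMEM;
	dma_async_client_chan_request(example_client, 1);
	return 0;
}

Note that dma_submit_error() flags negative cookies as submission failures, and dma_async_is_complete() copes with cookie wrap-around by testing the cookie against both the last completed and the last issued value.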
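A second sketch (again hypothetical, not from the patch) for the netdevice.h change: xmit_lock becomes _xmit_lock, and the new netif_tx_lock()/netif_tx_lock_bh()/netif_tx_trylock()/netif_tx_unlock*() helpers take it while also maintaining xmit_lock_owner, as the converted netif_tx_disable() above shows. A driver section that used to take the lock directly would change roughly as follows:

#include <linux/netdevice.h>

/* Hypothetical driver path that used to take dev->xmit_lock directly. */
static void example_set_rx_mode(struct net_device *dev)
{
	netif_tx_lock_bh(dev);		/* was: spin_lock_bh(&dev->xmit_lock); */
	/* ... reprogram the hardware multicast filter ... */
	netif_tx_unlock_bh(dev);	/* was: spin_unlock_bh(&dev->xmit_lock); */
}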
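Finally, a sketch of the skbuff.h API shift: skb_linearize() loses its gfp_t argument, skb_linearize_cow() is added, and skb_trim()/__skb_trim() now expect a linear skb, so paged buffers are trimmed with pskb_trim() (as the sk_filter() hunk in include/net/sock.h now does). A hypothetical caller, updated to the new calls:

#include <linux/skbuff.h>

static int example_fixup(struct sk_buff *skb, unsigned int new_len)
{
	int err;

	/*
	 * Old API: skb_linearize(skb, GFP_ATOMIC). The gfp_t argument is gone;
	 * skb_linearize_cow() additionally makes a cloned skb writable.
	 */
	err = skb_linearize_cow(skb);
	if (err)
		return err;

	/* skb_trim() now expects a linear skb; use pskb_trim() for paged data */
	return pskb_trim(skb, new_len);
}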