diff -rNu linux-2.4.9-ac10/include/net/tux.h linux/include/net/tux.h --- linux-2.4.9-ac10/include/net/tux.h Thu Jan 1 01:00:00 1970 +++ linux/include/net/tux.h Mon Sep 10 16:18:08 2001 @@ -0,0 +1,744 @@ +#ifndef _NET_TUX_H +#define _NET_TUX_H + +/* + * TUX - Integrated Application Protocols Layer and Object Cache + * + * Copyright (C) 2000, 2001, Ingo Molnar + * + * tux.h: main structure definitions and function prototypes + */ + +#define __KERNEL_SYSCALLS__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +/* Maximum number of threads: */ +#define CONFIG_TUX_NUMTHREADS 8 + +/* Number of cachemiss/IO threads: */ +#define NR_IO_THREADS 8 + +/* Maximum number of listen sockets per thread: */ +#define CONFIG_TUX_NUMSOCKETS 4 + +extern unsigned int tux_listen [CONFIG_TUX_NUMTHREADS][CONFIG_TUX_NUMSOCKETS]; + +extern spinlock_t tux_module_lock; +extern struct module *tux_module; +extern int (*sys_tux_ptr) (unsigned int action, user_req_t *u_info); + +#undef Dprintk + +extern int tux_Dprintk; + +#define TUX_DEBUG CONFIG_TUX_DEBUG +#if CONFIG_TUX_DEBUG +# define TUX_BUG() BUG() + +# define INC_STAT(x) atomic_inc((atomic_t *)&kstat.x) +# define DEC_STAT(x) atomic_dec((atomic_t *)&kstat.x) +# define ADD_STAT(y,x) atomic_add(y,(atomic_t *)&kstat.x) +# define SUB_STAT(y,x) atomic_sub(y,(atomic_t *)&kstat.x) +# define TUX_DPRINTK 1 +# define TDprintk(x...) do { printk("<%ld:%s:%d>: ", jiffies, __FILE__, __LINE__); printk(x); } while (0) +# define Dprintk(x...) do { if (tux_Dprintk == 1) TDprintk(x); } while (0) +#else +# define TUX_DPRINTK 0 +# define Dprintk(x...) do { } while (0) +# define TDprintk(x...) do { } while (0) +# define INC_STAT(x) do { } while (0) +# define DEC_STAT(x) do { } while (0) +# define ADD_STAT(x,y) do { } while (0) +# define SUB_STAT(x,y) do { } while (0) +# define TUX_BUG() do { } while (0) +#endif + +// lru needs this: + +# define DEBUG_DEL_LIST(x...) 
do { INIT_LIST_HEAD((x)); } while (0) + + +#define LOG_LEN (8*1024*1024UL) + +struct tux_req_struct; +typedef struct tux_req_struct tux_req_t; +typedef struct tux_threadinfo threadinfo_t; + +extern struct address_space_operations url_aops; + +typedef struct tcapi_template_s { + char *vfs_name; + struct list_head modules; + int (*query) (tux_req_t *req); + struct module *mod; + int userspace_id; +} tcapi_template_t; + +typedef struct mimetype_s { + struct list_head list; + + char *ext; + int ext_len; + char *type; + int type_len; + + int special; +} mimetype_t; + +typedef struct tux_attribute_s { + mimetype_t *mime; + tcapi_template_t *tcapi; +} tux_attribute_t; + +#define MAX_TUX_ATOMS 8 + +typedef void (atom_func_t)(tux_req_t *req, int cachemiss); + +typedef struct tux_proto_s +{ + int defer_accept; + void (*got_request) (tux_req_t *req); + int (*parse_message) (tux_req_t *req, const int total_len); + atom_func_t *illegal_request; + +} tux_proto_t; + +typedef struct abuf_s { + struct page *page; + char *buf; + unsigned int size; + unsigned int max_len; + unsigned int offset; + unsigned int left; + unsigned long flags; + int push; +} abuf_t; + +struct linux_dirent64 { + u64 d_ino; + s64 d_off; + unsigned short d_reclen; + unsigned char d_type; + char d_name[0]; +}; + +struct getdents_callback64 { + struct linux_dirent64 * current_dir; + struct linux_dirent64 * previous; + int count; + int error; +}; + +#define TUX_MAGIC 0x12457801 + +#define MAX_TUX_ATOMS 8 + +struct tux_req_struct +{ + tux_proto_t *proto; + + int atom_idx; + atom_func_t *atoms [MAX_TUX_ATOMS]; + struct list_head work; + + struct list_head all; + struct list_head free; + struct list_head lru; + + unsigned int idle_input; + unsigned int wait_output_space; + + struct socket *sock; + struct dentry *dentry; + struct file in_file; + int fd; + read_descriptor_t desc; + + unsigned int filelen; + int lendigits; + time_t mtime; + char etag [21]; + int etaglen; + + char usermode; + int usermodule_idx; + struct dentry *module_dentry; + char *userbuf; + unsigned int userlen; + + tux_attribute_t *attr; + + threadinfo_t *ti; + wait_queue_t sleep; + wait_queue_t ftp_sleep; + + struct sk_buff *input_skb; + + abuf_t abuf; + /* + * Parsed request fields. In-line strings are zero-delimited. 
+ */ + + char *headers; + char *headers_buf; + int headers_len; + + int parsed_len; + + // FTP part + ftp_command_t ftp_command; + u32 ftp_user_addr; + u16 ftp_user_port; + + struct socket *ftp_data_sock; + unsigned int ftp_offset; + + // ls handing: + struct linux_dirent64 *ftp_dirp0; + unsigned int ftp_curroff, ftp_total; + +#define MAX_USERNAME_LEN 16 + char username[MAX_USERNAME_LEN]; + unsigned int username_len; + + // HTTP part + http_method_t method; + const char *method_str; + int method_len; + + http_version_t version; + const char *version_str; + int version_len; + + /* requested URI: */ + + const char *uri_str; + int uri_len; + + /* Objectname (filename/scriptname) this URI refers to: */ + +#define MAX_OBJECTNAME_LEN 256 + char objectname[MAX_OBJECTNAME_LEN + 4]; // space for .gz as well + unsigned int objectname_len; + + /* Query string within the URI: */ + + const char *query_str; + int query_len; + + /* Cookies: */ + + const char *cookies_str; + int cookies_len; + int parse_cookies; + + /* Content-TYpe */ + const char *content_type_str; + int content_type_len; + + /* Content-Length: */ + + const char *contentlen_str; + int contentlen_len; + int content_len; + + /* User-Agent: */ + + const char *user_agent_str; + int user_agent_len; + + /* Accept: */ + + const char *accept_str; + int accept_len; + + /* Accept-Charset: */ + + const char *accept_charset_str; + int accept_charset_len; + + /* Accept-Language: */ + + const char *accept_language_str; + int accept_language_len; + + /* Cache-Control: */ + + const char *cache_control_str; + int cache_control_len; + + /* If-Modified-Since: */ + + const char *if_modified_since_str; + int if_modified_since_len; + + /* If-None-Match: */ + const char *if_none_match_str; + int if_none_match_len; + + /* Negotiate: */ + + const char *negotiate_str; + int negotiate_len; + + /* Pragma: */ + + const char *pragma_str; + int pragma_len; + + /* Referer: */ + + const char *referer_str; + int referer_len; + + /* Accept-Encoding: */ + + const char *accept_encoding_str; + int accept_encoding_len; + int may_send_gzip; + int content_gzipped; + + /* Host */ + +#define MAX_HOST_LEN 128 + char host[MAX_HOST_LEN]; + int host_len; + + struct dentry *cwd; + struct vfsmount *cwdmnt; + + /* POSTed data: */ + + const char *post_data_str; + int post_data_len; + + int status; + + /* the file being sent */ + + int bytes_sent; +#if CONFIG_TUX_DEBUG + int bytes_expected; +#endif + unsigned long first_timestamp; + int body_len; + + char error; + char postponed; + + char had_cachemiss; + char lookup_dir; + char lookup_404; + + char keep_alive; + timer_t keepalive_timer; + unsigned int total_bytes; + timer_t output_timer; + + int nr_keepalives; + + int event; + void *private; + + unsigned int magic; + void (*real_data_ready)(struct sock *sk, int space); + void (*real_state_change)(struct sock *sk); + void (*real_write_space)(struct sock *sk); + void (*real_error_report)(struct sock *sk); + void (*real_destruct)(struct sock *sk); + + void (*ftp_real_data_ready)(struct sock *sk, int space); + void (*ftp_real_state_change)(struct sock *sk); + void (*ftp_real_write_space)(struct sock *sk); + void (*ftp_real_error_report)(struct sock *sk); + void (*ftp_real_destruct)(struct sock *sk); + +#if CONFIG_TUX_EXTENDED_LOG + unsigned long accept_timestamp; + unsigned long parse_timestamp; + unsigned long output_timestamp; + unsigned long flush_timestamp; +# define SET_TIMESTAMP(x) do { (x) = jiffies; } while (0) +#else +# define SET_TIMESTAMP(x) do { } while (0) +#endif + +}; + 
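To make the request-atom machinery easier to follow before the prototypes below: atoms[] is a small LIFO stack of per-request continuations. add_tux_atom() pushes a handler, and tux_schedule_atom() later pops the most recently pushed handler and invokes it with the cachemiss flag, either from the fast path or from a cachemiss IO thread. A minimal sketch of the chaining pattern follows; send_headers() and send_body() are hypothetical handlers used purely for illustration (a real in-tree user is do_send_abuf() in abuf.c further down):

	static void send_body (tux_req_t *req, int cachemiss);

	static void send_headers (tux_req_t *req, int cachemiss)
	{
		/*
		 * Push the follow-up step; it runs when the main loop (or an
		 * IO thread) calls tux_schedule_atom() on this request:
		 */
		add_tux_atom(req, send_body);

		/* ... build and queue the response headers here ... */

		/* put the request back on the per-thread work queue: */
		add_req_to_workqueue(req);
	}

	static void send_body (tux_req_t *req, int cachemiss)
	{
		/* runs later; cachemiss is typically nonzero when invoked
		 * from a cachemiss IO thread */
	}
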
+extern void add_tux_atom (tux_req_t *req, atom_func_t *event_done); +extern void del_tux_atom (tux_req_t *req); +extern void tux_schedule_atom (tux_req_t *req, int cachemiss); +extern void add_req_to_workqueue (tux_req_t *req); + + +typedef struct iothread_s +{ + spinlock_t async_lock; + threadinfo_t *ti; + struct list_head async_queue; + wait_queue_head_t async_sleep; + int nr_async_pending; + int threads; + int shutdown; + wait_queue_head_t wait_shutdown; +} iothread_t; + +typedef struct tux_listen_s +{ + tux_proto_t *proto; + struct socket *sock; + int cloned; +} tux_listen_t; + +struct tux_threadinfo +{ + tux_req_t *userspace_req; + int started; + struct task_struct *thread; + iothread_t *iot; + wait_queue_t wait_event [CONFIG_TUX_NUMSOCKETS]; + wait_queue_t stop; + int pid; + + struct page *header_cache; + int header_offset; + + int nr_requests; + struct list_head all_requests; + + int nr_free_requests; + spinlock_t free_requests_lock; + struct list_head free_requests; + + spinlock_t work_lock; + struct list_head work_pending; + struct list_head lru; + unsigned int nr_lru; + + int listen_error; + tux_listen_t listen[CONFIG_TUX_NUMSOCKETS]; + + int cpu; + unsigned int __padding[16]; +}; + +typedef enum special_mimetypes { + NORMAL_MIME_TYPE, + MIME_TYPE_REDIRECT, + MIME_TYPE_CGI, + MIME_TYPE_MODULE, +} special_mimetypes_t; + +extern struct nameidata docroot; + +#if CONFIG_TUX_DEBUG +extern inline void url_hist_hit (int size) +{ + unsigned int idx = size/1024; + + if (idx >= URL_HIST_SIZE) + idx = URL_HIST_SIZE-1; + kstat.url_hist_hits[idx]++; +} +extern inline void url_hist_miss (int size) +{ + unsigned int idx = size/1024; + + if (idx >= URL_HIST_SIZE) + idx = URL_HIST_SIZE-1; + kstat.url_hist_misses[idx]++; +} +extern void __check_req_list (tux_req_t *req, struct list_head *list); +# define check_req_list __check_req_list +#else +# define url_hist_hit(size) do { } while (0) +# define url_hist_miss(size) do { } while (0) +# define check_req_list(req, list) do { } while (0) +#endif + + +extern char tux_docroot[200]; +extern char tux_logfile[200]; +extern char tux_cgiroot[200]; +extern char tux_404_page[200]; +extern char tux_default_vhost[200]; +extern int tux_cgi_uid; +extern int tux_cgi_gid; +extern int tux_clientport; +extern int tux_logging; +extern int tux_serverport; +extern int tux_threads; +extern int tux_keepalive_timeout; +extern int tux_max_output_bandwidth; +extern int tux_max_backlog; +extern int tux_max_connect; +extern int tux_mode_forbidden; +extern int tux_mode_allowed; +extern int tux_logentry_align_order; +extern int tux_nonagle; +extern int tux_ack_pingpong; +extern int tux_push_all; +extern int tux_zerocopy_parse; +extern int tux_generate_etags; +extern int tux_ip_logging; +extern int tux_ftp_wait_close; + +typedef enum virtual_server { + TUX_VHOST_NONE, + TUX_VHOST_HOST, + TUX_VHOST_IP, + TUX_VHOST_IP_HOST, +} virtual_server_t; + +extern int virtual_server; + +extern int tux_max_object_size; +extern unsigned int tux_max_free_requests; +extern int tux_defer_accept; + + +#if CONFIG_TUX_DEBUG +# undef FASTCALL +# define FASTCALL(x) x +#endif + +extern struct socket * FASTCALL(start_listening(u16 port, u32 addr, int defer, int nr)); +extern void FASTCALL(stop_listening(struct socket **sock)); +extern void FASTCALL(start_sysctl(void)); +extern void FASTCALL(end_sysctl(void)); +extern void flush_request (tux_req_t *req, int cachemiss); +extern void unlink_tux_socket (tux_req_t *req); +extern void unlink_tux_ftp_data_socket (tux_req_t *req); +extern void 
unlink_tux_listen_socket (tux_req_t *req); +extern void link_tux_ftp_accept_socket (tux_req_t *req, struct socket *sock); +extern void link_tux_ftp_data_socket (tux_req_t *req, struct socket *sock); +extern void FASTCALL(push_frames (tux_req_t *req)); +extern int FASTCALL(send_sync_buf (tux_req_t *req, struct socket *sock, const char *buf, const size_t length, unsigned long flags)); +extern void FASTCALL(__send_async_message (tux_req_t *req, const char *message, int status, int push)); +extern void FASTCALL(send_success (tux_req_t *req, struct socket *sock)); +extern void FASTCALL(send_async_err_not_found (tux_req_t *req)); +extern void FASTCALL(send_async_timed_out (tux_req_t *req)); + +extern void FASTCALL(kfree_req (tux_req_t *req)); +extern int FASTCALL(accept_requests (threadinfo_t *ti)); +extern int FASTCALL(process_requests (threadinfo_t *ti, tux_req_t **user_req)); +extern int FASTCALL(flush_freequeue (threadinfo_t * ti)); +extern int FASTCALL(flush_workqueue (threadinfo_t *ti)); +extern tux_req_t * FASTCALL(pick_userspace_req (threadinfo_t *ti)); +extern atom_func_t redirect_request; +extern atom_func_t parse_request; +extern int FASTCALL(accept_pending (threadinfo_t *ti)); +extern int FASTCALL(work_pending (threadinfo_t *ti)); +extern void FASTCALL(queue_cachemiss (tux_req_t *req)); +extern int FASTCALL(start_cachemiss_threads (threadinfo_t *ti)); +extern void FASTCALL(stop_cachemiss_threads (threadinfo_t *ti)); +struct file * FASTCALL(tux_open_file(char *filename, int mode)); +extern void FASTCALL(init_log_thread (void)); +extern void FASTCALL(stop_log_thread (void)); +extern void add_mimetype (char *new_ext, char *new_type); +extern void free_mimetypes (void); +extern int FASTCALL(lookup_url (tux_req_t *req, const unsigned int flag)); +extern int FASTCALL(handle_gzip_req (tux_req_t *req, unsigned int flags)); +extern struct dentry * FASTCALL(tux_lookup (tux_req_t *req, const char *filename, const unsigned int flag)); +extern tcapi_template_t * lookup_tuxmodule (const char *filename); +extern int register_tuxmodule (tcapi_template_t *tcapi); +extern tcapi_template_t * unregister_tuxmodule (char *vfs_name); +extern tcapi_template_t * get_first_usermodule (void); +extern int user_register_module (user_req_t *u_info); +extern int user_unregister_module (user_req_t *u_info); +extern void unregister_all_tuxmodules (void); + +typedef struct exec_param_s { + char *command; + char **argv; + char **envp; + int pipe_fds; +} exec_param_t; + +extern pid_t tux_exec_process (char *command, char **argv, char **envp, int pipe_fds, exec_param_t *param, int wait); + +extern void start_external_cgi (tux_req_t *req); +extern tcapi_template_t extcgi_tcapi; + +extern void queue_output_req (tux_req_t *req, threadinfo_t *ti); +extern void queue_userspace_req (tux_req_t *req, threadinfo_t *ti); + + +extern void FASTCALL(__log_request (tux_req_t *req)); +extern inline void log_request (tux_req_t *req) +{ + if (tux_logging) + __log_request(req); +} + +extern int FASTCALL(connection_too_fast (tux_req_t *req)); +extern void FASTCALL(trunc_headers (tux_req_t *req)); +extern int FASTCALL(generic_send_file (tux_req_t *req, int push, int nonblock, struct socket *sock)); +extern int FASTCALL(tux_fetch_file (tux_req_t *req, int nonblock)); + +extern void FASTCALL(postpone_request (tux_req_t *req)); +extern int FASTCALL(continue_request (int fd)); +extern void FASTCALL(tux_push_pending (struct sock *sk)); +extern void FASTCALL(zap_request (tux_req_t *req, int cachemiss)); +extern int 
FASTCALL(add_output_space_event (tux_req_t *req, struct socket *sock)); + +extern void reap_kids (void); +extern void unuse_frag (struct sk_buff *skb, skb_frag_t *frag); +extern skb_frag_t * build_dynbuf_frag (tux_req_t *req, int size); +extern int url_permission (struct inode *inode); +extern void flush_all_signals (void); + +extern int multifragment_api; + +extern int tux_print_retransmit; + +#define D() Dprintk("{%s:%d}\n", __FILE__, __LINE__) + +#define tux_sleep(n) \ + do { \ + current->state = TASK_INTERRUPTIBLE; \ + schedule_timeout(HZ * (n)); \ + } while (0) + +#define tux_file file + +#define tux_write_file(file, buf, len) \ + ({ unsigned int __ret; mm_segment_t oldmm = get_fs(); set_fs(KERNEL_DS); __ret = ((file)->f_op->write(file, buf, len, &(file)->f_pos)); set_fs(oldmm); __ret; }) + +#define tux_read_file(file, buf, len) \ + ({ unsigned int __ret; mm_segment_t oldmm = get_fs(); set_fs(KERNEL_DS); __ret = ((file)->f_op->read(file, buf, len, &(file)->f_pos)); set_fs(oldmm); __ret; }) + +#define tux_close_file(file) \ + (fput(file)) + +#define TUX_DECLARE_MUTEX DECLARE_MUTEX +#define tux_down down +#define tux_up up + +#define tux_time() CURRENT_TIME + +#define tux_direntry dentry +#define tux_direntry_open(d,r,fl) \ + ({ struct file *__f; lock_kernel(); __f = dentry_open(d,r,fl); unlock_kernel(); __f; }) +#define tux_lookup_direntry(f,r,fl) \ + ({ struct dentry *__d; lock_kernel(); __d = tux_lookup(f,r,fl); unlock_kernel(); __d; }) +#define tux_file_size(file) ((file)->f_dentry->d_inode->i_size) + +#define tux_mmap_page(file, virt, offset) \ +({ \ + struct page *page = NULL; \ + page = grab_cache_page((file)->f_dentry->d_inode->i_mapping, 0); \ + if (page) { \ + virt = (char *)kmap(page); \ + UnlockPage(page); \ + } \ + page; \ +}) + +#define tux_direntry_error(dentry) \ + (!(dentry) || IS_ERR(dentry) || !(dentry)->d_inode) +#define tux_dput(d) do { lock_kernel(); dput(d); unlock_kernel(); } while (0) +#define tux_mtime(dentry) \ + ((dentry)->d_inode->i_mtime) +#define tux_file_error(file) \ + ((!file) || !(file)->f_dentry || !(file)->f_dentry->d_inode) + +#define tux_getpid() (current->pid) +#define tux_client_addr(req) ((req)->sock->sk->daddr) + +#define tux_page page + +extern int nr_async_io_pending (void); + +extern void __add_keepalive_timer (tux_req_t *req); +#define add_keepalive_timer(req) \ +do { \ + if (tux_keepalive_timeout) { \ + Dprintk("add_keepalive_timer(%p).\n", (req)); \ + __add_keepalive_timer(req); \ + } \ +} while (0) +extern void __del_keepalive_timer (tux_req_t *req); +#define del_keepalive_timer(req) \ +do { \ + if (tux_keepalive_timeout) { \ + Dprintk("del_keepalive_timer(%p).\n", (req)); \ + __del_keepalive_timer(req); \ + } \ +} while (0) + +extern void del_output_timer (tux_req_t *req); +extern void output_timeout (tux_req_t *req); + +extern void print_req (tux_req_t *req); + +extern char tux_date [DATE_LEN]; + + +extern int nr_async_io_pending (void); +extern void tux_exit (void); +extern char * FASTCALL(get_abuf (tux_req_t *req, int max_size)); +extern void FASTCALL(send_abuf (tux_req_t *req, int size, unsigned long flags, int push)); + + +extern int idle_event (tux_req_t *req); +extern int output_space_event (tux_req_t *req); +extern unsigned int log_cpu_mask; +extern int tux_compression; +extern int tux_noid; +extern int tux_cgi_inherit_cpu; +extern int tux_zerocopy_header; +extern int tux_zerocopy_sendfile; +extern unsigned int tux_cgi_cpu_mask; +extern tux_proto_t tux_proto_http; +extern tux_proto_t tux_proto_ftp; +extern int tux_all_userspace; 
+extern int tux_redirect_logging; +extern int tux_referer_logging; +extern unsigned int tux_max_header_len; +extern int tux_application_protocol; + +extern void drop_permissions (void); +extern int query_extcgi (tux_req_t *req); +extern int tux_chroot (char *dir); + +extern void install_req_dentry (tux_req_t *req, struct dentry *dentry); +extern void release_req_dentry (tux_req_t *req); +extern void unidle_req (tux_req_t *req); +extern int nr_requests_used (void); + +#define req_err(req) do { (req)->error = 1; TDprintk("request %p error at %s:%d.\n", req, __FILE__, __LINE__); } while (0) + +#define enough_wspace(sk) (tcp_wspace(sk) >= tcp_min_write_space(sk)) +#define clear_keepalive(req) do { (req)->keep_alive = 0; Dprintk("keepalive cleared for req %p.\n", req); } while (0) + +extern int print_all_requests (threadinfo_t *ti); +extern int tux_max_keepalives; +extern int time_unix2ls (time_t zulu, char *buf); + +#endif diff -rNu linux-2.4.9-ac10/include/net/tux_u.h linux/include/net/tux_u.h --- linux-2.4.9-ac10/include/net/tux_u.h Thu Jan 1 01:00:00 1970 +++ linux/include/net/tux_u.h Mon Sep 10 16:18:08 2001 @@ -0,0 +1,163 @@ +#ifndef _NET_TUX_U_H +#define _NET_TUX_U_H + +/* + * TUX - Integrated Application Protocols Layer and Object Cache + * + * Copyright (C) 2000, 2001, Ingo Molnar + * + * tux_u.h: HTTP module API - HTTP interface to user-space + */ + +/* + * Different major versions are not compatible. + * Different minor versions are only downward compatible. + * Different patchlevel versions are downward and upward compatible. + */ +#define TUX_MAJOR_VERSION 2 +#define TUX_MINOR_VERSION 1 +#define TUX_PATCHLEVEL_VERSION 0 + +#define __KERNEL_SYSCALLS__ + +typedef enum http_versions { + HTTP_1_0, + HTTP_1_1 +} http_version_t; + +/* + * Request methods known to HTTP: + */ +typedef enum http_methods { + METHOD_NONE, + METHOD_GET, + METHOD_HEAD, + METHOD_POST, + METHOD_PUT, + NR_METHODS +} http_method_t; + +enum user_req { + TUX_ACTION_STARTUP = 1, + TUX_ACTION_SHUTDOWN = 2, + TUX_ACTION_STARTTHREAD = 3, + TUX_ACTION_STOPTHREAD = 4, + TUX_ACTION_EVENTLOOP = 5, + TUX_ACTION_GET_OBJECT = 6, + TUX_ACTION_SEND_OBJECT = 7, + TUX_ACTION_READ_OBJECT = 8, + TUX_ACTION_FINISH_REQ = 9, + TUX_ACTION_FINISH_CLOSE_REQ = 10, + TUX_ACTION_REGISTER_MODULE = 11, + TUX_ACTION_UNREGISTER_MODULE = 12, + TUX_ACTION_CURRENT_DATE = 13, + TUX_ACTION_REGISTER_MIMETYPE = 14, + TUX_ACTION_READ_HEADERS = 15, + TUX_ACTION_POSTPONE_REQ = 16, + TUX_ACTION_CONTINUE_REQ = 17, + TUX_ACTION_REDIRECT_REQ = 18, + TUX_ACTION_READ_POST_DATA = 19, + TUX_ACTION_SEND_BUFFER = 20, + MAX_TUX_ACTION +}; + +enum tux_ret { + TUX_ERROR = -1, + TUX_RETURN_USERSPACE_REQUEST = 0, + TUX_RETURN_EXIT = 1, + TUX_RETURN_SIGNAL = 2, + TUX_CONTINUE_EVENTLOOP = 3, +}; + +#define MAX_MODULENAME_LEN 16 +#define MAX_URI_LEN 256 +#define MAX_COOKIE_LEN 128 +#define MAX_FIELD_LEN 64 +#define DATE_LEN 30 + +typedef struct user_req_s { + int version_major; + int version_minor; + int version_patch; + + int http_version; + int http_method; + + int sock; + int event; + int thread_nr; + void *id; + void *priv; + + int http_status; + int bytes_sent; + char *object_addr; + int module_index; + char modulename[MAX_MODULENAME_LEN]; + + unsigned int client_host; + unsigned int objectlen; + char query[MAX_URI_LEN]; + char objectname[MAX_URI_LEN]; + + unsigned int cookies_len; + char cookies[MAX_COOKIE_LEN]; + + char content_type[MAX_FIELD_LEN]; + char user_agent[MAX_FIELD_LEN]; + char accept[MAX_FIELD_LEN]; + char accept_charset[MAX_FIELD_LEN]; + char 
accept_encoding[MAX_FIELD_LEN]; + char accept_language[MAX_FIELD_LEN]; + char cache_control[MAX_FIELD_LEN]; + char if_modified_since[MAX_FIELD_LEN]; + char negotiate[MAX_FIELD_LEN]; + char pragma[MAX_FIELD_LEN]; + char referer[MAX_FIELD_LEN]; + + char *post_data; + char new_date[DATE_LEN]; + + int keep_alive; +} user_req_t; + +typedef enum ftp_commands { + FTP_COMM_NONE, + FTP_COMM_USER, + FTP_COMM_PASS, + FTP_COMM_ACCT, + FTP_COMM_CWD, + FTP_COMM_CDUP, + FTP_COMM_SMNT, + FTP_COMM_QUIT, + FTP_COMM_REIN, + FTP_COMM_PORT, + FTP_COMM_PASV, + FTP_COMM_TYPE, + FTP_COMM_STRU, + FTP_COMM_MODE, + FTP_COMM_RETR, + FTP_COMM_STOR, + FTP_COMM_STOU, + FTP_COMM_APPE, + FTP_COMM_ALLO, + FTP_COMM_REST, + FTP_COMM_RNFR, + FTP_COMM_RNTO, + FTP_COMM_ABOR, + FTP_COMM_DELE, + FTP_COMM_RMD, + FTP_COMM_MKD, + FTP_COMM_PWD, + FTP_COMM_LIST, + FTP_COMM_NLST, + FTP_COMM_SITE, + FTP_COMM_SYST, + FTP_COMM_STAT, + FTP_COMM_HELP, + FTP_COMM_NOOP, + FTP_COMM_FEAT, + FTP_COMM_CLNT, +} ftp_command_t; + +#endif diff -rNu linux-2.4.9-ac10/net/tux/Config.in linux/net/tux/Config.in --- linux-2.4.9-ac10/net/tux/Config.in Thu Jan 1 01:00:00 1970 +++ linux/net/tux/Config.in Mon Sep 10 16:18:08 2001 @@ -0,0 +1,7 @@ +tristate ' Threaded linUX application protocol accelerator layer (TUX)' CONFIG_TUX +if [ "$CONFIG_TUX" = "y" -o "$CONFIG_TUX" = "m" ]; then + bool ' External CGI module' CONFIG_TUX_EXTCGI + bool ' extended TUX logging format' CONFIG_TUX_EXTENDED_LOG + bool ' debug TUX' CONFIG_TUX_DEBUG +fi + diff -rNu linux-2.4.9-ac10/net/tux/Makefile linux/net/tux/Makefile --- linux-2.4.9-ac10/net/tux/Makefile Thu Jan 1 01:00:00 1970 +++ linux/net/tux/Makefile Mon Sep 10 16:18:08 2001 @@ -0,0 +1,17 @@ +# +# Makefile for TUX +# + +O_TARGET := tux.o +MOD_LIST_NAME := NET_MODULES + +obj-y := accept.o input.o userspace.o cachemiss.o output.o \ + redirect.o postpone.o logger.o proto_http.o proto_ftp.o \ + proc.o main.o mod.o abuf.o times.o + +obj-$(CONFIG_TUX_EXTCGI) += cgi.o extcgi.o +obj-m := $(O_TARGET) + + +include $(TOPDIR)/Rules.make + diff -rNu linux-2.4.9-ac10/net/tux/abuf.c linux/net/tux/abuf.c --- linux-2.4.9-ac10/net/tux/abuf.c Thu Jan 1 01:00:00 1970 +++ linux/net/tux/abuf.c Mon Sep 10 16:18:08 2001 @@ -0,0 +1,175 @@ +/* + * TUX - Integrated Application Protocols Layer and Object Cache + * + * Copyright (C) 2000, 2001, Ingo Molnar + * + * abuf.c: async buffer-sending + */ + +#include + +/**************************************************************** + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + ****************************************************************/ + +char * get_abuf (tux_req_t *req, int max_size) +{ + threadinfo_t *ti = req->ti; + struct page *page; + char *buf; + unsigned int offset; + unsigned int left; + + if (req->abuf.page || req->abuf.buf || req->abuf.size) + TUX_BUG(); + + if (max_size > PAGE_SIZE) + BUG(); + offset = ti->header_offset; + if (offset > PAGE_SIZE) + TUX_BUG(); + left = PAGE_SIZE - offset; + page = ti->header_cache; + if ((left < max_size) || !page) { + page = alloc_pages(GFP_KERNEL, 0); + if (ti->header_cache) + __free_page(ti->header_cache); + ti->header_cache = page; + ti->header_offset = 0; + offset = 0; + } + buf = page_address(page) + offset; + + if (!page) + BUG(); + req->abuf.page = page; + req->abuf.buf = buf; + req->abuf.size = 0; + req->abuf.offset = offset; + req->abuf.flags = 0; + get_page(req->abuf.page); + + return buf; +} + +static void do_send_abuf (tux_req_t *req, int cachemiss); + +void send_abuf (tux_req_t *req, int size, unsigned long flags, int push) +{ + threadinfo_t *ti = req->ti; + + Dprintk("send_abuf(req: %p, sock: %p): %p(%p), size:%d, off:%d, flags:%08lx\n", req, req->sock, req->abuf.page, req->abuf.buf, size, req->abuf.offset, flags); + + ti->header_offset += size; + if (ti->header_offset > PAGE_SIZE) + TUX_BUG(); + if (req->abuf.offset + req->abuf.size > PAGE_SIZE) + TUX_BUG(); + + req->abuf.flags = flags | MSG_NOSIGNAL; + req->abuf.size = size; + req->abuf.push = push; + + add_tux_atom(req, do_send_abuf); +} + +static void do_send_abuf (tux_req_t *req, int cachemiss) +{ + int ret; + +repeat: + Dprintk("do_send_abuf(%p,%d): %p(%p), size:%d, off:%d, flags:%08lx\n", + req, cachemiss, + req->abuf.page, req->abuf.buf, req->abuf.size, + req->abuf.offset, req->abuf.flags); + + if (tux_zerocopy_header) + ret = tcp_sendpage(req->sock, req->abuf.page, + req->abuf.offset, req->abuf.size, req->abuf.flags); + else { + mm_segment_t oldmm; + oldmm = get_fs(); set_fs(KERNEL_DS); + ret = send_sync_buf(req, req->sock, req->abuf.buf, + req->abuf.size, req->abuf.flags); + set_fs(oldmm); + } + + + Dprintk("do_send_abuf: ret: %d\n", ret); + if (!ret) + TUX_BUG(); + + if (ret < 0) { + if (ret != -EAGAIN) { + TDprintk("ret: %d, req->error = 3.\n", ret); + req->error = 3; + req->atom_idx = 0; + req->in_file.f_pos = 0; + __free_page(req->abuf.page); + memset(&req->abuf, 0, sizeof(req->abuf)); + zap_request(req, cachemiss); + return; + } + add_tux_atom(req, do_send_abuf); + if (add_output_space_event(req, req->sock)) { + del_tux_atom(req); + goto repeat; + } + return; + } + + req->abuf.buf += ret; + req->abuf.offset += ret; + req->abuf.size -= ret; + + if (req->abuf.size < 0) + TUX_BUG(); + if (req->abuf.size > 0) + goto repeat; + + Dprintk("DONE do_send_abuf: %p(%p), size:%d, off:%d, flags:%08lx\n", + req->abuf.page, req->abuf.buf, req->abuf.size, + req->abuf.offset, req->abuf.flags); + + __free_page(req->abuf.page); + memset(&req->abuf, 0, sizeof(req->abuf)); + + if (req->abuf.push || tux_push_all) { + if (req->sock) + tux_push_pending(req->sock->sk); + if (req->ftp_data_sock) + tux_push_pending(req->ftp_data_sock->sk); + } + + add_req_to_workqueue(req); +} + +void __send_async_message (tux_req_t *req, const char *message, int status, int push) +{ + int size = strlen(message); + char *buf; + + Dprintk("TUX: sending %d reply (%d bytes)!\n", status, size); + Dprintk("request %p, reply: %s\n", req, message); + if (!size) + TUX_BUG(); + buf = get_abuf(req, size); + memcpy(buf, message, size); + + req->status = status; + send_abuf(req, 
size, MSG_DONTWAIT, push); + add_req_to_workqueue(req); +} diff -rNu linux-2.4.9-ac10/net/tux/accept.c linux/net/tux/accept.c --- linux-2.4.9-ac10/net/tux/accept.c Thu Jan 1 01:00:00 1970 +++ linux/net/tux/accept.c Mon Sep 10 16:18:08 2001 @@ -0,0 +1,848 @@ +/* + * TUX - Integrated Application Protocols Layer and Object Cache + * + * Copyright (C) 2000, 2001, Ingo Molnar + * + * accept.c: accept new connections, allocate requests + */ + +#include + +/**************************************************************** + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + ****************************************************************/ + +int tux_nonagle = 2; +int tux_ack_pingpong = 1; +int tux_push_all = 0; +int tux_zerocopy_parse = 1; + +static int __idle_event (tux_req_t *req); +static int __output_space_event (tux_req_t *req); + +struct socket * start_listening(u16 port, u32 addr, int defer, int nr) +{ + struct sockaddr_in sin; + struct socket *sock = NULL; + struct sock *sk; + struct tcp_opt *tp; + int err; + + /* Create a listening socket: */ + + err = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock); + if (err < 0) { + printk(KERN_ERR "TUX: error %d creating socket.\n", err); + goto err; + } + + /* Bind the socket: */ + + sin.sin_family = AF_INET; + sin.sin_addr.s_addr = htonl(addr); + sin.sin_port = htons(port); + + sk = sock->sk; + sk->reuse = 1; + + + err = sock->ops->bind(sock, (struct sockaddr*)&sin, sizeof(sin)); + if (err < 0) { + printk(KERN_ERR "TUX: error %d binding socket. 
This means that probably some other process is (or was a short time ago) using addr %08x, port %i.\n", err, addr, port); + goto err; + } + + sk->urginline = 1; + + tp = &sk->tp_pinfo.af_tcp; + tp->ack.pingpong = tux_ack_pingpong; + + sk->linger = 0; + sk->lingertime = 0; + tp->linger2 = tux_keepalive_timeout * HZ; + + if (defer && !tux_keepalive_timeout && tux_defer_accept) + tp->defer_accept = 1; + + /* Now, start listening on the socket */ + + err = sock->ops->listen(sock, tux_max_backlog); + if (err) { + printk(KERN_ERR "TUX: error %d listening on socket.\n", err); + goto err; + } + +#define IP(n) ((unsigned char *)&addr)[n] + printk(KERN_NOTICE "TUX: thread %d listens on %d.%d.%d.%d:%d.\n", + nr, IP(3), IP(2), IP(1), IP(0), port); + return sock; + +err: + if (sock) + sock_release(sock); + return NULL; +} + +static inline void __kfree_req (tux_req_t *req, threadinfo_t * ti) +{ + list_del(&req->all); + DEBUG_DEL_LIST(&req->all); + ti->nr_requests--; + kfree(req); +} + +int flush_freequeue (threadinfo_t * ti) +{ + struct list_head *tmp; + unsigned long flags; + tux_req_t *req; + int count = 0; + + spin_lock_irqsave(&ti->free_requests_lock,flags); + while (ti->nr_free_requests) { + ti->nr_free_requests--; + tmp = ti->free_requests.next; + req = list_entry(tmp, tux_req_t, free); + list_del(tmp); + DEBUG_DEL_LIST(tmp); + DEC_STAT(nr_free_pending); + __kfree_req(req, ti); + count++; + } + spin_unlock_irqrestore(&ti->free_requests_lock,flags); + + return count; +} + +static tux_req_t * kmalloc_req (threadinfo_t * ti) +{ + struct list_head *tmp; + unsigned long flags; + tux_req_t *req; + + spin_lock_irqsave(&ti->free_requests_lock, flags); + if (ti->nr_free_requests) { + ti->nr_free_requests--; + tmp = ti->free_requests.next; + req = list_entry(tmp, tux_req_t, free); + list_del(tmp); + DEBUG_DEL_LIST(tmp); + DEC_STAT(nr_free_pending); + req->magic = TUX_MAGIC; + spin_unlock_irqrestore(&ti->free_requests_lock, flags); + } else { + spin_unlock_irqrestore(&ti->free_requests_lock, flags); + req = kmalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return NULL; + ti->nr_requests++; + memset (req, 0, sizeof(*req)); + list_add(&req->all, &ti->all_requests); + } + req->magic = TUX_MAGIC; + INC_STAT(nr_allocated); + init_waitqueue_entry(&req->sleep, current); + init_waitqueue_entry(&req->ftp_sleep, current); + INIT_LIST_HEAD(&req->work); + INIT_LIST_HEAD(&req->free); + INIT_LIST_HEAD(&req->lru); + req->ti = ti; + req->total_bytes = 0; + SET_TIMESTAMP(req->accept_timestamp); + req->first_timestamp = jiffies; + req->fd = -1; + init_timer(&req->keepalive_timer); + init_timer(&req->output_timer); + + Dprintk("allocated NEW req %p.\n", req); + return req; +} + +void kfree_req (tux_req_t *req) +{ + threadinfo_t * ti = req->ti; + unsigned long flags; + + Dprintk("freeing req %p.\n", req); + + if (req->magic != TUX_MAGIC) + TUX_BUG(); + spin_lock_irqsave(&ti->free_requests_lock,flags); + req->magic = 0; + DEC_STAT(nr_allocated); + if (req->sock || req->dentry || req->private) + TUX_BUG(); + if (ti->nr_free_requests > tux_max_free_requests) + __kfree_req(req, ti); + else { + req->error = 0; + ti->nr_free_requests++; + + // the free requests queue is LIFO + list_add(&req->free, &ti->free_requests); + INC_STAT(nr_free_pending); + } + spin_unlock_irqrestore(&ti->free_requests_lock,flags); +} + +void __add_req_to_workqueue (tux_req_t *req) +{ + threadinfo_t *ti = req->ti; + + if (!list_empty(&req->work)) + TUX_BUG(); + Dprintk("work-queueing request %p at %p.\n", req, __builtin_return_address(0)); + if 
(connection_too_fast(req)) + list_add_tail(&req->work, &ti->work_pending); + else + list_add(&req->work, &ti->work_pending); + INC_STAT(nr_work_pending); + if (ti->thread != current) + wake_up_process(ti->thread); + return; +} + +void add_req_to_workqueue (tux_req_t *req) +{ + unsigned long flags; + threadinfo_t *ti = req->ti; + + spin_lock_irqsave(&ti->work_lock, flags); + __add_req_to_workqueue(req); + spin_unlock_irqrestore(&ti->work_lock, flags); +} + +void del_output_timer (tux_req_t *req) +{ +#if CONFIG_SMP + if (!spin_is_locked(&req->ti->work_lock)) + TUX_BUG(); +#endif + if (!list_empty(&req->lru)) { + list_del(&req->lru); + DEBUG_DEL_LIST(&req->lru); + req->ti->nr_lru--; + } + Dprintk("del output timeout for req %p.\n", req); + del_timer(&req->output_timer); +} + +static void output_timeout_fn (unsigned long data); + +#define OUTPUT_TIMEOUT HZ + +static void add_output_timer (tux_req_t *req) +{ + struct timer_list *timer = &req->output_timer; + + timer->data = (unsigned long) req; + timer->function = &output_timeout_fn; + mod_timer(timer, jiffies + OUTPUT_TIMEOUT); +} + +static void output_timeout_fn (unsigned long data) +{ + tux_req_t *req = (tux_req_t *)data; + + if (connection_too_fast(req)) { + add_output_timer(req); +// mod_timer(&req->output_timer, jiffies + OUTPUT_TIMEOUT); + return; + } + output_space_event(req); +} + +void output_timeout (tux_req_t *req) +{ + Dprintk("output timeout for req %p.\n", req); + if (test_and_set_bit(0, &req->wait_output_space)) + TUX_BUG(); + INC_STAT(nr_output_space_pending); + add_output_timer(req); +} + +void __del_keepalive_timer (tux_req_t *req) +{ +#if CONFIG_SMP + if (!spin_is_locked(&req->ti->work_lock)) + TUX_BUG(); +#endif + if (!list_empty(&req->lru)) { + list_del(&req->lru); + DEBUG_DEL_LIST(&req->lru); + req->ti->nr_lru--; + } + Dprintk("del keepalive timeout for req %p.\n", req); + del_timer(&req->keepalive_timer); +} + +static void keepalive_timeout_fn (unsigned long data) +{ + tux_req_t *req = (tux_req_t *)data; + +#if CONFIG_TUX_DEBUG + TDprintk("req %p timed out after %d sec!\n", req, tux_keepalive_timeout); + print_req(req); +#endif + TDprintk("req->error = 3!\n"); + req->error = 3; + if (!idle_event(req)) + output_space_event(req); +} + +void __add_keepalive_timer (tux_req_t *req) +{ + struct timer_list *timer = &req->keepalive_timer; + + if (!tux_keepalive_timeout) + TUX_BUG(); +#if CONFIG_SMP + if (!spin_is_locked(&req->ti->work_lock)) + TUX_BUG(); +#endif + + if (!list_empty(&req->lru)) + TUX_BUG(); + if (req->ti->nr_lru > tux_max_keepalives) { + struct list_head *head, *last; + tux_req_t *last_req; + + head = &req->ti->lru; + last = head->prev; + if (last == head) + TUX_BUG(); + last_req = list_entry(last, tux_req_t, lru); + list_del(last); + DEBUG_DEL_LIST(last); + req->ti->nr_lru--; + + Dprintk("LRU-aging req %p!\n", last_req); + last_req->error = 3; + if (!__idle_event(last_req)) + __output_space_event(last_req); + } + list_add(&req->lru, &req->ti->lru); + req->ti->nr_lru++; + + timer->expires = jiffies + tux_keepalive_timeout * HZ; + timer->data = (unsigned long) req; + timer->function = &keepalive_timeout_fn; + add_timer(timer); +} + +static int __output_space_event (tux_req_t *req) +{ + if (!req || (req->magic != TUX_MAGIC)) + TUX_BUG(); + + if (!test_and_clear_bit(0, &req->wait_output_space)) { + Dprintk("output space ready event at <%p>, on non-idle %p.\n", __builtin_return_address(0), req); + return 0; + } + + Dprintk("output space ready event at <%p>, %p was waiting!\n", __builtin_return_address(0), req); + 
DEC_STAT(nr_output_space_pending); + + del_keepalive_timer(req); + del_output_timer(req); + + __add_req_to_workqueue(req); + return 1; +} + +int output_space_event (tux_req_t *req) +{ + int ret; + unsigned long flags; + + spin_lock_irqsave(&req->ti->work_lock, flags); + ret = __output_space_event(req); + spin_unlock_irqrestore(&req->ti->work_lock, flags); + + return ret; +} + +static int __idle_event (tux_req_t *req) +{ + threadinfo_t *ti; + + if (!req || (req->magic != TUX_MAGIC)) + TUX_BUG(); + ti = req->ti; + + if (!test_and_clear_bit(0, &req->idle_input)) { + Dprintk("data ready event at <%p>, on non-idle %p.\n", __builtin_return_address(0), req); + return 0; + } + + Dprintk("data ready event at <%p>, %p was idle!\n", __builtin_return_address(0), req); + del_keepalive_timer(req); + del_output_timer(req); + DEC_STAT(nr_idle_input_pending); + + req->sock->sk->tp_pinfo.af_tcp.ack.pingpong = tux_ack_pingpong; + SET_TIMESTAMP(req->accept_timestamp); + + __add_req_to_workqueue(req); + + return 1; +} + +int idle_event (tux_req_t *req) +{ + int ret; + unsigned long flags; + + spin_lock_irqsave(&req->ti->work_lock, flags); + ret = __idle_event(req); + spin_unlock_irqrestore(&req->ti->work_lock, flags); + + return ret; +} + +#define HANDLE_CALLBACK(callback, tux_name, real_name, param...) \ + tux_req_t *req; \ + \ + read_lock(&sk->callback_lock); \ + req = sk->user_data; \ + \ + Dprintk("callback "#callback"(%p) req %p.\n", \ + sk->callback, req); \ + \ + if (!req) { \ + read_unlock(&sk->callback_lock); \ + if (sk->callback == tux_name) \ + TUX_BUG(); \ + if (sk->callback) \ + sk->callback(param); \ + return; \ + } \ + Dprintk(#tux_name"() on %p.\n", req); \ + if (req->magic != TUX_MAGIC) \ + TUX_BUG(); \ + if (req->real_name) \ + req->real_name(param); + +static void tux_data_ready (struct sock *sk, int len) +{ + HANDLE_CALLBACK(data_ready, tux_data_ready, real_data_ready, sk, len); + + if (!idle_event(req)) + output_space_event(req); + read_unlock(&sk->callback_lock); +} + +static void tux_write_space (struct sock *sk) +{ + HANDLE_CALLBACK(write_space, tux_write_space, real_write_space, sk); + + Dprintk("sk->wmem_queued: %d, sk->sndbuf: %d.\n", + sk->wmem_queued, sk->sndbuf); + + if (!idle_event(req)) + output_space_event(req); + read_unlock(&sk->callback_lock); +} + +static void tux_error_report (struct sock *sk) +{ + HANDLE_CALLBACK(error_report, tux_error_report, real_error_report, sk); + +#if CONFIG_TUX_DEBUG + // show_stack(NULL); +#endif + req->error = 3; + if (!idle_event(req)) + output_space_event(req); + read_unlock(&sk->callback_lock); +} + +static void tux_state_change (struct sock *sk) +{ + HANDLE_CALLBACK(state_change, tux_state_change, real_state_change, sk); + + if (req->sock && req->sock->sk && + (req->sock->sk->state > TCP_ESTABLISHED)) { + Dprintk("req %p changed to TCP non-established!\n", req); + Dprintk("req->sock: %p\n", req->sock); + if (req->sock) + Dprintk("req->sock->sk: %p\n", req->sock->sk); + if (req->sock && req->sock->sk) + Dprintk("TCP state: %d\n", req->sock->sk->state); + Dprintk("req->error = 3!\n"); + req->error = 3; + } + if (!idle_event(req)) + output_space_event(req); + read_unlock(&sk->callback_lock); +} + +static void tux_destruct (struct sock *sk) +{ + HANDLE_CALLBACK(destruct, tux_destruct, real_destruct, sk); + read_unlock(&sk->callback_lock); + BUG(); +} + +static void tux_ftp_data_ready (struct sock *sk, int len) +{ + HANDLE_CALLBACK(data_ready, tux_ftp_data_ready, + ftp_real_data_ready, sk, len); + + if (!idle_event(req)) + 
output_space_event(req); + read_unlock(&sk->callback_lock); +} + +static void tux_ftp_write_space (struct sock *sk) +{ + HANDLE_CALLBACK(write_space, tux_ftp_write_space, + ftp_real_write_space, sk); + + Dprintk("sk->wmem_queued: %d, sk->sndbuf: %d.\n", + sk->wmem_queued, sk->sndbuf); + + if (!idle_event(req)) + output_space_event(req); + read_unlock(&sk->callback_lock); +} + +static void tux_ftp_error_report (struct sock *sk) +{ + HANDLE_CALLBACK(error_report, tux_ftp_error_report, + ftp_real_error_report, sk); + + TDprintk("req %p got TCP errors on FTP data connection!\n", req); + TDprintk("req->error = 3!\n"); + req->error = 3; + if (!idle_event(req)) + output_space_event(req); + read_unlock(&sk->callback_lock); +} + +static void tux_ftp_state_change (struct sock *sk) +{ + HANDLE_CALLBACK(state_change, tux_ftp_state_change, + ftp_real_state_change, sk); + +#if 0 + if (req->ftp_data_sock && req->ftp_data_sock->sk && + (req->ftp_data_sock->sk->state != TCP_ESTABLISHED) && + (req->ftp_data_sock->sk->state != TCP_LISTEN)) { + Dprintk("req %p FTP data sock changed to TCP non-established!\n", req); + Dprintk("req->ftp_data_sock: %p\n", req->ftp_data_sock); + + TDprintk("req->error = 1!\n"); + req->error = 3; + } +#endif + if (req->sock && req->sock->sk && + (req->sock->sk->state > TCP_ESTABLISHED)) { + Dprintk("req %p FTP control sock changed to TCP non-established!\n", req); + Dprintk("req->sock: %p\n", req->sock); + + TDprintk("req->error = 3!\n"); + req->error = 3; + } + if (!idle_event(req)) + output_space_event(req); + read_unlock(&sk->callback_lock); +} + +static void tux_ftp_destruct (struct sock *sk) +{ + HANDLE_CALLBACK(destruct, tux_ftp_destruct, ftp_real_destruct, sk); + read_unlock(&sk->callback_lock); + BUG(); +} + +static void link_tux_socket (tux_req_t *req, struct socket *sock) +{ + struct sock *sk = sock->sk; + + if (req->sock) + TUX_BUG(); + if (sk->destruct == tux_destruct) + TUX_BUG(); + /* + * (No need to lock the socket, we just want to + * make sure that events from now on go through + * tux_data_ready()) + */ + write_lock_irq(&sk->callback_lock); + req->sock = sock; + sk->user_data = req; + req->real_data_ready = xchg(&sk->data_ready, tux_data_ready); + req->real_state_change = xchg(&sk->state_change, tux_state_change); + req->real_write_space = xchg(&sk->write_space, tux_write_space); + req->real_error_report = xchg(&sk->error_report, tux_error_report); + req->real_destruct = xchg(&sk->destruct, tux_destruct); + write_unlock_irq(&sk->callback_lock); + if (req->real_destruct == tux_destruct) + TUX_BUG(); + + add_wait_queue(sk->sleep, &req->sleep); +} + +void link_tux_ftp_data_socket (tux_req_t *req, struct socket *sock) +{ + struct sock *sk = sock->sk; + + if (req->ftp_data_sock) + TUX_BUG(); + if (sk->destruct == tux_ftp_destruct) + TUX_BUG(); + /* + * (No need to lock the socket, we just want to + * make sure that events from now on go through + * tux_data_ready()) + */ + write_lock_irq(&sk->callback_lock); + req->ftp_data_sock = sock; + sk->user_data = req; + req->ftp_real_data_ready = xchg(&sk->data_ready, tux_ftp_data_ready); + req->ftp_real_state_change = xchg(&sk->state_change, tux_ftp_state_change); + req->ftp_real_write_space = xchg(&sk->write_space, tux_ftp_write_space); + req->ftp_real_error_report = xchg(&sk->error_report, tux_ftp_error_report); + req->ftp_real_destruct = xchg(&sk->destruct, tux_ftp_destruct); + write_unlock_irq(&sk->callback_lock); + if (req->ftp_real_destruct == tux_ftp_destruct) + TUX_BUG(); + + add_wait_queue(sk->sleep, 
&req->ftp_sleep); +} + +void link_tux_ftp_accept_socket (tux_req_t *req, struct socket *sock) +{ + struct sock *sk = sock->sk; + + if (req->ftp_data_sock) + TUX_BUG(); + if (sk->destruct != tux_ftp_destruct) + TUX_BUG(); + if (sk->data_ready != tux_ftp_data_ready) + TUX_BUG(); + /* + * (No need to lock the socket, we just want to + * make sure that events from now on go through + * tux_data_ready()) + */ + write_lock_irq(&sk->callback_lock); + req->ftp_data_sock = sock; + sk->user_data = req; + xchg(&sk->data_ready, tux_ftp_data_ready); + xchg(&sk->state_change, tux_ftp_state_change); + xchg(&sk->write_space, tux_ftp_write_space); + xchg(&sk->error_report, tux_ftp_error_report); + xchg(&sk->destruct, tux_ftp_destruct); + write_unlock_irq(&sk->callback_lock); + + if (req->ftp_real_destruct == tux_ftp_destruct) + TUX_BUG(); + + add_wait_queue(sk->sleep, &req->ftp_sleep); +} + +void unlink_tux_socket (tux_req_t *req) +{ + struct sock *sk; + + if (!req->sock || !req->sock->sk) + return; + sk = req->sock->sk; + + if (!sk->user_data) + TUX_BUG(); + if (req->real_destruct == tux_destruct) + TUX_BUG(); + + write_lock_irq(&sk->callback_lock); + xchg(&sk->data_ready, req->real_data_ready); + xchg(&sk->state_change, req->real_state_change); + xchg(&sk->write_space, req->real_write_space); + xchg(&sk->error_report, req->real_error_report); + xchg(&sk->destruct, req->real_destruct); + xchg(&sk->user_data, NULL); + write_unlock_irq(&sk->callback_lock); + + if (sk->destruct == tux_destruct) + TUX_BUG(); + + remove_wait_queue(sk->sleep, &req->sleep); +} + +void unlink_tux_ftp_data_socket (tux_req_t *req) +{ + struct sock *sk; + + if (!req->ftp_data_sock || !req->ftp_data_sock->sk) + return; + sk = req->ftp_data_sock->sk; + + if (req->real_destruct == tux_ftp_destruct) + TUX_BUG(); + + write_lock_irq(&sk->callback_lock); + xchg(&sk->data_ready, req->ftp_real_data_ready); + xchg(&sk->state_change, req->ftp_real_state_change); + xchg(&sk->write_space, req->ftp_real_write_space); + xchg(&sk->error_report, req->ftp_real_error_report); + xchg(&sk->destruct, req->ftp_real_destruct); + xchg(&sk->user_data, NULL); + write_unlock_irq(&sk->callback_lock); + + if (sk->destruct == tux_ftp_destruct) + TUX_BUG(); + + remove_wait_queue(sk->sleep, &req->ftp_sleep); +} + +void unlink_tux_listen_socket (tux_req_t *req) +{ + struct sock *sk; + + if (!req->ftp_data_sock || !req->ftp_data_sock->sk) + return; + sk = req->ftp_data_sock->sk; + + if (req->real_destruct == tux_ftp_destruct) + TUX_BUG(); + + write_lock_irq(&sk->callback_lock); + sk->data_ready = req->ftp_real_data_ready; + sk->state_change = req->ftp_real_state_change; + sk->write_space = req->ftp_real_write_space; + sk->error_report = req->ftp_real_error_report; + sk->destruct = req->ftp_real_destruct; + xchg(&sk->user_data, NULL); + write_unlock_irq(&sk->callback_lock); + + if (sk->destruct == tux_ftp_destruct) + TUX_BUG(); + + remove_wait_queue(sk->sleep, &req->ftp_sleep); +} + +void add_tux_atom (tux_req_t *req, atom_func_t *atom) +{ + Dprintk("adding TUX atom %p to req %p, atom_idx: %d, at %p.\n", + atom, req, req->atom_idx, __builtin_return_address(0)); + if (req->atom_idx == MAX_TUX_ATOMS) + TUX_BUG(); + req->atoms[req->atom_idx] = atom; + req->atom_idx++; +} + +void del_tux_atom (tux_req_t *req) +{ + if (!req->atom_idx) + TUX_BUG(); + req->atom_idx--; + Dprintk("removing TUX atom %p to req %p, atom_idx: %d, at %p.\n", + req->atoms[req->atom_idx], req, req->atom_idx, __builtin_return_address(0)); +} + +void tux_schedule_atom (tux_req_t *req, int cachemiss) +{ 
+ if (!list_empty(&req->work)) + TUX_BUG(); + if (!req->atom_idx) + TUX_BUG(); + req->atom_idx--; + Dprintk("DONE TUX atom %p to req %p, atom_idx: %d, at %p.\n", + req->atoms[req->atom_idx], req, req->atom_idx, __builtin_return_address(0)); + req->atoms[req->atom_idx](req, cachemiss); +} + +/* + * Puts newly accepted connections into the inputqueue. This is the + * first step in the life of a TUX request. + */ +int accept_requests (threadinfo_t *ti) +{ + int count = 0, last_count = 0, error, socknr = 0; + struct socket *sock, *new_sock; + struct tcp_opt *tp1, *tp2; + tux_req_t *req; + + if (ti->nr_requests > tux_max_connect) + goto out; + +repeat: + for (socknr = 0; socknr < CONFIG_TUX_NUMSOCKETS; socknr++) { + tux_listen_t *tux_listen; + + tux_listen = ti->listen + socknr; + sock = tux_listen->sock; + if (!sock) + break; + if (current->need_resched) + break; + + tp1 = &sock->sk->tp_pinfo.af_tcp; + /* + * Quick test to see if there are connections on the queue. + * This is cheaper than accept() itself because this saves us + * the allocation of a new socket. (Which doesn't seem to be + * used anyway) + */ + if (tp1->accept_queue) { + tux_proto_t *proto; + + if (!count++) + __set_task_state(current, TASK_RUNNING); + + new_sock = sock_alloc(); + if (!new_sock) + goto out; + + new_sock->type = sock->type; + new_sock->ops = sock->ops; + + error = sock->ops->accept(sock, new_sock, O_NONBLOCK); + if (error < 0) + goto err; + if (new_sock->sk->state != TCP_ESTABLISHED) + goto err; + + tp2 = &new_sock->sk->tp_pinfo.af_tcp; + tp2->nonagle = tux_nonagle; + tp2->ack.pingpong = tux_ack_pingpong; + new_sock->sk->reuse = 1; + new_sock->sk->urginline = 1; + + /* Allocate a request-entry for the connection */ + req = kmalloc_req(ti); + if (!req) + BUG(); + link_tux_socket(req, new_sock); + + proto = req->proto = tux_listen->proto; + + proto->got_request(req); + } + } + if (count != last_count) { + last_count = count; + goto repeat; + } +out: + return count; +err: + sock_release(new_sock); + goto out; +} + diff -rNu linux-2.4.9-ac10/net/tux/cachemiss.c linux/net/tux/cachemiss.c --- linux-2.4.9-ac10/net/tux/cachemiss.c Thu Jan 1 01:00:00 1970 +++ linux/net/tux/cachemiss.c Mon Sep 10 16:18:08 2001 @@ -0,0 +1,258 @@ +/* + * TUX - Integrated Application Protocols Layer and Object Cache + * + * Copyright (C) 2000, 2001, Ingo Molnar + * + * cachemiss.c: handle the 'slow IO path' by queueing not-yet-cached + * requests to the IO-thread pool. Dynamic load balancing is done + * between IO threads, based on the number of requests they have pending. + */ + +#include +#include + +/**************************************************************** + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + ****************************************************************/ + +void queue_cachemiss (tux_req_t *req) +{ + iothread_t *iot = req->ti->iot; + + if (req->idle_input || req->wait_output_space) + TUX_BUG(); + req->had_cachemiss = 1; + if (!list_empty(&req->work)) + TUX_BUG(); + spin_lock(&iot->async_lock); + if (connection_too_fast(req)) + list_add_tail(&req->work, &iot->async_queue); + else + list_add(&req->work, &iot->async_queue); + iot->nr_async_pending++; + INC_STAT(nr_cachemiss_pending); + spin_unlock(&iot->async_lock); + + wake_up(&iot->async_sleep); +} + +static tux_req_t * get_cachemiss (iothread_t *iot) +{ + struct list_head *tmp; + tux_req_t *req = NULL; + + spin_lock(&iot->async_lock); + if (!list_empty(&iot->async_queue)) { + + tmp = iot->async_queue.next; + req = list_entry(tmp, tux_req_t, work); + + list_del(tmp); + DEBUG_DEL_LIST(tmp); + iot->nr_async_pending--; + DEC_STAT(nr_cachemiss_pending); + + if (req->ti->iot != iot) + TUX_BUG(); + } + spin_unlock(&iot->async_lock); + return req; +} + +struct file * tux_open_file (char *filename, int mode) +{ + struct file *filp; + + if (!filename) + TUX_BUG(); + + /* Rule no. 3 -- Does the file exist ? */ + + filp = filp_open(filename, mode, 0600); + + if (IS_ERR(filp) || !filp || !filp->f_dentry) + goto err; + +out: + return filp; +err: + Dprintk("filp_open() error: %d.\n", (int)filp); + filp = NULL; + goto out; +} + +static int cachemiss_thread (void *data) +{ + tux_req_t *req; + struct k_sigaction *ka; + DECLARE_WAITQUEUE(wait, current); + iothread_t *iot = data; + int nr = iot->ti->cpu, wake_up; + + Dprintk("iot %p/%p got started.\n", iot, current); +// daemonize(); + tux_chroot(tux_docroot); + drop_permissions(); + + spin_lock(&iot->async_lock); + iot->threads++; + sprintf(current->comm, "async IO %d/%d", nr, iot->threads); + +#if CONFIG_SMP + { + unsigned int mask; + + mask = 1 << nr; + if (cpu_online_map & mask) + current->cpus_allowed = mask; + } +#endif + + spin_lock_irq(¤t->sigmask_lock); + ka = current->sig->action + SIGCHLD-1; + ka->sa.sa_handler = SIG_IGN; + siginitsetinv(¤t->blocked, sigmask(SIGCHLD)); + recalc_sigpending(current); + spin_unlock_irq(¤t->sigmask_lock); + + spin_unlock(&iot->async_lock); + + for (;;) { + while (!list_empty(&iot->async_queue) && + (req = get_cachemiss(iot))) { + + tux_schedule_atom(req, 1); + if (signal_pending(current)) { + flush_all_signals(); + while (sys_wait4(-1, NULL, WNOHANG, NULL) > 0) + /* nothing */; + } + } + if (signal_pending(current)) { + flush_all_signals(); + while (sys_wait4(-1, NULL, WNOHANG, NULL) > 0) + /* nothing */; + } + if (!list_empty(&iot->async_queue)) + continue; + if (iot->shutdown) { + Dprintk("iot %p/%p got shutdown!\n", iot, current); + break; + } + add_wait_queue_exclusive(&iot->async_sleep, &wait); + __set_current_state(TASK_INTERRUPTIBLE); + if (list_empty(&iot->async_queue)) { + Dprintk("iot %p/%p going to sleep.\n", iot, current); + schedule(); + Dprintk("iot %p/%p got woken up.\n", iot, current); + } + __set_current_state(TASK_RUNNING); + remove_wait_queue(&iot->async_sleep, &wait); + } + wake_up = 0; + spin_lock(&iot->async_lock); + if (!--iot->threads) + wake_up = 1; + spin_unlock(&iot->async_lock); + Dprintk("iot %p/%p has finished shutdown!\n", iot, current); + if (wake_up) { + Dprintk("iot %p/%p waking up master.\n", iot, current); + wake_up(&iot->wait_shutdown); + } + + return 0; +} + +static void __stop_cachemiss_threads (iothread_t *iot) +{ + DECLARE_WAITQUEUE(wait, current); + + Dprintk("stopping async IO threads %p.\n", iot); + 
add_wait_queue(&iot->wait_shutdown, &wait); + + spin_lock(&iot->async_lock); + if (iot->shutdown) + TUX_BUG(); + if (!iot->threads) + TUX_BUG(); + iot->shutdown = 1; + wake_up_all(&iot->async_sleep); + spin_unlock(&iot->async_lock); + + __set_current_state(TASK_UNINTERRUPTIBLE); + Dprintk("waiting for async IO threads %p to exit.\n", iot); + schedule(); + remove_wait_queue(&iot->wait_shutdown, &wait); + + if (iot->threads) + TUX_BUG(); + if (iot->nr_async_pending) + TUX_BUG(); + Dprintk("stopped async IO threads %p.\n", iot); +} + +void stop_cachemiss_threads (threadinfo_t *ti) +{ + iothread_t *iot = ti->iot; + + if (!iot) + TUX_BUG(); + if (iot->nr_async_pending) + TUX_BUG(); + __stop_cachemiss_threads(iot); + ti->iot = NULL; + kfree(iot); +} + +int start_cachemiss_threads (threadinfo_t *ti) +{ + int i, pid; + + iothread_t *iot; + + iot = kmalloc(sizeof(*iot), GFP_KERNEL); + if (!iot) + return -ENOMEM; + memset(iot, 0, sizeof(*iot)); + + iot->ti = ti; + iot->async_lock = SPIN_LOCK_UNLOCKED; + iot->nr_async_pending = 0; + INIT_LIST_HEAD(&iot->async_queue); + init_waitqueue_head(&iot->async_sleep); + init_waitqueue_head(&iot->wait_shutdown); + + for (i = 0; i < NR_IO_THREADS; i++) { + pid = kernel_thread(cachemiss_thread, (void *)iot, 0); + if (pid < 0) { + printk(KERN_ERR "TUX: error %d creating IO thread!\n", + pid); + __stop_cachemiss_threads(iot); + kfree(iot); + return pid; + } + } + ti->iot = iot; + /* + * Wait for all cachemiss threads to start up: + */ + while (iot->threads != NR_IO_THREADS) { + __set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(HZ/10); + } + return 0; +} + diff -rNu linux-2.4.9-ac10/net/tux/cgi.c linux/net/tux/cgi.c --- linux-2.4.9-ac10/net/tux/cgi.c Thu Jan 1 01:00:00 1970 +++ linux/net/tux/cgi.c Mon Sep 10 16:18:08 2001 @@ -0,0 +1,211 @@ +/* + * TUX - Integrated Application Protocols Layer and Object Cache + * + * Copyright (C) 2000, 2001, Ingo Molnar + * + * cgi.c: user-space CGI (and other) code execution. + */ + +#define __KERNEL_SYSCALLS__ + +#include + +/**************************************************************** + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + ****************************************************************/ + +/* + * Define our own execve() syscall - the unistd.h one uses + * errno which is not an exported symbol. (but removing it + * breaks old userspace tools.) 
+ */ +#ifdef CONFIG_X86 + +static int tux_execve (const char *file, char **argv, char **envp) +{ + int ret; + __asm__ volatile ("int $0x80" + : "=a" (ret) + : "0" (__NR_execve), + "b" ((int)file), + "c" ((int)argv), + "d" ((long)envp)); + return ret; +} + +#else +#ifdef CONFIG_ALPHA + +static int tux_execve (const char *arg1, char **arg2, char **arg3) +{ + long _sc_ret, _sc_err; + { + register long _sc_0 __asm__("$0"); + register long _sc_16 __asm__("$16"); + register long _sc_17 __asm__("$17"); + register long _sc_18 __asm__("$18"); + register long _sc_19 __asm__("$19"); + + _sc_0 = __NR_execve; + _sc_16 = (long) (arg1); + _sc_17 = (long) (arg2); + _sc_18 = (long) (arg3); + __asm__("callsys # %0 %1 %2 %3 %4 %5" + : "=r"(_sc_0), "=r"(_sc_19) + : "0"(_sc_0), "r"(_sc_16), "r"(_sc_17), + "r"(_sc_18) + : _syscall_clobbers); + _sc_ret = _sc_0, _sc_err = _sc_19; + } + return _sc_err; +} + +#else +# define tux_execve execve +#endif +#endif + +static int exec_usermode(char *program_path, char *argv[], char *envp[]) +{ + int i, err; + + err = tux_chroot(tux_cgiroot); + if (err) { + printk(KERN_ERR "TUX: CGI chroot returned %d, /proc/sys/net/tux/cgiroot is probably set up incorrectly! Aborting CGI execution.\n", err); + return err; + } + + /* Allow execve args to be in kernel space. */ + set_fs(KERNEL_DS); + + spin_lock_irq(¤t->sigmask_lock); + flush_signals(current); + flush_signal_handlers(current); + spin_unlock_irq(¤t->sigmask_lock); + + for (i = 3; i < current->files->max_fds; i++ ) + if (current->files->fd[i]) + sys_close(i); + + err = tux_execve(program_path, argv, envp); + if (err < 0) + return err; + return 0; +} + +static int exec_helper (void * data) +{ + exec_param_t *param = data; + char **tmp; + int ret; + + current->flags &= ~PF_ATOMICALLOC; + sprintf(current->comm,"doexec - %i", current->pid); +#if CONFIG_SMP + if (!tux_cgi_inherit_cpu) { + unsigned int mask = cpu_online_map & tux_cgi_cpu_mask; + + if (mask) + current->cpus_allowed = mask; + else + current->cpus_allowed = cpu_online_map; + } +#endif + + if (!param) + TUX_BUG(); + Dprintk("doing exec(%s).\n", param->command); + + Dprintk("argv: "); + tmp = param->argv; + while (*tmp) { + Dprintk("{%s} ", *tmp); + tmp++; + } + Dprintk("\n"); + Dprintk("envp: "); + tmp = param->envp; + while (*tmp) { + Dprintk("{%s} ", *tmp); + tmp++; + } + Dprintk("\n"); + /* + * Set up stdin, stdout and stderr of the external + * CGI application. + */ + if (param->pipe_fds) { + sys_close(1); + sys_close(2); + sys_close(4); + if (sys_dup(3) != 1) + TUX_BUG(); + if (sys_dup(5) != 2) + TUX_BUG(); + sys_close(3); + sys_close(5); + // do not close on exec. 
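Editor's sketch: tux_execve() above hand-rolls the execve trap per architecture (int $0x80 on x86, callsys on Alpha) purely to sidestep the errno-based _syscall macros. The userspace equivalent of that dodge is syscall(2), which likewise bypasses the libc wrapper. A small, hypothetical illustration, not part of the patch:

/* sketch: raw execve without the libc convenience wrapper */
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

static int raw_execve(const char *file, char *const argv[], char *const envp[])
{
        /* returns only on failure: -1 with errno set, whereas the
         * kernel-side wrappers above return the raw -errno value */
        return syscall(SYS_execve, file, argv, envp);
}

int main(void)
{
        char *argv[] = { "/bin/echo", "hello from exec", NULL };
        char *envp[] = { "PATH=/bin:/usr/bin", NULL };

        raw_execve(argv[0], argv, envp);
        perror("execve");               /* reached only if the exec failed */
        return 1;
}

Note the differing sign convention: the per-arch wrappers in the patch hand back the negative errno straight from the trap, while syscall() folds it into errno.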
+ sys_fcntl(0, F_SETFD, 0); + sys_fcntl(1, F_SETFD, 0); + sys_fcntl(2, F_SETFD, 0); + } + ret = exec_usermode(param->command, param->argv, param->envp); + if (ret < 0) + Dprintk("bug: exec() returned %d.\n", ret); + else + Dprintk("exec()-ed successfully!\n"); + return 0; +} + +pid_t tux_exec_process (char *command, char **argv, + char **envp, int pipe_fds, + exec_param_t *param, int wait) +{ + exec_param_t param_local; + pid_t pid; + int ret = 0; + struct k_sigaction *ka; + + ka = current->sig->action + SIGCHLD-1; + ka->sa.sa_handler = SIG_IGN; + + if (!param && wait) + param = ¶m_local; + + param->command = command; + param->argv = argv; + param->envp = envp; + param->pipe_fds = pipe_fds; + +repeat_fork: + pid = kernel_thread(exec_helper, (void*) param, CLONE_SIGHAND|SIGCHLD); + Dprintk("kernel thread created PID %d.\n", pid); + if (pid < 0) { + printk(KERN_ERR "TUX: could not create new CGI kernel thread due to %d... retrying.\n", pid); + current->state = TASK_UNINTERRUPTIBLE; + schedule_timeout(HZ); + goto repeat_fork; + } + if (wait) { +repeat: + reap_kids(); + ret = sys_wait4(pid, NULL, __WALL, NULL); + Dprintk("sys_wait4 returned %d.\n", ret); + if (ret == -ERESTARTSYS) + goto repeat; + } + return pid; +} diff -rNu linux-2.4.9-ac10/net/tux/extcgi.c linux/net/tux/extcgi.c --- linux-2.4.9-ac10/net/tux/extcgi.c Thu Jan 1 01:00:00 1970 +++ linux/net/tux/extcgi.c Mon Sep 10 16:18:08 2001 @@ -0,0 +1,325 @@ +/* + * TUX - Integrated Application Protocols Layer and Object Cache + * + * Copyright (C) 2000, 2001, Ingo Molnar + * + * extcgi.c: dynamic TUX module which forks and starts an external CGI + */ + +#define __KERNEL_SYSCALLS__ + +#include +#include "parser.h" + +/**************************************************************** + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + ****************************************************************/ + +#define MAX_ENVLEN 1000 +#define MAX_CGI_METAVARIABLES 32 +#define CGI_CHUNK_SIZE 1024 +#define MAX_CGI_COMMAND_LEN 256 + +#if CONFIG_TUX_DEBUG +#define PRINT_MESSAGE_LEFT \ + Dprintk("CGI message left at %s:%d:\n--->{%s}<---\n", \ + __FILE__, __LINE__, curr) +#else +#define PRINT_MESSAGE_LEFT do {} while(0) +#endif + +#define GOTO_INCOMPLETE do { Dprintk("invalid CGI reply at %s:%d.\n", __FILE__, __LINE__); goto invalid; } while (0) + +/* + * Please acknowledge our hard work by not changing this define, or + * at least please acknowledge us by leaving "TUX/2.0 (Linux)" in + * the ID string. Thanks! 
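Editor's sketch: exec_helper()/tux_exec_process() above implement the classic CGI launch sequence, wiring pre-created pipes onto fds 0-2, clearing the close-on-exec flags, exec-ing the binary, and optionally waiting for it while retrying the wait whenever it is interrupted. For reference, the same sequence in plain POSIX (pipe/fork/dup2/waitpid; /bin/cat stands in for the CGI binary, and error handling is minimal):

/* sketch: launch a "CGI" child over pipes and wait for it, EINTR-safely */
#include <errno.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        int in_pipe[2], out_pipe[2];
        pid_t pid;
        int status;

        if (pipe(in_pipe) || pipe(out_pipe))
                return 1;

        pid = fork();
        if (pid == 0) {                         /* child: becomes the CGI */
                dup2(in_pipe[0], 0);            /* POST data arrives here   */
                dup2(out_pipe[1], 1);           /* CGI reply goes back here */
                close(in_pipe[1]);
                close(out_pipe[0]);
                char *argv[] = { "/bin/cat", NULL };
                execv(argv[0], argv);
                _exit(127);
        }

        close(in_pipe[0]);
        close(out_pipe[1]);
        write(in_pipe[1], "name=value", 10);    /* the "POST body" */
        close(in_pipe[1]);

        char buf[256];
        ssize_t n = read(out_pipe[0], buf, sizeof(buf));
        if (n > 0)
                fwrite(buf, 1, n, stdout);

        while (waitpid(pid, &status, 0) < 0 && errno == EINTR)
                ;                               /* mirrors the -ERESTARTSYS retry loop */
        return 0;
}

The kernel side cannot fork(), so tux_exec_process() spawns a kernel thread with CLONE_SIGHAND|SIGCHLD and lets that thread turn itself into the CGI process via execve.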
:-) + */ +#define CGI_SUCCESS2 "HTTP/1.1 200 OK\r\nConnection: close\r\nServer: TUX/2.0 (Linux)\r\n" + +static int handle_cgi_reply (tux_req_t *req) +{ + int first = 1; + int len, left, total; + char buf [CGI_CHUNK_SIZE+1], *tmp; + mm_segment_t oldmm; + + sys_close(3); + sys_close(4); + sys_close(5); + oldmm = get_fs(); set_fs(KERNEL_DS); + send_sync_buf(NULL, req->sock, CGI_SUCCESS2, sizeof(CGI_SUCCESS2)-1, 0); + set_fs(oldmm); + + req->bytes_sent = 0; + /* + * The new process is the new owner of the socket, it will + * close it. + */ +repeat: + left = CGI_CHUNK_SIZE; + len = 0; + total = 0; + tmp = buf; + do { + mm_segment_t oldmm; + + tmp += len; + total += len; + left -= len; + if (!left) + break; +repeat_read: + Dprintk("reading %d bytes via sys_read().\n", left); + oldmm = get_fs(); set_fs(KERNEL_DS); + len = sys_read(2, tmp, left); + set_fs(oldmm); + Dprintk("got %d bytes from sys_read() (total: %d).\n", len, total); + if (len > 0) + tmp[len] = 0; + Dprintk("CGI reply: (%d bytes, total %d).\n", len, total); + if (len == -ERESTARTSYS) { + flush_all_signals(); + reap_kids(); + goto repeat_read; + } + } while (len > 0); + if (total > CGI_CHUNK_SIZE) { + printk(KERN_ERR "TUX: CGI weirdness. total: %d, len: %d, left: %d.\n", total, len, left); + TUX_BUG(); + } + Dprintk("CGI done reply chunk: (%d bytes last, total %d).\n", len, total); + if (total) { + mm_segment_t oldmm; + + oldmm = get_fs(); set_fs(KERNEL_DS); + if (!len) + send_sync_buf(NULL, req->sock, buf, total, 0); + else + send_sync_buf(NULL, req->sock, buf, total, MSG_MORE); + set_fs(oldmm); + req->bytes_sent += total; + } + + Dprintk("bytes_sent: %d\n", req->bytes_sent); + if ((total > 0) && first) { + first = 0; + + if (buf[total]) + TUX_BUG(); + tmp = strstr(buf, "\n\n"); + if (tmp) { + req->bytes_sent -= (tmp-buf) + 2; + Dprintk("new bytes_sent: %d\n", req->bytes_sent); + } else { + req->bytes_sent = 0; + req_err(req); + } + } + if (len < 0) + Dprintk("sys_read returned with %d.\n", len); + else { + if (total > 0) + goto repeat; + } + sys_close(2); + + req->status = 200; + add_req_to_workqueue(req); + return -1; +} + +static int exec_external_cgi (void *data) +{ + exec_param_t param; + tux_req_t *req = data; + char *envp[MAX_CGI_METAVARIABLES+1], **envp_p; + char *argv[] = { NULL}; + char envstr[MAX_ENVLEN], *tmp; + unsigned int host; + struct k_sigaction *ka; + int in_pipe_fds[2], out_pipe_fds[2], err_pipe_fds[2], len; + char command [MAX_CGI_COMMAND_LEN]; + pid_t pid; + int ret; + + len = strlen(tux_docroot); + if (req->objectname_len + len + 12 > MAX_CGI_COMMAND_LEN) + return -ENOMEM; + current->flags &= ~PF_ATOMICALLOC; + sprintf(current->comm,"cgimain - %i", current->pid); +#define IP(x) (((unsigned char *)&host)[x]) + host = req->sock->sk->daddr; + + tmp = envstr; + envp_p = envp; + +#define WRITE_ENV(str...) 
\ + if (envp_p >= envp + MAX_CGI_METAVARIABLES) \ + TUX_BUG(); \ + len = sprintf(tmp, str); \ + *envp_p++ = tmp; \ + tmp += len + 1; \ + if (tmp >= envstr + MAX_ENVLEN) \ + TUX_BUG(); + + #define WRITE_ENV_STR(str,field,len) \ + do { \ + int offset; \ + \ + offset = sizeof(str)-1; \ + if (tmp - envstr + offset + len >= MAX_ENVLEN) \ + return -EFAULT; \ + if (envp_p >= envp + MAX_CGI_METAVARIABLES) \ + TUX_BUG(); \ + memcpy(tmp, str, offset); \ + memcpy(tmp + offset, field, len); \ + offset += len; \ + tmp[offset] = 0; \ + *envp_p++ = tmp; \ + tmp += offset + 1; \ + } while (0) + + WRITE_ENV("GATEWAY_INTERFACE=CGI/1.1"); + WRITE_ENV("CONTENT_LENGTH=%d", req->post_data_len); + WRITE_ENV("REMOTE_ADDR=%d.%d.%d.%d", IP(0), IP(1), IP(2), IP(3)); + WRITE_ENV("SERVER_PORT=%d", tux_serverport); + WRITE_ENV("SERVER_SOFTWARE=TUX/2.0 (Linux)"); + +#if 1 + WRITE_ENV("DOCUMENT_ROOT=/"); + WRITE_ENV("PATH_INFO=/"); +#else + WRITE_ENV_STR("DOCUMENT_ROOT=", tux_docroot, len); + WRITE_ENV_STR("PATH_INFO=", tux_docroot, len); +#endif + WRITE_ENV_STR("QUERY_STRING=", req->query_str, req->query_len); + WRITE_ENV_STR("REQUEST_METHOD=", req->method_str, req->method_len); + WRITE_ENV_STR("SCRIPT_NAME=", req->objectname, req->objectname_len); + WRITE_ENV_STR("SERVER_PROTOCOL=", req->version_str, req->version_len); + + if (req->content_type_len) + WRITE_ENV_STR("CONTENT_TYPE=", + req->content_type_str, req->content_type_len); + if (req->cookies_len) + WRITE_ENV_STR("HTTP_COOKIE=", + req->cookies_str, req->cookies_len); + + if (req->host_len) + WRITE_ENV_STR("SERVER_NAME=", req->host, req->host_len); + else { + const char *host = "localhost"; + WRITE_ENV_STR("SERVER_NAME=", host, strlen(host)); + } + + *envp_p = NULL; + + spin_lock_irq(¤t->sigmask_lock); + ka = current->sig->action + SIGPIPE-1; + ka->sa.sa_handler = SIG_IGN; + siginitsetinv(¤t->blocked, sigmask(SIGCHLD)); + recalc_sigpending(current); + spin_unlock_irq(¤t->sigmask_lock); + + sys_close(0); sys_close(1); + sys_close(2); sys_close(3); + sys_close(4); sys_close(5); + + in_pipe_fds[0] = in_pipe_fds[1] = -1; + out_pipe_fds[0] = out_pipe_fds[1] = -1; + err_pipe_fds[0] = err_pipe_fds[1] = -1; + + if (do_pipe(in_pipe_fds)) + return -ENFILE; + if (do_pipe(out_pipe_fds)) + return -ENFILE; + if (do_pipe(err_pipe_fds)) + return -ENFILE; + + if (in_pipe_fds[0] != 0) TUX_BUG(); + if (in_pipe_fds[1] != 1) TUX_BUG(); + if (out_pipe_fds[0] != 2) TUX_BUG(); + if (out_pipe_fds[1] != 3) TUX_BUG(); + if (err_pipe_fds[0] != 4) TUX_BUG(); + if (err_pipe_fds[1] != 5) TUX_BUG(); + + if (virtual_server && req->host_len) + sprintf(command, "/%s/cgi-bin/%s", req->host, req->objectname); + else + sprintf(command, "/cgi-bin/%s", req->objectname); + Dprintk("before CGI exec.\n"); + pid = tux_exec_process(command, argv, envp, 1, ¶m, 0); + Dprintk("after CGI exec.\n"); + + if (req->post_data_len) { + mm_segment_t oldmm; + int ret; + + Dprintk("POST data to CGI:\n"); + oldmm = get_fs(); set_fs(KERNEL_DS); + ret = sys_write(1, req->post_data_str, req->post_data_len); + set_fs(oldmm); + Dprintk("sys_write() returned: %d.\n", ret); + if (ret != req->post_data_len) + Dprintk("sys_write() returned: %d.\n", ret); + } + + sys_close(0); + sys_close(1); + + handle_cgi_reply(req); +repeat: + reap_kids(); + ret = sys_wait4(pid, NULL, __WALL, NULL); + Dprintk("exec_external_cgi() sys_wait4() returned %d.\n", ret); + if (ret == -ERESTARTSYS) + goto repeat; + + return ret; +} + +void start_external_cgi (tux_req_t *req) +{ + int pid; + +repeat: + pid = kernel_thread(exec_external_cgi, (void*) req, 
SIGCHLD); + if (pid == -1) + return; + if (pid < 0) { + printk(KERN_INFO "TUX: Could not fork external CGI process due to %d, retrying!\n", pid); + current->state = TASK_UNINTERRUPTIBLE; + schedule_timeout(HZ); + goto repeat; + } +} + +int query_extcgi (tux_req_t *req) +{ + clear_keepalive(req); + start_external_cgi(req); + return -1; +} + +#define EXTCGI_INVALID_HEADER \ + "HTTP/1.1 503 Service Unavailable\r\n" \ + "Content-Length: 23\r\n\r\n" + +#define EXTCGI_INVALID_BODY \ + "TUX: invalid CGI reply." + +#define EXTCGI_INVALID EXTCGI_INVALID_HEADER EXTCGI_INVALID_BODY + diff -rNu linux-2.4.9-ac10/net/tux/input.c linux/net/tux/input.c --- linux-2.4.9-ac10/net/tux/input.c Thu Jan 1 01:00:00 1970 +++ linux/net/tux/input.c Mon Sep 10 16:18:08 2001 @@ -0,0 +1,845 @@ +/* + * TUX - Integrated Application Protocols Layer and Object Cache + * + * Copyright (C) 2000, 2001, Ingo Molnar + * + * input.c: handle requests arriving on accepted connections + */ + +#include +#include + +/**************************************************************** + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + ****************************************************************/ + +void zap_request (tux_req_t *req, int cachemiss) +{ + if (!cachemiss && (req->error == 3)) { + /* + * Zap connection as fast as possible, there is + * no valid client connection anymore: + */ + clear_keepalive(req); + flush_request(req, 0); + } else { + if (req->error == 3) + add_tux_atom(req, flush_request); + else + /* + * Potentially redirect to the secondary server: + */ + add_tux_atom(req, redirect_request); + add_req_to_workqueue(req); + } +} + +static struct dentry * __tux_lookup (const char *filename, + struct nameidata *base) +{ + int err; + + mntget(base->mnt); + + err = path_walk(filename, base); + if (err) { + Dprintk("path_walk() returned with %d!\n", err); + return ERR_PTR(err); + } + mntput(base->mnt); + + return base->dentry; +} + +int url_permission (struct inode *inode) +{ + umode_t mode; + int err; + + mode = inode->i_mode; + Dprintk("URL inode mode: %08x.\n", mode); + + if (!S_ISREG(mode)) + return -1; + + /* + * Paranoia: first forbid things, then maybe allow. + * Only regular files allowed. + */ + if (mode & tux_mode_forbidden) + return -2; + /* + * at least one bit in the 'allowed' set has to + * be present to allow access. 
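Editor's sketch: url_permission() above applies a "forbid first, then allow" policy, serving only regular files, rejecting anything that carries a bit from tux_mode_forbidden, requiring at least one bit from tux_mode_allowed, and only then falling back to the normal permission(MAY_READ) check. A userspace rendition of the same filtering against struct stat; the two masks here are placeholders, the real values come from the TUX sysctls:

/* sketch: "forbid first, then allow" mode filtering, as in url_permission() */
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

#define MODE_FORBIDDEN  (S_ISUID | S_ISGID)     /* assumption: refuse set[ug]id files  */
#define MODE_ALLOWED    (S_IROTH)               /* assumption: require world-readable  */

int file_servable(const char *path)
{
        struct stat st;

        if (stat(path, &st))
                return -1;
        if (!S_ISREG(st.st_mode))               /* only regular files                  */
                return -1;
        if (st.st_mode & MODE_FORBIDDEN)
                return -2;                      /* paranoia: forbidden bits win        */
        if (!(st.st_mode & MODE_ALLOWED))
                return -3;                      /* need at least one 'allowed' bit set */
        return access(path, R_OK);              /* finally, a plain permission check   */
}

int main(int argc, char **argv)
{
        const char *path = argc > 1 ? argv[1] : "/etc/hostname";

        printf("%s: %s\n", path, file_servable(path) ? "refused" : "servable");
        return 0;
}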
+ */ + if (!(mode & tux_mode_allowed)) + return -3; + err = permission(inode,MAY_READ); + return err; +} + +static struct dentry * vhost_lookup (tux_req_t *req, const char *filename, + struct nameidata* base) +{ + struct dentry *dentry; + // 255.255.255.255 + char ip [3+1+3+1+3+1+3 + 2]; + int ip_len; + + if (virtual_server >= TUX_VHOST_IP) { +#define IP(n) ((unsigned char *)& req->sock->sk->rcv_saddr)[n] + ip_len = sprintf(ip, "%d.%d.%d.%d", IP(0), IP(1), IP(2), IP(3)); +#undef IP + dentry = __tux_lookup (ip, base); + if (!dentry || IS_ERR(dentry)) { + base->dentry = req->cwd; + dget(base->dentry); + base->mnt = req->cwdmnt; + goto lookup_default; + } + if (virtual_server == TUX_VHOST_IP) + goto lookup_filename; + + // fall through in mixed mode: + } + + if (!req->host_len) +lookup_default: + dentry = __tux_lookup (tux_default_vhost, base); + else { + dentry = __tux_lookup (req->host, base); + if (!dentry || IS_ERR(dentry)) { + base->dentry = req->cwd; + dget(base->dentry); + base->mnt = req->cwdmnt; + if (virtual_server >= TUX_VHOST_IP) { + dentry = __tux_lookup (ip, base); + if (!dentry || IS_ERR(dentry)) { + base->dentry = req->cwd; + dget(base->dentry); + base->mnt = req->cwdmnt; + } + } + goto lookup_default; + } + } + +lookup_filename: + if (dentry && !IS_ERR(dentry)) + dentry = __tux_lookup (filename, base); + return dentry; +} + +struct dentry * tux_lookup (tux_req_t *req, const char *filename, + const unsigned int flag) +{ + struct dentry *dentry; + struct nameidata base; + + if ((req->objectname[0] == '/') && req->cwd) { + dput(req->cwd); + mntput(req->cwdmnt); + req->cwd = NULL; + req->cwdmnt = NULL; + } + if (!req->cwd) { + req->cwd = dget(docroot.dentry); + req->cwdmnt = mntget(docroot.mnt); + } + + base.flags = LOOKUP_POSITIVE|LOOKUP_FOLLOW|flag; + base.last_type = LAST_ROOT; + base.dentry = req->cwd; + dget(base.dentry); + base.mnt = req->cwdmnt; + + if (virtual_server && req->host_len) + dentry = vhost_lookup(req, filename, &base); + else + dentry = __tux_lookup (filename, &base); + + Dprintk("looked up {%s} == dentry %p.\n", filename, dentry); + + if (dentry && !IS_ERR(dentry) && !dentry->d_inode) + TUX_BUG(); + return dentry; +} + +void install_req_dentry (tux_req_t *req, struct dentry *dentry) +{ + if (req->dentry) + TUX_BUG(); + req->dentry = dentry; + if (req->in_file.f_dentry) + TUX_BUG(); + if (dentry) + init_private_file(&req->in_file, dentry, FMODE_READ); +} + +void release_req_dentry (tux_req_t *req) +{ + if (!req->dentry) { + if (req->in_file.f_dentry) + TUX_BUG(); + return; + } + if (req->in_file.f_op && req->in_file.f_op->release) + req->in_file.f_op->release(req->dentry->d_inode, &req->in_file); + memset(&req->in_file, 0, sizeof(req->in_file)); + + dput(req->dentry); + req->dentry = NULL; +} + +int lookup_url (tux_req_t *req, const unsigned int flag) +{ + int perm = 0, i; + struct dentry *dentry = NULL; + struct inode *inode; + const char *filename; + +repeat_lookup: + if (req->dentry) + TUX_BUG(); + + filename = req->objectname; + Dprintk("will look up {%s} (%d)\n", filename, req->objectname_len); + Dprintk("current->fsuid: %d, current->fsgid: %d, ngroups: %d\n", + current->fsuid, current->fsgid, current->ngroups); + for (i = 0; i < current->ngroups; i++) + Dprintk(".. 
group #%d: %d.\n", i, current->groups[i]); + + dentry = tux_lookup(req, filename, flag); + + if (!dentry || IS_ERR(dentry)) { + if (PTR_ERR(dentry) == -EWOULDBLOCKIO) + goto cachemiss; + + if (!req->lookup_404) { + int len = strlen(tux_404_page); + memcpy(req->objectname, tux_404_page, len); + req->objectname[len] = 0; + req->objectname_len = len; + req->lookup_404 = 1; + req->status = 404; + goto repeat_lookup; + } + Dprintk("abort - lookup error.\n"); + goto abort; + } + + Dprintk("SUCCESS, looked up {%s} == dentry %p (inode %p, count %d.)\n", filename, dentry, dentry->d_inode, atomic_read(&dentry->d_count)); + inode = dentry->d_inode; + + /* + * At this point we have a real, non-negative dentry. + */ + perm = url_permission(inode); + + if (perm < 0) { + Dprintk("FAILED trusted dentry %p permission %d.\n", dentry, perm); +#define INDEX "/index.html" + if (S_ISDIR(dentry->d_inode->i_mode) && !req->lookup_dir && + (req->objectname_len + sizeof(INDEX) < + MAX_OBJECTNAME_LEN)) { + if (req->objectname_len && (req->objectname[req->objectname_len-1] != '/')) { + dput(dentry); + return 2; + } + memcpy(req->objectname + req->objectname_len, + INDEX, sizeof(INDEX)); + req->objectname_len += sizeof(INDEX)-1; + req->lookup_dir = 1; + dput(dentry); + goto repeat_lookup; + } + req->status = 403; + goto abort; + } + if (tux_max_object_size && (inode->i_size > tux_max_object_size)) { + Dprintk("too big object, %d bytes.\n", (int)inode->i_size); + req->status = 403; + goto abort; + } + req->filelen = inode->i_size; + req->mtime = inode->i_mtime; + + { + unsigned int num = req->filelen; + int nr_digits = 0; + char * etag_p = req->etag; + char digits [10]; + + do { + digits[nr_digits++] = '0' + num % 10; + num /= 10; + } while (num); + + req->lendigits = nr_digits; + req->etaglen = nr_digits; + + while (nr_digits) + *etag_p++ = digits[--nr_digits]; + + *etag_p++ = '-'; + num = req->mtime; + nr_digits = 0; + + do { + digits[nr_digits++] = 'a' + num % 16; + num /= 16; + } while (num); + req->etaglen += nr_digits+1; + while (nr_digits) + *etag_p++ = digits[--nr_digits]; + } + + if (req->if_none_match_len >= req->etaglen) { + + char * etag_p = req->etag; + const char * match_p = req->if_none_match_str; + int pos = req->etaglen - 1; + int matchpos = req->etaglen - 1; + + do { + while (etag_p[matchpos--] == match_p[pos--]) + if (matchpos < 0) + break; + if (matchpos < 0) + pos = req->if_none_match_len; + else { + if (match_p[pos+1] == ',') + pos += req->etaglen + 2; + else + pos += req->etaglen-matchpos; + matchpos = req->etaglen - 1; + } + } while (pos < req->if_none_match_len); + + if (matchpos < 0) { + req->status = 304; + goto abort; + } + } + Dprintk("looked up cached dentry %p, (count %d.)\n", dentry, dentry ? 
atomic_read(&dentry->d_count) : -1 ); + + url_hist_hit(req->filelen); +out: + install_req_dentry(req, dentry); + return 0; + +cachemiss: + return 1; + +abort: + if (dentry) { + if (!IS_ERR(dentry)) + dput(dentry); + dentry = NULL; + } + TDprintk("req %p has lookup errors!\n", req); + req_err(req); + goto out; +} + +int connection_too_fast (tux_req_t *req) +{ + unsigned long curr_bw, delta, bytes; + + if (!tux_max_output_bandwidth) + return 1; + bytes = req->total_bytes + req->bytes_sent; + if (!bytes) + return 1; + + delta = jiffies - req->first_timestamp; + if (!delta) + delta++; + curr_bw = bytes * HZ / delta; + + if (curr_bw > tux_max_output_bandwidth) + return 2; + return 0; +} + +void unidle_req (tux_req_t *req) +{ + threadinfo_t *ti = req->ti; + + Dprintk("UNIDLE req %p <%p> (sock %p, sk %p) (keepalive: %d, status: %d)\n", req, __builtin_return_address(0), req->sock, req->sock->sk, req->keep_alive, req->status); + spin_lock_irq(&ti->work_lock); + if (req->magic != TUX_MAGIC) + TUX_BUG(); + if (!test_and_clear_bit(0, &req->idle_input)) { + Dprintk("unidling %p, wasnt idle!\n", req); + if (list_empty(&req->work)) + TUX_BUG(); + list_del(&req->work); + DEBUG_DEL_LIST(&req->work); + DEC_STAT(nr_work_pending); + } else { + del_keepalive_timer(req); + DEC_STAT(nr_idle_input_pending); + Dprintk("unidled %p.\n", req); + } + if (req->idle_input) + TUX_BUG(); + spin_unlock_irq(&ti->work_lock); +} + +#define GOTO_INCOMPLETE do { Dprintk("incomplete at %s:%d.\n", __FILE__, __LINE__); goto incomplete; } while (0) +#define GOTO_REDIRECT do { TDprintk("redirect at %s:%d.\n", __FILE__, __LINE__); goto redirect; } while (0) +#define GOTO_REDIRECT_NONIDLE do { TDprintk("redirect at %s:%d.\n", __FILE__, __LINE__); goto redirect_nonidle; } while (0) + +static int read_request (struct socket *sock, char *buf, int max_size) +{ + mm_segment_t oldmm; + struct msghdr msg; + struct iovec iov; + int len; + + msg.msg_name = 0; + msg.msg_namelen = 0; + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + msg.msg_control = NULL; + msg.msg_controllen = 0; + msg.msg_flags = 0; + + msg.msg_iov->iov_base = buf; + msg.msg_iov->iov_len = max_size; + + oldmm = get_fs(); set_fs(KERNEL_DS); + +read_again: + len = sock->sk->prot->recvmsg(sock->sk, &msg, max_size, + MSG_DONTWAIT, MSG_PEEK, NULL); + + /* + * We must not get a signal inbetween + */ + if ((len == -EAGAIN) || (len == -ERESTARTSYS)) { + if (!signal_pending(current)) { + len = 0; + goto out; + } + reap_kids(); + goto read_again; + } +out: + set_fs(oldmm); + return len; +} + +static int zap_urg_data (struct socket *sock) +{ + mm_segment_t oldmm; + struct msghdr msg; + struct iovec iov; + int len; + char buf[10]; + + oldmm = get_fs(); set_fs(KERNEL_DS); + + msg.msg_name = 0; + msg.msg_namelen = 0; + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + msg.msg_control = NULL; + msg.msg_controllen = 0; + msg.msg_flags = 0; // MSG_TRUNC | MSG_OOB; + + msg.msg_iov->iov_base = buf; + msg.msg_iov->iov_len = 2; + +read_again: + len = sock->sk->prot->recvmsg(sock->sk, &msg, 2, + MSG_DONTWAIT, 0 /*MSG_TRUNC | MSG_OOB*/, NULL); + Dprintk("recvmsg(MSG_OOB) returned %d.\n", len); + + /* + * We must not get a signal inbetween + */ + if ((len == -EAGAIN) || (len == -ERESTARTSYS)) { + if (!signal_pending(current)) { + len = 0; + goto out; + } + reap_kids(); + goto read_again; + } +out: +// if (len > 0) +// goto read_more; + set_fs(oldmm); + + return len; +} + +void trunc_headers (tux_req_t *req) +{ + struct sock *sk = req->sock->sk; + struct tcp_opt *tp = &sk->tp_pinfo.af_tcp; + int len, addr_len 
= 0; + struct sk_buff *skb = req->input_skb; + + if (!req->parsed_len) + TUX_BUG(); + if (skb && (skb->len == req->parsed_len)) { + lock_sock(sk); + tp->copied_seq += req->parsed_len; + /* + * We unlink the skb here, but free it only + * after the TUX request is finished - we + * need to access headers. + */ + __skb_unlink(skb, &sk->receive_queue); + skb_orphan(skb); + release_sock(sk); + kfree_skb(skb); + + Dprintk("truncated (skb) %d bytes at %p. (wanted: %d.)\n", req->parsed_len, __builtin_return_address(0), req->parsed_len); + } else { +repeat_trunc: + len = sk->prot->recvmsg(sk, NULL, req->parsed_len, 1, MSG_TRUNC, &addr_len); + if ((len == -ERESTARTSYS) || (len == -EAGAIN)) { + reap_kids(); + goto repeat_trunc; + } + Dprintk("truncated (TRUNC) %d bytes at %p. (wanted: %d.)\n", len, __builtin_return_address(0), req->parsed_len); + if (len != req->parsed_len) + printk("hm, truncated only %d bytes, wanted: %d.\n", + len, req->parsed_len); + } + req->parsed_len = 0; +} + +void print_req (tux_req_t *req) +{ + struct sock *sk; + + printk("PRINT req %p <%p>, sock %p\n", + req, __builtin_return_address(0), req->sock); + printk("... idx: %d\n", req->atom_idx); + if (req->sock) { + sk = req->sock->sk; + printk("... sock %p, sk %p, sk->state: %d, sk->err: %d\n", req->sock, sk, sk->state, sk->err); + printk("... write_queue: %d, receive_queue: %d, error_queue: %d, keepalive: %d, status: %d\n", !skb_queue_empty(&sk->write_queue), !skb_queue_empty(&sk->receive_queue), !skb_queue_empty(&sk->error_queue), req->keep_alive, req->status); + printk("...tp->send_head: %p\n", sk->tp_pinfo.af_tcp.send_head); + printk("...tp->snd_una: %08x\n", sk->tp_pinfo.af_tcp.snd_una); + printk("...tp->snd_nxt: %08x\n", sk->tp_pinfo.af_tcp.snd_nxt); + printk("...tp->packets_out: %08x\n", sk->tp_pinfo.af_tcp.packets_out); + } + printk("... meth:{%s}, uri:{%s}, query:{%s}, ver:{%s}\n", req->method_str ? req->method_str : "", req->uri_str ? req->uri_str : "", req->query_str ? req->query_str : "", req->version_str ? req->version_str : ""); + printk("... post_data:{%s}(%d).\n", req->post_data_str, req->post_data_len); + printk("... headers: {%s}\n", req->headers); +} +/* + * parse_request() reads all available TCP/IP data and prepares + * the request if the TUX request is complete. (we can get TUX + * requests in several packets.) Invalid requests are redirected + * to the secondary server. + */ + +void parse_request (tux_req_t *req, int cachemiss) +{ + u32 peek_seq; + struct sk_buff *skb = NULL; + int len, parsed_len, offset; + struct sock *sk = req->sock->sk; + struct tcp_opt *tp = &sk->tp_pinfo.af_tcp; + char *data; + int was_keepalive = req->keep_alive; + + if (req->magic != TUX_MAGIC) + TUX_BUG(); + + SET_TIMESTAMP(req->parse_timestamp); + + spin_lock_irq(&req->ti->work_lock); + add_keepalive_timer(req); + if (test_and_set_bit(0, &req->idle_input)) + TUX_BUG(); + INC_STAT(nr_idle_input_pending); + spin_unlock_irq(&req->ti->work_lock); + + Dprintk("idled request %p.\n", req); + + if (!tux_zerocopy_parse) + goto slow_path_nonlocked; + lock_sock(sk); + if ((sk->state != TCP_ESTABLISHED) || sk->err) + goto slow_path; + + skb = skb_peek(&sk->receive_queue); + if (!skb) + goto slow_path; + if (!skb || skb_shinfo(skb)->nr_frags) + goto slow_path; + + Dprintk("got skb %p. 
(frags: %d)\n", skb, skb_shinfo(skb)->nr_frags); + + if (skb->h.th->fin || skb->h.th->urg) + goto slow_path; + peek_seq = tp->copied_seq; + offset = tp->copied_seq - TCP_SKB_CB(skb)->seq; + if (skb->h.th->syn) + offset--; + + Dprintk("peek_seq: %u, offset: %u.\n", peek_seq, offset); + + if (offset) + goto slow_path; + len = skb->len; + data = (unsigned char *)skb->h.th + skb->h.th->doff*4; + + Dprintk("len: %d, data: %p (dataoff: %d).\n", len, data, skb->h.th->doff*4); + release_sock(sk); + if (len >= tux_max_header_len-1) + goto slow_path_nonlocked; + + INC_STAT(input_fastpath); + req->headers = data; + req->headers_len = len; + if (req->input_skb) + BUG(); + skb_get(skb); + req->input_skb = skb; + + + data[len] = 0; + goto parse; + +slow_path: + release_sock(sk); +slow_path_nonlocked: + + while (tp->urg_data && !(tp->urg_data & TCP_URG_READ)) + zap_urg_data(req->sock); + + skb = NULL; + req->input_skb = NULL; + INC_STAT(input_slowpath); + + if (!req->headers_buf) + req->headers_buf = kmalloc(tux_max_header_len, GFP_KERNEL); + req->headers = req->headers_buf; + + /* First, read the data */ + len = read_request(req->sock, req->headers, tux_max_header_len-1); + if (len < 0) { + Dprintk("got %d from read_request().\n", len); + GOTO_REDIRECT; + } + if (!len) + GOTO_INCOMPLETE; + +parse: + /* + * Make it a zero-delimited string to automatically get + * protection against various buffer overflow situations. + * Then pass it to the TUX application protocol stack. + */ + req->headers[len] = 0; + req->headers_len = len; + + parsed_len = req->proto->parse_message(req, len); + + /* + * Is the request fully read? (or is there any error) + */ + if (parsed_len < 0) { + req->error = 3; + goto redirect_error; + } + if (!parsed_len) { + /* + * Push pending ACK which was delayed due to the + * pingpong optimization: + */ + if (was_keepalive) { + lock_sock(sk); + tp->ack.pingpong = 0; + tp->ack.pending |= TCP_ACK_PUSHED; + cleanup_rbuf(sk, 1); + release_sock(sk); + } + if (len >= tux_max_header_len-1) + GOTO_REDIRECT; + if (skb) { + DEC_STAT(input_fastpath); + goto slow_path_nonlocked; + } + GOTO_INCOMPLETE; + } + unidle_req(req); + + req->sock->sk->tp_pinfo.af_tcp.nonagle = tux_nonagle; + + add_req_to_workqueue(req); + return; + +redirect: + TDprintk("req %p will be redirected!\n", req); + req_err(req); + +redirect_error: + unidle_req(req); + + if (len < 0) + req->parsed_len = 0; + else + req->parsed_len = len; + + INC_STAT(parse_static_redirect); + req->input_skb = NULL; + req->headers = NULL; + if (req->error) + zap_request(req, cachemiss); + return; + +incomplete: + if (req->error) + goto redirect_error; + if (tp->urg_data && !(tp->urg_data & TCP_URG_READ)) + goto slow_path_nonlocked; + + add_tux_atom(req, parse_request); + INC_STAT(parse_static_incomplete); + req->input_skb = NULL; + req->headers = NULL; +} + +int process_requests (threadinfo_t *ti, tux_req_t **user_req) +{ + struct list_head *head, *curr; + int count = 0; + tux_req_t *req; + + *user_req = NULL; + +restart_loop: + spin_lock_irq(&ti->work_lock); + head = &ti->work_pending; + curr = head->next; + + if (curr != head) { + int i; + + req = list_entry(curr, tux_req_t, work); + Dprintk("PROCESS req %p <%p>.\n", + req, __builtin_return_address(0)); + for (i = 0; i < req->atom_idx; i++) + Dprintk("... 
atom %d: %p\n", i, req->atoms[i]); + + if (req->ti != ti) + TUX_BUG(); + if (req->magic != TUX_MAGIC) + TUX_BUG(); + + if (list_empty(&req->work)) + TUX_BUG(); + list_del(curr); + DEBUG_DEL_LIST(&req->work); + spin_unlock_irq(&ti->work_lock); + + if (!req->atom_idx) { + if (req->usermode) { + *user_req = req; + return count; + } + /* + * idx == 0 requests are flushed automatically. + */ + flush_request(req, 0); + } else + tux_schedule_atom(req, 0); + count++; + goto restart_loop; + } + spin_unlock_irq(&ti->work_lock); + + return count; +} + +int flush_workqueue (threadinfo_t *ti) +{ + struct list_head *head, *curr, *next; + tux_req_t *req; + int count = 0; + +restart: + spin_lock_irq(&ti->work_lock); + head = &ti->work_pending; + curr = head->next; + + if (curr != head) { + req = list_entry(curr, tux_req_t, work); + next = curr->next; + clear_bit(0, &req->idle_input); + clear_bit(0, &req->wait_output_space); + if (list_empty(&req->work)) + TUX_BUG(); + list_del(curr); + DEBUG_DEL_LIST(curr); + DEC_STAT(nr_input_pending); + spin_unlock_irq(&ti->work_lock); +#if CONFIG_TUX_DEBUG + req->bytes_expected = 0; +#endif + req->in_file.f_pos = 0; + req->atom_idx = 0; + clear_keepalive(req); + req->status = -1; + if (req->usermode) { + req->usermode = 0; + req->private = NULL; + } + flush_request(req, 0); + count++; + goto restart; + } + spin_unlock_irq(&ti->work_lock); + + return count; +} + +int print_all_requests (threadinfo_t *ti) +{ + struct list_head *head, *curr, *next; + tux_req_t *req; + int count = 0; + +restart: + spin_lock_irq(&ti->work_lock); + head = &ti->all_requests; + curr = head->next; + + if (curr != head) { + req = list_entry(curr, tux_req_t, all); + next = curr->next; + list_del(curr); + DEBUG_DEL_LIST(curr); + print_req(req); + spin_unlock_irq(&ti->work_lock); + count++; + goto restart; + } + spin_unlock_irq(&ti->work_lock); + + return count; +} + diff -rNu linux-2.4.9-ac10/net/tux/logger.c linux/net/tux/logger.c --- linux-2.4.9-ac10/net/tux/logger.c Thu Jan 1 01:00:00 1970 +++ linux/net/tux/logger.c Mon Sep 10 16:18:08 2001 @@ -0,0 +1,773 @@ +/* + * TUX - Integrated Application Protocols Layer and Object Cache + * + * Copyright (C) 2000, 2001, Ingo Molnar + * + * Cleaned up logger output for Alpha. + * -- Phil Ezolt (Phillip.Ezolt@compaq.com) & Bill Carr (wcarr92@yahoo.com) + * + * logger.c: log requests finished by TUX. + */ + +#define __KERNEL_SYSCALLS__ +#include + +/**************************************************************** + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
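Editor's sketch: read_request() above peeks at the socket with MSG_PEEK|MSG_DONTWAIT so the headers can be parsed without consuming them, and trunc_headers() later discards exactly req->parsed_len bytes (using MSG_TRUNC, or by unlinking the skb on the fast path) so that whatever remains in the receive queue is the next pipelined request or the request body. The same peek-then-consume idea as a compile-ready userspace fragment (sock_fd is an assumed connected TCP socket; a scratch buffer replaces MSG_TRUNC):

/* sketch: peek at a request, then consume only the bytes the parser accepted */
#include <errno.h>
#include <sys/socket.h>

ssize_t peek_request(int sock_fd, char *buf, size_t max)
{
        ssize_t len;

        do
                len = recv(sock_fd, buf, max, MSG_PEEK | MSG_DONTWAIT);
        while (len < 0 && errno == EINTR);      /* like the -ERESTARTSYS retry */
        if (len < 0 && errno == EAGAIN)
                return 0;                       /* nothing has arrived yet */
        return len;
}

void consume_parsed(int sock_fd, size_t parsed_len)
{
        char scratch[512];

        /* the headers were parsed from the peeked copy; now drop exactly
         * that many bytes so the next request starts at the right offset */
        while (parsed_len) {
                size_t chunk = parsed_len < sizeof(scratch) ? parsed_len : sizeof(scratch);
                ssize_t got = recv(sock_fd, scratch, chunk, 0);

                if (got <= 0)
                        break;
                parsed_len -= got;
        }
}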
+ * + ****************************************************************/ + +static spinlock_t log_lock = SPIN_LOCK_UNLOCKED; +static unsigned int log_head, log_tail; +static char * log_buffer = NULL; +static DECLARE_WAIT_QUEUE_HEAD(log_wait); +static DECLARE_WAIT_QUEUE_HEAD(log_full); +static int logger_pid = 0; + +/* + * High-speed TUX logging architecture: + * + * All fast threads share a common log-ringbuffer. (default size 8MB) + * Log entries are binary and are padded to be cacheline aligned; this + * ensures that there is no cache-pingpong between fast threads. + * + * The logger thread writes out pending log entries within 1 second + * (buffer-cache writes data out within 5 seconds). The logger thread + * gets activated once we have more than 25% of the log ringbuffer + * filled - or the 1 second log timeout expires. Fast threads block + * if more than 95% of the ringbuffer is filled and unblock only + * if used logbuffer space drops below 90%. + * + * This architecture guarantees that 1) logging is reliable (no + * log entry is ever lost), 2) timely (touches disk within 6 seconds), + * 3) in the log-contention case the saturation behavior is still + * write-clustered, but 4) if the logger thread can keep up then + * the coupling is completely asynchronous and parallel. + * + * The binary log format gives us about 50% saved IO/memory bandwidth + * and 50% less on-disk used log space than the traditional W3C ASCII + * format. + * + * (We might switch to raw IO though to write the logfile.) + */ + +#define SOFT_LIMIT (LOG_LEN*25/100) +#define HARD_LIMIT (LOG_LEN*95/100) +#define HARD_RELAX_LIMIT (LOG_LEN*90/100) + +int tux_logentry_align_order = 5; + +#if SMP_CACHE_BYTES == 16 +# define TUX_LOGENTRY_ALIGN 4 +#else +#if SMP_CACHE_BYTES == 32 +# define TUX_LOGENTRY_ALIGN 5 +#else +#if SMP_CACHE_BYTES == 64 +# define TUX_LOGENTRY_ALIGN 6 +#else +#if SMP_CACHE_BYTES == 128 +# define TUX_LOGENTRY_ALIGN 7 +#else +#error Add entry!
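Editor's sketch: the three watermarks just defined drive the producer/consumer coupling described in the comment: the logger is woken early (25% full), the fast threads are throttled only near overflow (95%) and are released again once the buffer drains below 90%, giving the hysteresis band. A trivial standalone check of what each fill level triggers (LOG_LEN taken from tux.h):

/* sketch: what each log-buffer fill level triggers, per the thresholds above */
#include <stdio.h>

#define LOG_LEN          (8*1024*1024UL)
#define SOFT_LIMIT       (LOG_LEN*25/100)       /* wake the logger thread      */
#define HARD_LIMIT       (LOG_LEN*95/100)       /* block (throttle) producers  */
#define HARD_RELAX_LIMIT (LOG_LEN*90/100)       /* let throttled producers go  */

int main(void)
{
        unsigned long samples[] = { 100*1024, 3*1024*1024, 7900*1024 };

        for (int i = 0; i < 3; i++) {
                unsigned long pending = samples[i];

                printf("%8lu pending: wake logger=%d throttle=%d release throttled=%d\n",
                       pending,
                       pending >= SOFT_LIMIT,
                       pending >= HARD_LIMIT,
                       pending <  HARD_RELAX_LIMIT);
        }
        return 0;
}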
+#endif +#endif +#endif +#endif + +#define ROUND_UP(x) (((((x)-1) >> TUX_LOGENTRY_ALIGN) + 1) \ + << TUX_LOGENTRY_ALIGN) + +static void __throttle_logging (void) +{ + DECLARE_WAITQUEUE(wait, current); + int pending; + + add_wait_queue(&log_full, &wait); + for (;;) { + static unsigned long last_warning = 0; + + if (jiffies - last_warning > 10*HZ) { + last_warning = jiffies; + printk(KERN_NOTICE "TUX: log buffer overflow, have to throttle TUX thread!\n"); + } + + current->state = TASK_INTERRUPTIBLE; + + spin_lock(&log_lock); + pending = log_head-log_tail; + spin_unlock(&log_lock); + + if ((pending % LOG_LEN) < HARD_LIMIT) + break; + + schedule(); + } + current->state = TASK_RUNNING; + remove_wait_queue(&log_full, &wait); +} + +#if CONFIG_TUX_DEBUG +#define CHECK_LOGPTR(ptr) \ +do { \ + if ((ptr < log_buffer) || (ptr > log_buffer + LOG_LEN)) { \ + printk(KERN_ERR "TUX: ouch: log ptr %p > %p + %ld!\n", \ + ptr, log_buffer, LOG_LEN); \ + TUX_BUG(); \ + } \ +} while (0) +#else +#define CHECK_LOGPTR(ptr) do { } while (0) +#endif + +void __log_request (tux_req_t *req) +{ + char *str, *next; + unsigned int inc, len, uri_len, pending, next_head, def_vhost_len = 0; + unsigned long flags; + + /* + * Log the reply status (success, or type of failure) + */ + if (!req->status || (req->bytes_sent == -1) || !req->uri_len) { + + Dprintk("not logging req %p: {%s} [%d/%d]\n", req, req->uri_str, req->status, req->bytes_sent); + return; + } + Dprintk("uri: {%s} [%d]\n", req->uri_str, req->uri_len); + + uri_len = req->uri_len; + len = uri_len + 1; + + if (virtual_server) { + if (req->host_len) + len += req->host_len; + else { + def_vhost_len = strlen(tux_default_vhost); + len += def_vhost_len; + } + } + + Dprintk("method_str: {%s} [%d]\n", req->method_str, req->method_len); + len += req->method_len + 1; + + Dprintk("version_str: {%s} [%d]\n", req->version_str, req->version_len); + len += req->version_len + 1; + +#if CONFIG_TUX_EXTENDED_LOG + Dprintk("user_agent_str: {%s} [%d]\n", req->user_agent_str, req->user_agent_len); + len += req->user_agent_len + 1; +#endif + if (tux_referer_logging) { + Dprintk("referer_str: {%s} [%d]\n", req->referer_str, req->referer_len); + len += req->referer_len; + } + len++; + + inc = 5*sizeof(u32) + len; +#if CONFIG_TUX_EXTENDED_LOG + inc += 7*sizeof(u32); +#endif + + spin_lock_irqsave(&log_lock, flags); + + next_head = ROUND_UP(log_head + inc); + + if (next_head < LOG_LEN) { + str = log_buffer + log_head; + if (str > log_buffer + LOG_LEN) + TUX_BUG(); + log_head = next_head; + } else { + if (log_head < LOG_LEN) + memset(log_buffer+log_head, 0, LOG_LEN-log_head); + str = log_buffer; + log_head = ROUND_UP(inc); + } + + if (str < log_buffer || str+inc >= log_buffer+LOG_LEN) + TUX_BUG(); + + /* + * Log record signature - this makes finding the next entry + * easier (since record length is variable), and makes the + * binary logfile more robust against potential data corruption + * and other damage. The signature also servers as a log format + * version identifier. 
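Editor's sketch: __log_request() above reserves space for its record by rounding the head pointer up to the next cacheline multiple, and when the record would not fit before the end of the buffer it zero-fills the unused tail and restarts from offset 0 (the zero padding lets the dump tool skip to the next signature). The reservation arithmetic in isolation, shrunk to a 128-byte buffer so the wrap is visible; the real code also tracks log_tail and throttles producers, which is omitted here:

/* sketch: variable-length, cacheline-aligned record reservation in a ring buffer */
#include <stdio.h>
#include <string.h>

#define LOG_LEN   (128UL)               /* tiny buffer so the wrap is visible */
#define ALIGN_ORD 5                     /* 32-byte slots, as on 32-byte cachelines */
#define ROUND_UP(x) (((((x)-1) >> ALIGN_ORD) + 1) << ALIGN_ORD)

static char log_buffer[LOG_LEN];
static unsigned long log_head;

static char *reserve(unsigned long inc)
{
        unsigned long next_head = ROUND_UP(log_head + inc);
        char *rec;

        if (next_head < LOG_LEN) {      /* record fits before the end */
                rec = log_buffer + log_head;
                log_head = next_head;
        } else {                        /* wrap: pad the tail end, restart at 0 */
                if (log_head < LOG_LEN)
                        memset(log_buffer + log_head, 0, LOG_LEN - log_head);
                rec = log_buffer;
                log_head = ROUND_UP(inc);
        }
        return rec;
}

int main(void)
{
        for (int i = 0; i < 4; i++) {
                char *rec = reserve(20);        /* 20-byte record -> 32-byte slot */
                printf("record %d at offset %ld\n", i, (long)(rec - log_buffer));
        }
        return 0;                       /* prints offsets 0, 32, 64, then 0 again */
}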
+ */ +#if CONFIG_TUX_EXTENDED_LOG + *(u32 *)str = 0x2223beef; +#else + *(u32 *)str = 0x1112beef; +#endif + str += sizeof(u32); + CHECK_LOGPTR(str); + + *(u32 *)str = 0; + /* + * Log the client IP address: + */ + if (req->sock && req->sock->sk && tux_ip_logging) + *(u32 *)str = req->sock->sk->daddr; + str += sizeof(u32); + CHECK_LOGPTR(str); + +#if CONFIG_TUX_EXTENDED_LOG + /* + * Log the client port number: + */ + if (req->sock && req->sock->sk) + *(u32 *)str = req->sock->sk->dport; + else + *(u32 *)str = 0xffffffff; + str += sizeof(u32); + CHECK_LOGPTR(str); +#endif + + /* + * Log the request timestamp, in units of 'seconds since 1970'. + */ + *(u32 *)str = CURRENT_TIME; + str += sizeof(u32); + CHECK_LOGPTR(str); + +#if CONFIG_TUX_EXTENDED_LOG + *(u32 *)str = req->accept_timestamp; str += sizeof(u32); + *(u32 *)str = req->parse_timestamp; str += sizeof(u32); + *(u32 *)str = req->output_timestamp; str += sizeof(u32); + *(u32 *)str = req->flush_timestamp; str += sizeof(u32); + *(u32 *)str = req->had_cachemiss; str += sizeof(u32); + *(u32 *)str = req->keep_alive; str += sizeof(u32); +#endif + /* + * Log the requested file size (in fact, log actual bytes sent.) + */ + *(u32 *)str = req->bytes_sent; + str += sizeof(u32); + CHECK_LOGPTR(str); + + *(u32 *)str = req->status; + str += sizeof(u32); + CHECK_LOGPTR(str); + + /* + * Zero-terminated method, (base) URI, query and version string. + */ + if (req->method_len) { + memcpy(str, req->method_str, req->method_len); + str += req->method_len; + CHECK_LOGPTR(str); + } + *str++ = 0; + + if (virtual_server) { + if (req->host_len) { + memcpy(str, req->host, req->host_len); + str += req->host_len; + } else { + memcpy(str, tux_default_vhost, def_vhost_len); + str += def_vhost_len; + } + CHECK_LOGPTR(str); + } + + strncpy(str, req->uri_str, req->uri_len); + str += uri_len; + *str++ = 0; + + CHECK_LOGPTR(str); + + if (req->version_len) { + memcpy(str, req->version_str, req->version_len); + str += req->version_len; + CHECK_LOGPTR(str); + } + *str++ = 0; +#if CONFIG_TUX_EXTENDED_LOG + if (req->user_agent_len) { + memcpy(str, req->user_agent_str, req->user_agent_len); + str += req->user_agent_len; + CHECK_LOGPTR(str); + } + *str++ = 0; +#endif + CHECK_LOGPTR(str); + + if (tux_referer_logging && req->referer_len) { + memcpy(str, req->referer_str, req->referer_len); + str += req->referer_len; + CHECK_LOGPTR(str); + } + *str++ = 0; + CHECK_LOGPTR(str); + /* + * pad with spaces to next cacheline, with an ending newline. + * (not needed for the user-space log utility, but results in + * a more readable binary log file, and reduces the amount + * of cache pingpong.) 
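Editor's sketch: for reference, this is what the record written above looks like from the consumer side. The fragment below decodes one standard-format record, taking the layout from the code: a 0x1112beef signature, then four more u32 fields (client IP, timestamp, bytes sent, status), then the NUL-terminated method, vhost+URI, version and referer strings. It is roughly what a binary-to-common-log post-processing tool would do; it is an assumption-laden sketch rather than the actual TUX log utility, and bounds checking on the strings is omitted:

/* sketch: decoding one standard-format (0x1112beef) binary log record;
 * extended 0x2223beef records carry additional fields not handled here */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

const char *decode_record(const char *p, const char *end)
{
        uint32_t sig, stamp, bytes, status;
        unsigned char ip[4];
        const char *method, *uri, *version, *referer;

        if (end - p < 5 * (long)sizeof(uint32_t))
                return NULL;
        memcpy(&sig,    p,      4);
        memcpy(ip,      p + 4,  4);     /* zero unless IP logging is enabled */
        memcpy(&stamp,  p + 8,  4);     /* seconds since 1970                */
        memcpy(&bytes,  p + 12, 4);     /* bytes actually sent               */
        memcpy(&status, p + 16, 4);     /* HTTP status code                  */
        if (sig != 0x1112beef)
                return NULL;            /* not a standard-format record      */

        method  = p + 20;               /* four NUL-terminated string fields */
        uri     = method  + strlen(method)  + 1;        /* vhost+URI when vhosting */
        version = uri     + strlen(uri)     + 1;
        referer = version + strlen(version) + 1;

        printf("%u.%u.%u.%u [%u] \"%s %s %s\" %u %u \"%s\"\n",
               ip[0], ip[1], ip[2], ip[3], (unsigned)stamp,
               method, uri, version, (unsigned)status, (unsigned)bytes, referer);

        /* records are space-padded up to the next cacheline boundary; the
         * next record starts at the following signature word (not shown) */
        return referer + strlen(referer) + 1;
}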
+ */ + next = (char *)ROUND_UP((unsigned long)str); + + CHECK_LOGPTR(next); + len = next-str; + memset(str, ' ', len); + + pending = (log_head-log_tail) % LOG_LEN; + spin_unlock_irqrestore(&log_lock, flags); + + if (pending >= SOFT_LIMIT) + wake_up(&log_wait); + + if (pending >= HARD_LIMIT) + __throttle_logging(); +} + +void tux_push_pending (struct sock *sk) +{ + struct tcp_opt *tp = &sk->tp_pinfo.af_tcp; + + lock_sock(sk); + tp->ack.pingpong = tux_ack_pingpong; + sk->tp_pinfo.af_tcp.nonagle = 1; + __tcp_push_pending_frames(sk, tp, tcp_current_mss(sk), 1); + release_sock(sk); +} + +void flush_request (tux_req_t *req, int cachemiss) +{ + struct socket *sock; + struct sock *sk; + int keep_alive; + + if (cachemiss) + TUX_BUG(); + __set_task_state(current, TASK_RUNNING); + + if (req->magic != TUX_MAGIC) + TUX_BUG(); + if (req->ti->thread != current) + TUX_BUG(); +#if CONFIG_TUX_DEBUG + if (req->bytes_expected && (req->bytes_sent != req->bytes_expected)) { + printk("hm, bytes_expected: %d != bytes_sent: %d!\n", + req->bytes_expected, req->bytes_sent); + TUX_BUG(); + } +#endif + SET_TIMESTAMP(req->flush_timestamp); + + log_request(req); + sock = req->sock; + sk = NULL; + if (sock) + sk = sock->sk; + Dprintk("FLUSHING req %p <%p> (sock %p, sk %p) (keepalive: %d, status: %d)\n", req, __builtin_return_address(0), sock, sk, req->keep_alive, req->status); + if (req->in_file.f_pos) + TUX_BUG(); + release_req_dentry(req); + if (req->private) { + kfree(req->private); + req->private = NULL; + } + if (req->usermode) + TUX_BUG(); + if (req->postponed) + TUX_BUG(); + if (test_bit(0, &req->idle_input)) + TUX_BUG(); + if (test_bit(0, &req->wait_output_space)) + TUX_BUG(); + if (req->parsed_len) + trunc_headers(req); + if (req->parsed_len) + TUX_BUG(); + req->attr = NULL; + req->usermodule_idx = 0; + req->atom_idx = 0; + if (req->module_dentry) { + dput(req->module_dentry); + req->module_dentry = NULL; + } + if (req->input_skb) { + kfree_skb(req->input_skb); + req->input_skb = NULL; + } + if (req->headers_buf) + kfree(req->headers_buf); + req->headers = NULL; + req->headers_buf = NULL; + req->headers_len = 0; + + req->method = METHOD_NONE; + req->method_len = 0; + req->method_str = NULL; + req->version = 0; + req->version_str = NULL; + req->version_len = 0; + + req->uri_str = NULL; + req->uri_len = 0; + + req->objectname[0] = 0; + req->objectname_len = 0; + + req->query_str = NULL; + req->query_len = 0; + + req->cookies_str = NULL; + req->cookies_len = 0; + req->parse_cookies = 0; + + req->contentlen_str = NULL; + req->contentlen_len = 0; + req->content_len = 0; + + req->user_agent_str = NULL; + req->user_agent_len = 0; + + req->may_send_gzip = 0; + req->content_gzipped = 0; + + req->host[0] = 0; + req->host_len = 0; + + req->content_type_str = NULL; + req->content_type_len = 0; + + req->accept_str = NULL; + req->accept_len = 0; + + req->accept_charset_str = NULL; + req->accept_charset_len = 0; + + req->accept_encoding_str = NULL; + req->accept_encoding_len = 0; + + req->accept_language_str = NULL; + req->accept_language_len = 0; + + req->cache_control_str = NULL; + req->cache_control_len = 0; + + req->if_modified_since_str = NULL; + req->if_modified_since_len = 0; + + req->if_none_match_str = NULL; + req->if_none_match_len = 0; + + req->negotiate_str = NULL; + req->negotiate_len = 0; + + req->pragma_str = NULL; + req->pragma_len = 0; + + req->referer_str = NULL; + req->referer_len = 0; + + req->post_data_str = NULL; + req->post_data_len = 0; + + SET_TIMESTAMP(req->accept_timestamp); +#if 
CONFIG_TUX_EXTENDED_LOG + req->parse_timestamp = 0; + req->output_timestamp = 0; + req->flush_timestamp = 0; +#endif + req->status = 0; + + req->total_bytes += req->bytes_sent; + req->bytes_sent = 0; +#if CONFIG_TUX_DEBUG + req->bytes_expected = 0; +#endif + req->body_len = 0; + keep_alive = req->keep_alive; + clear_keepalive(req); + req->had_cachemiss = 0; + // first_timestamp and total_bytes is kept! + req->event = 0; + req->lookup_dir = 0; + req->lookup_404 = 0; + + req->error = 0; + + if (req->abuf.page) + __free_page(req->abuf.page); + memset(&req->abuf, 0, sizeof(req->abuf)); + + if (sk && (keep_alive || tux_push_all)) { + tux_push_pending(sk); + if (req->ftp_data_sock) + tux_push_pending(req->ftp_data_sock->sk); + } + + if (sk && keep_alive) { + add_tux_atom(req, parse_request); + if (skb_queue_empty(&sk->receive_queue)) { + spin_lock_irq(&req->ti->work_lock); + add_keepalive_timer(req); + if (test_and_set_bit(0, &req->idle_input)) + TUX_BUG(); + /* + * Avoid the race with the event callback: + */ + if (skb_queue_empty(&sk->receive_queue) || + !test_and_clear_bit(0, &req->idle_input)) { + INC_STAT(nr_idle_input_pending); + spin_unlock_irq(&req->ti->work_lock); + goto out; + } + del_keepalive_timer(req); + spin_unlock_irq(&req->ti->work_lock); + } + Dprintk("KEEPALIVE PENDING req %p <%p> (sock %p, sk %p) (keepalive: %d, status: %d)\n", req, __builtin_return_address(0), req->sock, req->sock->sk, req->keep_alive, req->status); + add_req_to_workqueue(req); + INC_STAT(nr_keepalive_optimized); + goto out; + } + + del_timer_sync(&req->keepalive_timer); + del_timer_sync(&req->output_timer); + + if (timer_pending(&req->keepalive_timer)) + TUX_BUG(); + if (timer_pending(&req->output_timer)) + TUX_BUG(); + if (!list_empty(&req->lru)) + TUX_BUG(); + req->nr_keepalives = 0; + + if (req->cwd) { + Dprintk("dput() cwd %p, count: %d.\n", + req->cwd, atomic_read(&req->cwd->d_count)); + dput(req->cwd); + req->cwd = NULL; + if (!req->cwdmnt) + TUX_BUG(); + } + if (req->cwdmnt) { + Dprintk("mntput() cwdmnt %p, count: %d.\n", + req->cwdmnt, atomic_read(&req->cwdmnt->mnt_count)); + mntput(req->cwdmnt); + req->cwdmnt = NULL; + } + if (req->ftp_data_sock) { + unlink_tux_ftp_data_socket(req); + sock_release(req->ftp_data_sock); + req->ftp_data_sock = NULL; + } + req->ftp_offset = 0; + req->ftp_curroff = 0; + req->ftp_total = 0; + if (req->ftp_dirp0) { + kfree(req->ftp_dirp0); + req->ftp_dirp0 = NULL; + } + + if (sk) + unlink_tux_socket(req); + req->sock = NULL; + /* + * Close potential user-space file descriptors. 
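Editor's sketch: tux_push_pending() above exists because a fully queued keepalive response can otherwise sit in the socket as an unsent partial segment until the Nagle/delayed-ACK machinery times out, so TUX pokes __tcp_push_pending_frames() directly. From userspace the closest equivalent is toggling the socket options that control the same behaviour, sketched below (sock_fd is an assumed connected TCP socket; error handling omitted):

/* sketch: force queued TCP data out once a response is complete */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

void push_pending(int sock_fd)
{
        int one = 1, zero = 0;

        /* releasing TCP_CORK, or enabling TCP_NODELAY, makes the kernel
         * transmit whatever is still queued on the socket right away */
        setsockopt(sock_fd, IPPROTO_TCP, TCP_CORK, &zero, sizeof(zero));
        setsockopt(sock_fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
}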
+ */ + { + int fd = req->fd, ret; + + if (fd != -1) { + req->fd = -1; + ret = sys_close(fd); + if (ret) + TUX_BUG(); + } else + if (sock) + sock_release(sock); + } + kfree_req(req); +out: +} + +static int warn_once = 1; + +static int writeout_log (void) +{ + unsigned int len, pending, next_log_tail; + struct file *log_filp; + char * str; + int ret; + + Dprintk("TUX logger: opening log file {%s}.\n", tux_logfile); + log_filp = tux_open_file(tux_logfile, O_CREAT|O_APPEND|O_WRONLY|O_LARGEFILE); + if (!log_filp) { + if (warn_once) { + printk(KERN_ERR "TUX: could not open log file {%s}!\n", + tux_logfile); + warn_once = 0; + } + __set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(HZ); + return 0; + } + spin_lock(&log_lock); + str = log_buffer + log_tail; + if (log_head < log_tail) { + len = LOG_LEN-log_tail; + next_log_tail = 0; + } else { + len = log_head-log_tail; + next_log_tail = log_head; + } + if (!len) + goto out; + spin_unlock(&log_lock); + + ret = tux_write_file(log_filp, str, len); + if (len != ret) { + if (ret == -ENOSPC) { + printk(KERN_ERR "TUX: trying to write TUX logfile %s, but filesystem is full! Lost %d bytes of log data.\n", tux_logfile, len); + } else { + printk(KERN_ERR "TUX: log write %d != %d.\n", ret, len); + printk(KERN_ERR "TUX: log_filp: %p, str: %p, len: %d str[len-1]: %d.\n", log_filp, str, len, str[len-1]); + } + goto out_lock; + } + + /* + * Sync log data to disk: + */ + if (log_filp->f_op && log_filp->f_op->fsync) { + down(&log_filp->f_dentry->d_inode->i_sem); + log_filp->f_op->fsync(log_filp, log_filp->f_dentry, 1); + up(&log_filp->f_dentry->d_inode->i_sem); + } + + /* + * Reduce the cache footprint of the logger file - it's + * typically write-once. + */ + flush_inode_pages(log_filp->f_dentry->d_inode); + +out_lock: + spin_lock(&log_lock); +out: + log_tail = next_log_tail; + pending = (log_head-log_tail) % LOG_LEN; + spin_unlock(&log_lock); + + if (pending < HARD_LIMIT) + wake_up(&log_full); + + fput(log_filp); + return pending; +} + +static DECLARE_WAIT_QUEUE_HEAD(stop_logger_wait); +static int stop_logger = 0; + +static int logger_thread (void *data) +{ + DECLARE_WAITQUEUE(wait, current); + mm_segment_t oldmm; + + daemonize(); + + oldmm = get_fs(); + set_fs(KERNEL_DS); + printk(KERN_NOTICE "TUX: logger thread started.\n"); + sprintf(current->comm, "TUX logger"); +#if CONFIG_SMP + { + unsigned long mask = log_cpu_mask; + + if (cpu_online_map & mask) + current->cpus_allowed = mask; + } +#endif + + + spin_lock_irq(¤t->sigmask_lock); + siginitsetinv(¤t->blocked, 0); + recalc_sigpending(current); + spin_unlock_irq(¤t->sigmask_lock); + + if (log_buffer) + TUX_BUG(); + log_buffer = vmalloc(LOG_LEN); + memset(log_buffer, 0, LOG_LEN); + log_head = log_tail = 0; + + current->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY; + + add_wait_queue(&log_wait, &wait); + for (;;) { + Dprintk("logger does writeout - stop:%d.\n", stop_logger); + + while (writeout_log() >= SOFT_LIMIT) { + if (stop_logger) + break; + } + if (stop_logger) + break; + /* nothing */; + + Dprintk("logger does sleep - stop:%d.\n", stop_logger); + __set_current_state(TASK_INTERRUPTIBLE); + if (log_head != log_tail) { + __set_current_state(TASK_RUNNING); + continue; + } + schedule_timeout(HZ); + Dprintk("logger back from sleep - stop:%d.\n", stop_logger); + } + remove_wait_queue(&log_wait, &wait); + + vfree(log_buffer); + log_buffer = NULL; + stop_logger = 0; + wake_up(&stop_logger_wait); + + set_fs(oldmm); + + return 0; +} + +void init_log_thread (void) +{ + warn_once = 1; + + logger_pid = 
kernel_thread(logger_thread, NULL, 0); + if (logger_pid < 0) + TUX_BUG(); +} + +void stop_log_thread (void) +{ + DECLARE_WAITQUEUE(wait, current); + int ret; + + Dprintk("stopping logger thread %d ...\n", logger_pid); + + __set_current_state(TASK_UNINTERRUPTIBLE); + add_wait_queue(&stop_logger_wait, &wait); + stop_logger = 1; + wake_up(&log_wait); + schedule(); + __set_current_state(TASK_RUNNING); + remove_wait_queue(&stop_logger_wait, &wait); + + ret = sys_wait4(logger_pid, NULL, __WCLONE, NULL); + if (stop_logger) + TUX_BUG(); + Dprintk("logger thread stopped!\n"); +} diff -rNu linux-2.4.9-ac10/net/tux/main.c linux/net/tux/main.c --- linux-2.4.9-ac10/net/tux/main.c Thu Jan 1 01:00:00 1970 +++ linux/net/tux/main.c Mon Sep 10 16:18:08 2001 @@ -0,0 +1,1248 @@ +/* + * TUX - Integrated Application Protocols Layer and Object Cache + * + * Copyright (C) 2000, 2001, Ingo Molnar + * + * main.c: main management and initialization routines + */ + +#define __KERNEL_SYSCALLS__ +#include + +/**************************************************************** + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + ****************************************************************/ + +/* + * Threads information. + */ +static int nr_threads; +static atomic_t nr_threads_running = ATOMIC_INIT(0); +static int stop_threads = 0; + +static threadinfo_t threadinfo[CONFIG_TUX_NUMTHREADS]; + +struct nameidata docroot; + +static void flush_all_requests (threadinfo_t *ti); + +void flush_all_signals (void) +{ + spin_lock_irq(¤t->sigmask_lock); + flush_signals(current); + recalc_sigpending(current); + spin_unlock_irq(¤t->sigmask_lock); +} + +int nr_requests_used (void) +{ + int i, nr = 0; + + for (i = 0; i < nr_threads; i++) { + threadinfo_t *ti = threadinfo + i; + nr += ti->nr_requests - ti->nr_free_requests; + } + + return nr; +} +int accept_pending (threadinfo_t *ti) +{ + int j; + + mb(); + for (j = 0; j < CONFIG_TUX_NUMSOCKETS; j++) { + if (!ti->listen[j].proto) + break; + if (ti->listen[j].sock->sk->tp_pinfo.af_tcp.accept_queue) + return 1; + } + return 0; +} + +int requests_pending (threadinfo_t *ti) +{ + mb(); + if (!list_empty(&ti->work_pending)) + return 1; + return 0; +} + +void reap_kids (void) +{ + int count = 0; + + flush_all_signals(); + __set_task_state(current, TASK_RUNNING); + while (sys_wait4(-1, NULL, WNOHANG, NULL) > 0) + count++; + + Dprintk("reaped %d kids (%p) [signals pending: %08lx].\n", count, __builtin_return_address(0), current->pending.signal.sig[0]); +} + +static int event_loop (threadinfo_t *ti) +{ + tux_req_t *req; + int work_done; + +repeat_accept: + if (ti->thread != current) + TUX_BUG(); + + /* + * Any (relevant) event on the socket will change this + * thread to TASK_RUNNING because we add it to both + * the main listening and the connection request socket + * waitqueues. 
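Editor's sketch: reap_kids() above is the standard non-blocking zombie sweep; because CGI helpers are spawned as children of the fast threads, every signal-pending check is followed by a WNOHANG wait loop. Its direct userspace counterpart, shown for comparison only:

/* sketch: collect every exited child without blocking, as reap_kids() does */
#include <sys/wait.h>

int reap_kids(void)
{
        int count = 0;

        while (waitpid(-1, NULL, WNOHANG) > 0)
                count++;                /* each iteration buries one zombie */
        return count;
}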
Thus we can do 'lazy checking' of work + * to be done and schedule away only if the thread is + * still TASK_INTERRUPTIBLE. This makes TUX fully + * event driven. + */ + __set_task_state(current, TASK_INTERRUPTIBLE); + work_done = 0; + if (accept_pending(ti)) + work_done = accept_requests(ti); + + if (requests_pending(ti)) { + work_done = process_requests(ti, &req); + if (req) + goto handle_userspace_req; + } + + /* + * Be nice to other processes: + */ + if (current->need_resched) { + __set_task_state(current, TASK_RUNNING); + schedule(); + goto repeat_accept; + } + + if (ti->userspace_req) + TUX_BUG(); + if (stop_threads) + goto handle_stop; + /* + * Catch possible SIGCHLDs coming from external CGI + * processes. + */ + if (signal_pending(current)) { + reap_kids(); + work_done = 1; + } + /* + * Any signals left? + */ + if (signal_pending(current)) + goto handle_signal; + + if (work_done) + goto repeat_accept; + /* + * Any socket event either on the listen socket + * or on the request sockets will wake us up: + */ + mb(); + if ((current->state != TASK_RUNNING) && + !requests_pending(ti) && !accept_pending(ti)) { + Dprintk("fast thread: no work to be done, sleeping.\n"); + schedule(); + Dprintk("fast thread: back from sleep!\n"); + goto repeat_accept; + } + /* + * Be nice to other processes: + */ + if (!current->need_resched) + goto repeat_accept; + + __set_task_state(current, TASK_RUNNING); + schedule(); + goto repeat_accept; + +handle_userspace_req: + if (!req->attr->tcapi) + TUX_BUG(); + ti->userspace_req = req; + __set_task_state(current, TASK_RUNNING); + return TUX_RETURN_USERSPACE_REQUEST; + +handle_signal: + __set_task_state(current, TASK_RUNNING); + return TUX_RETURN_SIGNAL; + +handle_stop: + __set_task_state(current, TASK_RUNNING); + return TUX_RETURN_EXIT; +} + +static int init_queues (int nr_threads) +{ + int i; + + for (i = 0; i < nr_threads; i++) { + threadinfo_t *ti = threadinfo + i; + + INIT_LIST_HEAD(&ti->all_requests); + + ti->free_requests_lock = SPIN_LOCK_UNLOCKED; + INIT_LIST_HEAD(&ti->free_requests); + + ti->work_lock = SPIN_LOCK_UNLOCKED; + INIT_LIST_HEAD(&ti->work_pending); + INIT_LIST_HEAD(&ti->lru); + + } + return 0; +} + +int tux_chroot (char *dir) +{ + kernel_cap_t saved_cap = current->cap_effective; + mm_segment_t oldmm; + int err; + + /* Allow chroot dir to be in kernel space. */ + oldmm = get_fs(); set_fs(KERNEL_DS); + set_fs(KERNEL_DS); + cap_raise (current->cap_effective, CAP_SYS_CHROOT); + + err = sys_chroot(dir); + if (!err) + sys_chdir("/"); + + current->cap_effective = saved_cap; + set_fs(oldmm); + + return err; +} + +/* + * Right now this is not fully SMP-safe against multiple TUX + * managers. It's just a rudimentary protection against typical + * mistakes. + */ +static int initialized = 0; + +static int user_req_startup (void) +{ + int i, err; + + if (initialized) + return -EINVAL; + initialized = 1; + + /* + * Look up document root: + */ + if (docroot.mnt) + TUX_BUG(); + docroot.mnt = mntget(current->fs->rootmnt); + docroot.dentry = dget(current->fs->root); + docroot.last.len = 0; + docroot.flags = LOOKUP_FOLLOW|LOOKUP_POSITIVE; + + err = path_walk(tux_docroot, &docroot); + if (err) { + docroot.mnt = NULL; + initialized = 0; + printk(KERN_ERR "TUX: could not look up documentroot: \"%s\"\n", + tux_docroot); + return err; + } + + /* + * Start up the logger thread. 
(which opens the logfile) + */ + init_log_thread(); + + nr_threads = tux_threads; + if (nr_threads < 1) + nr_threads = 1; + if (nr_threads > CONFIG_TUX_NUMTHREADS) + nr_threads = CONFIG_TUX_NUMTHREADS; + tux_threads = nr_threads; + + /* + * Set up per-thread work-queues: + */ + memset(threadinfo, 0, CONFIG_TUX_NUMTHREADS*sizeof(threadinfo_t)); + init_queues(nr_threads); + + /* + * Prepare the worker thread structures. + */ + for (i = 0; i < nr_threads; i++) { + threadinfo_t *ti = threadinfo + i; + ti->cpu = i; + } + + MOD_INC_USE_COUNT; + + return 0; +} + +static DECLARE_WAIT_QUEUE_HEAD(wait_stop); +static DECLARE_WAIT_QUEUE_HEAD(thread_stopped); + +static int user_req_shutdown (void) +{ + DECLARE_WAITQUEUE(wait, current); + int err = -EINVAL; + + lock_kernel(); + if (!initialized) { + Dprintk("TUX is not up - cannot shut down.\n"); + goto err; + } + initialized = 0; + stop_threads = 1; + add_wait_queue(&thread_stopped, &wait); + +wait_more: + /* + * Wake up all the worker threads so they notice + * that we are being stopped. + */ + __set_task_state(current, TASK_UNINTERRUPTIBLE); + if (atomic_read(&nr_threads_running)) { + Dprintk("TUX: shutdown, %d threads still running.\n", + atomic_read(&nr_threads_running)); + wake_up(&wait_stop); + schedule(); + goto wait_more; + } + __set_task_state(current, TASK_RUNNING); + stop_threads = 0; + remove_wait_queue(&thread_stopped, &wait); + + if (nr_async_io_pending()) + TUX_BUG(); + + stop_log_thread(); + path_release(&docroot); + memset(&docroot, 0, sizeof(docroot)); + err = 0; + + flush_dentry_tuxinfo(); + free_mimetypes(); + unregister_all_tuxmodules(); + + MOD_DEC_USE_COUNT; + +err: + unlock_kernel(); + return err; +} + +void drop_permissions (void) +{ + /* Give the new process no privileges.. */ + current->uid = current->euid = + current->suid = current->fsuid = tux_cgi_uid; + current->gid = current->egid = + current->sgid = current->fsgid = tux_cgi_gid; + + current->session = current->pgrp = current->pid; + + current->ngroups = 0; + cap_clear(current->cap_permitted); + cap_clear(current->cap_inheritable); + cap_clear(current->cap_effective); +} + +static int user_req_start_thread (threadinfo_t *ti) +{ + unsigned int err, cpu, mask, i, j, k; + struct k_sigaction *ka; + tux_proto_t *proto; + + err = tux_chroot(tux_docroot); + if (err) { + printk(KERN_ERR "TUX: chroot to docroot returned %d, /proc/sys/net/tux/docroot is probably set up incorrectly! 
Aborting TUX startup.\n", err);
+		stop_log_thread();
+		return err;
+	}
+
+	err = start_cachemiss_threads(ti);
+	if (err)
+		return err;
+	cpu = ti->cpu;
+	mask = 1 << cpu;
+#if CONFIG_SMP
+	if (cpu_online_map & mask)
+		current->cpus_allowed = mask;
+#endif
+	ti->thread = current;
+	current->flags |= PF_ATOMICALLOC;
+	atomic_inc(&nr_threads_running);
+
+	init_waitqueue_entry(&ti->stop, current);
+	for (j = 0; j < CONFIG_TUX_NUMSOCKETS; j++)
+		init_waitqueue_entry(ti->wait_event + j, current);
+
+	ka = current->sig->action + SIGCHLD-1;
+	ka->sa.sa_handler = SIG_IGN;
+
+	/* Block all signals except SIGKILL, SIGSTOP, SIGHUP and SIGCHLD */
+	spin_lock_irq(&current->sigmask_lock);
+	siginitsetinv(&current->blocked, sigmask(SIGKILL) |
+		sigmask(SIGSTOP)| sigmask(SIGHUP) | sigmask(SIGCHLD));
+	recalc_sigpending(current);
+	spin_unlock_irq(&current->sigmask_lock);
+
+	for (k = 0; k < CONFIG_TUX_NUMSOCKETS; k++) {
+		if (tux_listen[cpu][k] == -1)
+			break;
+		for (i = 0; i < cpu; i++) {
+			for (j = 0; j < CONFIG_TUX_NUMSOCKETS; j++) {
+				if (tux_listen[i][j] == -1)
+					break;
+				if (tux_listen[i][j] == tux_listen[cpu][k]) {
+					while (!threadinfo[i].listen[j].proto) {
+						current->policy |= SCHED_YIELD;
+						schedule();
+						if (threadinfo[i].listen_error)
+							goto error;
+					}
+					ti->listen[k] = threadinfo[i].listen[j];
+					ti->listen[k].cloned = 1;
+					goto out;
+				}
+			}
+		}
+
+		switch (tux_application_protocol) {
+			case 0:
+				proto = &tux_proto_http;
+				break;
+			case 1:
+				proto = &tux_proto_ftp;
+				break;
+			default:
+				proto = &tux_proto_http;
+		}
+
+		ti->listen[k].sock = start_listening(tux_serverport,
+			tux_listen[cpu][k], proto->defer_accept, cpu);
+		if (!ti->listen[k].sock)
+			goto error;
+		ti->listen[k].cloned = 0;
+		ti->listen[k].proto = proto;
+	}
+out:
+	if (!ti->listen[0].proto)
+		TUX_BUG();
+
+	add_wait_queue(&wait_stop, &ti->stop);
+	for (j = 0; j < CONFIG_TUX_NUMSOCKETS; j++)
+		if (ti->listen[j].proto)
+			add_wait_queue_exclusive(ti->listen[j].sock->sk->sleep,
+				ti->wait_event + j);
+	drop_permissions();
+
+	ti->started = 1;
+	MOD_INC_USE_COUNT;
+	return 0;
+
+error:
+	ti->listen_error = 1;
+	flush_all_requests(ti);
+	stop_cachemiss_threads(ti);
+
+	printk(KERN_NOTICE "TUX: could not start worker thread %i.\n", ti->cpu);
+	/*
+	 * Last thread close the door:
+	 */
+	if (atomic_dec_and_test(&nr_threads_running))
+		user_req_shutdown();
+
+	return -EINVAL;
+}
+
+static int flush_idleinput (threadinfo_t * ti)
+{
+	struct list_head *head, *tmp;
+	tux_req_t *req;
+	int count = 0;
+
+	head = &ti->all_requests;
+	tmp = head->next;
+
+	while (tmp != head) {
+		req = list_entry(tmp, tux_req_t, all);
+		tmp = tmp->next;
+		if (test_bit(0, &req->idle_input)) {
+			idle_event(req);
+			count++;
+		}
+	}
+	return count;
+}
+
+static int flush_waitoutput (threadinfo_t * ti)
+{
+	struct list_head *head, *tmp;
+	tux_req_t *req;
+	int count = 0;
+
+	head = &ti->all_requests;
+	tmp = head->next;
+
+	while (tmp != head) {
+		req = list_entry(tmp, tux_req_t, all);
+		tmp = tmp->next;
+		if (test_bit(0, &req->wait_output_space)) {
+			output_space_event(req);
+			count++;
+		}
+	}
+	return count;
+}
+
+static void flush_all_requests (threadinfo_t *ti)
+{
+	for (;;) {
+		int count;
+
+		count = flush_idleinput(ti);
+		count += flush_waitoutput(ti);
+		count += flush_workqueue(ti);
+		count += flush_freequeue(ti);
+		if (!ti->nr_requests)
+			break;
+		/*
+		 * Go through again if we advanced:
+		 */
+		if (count)
+			continue;
+		Dprintk("flush_all_requests: %d requests still waiting.\n", ti->nr_requests);
+#if TUX_DEBUG
+		count = print_all_requests(ti);
+		Dprintk("flush_all_requests: printed
%d requests.\n", count); +#endif + current->state = TASK_UNINTERRUPTIBLE; + schedule_timeout(HZ/10); + } +} + +int nr_async_io_pending (void) +{ + int i, sum = 0; + + for (i = 0; i < nr_threads; i++) { + threadinfo_t *ti = threadinfo + i; + if (ti->iot) + sum += ti->iot->nr_async_pending; + } + return sum; +} + +static int user_req_stop_thread (threadinfo_t *ti) +{ + int j; + + printk(KERN_NOTICE "TUX: thread %d stopping ...\n", + (int)(ti-threadinfo)); + + if (!ti->started) + TUX_BUG(); + for (j = 0; j < CONFIG_TUX_NUMSOCKETS; j++) + if (ti->listen[j].proto) + remove_wait_queue(ti->listen[j].sock->sk->sleep, + ti->wait_event + j); + remove_wait_queue(&wait_stop, &ti->stop); + + Dprintk(KERN_NOTICE "TUX: thread %d waiting for sockets to go inactive ...\n", (int)(ti-threadinfo)); + for (j = 0; j < CONFIG_TUX_NUMSOCKETS; j++) { + if (!ti->listen[j].proto) + break; + if (!ti->listen[j].cloned) { + struct socket *sock; + + sock = ti->listen[j].sock; + while (waitqueue_active(sock->sk->sleep)) { + current->policy |= SCHED_YIELD; + schedule(); + } + ti->listen[j].sock = NULL; + sock_release(sock); + } + } + Dprintk(KERN_NOTICE "TUX: thread %d has all sockets inactive.\n", (int)(ti-threadinfo)); + + flush_all_requests(ti); + stop_cachemiss_threads(ti); + + if (ti->nr_requests) + TUX_BUG(); + ti->started = 0; + mb(); + + printk(KERN_INFO "TUX: thread %i stopped.\n", ti->cpu); + + ti->thread = NULL; + current->tux_info = NULL; + current->tux_exit = NULL; + atomic_dec(&nr_threads_running); + wake_up(&thread_stopped); + + MOD_DEC_USE_COUNT; + + return 0; +} + +#define COPY_INT(u_field, k_field) \ +do { \ + if (__copy_to_user(&u_info->u_field, &req->k_field, \ + sizeof(req->k_field))) \ + return_EFAULT; \ +} while (0) + +#define GETLEN(k_field, maxlen) \ + ((req->k_field##_len < maxlen) ? 
\ + req->k_field##_len : maxlen-1) + +#define COPY_STR(u_field, k_field, maxlen) \ +do { \ + if (__copy_to_user(u_info->u_field, req->k_field##_str, \ + GETLEN(k_field, maxlen))) \ + return_EFAULT; \ +} while (0) + +#define COPY_COND_STR(u_field,k_field,maxlen) \ +do { \ + if (req->k_field##_len) \ + COPY_STR(u_field, k_field, maxlen); \ + if (__put_user((char)0, u_info->u_field + \ + GETLEN(k_field, maxlen))) \ + return_EFAULT; \ +} while (0) + +static void finish_userspace_req (tux_req_t *req) +{ + threadinfo_t *ti = req->ti; + + ti->userspace_req = NULL; + req->usermode = 0; + req->private = NULL; + req->error = 0; + DEC_STAT(nr_userspace_pending); + flush_request(req, 0); +} + +static void zap_userspace_req (tux_req_t *req) +{ + clear_keepalive(req); + finish_userspace_req(req); +} + +/* + * Fills in the user-space request structure: + */ +static int prepare_userspace_req (threadinfo_t *ti, user_req_t *u_info) +{ + tux_req_t *req = ti->userspace_req; + unsigned int tmp; + int filelen; + int fd; + + Dprintk("prepare_userspace_req(%p).\n", req); + if (!req) + TUX_BUG(); + if (req->error) { + TDprintk("userspace request has error %d.\n", req->error); + return -1; + } + fd = req->fd; + if (fd == -1) { + fd = sock_map_fd(req->sock); + Dprintk("sock_map_fd(%p) :%d.\n", req, fd); + if (fd < 0) { + Dprintk("sock_map_fd() returned %d.\n", fd); + return -EMFILE; + } + req->fd = fd; + } + +#define return_EFAULT do { Dprintk("-EFAULT at %d:%s.\n", __LINE__, __FILE__); return -EFAULT; } while (0) + + if (!access_ok(VERIFY_WRITE, u_info, sizeof(*u_info))) + return_EFAULT; + if (__copy_to_user(&u_info->sock, &fd, sizeof(fd))) + return_EFAULT; + if (!req->attr->tcapi) + TUX_BUG(); + + COPY_INT(module_index, usermodule_idx); + + COPY_COND_STR(query, query, MAX_URI_LEN); + + COPY_INT(event, event); + + filelen = req->filelen; + if (filelen < 0) + filelen = 0; + if (__copy_to_user(&u_info->objectlen, &filelen, sizeof(filelen))) + return_EFAULT; + if ((req->method == METHOD_POST) && !filelen) + if (__copy_to_user(&u_info->objectlen, + &req->content_len, sizeof(filelen))) + return_EFAULT; + if (req->objectname_len) { + if (req->objectname[req->objectname_len]) + TUX_BUG(); + if (__copy_to_user(u_info->objectname, req->objectname, + req->objectname_len + 1)) + return_EFAULT; + } else + if (__put_user((char)0, u_info->objectname)) + return_EFAULT; + + COPY_INT(http_version, version); + COPY_INT(http_method, method); + COPY_INT(keep_alive, keep_alive); + + COPY_INT(cookies_len, cookies_len); + if (req->cookies_len) + COPY_STR(cookies, cookies, MAX_COOKIE_LEN); + if (__put_user((char)0, u_info->cookies + req->cookies_len)) + return_EFAULT; + + if (__copy_to_user(&u_info->id, &req, sizeof(req))) + return_EFAULT; + COPY_INT(priv, private); + COPY_INT(bytes_sent, bytes_sent); + + tmp = tux_client_addr(req); + if (__copy_to_user(&u_info->client_host, &tmp, sizeof(tmp))) + return_EFAULT; + + COPY_COND_STR(content_type, content_type, MAX_FIELD_LEN); + COPY_COND_STR(user_agent, user_agent, MAX_FIELD_LEN); + COPY_COND_STR(accept, accept, MAX_FIELD_LEN); + COPY_COND_STR(accept_charset, accept_charset, MAX_FIELD_LEN); + COPY_COND_STR(accept_encoding, accept_encoding, MAX_FIELD_LEN); + COPY_COND_STR(accept_language, accept_language, MAX_FIELD_LEN); + COPY_COND_STR(cache_control, cache_control, MAX_FIELD_LEN); + COPY_COND_STR(if_modified_since, if_modified_since, MAX_FIELD_LEN); + COPY_COND_STR(negotiate, negotiate, MAX_FIELD_LEN); + COPY_COND_STR(pragma, pragma, MAX_FIELD_LEN); + COPY_COND_STR(referer, referer, 
MAX_FIELD_LEN); + + return TUX_RETURN_USERSPACE_REQUEST; +} + +#define GOTO_ERR_no_unlock do { Dprintk("sys_tux() ERR at %s:%d.\n", __FILE__, __LINE__); goto err_no_unlock; } while (0) +#define GOTO_ERR_unlock do { Dprintk("sys_tux() ERR at %s:%d.\n", __FILE__, __LINE__); goto err_unlock; } while (0) + +static int register_mimetype(user_req_t *u_info) +{ + char extension[MAX_URI_LEN], mimetype[MAX_URI_LEN]; + char *addr; + int ret; + + ret = strncpy_from_user(extension, u_info->objectname, MAX_URI_LEN); + if (ret <= 0) + GOTO_ERR_no_unlock; + extension[ret] = 0; + Dprintk("got MIME extension: %s.\n", extension); + ret = copy_from_user(&addr, &u_info->object_addr, sizeof(addr)); + if (ret) + GOTO_ERR_no_unlock; + ret = strncpy_from_user(mimetype, addr, MAX_URI_LEN); + if (ret <= 0) + GOTO_ERR_no_unlock; + mimetype[ret] = 0; + Dprintk("got MIME type: %s.\n", mimetype); + add_mimetype(extension, mimetype); + ret = 0; +err_no_unlock: + return ret; +} + +void user_send_buffer (tux_req_t *req, int cachemiss) +{ + int ret; + + + SET_TIMESTAMP(req->output_timestamp); + +repeat: + ret = send_sync_buf(req, req->sock, req->userbuf, req->userlen, MSG_DONTWAIT | MSG_MORE); + switch (ret) { + case -EAGAIN: + add_tux_atom(req, user_send_buffer); + if (add_output_space_event(req, req->sock)) { + del_tux_atom(req); + goto repeat; + } + INC_STAT(user_sendbuf_write_misses); + break; + default: + if (ret <= 0) { + req_err(req); + req->usermode = 0; + req->private = NULL; + add_req_to_workqueue(req); + break; + } + req->userbuf += ret; + req->userlen -= ret; + if ((int)req->userlen < 0) + TUX_BUG(); + if (req->userlen) + goto repeat; + add_req_to_workqueue(req); + break; + } +} + +void user_send_object (tux_req_t *req, int cachemiss) +{ + int ret; + + + SET_TIMESTAMP(req->output_timestamp); + +repeat: + ret = generic_send_file(req, 1, !cachemiss, req->sock); + switch (ret) { + case -5: + add_tux_atom(req, user_send_object); + break; + case -4: + add_tux_atom(req, user_send_object); + if (add_output_space_event(req, req->sock)) { + del_tux_atom(req); + goto repeat; + } + INC_STAT(user_sendobject_write_misses); + break; + case -3: + INC_STAT(user_sendobject_cachemisses); + add_tux_atom(req, user_send_object); + queue_cachemiss(req); + break; + default: + req->in_file.f_pos = 0; + add_req_to_workqueue(req); + break; + } +} + +void user_get_object (tux_req_t *req, int cachemiss) +{ + int missed; + + if (!req->dentry) { + req->usermode = 0; + missed = lookup_url(req, cachemiss ? 
0 : LOOKUP_ATOMIC); + if (req->usermode) + TUX_BUG(); + req->usermode = 1; + if ((!missed && !req->dentry) || (missed == 2)) { + req_err(req); + add_req_to_workqueue(req); + return; + } + if (missed) { + if (cachemiss) + TUX_BUG(); + INC_STAT(user_lookup_cachemisses); +fetch_missed: + req->ti->userspace_req = NULL; + DEC_STAT(nr_userspace_pending); + add_tux_atom(req, user_get_object); + queue_cachemiss(req); + return; + } + } + if (tux_fetch_file(req, !cachemiss)) { + INC_STAT(user_fetch_cachemisses); + goto fetch_missed; + } + req->in_file.f_pos = 0; + add_req_to_workqueue(req); +} + +asmlinkage int __sys_tux (unsigned int action, user_req_t *u_info) +{ + int ret = -1; + threadinfo_t *ti; + tux_req_t *req; + + Dprintk("got sys_tux(%d, %p).\n", action, u_info); + + if (action >= MAX_TUX_ACTION) + GOTO_ERR_no_unlock; + + ti = (threadinfo_t *) current->tux_info; + if (ti) + if (ti->thread != current) + TUX_BUG(); + + if (!capable(CAP_SYS_ADMIN) + && (action != TUX_ACTION_CONTINUE_REQ) && + (action != TUX_ACTION_STOPTHREAD)) + goto userspace_actions; + + switch (action) { + case TUX_ACTION_CONTINUE_REQ: + ret = continue_request((int)u_info); + goto out; + + case TUX_ACTION_STARTUP: + lock_kernel(); + ret = user_req_startup(); + unlock_kernel(); + goto out; + + case TUX_ACTION_SHUTDOWN: + lock_kernel(); + ret = user_req_shutdown(); + unlock_kernel(); + goto out; + + case TUX_ACTION_REGISTER_MODULE: + ret = user_register_module(u_info); + goto out; + + case TUX_ACTION_UNREGISTER_MODULE: + ret = user_unregister_module(u_info); + goto out; + + case TUX_ACTION_STARTTHREAD: + { + int nr; + + ret = copy_from_user(&nr, &u_info->thread_nr, + sizeof(int)); + if (ret) + GOTO_ERR_no_unlock; + if (nr >= nr_threads) + GOTO_ERR_no_unlock; + ti = threadinfo + nr; + if (ti->started) + GOTO_ERR_unlock; + current->tux_info = ti; + current->tux_exit = tux_exit; + if (ti->thread) + TUX_BUG(); + Dprintk("TUX: current open files limit for TUX%d: %ld.\n", nr, current->rlim[RLIMIT_NOFILE].rlim_cur); + lock_kernel(); + ret = user_req_start_thread(ti); + unlock_kernel(); + if (ret) { + current->tux_info = NULL; + current->tux_exit = NULL; + } else { + if (ti->thread != current) + TUX_BUG(); + } + goto out_userreq; + } + + case TUX_ACTION_STOPTHREAD: + if (!ti) + GOTO_ERR_no_unlock; + if (!ti->started) + GOTO_ERR_unlock; + req = ti->userspace_req; + if (req) + zap_userspace_req(req); + + lock_kernel(); + ret = user_req_stop_thread(ti); + unlock_kernel(); + goto out_userreq; + + case TUX_ACTION_CURRENT_DATE: + ret = strncpy_from_user(tux_date, u_info->new_date, + DATE_LEN); + if (ret <= 0) + GOTO_ERR_no_unlock; + goto out; + + case TUX_ACTION_REGISTER_MIMETYPE: + ret = register_mimetype(u_info); + if (ret) + GOTO_ERR_no_unlock; + goto out; + + default: + } + +userspace_actions: + + if (!ti) + GOTO_ERR_no_unlock; + + if (!ti->started) + GOTO_ERR_unlock; + + req = ti->userspace_req; + if (!req) { + if (action == TUX_ACTION_EVENTLOOP) + goto eventloop; + GOTO_ERR_unlock; + } + if (!req->usermode) + TUX_BUG(); + + ret = copy_from_user(&req->event, &u_info->event, sizeof(int)); + if (ret) + GOTO_ERR_unlock; + ret = copy_from_user(&req->status, &u_info->http_status, sizeof(int)); + if (ret) + GOTO_ERR_unlock; + ret = copy_from_user(&req->bytes_sent, &u_info->bytes_sent, sizeof(int)); + if (ret) + GOTO_ERR_unlock; + ret = copy_from_user(&req->private, &u_info->priv, sizeof(req->private)); + if (ret) + GOTO_ERR_unlock; + + switch (action) { + + case TUX_ACTION_EVENTLOOP: +eventloop: + req = ti->userspace_req; + if (req) + 
zap_userspace_req(req); + ret = event_loop(ti); + goto out_userreq; + + /* + * Module forces keepalive off, server will close + * the connection. + */ + case TUX_ACTION_FINISH_CLOSE_REQ: + clear_keepalive(req); + + case TUX_ACTION_FINISH_REQ: + finish_userspace_req(req); + goto eventloop; + + case TUX_ACTION_REDIRECT_REQ: + + ti->userspace_req = NULL; + req->usermode = 0; + req->private = NULL; + req->error = 1; + DEC_STAT(nr_userspace_pending); + add_tux_atom(req, redirect_request); + add_req_to_workqueue(req); + + goto eventloop; + + case TUX_ACTION_POSTPONE_REQ: + + postpone_request(req); + ti->userspace_req = NULL; + ret = TUX_RETURN_USERSPACE_REQUEST; + break; + + case TUX_ACTION_GET_OBJECT: + release_req_dentry(req); + ret = strncpy_from_user(req->objectname, + u_info->objectname, MAX_URI_LEN-1); + if (ret <= 0) { + req->objectname[0] = 0; + req->objectname_len = 0; + GOTO_ERR_unlock; + } + req->objectname[ret] = 0; // string delimit + req->objectname_len = ret; + req->filelen = ret; + + Dprintk("got objectname {%s} (%d) from user-space req %p (req: %p).\n", req->objectname, req->objectname_len, u_info, req); + req->ti->userspace_req = NULL; + DEC_STAT(nr_userspace_pending); + user_get_object(req, 0); + goto eventloop; + + case TUX_ACTION_READ_OBJECT: + { + char *addr; + loff_t ppos = 0; + struct file *filp; + + if (!req->dentry) + GOTO_ERR_unlock; + + ret = copy_from_user(&addr, &u_info->object_addr, + sizeof(addr)); + if (ret) + GOTO_ERR_unlock; + filp = dentry_open(req->dentry, O_RDONLY, 0); + dget(req->dentry); + generic_file_read(filp, addr, req->filelen, &ppos); + fput(filp); + ret = TUX_RETURN_USERSPACE_REQUEST; + break; + } + + case TUX_ACTION_SEND_OBJECT: + if (!req->dentry) + GOTO_ERR_unlock; + req->ti->userspace_req = NULL; + DEC_STAT(nr_userspace_pending); + user_send_object(req, 0); + goto eventloop; + + case TUX_ACTION_SEND_BUFFER: + { + char *addr; + unsigned int len; + + ret = copy_from_user(&addr, + &u_info->object_addr, sizeof(addr)); + if (ret) + GOTO_ERR_unlock; + ret = copy_from_user(&len, + &u_info->objectlen, sizeof(addr)); + if (ret) + GOTO_ERR_unlock; + if ((int)len <= 0) + GOTO_ERR_unlock; + + ret = -EFAULT; + if (!access_ok(VERIFY_READ, addr, len)) + GOTO_ERR_unlock; + req->userbuf = addr; + req->userlen = len; + + req->ti->userspace_req = NULL; + DEC_STAT(nr_userspace_pending); + user_send_buffer(req, 0); + ret = 0; + goto eventloop; + } + + case TUX_ACTION_READ_HEADERS: + { + char *addr; + + ret = copy_from_user(&addr, &u_info->object_addr, + sizeof(addr)); + if (ret) + GOTO_ERR_unlock; + ret = copy_to_user(&u_info->objectlen, + &req->headers_len, sizeof(req->headers_len)); + if (ret) + GOTO_ERR_unlock; + ret = copy_to_user(addr,req->headers, req->headers_len); + if (ret) + GOTO_ERR_unlock; + break; + } + + case TUX_ACTION_READ_POST_DATA: + { + char *addr; + unsigned int size; + + ret = copy_from_user(&addr, &u_info->object_addr, + sizeof(addr)); + if (ret) + GOTO_ERR_unlock; + ret = copy_from_user(&size, &u_info->objectlen, + sizeof(size)); + if (ret) + GOTO_ERR_unlock; + Dprintk("READ_POST_DATA: got %p(%d).\n", addr, size); + if (req->post_data_len < size) + size = req->post_data_len; + Dprintk("READ_POST_DATA: writing %d.\n", size); + ret = copy_to_user(&u_info->objectlen, + &size, sizeof(size)); + if (ret) + GOTO_ERR_unlock; + ret = copy_to_user(addr, req->post_data_str, size); + if (ret) + GOTO_ERR_unlock; + goto out; + } + + default: + GOTO_ERR_unlock; + } + +out_userreq: + req = ti->userspace_req; + if (req) { + ret = prepare_userspace_req(ti, 
u_info); + if (ret < 0) { + TDprintk("hm, user req %p returned %d, zapping.\n", + req, ret); + zap_userspace_req(req); + goto eventloop; + } + } +out: + Dprintk("sys_tux(%d, %p) returning %d.\n", action, u_info, ret); + while (current->need_resched) { + __set_task_state(current, TASK_RUNNING); + schedule(); + } + return ret; +err_unlock: +err_no_unlock: + Dprintk("sys_tux(%d, %p) returning -EINVAL (ret:%d)!\n", action, u_info, ret); + while (current->need_resched) { + __set_task_state(current, TASK_RUNNING); + schedule(); + } + return -EINVAL; +} + +/* + * This gets called if a TUX thread does an exit(). + */ +void tux_exit (void) +{ + __sys_tux(TUX_ACTION_STOPTHREAD, NULL); +} + +int tux_init(void) +{ + start_sysctl(); + +#if CONFIG_TUX_MODULE + spin_lock(&tux_module_lock); + sys_tux_ptr = __sys_tux; + tux_module = THIS_MODULE; + spin_unlock(&tux_module_lock); +#endif + + return 0; +} + +void tux_cleanup (void) +{ +#if CONFIG_TUX_MODULE + spin_lock(&tux_module_lock); + tux_module = NULL; + sys_tux_ptr = NULL; + spin_unlock(&tux_module_lock); +#endif + + end_sysctl(); +} + +module_init(tux_init) +module_exit(tux_cleanup) + diff -rNu linux-2.4.9-ac10/net/tux/mod.c linux/net/tux/mod.c --- linux-2.4.9-ac10/net/tux/mod.c Thu Jan 1 01:00:00 1970 +++ linux/net/tux/mod.c Mon Sep 10 16:18:08 2001 @@ -0,0 +1,243 @@ +/* + * TUX - Integrated Application Protocols Layer and Object Cache + * + * Copyright (C) 2000, 2001, Ingo Molnar + * + * mod.c: loading/registering of dynamic TUX modules + */ + +#include +#include + +/**************************************************************** + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + ****************************************************************/ + +spinlock_t tuxmodules_lock = SPIN_LOCK_UNLOCKED; +static LIST_HEAD(tuxmodules_list); + +tcapi_template_t * get_first_usermodule (void) +{ + tcapi_template_t *tcapi; + struct list_head *head, *curr, *next; + + spin_lock(&tuxmodules_lock); + head = &tuxmodules_list; + next = head->next; + + while ((curr = next) != head) { + tcapi = list_entry(curr, tcapi_template_t, modules); + next = curr->next; + if (tcapi->userspace_id) { + spin_unlock(&tuxmodules_lock); + return tcapi; + } + } + spin_unlock(&tuxmodules_lock); + return NULL; +} + +static tcapi_template_t * lookup_module (const char *vfs_name) +{ + tcapi_template_t *tcapi; + struct list_head *head, *curr, *next; + + while (*vfs_name == '/') + vfs_name++; + Dprintk("looking up TUX module {%s}.\n", vfs_name); + head = &tuxmodules_list; + next = head->next; + + while ((curr = next) != head) { + tcapi = list_entry(curr, tcapi_template_t, modules); + next = curr->next; + Dprintk("checking module {%s} == {%s}?\n", vfs_name, tcapi->vfs_name); + if (!strcmp(tcapi->vfs_name, vfs_name)) + return tcapi; + } + return NULL; +} + +/* + * Attempt to load a TUX application module. 
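+ *
+ * Modules land on tuxmodules_list via register_tuxmodule(); user-space
+ * modules get there through user_register_module() below
+ * (TUX_ACTION_REGISTER_MODULE, which needs CAP_SYS_ADMIN). A rough
+ * user-space sketch, assuming only the user_req_t fields used in
+ * user_register_module() and a hypothetical "demo.tux" binding:
+ *
+ *	user_req_t u = { 0 };
+ *	u.version_major = TUX_MAJOR_VERSION;
+ *	u.version_minor = TUX_MINOR_VERSION;
+ *	u.module_index = 1;
+ *	strcpy(u.modulename, "demo.tux");
+ *	sys_tux(TUX_ACTION_REGISTER_MODULE, &u);
+ *
+ * Note that lookup_module() strips leading '/' characters, so
+ * "/demo.tux" and "demo.tux" name the same module.
+ *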
+ * This is the slow path, we cache ('link') the module's + * API vector to the inode. + * The module loading path is serialized, and we handshake + * with the loaded module and fetch its API vector. + */ +tcapi_template_t * lookup_tuxmodule (const char *filename) +{ + tcapi_template_t *tcapi; + + spin_lock(&tuxmodules_lock); + tcapi = lookup_module(filename); + if (!tcapi) + Dprintk("did not find module vfs:{%s}\n", filename); + spin_unlock(&tuxmodules_lock); + return tcapi; +} + + +int register_tuxmodule (tcapi_template_t *tcapi) +{ + int ret = -EEXIST; + + spin_lock(&tuxmodules_lock); + + if (lookup_module(tcapi->vfs_name)) { + Dprintk("module with VFS binding '%s' already registered!\n", + tcapi->vfs_name); + goto out; + } + + list_add(&tcapi->modules, &tuxmodules_list); + ret = 0; + Dprintk("TUX module %s registered.\n", tcapi->vfs_name); +out: + spin_unlock(&tuxmodules_lock); + + return ret; +} + +void unregister_all_tuxmodules (void) +{ + tcapi_template_t *tcapi; + struct list_head *curr; + + spin_lock(&tuxmodules_lock); + while (((curr = tuxmodules_list.next)) != &tuxmodules_list) { + tcapi = list_entry(curr, tcapi_template_t, modules); + list_del(curr); + kfree(tcapi->vfs_name); + kfree(tcapi); + } + spin_unlock(&tuxmodules_lock); +} + +tcapi_template_t * unregister_tuxmodule (char *vfs_name) +{ + tcapi_template_t *tcapi; + int err = 0; + + spin_lock(&tuxmodules_lock); + tcapi = lookup_module(vfs_name); + if (!tcapi) { + Dprintk("huh, module %s not registered??\n", vfs_name); + err = -1; + } else { + list_del(&tcapi->modules); + Dprintk("TUX module %s unregistered.\n", vfs_name); + } + spin_unlock(&tuxmodules_lock); + + return tcapi; +} + +static int check_module_version (user_req_t *u_info) +{ + int major, minor, patch, ret; + + ret = copy_from_user(&major, &u_info->version_major, sizeof(int)); + ret += copy_from_user(&minor, &u_info->version_minor, sizeof(int)); + ret += copy_from_user(&patch, &u_info->version_patch, sizeof(int)); + if (ret) + return -EFAULT; + + if ((major != TUX_MAJOR_VERSION) || (minor > TUX_MINOR_VERSION)) { + + printk(KERN_ERR "TUX: module version %d:%d incompatible with kernel version %d:%d!\n", major, minor, TUX_MAJOR_VERSION, TUX_MINOR_VERSION); + return -EINVAL; + } + return 0; +} + +int user_register_module (user_req_t *u_info) +{ + int idx, len, ret; + tcapi_template_t *tcapi; + char modulename [MAX_MODULENAME_LEN+1]; + + ret = check_module_version(u_info); + if (ret) + return ret; + + Dprintk("register user-module, %p.\n", u_info); + ret = strncpy_from_user(modulename, u_info->modulename, + MAX_MODULENAME_LEN); + if (ret <= 0) + goto out; + modulename[ret] = 0; + Dprintk("... user-module is: {%s}.\n", modulename); + len = strlen(modulename); + if (!len || (len > MAX_MODULENAME_LEN)) + return -EINVAL; + Dprintk("... user-module len is: %d.\n", len); + + ret = copy_from_user(&idx, &u_info->module_index, sizeof(int)); + if (ret || !idx) + goto out; + Dprintk("... user-module index is: %d.\n", idx); + + ret = -ENOMEM; + tcapi = (tcapi_template_t *) kmalloc(sizeof(*tcapi), GFP_KERNEL); + if (!tcapi) + goto out; + memset(tcapi, 0, sizeof(*tcapi)); + + tcapi->vfs_name = (char *) kmalloc(len+1, GFP_KERNEL); + if (!tcapi->vfs_name) { + kfree(tcapi); + goto out; + } + strcpy(tcapi->vfs_name, modulename); + tcapi->userspace_id = idx; + + Dprintk("... 
registering module {%s}.\n", tcapi->vfs_name); + ret = register_tuxmodule(tcapi); +out: + return ret; +} + +int user_unregister_module (user_req_t *u_info) +{ + int len, ret; + tcapi_template_t *tcapi; + char modulename [MAX_MODULENAME_LEN+1]; + + Dprintk("unregister user-module, %p.\n", u_info); + ret = strncpy_from_user(modulename, u_info->modulename, + MAX_MODULENAME_LEN); + if (ret <= 0) + goto out; + modulename[ret] = 0; + Dprintk("... user-module is: {%s}.\n", modulename); + len = strlen(modulename); + if (!len || (len > MAX_MODULENAME_LEN)) + return -EINVAL; + Dprintk("... user-module len is: %d.\n", len); + + Dprintk("... unregistering module {%s}.\n", modulename); + tcapi = unregister_tuxmodule(modulename); + ret = -EINVAL; + if (tcapi) { + ret = 0; + kfree(tcapi->vfs_name); + kfree(tcapi); + } +out: + return ret; +} + diff -rNu linux-2.4.9-ac10/net/tux/output.c linux/net/tux/output.c --- linux-2.4.9-ac10/net/tux/output.c Thu Jan 1 01:00:00 1970 +++ linux/net/tux/output.c Mon Sep 10 16:18:08 2001 @@ -0,0 +1,268 @@ +/* + * TUX - Integrated Application Protocols Layer and Object Cache + * + * Copyright (C) 2000, 2001, Ingo Molnar + * + * output.c: Send data to clients + */ + +#include + +/**************************************************************** + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + ****************************************************************/ + +int send_sync_buf (tux_req_t *req, struct socket *sock, const char *buf, const size_t length, unsigned long flags) +{ + struct msghdr msg; + struct iovec iov; + int len, written = 0, left = length; + + msg.msg_name = 0; + msg.msg_namelen = 0; + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + msg.msg_control = NULL; + msg.msg_controllen = 0; + msg.msg_flags = flags | MSG_NOSIGNAL; +repeat_send: + msg.msg_iov->iov_len = left; + msg.msg_iov->iov_base = (char *) buf + written; + + len = sock_sendmsg(sock, &msg, left); + + Dprintk("sendmsg ret: %d, written: %d, left: %d.\n", len,written,left); + if ((len == -ERESTARTSYS) || (!(flags & MSG_DONTWAIT) && + (len == -EAGAIN))) { + reap_kids(); + goto repeat_send; + } + if (len > 0) { + written += len; + left -= len; + if (left) + goto repeat_send; + } + if (len >= 0) { + if (written != length) + TUX_BUG(); + if (left) + TUX_BUG(); + } + if (req && (written > 0)) + req->bytes_sent += written; + Dprintk("sendmsg FINAL ret: %d, written: %d, left: %d.\n", len,written,left); + return written ? 
written : len; +} + +int tux_zerocopy_sendfile = 1; + +typedef struct sock_send_desc +{ + struct socket *sock; + int push; +} sock_send_desc_t; + +int sock_send_actor (read_descriptor_t * desc, struct page *page, + unsigned long offset, unsigned long size) +{ + sock_send_desc_t *sock_desc = (sock_send_desc_t *)desc->buf; + struct socket *sock = sock_desc->sock; + int push = sock_desc->push; + unsigned int flags; + ssize_t written; + + if (desc->count < size) + size = desc->count; + Dprintk("sock_send_actor(), page: %p, offset: %ld, size: %ld, sock: %p, desc->count: %d, desc->written: %d.\n", page, offset, size, sock, desc->count, desc->written); + flags = MSG_DONTWAIT | MSG_NOSIGNAL; + if (!push || !(desc->count == size)) + flags |= MSG_MORE; + if (tux_zerocopy_sendfile && sock->ops->sendpage && + (sock->sk->route_caps&NETIF_F_SG)) { + written = sock->ops->sendpage(sock, page, offset, size, flags); + } else { + struct msghdr msg; + struct iovec iov; + char *kaddr; + mm_segment_t oldmm; + + if (offset+size > PAGE_SIZE) + return -EFAULT; + + kaddr = kmap(page); + + msg.msg_name = NULL; + msg.msg_namelen = 0; + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + msg.msg_control = NULL; + msg.msg_controllen = 0; + msg.msg_flags = flags; + iov.iov_base = kaddr + offset; + iov.iov_len = size; + + oldmm = get_fs(); set_fs(KERNEL_DS); + written = sock_sendmsg(sock, &msg, size); + set_fs(oldmm); + + Dprintk("kaddr: %p, offset: %ld, size: %ld, written: %d.\n", kaddr, offset, size, written); + kunmap(page); + } + if (written < 0) { + desc->error = written; + written = 0; + } + Dprintk("desc->count: %d, desc->written: %d, written: %d.\n", desc->count, desc->written, written); + desc->count -= written; + if (desc->count < 0) + TUX_BUG(); + desc->written += written; + + return written; +} + +/* + * Return 1 if the output space condition went away + * before adding the handler. + */ +int add_output_space_event (tux_req_t *req, struct socket *sock) +{ + struct sock *sk = sock->sk; + /* + * blocked due to socket IO? 
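+ *
+ * The idea behind the ordering below: the wait_output_space bit is
+ * set (and the keepalive timer re-armed) under ti->work_lock *before*
+ * the write-space re-check, so a racing write-space wakeup should
+ * either see the bit already set, or we notice the free space
+ * ourselves, clear the bit again and return 1. Callers retry
+ * immediately on 1, e.g. user_send_buffer():
+ *
+ *	if (add_output_space_event(req, req->sock)) {
+ *		del_tux_atom(req);
+ *		goto repeat;
+ *	}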
+ */ + spin_lock_irq(&req->ti->work_lock); + add_keepalive_timer(req); + if (test_and_set_bit(0,&req->wait_output_space)) + TUX_BUG(); + INC_STAT(nr_output_space_pending); + + if ((sk->state == TCP_ESTABLISHED) && enough_wspace(sk)) { + if (test_and_clear_bit(0, &req->wait_output_space)) { + DEC_STAT(nr_output_space_pending); + del_keepalive_timer(req); + spin_unlock_irq(&req->ti->work_lock); +// tux_push_pending(sk); + return 1; + } + } + spin_unlock_irq(&req->ti->work_lock); + + return 0; +} + +#define SEND_BLOCKSIZE 8192 + +int generic_send_file (tux_req_t *req, int push, int nonblock, + struct socket *sock) +{ + sock_send_desc_t sock_desc; + int len, want; + +repeat: + Dprintk("generic_send_file(%p,%d,%d,%p) called, f_pos: %d.\n", req, push, nonblock, sock, (int)req->in_file.f_pos); + if (connection_too_fast(req) == 2) { + len = -5; + goto out; + } + if (req->filelen < req->in_file.f_pos) + TUX_BUG(); + + req->desc.written = 0; + want = req->filelen - req->in_file.f_pos; + if (want > SEND_BLOCKSIZE) + want = SEND_BLOCKSIZE; + req->desc.count = want; + sock_desc.sock = sock; + sock_desc.push = push; + req->desc.buf = (char *) &sock_desc; + req->desc.error = 0; + Dprintk("sendfile(), desc.count: %d.\n", req->desc.count); + do_generic_file_read(&req->in_file, &req->in_file.f_pos, &req->desc, sock_send_actor, nonblock); + if (req->desc.written > 0) + req->bytes_sent += req->desc.written; + if (!nonblock && (req->desc.error == -EWOULDBLOCKIO)) + TUX_BUG(); + Dprintk("sendfile() wrote: %d bytes.\n", req->desc.written); + + switch (req->desc.error) { + + case -EWOULDBLOCKIO: + len = -3; + break; + case -EAGAIN: + Dprintk("sk->wmem_queued: %d, sk->sndbuf: %d.\n", + sock->sk->wmem_queued, sock->sk->sndbuf); + len = -4; + break; + default: + len = req->desc.written; +#if CONFIG_TUX_DEBUG + if (req->desc.error) + printk(KERN_ERR "TUX: sendfile() returned error %d (signals pending: %08lx)!\n", req->desc.error, current->pending.signal.sig[0]); +#endif + if (!req->desc.error && (req->filelen > req->in_file.f_pos)) + goto repeat; +#if CONFIG_TUX_DEBUG + if (req->desc.written != want) + printk(KERN_ERR "TUX: sendfile() wrote %d bytes, wanted %d! 
(pos %d) (signals pending: %08lx).\n", req->desc.written, want, (int)req->in_file.f_pos, current->pending.signal.sig[0]); + else + Dprintk("TUX: sendfile() FINISHED for req %p, wrote %d bytes.\n", req, req->desc.written); + req->bytes_expected = 0; +#endif + req->in_file.f_pos = 0; + break; + } + +out: + Dprintk("sendfile() wrote %d bytes.\n", len); + + return len; +} + +static int file_fetch_actor (read_descriptor_t * desc, struct page *page, + unsigned long offset, unsigned long size) +{ + if (desc->count < size) + size = desc->count; + + desc->count -= size; + desc->written += size; + + return size; +} + +int tux_fetch_file (tux_req_t *req, int nonblock) +{ + int len; + + req->desc.written = 0; + req->desc.count = req->filelen - req->in_file.f_pos; + req->desc.buf = NULL; + req->desc.error = 0; + + do_generic_file_read(&req->in_file, &req->in_file.f_pos, &req->desc, + file_fetch_actor, nonblock); + if (nonblock && (req->desc.error == -EWOULDBLOCKIO)) + return 1; + len = req->desc.written; + if (req->desc.error) + Dprintk("fetchfile() returned %d error!\n", req->desc.error); + Dprintk("fetchfile() fetched %d bytes.\n", len); + return 0; +} + diff -rNu linux-2.4.9-ac10/net/tux/parser.h linux/net/tux/parser.h --- linux-2.4.9-ac10/net/tux/parser.h Thu Jan 1 01:00:00 1970 +++ linux/net/tux/parser.h Mon Sep 10 16:18:08 2001 @@ -0,0 +1,92 @@ +/* + * TUX - Integrated Application Protocols Layer and Object Cache + * + * Copyright (C) 2000, Ingo Molnar + * + * parser.h: generic parsing routines + */ + +#define get_c(ptr,left) \ +({ \ + unsigned char __ret; \ + \ + if (!left) \ + GOTO_INCOMPLETE; \ + left--; \ + __ret = *((ptr)++); \ + if (!__ret) \ + GOTO_REDIR; \ + __ret; \ +}) + +#define PARSE_TOKEN(ptr,str,left) \ + ({ \ + int __ret; \ + \ + if (!left) \ + GOTO_INCOMPLETE; \ + if (sizeof(str)-1 > left) { \ + if (memcmp(ptr, str, left)) \ + GOTO_REDIR; \ + GOTO_INCOMPLETE; \ + } \ + \ + if (memcmp(ptr, str, sizeof(str)-1)) \ + __ret = 0; \ + else { \ + ptr += sizeof(str)-1; \ + left -= sizeof(str)-1; \ + __ret = 1; \ + } \ + __ret; \ + }) + +#define PARSE_METHOD(req,ptr,name,left) \ + ({ \ + int __ret; \ + \ + if (PARSE_TOKEN(ptr,#name" ",left)) { \ + req->method = METHOD_##name; \ + __ret = 1; \ + } else \ + __ret = 0; \ + __ret; \ + }) + +#define COPY_LINE(ptr,target,left) \ + do { \ + char prev_c = 0, c; \ + while (((c = get_c(ptr,left))) != '\n') \ + *target++ = prev_c = c; \ + if (prev_c != '\r') \ + GOTO_REDIR; \ + } while (0) + +#define COPY_LINE_TOLOWER(ptr,target,left) \ + do { \ + char prev_c = 0, c; \ + while (((c = get_c(ptr,left))) != '\n') { \ + if ((c >= 'A') && (c <= 'Z')) \ + c -= 'A'-'a'; \ + *target++ = prev_c = c; \ + } \ + if (prev_c != '\r') \ + GOTO_REDIR; \ + } while (0) + +#define COPY_FIELD(ptr,target,left) \ + do { \ + char c; \ + while ((c = get_c(ptr,left)) != ' ') \ + *target++ = c; \ + } while (0) + +#define SKIP_LINE(ptr,left) \ + do { \ + char prev_c = 0, c; \ + while (((c = get_c(ptr,left))) != '\n') \ + prev_c = c; \ + if (prev_c != '\r') \ + GOTO_REDIR; \ + } while (0) + diff -rNu linux-2.4.9-ac10/net/tux/postpone.c linux/net/tux/postpone.c --- linux-2.4.9-ac10/net/tux/postpone.c Thu Jan 1 01:00:00 1970 +++ linux/net/tux/postpone.c Mon Sep 10 16:18:08 2001 @@ -0,0 +1,77 @@ +/* + * TUX - Integrated Application Protocols Layer and Object Cache + * + * Copyright (C) 2000, 2001, Ingo Molnar + * + * postpone.c: postpone/continue userspace requests + */ + +#include + +/**************************************************************** + * This program is free 
software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + ****************************************************************/ + +void postpone_request (tux_req_t *req) +{ + if (!req->usermode) + TUX_BUG(); + INC_STAT(nr_postpone_pending); + req->postponed = 1; +} + +/* + * Continue a postponed request. The request will show up in the + * userspace queue and will be handled by the fast thread. + * A request can only be postponed in a TUX process, but can be + * continued from any process that has access to the socket file + * descriptor. + */ +int continue_request (int fd) +{ + threadinfo_t *ti; + struct socket *sock; + tux_req_t *req; + int err; + + sock = sockfd_lookup(fd, &err); + if (!sock || !sock->sk) + goto out; + req = sock->sk->user_data; + + err = -EINVAL; + if (!req) + goto out_put; + ti = req->ti; + if (!req->postponed) + goto out_unlock_put; + if (!req->usermode) + TUX_BUG(); + + req->postponed = 0; + DEC_STAT(nr_postpone_pending); + + Dprintk("continuing postponed req %p.\n", req); + add_req_to_workqueue(req); + +out_unlock_put: + err = 0; +out_put: + fput(sock->file); +out: + return err; +} + diff -rNu linux-2.4.9-ac10/net/tux/proc.c linux/net/tux/proc.c --- linux-2.4.9-ac10/net/tux/proc.c Thu Jan 1 01:00:00 1970 +++ linux/net/tux/proc.c Mon Sep 10 16:18:08 2001 @@ -0,0 +1,742 @@ +/* + * TUX - Integrated Application Protocols Layer and Object Cache + * + * Copyright (C) 2000, 2001, Ingo Molnar + * + * proc.c: /proc/sys/tux handling + */ + +#include + +/**************************************************************** + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + ****************************************************************/ + +char tux_docroot[200] = "/var/www/tux/"; +char tux_logfile[200] = "/var/log/tux"; +char tux_cgiroot[200] = "/var/www/tux/cgiroot/"; +char tux_404_page[200] = "404.html"; +char tux_default_vhost[200] = "default"; + +int tux_cgi_uid = -1; +int tux_cgi_gid = -1; +int tux_clientport = 8080; +int tux_logging = 0; +int tux_serverport= 80; +int tux_threads = 2; +int tux_max_connect = 10000; +int tux_max_keepalives = 10000; +int tux_max_backlog = 2048; +int tux_keepalive_timeout = 0; +int tux_max_output_bandwidth = 0; +int tux_defer_accept = 1; +int tux_mode_forbidden = 0 /*S_IXUGO*/; /* do not allow executable (CGI) files */ +int tux_mode_allowed = S_IROTH; /* allow access if read-other is set */ +int multifragment_api = 1; +int pagecache_special = 0; +int virtual_server = 0; +int tux_max_object_size = 0; +unsigned int log_cpu_mask = ~0; +int tux_compression = 0; +int tux_noid = 0; +int tux_cgi_inherit_cpu = 0; +unsigned int tux_cgi_cpu_mask = ~0; +int tux_zerocopy_header = 1; +unsigned int tux_max_free_requests = 1000; +int tux_all_userspace = 0; +int tux_redirect_logging = 1; +unsigned int tux_max_header_len = 3000; +int tux_application_protocol = 0; +int tux_referer_logging = 0; +int tux_generate_etags = 1; +int tux_ip_logging = 1; +int tux_ftp_wait_close = 1; + +static struct ctl_table_header *tux_table_header; + +static ctl_table tux_table[] = { + { NET_TUX_DOCROOT, + "documentroot", + &tux_docroot, + sizeof(tux_docroot), + 0644, + NULL, + proc_dostring, + &sysctl_string, + NULL, + NULL, + NULL + }, + { NET_TUX_LOGFILE, + "logfile", + &tux_logfile, + sizeof(tux_logfile), + 0644, + NULL, + proc_dostring, + &sysctl_string, + NULL, + NULL, + NULL + }, + { NET_TUX_THREADS, + "threads", + &tux_threads, + sizeof(int), + 0644, + NULL, + proc_dointvec, + &sysctl_intvec, + NULL, + NULL, + NULL + }, + { NET_TUX_KEEPALIVE_TIMEOUT, + "keepalive_timeout", + &tux_keepalive_timeout, + sizeof(int), + 0644, + NULL, + proc_dointvec, + &sysctl_intvec, + NULL, + NULL, + NULL + }, + { NET_TUX_MAX_KEEPALIVE_BW, + "max_output_bandwidth", + &tux_max_output_bandwidth, + sizeof(int), + 0644, + NULL, + proc_dointvec, + &sysctl_intvec, + NULL, + NULL, + NULL + }, + { NET_TUX_DEFER_ACCEPT, + "defer_accept", + &tux_defer_accept, + sizeof(int), + 0644, + NULL, + proc_dointvec, + &sysctl_intvec, + NULL, + NULL, + NULL + }, + { NET_TUX_MAX_BACKLOG, + "max_backlog", + &tux_max_backlog, + sizeof(int), + 0644, + NULL, + proc_dointvec, + &sysctl_intvec, + NULL, + NULL, + NULL + }, + { NET_TUX_MAX_CONNECT, + "max_connect", + &tux_max_connect, + sizeof(int), + 0644, + NULL, + proc_dointvec, + &sysctl_intvec, + NULL, + NULL, + NULL + }, + { NET_TUX_MAX_KEEPALIVES, + "max_keepalives", + &tux_max_keepalives, + sizeof(int), + 0644, + NULL, + proc_dointvec, + &sysctl_intvec, + NULL, + NULL, + NULL + }, + { NET_TUX_MODE_FORBIDDEN, + "mode_forbidden", + &tux_mode_forbidden, + sizeof(int), + 0644, + NULL, + proc_dointvec, + &sysctl_intvec, + NULL, + NULL, + NULL + }, + { NET_TUX_MODE_ALLOWED, + "mode_allowed", + &tux_mode_allowed, + sizeof(int), + 0644, + NULL, + proc_dointvec, + &sysctl_intvec, + NULL, + NULL, + NULL + }, + { NET_TUX_CGI_UID, + "cgi_uid", + &tux_cgi_uid, + sizeof(int), + 0644, + NULL, + proc_dointvec, + &sysctl_intvec, + NULL, + NULL, + NULL + }, + { NET_TUX_CGI_GID, + "cgi_gid", + &tux_cgi_gid, + sizeof(int), + 0644, + NULL, + proc_dointvec, + &sysctl_intvec, + NULL, + NULL, + NULL + }, + { NET_TUX_CGIROOT, + "cgiroot", + &tux_cgiroot, + 
sizeof(tux_cgiroot), + 0644, + NULL, + proc_dostring, + &sysctl_string, + NULL, + NULL, + NULL + }, + { NET_TUX_404_PAGE, + "404_page", + &tux_404_page, + sizeof(tux_404_page), + 0644, + NULL, + proc_dostring, + &sysctl_string, + NULL, + NULL, + NULL + }, + { NET_TUX_404_PAGE, + "default_vhost", + &tux_default_vhost, + sizeof(tux_default_vhost), + 0644, + NULL, + proc_dostring, + &sysctl_string, + NULL, + NULL, + NULL + }, + { NET_TUX_CLIENTPORT, + "clientport", + &tux_clientport, + sizeof(int), + 0644, + NULL, + proc_dointvec, + &sysctl_intvec, + NULL, + NULL, + NULL + }, + { NET_TUX_CLIENTPORT, + "generate_etags", + &tux_generate_etags, + sizeof(int), + 0644, + NULL, + proc_dointvec, + &sysctl_intvec, + NULL, + NULL, + NULL + }, + { NET_TUX_CLIENTPORT, + "ip_logging", + &tux_ip_logging, + sizeof(int), + 0644, + NULL, + proc_dointvec, + &sysctl_intvec, + NULL, + NULL, + NULL + }, + { NET_TUX_CLIENTPORT, + "ftp_wait_close", + &tux_ftp_wait_close, + sizeof(int), + 0644, + NULL, + proc_dointvec, + &sysctl_intvec, + NULL, + NULL, + NULL + }, +#if CONFIG_TUX_DEBUG + { NET_TUX_LOGGING, + "Dprintk", + &tux_Dprintk, + sizeof(int), + 0644, + NULL, + proc_dointvec, + &sysctl_intvec, + NULL, + NULL, + NULL + }, +#endif + { NET_TUX_LOGGING, + "logging", + &tux_logging, + sizeof(int), + 0644, + NULL, + proc_dointvec, + &sysctl_intvec, + NULL, + NULL, + NULL + }, + { NET_TUX_SERVERPORT, + "serverport", + &tux_serverport, + sizeof(int), + 0644, + NULL, + proc_dointvec, + &sysctl_intvec, + NULL, + NULL, + NULL + }, + { NET_TUX_LOGENTRY_ALIGN_ORDER, + "logentry_align_order", + &tux_logentry_align_order, + sizeof(int), + 0644, + NULL, + proc_dointvec, + &sysctl_intvec, + NULL, + NULL, + NULL + }, + { NET_TUX_NONAGLE, + "nonagle", + &tux_nonagle, + sizeof(int), + 0644, + NULL, + proc_dointvec, + &sysctl_intvec, + NULL, + NULL, + NULL + }, + { NET_TUX_ACK_PINGPONG, + "ack_pingpong", + &tux_ack_pingpong, + sizeof(int), + 0644, + NULL, + proc_dointvec, + &sysctl_intvec, + NULL, + NULL, + NULL + }, + { NET_TUX_PUSH_ALL, + "push_all", + &tux_push_all, + sizeof(int), + 0644, + NULL, + proc_dointvec, + &sysctl_intvec, + NULL, + NULL, + NULL + }, + { NET_TUX_ZEROCOPY_PARSE, + "zerocopy_parse", + &tux_zerocopy_parse, + sizeof(int), + 0644, + NULL, + proc_dointvec, + &sysctl_intvec, + NULL, + NULL, + NULL + }, + { NET_TUX_VIRTUAL_SERVER, + "virtual_server", + &virtual_server, + sizeof(int), + 0644, + NULL, + proc_dointvec, + &sysctl_intvec, + NULL, + NULL, + NULL + }, + { NET_TUX_MAX_OBJECT_SIZE, + "max_object_size", + &tux_max_object_size, + sizeof(int), + 0644, + NULL, + proc_dointvec, + &sysctl_intvec, + NULL, + NULL, + NULL + }, + { NET_TUX_COMPRESSION, + "compression", + &tux_compression, + sizeof(int), + 0644, + NULL, + proc_dointvec, + &sysctl_intvec, + NULL, + NULL, + NULL + }, + { NET_TUX_NOID, + "noid", + &tux_noid, + sizeof(int), + 0644, + NULL, + proc_dointvec, + &sysctl_intvec, + NULL, + NULL, + NULL + }, + { NET_TUX_CGI_INHERIT_CPU, + "cgi_inherit_cpu", + &tux_cgi_inherit_cpu, + sizeof(int), + 0644, + NULL, + proc_dointvec, + &sysctl_intvec, + NULL, + NULL, + NULL + }, + { NET_TUX_CGI_CPU_MASK, + "cgi_cpu_mask", + &tux_cgi_cpu_mask, + sizeof(int), + 0644, + NULL, + proc_dointvec, + &sysctl_intvec, + NULL, + NULL, + NULL + }, + { NET_TUX_ZEROCOPY_HEADER, + "zerocopy_header", + &tux_zerocopy_header, + sizeof(int), + 0644, + NULL, + proc_dointvec, + &sysctl_intvec, + NULL, + NULL, + NULL + }, + { NET_TUX_ZEROCOPY_SENDFILE, + "zerocopy_sendfile", + &tux_zerocopy_sendfile, + sizeof(int), + 0644, + NULL, + 
proc_dointvec, + &sysctl_intvec, + NULL, + NULL, + NULL + }, + { NET_TUX_MAX_FREE_REQUESTS, + "max_free_requests", + &tux_max_free_requests, + sizeof(int), + 0644, + NULL, + proc_dointvec, + &sysctl_intvec, + NULL, + NULL, + NULL + }, + { NET_TUX_ALL_USERSPACE, + "all_userspace", + &tux_all_userspace, + sizeof(int), + 0644, + NULL, + proc_dointvec, + &sysctl_intvec, + NULL, + NULL, + NULL + }, + { NET_TUX_REDIRECT_LOGGING, + "redirect_logging", + &tux_redirect_logging, + sizeof(int), + 0644, + NULL, + proc_dointvec, + &sysctl_intvec, + NULL, + NULL, + NULL + }, + { NET_TUX_REFERER_LOGGING, + "referer_logging", + &tux_referer_logging, + sizeof(int), + 0644, + NULL, + proc_dointvec, + &sysctl_intvec, + NULL, + NULL, + NULL + }, + { NET_TUX_MAX_HEADER_LEN, + "max_header_len", + &tux_max_header_len, + sizeof(int), + 0644, + NULL, + proc_dointvec, + &sysctl_intvec, + NULL, + NULL, + NULL + }, + { NET_TUX_APPLICATION_PROTOCOL, + "application_protocol", + &tux_application_protocol, + sizeof(int), + 0644, + NULL, + proc_dointvec, + &sysctl_intvec, + NULL, + NULL, + NULL + }, + {0,0,0,0,0,0,0,0,0,0,0} }; + + +static ctl_table tux_dir_table[] = { + {NET_TUX, "tux", NULL, 0, 0555, tux_table,0,0,0,0,0}, + {0,0,0,0,0,0,0,0,0,0,0} +}; + +static ctl_table tux_root_table[] = { + {CTL_NET, "net", NULL, 0, 0555, tux_dir_table,0,0,0,0,0}, + {0,0,0,0,0,0,0,0,0,0,0} +}; + + +static struct proc_dir_entry * root_tux_dir; +static struct proc_dir_entry * log_cpu_mask_entry; +static struct proc_dir_entry * tux_dir [CONFIG_TUX_NUMTHREADS]; +static struct proc_dir_entry * listen_dir [CONFIG_TUX_NUMTHREADS]; +static struct proc_dir_entry * listen_entries [CONFIG_TUX_NUMTHREADS][CONFIG_TUX_NUMSOCKETS]; + +unsigned int tux_listen [CONFIG_TUX_NUMTHREADS][CONFIG_TUX_NUMSOCKETS] = + { [0 ... CONFIG_TUX_NUMTHREADS-1] = { 0, [1 ... CONFIG_TUX_NUMSOCKETS-1] = -1 } }; + +#define HEX_DIGITS 8 + +static int hex_read_proc (char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + if (count < HEX_DIGITS+1) + return -EINVAL; + return sprintf (page, "%08x\n", *(unsigned int *)data); +} + +static int hex_write_proc (struct file *file, const char *buffer, + unsigned long count, void *data) +{ + unsigned char hexnum [HEX_DIGITS]; + unsigned int new_value; + int i, full_count = count; + + if (!count) + return -EINVAL; + if (count > HEX_DIGITS) + count = HEX_DIGITS; + if (copy_from_user(hexnum, buffer, count)) + return -EFAULT; + + /* + * Parse the first 8 characters as a hex string, any non-hex char + * is end-of-string. '00e1', 'e1', '00E1', 'E1' are the same. + */ + new_value = 0; + + for (i = 0; i < count; i++) { + unsigned int c = hexnum[i]; + + switch (c) { + case '0' ... '9': c -= '0'; break; + case 'a' ... 'f': c -= 'a'-10; break; + case 'A' ... 
'F': c -= 'A'-10; break; + default: + goto out; + } + new_value = (new_value << 4) | c; + } +out: + *(int *)data = new_value; + + return full_count; +} + +#define MAX_NAMELEN 10 + +static void register_tux_proc (unsigned int nr) +{ + struct proc_dir_entry *entry; + char name [MAX_NAMELEN]; + int i; + + if (!root_tux_dir) + TUX_BUG(); + + sprintf(name, "%d", nr); + + /* create /proc/net/tux/1234/ */ + tux_dir[nr] = proc_mkdir(name, root_tux_dir); + + /* create /proc/net/tux/1234/listen/ */ + listen_dir[nr] = proc_mkdir("listen", tux_dir[nr]); + + /* create /proc/net/tux/1234/listen/ */ + for (i = 0; i < CONFIG_TUX_NUMSOCKETS; i++) { + sprintf(name, "%d", i); + entry = create_proc_entry(name, 0700, listen_dir[nr]); + + entry->nlink = 1; + entry->data = (void *)&tux_listen[nr][i]; + entry->read_proc = hex_read_proc; + entry->write_proc = hex_write_proc; + + listen_entries[nr][i] = entry; + } +} + +static void unregister_tux_proc (unsigned int nr) +{ + int i; + + for (i = 0; i < CONFIG_TUX_NUMSOCKETS; i++) + remove_proc_entry(listen_entries[nr][i]->name, listen_dir[nr]); + + remove_proc_entry(listen_dir[nr]->name, tux_dir[nr]); + + remove_proc_entry(tux_dir[nr]->name, root_tux_dir); +} + +static void cleanup_tux_proc (void) +{ + int i; + + Dprintk("cleaning up /proc/net/tux/\n"); + + for (i = 0; i < CONFIG_TUX_NUMTHREADS; i++) + unregister_tux_proc(i); + remove_proc_entry(log_cpu_mask_entry->name, root_tux_dir); + remove_proc_entry(root_tux_dir->name, proc_net); +} + +static void init_tux_proc (void) +{ + struct proc_dir_entry *entry; + int i; + + if (root_tux_dir) + return; + + /* create /proc/net/tux */ + root_tux_dir = proc_mkdir("tux", proc_net); + + entry = create_proc_entry("log_cpu_mask", 0700, root_tux_dir); + + entry->nlink = 1; + entry->data = (void *)&log_cpu_mask; + entry->read_proc = hex_read_proc; + entry->write_proc = hex_write_proc; + + log_cpu_mask_entry = entry; + + /* + * Create entries for all existing threads. + */ + for (i = 0; i < CONFIG_TUX_NUMTHREADS; i++) + register_tux_proc(i); +} + +void start_sysctl(void) +{ + init_tux_proc(); + tux_table_header = register_sysctl_table(tux_root_table,1); +} + +void end_sysctl(void) +{ + cleanup_tux_proc(); + unregister_sysctl_table(tux_table_header); +} diff -rNu linux-2.4.9-ac10/net/tux/proto_ftp.c linux/net/tux/proto_ftp.c --- linux-2.4.9-ac10/net/tux/proto_ftp.c Thu Jan 1 01:00:00 1970 +++ linux/net/tux/proto_ftp.c Mon Sep 10 16:18:08 2001 @@ -0,0 +1,1337 @@ +/* + * TUX - Integrated Application Protocols Layer and Object Cache + * + * Copyright (C) 2000, 2001, Ingo Molnar + * + * ftp_proto.c: FTP application protocol support + */ + +#define __KERNEL_SYSCALLS__ +#include + +/**************************************************************** + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + ****************************************************************/ + +#define HELLO "220 Linux TUX FTP server welcomes you!\r\n" +#define WRITE_DONE "226 Transfer complete.\r\n" +#define BAD_FILENAME "550 No such file or directory.\r\n" +#define GOOD_DIR "250 CWD command successful.\r\n" +#define WRITE_FILE "150 Opening BINARY mode data connection.\r\n" +#define LIST_ERR "503 LIST without PORT! Closing connection.\r\n" +#define WRITE_LIST "150 Opening ASCII mode data connection.\r\n" +#define RETR_ERR "503 RETR without PORT! Closing connection.\r\n" +#define PORT_OK "200 PORT command successful.\r\n" +#define LOGIN_OK "230-There are currently %d users logged in, out of %d maximum.\r\n230 TUX Guest login ok.\r\n" +#define LOGIN_OK_ONE "230-There is currently 1 user logged in, out of %d maximum.\r\n230 TUX Guest login ok.\r\n" +#define LOGIN_FORBIDDEN "530 Sorry, Login Denied!\r\n" +#define TYPE_OK "200 Type set to I.\r\n" +#define BYE "221 Thank You for using TUX!\r\n" +#define NOT_IMPLEMENTED "502 Command not implemented.\r\n" +#define CLOSE_2 "221 Cannot handle request, closing connection!\r\n" +#define CLOSE "500 Command not understood.\r\n" +#define CLOSE_TIMEOUT "421 Timeout, closing connection!\r\n" +#define LINUX_SYST "215 UNIX Type: L8, Linux 2.4 TUX/FTP 2.0 Server\r\n" +#define NO_EXTRA_FEATURES \ + "211 No Extra Features\r\n" +#define COMMAND_OK "200 Command OK.\r\n" +#define WRITE_ABORTED "426 Transfer aborted. Data connection closed.\r\n" +#define SITE "214 No SITE commands are recognized.\r\n" + +static void ftp_got_request (tux_req_t *req) +{ + add_tux_atom(req, parse_request); + __send_async_message(req, HELLO, 220, 1); +} + +#define GOTO_ERR { TDprintk("FTP protocol error at: %s:%d\n", \ + __FILE__, __LINE__); goto error; } + +static void zap_data_socket (tux_req_t *req) +{ + if (req->ftp_data_sock) { + Dprintk("zapping req %p's data socket %p.\n", + req, req->ftp_data_sock); + unlink_tux_ftp_data_socket(req); + sock_release(req->ftp_data_sock); + req->ftp_data_sock = NULL; + } +} +static void ftp_execute_command (tux_req_t *req, int cachemiss); + +static int parse_ftp_message (tux_req_t *req, const int total_len) +{ + int comm, comm1 = 0, comm2 = 0, comm3 = 0, comm4 = 0; + int newline_pos, i; + char *mess, *curr; + + curr = mess = req->headers; + + Dprintk("FTP parser got %d bytes: --->{%s}<---\n", total_len, curr); + + newline_pos = -1; + for (i = 0; i < total_len; i++, curr++) { + if (!*curr) + GOTO_ERR; + if (!(*curr == '\r') || !(*(curr+1) == '\n')) + continue; + newline_pos = i; + break; + } + Dprintk("Newline pos: %d\n", newline_pos); + if (newline_pos == -1) { + Dprintk("incomplete mess on req %p!\n", req); + return 0; + } + if (newline_pos < 3) + GOTO_ERR; + +#define STRING_VAL(c1,c2,c3,c4) \ + ((c1) + ((c2) << 8) + ((c3) << 16) + ((c4) << 24)) +#define STRING_VAL_STR(str) \ + STRING_VAL(str[0], str[1], str[2], str[3]) + + Dprintk("string val (%c%c%c%c): %08x\n", + mess[0], mess[1], mess[2], mess[3], + STRING_VAL_STR(mess)); + +#define PARSE_FTP_COMM(c1,c2,c3,c4,name,num) \ + if (STRING_VAL_STR(mess) == STRING_VAL(c1,c2,c3,c4)) \ + { \ + Dprintk("parsed "#name".\n"); \ + comm##num = FTP_COMM_##name; \ + } + + PARSE_FTP_COMM('P','A','S','S', PASS,1); + PARSE_FTP_COMM('A','C','C','T', ACCT,2); + PARSE_FTP_COMM('C','D','U','P', CDUP,3); + PARSE_FTP_COMM('S','M','N','T', SMNT,4); + PARSE_FTP_COMM('Q','U','I','T', QUIT,1); + PARSE_FTP_COMM('R','E','I','N', REIN,2); + PARSE_FTP_COMM('P','A','S','V', PASV,3); + PARSE_FTP_COMM('S','T','R','U', STRU,4); + 
PARSE_FTP_COMM('M','O','D','E', MODE,1); + PARSE_FTP_COMM('S','T','O','R', STOR,2); + PARSE_FTP_COMM('S','T','O','U', STOU,3); + PARSE_FTP_COMM('A','P','P','E', APPE,4); + PARSE_FTP_COMM('A','L','L','O', ALLO,1); + PARSE_FTP_COMM('R','N','F','R', RNFR,2); + PARSE_FTP_COMM('R','N','T','O', RNTO,3); + PARSE_FTP_COMM('A','B','O','R', ABOR,4); + PARSE_FTP_COMM('D','E','L','E', DELE,1); + PARSE_FTP_COMM('R','M','D',' ', RMD, 2); + PARSE_FTP_COMM('M','K','D',' ', MKD, 3); + PARSE_FTP_COMM('P','W','D',' ', PWD, 4); + PARSE_FTP_COMM('S','Y','S','T', SYST,2); + PARSE_FTP_COMM('N','O','O','P', NOOP,3); + PARSE_FTP_COMM('F','E','A','T', FEAT,4); + + comm = comm1 | comm2 | comm3 | comm4; + + if (comm) { + if (newline_pos != 4) + GOTO_ERR; + req->ftp_command = comm; + goto out; + } + + switch (STRING_VAL(mess[0], mess[1], mess[2], mess[3])) { + +#define PARSE_FTP_COMM_3CHAR(c1,c2,c3,name) \ + case STRING_VAL(c1,c2,c3,'\r'): \ + { \ + Dprintk("parsed "#name".\n"); \ + req->ftp_command = FTP_COMM_##name; \ + if (newline_pos != 3) \ + GOTO_ERR; \ + } + +#define PARSE_FTP_3CHAR_COMM_IGNORE(c1,c2,c3,name) \ + case STRING_VAL(c1,c2,c3,' '): \ + { \ + Dprintk("parsed "#name".\n"); \ + req->ftp_command = FTP_COMM_##name; \ + } + +#define PARSE_FTP_COMM_IGNORE(c1,c2,c3,c4,name) \ + case STRING_VAL(c1,c2,c3,c4): \ + { \ + Dprintk("parsed "#name".\n"); \ + req->ftp_command = FTP_COMM_##name; \ + } + +#define PARSE_FTP_3CHAR_COMM_1_FIELD(c1,c2,c3,name,field,field_len,max) \ + case STRING_VAL(c1,c2,c3,' '): \ + { \ + Dprintk("parsed "#name".\n"); \ + req->ftp_command = FTP_COMM_##name; \ + if (newline_pos == 4) \ + GOTO_ERR; \ + if (newline_pos >= 5) { \ + curr = mess + 3; \ + if (*curr++ != ' ') \ + GOTO_ERR; \ + *(field_len) = newline_pos-4; \ + if (*(field_len) >= max) \ + GOTO_ERR; \ + memcpy(field, curr, *(field_len)); \ + (field)[*(field_len)] = 0; \ + } \ + } + +#define PARSE_FTP_COMM_1_FIELD(c1,c2,c3,c4,name,field,field_len,max) \ + case STRING_VAL(c1,c2,c3,c4): \ + { \ + Dprintk("parsed "#name".\n"); \ + req->ftp_command = FTP_COMM_##name; \ + if (newline_pos < 5) \ + GOTO_ERR; \ + curr = mess + 4; \ + if (*curr++ != ' ') \ + GOTO_ERR; \ + *(field_len) = newline_pos-5; \ + if (*(field_len) >= max) \ + GOTO_ERR; \ + memcpy(field, curr, *(field_len)); \ + (field)[*(field_len)] = 0; \ + } + PARSE_FTP_COMM_1_FIELD('U','S','E','R', USER, + req->username, &req->username_len, + MAX_USERNAME_LEN-1); + if (!req->username_len) + GOTO_ERR; + break; + + PARSE_FTP_3CHAR_COMM_1_FIELD('C','W','D', CWD, + req->objectname, &req->objectname_len, + MAX_OBJECTNAME_LEN-1); + if (!req->objectname_len) + GOTO_ERR; + break; + + PARSE_FTP_COMM_3CHAR('P','W','D', PWD); break; + + { + char type[3]; + unsigned int type_len; + + PARSE_FTP_COMM_1_FIELD('T','Y','P','E', TYPE, + type, &type_len, 2); + if (!type_len) + GOTO_ERR; + if ((type[0] != 'I') && (type[0] != 'A')) + GOTO_ERR; + } + break; + + PARSE_FTP_COMM_1_FIELD('R','E','T','R', RETR, + req->objectname, &req->objectname_len, + MAX_OBJECTNAME_LEN-1); + if (!req->objectname_len) + req->ftp_command = FTP_COMM_NONE; +// GOTO_ERR; + break; + + PARSE_FTP_COMM_IGNORE('S','T','A','T', STAT); + break; + + PARSE_FTP_COMM_IGNORE('S','I','T','E', SITE); + break; + + PARSE_FTP_COMM_IGNORE('L','I','S','T', LIST); + break; + + PARSE_FTP_COMM_IGNORE('N','L','S','T', NLST); + break; + + PARSE_FTP_COMM_IGNORE('H','E','L','P', HELP); + break; + + PARSE_FTP_COMM_IGNORE('C','L','N','T', CLNT); + break; + +#define IS_NUM(n) (((n) >= '0') && ((n) <= '9')) + +#define GET_DIGIT(curr,n) \ + n += (*curr) 
- '0'; \ + curr++; \ + if (IS_NUM(*curr)) { \ + n *= 10; + +#define PARSE_PORTNUM(curr,n) \ +do { \ + Dprintk("PORT NUM parser:--->{%s}<---\n", curr);\ + if (!IS_NUM(*curr)) \ + GOTO_ERR; \ + n = 0; \ + GET_DIGIT(curr,n); \ + GET_DIGIT(curr,n); \ + GET_DIGIT(curr,n); \ + }}} \ + if (n > 255) \ + GOTO_ERR; \ + Dprintk("PORT NUM parser:--->{%s}<---\n", curr);\ + Dprintk("PORT NUM parser parsed %d.\n", n); \ +} while (0) + +#define PARSE_NUM(curr,n) \ +do { \ + Dprintk("NUM parser:--->{%s}<---\n", curr); \ + if (!IS_NUM(*curr)) \ + GOTO_ERR; \ + n = 0; \ + GET_DIGIT(curr,n); \ + GET_DIGIT(curr,n); \ + GET_DIGIT(curr,n); \ + GET_DIGIT(curr,n); \ + GET_DIGIT(curr,n); \ + GET_DIGIT(curr,n); \ + GET_DIGIT(curr,n); \ + GET_DIGIT(curr,n); \ + GET_DIGIT(curr,n); \ + GET_DIGIT(curr,n); \ + }}}}}}}}}} \ + Dprintk("NUM parser:--->{%s}<---\n", curr); \ + Dprintk("NUM parser parsed %d.\n", n); \ +} while (0) + + case STRING_VAL('P','O','R','T'): + { + unsigned int h1, h2, h3, h4, p1, p2; + if (req->ftp_data_sock) + zap_data_socket(req); + /* + * Minimum size: "PORT 0,0,0,0,0,0", 16 bytes. + */ + if (newline_pos < 16) + GOTO_ERR; + Dprintk("parsed PORT.\n"); + if (req->ftp_data_sock) + GOTO_ERR; + curr = mess + 4; + if (*curr++ != ' ') + GOTO_ERR; + PARSE_PORTNUM(curr,h1); + if (*curr++ != ',') + GOTO_ERR; + PARSE_PORTNUM(curr,h2); + if (*curr++ != ',') + GOTO_ERR; + PARSE_PORTNUM(curr,h3); + if (*curr++ != ',') + GOTO_ERR; + PARSE_PORTNUM(curr,h4); + if (*curr++ != ',') + GOTO_ERR; + PARSE_PORTNUM(curr,p1); + if (*curr++ != ',') + GOTO_ERR; + PARSE_PORTNUM(curr,p2); + if (curr-mess != newline_pos) + GOTO_ERR; + req->ftp_command = FTP_COMM_PORT; + req->ftp_user_addr = (h1<<24) + (h2<<16) + (h3<<8) + h4; + req->ftp_user_port = (p1<<8) + p2; + Dprintk("FTP PORT got: %d.%d.%d.%d:%d.\n", + h1, h2, h3, h4, req->ftp_user_port); + Dprintk("FTP user-addr: %08x (htonl: %08x), socket: %08x.\n", + req->ftp_user_addr, htonl(req->ftp_user_addr), + req->sock->sk->daddr); + /* + * Do not allow arbitrary redirection + * of connections ... + */ + if (req->sock->sk->daddr != htonl(req->ftp_user_addr)) + GOTO_ERR; + + break; + } + case STRING_VAL('R','E','S','T'): + { + unsigned int offset; + + /* + * Minimum size: "REST 0", 6 bytes. 
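+		 * A REST offset only takes effect for a subsequent RETR:
+		 * e.g. "REST 1024" followed by "RETR file" resumes the
+		 * transfer at byte offset 1024 (consumed in ftp_get_file()).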
+ */ + if (newline_pos < 6) + GOTO_ERR; + Dprintk("parsed REST.\n"); + curr = mess + 4; + if (*curr++ != ' ') + GOTO_ERR; + PARSE_NUM(curr,offset); + if (curr-mess != newline_pos) + GOTO_ERR; + req->ftp_command = FTP_COMM_REST; + req->ftp_offset = offset; + Dprintk("FTP REST got: %d bytes offset.\n", offset); + + break; + } + default: + req->ftp_command = FTP_COMM_NONE; + break; + } + +out: + req->parsed_len = newline_pos + 2; + add_tux_atom(req, ftp_execute_command); + + req->lookup_dir = 1; // disable index.html auto-transfer + + return req->parsed_len; +error: + clear_keepalive(req); + TDprintk("rejecting FTP session!\n"); + TDprintk("mess :--->{%s}<---\n", mess); + TDprintk("mess left:--->{%s}<---\n", curr); + req_err(req); + return -1; +} + +#define data_sock_err(req) \ + ((req)->ftp_data_sock && (req)->ftp_data_sock->sk && \ + ((req)->ftp_data_sock->sk->state >= TCP_FIN_WAIT1)) + +static void ftp_wait_close (tux_req_t *req, int cachemiss); + +void ftp_send_file (tux_req_t *req, int cachemiss) +{ + int ret; + + + SET_TIMESTAMP(req->output_timestamp); + +repeat: + if (req->error || data_sock_err(req)) { +#if CONFIG_TUX_DEBUG + req->bytes_expected = 0; +#endif + req->in_file.f_pos = 0; + TDprintk("zapping, data sock state: %d\n", + req->ftp_data_sock->sk->state); + /* + * We are in the middle of a file transfer, + * zap it immediately: + */ + req->error = 3; + zap_request(req, cachemiss); + return; + } + if (req->sock->sk->tp_pinfo.af_tcp.urg_data) { + req->in_file.f_pos = 0; + zap_data_socket(req); + __send_async_message(req, WRITE_ABORTED, 426, 1); + return; + } + + ret = generic_send_file(req, 1, !cachemiss, req->ftp_data_sock); + switch (ret) { + case -5: + add_tux_atom(req, ftp_send_file); + output_timeout(req); + break; + case -4: + add_tux_atom(req, ftp_send_file); + if (add_output_space_event(req, req->ftp_data_sock)) { + del_tux_atom(req); + goto repeat; + } + break; + case -3: + add_tux_atom(req, ftp_send_file); + queue_cachemiss(req); + break; + default: + req->in_file.f_pos = 0; + Dprintk("FTP send file req %p finished!\n", req); + if (!tux_ftp_wait_close) { + zap_data_socket(req); + __send_async_message(req, WRITE_DONE, 200, 1); + } else { + tcp_shutdown(req->ftp_data_sock->sk, SEND_SHUTDOWN); + add_tux_atom(req, ftp_wait_close); + add_req_to_workqueue(req); + } + break; + } +} + +void ftp_create_host (tux_req_t *req) +{ +#define IP(n) ((unsigned char *)& req->sock->sk->rcv_saddr)[n] + + req->host_len = sprintf(req->host, "%d.%d.%d.%d", + IP(0), IP(1), IP(2), IP(3)); + Dprintk("created FTP virtual hostname: {%s}\n", req->host); + +#undef IP +} + +static void ftp_wait_accept (tux_req_t *req, int cachemiss) +{ + struct sock *sk = req->ftp_data_sock->sk; + + Dprintk("ftp_wait_accept: data socket state %d.\n", sk->state); + + if (sk->state != TCP_ESTABLISHED) { + spin_lock_irq(&req->ti->work_lock); + add_keepalive_timer(req); + if (test_and_set_bit(0, &req->idle_input)) + TUX_BUG(); + spin_unlock_irq(&req->ti->work_lock); + if ((sk->state != TCP_ESTABLISHED) && (sk->state <= TCP_SYN_RECV) && !sk->err) { + add_tux_atom(req, ftp_wait_accept); + return; + } + unidle_req(req); + } + add_tux_atom(req, ftp_send_file); + add_req_to_workqueue(req); +} + +static void ftp_wait_close (tux_req_t *req, int cachemiss) +{ + struct sock *sk = req->ftp_data_sock->sk; + + Dprintk("ftp_wait_close: data socket state %d.\n", sk->state); + + if (sk->state <= TCP_FIN_WAIT2) { + spin_lock_irq(&req->ti->work_lock); + add_keepalive_timer(req); + if (test_and_set_bit(0, &req->idle_input)) + TUX_BUG(); + 
spin_unlock_irq(&req->ti->work_lock); + if ((sk->state <= TCP_FIN_WAIT2) && !sk->err) { + add_tux_atom(req, ftp_wait_close); + return; + } + unidle_req(req); + } + zap_data_socket(req); + __send_async_message(req, WRITE_DONE, 200, 1); +} + +void ftp_get_file (tux_req_t *req, int cachemiss) +{ + int missed; + + if (!req->dentry) { + if (virtual_server) + ftp_create_host(req); + missed = lookup_url(req, cachemiss ? 0 : LOOKUP_ATOMIC); + if ((!missed && !req->dentry) || (missed == 2)) { + __send_async_message(req, BAD_FILENAME, 200, 1); + return; + } + if (missed) { + if (cachemiss) + TUX_BUG(); + add_tux_atom(req, ftp_get_file); + queue_cachemiss(req); + return; + } + } + req->in_file.f_pos = 0; + if (req->ftp_offset <= req->filelen) + req->in_file.f_pos = req->ftp_offset; + req->ftp_offset = 0; + Dprintk("ftp_send_file %p, f_pos: %d.\n", req, (int)req->in_file.f_pos); + add_tux_atom(req, ftp_send_file); + __send_async_message(req, WRITE_FILE, 200, 1); +} + +static void __exchange_sockets (tux_req_t *req) +{ + struct socket *tmp; + + tmp = req->ftp_data_sock; + req->ftp_data_sock = req->sock; + req->sock = tmp; + + req->in_file.f_pos = 0; +} + +static void ftp_do_ls_start (tux_req_t *req, int cachemiss) +{ + Dprintk("ftp_do_ls_start(%p, %d).\n", req, cachemiss); + __exchange_sockets(req); + queue_cachemiss(req); +} + +static void ftp_do_ls_end (tux_req_t *req, int cachemiss) +{ + Dprintk("ftp_do_ls_end(%p, %d).\n", req, cachemiss); + __exchange_sockets(req); + zap_data_socket(req); + __send_async_message(req, WRITE_DONE, 200, 1); +} + +static void ftp_do_ls_line (tux_req_t *req, int cachemiss) +{ + struct linux_dirent64 *dirp, *dirp0; +#define MAX_NAME 400 + char string0[MAX_NAME+200], *tmp; + int len, curroff, total; + + tmp = NULL; + dirp0 = req->ftp_dirp0; + curroff = req->ftp_curroff; + total = req->ftp_total; + + dirp = (struct linux_dirent64 *)((char *)dirp0 + curroff); + if (!dirp->d_name) + goto next_dir; + Dprintk("<%s T:%d (off:%d) (len:%d)>\n", dirp->d_name, dirp->d_type, (int)dirp->d_off, dirp->d_reclen); + len = strlen(dirp->d_name); + if (len >= MAX_NAME) { + dirp->d_name[MAX_NAME] = 0; + len = MAX_NAME-1; + } + + if (req->ftp_command == FTP_COMM_NLST) { + tmp = string0; + memcpy(tmp, dirp->d_name, len); + tmp += len; + *tmp++ = '\r'; + *tmp++ = '\n'; + *tmp = 0; + } else { + int size, err, flag = cachemiss ? 
0 : LOOKUP_ATOMIC; + struct nameidata base; + struct dentry *dentry; + + if (!req->cwd) + TUX_BUG(); + + base.flags = LOOKUP_POSITIVE|flag; + base.last_type = LAST_ROOT; + base.dentry = req->cwd; + dget(base.dentry); + base.mnt = req->cwdmnt; + mntget(base.mnt); + + err = path_walk(dirp->d_name, &base); + + Dprintk("path_walk() returned %d.\n", err); + + if (err) { + if (err == -EWOULDBLOCKIO) { + add_tux_atom(req, ftp_do_ls_line); + queue_cachemiss(req); + return; + } + } else { + struct inode *inode; + + mntput(base.mnt); + dentry = base.dentry; + if (!dentry) + TUX_BUG(); + if (IS_ERR(dentry)) + TUX_BUG(); + inode = dentry->d_inode; + if (!inode) + TUX_BUG(); + tmp = string0; + switch (dirp->d_type) { + default: + case DT_UNKNOWN: + case DT_WHT: + *tmp++ = '?'; break; + case DT_FIFO: + *tmp++ = 'p'; break; + case DT_CHR: + *tmp++ = 'c'; break; + case DT_DIR: + *tmp++ = 'd'; break; + case DT_BLK: + *tmp++ = 'b'; break; + case DT_REG: + *tmp++ = '-'; break; + case DT_LNK: + *tmp++ = 'l'; break; + case DT_SOCK: + *tmp++ = 's'; break; + } + + if (inode->i_mode & S_IRUSR) *tmp++ = 'r'; else *tmp++ = '-'; + if (inode->i_mode & S_IWUSR) *tmp++ = 'w'; else *tmp++ = '-'; + if (inode->i_mode & S_IXUSR) *tmp++ = 'x'; else *tmp++ = '-'; + if (inode->i_mode & S_IRGRP) *tmp++ = 'r'; else *tmp++ = '-'; + if (inode->i_mode & S_IWGRP) *tmp++ = 'w'; else *tmp++ = '-'; + if (inode->i_mode & S_IXGRP) *tmp++ = 'x'; else *tmp++ = '-'; + if (inode->i_mode & S_IROTH) *tmp++ = 'r'; else *tmp++ = '-'; + if (inode->i_mode & S_IWOTH) *tmp++ = 'w'; else *tmp++ = '-'; + if (inode->i_mode & S_IXOTH) *tmp++ = 'x'; else *tmp++ = '-'; + + *tmp++ = ' '; + + + size = sprintf(tmp, "%4d %d", inode->i_nlink, inode->i_uid); + tmp += size; + + size = 14 - size; + if (size <= 0) + size = 1; + memset(tmp, ' ', size); + tmp += size; + + size = sprintf(tmp, "%d", inode->i_gid); + tmp += size; + + size = 9 - size; + if (size <= 0) + size = 1; + memset(tmp, ' ', size); + tmp += size; + + tmp += sprintf(tmp, "%8Ld", inode->i_size); + *tmp++ = ' '; + + tmp += time_unix2ls(inode->i_mtime, tmp); + *tmp++ = ' '; + + memcpy(tmp, dirp->d_name, len); + tmp += len; + *tmp++ = '\r'; + *tmp++ = '\n'; + *tmp++ = 0; + dput(dentry); + } + } +next_dir: + curroff += dirp->d_reclen; + + if (tmp) + Dprintk("writing line (len: %d): <%s>\n", strlen(string0), string0); + + if (curroff < total) { + req->ftp_dirp0 = dirp0; + req->ftp_curroff = curroff; + add_tux_atom(req, ftp_do_ls_line); + } else { + kfree(dirp0); + req->ftp_dirp0 = NULL; + req->ftp_curroff = 0; + // falls back to ftp_do_ls + } + if (tmp) + __send_async_message(req, string0, 200, 0); + else + add_req_to_workqueue(req); +} + +#define NAME_OFFSET(de) ((int) ((de)->d_name - (char *) (de))) +#define ROUND_UP(x) (((x)+sizeof(long)-1) & ~(sizeof(long)-1)) +#define ROUND_UP64(x) (((x)+sizeof(u64)-1) & ~(sizeof(u64)-1)) + +static int filldir64(void * __buf, const char * name, int namlen, loff_t offset, + ino_t ino, unsigned int d_type) +{ + struct linux_dirent64 * dirent, d; + struct getdents_callback64 * buf = (struct getdents_callback64 *) __buf; + int reclen = ROUND_UP64(NAME_OFFSET(dirent) + namlen + 1); + + buf->error = -EINVAL; /* only used if we fail.. 
*/ + if (reclen > buf->count) + return -EINVAL; + dirent = buf->previous; + if (dirent) { + d.d_off = offset; + copy_to_user(&dirent->d_off, &d.d_off, sizeof(d.d_off)); + } + dirent = buf->current_dir; + buf->previous = dirent; + memset(&d, 0, NAME_OFFSET(&d)); + d.d_ino = ino; + d.d_reclen = reclen; + d.d_type = d_type; + copy_to_user(dirent, &d, NAME_OFFSET(&d)); + copy_to_user(dirent->d_name, name, namlen); + put_user(0, dirent->d_name + namlen); + ((char *) dirent) += reclen; + buf->current_dir = dirent; + buf->count -= reclen; + return 0; +} +#define DIRENT_SIZE 3000 + +static void ftp_do_ls (tux_req_t *req, int cachemiss) +{ + struct getdents_callback64 buf; + struct linux_dirent64 *dirp0; + mm_segment_t oldmm; + int total; + + Dprintk("ftp_do_ls(%p, %d), dentry: %p.\n", req, cachemiss, req->dentry); + + if (!req->cwd) + TUX_BUG(); + + if (!cachemiss) { + add_tux_atom(req, ftp_do_ls); + queue_cachemiss(req); + return; + } + + dirp0 = kmalloc(DIRENT_SIZE, GFP_KERNEL); + if (!dirp0) + TUX_BUG(); + + buf.current_dir = dirp0; + buf.previous = NULL; + buf.count = DIRENT_SIZE; + buf.error = 0; + + oldmm = get_fs(); set_fs(KERNEL_DS); + set_fs(KERNEL_DS); + total = vfs_readdir(&req->in_file, filldir64, &buf); + set_fs(oldmm); + + if (buf.previous) + total = DIRENT_SIZE - buf.count; + + Dprintk("total: %d (buf.error: %d, buf.previous %p)\n", + total, buf.error, buf.previous); + + if (total < 0) { + req_err(req); + __send_async_message(req, LIST_ERR, 200, 0); + return; + } + if (!total) { + queue_cachemiss(req); + return; + } + + add_tux_atom(req, ftp_do_ls); + + req->ftp_dirp0 = dirp0; + req->ftp_curroff = 0; + req->ftp_total = total; + add_tux_atom(req, ftp_do_ls_line); + + add_req_to_workqueue(req); +} + +static void ftp_get_listfile (tux_req_t *req, int cachemiss) +{ + int missed; + + if (!req->dentry) { + if (virtual_server) + ftp_create_host(req); + missed = lookup_url(req, cachemiss ? 0 : LOOKUP_ATOMIC); + Dprintk("get_listfile(): missed: %d, dentry: %p, error: %d.\n", + missed, req->dentry, req->error); + if ((!missed && !req->dentry) || (missed == 2)) { + req->error = 0; + __send_async_message(req, BAD_FILENAME, 200, 1); + return; + } + if (missed) { + if (cachemiss) + TUX_BUG(); + add_tux_atom(req, ftp_get_listfile); + queue_cachemiss(req); + return; + } + } + req->ftp_offset = 0; + req->in_file.f_pos = 0; + add_tux_atom(req, ftp_send_file); + __send_async_message(req, WRITE_LIST, 200, 1); +} + +static void ftp_chdir (tux_req_t *req, int cachemiss) +{ + unsigned int lookupflag = cachemiss ? 
0 : LOOKUP_ATOMIC; + struct nameidata nd; + int err; + + Dprintk("ftp_chdir(%p, %d, {%s})\n", req, cachemiss, req->objectname); + lookupflag |= LOOKUP_POSITIVE|LOOKUP_FOLLOW|LOOKUP_DIRECTORY; + nd.flags = lookupflag; + nd.last_type = LAST_ROOT; + if ((req->objectname[0] == '/') && req->cwd) { + dput(req->cwd); + mntput(req->cwdmnt); + req->cwd = NULL; + req->cwdmnt = NULL; + } + if (!req->cwd) { + req->cwd = dget(docroot.dentry); + req->cwdmnt = mntget(docroot.mnt); + } + nd.dentry = req->cwd; + nd.mnt = req->cwdmnt; + dget(nd.dentry); + mntget(nd.mnt); + + err = path_walk(req->objectname, &nd); + if (err == -EWOULDBLOCKIO) { + if (cachemiss) + TUX_BUG(); + add_tux_atom(req, ftp_chdir); + queue_cachemiss(req); + return; + } + if (err) { + mntput(nd.mnt); + goto out_err; + } + err = permission(nd.dentry->d_inode, MAY_EXEC); + if (err) + goto out_err_put; + + req->cwd = nd.dentry; + req->cwdmnt = nd.mnt; + + __send_async_message(req, GOOD_DIR, 200, 1); + return; + +out_err_put: + path_release(&nd); +out_err: + __send_async_message(req, BAD_FILENAME, 550, 1); +} + +void ftp_accept_pasv (tux_req_t *req, int cachemiss) +{ + struct socket *sock, *new_sock = NULL; + struct tcp_opt *tp1, *tp2; + int err; + + tp1 = &req->ftp_data_sock->sk->tp_pinfo.af_tcp; + + Dprintk("PASV accept on req %p, accept_queue: %p.\n", + req, tp1->accept_queue); + if (req->error || (req->ftp_data_sock->sk->state != TCP_LISTEN)) + goto error; +new_socket: + if (!tp1->accept_queue) { + spin_lock_irq(&req->ti->work_lock); + add_keepalive_timer(req); + if (test_and_set_bit(0, &req->idle_input)) + TUX_BUG(); + spin_unlock_irq(&req->ti->work_lock); + if (!tp1->accept_queue) { + add_tux_atom(req, ftp_accept_pasv); + return; + } + unidle_req(req); + } + new_sock = sock_alloc(); + if (!new_sock) + goto error; + sock = req->ftp_data_sock; + new_sock->type = sock->type; + new_sock->ops = sock->ops; + + err = sock->ops->accept(sock, new_sock, O_NONBLOCK); + Dprintk("PASV accept() returned %d.\n", err); + if (err < 0) + goto error; + if (new_sock->sk->state != TCP_ESTABLISHED) + goto error; + /* + * Do not allow other clients to steal the FTP connection! 
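+	 * The accepted data connection must come from the same IP
+	 * address as the control connection - e.g. a third host
+	 * connecting to our PASV port is dropped and we accept again.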
+ */ + if (new_sock->sk->daddr != req->sock->sk->daddr) { + Dprintk("PASV: ugh, unauthorized connect?\n"); + unlink_tux_listen_socket(req); + sock_release(new_sock); + new_sock = NULL; + goto new_socket; + } + /* + * Zap the listen socket: + */ + unlink_tux_listen_socket(req); + sock_release(sock); + req->ftp_data_sock = NULL; + + tp2 = &new_sock->sk->tp_pinfo.af_tcp; + tp2->nonagle = tux_nonagle; + tp2->ack.pingpong = tux_ack_pingpong; + new_sock->sk->reuse = 1; + new_sock->sk->urginline = 1; + + link_tux_ftp_accept_socket(req, new_sock); + + add_req_to_workqueue(req); + return; + +error: + if (new_sock) + sock_release(new_sock); + req_err(req); + zap_data_socket(req); + __send_async_message(req, CLOSE, 500, 1); +} + +static void ftp_execute_command (tux_req_t *req, int cachemiss) +{ + if (!req->parsed_len) + TUX_BUG(); + trunc_headers(req); + req->keep_alive = 1; + + switch (req->ftp_command) { + +#define ABORTED \ + "226 Abort successful.\r\n" + + case FTP_COMM_ABOR: + { + zap_data_socket(req); + __send_async_message(req, ABORTED, 226, 1); + break; + } + + case FTP_COMM_PWD: + { + struct dentry *cwd, *root; + struct vfsmount *cwdmnt, *rootmnt; + char *buf, *path; + + if (!req->cwd) { + req->cwd = dget(docroot.dentry); + req->cwdmnt = mntget(docroot.mnt); + } + buf = (char *)__get_free_page(GFP_KERNEL); + +// "257 "/" is current directory.\r\n" + +#define PART_1 "257 \"" +#define PART_1_LEN (sizeof(PART_1)-1) + +#define PART_3 "\" is current directory.\r\n" +#define PART_3_LEN sizeof(PART_3) + + cwd = dget(req->cwd); + cwdmnt = mntget(req->cwdmnt); + root = dget(docroot.dentry); + rootmnt = mntget(docroot.mnt); + + spin_lock(&dcache_lock); + path = __d_path(cwd, cwdmnt, root, rootmnt, + buf+PART_1_LEN, PAGE_SIZE - PART_3_LEN - PART_1_LEN); + spin_unlock(&dcache_lock); + + dput(cwd); + mntput(cwdmnt); + dput(root); + mntput(rootmnt); + + if (path < buf + PART_1_LEN) + BUG(); + + memcpy(path - PART_1_LEN, PART_1, PART_1_LEN); + memcpy(buf + PAGE_SIZE-PART_3_LEN-1, PART_3, PART_3_LEN); + + __send_async_message(req, path - PART_1_LEN, 226, 1); + free_page((unsigned long)buf); + break; + } + + case FTP_COMM_CDUP: + { + memcpy(req->objectname, "..", 3); + req->objectname_len = 2; + + // fall through to CWD: + } + case FTP_COMM_CWD: + { + ftp_chdir(req, cachemiss); + break; + } + + case FTP_COMM_NLST: + case FTP_COMM_LIST: + { + if (!req->ftp_data_sock) { + req_err(req); + __send_async_message(req, LIST_ERR, 200, 1); + GOTO_ERR; + } + if (req->dentry) + TUX_BUG(); + if (!req->cwd) { + req->cwd = dget(docroot.dentry); + req->cwdmnt = mntget(docroot.mnt); + } + dget(req->cwd); + install_req_dentry(req, req->cwd); + if (!req->dentry) + TUX_BUG(); + + add_tux_atom(req, ftp_do_ls_end); + add_tux_atom(req, ftp_do_ls); + add_tux_atom(req, ftp_do_ls_start); + + __send_async_message(req, WRITE_LIST, 200, 1); + break; + } + + case FTP_COMM_RETR: + { + if (!req->ftp_data_sock) { + req_err(req); + __send_async_message(req, RETR_ERR, 200, 1); + GOTO_ERR; + } + ftp_get_file(req, cachemiss); + break; + } + + case FTP_COMM_PASV: + { + char buf [36 + 4*3 + 5 + 10]; + struct socket *data_sock; + struct sockaddr_in addr; + struct tcp_opt *tp; + u32 local_addr; + int err; + + if (req->ftp_data_sock) + zap_data_socket(req); + /* + * Create FTP data connection to client: + */ + err = sock_create(AF_INET, SOCK_STREAM, IPPROTO_IP, &data_sock); + if (err < 0) { + Dprintk("sock create err: %d\n", err); + req_err(req); + __send_async_message(req, CLOSE, 500, 1); + GOTO_ERR; + } + +#define IP(n) ((unsigned char 
*)&req->sock->sk->daddr)[n] + + local_addr = req->sock->sk->rcv_saddr; + addr.sin_family = AF_INET; + addr.sin_port = 0; + addr.sin_addr.s_addr = local_addr; + Dprintk("client address: (%d,%d,%d,%d).\n", + IP(0), IP(1), IP(2), IP(3)); +#undef IP + + data_sock->sk->reuse = 1; + + err = data_sock->ops->bind(data_sock, + (struct sockaddr*)&addr, sizeof(addr)); + Dprintk("PASV bind() ret: %d.\n", err); + if (err < 0) { + req_err(req); + sock_release(data_sock); + __send_async_message(req, CLOSE, 500, 1); + GOTO_ERR; + } + + data_sock->sk->linger = 0; + data_sock->sk->urginline = 1; + + tp = &data_sock->sk->tp_pinfo.af_tcp; + tp->ack.pingpong = tux_ack_pingpong; + if (!tux_keepalive_timeout) + tp->linger2 = 0; + else + tp->linger2 = tux_keepalive_timeout * HZ; + + err = data_sock->ops->listen(data_sock, 1); + Dprintk("PASV listen() ret: %d\n", err); + if (err) { + req_err(req); + sock_release(data_sock); + __send_async_message(req, CLOSE, 500, 1); + GOTO_ERR; + } + link_tux_ftp_data_socket(req, data_sock); + + Dprintk("FTP PASV listen sock state: %d, sk state: %d\n", + data_sock->state, data_sock->sk->state); + +#define IP(n) ((unsigned char *)&local_addr)[n] + sprintf(buf, + "227 Entering Passive Mode (%d,%d,%d,%d,%d,%d).\r\n", + IP(0), IP(1), IP(2), IP(3), + ntohs(data_sock->sk->sport) / 256, + ntohs(data_sock->sk->sport) & 255 ); +#undef IP + Dprintk("PASV mess: {%s}\n", buf); + + add_tux_atom(req, ftp_accept_pasv); + __send_async_message(req, buf, 227, 1); + break; + } + + case FTP_COMM_PORT: + { + struct socket *data_sock; + struct sockaddr_in addr; + kernel_cap_t saved_cap; + u32 local_addr; + int err; + + /* + * Create FTP data connection to client: + */ + err = sock_create(AF_INET, SOCK_STREAM, IPPROTO_IP, &data_sock); + if (err < 0) { + Dprintk("sock create err: %d\n", err); + req_err(req); + __send_async_message(req, CLOSE, 500, 1); + GOTO_ERR; + } + + local_addr = req->sock->sk->rcv_saddr; + addr.sin_family = AF_INET; + addr.sin_port = htons(20); + addr.sin_addr.s_addr = local_addr; + +#define IP(n) ((unsigned char *)&local_addr)[n] + Dprintk("data socket address: (%d,%d,%d,%d).\n", + IP(0), IP(1), IP(2), IP(3)); +#undef IP + data_sock->sk->reuse = 1; + + saved_cap = current->cap_effective; + cap_raise (current->cap_effective, CAP_NET_BIND_SERVICE); + err = data_sock->ops->bind(data_sock, + (struct sockaddr*)&addr, sizeof(addr)); + current->cap_effective = saved_cap; + + Dprintk("ACTIVE bind() ret: %d.\n", err); + if (err) { + sock_release(data_sock); + req_err(req); + __send_async_message(req, CLOSE, 500, 1); + GOTO_ERR; + } + + link_tux_ftp_data_socket(req, data_sock); + + addr.sin_family = AF_INET; + addr.sin_port = htons(req->ftp_user_port); + addr.sin_addr.s_addr = htonl(req->ftp_user_addr); + + err = data_sock->ops->connect(data_sock, (struct sockaddr *) &addr, sizeof(addr), O_RDWR|O_NONBLOCK); + if (err && (err != -EINPROGRESS)) { + Dprintk("connect error: %d\n", err); + zap_data_socket(req); + req_err(req); + __send_async_message(req, CLOSE, 500, 1); + GOTO_ERR; + } + Dprintk("FTP data sock state: %d, sk state: %d\n", data_sock->state, data_sock->sk->state); + __send_async_message(req, PORT_OK, 200, 1); + break; + } + + case FTP_COMM_USER: + { + if (!strcmp(req->username, "ftp") + || !strcmp(req->username, "FTP") + || !strcmp(req->username, "anonymous") + || !strcmp(req->username, "ANONYMOUS") + || !strcmp(req->username, "guest") + || !strcmp(req->username, "GUEST")) { + char login_ok [200]; + + if (nr_requests_used() == 1) + sprintf(login_ok, LOGIN_OK_ONE, + 
tux_max_connect); + else + sprintf(login_ok, LOGIN_OK, + nr_requests_used(), tux_max_connect); + __send_async_message(req, login_ok, 200, 1); + } else { + clear_keepalive(req); + __send_async_message(req, LOGIN_FORBIDDEN, 530, 1); + } + break; + } + case FTP_COMM_SITE: + { + __send_async_message(req, SITE, 214, 1); + break; + } + case FTP_COMM_SYST: + { + __send_async_message(req, LINUX_SYST, 200, 1); + break; + } + case FTP_COMM_TYPE: + { + __send_async_message(req, TYPE_OK, 200, 1); + break; + } + case FTP_COMM_FEAT: + { + __send_async_message(req, NO_EXTRA_FEATURES, 211, 1); + break; + } + case FTP_COMM_HELP: + case FTP_COMM_CLNT: + case FTP_COMM_NOOP: + { + __send_async_message(req, COMMAND_OK, 200, 1); + break; + } + case FTP_COMM_REST: + { + __send_async_message(req, COMMAND_OK, 200, 1); + break; + } + case FTP_COMM_QUIT: + { + clear_keepalive(req); + __send_async_message(req, BYE, 200, 1); + break; + } + + default: + { + req->keep_alive = 1; + __send_async_message(req, CLOSE, 500, 1); + break; + } + } + return; +error: + Dprintk("rejecting FTP session!\n"); + return; +} + + +static void ftp_close (tux_req_t *req, int cachemiss) +{ + if (req->error == 2) + __send_async_message(req, CLOSE_TIMEOUT, 421, 1); + else + __send_async_message(req, CLOSE, 500, 1); +} + +tux_proto_t tux_proto_ftp = { + defer_accept: 0, + got_request: ftp_got_request, + parse_message: parse_ftp_message, + illegal_request: ftp_close, +}; + diff -rNu linux-2.4.9-ac10/net/tux/proto_http.c linux/net/tux/proto_http.c --- linux-2.4.9-ac10/net/tux/proto_http.c Thu Jan 1 01:00:00 1970 +++ linux/net/tux/proto_http.c Mon Sep 10 16:18:08 2001 @@ -0,0 +1,1341 @@ +/* + * TUX - Integrated Application Protocols Layer and Object Cache + * + * Copyright (C) 2000, 2001, Ingo Molnar + * + * proto_http.c: HTTP application protocol support + * + * Right now we detect simple GET headers, anything more + * subtle gets redirected to secondary server port. + */ + +#include +#include "parser.h" + +/**************************************************************** + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + ****************************************************************/ + +/* + * Parse the HTTP message and put results into the request structure. + * CISAPI extensions do not see the actual message buffer. + * + * Any perceived irregularity is honored with a redirect to the + * secondary server - which in most cases should be Apache. So + * if TUX gets confused by some strange request we fall back + * to Apache to be RFC-correct. + * + * The parser is 'optimistic', ie. it's optimized for the case where + * the whole message is available and correct. The parser is also + * supposed to be 'robust', ie. it can be called multiple times with + * an incomplete message, as new packets arrive. + */ + +#ifdef CONFIG_ARCH_S390 +# error Add EBCDIC support to the parser first! 
+#endif + +static inline int TOHEX (unsigned char c) +{ + switch (c) { + case '0' ... '9': c -= '0'; break; + case 'a' ... 'f': c -= 'a'-10; break; + case 'A' ... 'F': c -= 'A'-10; break; + default: + c = -1; + } + return c; +} + +/* + * This function determines whether the client supports + * gzip-type content-encoding. + */ +static int may_gzip (const char *str, int len) +{ + const char *tmp, *curr; + int i; + + if (len <= 4) + return 0; + tmp = str; + for (i = 0; i <= len-6; i++) { + Dprintk("gzip-checking: {%s}\n", tmp); + if (memcmp(tmp, " gzip", 5)) { + tmp++; + continue; + } + curr = tmp + 5; + + if (*curr == ',' || *curr == '\r') + return 1; + if (memcmp(curr, ";q=", 3)) + return 0; + curr += 3; + /* + * Every qvalue except explicitly zero is accepted. + * Zero values are "q=0.0", "q=0.00", "q=0.000". + * Parsing is optimized. + */ + if (*curr == '0') { + curr += 2; + if (*curr == '0') { + curr++; + if (*curr == ' ' || *curr == '\r') + return 0; + if (*curr == '0') { + curr++; + if (*curr == ' ' || *curr == '\r') + return 0; + if (*curr == '0') { + curr++; + if (*curr == ' ' || + *curr == '\r') + return 0; + } + } + } + } + return 1; + } + return 0; +} + +static void http_process_message (tux_req_t *req, int cachemiss); + +int parse_http_message (tux_req_t *req, const int total_len) +{ + int hexhex = 0, hex_val_0 = 0, hex_val_1 = 0; + const char *curr, *uri, *message; + int objectname_len, left; + int have_r = 0; + char c; + + left = total_len; + message = req->headers; + Dprintk("parsing request:\n---\n%s\n---\n", message); +/* + * RFC 2616, 5.1: + * + * Request-Line = Method SP Request-URI SP HTTP-Version CRLF + */ + + if (!total_len) + TUX_BUG(); + + curr = message; + +#define GOTO_INCOMPLETE do { Dprintk("incomplete at %s:%d.\n", __FILE__, __LINE__); goto incomplete_message; } while (0) +#define GOTO_REDIR do { TDprintk("redirect secondary at %s:%d.\n", __FILE__, __LINE__); goto error; } while (0) + +#define PRINT_MESSAGE_LEFT \ + Dprintk("message left (%d) at %s:%d:\n--->{%s}<---\n", left, __FILE__, __LINE__, curr) + + switch (*curr) { + case 'G': + if (PARSE_METHOD(req,curr,GET,left)) + break; + GOTO_REDIR; + + case 'H': + if (PARSE_METHOD(req,curr,HEAD,left)) + break; + GOTO_REDIR; + + case 'P': + if (PARSE_METHOD(req,curr,POST,left)) + break; + if (PARSE_METHOD(req,curr,PUT,left)) + break; + GOTO_REDIR; + + default: + GOTO_REDIR; + } + + req->method_str = message; + req->method_len = curr-message-1; + + Dprintk("got method %d\n", req->method); + + PRINT_MESSAGE_LEFT; + + /* + * Ok, we got one of the methods we can handle, parse + * the URI: + */ + + { + // Do not allow leading "../" and intermediate "/../" + int dotdot = 1; + char *tmp = req->objectname; + int slashcheck = 1; + + req->uri_str = uri = curr; + + for (;;) { + c = get_c(curr,left); + if (slashcheck) { + if (c == '/') + continue; + slashcheck = 0; + } + + PRINT_MESSAGE_LEFT; + if (c == ' ' || c == '?' 
|| c == '\r' || c == '\n') + break; + if (c == '#') + GOTO_REDIR; + + /* + * First handle HEX HEX encoding + */ + switch (hexhex) { + case 0: + if (c == '%') { + hexhex = 1; + goto continue_parsing; + } + break; + case 1: + hex_val_0 = TOHEX(c); + if (hex_val_0 < 0) + GOTO_REDIR; + hexhex = 2; + goto continue_parsing; + case 2: + hex_val_1 = TOHEX(c); + if (hex_val_1 < 0) + GOTO_REDIR; + c = (hex_val_0 << 4) | hex_val_1; + if (!c) + GOTO_REDIR; + hexhex = 0; + default: + TUX_BUG(); + } + if (hexhex) + TUX_BUG(); + + switch (dotdot) { + case 0: + break; + case 1: + if (c == '.') + dotdot = 2; + else + dotdot = 0; + break; + case 2: + if (c == '.') + dotdot = 3; + else + dotdot = 0; + break; + case 3: + if (c == '/') + GOTO_REDIR; + else + dotdot = 0; + break; + default: + TUX_BUG(); + } + if (!dotdot && (c == '/')) + dotdot = 1; + + *(tmp++) = c; +continue_parsing: + if (curr - uri >= MAX_OBJECTNAME_LEN) + GOTO_REDIR; + } + PRINT_MESSAGE_LEFT; + *tmp = 0; + + // handle trailing "/.." + if (dotdot == 3) + GOTO_REDIR; + + objectname_len = tmp - req->objectname; + req->objectname_len = objectname_len; + } + Dprintk("got filename %s (%d)\n", req->objectname, req->objectname_len); + + PRINT_MESSAGE_LEFT; + + /* + * Parse optional query string. Copy until end-of-string or space. + */ + if (c == '?') { + int query_len; + const char *query; + + req->query_str = query = curr; + + for (;;) { + c = get_c(curr,left); + if (c == ' ') + break; + if (c == '#') + GOTO_REDIR; + } + query_len = curr-query-1; + req->query_len = query_len; + } + if (req->query_len) + Dprintk("got query string %s (%d)\n", req->query_str, req->query_len); + req->uri_len = curr-uri-1; + if (!req->uri_len) + GOTO_REDIR; + Dprintk("got URI %s (%d)\n", req->uri_str, req->uri_len); + + PRINT_MESSAGE_LEFT; + /* + * Parse the HTTP version field: + */ + req->version_str = curr; + if (!PARSE_TOKEN(curr,"HTTP/1.",left)) + GOTO_REDIR; + + switch (get_c(curr,left)) { + case '0': + req->version = HTTP_1_0; + break; + case '1': + req->version = HTTP_1_1; + break; + default: + GOTO_REDIR; + } + /* + * If this connection had other requests already (ie. it came from + * a keepalive-aware client) then default to keep-alive, except if + * the client specifically requests a close or max_keepalives is + * set to 0. + * + * Otherwise we default to keepalive in the HTTP/1.1 case and default + * to non-keepalive in the HTTP/1.0 case. 
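+	 * E.g. an HTTP/1.1 request without a Connection header stays
+	 * keep-alive, while a first HTTP/1.0 request defaults to close
+	 * unless it sends "Connection: Keep-Alive".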
+ */ + clear_keepalive(req); + if (tux_max_keepalives && (req->nr_keepalives || + (req->version == HTTP_1_1))) + req->keep_alive = 1; + req->version_len = curr - req->version_str; + + if (get_c(curr,left) != '\r') + GOTO_REDIR; + if (get_c(curr,left) != '\n') + GOTO_REDIR; + + Dprintk("got version %d [%d]\n", req->version, req->version_len); + PRINT_MESSAGE_LEFT; + + /* + * Now parse (optional) request header fields: + */ + for (;;) { + char c; + + c = get_c(curr,left); + switch (c) { + case '\r': + if (have_r) + GOTO_REDIR; + have_r = 1; + continue; + case '\n': + if (!have_r) + GOTO_REDIR; + goto out; + default: + if (have_r) + GOTO_REDIR; + } + +#define PARSE_STR_FIELD(char,field,str,len) \ + if (PARSE_TOKEN(curr,field,left)) { \ + req->str = curr; \ + SKIP_LINE(curr,left); \ + req->len = curr - req->str - 2; \ + Dprintk(char field "field: %s.\n", req->str); \ + break; \ + } + +#define ALLOW_UNKNOWN_FIELDS 1 +#ifdef ALLOW_UNKNOWN_FIELDS +# define UNKNOWN_FIELD { SKIP_LINE(curr,left); break; } +#else +# define UNKNOWN_FIELD GOTO_REDIR +#endif + + switch (c) { + case 'A': + PARSE_STR_FIELD("A","ccept: ", + accept_str,accept_len); + if (PARSE_TOKEN(curr,"ccept-Encoding: ",left)) { + const char *str = curr-1; + + req->accept_encoding_str = curr; + SKIP_LINE(curr,left); + req->accept_encoding_len = curr - req->accept_encoding_str - 2; + Dprintk("Accept-Encoding field: {%s}.\n", str); + + if (tux_compression && may_gzip(str,curr-str)) { + Dprintk("client accepts gzip!.\n"); + req->may_send_gzip = 1; + } + break; + } + PARSE_STR_FIELD("A","ccept-Charset: ", + accept_charset_str,accept_charset_len); + PARSE_STR_FIELD("A","ccept-Language: ", + accept_language_str,accept_language_len); + UNKNOWN_FIELD; + + case 'C': + if (PARSE_TOKEN(curr,"onnection: ",left)) { + switch (get_c(curr,left)) { + case 'K': + if (!PARSE_TOKEN(curr,"eep-Alive",left)) + GOTO_REDIR; + req->keep_alive = 1; + break; + + case 'c': + if (!PARSE_TOKEN(curr,"lose",left)) + GOTO_REDIR; + clear_keepalive(req); + break; + + case 'k': + if (!PARSE_TOKEN(curr,"eep-alive",left)) + GOTO_REDIR; + req->keep_alive = 1; + break; + + default: + GOTO_REDIR; + } + // allow other tokens. 
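+				// (e.g. "Connection: Keep-Alive, TE" - the
+				// remaining tokens on the line are ignored)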
+ SKIP_LINE(curr,left); + break; + } + + PARSE_STR_FIELD("C","ookie: ", + cookies_str,cookies_len); + PARSE_STR_FIELD("C","ontent-Type: ", + content_type_str,content_type_len); + + if (PARSE_TOKEN(curr,"ontent-Length: ",left) || + PARSE_TOKEN(curr,"ontent-length: ",left)) { + const char *tmp; + req->contentlen_str = curr; + SKIP_LINE(curr,left); + req->contentlen_len = curr - req->contentlen_str - 2; + if (req->contentlen_len) { + tmp = req->contentlen_str; + req->content_len = simple_strtoul(tmp, NULL, 10); + } + Dprintk("Content-Length field: %s [%d].\n", req->contentlen_str, req->contentlen_len); + Dprintk("Content-Length value: %d.\n", req->content_len); + break; + } + PARSE_STR_FIELD("C","ache-Control: ", + cache_control_str,cache_control_len); + UNKNOWN_FIELD; + + case 'H': + if (PARSE_TOKEN(curr,"ost: ",left)) { + const char *tmp = curr; + char *tmp2 = req->host; + + COPY_LINE_TOLOWER(curr, tmp2, left); + req->host_len = curr - tmp - 2; + req->host[req->host_len] = 0; + Dprintk("Host field: %s [%d].\n", req->host, req->host_len); + break; + } + UNKNOWN_FIELD; + + case 'I': + PARSE_STR_FIELD("I","f-None-Match: ", + if_none_match_str,if_none_match_len); + PARSE_STR_FIELD("I","f-Modified-Since: ", + if_modified_since_str,if_modified_since_len); + UNKNOWN_FIELD; + + case 'N': + PARSE_STR_FIELD("N","egotiate: ", + negotiate_str,negotiate_len); + UNKNOWN_FIELD; + + case 'P': + PARSE_STR_FIELD("P","ragma: ", + pragma_str,pragma_len); + UNKNOWN_FIELD; + + case 'R': + PARSE_STR_FIELD("R","eferer: ", + referer_str,referer_len); + UNKNOWN_FIELD; + + case 'U': + PARSE_STR_FIELD("U","ser-Agent: ", + user_agent_str,user_agent_len); + UNKNOWN_FIELD; + + default: + UNKNOWN_FIELD; + } + PRINT_MESSAGE_LEFT; + } +out: + /* + * POST data. + */ + if ((req->method == METHOD_POST) && req->content_len) { + PRINT_MESSAGE_LEFT; + if (curr + req->content_len > message + total_len) + GOTO_INCOMPLETE; + req->post_data_str = curr; + req->post_data_len = req->content_len; + curr += req->content_len; + left -= req->content_len; + Dprintk("POST-ed data: {%s}\n", req->post_data_str); + } + + switch (req->method) { + default: + GOTO_REDIR; + case METHOD_GET: + case METHOD_HEAD: + case METHOD_POST: + case METHOD_PUT: + } + +#define TUX_SCHEME "http://" +#define TUX_SCHEME_LEN (sizeof(TUX_SCHEME)-1) + + if (!memcmp(req->objectname, TUX_SCHEME, TUX_SCHEME_LEN)) { + + /* http://user:password@host:port/object */ + + const char *head, *tail, *end, *host, *port; + int host_len, objectname_len; + + head = req->objectname + TUX_SCHEME_LEN; + end = req->objectname + req->objectname_len; + + tail = memchr(head, '/', end - head); + if (!tail) + GOTO_REDIR; + host = memchr(head, '@', tail - head); + if (!host) + host = head; + else + host++; + if (!*host) + GOTO_REDIR; + port = memchr(host, ':', tail - host); + if (port) + host_len = port - host; + else + host_len = tail - host; + if (host_len >= MAX_HOST_LEN) + GOTO_REDIR; + memcpy(req->host, host, host_len); + req->host_len = host_len; + req->host[host_len] = 0; + + if (*tail != '/') + TUX_BUG(); + + req->uri_str = tail; + req->uri_len = end - tail; + + tail++; + while (*tail == '/') + tail++; + + objectname_len = end - tail; + memcpy(req->objectname, tail, objectname_len); + req->objectname_len = objectname_len; + req->objectname[objectname_len] = 0; + } else + if (req->uri_str[0] != '/') + GOTO_REDIR; + + if ((req->version == HTTP_1_1) && !req->host_len) + GOTO_REDIR; + if (req->objectname[0] == '/') + GOTO_REDIR; + /* + * Lets make sure nobody plays games with the host + * 
header in a virtual hosting environment: + */ + if (virtual_server && req->host_len) { + if (memchr(req->host, '/', req->host_len)) + GOTO_REDIR; + if (req->host[0] == '.') { + if (req->host_len == 1) + GOTO_REDIR; + if ((req->host_len == 2) && (req->host[0] == '.')) + GOTO_REDIR; + } + } + /* + * From this point on the request is for the main TUX engine: + */ + Dprintk("ok, request accepted.\n"); + + if (req->keep_alive) { + req->nr_keepalives++; + if (req->nr_keepalives == KEEPALIVE_HIST_SIZE) + req->nr_keepalives--; + kstat.nr_keepalive_reqs++; + } else + kstat.nr_nonkeepalive_reqs++; + kstat.keepalive_hist[req->nr_keepalives]++; + + PRINT_MESSAGE_LEFT; + req->parsed_len = curr-message; + if (req->dentry) + TUX_BUG(); + add_tux_atom(req, http_process_message); + + return req->parsed_len; + +incomplete_message: + Dprintk("incomplete message!\n"); + PRINT_MESSAGE_LEFT; + + return 0; + +error: + if (total_len > 0) + req->parsed_len = total_len; + else + req->parsed_len = 0; + TDprintk("redirecting message to secondary server!\n"); + PRINT_MESSAGE_LEFT; + return -1; +} + +int handle_gzip_req (tux_req_t *req, unsigned int flags) +{ + char *curr = req->objectname + req->objectname_len; + struct dentry *dentry; + struct inode *inode, *orig_inode; + ssize_t size, orig_size; + + *curr++ = '.'; + *curr++ = 'g'; + *curr++ = 'z'; + *curr++ = 0; + req->objectname_len += 3; + + dentry = tux_lookup(req, req->objectname, flags); + + req->objectname_len -= 3; + req->objectname[req->objectname_len] = 0; + + if (!dentry) + return 0; + if (IS_ERR(dentry)) { + if (PTR_ERR(dentry) == -EWOULDBLOCKIO) + return 1; + return 0; + } + + inode = dentry->d_inode; + size = inode->i_size; + orig_inode = req->dentry->d_inode; + orig_size = orig_inode->i_size; + + if (!url_permission(inode) + && (size < orig_size) + && (inode->i_mtime >= orig_inode->i_mtime)) { + + release_req_dentry(req); + install_req_dentry(req, dentry); + req->filelen = size; + Dprintk("content WILL be gzipped!\n"); + req->content_gzipped = 1; + } else + dput(dentry); + + return 0; +} + +static spinlock_t mimetypes_lock = SPIN_LOCK_UNLOCKED; + +static LIST_HEAD(mimetypes_head); + +static mimetype_t default_mimetype = { type: "text/html", type_len: 9 }; + +void add_mimetype (char *new_ext, char *new_type) +{ + int type_len = strlen(new_type); + int ext_len = strlen(new_ext); + mimetype_t *mime; + char *ext, *type; + + mime = kmalloc(sizeof(*mime), GFP_KERNEL); + memset(mime, 0, sizeof(*mime)); + ext = kmalloc(ext_len + 1, GFP_KERNEL); + type = kmalloc(type_len + 1, GFP_KERNEL); + strcpy(ext, new_ext); + strcpy(type, new_type); + + mime->ext = ext; + mime->ext_len = ext_len; + + mime->type = type; + mime->type_len = type_len; + + mime->special = NORMAL_MIME_TYPE; + if (!strcmp(type, "TUX/redirect")) + mime->special = MIME_TYPE_REDIRECT; + if (!strcmp(type, "TUX/CGI")) + mime->special = MIME_TYPE_CGI; + if (!strcmp(type, "TUX/module")) + mime->special = MIME_TYPE_MODULE; + + spin_lock(&mimetypes_lock); + list_add(&mime->list, &mimetypes_head); + spin_unlock(&mimetypes_lock); +} + +static inline int ext_matches (char *file, int len, char *ext, int extlen) +{ + int i; + char *tmp = file + len-1; + char *tmp2 = ext + extlen-1; + + if (len < extlen) + return 0; + + for (i = 0; i < extlen; i++) { + if (*tmp != *tmp2) + return 0; + tmp--; + tmp2--; + } + return 1; +} + +/* + * Overhead is not a problem, we cache the MIME type + * in the dentry. 
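+ * Lookup walks the shared list under mimetypes_lock and matches on
+ * the object name's extension (e.g. a configured ".html" entry maps
+ * "index.html" to "text/html"); often-used entries percolate towards
+ * the head of the list.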
+ */ +static mimetype_t * lookup_mimetype (tux_req_t *req) +{ + char *objectname = req->objectname; + int len = req->objectname_len; + mimetype_t *mime = NULL; + struct list_head *head, *tmp, *tmp1, *tmp2, *tmp3; + + if (!memchr(objectname, '.', len)) + goto out; + + spin_lock(&mimetypes_lock); + head = &mimetypes_head; + tmp = head->next; + + while (tmp != head) { + mime = list_entry(tmp, mimetype_t, list); + if (ext_matches(objectname, len, mime->ext, mime->ext_len)) { + /* + * Percolate often-used mimetypes down: + */ + if (tmp->prev != &mimetypes_head) { + tmp1 = tmp; + tmp2 = tmp->prev; + tmp3 = tmp->prev->prev; + list_del(tmp1); + list_del(tmp2); + list_add(tmp, tmp3); + list_add(tmp2, tmp); + } + break; + } else + mime = NULL; + tmp = tmp->next; + } + spin_unlock(&mimetypes_lock); + +out: + if (!mime) + mime = &default_mimetype; + return mime; +} + +void free_mimetypes (void) +{ + struct list_head *head, *tmp, *next; + mimetype_t *mime; + + spin_lock(&mimetypes_lock); + head = &mimetypes_head; + tmp = head->next; + + while (tmp != head) { + next = tmp->next; + mime = list_entry(tmp, mimetype_t, list); + list_del(tmp); + + kfree(mime->ext); + mime->ext = NULL; + kfree(mime->type); + mime->type = NULL; + kfree(mime); + + tmp = next; + } + spin_unlock(&mimetypes_lock); +} + +/* + * Various constant HTTP responses: + */ + +static const char forbidden[] = + "HTTP/1.1 403 Forbidden\r\n" + "Content-Length: 24\r\n\r\n" + " Forbidden "; + +static const char not_found[] = + "HTTP/1.1 404 Not Found\r\n" + "Content-Length: 29\r\n\r\n" + " Page Not Found "; + +static const char timed_out[] = + "HTTP/1.1 408 Request Timeout\r\n" + "Content-Length: 34\r\n\r\n" + " Request Timed Out "; + +#define NOTMODIFIED_1 \ + "HTTP/1.1 304 Not Modified\r\n" \ + "Date: " + +#define NOTMODIFIED_1_LEN (sizeof(NOTMODIFIED_1) - 1) + +#define NOTMODIFIED_2 \ + "\r\nETag: \"" + +#define NOTMODIFIED_2_LEN (sizeof(NOTMODIFIED_2) - 1) + +#define NOTMODIFIED_3 \ + "\"\r\n\r\n" + +#define NOTMODIFIED_3_LEN (sizeof(NOTMODIFIED_2) - 1) + +#define REDIRECT_1 \ + "HTTP/1.1 301 Moved Permanently\r\n" \ + "Location: http://" + +#define REDIRECT_1_LEN (sizeof(REDIRECT_1) - 1) + +#define REDIRECT_2 \ + "/\r\nContent-Length: 36\r\n" \ + "Connection: Keep-Alive\r\n" \ + "Content-Type: text/html\r\n\r\n" \ + " 301 Moved Permanently " + +#define REDIRECT_2_LEN (sizeof(REDIRECT_2) - 1) + +void send_async_timed_out (tux_req_t *req) +{ + __send_async_message(req, timed_out, 408, 0); +} + +void send_async_err_forbidden (tux_req_t *req) +{ + __send_async_message(req, forbidden, 403, 0); +} + +void send_async_err_not_found (tux_req_t *req) +{ + __send_async_message(req, not_found, 404, 0); +} + +static void send_ret_notmodified (tux_req_t *req) +{ + char *buf; + int size; + + size = NOTMODIFIED_1_LEN + DATE_LEN - 1 + NOTMODIFIED_2_LEN + req->etaglen + NOTMODIFIED_3_LEN; + buf = get_abuf(req, size); + memcpy(buf, NOTMODIFIED_1, NOTMODIFIED_1_LEN); + buf += NOTMODIFIED_1_LEN; + memcpy(buf, tux_date, DATE_LEN-1); + buf += DATE_LEN-1; + memcpy(buf, NOTMODIFIED_2, NOTMODIFIED_2_LEN); + buf += NOTMODIFIED_2_LEN; + memcpy(buf, &req->etag, req->etaglen); + buf += req->etaglen; + memcpy(buf, NOTMODIFIED_3, NOTMODIFIED_3_LEN); + buf += NOTMODIFIED_3_LEN; + + req->status = 304; + send_abuf(req, size, MSG_DONTWAIT, 0); + add_req_to_workqueue(req); +} + +static void send_ret_redirect (tux_req_t *req, int cachemiss) +{ + char *buf; + int size; + int uts_len = 0; + + size = REDIRECT_1_LEN; + if (req->host_len) + size += req->host_len; + else { + 
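+		/* No Host: header - use this host's utsname nodename in the Location: target. */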
down_read(&uts_sem); + uts_len = strlen(system_utsname.nodename); + size += uts_len; + } + if (req->objectname[0] != '/') + size++; + size += req->objectname_len; + size += REDIRECT_2_LEN; + + if (size > PAGE_SIZE) { + zap_request(req, cachemiss); + return; + } + + buf = get_abuf(req, size); + + memcpy(buf, REDIRECT_1, REDIRECT_1_LEN); + buf += REDIRECT_1_LEN; + + Dprintk("req %p, host: %s, host_len: %d.\n", req, req->host, req->host_len); + if (req->host_len) { + memcpy(buf, req->host, req->host_len); + buf += req->host_len; + } else { + memcpy(buf, system_utsname.nodename, uts_len); + up_read(&uts_sem); + buf += uts_len; + } + if (req->objectname[0] != '/') { + buf[0] = '/'; + buf++; + } + + memcpy(buf, req->objectname, req->objectname_len); + buf += req->objectname_len; + + memcpy(buf, REDIRECT_2, REDIRECT_2_LEN); + buf += REDIRECT_2_LEN; + + req->status = 301; + send_abuf(req, size, MSG_DONTWAIT, 0); + add_req_to_workqueue(req); +} + +static void http_got_request (tux_req_t *req) +{ + add_tux_atom(req, parse_request); + add_req_to_workqueue(req); +} + + +tux_attribute_t * lookup_tux_attribute (tux_req_t *req) +{ + tux_attribute_t *attr; + struct inode *inode; + mimetype_t *mime; + + attr = kmalloc(sizeof(*attr), GFP_KERNEL); + if (!attr) + TUX_BUG(); + memset(attr, 0, sizeof(*attr)); + + mime = lookup_mimetype(req); + + inode = req->dentry->d_inode; + if (!inode->i_uid && !inode->i_gid) { + if (mime->special == MIME_TYPE_MODULE) { + attr->tcapi = lookup_tuxmodule(req->objectname); + if (!attr->tcapi) { + req_err(req); + mime = &default_mimetype; + } + } + } else { + if (mime->special && (mime->special != MIME_TYPE_REDIRECT)) + mime = &default_mimetype; + } + attr->mime = mime; + + return attr; +} + +static void http_pre_header (tux_req_t *req, int push); +static void http_post_header (tux_req_t *req, int cachemiss); +static void http_send_body (tux_req_t *req, int cachemiss); + +static void http_process_message (tux_req_t *req, int cachemiss) +{ + tux_attribute_t *attr; + int missed; + unsigned int lookup_flag = cachemiss ? 0 : LOOKUP_ATOMIC; + + Dprintk("handling req %p, cachemiss: %d.\n", req, cachemiss); + + /* + * URL redirection support - redirect all valid requests + * to the first userspace module. 
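+	 * (With tux_all_userspace set, static file serving is bypassed
+	 * and every request is handed to the first registered
+	 * user-space module.)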
+ */ + if (tux_all_userspace) { + tcapi_template_t *tcapi = get_first_usermodule(); + if (tcapi) { + req->usermode = 1; + req->usermodule_idx = tcapi->userspace_id; + goto usermode; + } + } + missed = lookup_url(req, lookup_flag); + if (missed == 2) { + if (req->query_str) { + req->error = 1; + goto error; + } + send_ret_redirect(req, cachemiss); + return; + } + if (req->error) + goto error; + + if (missed) { +cachemiss: + if (cachemiss) + TUX_BUG(); + Dprintk("uncached request.\n"); + INC_STAT(static_lookup_cachemisses); + if (req->dentry) + TUX_BUG(); + add_tux_atom(req, http_process_message); + queue_cachemiss(req); + return; + } + + attr = req->dentry->d_tux_data; + if (!attr) { + attr = lookup_tux_attribute(req); + if (!attr) + TUX_BUG(); + req->dentry->d_tux_data = attr; + } + req->attr = attr; + if (attr->mime) + Dprintk("using MIME type %s:%s, %d.\n", attr->mime->type, attr->mime->ext, attr->mime->special); + if (attr->tcapi) { + req->usermode = 1; + req->usermodule_idx = attr->tcapi->userspace_id; + if (req->module_dentry) + TUX_BUG(); + req->module_dentry = dget(req->dentry); + release_req_dentry(req); + goto usermode; + } + + switch (attr->mime->special) { + case MIME_TYPE_MODULE: + req->usermode = 1; + goto usermode; + + case MIME_TYPE_REDIRECT: + req->error = 1; + goto error; + + case MIME_TYPE_CGI: +#if CONFIG_TUX_EXTCGI + Dprintk("CGI request %p.\n", req); + query_extcgi(req); + return; +#endif + + default: + if (req->query_str) { + req->error = 1; + goto error; + } + } + if (req->usermode) + TUX_BUG(); + + if (req->may_send_gzip) + if (handle_gzip_req(req, lookup_flag)) + goto cachemiss; + if (req->parsed_len) + trunc_headers(req); + + if (req->error) + goto error; + + add_tux_atom(req, http_send_body); + add_tux_atom(req, http_post_header); + + http_pre_header(req, req->method == METHOD_HEAD ? 1 : 0); + + add_req_to_workqueue(req); + return; + +error: + if (req->error) + zap_request(req, cachemiss); + return; + +usermode: + add_req_to_workqueue(req); +} + +static void http_post_header (tux_req_t *req, int cachemiss) +{ +#if CONFIG_TUX_DEBUG + req->bytes_expected = req->filelen; +#endif + req->bytes_sent = 0; // data comes now. 
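+	// (the response header was already queued by http_pre_header(),
+	// the following http_send_body atom streams the object itself)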
+ + add_req_to_workqueue(req); +} + +static void http_send_body (tux_req_t *req, int cachemiss) +{ + int ret; + + Dprintk("SEND req %p <%p> (sock %p, sk %p) (keepalive: %d, status: %d)\n", req, __builtin_return_address(0), req->sock, req->sock->sk, req->keep_alive, req->status); + + SET_TIMESTAMP(req->output_timestamp); + + if (req->error) { +#if CONFIG_TUX_DEBUG + req->bytes_expected = 0; +#endif + req->in_file.f_pos = 0; + /* + * We are in the middle of a file transfer, + * zap it immediately: + */ + TDprintk("req->error = 3.\n"); + req->error = 3; + zap_request(req, cachemiss); + return; + } + +repeat: + ret = 0; + if (!req->status) + req->status = 200; + if (req->method != METHOD_HEAD) + ret = generic_send_file(req, 1, !cachemiss, req->sock); + else { +#if CONFIG_TUX_DEBUG + req->bytes_expected = 0; +#endif + } + + switch (ret) { + case -5: + add_tux_atom(req, http_send_body); + output_timeout(req); + break; + case -4: + add_tux_atom(req, http_send_body); + if (add_output_space_event(req, req->sock)) { + del_tux_atom(req); + goto repeat; + } + break; + case -3: + INC_STAT(static_sendfile_cachemisses); + add_tux_atom(req, http_send_body); + queue_cachemiss(req); + break; + default: + req->in_file.f_pos = 0; + add_req_to_workqueue(req); + break; + } +} + +#define DEFAULT_DATE "Wed, 01 Jan 1970 00:00:01 GMT" + +char tux_date [DATE_LEN] = DEFAULT_DATE; + +/* + * HTTP header + */ + +#define HEADER_PART1A \ + "HTTP/1.1 200 OK\r\n" \ + "Content-Type: " + +#define HEADER_PART1B \ + "HTTP/1.1 200 OK" + +#define HEADER_PART1C \ + "HTTP/1.1 404 Page Not Found\r\n" \ + "Content-Type: " + +#define MAX_MIMETYPE_LEN 20 + +#define HEADER_PART2_keepalive "\r\nConnection: Keep-Alive\r\nDate: " + +#define HEADER_PART2_close "\r\nConnection: close\r\nDate: " + +#define HEADER_PART2_none "\r\nDate: " + +// date "%s" + +#define HEADER_PART3A "\r\nContent-Encoding: gzip" +#define HEADER_PART3BX "\r\nContent-Length: " + +/* + * Please acknowledge our hard work by not changing this define, or + * at least please acknowledge us by leaving "TUX/2.0 (Linux)" in + * the ID string. Thanks! 
:-) + */ +#define HEADER_PART3BY "\r\nServer: TUX/2.0 (Linux)\r\nContent-Length: " +#define HEADER_PART3C "\r\nETag: \"" +#define HEADER_PART4 "\r\n\r\n" + +#define MAX_OUT_HEADER_LEN (sizeof(HEADER_PART1A) + MAX_MIMETYPE_LEN + \ + sizeof(HEADER_PART2_keepalive) + DATE_LEN + \ + sizeof(HEADER_PART3A) + sizeof(HEADER_PART3BY) + \ + 12 + sizeof(HEADER_PART3C) + 21 + sizeof(HEADER_PART4)) + +static void http_pre_header (tux_req_t *req, int push) +{ + unsigned long flags; + char *buf, *curr; + mimetype_t *mime; + int size; + + if (MAX_OUT_HEADER_LEN > PAGE_SIZE) + TUX_BUG(); + if (req->attr->tcapi || req->usermode) + TUX_BUG(); + +#define COPY_STATIC_PART(nr,curr) \ + do { \ + memcpy(curr, HEADER_PART##nr, sizeof(HEADER_PART##nr)-1); \ + curr += sizeof(HEADER_PART##nr)-1; \ + } while (0) + + buf = curr = get_abuf(req, MAX_OUT_HEADER_LEN); + + mime = req->attr->mime; + if (!mime) + TUX_BUG(); + + if (req->status == 404) { + COPY_STATIC_PART(1C, curr); + memcpy(curr, mime->type, mime->type_len); + curr += mime->type_len; + } else { + if (tux_noid && (mime == &default_mimetype)) + COPY_STATIC_PART(1B, curr); + else { + COPY_STATIC_PART(1A, curr); + memcpy(curr, mime->type, mime->type_len); + curr += mime->type_len; + } + } + + if (req->keep_alive && (req->version == HTTP_1_0)) + COPY_STATIC_PART(2_keepalive, curr); + else if (!req->keep_alive && (req->version == HTTP_1_1)) + COPY_STATIC_PART(2_close, curr); + else + COPY_STATIC_PART(2_none, curr); + + memcpy(curr, tux_date, DATE_LEN-1); + curr += DATE_LEN-1; + + if (req->content_gzipped) + COPY_STATIC_PART(3A, curr); + + if (tux_noid) + COPY_STATIC_PART(3BX, curr); + else + COPY_STATIC_PART(3BY, curr); + + // "%d" req->filelen + + memcpy(curr, &req->etag, req->lendigits); + curr += req->lendigits; + if (tux_generate_etags && (req->status != 404)) + { + COPY_STATIC_PART(3C, curr); + memcpy(curr, &req->etag, req->etaglen); + curr += req->etaglen; + curr[0] = '"'; + curr++; + } + COPY_STATIC_PART(4, curr); + + size = curr-buf; + +#if CONFIG_TUX_DEBUG + *curr = 0; + Dprintk("{%s} [%d/%d]\n", buf, size, strlen(buf)); +#endif + + flags = MSG_DONTWAIT; + if (!push) + flags |= MSG_MORE; + send_abuf(req, size, flags, 0); +} + +void http_illegal_request (tux_req_t *req, int cachemiss) +{ + if (req->error == 2) + /* + * Just zap timed out connections + */ + { + clear_keepalive(req); + add_req_to_workqueue(req); + } + else { + if (req->version != HTTP_1_1) + { + clear_keepalive(req); + }; + if (req->status == 304) + send_ret_notmodified(req); + else + { + if (req->status == 403) + send_async_err_forbidden(req); + else + send_async_err_not_found(req); + } + } +} + +tux_proto_t tux_proto_http = { + defer_accept: 1, + got_request: http_got_request, + parse_message: parse_http_message, + illegal_request: http_illegal_request, +}; + diff -rNu linux-2.4.9-ac10/net/tux/redirect.c linux/net/tux/redirect.c --- linux-2.4.9-ac10/net/tux/redirect.c Thu Jan 1 01:00:00 1970 +++ linux/net/tux/redirect.c Mon Sep 10 16:18:08 2001 @@ -0,0 +1,158 @@ +/* + * TUX - Integrated Application Protocols Layer and Object Cache + * + * Copyright (C) 2000, 2001, Ingo Molnar + * + * redirect.c: redirect requests to other server sockets (such as Apache). + */ + +#include + +/**************************************************************** + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + ****************************************************************/ + +static void dummy_destructor(struct open_request *req) +{ +} + +static struct or_calltable dummy = +{ + 0, + NULL, + NULL, + &dummy_destructor, + NULL +}; + +static int redirect_sock (tux_req_t *req, const int port) +{ + struct socket *sock = req->sock; + struct open_request *tcpreq; + struct sock *sk, *oldsk; + int err = -1; + + /* + * Look up (optional) listening user-space socket. + */ + local_bh_disable(); + sk = tcp_v4_lookup_listener(INADDR_ANY, port, 0); + /* + * Look up localhost listeners as well. + */ + if (!sk) { + u32 daddr; + ((char *)&daddr)[0] = 127; + ((char *)&daddr)[1] = 0; + ((char *)&daddr)[2] = 0; + ((char *)&daddr)[3] = 1; + sk = tcp_v4_lookup_listener(daddr, port, 0); + } + local_bh_enable(); + + /* No secondary server found */ + if (!sk) + goto out; + + /* + * Requeue the 'old' socket as an accept-socket of + * the listening socket. This way we can shuffle + * a socket around. Since we've read the input data + * via the non-destructive MSG_PEEK, the secondary + * server can be used transparently. + */ + oldsk = sock->sk; + lock_sock(sk); + + if (sk->state != TCP_LISTEN) + goto out_unlock; + + tcpreq = tcp_openreq_alloc(); + if (!tcpreq) + goto out_unlock; + + unlink_tux_socket(req); + + sock->sk = NULL; + sock->state = SS_UNCONNECTED; + + tcpreq->class = &dummy; + write_lock_irq(&oldsk->callback_lock); + oldsk->socket = NULL; + oldsk->sleep = NULL; + write_unlock_irq(&oldsk->callback_lock); + + oldsk->tp_pinfo.af_tcp.nonagle = 0; + + tcp_acceptq_queue(sk, tcpreq, oldsk); + + sk->data_ready(sk, 0); + + /* + * It's now completely up to the secondary + * server to handle this request. + */ + sock_release(req->sock); + req->sock = NULL; + req->parsed_len = 0; + req->input_skb = NULL; + + err = 0; + +out_unlock: + release_sock(sk); + sock_put(sk); +out: + return err; +} + +void redirect_request (tux_req_t *req, int cachemiss) +{ + if (cachemiss) + TUX_BUG(); + if (req->error == 3) + goto out_flush; + if (!req->sock) + TUX_BUG(); + + Dprintk("redirecting request (headers: {%s})\n", req->headers); + if (!req->status) + req->status = -1; + /* + * 301 redirects are special and do not go to + * the secondary server. + */ + if ((req->status == 301) || redirect_sock(req, tux_clientport)) { + if (req->parsed_len) + trunc_headers(req); + if ((req->status != 301) && req->proto) { + req->proto->illegal_request(req, cachemiss); + return; + } + goto out_flush; + } else { + if (req->ftp_data_sock) + BUG(); + } +out_flush: + if (req->status != 301) { + clear_keepalive(req); + if (!tux_redirect_logging) + req->status = 0; + } + flush_request(req, cachemiss); +} + diff -rNu linux-2.4.9-ac10/net/tux/times.c linux/net/tux/times.c --- linux-2.4.9-ac10/net/tux/times.c Thu Jan 1 01:00:00 1970 +++ linux/net/tux/times.c Mon Sep 10 16:18:08 2001 @@ -0,0 +1,176 @@ +/* + * TUX - Integrated Application Protocols Layer and Object Cache + * + * Copyright (C) 2000, 2001, Ingo Molnar + * + * times.c: time conversion routines. 
+ *
+ * Original time conversion code Copyright (C) 1999 by Arjan van de Ven
+ */
+
+/****************************************************************
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ ****************************************************************/
+
+#include
+#include
+#include
+#include
+
+
+#include "times.h"
+
+static char *dayName[7] = {
+	"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"
+};
+
+static char *monthName[12] = {
+	"Jan", "Feb", "Mar", "Apr", "May", "Jun",
+	"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"
+};
+
+static char itoa_h[60]={'0','0','0','0','0','0','0','0','0','0',
+			'1','1','1','1','1','1','1','1','1','1',
+			'2','2','2','2','2','2','2','2','2','2',
+			'3','3','3','3','3','3','3','3','3','3',
+			'4','4','4','4','4','4','4','4','4','4',
+			'5','5','5','5','5','5','5','5','5','5'};
+
+static char itoa_l[60]={'0','1','2','3','4','5','6','7','8','9',
+			'0','1','2','3','4','5','6','7','8','9',
+			'0','1','2','3','4','5','6','7','8','9',
+			'0','1','2','3','4','5','6','7','8','9',
+			'0','1','2','3','4','5','6','7','8','9',
+			'0','1','2','3','4','5','6','7','8','9'};
+
+int time_unix2ls (time_t zulu, char *buf)
+{
+	int Y=0,M=0,D=0;
+	int H=0,Min=0,S=0,WD=0;
+	int I,I2;
+	time_t rest, delta;
+
+	if (zulu > xtime.tv_sec)
+		zulu = xtime.tv_sec;
+
+	I=0;
+	while (I<KHTTPD_NUMYEARS)
+	{
+		if (TimeDays[I][0]>zulu)
+			break;
+		I++;
+	}
+
+	Y=--I;
+	if (I<0)
+	{
+		Y=0;
+		goto BuildYear;
+	}
+	I2=0;
+	while (I2<=12)
+	{
+		if (TimeDays[I][I2]>zulu)
+			break;
+		I2++;
+	}
+
+	M=I2-1;
+
+	rest=zulu - TimeDays[Y][M];
+	WD=WeekDays[Y][M];
+	D=rest/86400;
+	rest=rest%86400;
+	WD+=D;
+	WD=WD%7;
+	H=rest/3600;
+	rest=rest%3600;
+	Min=rest/60;
+	rest=rest%60;
+	S=rest;
+
+BuildYear:
+	Y+=KHTTPD_YEAROFFSET;
+
+
+	/* Format: Day, 01 Mon 1999 01:01:01 GMT */
+
+	delta = xtime.tv_sec - zulu;
+	if (delta > 6*30*24*60)
+		// "May 23 2000"
+		return sprintf( buf, "%s %02i %04i", monthName[M], D+1, Y);
+	else
+		// "May 23 10:14"
+		return sprintf( buf, "%s %02i %02i:%02i",
+			monthName[M], D+1, H, Min);
+}
+
+static int MonthHash[32] = {0,0,7,0,0,0,0,0,0,0,0,3,0,0,0,2,6,0,5,0,9,8,4,0,0,11,1,10,0,0,0,0};
+
+#define is_digit(c)	((c) >= '0' && (c) <= '9')
+
+__inline static int skip_atoi(char **s)
+{
+	int i=0;
+
+	while (is_digit(**s))
+		i = i*10 + *((*s)++) - '0';
+	return i;
+}
+
+time_t mimetime_to_unixtime(char *Q)
+{
+	int Y,M,D,H,Min,S;
+	unsigned int Hash;
+	time_t Temp;
+	char *s,**s2;
+
+	s=Q;
+	s2=&s;
+
+	if (strlen(s)<30) return 0;
+	if (s[3]!=',') return 0;
+	if (s[19]!=':') return 0;
+
+	s+=5; /* Skip day of week */
+	D = skip_atoi(s2); /* Day of month */
+	s++;
+	Hash = (unsigned char)s[0]+(unsigned char)s[2];
+	Hash = (Hash<<1) + (unsigned char)s[1];
+	Hash = (Hash&63)>>1;
+	M = MonthHash[Hash];
+	s+=4;
+	Y = skip_atoi(s2); /* Year */
+	s++;
+	H = skip_atoi(s2); /* Hour */
+	s++;
+	Min = skip_atoi(s2); /* Minutes */
+	s++;
+	S = skip_atoi(s2); /* Seconds */
+	s++;
+	if ((s[0]!='G')||(s[1]!='M')||(s[2]!='T'))
+	{
+		return 0; /* No GMT */
+	}
+
+	if (Y<KHTTPD_YEAROFFSET) Y = KHTTPD_YEAROFFSET;
+	if (Y>KHTTPD_YEAROFFSET+9) Y = KHTTPD_YEAROFFSET+9;
+
+	Temp = TimeDays[Y-KHTTPD_YEAROFFSET][M];
+	Temp += D*86400+H*3600+Min*60+S;
+
+	return Temp;
+}
diff -rNu linux-2.4.9-ac10/net/tux/times.h linux/net/tux/times.h
--- linux-2.4.9-ac10/net/tux/times.h	Thu Jan 1 01:00:00 1970
+++ linux/net/tux/times.h	Mon Sep 10 16:18:08 2001
@@ -0,0 +1,26 @@
+static time_t TimeDays[10][13] = {
+	{ 852073200, 854751600, 857170800, 859849200, 862441200, 865119600, 867711600, 870390000, 873068400, 875660400, 878338800, 880930800, 883609200 } ,
+	{ 883609200, 886287600, 888706800, 891385200, 893977200, 896655600, 899247600, 901926000, 904604400, 907196400, 909874800, 912466800, 915145200 } ,
+	{ 915145200, 917823600, 920242800, 922921200, 925513200, 928191600, 930783600, 933462000, 936140400, 938732400, 941410800, 944002800, 946681200 } ,
+	{ 946681200, 949359600, 951865200, 954543600, 957135600, 959814000, 962406000, 965084400, 967762800, 970354800, 973033200, 975625200, 978303600 } ,
+	{ 978303600, 980982000, 983401200, 986079600, 988671600, 991350000, 993942000, 996620400, 999298800, 1001890800, 1004569200, 1007161200, 1009839600 } ,
+	{ 1009839600, 1012518000, 1014937200, 1017615600, 1020207600, 1022886000, 1025478000, 1028156400, 1030834800, 1033426800, 1036105200, 1038697200, 1041375600 } ,
+	{ 1041375600, 1044054000, 1046473200, 1049151600, 1051743600, 1054422000, 1057014000, 1059692400, 1062370800, 1064962800, 1067641200, 1070233200, 1072911600 } ,
+	{ 1072911600, 1075590000, 1078095600, 1080774000, 1083366000, 1086044400, 1088636400, 1091314800, 1093993200, 1096585200, 1099263600, 1101855600, 1104534000 } ,
+	{ 1104534000, 1107212400, 1109631600, 1112310000, 1114902000, 1117580400, 1120172400, 1122850800, 1125529200, 1128121200, 1130799600, 1133391600, 1136070000 } ,
+	{ 1136070000, 1138748400, 1141167600, 1143846000, 1146438000, 1149116400, 1151708400, 1154386800, 1157065200, 1159657200, 1162335600, 1164927600, 1167606000 }
+};
+static int WeekDays[10][13] = {
+	{ 3, 6, 6, 2, 4, 0, 2, 5, 1, 3, 6, 1, 4 } ,
+	{ 4, 0, 0, 3, 5, 1, 3, 6, 2, 4, 0, 2, 5 } ,
+	{ 5, 1, 1, 4, 6, 2, 4, 0, 3, 5, 1, 3, 6 } ,
+	{ 6, 2, 3, 6, 1, 4, 6, 2, 5, 0, 3, 5, 1 } ,
+	{ 1, 4, 4, 0, 2, 5, 0, 3, 6, 1, 4, 6, 2 } ,
+	{ 2, 5, 5, 1, 3, 6, 1, 4, 0, 2, 5, 0, 3 } ,
+	{ 3, 6, 6, 2, 4, 0, 2, 5, 1, 3, 6, 1, 4 } ,
+	{ 4, 0, 1, 4, 6, 2, 4, 0, 3, 5, 1, 3, 6 } ,
+	{ 6, 2, 2, 5, 0, 3, 5, 1, 4, 6, 2, 4, 0 } ,
+	{ 0, 3, 3, 6, 1, 4, 6, 2, 5, 0, 3, 5, 1 }
+};
+#define KHTTPD_YEAROFFSET 1997
+#define KHTTPD_NUMYEARS 10
diff -rNu linux-2.4.9-ac10/net/tux/userspace.c linux/net/tux/userspace.c
--- linux-2.4.9-ac10/net/tux/userspace.c	Thu Jan 1 01:00:00 1970
+++ linux/net/tux/userspace.c	Mon Sep 10 16:18:08 2001
@@ -0,0 +1,27 @@
+/*
+ * TUX - Integrated Application Protocols Layer and Object Cache
+ *
+ * Copyright (C) 2000, 2001, Ingo Molnar
+ *
+ * userspace.c: handle userspace-module requests
+ */
+
+#include
+
+/****************************************************************
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + ****************************************************************/ +
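
An illustrative note on the request "atom" mechanism used by http_send_body() above: when generic_send_file() cannot complete (a cachemiss, no socket output space, or a timeout), the handler pushes itself back onto the request's atom stack with add_tux_atom() and returns, so the request can be resumed later from the work queue or the cachemiss path. The stand-alone user-space sketch below is a hypothetical miniature of that pattern only; mini_req, add_atom, run_top_atom and the simulated send_body are made-up names, and the struct is not the kernel's tux_req_t.

#include <stdio.h>

#define MAX_ATOMS 8

struct mini_req;
typedef void (atom_func)(struct mini_req *req);

struct mini_req {
	int atom_idx;			/* top of the continuation stack */
	atom_func *atoms[MAX_ATOMS];	/* pending continuations, LIFO */
	int sent;			/* toy state: did the "send" finish? */
};

static void add_atom(struct mini_req *req, atom_func *fn)
{
	req->atoms[req->atom_idx++] = fn;
}

static void run_top_atom(struct mini_req *req)
{
	req->atoms[--req->atom_idx](req);
}

static void send_body(struct mini_req *req)
{
	if (!req->sent) {
		/* pretend the socket buffer filled up: re-queue ourselves */
		req->sent = 1;
		add_atom(req, send_body);
		printf("send_body: would block, re-queued\n");
		return;
	}
	printf("send_body: completed\n");
}

int main(void)
{
	struct mini_req req = { 0, { NULL }, 0 };

	add_atom(&req, send_body);
	run_top_atom(&req);	/* first pass: "blocks" and re-queues itself */
	run_top_atom(&req);	/* second pass: completes */
	return 0;
}

Keeping continuations as an explicit per-request stack is what lets each step restart after an asynchronous event instead of a kernel thread blocking in the middle of a transfer.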
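
The static parts of the HTTP response header above are appended with COPY_STATIC_PART(), which relies on sizeof(literal)-1 so the length of every fixed fragment is known at compile time; only the MIME type, date, length digits and ETag are copied with run-time lengths. Below is a small stand-alone sketch of the same technique; the PART* strings, the COPY_STATIC macro and the buffer size are simplified stand-ins, not the patch's HEADER_PART* definitions.

#include <stdio.h>
#include <string.h>

/* cut-down stand-ins for the HEADER_PART* fragments */
#define PART1	"HTTP/1.1 200 OK\r\nContent-Type: "
#define PART2	"\r\nConnection: Keep-Alive\r\nDate: "
#define PART4	"\r\n\r\n"

/* same idea as COPY_STATIC_PART(): sizeof(literal)-1 is a compile-time strlen */
#define COPY_STATIC(lit, curr)				\
	do {						\
		memcpy(curr, lit, sizeof(lit)-1);	\
		curr += sizeof(lit)-1;			\
	} while (0)

int main(void)
{
	char buf[256], *curr = buf;
	const char *mime = "text/html";
	const char *date = "Mon, 10 Sep 2001 16:18:08 GMT";

	COPY_STATIC(PART1, curr);
	memcpy(curr, mime, strlen(mime));
	curr += strlen(mime);
	COPY_STATIC(PART2, curr);
	memcpy(curr, date, strlen(date));
	curr += strlen(date);
	COPY_STATIC(PART4, curr);
	*curr = '\0';

	printf("%s", buf);
	return 0;
}

Because MAX_OUT_HEADER_LEN is likewise computed from sizeof() of the fragments, http_pre_header() can assert up front that one page is always large enough for the assembled header.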
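
A note on the date parser: mimetime_to_unixtime() above reads RFC-1123 dates ("Sun, 06 Nov 1994 08:49:37 GMT" format) at fixed field offsets and folds the three month letters into a 5-bit index into MonthHash[] rather than comparing month names. The stand-alone user-space check below exercises that folding; MonthHash[] and monthName[] are copied from the code above, while the test harness itself is new.

#include <stdio.h>

/* MonthHash[] and monthName[] copied from net/tux/times.c above */
static int MonthHash[32] =
	{0,0,7,0,0,0,0,0,0,0,0,3,0,0,0,2,6,0,5,0,9,8,4,0,0,11,1,10,0,0,0,0};

static const char *monthName[12] = {
	"Jan", "Feb", "Mar", "Apr", "May", "Jun",
	"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"
};

int main(void)
{
	int m, ok = 1;

	for (m = 0; m < 12; m++) {
		const unsigned char *s = (const unsigned char *)monthName[m];
		/* the same folding that mimetime_to_unixtime() applies: */
		unsigned int hash = s[0] + s[2];

		hash = (hash << 1) + s[1];
		hash = (hash & 63) >> 1;
		if (MonthHash[hash] != m) {
			printf("%s -> %d, expected %d\n",
				monthName[m], MonthHash[hash], m);
			ok = 0;
		}
	}
	printf(ok ? "MonthHash: all 12 months map correctly\n"
		  : "MonthHash: mismatch\n");
	return !ok;
}

The folding is collision-free over the twelve legal abbreviations, so a single table lookup replaces a dozen string compares per parsed date header.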