#!/usr/bin/perl # # CGIProxy 2.1.6 # # nph-proxy.cgi-- CGIProxy 2.1: a proxy in the form of a CGI script. # Retrieves the resource at any HTTP or FTP URL, updating embedded URLs # in HTML and other resources to point back through this script. By # default, no user info is sent to the server. Options include # text-only proxying to save bandwidth, cookie filtering, ad filtering, # script removal, user-defined encoding of the target URL, and more. # Besides running as a CGI script, can also run under mod_perl, as a # FastCGI script, or can use its own embedded HTTP server. # Requires Perl 5. # # Copyright (C) 1996, 1998-2013 by James Marshall, james@jmarshall.com # All rights reserved. Free for non-commercial use; commercial use # requires a license. # # For the latest, see http://www.jmarshall.com/tools/cgiproxy/ # # # IMPORTANT NOTE ABOUT ANONYMOUS BROWSING: # CGIProxy was originally made for indirect browsing more than # anonymity, but since people are using it for anonymity, I've tried # to make it as anonymous as possible. Suggestions welcome. For best # anonymity, browse with JavaScript turned off. That said, please notify # me if you find any privacy holes, even when using JavaScript. # Anonymity is good, but may not be bulletproof. For example, if even # a single unchecked JavaScript statement can be run, your anonymity # can be compromised. I've tried to handle JS in every place it can # exist, but please tell me if I missed any. Also, browser plugins # or other executable extensions may be able to reveal you to a server. # Also, be aware that this script doesn't modify PDF files or other # third-party document formats that may contain linking ability, so # you will lose your anonymity if you follow links in such files. # If you find any other way your anonymity can be compromised, please let # me know. # # # CONFIGURATION: # # None required in most situations. On some servers, these might be # required (all in the "user configuration" section): # . If you're using a database to store cookies (makes them behave better), # you need to set $DB_DRIVER, $DB_USER, and $DB_PASS . See the notes # by those settings for more details. Note that you should purge # the database periodically, with a cron job on Unix or Mac, or with # the Task Scheduler in Windows. # . If you're using another HTTP or SSL proxy, set $HTTP_PROXY, # $SSL_PROXY, and $NO_PROXY as needed. If those proxies use # authentication, set $PROXY_AUTH and $SSL_PROXY_AUTH accordingly. # . If this is running on an insecure server that doesn't use port 80, set # $RUNNING_ON_SSL_SERVER=0 (otherwise, the default of '' is fine). # . If you plan to run CGIProxy as a FastCGI script, set at least # $SECRET_PATH and see the configuration section "FastCGI configuration". # . If you plan to run CGIProxy using its own embedded server, set # $SECRET_PATH and see the configuration section "Embedded server configuration". # You'll also need a certificate and private key (key pair) in PEM # format. # # Options include: # . To install Perl CPAN modules that let you visit secure servers and # use compression, run "./nph-proxy.cgi install-modules" from the # command line (on Windows, run "perl nph-proxy.cgi install-modules"). # Ignore the scrolling text, and hit <Enter> if asked any questions. # . Set $TEXT_ONLY, $REMOVE_COOKIES, $REMOVE_SCRIPTS, $FILTER_ADS, # $HIDE_REFERER, and $INSERT_ENTRY_FORM as desired. Set # $REMOVE_SCRIPTS if anonymity is important. # . To let the user choose all of those settings (except $TEXT_ONLY), # set $ALLOW_USER_CONFIG=1. 
# . To change the encoding format of the URL, modify the # proxy_encode() and proxy_decode() routines. The default # routines are suitable for simple PATH_INFO compliance. # . To encode cookies, modify the cookie_encode() and cookie_decode() # routines. # . You can restrict which servers this proxy will access, with # @ALLOWED_SERVERS and @BANNED_SERVERS. # . Similarly, you can specify allowed and denied server lists for # both cookies and scripts. # . For security, you can ban access to private IP ranges, with # @BANNED_NETWORKS. # . If filtering ads, you can customize this with a few settings. # . To insert your own block of HTML into each page, set $INSERT_HTML # or $INSERT_FILE. # . As a last resort, if you really can't run this script as NPH, # you can try to run it as non-NPH by setting $NOT_RUNNING_AS_NPH=1. # BUT, read the notes and warnings above that line. Caveat surfor. # . For crude load-balancing among a set of proxies, set @PROXY_GROUP. # . Other config is possible; see the user configuration section. # . If heavy use of this proxy puts a load on your server, see the # "NOTES ON PERFORMANCE" section below. # # For more info, read the comments above any config options you set. # # This script MUST be installed as a non-parsed header (NPH) script. # In Apache and many other servers, this is done by simply starting the # filename with "nph-". It MAY be possible to fake it as a non-NPH # script, MOST of the time, by using the $NOT_RUNNING_AS_NPH feature. # This is not advised. See the comments by that option for warnings. # # # TO USE: # Start a browsing session by calling the script with no parameters. # You can bookmark pages you browse to through the proxy, or link to # the URLs that are generated. # # # NOTES ON PERFORMANCE: # Unfortunately, this has gotten slower through the versions, mostly # because of optional new features. Configured equally, version 1.3 # takes 25% longer to run than 1.0 or 1.1 (based on *cough* highly # abbreviated testing). Compiling takes about 50% longer. # Leaving $REMOVE_SCRIPTS=1 adds 25-50% to the running time. # Remember that we're talking about tenths of a second here. Most of # the delay experienced by the user is from waiting on two network # connections. These performance issues only matter if your server # CPU is getting overloaded. Also, these mostly matter when retrieving # JavaScript and Flash, because modifying those is what takes most of the # time. # If you can, use mod_perl. Starting with version 1.3.1, this should # work under mod_perl, which requires Perl 5.004 or later. If you use # mod_perl, be careful to install this as an NPH script, i.e. set the # "PerlSendHeader Off" configuration directive (or "PerlOptions -ParseHeaders" # if using mod_perl 2.x). For more info, see the mod_perl documentation. # If you can't use mod_perl, try using FastCGI. Configure the section # "FastCGI configuration" below, and run nph-proxy.cgi from the command # line to see a usage message. You'll also need to configure your # Web server to use FastCGI. # If you can't use mod_perl or FastCGI, try running CGIProxy as its own # embedded server. Configure the section "Embedded server configuration", # and run nph-proxy.cgi from the command line to see a usage message. # You'll also need a key pair (certificate and private key). # If you use mod_perl, FastCGI, or the embedded server, and modify this # script, see the note near the "reset 'a-z'" line below, regarding # UPPER_CASE and lower_case variable names. # # # TO DO: # What I want to hear about: # . 
Any HTML tags not being converted here. # . Any method of introducing JavaScript or other script that's not # being handled here. # . Any script MIME types other than those already in @SCRIPT_MIME_TYPES. # . Any MIME types other than text/html that have links that need to # be converted. # plug any other script holes (e.g. MSIE-proprietary, other MIME types?) # This could use cleaner URL-encoding all over ($base_url, etc.) # more error checking? # find a simple encryption technique for proxy_encode() # support more protocols, like mailto: or gopher: # For ad filtering, add option to disable images from servers other than # that of the containing HTML page? Is it worth it? # # # BUGS: # Anonymity may not be perfect. In particular, there may be some remaining # JavaScript or Flash holes. Please let me know if you find any. # Since ALL of your cookies are sent to this script (which then chooses # the relevant ones), some cookies could be dropped if you accumulate a # lot, resulting in "Bad Request" errors. To fix this, use a database # server for cookies. # # # I first wrote this in 1996 as an experiment to allow indirect browsing. # The original seed was a program I wrote for Rich Morin's article # in the June 1996 issue of Unix Review, online at # http://www.cfcl.com/tin/P/199606.shtml. # # Confession: I didn't originally write this with the spec for HTTP # proxies in mind, and there are probably some violations of the protocol # (at least for proxies). This whole thing is one big violation of the # proxy model anyway, so I hereby rationalize that the spec can be widely # interpreted here. If there is demand, I can make it more conformant. # The HTTP client and server components should be fine; it's just the # special requirements for proxies that may not be followed. # #-------------------------------------------------------------------------- use strict ; use warnings ; no warnings qw(uninitialized) ; # use defaults all the time use Socket ; use Encode ; use IO::Handle ; use IO::Select ; use File::Spec ; use Time::Local ; use Getopt::Long ; use Net::Domain qw(hostfqdn) ; use Fcntl qw(:DEFAULT :flock) ; use POSIX qw(:sys_wait_h setsid); use Time::HiRes qw(gettimeofday tv_interval) ; use Errno qw(EINTR EAGAIN EWOULDBLOCK ENOBUFS EPIPE) ; # First block below is config variables, second block is sort-of config # variables, third block is persistent constants, fourth block is would-be # persistent constants (not set until needed), fifth block is constants for # JavaScript processing (mostly regular expressions), and last block is # variables. # Removed $RE_JS_STRING_LITERAL to help with Perl's long-literal-string bug, # but can replace it later if/when that is fixed. Added # $RE_JS_STRING_LITERAL_START, $RE_JS_STRING_REMAINDER_1, and # $RE_JS_STRING_REMAINDER_2 as part of the workaround. 
use vars qw( $PROXY_DIR $SECRET_PATH $LOCAL_LIB_DIR $FCGI_SOCKET $FCGI_MAX_REQUESTS_PER_PROCESS $FCGI_NUM_PROCESSES $PRIVATE_KEY_FILE $CERTIFICATE_FILE $RUN_AS_USER $EMB_USERNAME $EMB_PASSWORD $DB_DRIVER $DB_NAME $DB_USER $DB_PASS $USE_DB_FOR_COOKIES %REDIRECTS $TEXT_ONLY $REMOVE_COOKIES $REMOVE_SCRIPTS $FILTER_ADS $HIDE_REFERER $INSERT_ENTRY_FORM $ALLOW_USER_CONFIG $ENCODE_DECODE_BLOCK_IN_JS @ALLOWED_SERVERS @BANNED_SERVERS @BANNED_NETWORKS $NO_COOKIE_WITH_IMAGE @ALLOWED_COOKIE_SERVERS @BANNED_COOKIE_SERVERS @ALLOWED_SCRIPT_SERVERS @BANNED_SCRIPT_SERVERS @BANNED_IMAGE_URL_PATTERNS $RETURN_EMPTY_GIF $USER_IP_ADDRESS_TEST $DESTINATION_SERVER_TEST $INSERT_HTML $INSERT_FILE $ANONYMIZE_INSERTION $FORM_AFTER_INSERTION $INSERTION_FRAME_HEIGHT $RUNNING_ON_SSL_SERVER $NOT_RUNNING_AS_NPH $HTTP_PROXY $SSL_PROXY $NO_PROXY $PROXY_AUTH $SSL_PROXY_AUTH $MINIMIZE_CACHING $SESSION_COOKIES_ONLY $COOKIE_PATH_FOLLOWS_SPEC $RESPECT_THREE_DOT_RULE @PROXY_GROUP $USER_AGENT $USE_PASSIVE_FTP_MODE $SHOW_FTP_WELCOME $PROXIFY_SCRIPTS $PROXIFY_SWF $ALLOW_RTMP_PROXY $ALLOW_UNPROXIFIED_SCRIPTS $PROXIFY_COMMENTS $USE_POST_ON_START $ENCODE_URL_INPUT $REMOVE_TITLES $NO_BROWSE_THROUGH_SELF $NO_LINK_TO_START $MAX_REQUEST_SIZE @TRANSMIT_HTML_IN_PARTS_URLS $QUIETLY_EXIT_PROXY_SESSION $OVERRIDE_SECURITY @SCRIPT_MIME_TYPES @OTHER_TYPES_TO_REGISTER @TYPES_TO_HANDLE $NON_TEXT_EXTENSIONS $PROXY_VERSION $RUN_METHOD @MONTH @WEEKDAY %UN_MONTH @BANNED_NETWORK_ADDRS $DBH $STH_UPD_COOKIE $STH_INS_COOKIE $STH_SEL_COOKIE $STH_SEL_ALL_COOKIES $STH_DEL_COOKIE $STH_UPD_SESSION $STH_INS_SESSION $STH_SEL_IP $STH_PURGE_SESSIONS $STH_PURGE_COOKIES $USER_IP_ADDRESS_TEST_H $DESTINATION_SERVER_TEST_H $RUNNING_ON_IIS @NO_PROXY $NO_CACHE_HEADERS @ALL_TYPES %MIME_TYPE_ID $SCRIPT_TYPE_REGEX $TYPES_TO_HANDLE_REGEX $THIS_HOST $ENV_SERVER_PORT $ENV_SCRIPT_NAME $THIS_SCRIPT_URL $RTMP_SERVER_PORT %ENV_UNCHANGING $HAS_INITED $CUSTOM_INSERTION %IN_CUSTOM_INSERTION $RE_JS_WHITE_SPACE $RE_JS_LINE_TERMINATOR $RE_JS_COMMENT $RE_JS_IDENTIFIER_START $RE_JS_IDENTIFIER_PART $RE_JS_IDENTIFIER_NAME $RE_JS_PUNCTUATOR $RE_JS_DIV_PUNCTUATOR $RE_JS_NUMERIC_LITERAL $RE_JS_ESCAPE_SEQUENCE $RE_JS_STRING_LITERAL $RE_JS_STRING_LITERAL_START $RE_JS_STRING_REMAINDER_1 $RE_JS_STRING_REMAINDER_2 $RE_JS_REGULAR_EXPRESSION_LITERAL $RE_JS_TOKEN $RE_JS_INPUT_ELEMENT_DIV $RE_JS_INPUT_ELEMENT_REG_EXP $RE_JS_SKIP $RE_JS_SKIP_NO_LT %RE_JS_SET_TRAPPED_PROPERTIES %RE_JS_SET_RESERVED_WORDS_NON_EXPRESSION %RE_JS_SET_ALL_PUNCTUATORS $JSLIB_BODY $HTTP_VERSION $HTTP_1_X $URL $STDIN $STDOUT $now $session_id $session_id_persistent $session_cookies $packed_flags $encoded_URL $doing_insert_here $env_accept $e_remove_cookies $e_remove_scripts $e_filter_ads $e_insert_entry_form $e_hide_referer $images_are_banned_here $scripts_are_banned_here $cookies_are_banned_here $scheme $authority $path $host $port $username $password $cookie_to_server %auth $script_url $url_start $url_start_inframe $url_start_noframe $is_in_frame $expected_type $base_url $base_scheme $base_host $base_path $base_file $base_unframes $default_style_type $default_script_type $status $headers $body $charset $is_html $response_sent %in_mini_start_form $needs_jslib $does_write $swflib $AVM2_BYTECODES $debug ) ; #-------------------------------------------------------------------------- # user configuration #-------------------------------------------------------------------------- # [As of 2.1.6, this is only needed if using the embedded server.] # For certain purposes, CGIProxy may need to create files. This is where # those will go. 
For example, use "/home/username/cgiproxy", where "username" # is replaced by your username. # This must be an absolute path to the directory, i.e. a path starting with # "/" or "\" (possibly after a drive letter and ":" if using Windows). Note that # you need to use "\\" to represent a single backslash. # Leading drive letters (e.g. for Windows) are allowed. # The default will use the directory "cgiproxy" under your home directory (which # varies with your operating system). If it doesn't work, manually set # $PROXY_DIR to an absolute path. You can name it whatever you want. # Note that in Unix or Mac, using a directory on a mounted filesystem (which often # includes home directories) may prevent that filesystem from being unmounted, # which may bother your sysadmin. If so, try setting this to something starting # with "/tmp/", like "/tmp/.yourname/". # If you get "mkdir" permission errors, create the directory yourself with mkdir. # You may also need to "chmod 777 directoryname" to make the directory writable # by the Web server, but note that this makes it readable and writable by # everybody. You might ask your webmaster if they provide a safe way for CGI # scripts to read and write files in your directories. $PROXY_DIR= $^O=~ /win/i ? "$ENV{HOMEDRIVE}$ENV{HOMEPATH}\\cgiproxy" # Windows : "$ENV{HOME}/cgiproxy" ; # Unix and Mac #$PROXY_DIR= '/absolute/path/here/cgiproxy' ; # if you need to set it manually # IMPORTANT: CHANGE THIS IF USING FASTCGI OR THE EMBEDDED SERVER! # If using FastCGI or the embedded server, the path in the URL will begin with a # fixed alphanumeric sequence (string) to help conceal the proxy. You can set # this to any alphanumeric string. The URL of your proxy will be # "https://example.com/secret" (replace "secret" with your actual secret). # If we didn't do this, then a censor could check if a site hosts a proxy by # merely accessing "https://example.com" . # Note that this is not a secret from the users, just from anyone watching # network traffic. Also, it won't be kept secret if your server is insecure. $SECRET_PATH= 'secret' ; # If this script is not running as your user ID (such as a Web server running # as its own user ID), and you're using the local::lib module, then # set this to the directory where your modules are installed with local::lib . # This is normally just the "perl5" directory under your home directory, unless # you renamed it or configured local::lib to use a different directory. # If you set this before installing modules, then modules will be installed # into this directory. #$LOCAL_LIB_DIR= '/home/your-username/perl5' ; # this example works for Unix or Mac #---- FastCGI configuration --------------------- # FastCGI is a mechanism that can speed up CGI-like scripts. It's purely # optional and requires some web server configuration as well, and if you # don't use it you can ignore this section. # FastCGI uses a Unix-domain socket to communicate between the FastCGI client # (e.g. the web server software) and the FastCGI server (e.g. a CGI script # that has been converted to run as a listening daemon, such as CGIProxy). # A "Unix-domain socket" looks like a file in a directory listing, but is # actually a mechanism to allow different processes to communicate with # each other. # If that's confusing, just set this to an absolute path/filename in a # directory that can be read and written by the web server. The default is # usually fine on Unix or Mac systems. You'll also need to configure your # web server to use the same path/filename. 
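# For example, here is one way the web server side might be configured (a
# hedged sketch only, not part of this distribution -- the directive names
# depend on which FastCGI module your server uses, and the paths below are
# just placeholders). With Apache and mod_fastcgi, you could point the
# server at the same socket like this:
#     # in httpd.conf: treat nph-proxy.cgi as an external FastCGI server
#     FastCgiExternalServer /var/www/cgi-bin/nph-proxy.cgi -socket /tmp/cgiproxy.fcgi.socket
# Adjust the script path and the socket to match your installation and the
# $FCGI_SOCKET setting below.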
# It's also possible to use a normal Internet socket on the same machine, # though that's less efficient than a Unix-domain socket. If you do use # an Internet socket, set $FCGI_SOCKET equal to a colon followed by a # local port, e.g. ":1234". $FCGI_SOCKET= '/tmp/cgiproxy.fcgi.socket' ; # FastCGI uses multiple processes to listen on its socket, where each # process can handle one request at a time. This is a performance tuning # parameter, so the optimal number depends on your server environment # (hardware and software). # If you don't understand this, the default should be fine. You can experiment # with different numbers if performance is an issue. # This can be overridden with the "-n" command-line parameter. $FCGI_NUM_PROCESSES= 100 ; # As a FastCGI process gets used for many requests, it slowly takes more and # more memory, due to the copy-on-write behavior of forked processes. Thus, # it's cleaner if you kill a process and restart a fresh one after it handles # some number of requests. This is a performance tuning parameter, so the # optimal number depends on your server environment (hardware and software). # If you don't understand this, the default should be fine. You can experiment # with different numbers if performance is an issue. # This can be overridden with the "-m" command-line parameter. $FCGI_MAX_REQUESTS_PER_PROCESS= 1000 ; #---- End of FastCGI configuration -------------- # Much initialization of unchanging values is now in this routine. (Ignore # this if you don't know what it means.) sub init { #---- Embedded server configuration ------------- # For the embedded server, you need to a) put a certificate and private key, # in PEM format, into the $PROXY_DIR directory, and b) set these two # variables to the two file names. (A "certificate" is the same thing as # a public key.) # You can either pay a certificate authority for a key pair, or you can # generate your own "self-signed" key pair. The disadvantage of using a # self-signed key pair is that your users will see a browser warning about # an untrusted certificate. This is all true of any secure server. #$CERTIFICATE_FILE= 'plain-cert.pem' ; #$PRIVATE_KEY_FILE= 'plain-rsa.pem' ; # If you run this as the root user to use port 443, it's a good idea to change # the user ID to something with fewer permissions. You can set this to either # a username, or a numeric user ID. # This probably won't work on Windows. $RUN_AS_USER= 'nobody' ; # It's important to use $SECRET_PATH, but you can require a username and # password too. All users must login with whatever you set below, using # HTTP Basic authentication. Leave these commented out to disable # password protection. # This is very simple right now. In the future there will likely be # more authentication methods, including support for multiple users. #$EMB_USERNAME= 'free' ; #$EMB_PASSWORD= 'speech' ; #---- End of embedded server configuration ------ #---- Database configuration -------------------- # Database use is optional, and if you don't use one you can ignore this # section. But if you're getting "Bad Request" errors, you can fix it # by using a database; also, see the $USE_DB_FOR_COOKIES option below. # Database use is optional. It's most efficient when this script is running # under mod_perl or FastCGI. # To use a database, create a database account for this program to use, or ask # your database administrator to do it. Set $DB_USER and $DB_PASS to the # username and password, below. 
This program will try to create the required # database, named $DB_NAME as set below, but if your DBA isn't willing to grant the # permission to create databases to the CGIProxy user, then you or the DBA will # need to create the database. # # If you are using a database, it must be purged periodically. In Unix or # Mac, do this with a cron job. In Windows, use the Task Scheduler. # In Unix or Mac, the command to purge the database is # "/path/to/script/nph-proxy.cgi -c purge-db". (Replace "/path/to/script/" # with the actual path to the script.) Edit your crontab with "crontab -e", # and add a line like: # "0 * * * * /path/to/script/nph-proxy.cgi -c purge-db" (without quotes) # to purge the database at the top of every hour, or: # "0 2 * * * /path/to/script/nph-proxy.cgi -c purge-db" (without quotes) # to purge it every night at 2:00am. # This is the name of the "database driver" for the database software you're using. # Currently supported values are "MySQL" and "Oracle". # Leave this empty or commented out to not use a database, or set it to use # a database. #$DB_DRIVER= 'MySQL' ; # CGIProxy creates (if possible) and uses its own database. If you want to name # the database something else, change this value. If you need a database # administrator to create the database, tell him or her this database name. $DB_NAME= 'cgiproxy' ; # These are the username and password of the database account, as described above. $DB_USER= 'proxy' ; $DB_PASS= '' ; # If set, then use the server-side database to store cookies. This gets around # the problem of too many total cookies causing "Bad Request" errors. # Set this to 1 to use the database (if it's configured), or to 0 to NOT use # the database. $USE_DB_FOR_COOKIES= 1 ; #---- End of database configuration ------------- # If set, then proxy traffic will be restricted to text data only, to save # bandwidth (though it can still be circumvented with uuencode, etc.). # To replace images with a 1x1 transparent GIF, set $RETURN_EMPTY_GIF below. $TEXT_ONLY= 0 ; # set to 1 to allow only text data, 0 to allow all # If set, then prevent all cookies from passing through the proxy. To allow # cookies from some servers, set this to 0 and see @ALLOWED_COOKIE_SERVERS # and @BANNED_COOKIE_SERVERS below. You can also prevent cookies with # images by setting $NO_COOKIE_WITH_IMAGE below. # Note that this only affects cookies from the target server. The proxy # script sends its own cookies for other reasons too, like to support # authentication. This flag does not stop these cookies from being sent. $REMOVE_COOKIES= 0 ; # If set, then remove as much scripting as possible. If anonymity is # important, this is strongly recommended! Better yet, turn off script # support in your browser. # On the HTTP level: # . prevent transmission of script MIME types (which only works if the server # marks them as such, so a malicious server could get around this, but # then the browser probably wouldn't execute the script). # . remove Link: headers that link to a resource of a script MIME type. # Within HTML resources: # . remove <script>...</script> blocks. # . remove intrinsic event attributes from tags, i.e. attributes whose names # begin with "on". # . remove <style>...</style> blocks where the "type" attribute is a script MIME type. # . remove various HTML tags that appear to link to a script MIME type. # . remove script macros (aka Netscape-specific "JavaScript entities"), # i.e. any attributes containing the string "&{" . # . remove "JavaScript conditional comments". # . remove MSIE-specific "dynamic properties". 
# To allow scripts from some sites but not from others, set this to 0 and # see @ALLOWED_SCRIPT_SERVERS and @BANNED_SCRIPT_SERVERS below. # See @SCRIPT_MIME_TYPES below for a list of which MIME types are filtered out. # I do NOT know for certain that this removes all script content! It removes # all that I know of, but I don't have a definitive list of places scripts # can exist. If you do, please send it to me. EVEN RUNNING A SINGLE # JAVASCRIPT STATEMENT CAN COMPROMISE YOUR ANONYMITY! Just so you know. # Richard Smith has a good test site for anonymizing proxies, at # http://users.rcn.com/rms2000/anon/test.htm # Note that turning this on removes most popup ads! :) $REMOVE_SCRIPTS= 0 ; # If set, then filter out images that match one of @BANNED_IMAGE_URL_PATTERNS, # below. Also removes cookies attached to images, as if $NO_COOKIE_WITH_IMAGE # is set. # To remove most popup advertisements, also set $REMOVE_SCRIPTS=1 above. $FILTER_ADS= 0 ; # If set, then don't send a Referer: [sic] header with each request # (i.e. something that tells the server which page you're coming from # that linked to it). This is a minor privacy issue, but a few sites # won't send you pages or images if the Referer: is not what they're # expecting. If a page is loading without images or a link seems to be # refused, then try turning this off, and a correct Referer: header will # be sent. # This is only a problem in a VERY small percentage of sites, so few that # I'm kinda hesitant to put this in the entry form. Other arrangements # have their own problems, though. $HIDE_REFERER= 0 ; # If set, insert a compact version of the URL entry form at the top of each # page. This will also display the URL currently being viewed. # When viewing a page with frames, a new top frame is created and the # insertion goes there. # If you want to customize the appearance of the form, modify the routine # mini_start_form() near the end of the script. # If you want to insert something other than this form, see $INSERT_HTML and # $INSERT_FILE below. # Users should realize that options changed via the form only take effect when # the form is submitted by entering a new URL or pressing the "Go" button. # Selecting an option, then following a link on the page, will not cause # the option to take effect. # Users should also realize that anything inserted into a page may throw # off any precise layout. The insertion will also be subject to # background colors and images, and any other page-wide settings. $INSERT_ENTRY_FORM= 1 ; # If set, then allow the user to control $REMOVE_COOKIES, $REMOVE_SCRIPTS, # $FILTER_ADS, $HIDE_REFERER, and $INSERT_ENTRY_FORM. Note that they # can't fine-tune any related options, such as the various @ALLOWED... and # @BANNED... lists. $ALLOW_USER_CONFIG= 1 ; # If you want to encode the URLs of visited pages so that they don't show # up within the full URL in your browser bar, then use proxy_encode() and # proxy_decode(). These are Perl routines that transform the way the # destination URL is included in the full URL. You can either use # some combination of the example encodings below, or you can program your # own routines. The encoded form of URLs should only contain characters # that are legal in PATH_INFO. This varies by server, but using only # printable chars and no "?" or "#" works on most servers. Don't let # PATH_INFO contain the strings "./", "/.", "../", or "/..", or else it # may get compressed like a pathname somewhere. Try not to make the # resulting string too long, either. 
# Of course, proxy_decode() must exactly undo whatever proxy_encode() does. # Make proxy_encode() as fast as possible-- it's a bottleneck for the whole # program. The speed of proxy_decode() is not as important. # If you're not a Perl programmer, you can use the example encodings that are # commented out, i.e. the lines beginning with "#". To use them, merely # uncomment them, i.e. remove the "#" at the start of the line. If you # uncomment a line in proxy_encode(), you MUST uncomment the corresponding # line in proxy_decode() (note that "corresponding lines" in # proxy_decode() are in reverse order of those in proxy_encode()). You # can use one, two, or all three encodings at the same time, as long as # the correct lines are uncommented. # Starting in version 2.1beta9, don't call these functions directly. Rather, # call wrap_proxy_encode() and wrap_proxy_decode() instead, which handle # certain details that you shouldn't have to worry about in these functions. # IMPORTANT: If you modify these routines, and if $PROXIFY_SCRIPTS is set # below (on by default), then you MUST modify $ENCODE_DECODE_BLOCK_IN_JS # below!! (You'll need to write corresponding routines in JavaScript to do # the same as these routines in Perl, used when proxifying JavaScript.) # Because of the simplified absolute URL resolution in full_url(), there may # be ".." segments in the default encoding here, notably in the first path # segment. Normally, that's just an HTML mistake, but please tell me if # you see any privacy exploit with it. # Note that a few sites have embedded applications (like applets or Shockwave) # that expect to access URLs relative to the page's URL. This means they # may not work if the encoded target URL can't be treated like a base URL, # e.g. that it can't be appended with something like "../data/foo.data" # to get that expected data file. In such cases, the default encoding below # should let these sites work fine, as should any other encoding that can # support URLs relative to it. sub proxy_encode { my($URL)= @_ ; $URL=~ s#^([\w+.-]+)://#$1/# ; # http://xxx -> http/xxx # $URL=~ s/(.)/ sprintf('%02x',ord($1)) /ge ; # each char -> 2-hex # $URL=~ tr/a-zA-Z/n-za-mN-ZA-M/ ; # rot-13 return $URL ; } sub proxy_decode { my($enc_URL)= @_ ; # $enc_URL=~ tr/a-zA-Z/n-za-mN-ZA-M/ ; # rot-13 # $enc_URL=~ s/([\da-fA-F]{2})/ sprintf("%c",hex($1)) /ge ; $enc_URL=~ s#^([\w+.-]+)/#$1://# ; # http/xxx -> http://xxx return $enc_URL ; } # Encode cookies before they're sent back to the user. # The return value must only contain characters that are legal in cookie # names and values, i.e. only printable characters, and no ";", ",", "=", # or white space. # cookie_encode() is called twice for each cookie: once to encode the cookie # name, and once to encode the cookie value. The two are then joined with # "=" and sent to the user. # cookie_decode() must exactly undo whatever cookie_encode() does. # Also, cookie_encode() must always encode a given input string into the # same output string. This is because browsers need the cookie name to # identify and manage a cookie, so the name must be consistent. # This is not a bottleneck like proxy_encode() is, so speed is not critical. # IMPORTANT: If you modify these routines, and if $PROXIFY_SCRIPTS is set # below (on by default), then you MUST modify $ENCODE_DECODE_BLOCK_IN_JS # below!! (You'll need to write corresponding routines in JavaScript to do # the same as these routines in Perl, used when proxifying JavaScript.) 
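# As one more illustration of a custom pair for the proxy_encode() and
# proxy_decode() routines above (a hedged sketch only -- it is NOT enabled
# here, and the use of MIME::Base64 is just one reasonable choice), a
# reversible, PATH_INFO-safe encoding could use URL-safe Base64 from the
# core MIME::Base64 module:
#     use MIME::Base64 qw(encode_base64 decode_base64) ;
#     sub proxy_encode {
#         my($URL)= @_ ;
#         $URL= encode_base64($URL, '') ;   # '' means no line breaks
#         $URL=~ tr|+/=|-_.| ;              # avoid "/", "?", and "#" in the result
#         return $URL ;
#     }
#     sub proxy_decode {
#         my($enc_URL)= @_ ;
#         $enc_URL=~ tr|-_.|+/=| ;
#         return decode_base64($enc_URL) ;
#     }
# If you use something like this and $PROXIFY_SCRIPTS is set (it is by
# default), remember to write matching JavaScript versions in
# $ENCODE_DECODE_BLOCK_IN_JS below.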
sub cookie_encode { my($cookie)= @_ ; # $cookie=~ s/(.)/ sprintf('%02x',ord($1)) /ge ; # each char -> 2-hex # $cookie=~ tr/a-zA-Z/n-za-mN-ZA-M/ ; # rot-13 $cookie=~ s/(\W)/ '%' . sprintf('%02x',ord($1)) /ge ; # simple URL-encoding return $cookie ; } sub cookie_decode { my($enc_cookie)= @_ ; $enc_cookie=~ s/%([\da-fA-F]{2})/ pack('C', hex($1)) /ge ; # URL-decode # $enc_cookie=~ tr/a-zA-Z/n-za-mN-ZA-M/ ; # rot-13 # $enc_cookie=~ s/([\da-fA-F]{2})/ sprintf("%c",hex($1)) /ge ; return $enc_cookie ; } # If $PROXIFY_SCRIPTS is true, and if you modify the routines above that # encode cookies and URLs, then you need to modify $ENCODE_DECODE_BLOCK_IN_JS # here. Explanation: When proxifying JavaScript, a library of JavaScript # functions is used. In that library are a few JavaScript routines that do # the same as their Perl counterparts in this script. Four of those routines # are proxy_encode(), proxy_decode(), cookie_encode(), and cookie_decode(). # Thus, unfortunately, when you write your own versions of those Perl routines # (or modify what's already there), you also need to write (or modify) these # corresponding JavaScript routines to do the same thing. Put the routines in # this long variable $ENCODE_DECODE_BLOCK_IN_JS, and it will be included in # the JavaScript library when needed. Prefix the function names with # "_proxy_jslib_", as below. # The commented examples in the JavaScript routines below correspond exactly to # the commented examples in the Perl routines above. Thus, if you modify the # Perl routines by merely uncommenting the examples, you can do the same in # these JavaScript routines. (JavaScript comments begin with "//".) # [If you don't know Perl: Note that everything up until the line "EOB" is one # long string value, called a "here document". $ENCODE_DECODE_BLOCK_IN_JS is # set to the whole thing.] $ENCODE_DECODE_BLOCK_IN_JS= <<'EOB' ; function _proxy_jslib_proxy_encode(URL) { URL= URL.replace(/^([\w\+\.\-]+)\:\/\//, '$1/') ; // URL= URL.replace(/(.)/g, function (s,p1) { return p1.charCodeAt(0).toString(16) } ) ; // URL= URL.replace(/([a-mA-M])|[n-zN-Z]/g, function (s,p1) { return String.fromCharCode(s.charCodeAt(0)+(p1?13:-13)) }) ; return URL ; } function _proxy_jslib_proxy_decode(enc_URL) { // enc_URL= enc_URL.replace(/([a-mA-M])|[n-zN-Z]/g, function (s,p1) { return String.fromCharCode(s.charCodeAt(0)+(p1?13:-13)) }) ; // enc_URL= enc_URL.replace(/([\da-fA-F]{2})/g, function (s,p1) { return String.fromCharCode(eval('0x'+p1)) } ) ; enc_URL= enc_URL.replace(/^([\w\+\.\-]+)\//, '$1://') ; return enc_URL ; } function _proxy_jslib_cookie_encode(cookie) { // cookie= cookie.replace(/(.)/g, function (s,p1) { return p1.charCodeAt(0).toString(16) } ) ; // cookie= cookie.replace(/([a-mA-M])|[n-zN-Z]/g, function (s,p1) { return String.fromCharCode(s.charCodeAt(0)+(p1?13:-13)) }) ; cookie= cookie.replace(/(\W)/g, function (s,p1) { return '%'+p1.charCodeAt(0).toString(16) } ) ; return cookie ; } function _proxy_jslib_cookie_decode(enc_cookie) { enc_cookie= enc_cookie.replace(/%([\da-fA-F]{2})/g, function (s,p1) { return String.fromCharCode(eval('0x'+p1)) } ) ; // enc_cookie= enc_cookie.replace(/([a-mA-M])|[n-zN-Z]/g, function (s,p1) { return String.fromCharCode(s.charCodeAt(0)+(p1?13:-13)) }) ; // enc_cookie= enc_cookie.replace(/([\da-fA-F]{2})/g, function (s,p1) { return String.fromCharCode(eval('0x'+p1)) } ) ; return enc_cookie ; } EOB # Use @ALLOWED_SERVERS and @BANNED_SERVERS to restrict which servers a user # can visit through this proxy. 
Any URL at a host matching a pattern in # @BANNED_SERVERS will be forbidden. In addition, if @ALLOWED_SERVERS is # not empty, then access is allowed *only* to servers that match a pattern # in it. In other words, @BANNED_SERVERS means "ban these servers", and # @ALLOWED_SERVERS (if not empty) means "allow only these servers". If a # server matches both lists, it is banned. # These are each a list of Perl 5 regular expressions (aka patterns or # regexes), not literal host names. To turn a hostname into a pattern, # replace every "." with "\.", add "^" to the beginning, and add "$" to the # end. For example, 'www.example.com' becomes '^www\.example\.com$'. To # match *every* host ending in something, leave out the "^". For example, # '\.example\.com$' matches every host ending in ".example.com". For more # details about Perl regular expressions, see the Perl documentation. (They # may seem cryptic at first, but they're very powerful once you know how to # use them.) # Note: Use single quotes around each pattern, not double quotes, unless you # understand the difference between the two in Perl. Otherwise, characters # like "$" and "\" may not be handled the way you expect. @ALLOWED_SERVERS= () ; @BANNED_SERVERS= () ; # If @BANNED_NETWORKS is set, then forbid access to these hosts or networks. # This is done by IP address, not name, so it provides more certain security # than @BANNED_SERVERS above. # Specify each element as a decimal IP address-- all four integers for a host, # or one to three integers for a network. For example, '127.0.0.1' bans # access to the local host, and '192.168' bans access to all IP addresses # in the 192.168 network. Sorry, no banning yet for subnets other than # 8, 16, or 24 bits. # IF YOU'RE RUNNING THIS ON OR INSIDE A FIREWALL, THIS SETTING IS STRONGLY # RECOMMENDED!! In particular, you should ban access to other machines # inside the firewall that the firewall machine itself may have access to. # Otherwise, external users will be able to access any internal hosts that # the firewall can access. Even if that's what you intend, you should ban # access to any hosts that you don't explicitly want to expose to outside # users. # In addition to the recommended defaults below, add all IP addresses of your # server machine if you want to protect it like this. # After you set this, YOU SHOULD TEST to verify that the proxy can't access # the IP addresses you're banning! # NOTE: According to RFC 1918, network address ranges reserved for private # networks are 10.x.x.x, 192.168.x.x, and 172.16.x.x-172.31.x.x, i.e. with # respective subnet masks of 8, 16, and 12 bits. Since we can't currently # do a 12-bit mask, we'll exclude the entire 172 network here. If this # causes a problem, let me know and I'll add subnet masks down to 1-bit # resolution. # Also included are 169.254.x.x (per RFC 3927) and 244.0.0.x (used for # routing), as recommended by Waldo Jaquith. # On some systems, 127.x.x.x all point to localhost, so disallow all of "127". # This feature is simple now but may be more complete in future releases. # How would you like this to be extended? What would be useful to you? @BANNED_NETWORKS= ('127', '192.168', '172', '10', '169.254', '244.0.0') ; # Settings to fine-tune cookie filtering, if cookies are not banned altogether # (by user checkbox or $REMOVE_COOKIES above). # Use @ALLOWED_COOKIE_SERVERS and @BANNED_COOKIE_SERVERS to restrict which # servers can send cookies through this proxy. 
They work like # @ALLOWED_SERVERS and @BANNED_SERVERS above, both in how their precedence # works, and that they're lists of Perl 5 regular expressions. See the # comments there for details. # If non-empty, only allow cookies from servers matching one of these patterns. # Comment this out to allow all cookies (subject to @BANNED_COOKIE_SERVERS). #@ALLOWED_COOKIE_SERVERS= ('\bslashdot\.org$') ; # Reject cookies from servers matching these patterns. @BANNED_COOKIE_SERVERS= ( '\.doubleclick\.net$', '\.preferences\.com$', '\.imgis\.com$', '\.adforce\.com$', '\.focalink\.com$', '\.flycast\.com$', '\.avenuea\.com$', '\.linkexchange\.com$', '\.pathfinder\.com$', '\.burstnet\.com$', '\btripod\.com$', '\bgeocities\.yahoo\.com$', '\.mediaplex\.com$', ) ; # Set this to reject cookies returned with images. This actually prevents # cookies returned with any non-text resource. # This helps prevent tracking by ad networks, but there are also some # legitimate uses of attaching cookies to images, such as captcha, so # by default this is off. $NO_COOKIE_WITH_IMAGE= 0 ; # Settings to fine-tune script filtering, if scripts are not banned altogether # (by user checkbox or $REMOVE_SCRIPTS above). # Use @ALLOWED_SCRIPT_SERVERS and @BANNED_SCRIPT_SERVERS to restrict which # servers you'll allow scripts from. They work like @ALLOWED_SERVERS and # @BANNED_SERVERS above, both in how their precedence works, and that # they're lists of Perl 5 regular expressions. See the comments there for # details. @ALLOWED_SCRIPT_SERVERS= () ; @BANNED_SCRIPT_SERVERS= () ; # Various options to help filter ads and stop cookie-based privacy invasion. # These are only effective if $FILTER_ADS is set above. # @BANNED_IMAGE_URL_PATTERNS uses Perl patterns. If an image's URL # matches one of the patterns, it will not be downloaded (typically for # ad-filtering). For more information on Perl regular expressions, see # the Perl documentation. # Note that most popup ads will be removed if scripts are removed (see # $REMOVE_SCRIPTS above). # If ad-filtering is your primary motive, consider using one of the many # proxies that specialize in that. The classic is from JunkBusters, at # http://www.junkbusters.com . # Reject images whose URL matches any of these patterns. This is just a # sample list; add more depending on which sites you visit. @BANNED_IMAGE_URL_PATTERNS= ( 'ad\.doubleclick\.net/ad/', '\b[a-z](\d+)?\.doubleclick\.net(:\d*)?/', '\.imgis\.com\b', '\.adforce\.com\b', '\.avenuea\.com\b', '\.go\.com(:\d*)?/ad/', '\.eimg\.com\b', '\bexcite\.netscape\.com(:\d*)?/.*/promo/', '/excitenetscapepromos/', '\.yimg\.com(:\d*)?.*/promo/', '\bus\.yimg\.com/[a-z]/(\w\w)/\1', '\bus\.yimg\.com/[a-z]/\d-/', '\bpromotions\.yahoo\.com(:\d*)?/promotions/', '\bcnn\.com(:\d*)?/ads/', 'ads\.msn\.com\b', '\blinkexchange\.com\b', '\badknowledge\.com\b', '/SmartBanner/', '\bdeja\.com/ads/', '\bimage\.pathfinder\.com/sponsors', 'ads\.tripod\.com', 'ar\.atwola\.com/image/', '\brealcities\.com/ads/', '\bnytimes\.com/ad[sx]/', '\busatoday\.com/sponsors/', '\busatoday\.com/RealMedia/ads/', '\bmsads\.net/ads/', '\bmediaplex\.com/ads/', '\batdmt\.com/[a-z]/', '\bview\.atdmt\.com/', '\bADSAdClient31\.dll\b', ) ; # If set, replace banned images with 1x1 transparent GIF. This also replaces # all images with the same if $TEXT_ONLY is set. # Note that setting this makes the response a little slower, since the browser # must still retrieve the empty GIF. 
$RETURN_EMPTY_GIF= 0 ; # To use an external program to decide whether or not a user at a given IP # address may use this proxy (as opposed to using server configuration), set # $USER_IP_ADDRESS_TEST to either the name of a command-line program that # performs this test, or a queryable URL that performs this test (e.g. a CGI # script). # For a command-line program: The program should take a single argument, the # IP address of the user. The output of the program is evaluated as a # number, and if the number is non-zero then the IP address of the user is # allowed; thus, the output is typically either "1" or "0". Note that # depending on $ENV{PATH}, you may need to enter the path here explicitly. # For a queryable URL: Specify the start of the URL here (must begin with # "http://"), and the user's IP address will be appended. For example, the # value here may contain a "?", thus putting the IP address in the # QUERY_STRING; it could also be in PATH_INFO. The response body from the # URL should be a number like for a command line program, above. $USER_IP_ADDRESS_TEST= '' ; # To use an external program to decide whether or not a destination server is # allowed (as opposed to using @ALLOWED_SERVERS and @BANNED_SERVERS above), # set $DESTINATION_SERVER_TEST to either the name of a command-line program # that performs this test, or a queryable URL that performs this test (e.g. a # CGI script). # For a command-line program: The program should take a single argument, the # destination server's name or IP address (depending on how the user enters # it). The output of the program is evaluated as a number, and if the number # is non-zero then the destination server is allowed; thus, the output is # typically either "1" or "0". Note that depending on $ENV{PATH}, you may # need to enter the path here explicitly. # For a queryable URL: Specify the start of the URL here (must begin with # "http://"), and the destination server's name or IP address will be # appended. For example, the value here may contain a "?", thus putting the # name or address in the QUERY_STRING; it could also be in PATH_INFO. The # response body from the URL should be a number like for a command line # program, above. $DESTINATION_SERVER_TEST= '' ; # If either $INSERT_HTML or $INSERT_FILE is set, then that HTML text or the # contents of that named file (respectively) will be inserted into any HTML # page retrieved through this proxy. $INSERT_HTML takes precedence over # $INSERT_FILE. # When viewing a page with frames, a new top frame is created and the # insertions go there. # NOTE: Any HTML you insert should not have relative URLs in it! The problem # is that there is no appropriate base URL to resolve them with. So only use # absolute URLs in your insertion. (If you use relative URLs anyway, then # a) if $ANONYMIZE_INSERTION is set, they'll be resolved relative to this # script's URL, which isn't great, or b) if $ANONYMIZE_INSERTION==0, # they'll be unchanged and the browser will simply resolve them relative # to the current page, which is usually worse.) # The frame handling means that it's fairly easy for a surfer to bypass this # insertion, by pretending in effect to be in a frame. There's not much we # can do about that, since a page is retrieved the same way regardless of # whether it's in a frame. This script uses a parameter in the URL to # communicate to itself between calls, but the user can merely change that # URL to make the script think it's retrieving a page for a frame. 
Also, # many browsers let the user expand a frame's contents into a full window. # [The warning in earlier versions about setting $INSERT_HTML to '' when using # mod_perl and $INSERT_FILE no longer applies. It's all handled elsewhere.] # As with $INSERT_ENTRY_FORM, note that any insertion may throw off any # precise layout, and the insertion is subject to background colors and # other page-wide settings. #$INSERT_HTML= "<h1>This is an inserted header</h1>" ; #$INSERT_FILE= 'insert_file_name' ; # If your insertion has links that you want anonymized along with the rest # of the downloaded HTML, then set this to 1. Otherwise leave it at 0. $ANONYMIZE_INSERTION= 0 ; # If there's both a URL entry form and an insertion via $INSERT_HTML or # $INSERT_FILE on the same page, the entry form normally goes at the top. # Set this to put it after the other insertion. $FORM_AFTER_INSERTION= 0 ; # If the insertion is put in a top frame, then this is how many pixels high # the frame is. If the default of 80 or 50 pixels is too big or too small # for your insertion, change this. You can use percentage of screen height # if you prefer, e.g. "20%". (Unfortunately, you can't just tell the # browser to "make it as high as it needs to be", but at least the frame # will be resizable by the user.) # This affects insertions by $INSERT_ENTRY_FORM, $INSERT_HTML, and $INSERT_FILE. # The default here usually works for the inserted entry form, which varies in # size depending on $ALLOW_USER_CONFIG. It also varies by browser. $INSERTION_FRAME_HEIGHT= $ALLOW_USER_CONFIG ? 80 : 50 ; # NOTE THAT YOU SHOULD BE RUNNING CGIPROXY ON A SECURE SERVER! # Note also that the meaning of '' has changed-- now, all ports except 80 # are assumed to be using SSL. # Set this to 1 if the script is running on an SSL server, i.e. it is # accessed through a URL starting with "https:"; set this to 0 if it's not # running on an SSL server. This is needed to know how to route URLs back # through the proxy. Regrettably, standard CGI does not yet provide a way # for scripts to determine this without help. # If this variable is set to '' or left undefined, then the program will # guess: SSL is assumed if SERVER_PORT is not 80. This fails when using # an insecure server on a port other than 80, or (less commonly) when an SSL server # uses port 80, but usually it works. Besides being a good default, it lets # you install the script where both a secure server and a non-secure server # will serve it, and it will work correctly through either server. # This has nothing to do with retrieving pages that are on SSL servers. $RUNNING_ON_SSL_SERVER= '' ; # If your server doesn't support NPH scripts, then set this variable to true # and try running the script as a normal non-NPH script. HOWEVER, this # won't work as well as running it as NPH; there may be bugs, maybe some # privacy holes, and results may not be consistent. It's a hack. # Try to install the script as NPH before you use this option, because # this may not work. NPH is supported on almost all servers, and it's # usually very easy to install a script as NPH (on Apache, for example, # you just need to name the script something starting with "nph-"). # One example of a problem is that Location: headers may get messed up, # because they mean different things in an NPH and a non-NPH script. # You have been warned. # For this to work, your server MUST support the "Status:" CGI response # header. $NOT_RUNNING_AS_NPH= 0 ; # Set HTTP and SSL proxies if needed. Also see $USE_PASSIVE_FTP_MODE below. # The format of the first two variables is "host:port", with the port being # optional. The format of $NO_PROXY is a comma-separated list of hostnames # or domains: any request for a hostname that ends in one of the strings in # $NO_PROXY will not use the HTTP or SSL proxy; e.g. use ".mycompany.com" to # avoid using the proxies to access any host in the mycompany.com domain. 
# The environment variables in the examples below are appropriate defaults, # if they are available. Note that earlier versions of this script used # the environment variables directly, instead of the $HTTP_PROXY and # $NO_PROXY variables we use now. # Sometimes you can use the same proxy (like Squid) for both SSL and normal # HTTP, in which case $HTTP_PROXY and $SSL_PROXY will be the same. # $NO_PROXY applies to both SSL and normal HTTP proxying, which is usually # appropriate. If there's demand to differentiate those, it wouldn't be # hard to make a separate $SSL_NO_PROXY option. #$HTTP_PROXY= $ENV{'http_proxy'} ; #$SSL_PROXY= 'firewall.example.com:3128' ; #$NO_PROXY= $ENV{'no_proxy'} ; # If your HTTP and SSL proxies require authentication, this script supports # that in a limited way: you can have a single username/password pair per # proxy to authenticate with, regardless of realm. In other words, multiple # realms aren't supported for proxy authentication (though they are for # normal server authentication, elsewhere). # Set $PROXY_AUTH and $SSL_PROXY_AUTH either in the form of "username:password", # or to the actual base64 string that gets sent in the Proxy-Authorization: # header. Often the two variables will be the same, when the same proxy is # used for both SSL and normal HTTP. #$PROXY_AUTH= 'Aladdin:open sesame' ; #$SSL_PROXY_AUTH= $PROXY_AUTH ; # This is one way to handle pages that don't work well, by redirecting to other working # versions of the pages (for example, to a mobile version or another version that # doesn't have much JavaScript). How it works: If the current domain matches one # of the keys of %REDIRECTS, then s/// (string substitution) is done on the URL, # using the match and replacement patterns in the 2-element value array. # The sites handled this way are Facebook and Gmail, since they don't # always work well, or are slow, through CGIProxy. If you want to access # them normally, then comment out or remove the line(s) below for that site. # If you want to redirect more sites, you can add records to the %REDIRECTS # hash in the following way: Set the hash key to the name of the server you # want to redirect, and the value to a reference to a 2-element array containing # the left and right sides of an s/// string substitution. If that doesn't make # sense, then try to emulate an example below. %REDIRECTS= ( 'www.facebook.com' => [qr#^https?://www\.facebook\.com#i, 'https://m.facebook.com'], 'mail.google.com' => [qr#^https?://mail\.google\.com/.*shva=\w*1.*$#i, 'https://mail.google.com/?ui=html'] ) ; # Here's an experimental feature that may or may not be useful. It's trivial # to add, so I added it. It was inspired in part by Mike Reiter's and Avi # Rubin's "Crowds", at http://www.research.att.com/projects/crowds/ . # Let me know if you find a use for it. # The idea is that you have a number of mutually-trusting, cooperating # proxies that you list in @PROXY_GROUP. If that is set, then instead # of rerouting all URLs back through this proxy, the script will choose # one of these proxies at random to reroute all URLs through, for each # run. This could be used to balance the load among several proxies, for # example. Under certain conditions it could conceivably help privacy by # making it harder to track a user's session, but under certain other # conditions it could make it easier, depending on how many people, # proxies, and proxy servers are involved. 
For each page, both its # included images and followed links will go through the same proxy, so a # clever target server could determine which proxy servers are in each # group. # proxy_encode() and proxy_decode() must be the same for all proxies in the # group. Same goes for pack_flags() and unpack_flags() if you modified them, # and probably certain other routines and configuration options. # Cookies and Basic authentication can't be supported with this, sorry, since # cookies can only be sent back to the proxy that created them. # Set this to a list of absolute URLs of proxies, ending with "nph-proxy.cgi" # (or whatever you named the script). Be sure to include the URL of this # proxy, or it will never redirect back through here. Each proxy in the # group should have the same @PROXY_GROUP. # Alternately, you could set each proxy's @PROXY_GROUP differently for more # creative configuration, such as to balance the load unevenly, or to send # users through a "round-robin" cycle of proxies. #@PROXY_GROUP= ('http://www.example.com/~grommit/proxy/nph-proxy.cgi', # 'http://www.fnord.mil/langley/bavaria/atlantis/nph-proxy.cgi', # 'http://www.nothinghere.gov/No/Such/Agency/nph-proxy.cgi', # ) ; # Normally, your browser stores all pages you download in your computer's # hard drive and memory, in the "cache". This saves a lot of time and # bandwidth the next time you view the page (especially with images, which # are bigger and may be shared among several pages). However, in some # situations you may not want the pages you've visited to be stored. If # $MINIMIZE_CACHING is set, then this proxy will try its best to prevent any # caching of anything retrieved through it. # NOTE: This cannot guarantee that no caching will happen. All we can do is # instruct the browser not to cache anything. A faulty or malicious browser # could cache things anyway if it chose to. # NOTE: This has nothing to do with your browser's "history list", which may # also store a list of URLs you've visited. # NOTE: If you use this, you will use a lot more bandwidth than without it, # and pages will seemingly load slower, because if a browser can't cache # anything locally then it has to load everything across the network every # time it needs something. $MINIMIZE_CACHING= 0 ; # Normally, each cookie includes an expiration time/date, and the cookie stays # in effect until then, even after you exit your browser and restart it # (which normally means the cookie is stored on the hard drive). Any cookie # that has no explicit expiration date is a "session cookie", and stays in # effect only as long as the browser is running, and presumably is forgotten # after that. If you set $SESSION_COOKIES_ONLY=1, then *all* cookies that # pass through this proxy will be changed to session cookies. This is useful # at a public terminal, or wherever you don't want your cookies to remain # after you exit the browser. # NOTE: The clock on the server where this runs must be correct for this # option to work right! It doesn't have to be exact, but don't have it off # by hours or anything like that. The problem is that we must not alter any # cookies set to expire in the past, because that's how sites delete cookies. # If a cookie is being deleted, we DON'T want to turn it into a session # cookie. So this script will not alter any cookies set to expire before the # current time according to the system clock. 
$SESSION_COOKIES_ONLY= 0 ; # Cookies have a URL path associated with them; it determines which URLs on a # server will receive the cookie in requests. If the path is not specified # when the cookie is created, then the path is supposed to default to the # path of the URL that the cookie was retrieved with, according to the # cookie specification from Netscape. Unfortunately, most browsers seem # to ignore the spec and instead give cookies a default path of "/", i.e. # "send this cookie with all requests to this server". So, *sigh*, this # script uses "/" as the default path also. If you want this script to # follow the specification instead, then set this variable to true. $COOKIE_PATH_FOLLOWS_SPEC= 0 ; # Technically, cookies must have a domain containing at least two dots if the # TLD is one of the main non-national TLD's (.com, .net, etc.), and three # dots otherwise. This is to prevent malicious servers from setting cookies # for e.g. the entire ".co.uk" domain. Unfortunately, this prescribed # behavior does not accommodate domains like ".google.de". Thus, browsers # seem to not require three dots, and thus, this script will do the same by # default. Set $RESPECT_THREE_DOT_RULE if you want the strictly correct # behavior instead. $RESPECT_THREE_DOT_RULE= 0 ; # Set $USER_AGENT to something generic like this if you want to be extra # careful. Conceivably, revealing which browser you're using may be a # slight privacy or security risk. # However, note that some URLs serve different pages depending on which # browser you're using, so some pages will change if you set this. # This defaults to the user's HTTP_USER_AGENT. #$USER_AGENT= 'Mozilla/4.05 [en] (X11; I; Linux 2.0.34 i586)' ; # FTP transfers can happen in either passive or non-passive mode. Passive # mode works better if the client (this script) is behind a firewall. Some # people consider passive mode to be more secure, too. But in certain # network configurations, if this script has trouble connecting to FTP # servers, you can turn this off to try non-passive mode. # See http://cr.yp.to/ftp/security.html for a discussion of security issues # regarding passive and non-passive FTP. $USE_PASSIVE_FTP_MODE= 1 ; # Unlike a normal browser which can keep an FTP session open between requests, # this script must make a new connection with each request. Thus, the # FTP welcome message (e.g. the README file) will be received every time; # there's no way for this script to know if you've been here before. Set # $SHOW_FTP_WELCOME to true to always show the welcome message, or false # to never show it. $SHOW_FTP_WELCOME= 1 ; # If set, then modify script content (like JavaScript) as well as possible # such that network accesses go through this proxy script. If not set, then # allow script content to pass unmodified, assuming it's not being removed. # Currently, JavaScript is the only script content that's proxified. # If this is set, and if you modify proxy_encode() and proxy_decode(), then # you MUST modify the JavaScript routines in $ENCODE_DECODE_BLOCK_IN_JS also. # NOTE: This proxification of script content may not be perfect. It's pretty # good, but it may be possible to construct malicious JavaScript that reveals # your identity to the server. The purpose of this feature is more to allow # scripts to function through the proxy, than to provide bulletproof # anonymity. # The best advice remains: FOR BEST ANONYMITY, BROWSE WITH SCRIPTS TURNED OFF. 
$PROXIFY_SCRIPTS= 1 ; # If set, then modify ShockWave Flash resources as well as possible such that # network accesses go through this proxy script. If not set, then allow # SWF resources to pass unmodified. # NOTE: This is still experimental, and the modified SWF apps are sometimes # much slower than the unproxified SWF apps. If this is turned on, then # Web pages with SWF may run much more slowly and possibly bog down # your browser, even if the rest of the page is fast. Remember that SWF # apps are pretty common in ads and other places in the page that we tend # to ignore. $PROXIFY_SWF= 1 ; # To support video in Flash 9+, this program spawns a specialized RTMP proxy # daemon that listens on a port (1935 if possible) and dies after 10 minutes # of no connections. This is useful, but some sysadmins may not like it. # If you want to prevent the daemon, set $ALLOW_RTMP_PROXY=0 . Note that # Flash 9+ video won't always work if you do so. # As of release 2.1, the RTMP proxy isn't used yet, so turn it off. $ALLOW_RTMP_PROXY= 0 ; # Though JavaScript is by far the most common kind of script, there are other # kinds too, such as Microsoft's VBScript. This program proxifies JavaScript # content, but not other script content, which means those other scripts # could open privacy holes. Thus, the default behavior of this program is # to remove those other scripts. Set this variable to true if you'd rather # let those scripts through. # How this works with $REMOVE_SCRIPTS and the "remove scripts" user checkbox: # If $ALLOW_UNPROXIFIED_SCRIPTS is false, then unsupported scripts will # always be removed. If it is true, then it is subject to those other # settings, just like supported script types are. # For now, this also controls whether unproxified SWF (Flash) apps are allowed # through the proxy. This means that by default, SWF apps are removed # from pages. This is the safest, but may leave some pages looking # incomplete. If you want to display SWF apps, then you need to set either # $PROXIFY_SWF or $ALLOW_UNPROXIFIED_SCRIPTS . This arrangement can change # if there is demand. $ALLOW_UNPROXIFIED_SCRIPTS= 0 ; # Comments may contain HTML in them, which shouldn't be rendered but may be # relevant in some other way. Set this flag if you want the contents of # comments to be proxified like the rest of the page, i.e. proxify URLs, # stylesheets, scripts, etc. $PROXIFY_COMMENTS= 0 ; # Apparently, some censoring filters search outgoing request URIs, but not # POST request bodies. Set this to make the initial input form submit # using POST instead of GET. $USE_POST_ON_START= 1 ; # If this is set, then the URL the user enters in the start form or the top # form will be encoded by _proxy_jslib_proxy_encode() before it's submitted. # This can keep the URL the user visits private. # Note that if you set this, you need to modify proxy_encode() above (along # with proxy_decode() and the two analogous JavaScript routines) if you # want the URL to actually be encoded to something non-obvious. $ENCODE_URL_INPUT= 1 ; # Apparently, some censoring filters look at titles on HTML pages. Set this # to remove HTML page titles. # Note that this does NOT remove titles that are generated by script content, # since those would have no effect on a filter. $REMOVE_TITLES= 0 ; # If set, this option prevents a user from calling the proxy through the # proxy itself, i.e. looping. It's normally a mistake on the user's part, # and a waste of resources. # This isn't foolproof; it just catches the obvious mistakes. 
It's probably # pretty easy for a malicious user to make the script call itself, or s/he # can always use two proxies to call each other in a loop. This doesn't # account for IP addresses or multiple hostnames for the same server. $NO_BROWSE_THROUGH_SELF= 0 ; # Set this to leave out the "Restart" link at the bottom of error pages, etc. # In some situations this could make it harder for search engines to find the # start page. $NO_LINK_TO_START= 0 ; # For the obscure case when a POST must be repeated because of user # authentication, this is the max size of the request body that this # script will store locally. If CONTENT_LENGTH is bigger than this, # the body's not saved at all-- the first POST will be correct, but # the second will not happen at all (since a partial POST is worse than # nothing). $MAX_REQUEST_SIZE= 4194304 ; # that's 4 Meg to you and me # When handling HTML resources, CGIProxy downloads the entire resource before # modifying it and returning it to the client. However, some operations # (such as time-intensive queries) return the first part of a page while # still generating the last part. On such pages, the user might like to # see that first part without waiting for the entire response, which they # would normally have to do when using CGIProxy. So, if this option is set, # then CGIProxy will return proxified HTML parts as soon as it receives them # from the server. This is less efficient; for example, it means that every # page will have the JavaScript library inserted, even if it's not needed # (though that wouldn't be too bad since the library is normally cached # anyway). So, we want to do this only for certain pages and not for all. # Thus, set this to a list of patterns that match URLs you want to handle # this way. The patterns work like @ALLOWED_SERVERS and @BANNED_SERVERS # above, in that they're lists of Perl 5 regular expressions. See the # comments there for details. # The sample webfeat.org pattern is appropriate for libraries who use the # WebFeat service. #@TRANSMIT_HTML_IN_PARTS_URLS= ( # '^https?://search3\.webfeat\.org/cgi-bin/WebFeat\.dll', # ) ; # Normally, if a user tries to access a banned server or use an unsupported # scheme (protocol), this script will alert the user with a warning page, and # either allow the user to click through to the URL unprotected (i.e. without # using the proxy), or ban access altogether. However, in some VPN-like # installations, it may more desirable to let users follow links from # protected pages (e.g. within an intranet) that lead to unprotected, # unproxified pages (e.g. pages outside of the intranet), with no breaks in # the browsing experience. (This example assumes the proxy owner intends it # to be used for browsing only the intranet and not the Internet at large.) # Set $QUIETLY_EXIT_PROXY_SESSION to skip any warning message and let the # user surf directly to unproxified pages from proxified pages. Note that # this somewhat changes the meaning of @ALLOWED_SERVERS and @BANNED_SERVERS-- # they're not allowed or banned per se, it's just whether this proxy is # willing to handle their traffic. @BANNED_NETWORKS is unaffected, however, # since the IP ranges it contains often make no sense outside of the LAN. # WARNING: DO *NOT* SET THIS FLAG IF ANONYMITY IS IMPORTANT AT ALL!!! IT IS # NOT MEANT FOR THAT KIND OF INSTALLATION. IF THIS IS SET, THEN USERS WILL # SURF INTO UNPROXIFIED, UNANONYMIZED PAGES WITH NO WARNING, AND THEIR # PRIVACY WILL BE COMPROMISED; THEY MAY NOT EVEN NOTICE FOR A LONG TIME. 
# THIS IS EXACTLY WHAT ANONYMIZING PROXIES ARE CREATED TO AVOID. $QUIETLY_EXIT_PROXY_SESSION= 0 ; # WARNING: # EXCEPT UNDER RARE CIRCUMSTANCES, ANY PROXY WHICH HANDLES SSL REQUESTS # SHOULD *ONLY* RUN ON AN SSL SERVER!!! OTHERWISE, YOU'RE RETRIEVING # PROTECTED PAGES BUT SENDING THEM BACK TO THE USER UNPROTECTED. THIS # COULD EXPOSE ANY INFORMATION IN THOSE PAGES, OR ANY INFORMATION THE # USER SUBMITS TO A SECURE SERVER. THIS COULD HAVE SERIOUS CONSEQUENCES, # EVEN LEGAL CONSEQUENCES. IT UNDERMINES THE WHOLE PURPOSE OF SECURE # SERVERS. # THE *ONLY* EXCEPTION IS WHEN YOU HAVE *COMPLETE* TRUST OF THE LINK # BETWEEN THE BROWSER AND THE SERVER THAT RUNS THE SSL-HANDLING PROXY, # SUCH AS ON A CLOSED LAN, OR IF THE PROXY RUNS ON THE SAME MACHINE AS # THE BROWSER. # IF YOU ARE ABSOLUTELY SURE THAT YOU TRUST THE USER-TO-PROXY LINK, YOU # CAN OVERRIDE THE AUTOMATIC SECURITY MEASURE BY SETTING THE FLAG BELOW. # CONSIDER THE CONSEQUENCES VERY CAREFULLY BEFORE YOU RUN THIS SSL-ACCESSING # PROXY ON AN INSECURE SERVER!!! $OVERRIDE_SECURITY= 0 ; # Stuff below here you probably shouldn't modify unless you're messing with # the code. # This lists all MIME types that could identify a script, and which will be # filtered out as well as possible if removing scripts: HTTP responses with # Content-Type: set to one of these will be nixed, certain HTML which links # to one of these types will be removed, style sheets with a type here will # be removed, and other odds and ends. # These are used in matching, so can't contain special regex characters. # This list is also used for the the $PROXIFY_SCRIPTS function. # This list contains all script MIME types I know of, but I can't guarantee # it's a complete list. It's largely taken from the examples at # http://www.robinlionheart.com/stds/html4/scripts.html # That page describes only the first four below as valid. # The page at ftp://ftp.isi.edu/in-notes/iana/assignments/media-types/media-types # lists all media (MIME) types registered with the IANA, but unfortunately # many script types (especially proprietary ones) have not registered with # them, and that list doesn't specify which types are script content anyway. @SCRIPT_MIME_TYPES= ('application/x-javascript', 'application/x-ecmascript', 'application/x-vbscript', 'application/x-perlscript', 'application/javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'text/jscript', 'text/livescript', 'text/vbscript', 'text/vbs', 'text/perlscript', 'text/tcl', 'text/x-scriptlet', 'text/scriptlet', 'application/hta', 'application/x-shockwave-flash', ) ; # All MIME types in @SCRIPT_MIME_TYPES and @OTHER_TYPES_TO_REGISTER will be # "registered". Registration helps the script remember which MIME type is # expected by a page when downloading embedded URLs, e.g. style sheets. Any # MIME types that need special treatment should be listed here if they're not # already in @SCRIPT_MIME_TYPES. # If you write a handler for a new MIME type in proxify_block(), and that type # isn't already listed in @SCRIPT_MIME_TYPES, then add it here. # The Perl code in this program supports up to 64 registered MIME types, but # the JS _proxy_jslib_pack_flags() and _proxy_jslib_unpack_flags() routines # only support 26. Thus, fix the JS code if there's ever more than 26 types. # "x-proxy/xhr" is a special case-- it's used to support the JavaScript class # XMLHttpRequest . Data downloaded through that should not be proxified, # even if it's HTML data; it's proxified later when it's added to a document. 
# Using the "x-proxy/xhr" type is part of avoiding that first proxification. @OTHER_TYPES_TO_REGISTER= ('text/css', 'x-proxy/xhr') ; # These are MIME types that we *may* try to rewrite in proxify_block(), e.g. # to send all URLs back through this script. If a type isn't on this list, # then we know for certain it should be sent back to the user unchanged, # which saves time. # If you write a handler for a new MIME type in proxify_block(), then add the # type here. # NOT all the types here are actually supported at this time! # text/html is not on this list because currently it's handled specially. @TYPES_TO_HANDLE= ('text/css', 'application/x-javascript', 'application/x-ecmascript', 'application/javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'text/livescript', 'text/jscript', 'application/x-shockwave-flash', ) ; # This is a list of all file extensions that will be disallowed if # $TEXT_ONLY is set. It's an inexact science. If you want to ban # other file extensions, you can add more to this list. Note that # removing extensions from this list won't necessarily allow those # files through, since there are other ways $TEXT_ONLY is implemented, # such as only allowing MIME types of text/* . # The format of this list is one long string, with the extensions # separated by "|". This is because the string is actually used as # a regular expression. Don't worry if you don't know what that means. # Extensions are roughly taken from Netscape's "Helper Preferences" screen # (but that was in 1996). A more complete list might be made from a # mime.types file. $NON_TEXT_EXTENSIONS= 'gif|jpeg|jpe|jpg|tiff|tif|png|bmp|xbm' # images . '|mp2|mp3|wav|aif|aiff|au|snd' # audios . '|avi|qt|mov|mpeg|mpg|mpe' # videos . '|gz|Z|exe|gtar|tar|zip|sit|hqx|pdf' # applications . '|ram|rm|ra|swf' ; # others $PROXY_VERSION= '2.1.6' ; #-------------------------------------------------------------------------- # End of normal user configuration. # Now, set or adjust all globals that remain constant for all runs. #-------------------------------------------------------------------------- # First, set various constants. # Convert $RUN_AS_USER to a numeric UID if needed. no warnings 'numeric' ; $RUN_AS_USER= getpwnam($RUN_AS_USER) if $RUN_METHOD eq 'embedded' and $RUN_AS_USER==0 and $^O!~ /win/i ; use warnings 'numeric' ; # Use local::lib if so configured. Don't use it when installing modules # or when purging the database. if ($LOCAL_LIB_DIR and $ARGV[0] ne 'install-modules' and $ARGV[0] ne 'purge-db') { push(@INC, File::Spec->catdir($LOCAL_LIB_DIR, qw(lib perl5))) ; eval { require local::lib ; local::lib->import($LOCAL_LIB_DIR) } ; # ignore errors } # Allow installer to set $DB_DRIVER="MySQL" in config. $DB_DRIVER= 'mysql' if lc($DB_DRIVER) eq 'mysql' ; # These are used in rfc1123_date() and date_is_after(). @MONTH= qw(Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec) ; @WEEKDAY= qw(Sun Mon Tue Wed Thu Fri Sat Sun) ; %UN_MONTH= map { lc($MONTH[$_]), $_ } 0..$#MONTH ; # look up by month name # Create the sets of regular expressions we'll need if we proxify scripts. # So far, the only script type we proxify is JavaScript. &set_RE_JS if $PROXIFY_SCRIPTS ; # Next, make copies of any constant environment variables, and fix as needed. # SERVER_PORT and SCRIPT_NAME will be constant, and are used in several places. # Besides, we need SCRIPT_NAME fixed before setting $THIS_SCRIPT_URL. 
# SCRIPT_NAME should have a leading slash, but the old CGI "standard" from # NCSA was unclear on that, so some servers didn't give it a leading # slash. Here we ensure it has a leading slash. # Exception: If SCRIPT_NAME is empty, then we're using a daemon, so leave it empty. # Apache has a bug where SCRIPT_NAME is wrong if the PATH_INFO has "//" in it; # it's set to the script name plus all of PATH_INFO up until its final "//". # To work around this, truncate SCRIPT_NAME at the first place it matches $0. # PATH_INFO is also changed to collapse all multiple slashes into a single # slash, which is not worked around here. This bug should be fixed in # Apache 2.0.55 and later. # Some servers provide $0 as a complete path rather than just the filename, # so extract the filename. $ENV{SCRIPT_NAME}=~ s#^/?#/# if $ENV{SCRIPT_NAME} ne '' ; if ($ENV{SERVER_SOFTWARE}=~ /^Apache\b/i) { my($zero)= $0=~ m#([^/]*)$# ; ($ENV{SCRIPT_NAME})= $ENV{SCRIPT_NAME}=~ /^(.*?\Q$zero\E)/ if $zero ne '' ; } $ENV_SERVER_PORT= $ENV{SERVER_PORT} ; $ENV_SCRIPT_NAME= $ENV{SCRIPT_NAME} ; # The nginx server sets SCRIPT_NAME to the entire request-URI, so fix it. # Must do this only on $ENV_SCRIPT_NAME and not $ENV{SCRIPT_NAME}, because # later we'll need the latter to get PATH_INFO. :P if ($ENV{SERVER_SOFTWARE}=~ /^nginx\b/i) { if ($RUN_METHOD eq 'fastcgi') { $ENV_SCRIPT_NAME= '/' . $SECRET_PATH ; } else { my($zero)= $0=~ m#([^/]*)$# ; ($ENV_SCRIPT_NAME)= $ENV_SCRIPT_NAME=~ /^(.*?\Q$zero\E)/ if $zero ne '' ; } } # If we're running as the embedded server, use $SECRET_PATH . $ENV_SCRIPT_NAME= '/' . $SECRET_PATH if $RUN_METHOD eq 'embedded' ; # Next, adjust config variables as needed, or create any needed constants from # them. # Create @BANNED_NETWORK_ADDRS from @BANNED_NETWORKS. # No error checking; assumes the proxy owner set @BANNED_NETWORKS correctly. @BANNED_NETWORK_ADDRS= () ; for (@BANNED_NETWORKS) { push(@BANNED_NETWORK_ADDRS, pack('C*', /(\d+)/g)) ; } # For the external tests, create hashes of parsed URLs if the tests are CGI calls. # Note that the socket names must each be unique! @{$USER_IP_ADDRESS_TEST_H}{qw(host port path socket open)}= (lc($1), ($2 eq '' ? 80 : $2), $3, 'S_USERTEST', 0) if ($USER_IP_ADDRESS_TEST=~ m#http://([^/?:]*):?(\d*)(.*)#i) ; @{$DESTINATION_SERVER_TEST_H}{qw(host port path socket open)}= (lc($1), ($2 eq '' ? 80 : $2), $3, 'S_DESTTEST', 0) if ($DESTINATION_SERVER_TEST=~ m#http://([^/?:]*):?(\d*)(.*)#i) ; # Require a full path in $PROXY_DIR. # Currently only used when using embedded server, but that may change. # Use different patterns for Windows vs. everything else. die "Must use full directory path in \$PROXY_DIR setting; currently set to \"$PROXY_DIR\".\n" if $RUN_METHOD eq 'embedded' and $PROXY_DIR!~ ($^O=~ /win/i ? qr#^([a-zA-Z]:)?[/\\]# : qr#^/#) ; # If $RUNNING_ON_SSL_SERVER is '', then guess based on SERVER_PORT. $RUNNING_ON_SSL_SERVER= ($ENV_SERVER_PORT!=80) if $RUNNING_ON_SSL_SERVER eq '' ; # Or, if we're a daemon, then it's always true. $RUNNING_ON_SSL_SERVER= 1 if $RUN_METHOD eq 'embedded' ; # $DB_DRIVER is required for $USE_DB_FOR_COOKIES to be true. $USE_DB_FOR_COOKIES= 0 unless $DB_DRIVER ne '' ; # Set this constant based on whether the server is IIS, because we have to # test it later for every run to work around a bug in IIS. A constant here # saves time when using mod_perl. $RUNNING_ON_IIS= ($ENV{'SERVER_SOFTWARE'}=~ /IIS/) ; # FastCGI doesn't support NPH scripts. :P $NOT_RUNNING_AS_NPH= 1 if $RUN_METHOD eq 'fastcgi' ; # Create @NO_PROXY from $NO_PROXY for efficiency. 
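# For example (hostnames here are made up), a setting of
#   $NO_PROXY= 'localhost, 127.0.0.1, .intranet.example.com' ;
#   becomes @NO_PROXY= ('localhost', '127.0.0.1', '.intranet.example.com'), and
#   any target host whose name ends in one of those strings is contacted
#   directly instead of through $HTTP_PROXY or $SSL_PROXY.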
@NO_PROXY= split(/\s*,\s*/, $NO_PROXY) ; # Base64-encode $PROXY_AUTH and $SSL_PROXY_AUTH if they're not encoded already. $PROXY_AUTH= &base64($PROXY_AUTH) if $PROXY_AUTH=~ /:/ ; $SSL_PROXY_AUTH= &base64($SSL_PROXY_AUTH) if $SSL_PROXY_AUTH=~ /:/ ; # Guarantee URLs in @PROXY_GROUP have no trailing slash. foreach (@PROXY_GROUP) { s#/$## } # Create $NO_CACHE_HEADERS depending on $MINIMIZE_CACHING setting; it is placed # in every response. Note that in all the "here documents" we use for error # messages, it has to go on the same line as another header to avoid a blank # line in the response. $NO_CACHE_HEADERS= $MINIMIZE_CACHING ? "Cache-Control: no-cache\015\012Pragma: no-cache\015\012" : '' ; # Canonicalize all MIME types to lowercase. for (@SCRIPT_MIME_TYPES) { $_= lc } for (@OTHER_TYPES_TO_REGISTER) { $_= lc } # Create @ALL_TYPES and %MIME_TYPE_ID, which are inverses of each other. # This is useful e.g. to identify the MIME type expected in a given download, # in a one-character flag. That's why we limit this to 64 types for now. # $ALL_TYPES[0] is '', so we can test e.g. "if $MIME_TYPE_ID{$id} ..." . @ALL_TYPES= ('', @SCRIPT_MIME_TYPES, @OTHER_TYPES_TO_REGISTER) ; &HTMLdie("Too many MIME types to register.") if @ALL_TYPES > 64 ; @MIME_TYPE_ID{@ALL_TYPES}= 0..$#ALL_TYPES ; # Regex that matches a script MIME type. $SCRIPT_TYPE_REGEX= '(' . join("|", @SCRIPT_MIME_TYPES) . ')' ; # Regex that tells us whether we handle a given MIME type. $TYPES_TO_HANDLE_REGEX= '(' . join("|", @TYPES_TO_HANDLE) . ')' ; # Only need to run this routine once $HAS_INITED= 1 ; # End of initialization of constants. } # sub init { #-------------------------------------------------------------------------- # Global constants are now set. Now do any initialization that is # required for every run. #-------------------------------------------------------------------------- # What used to be the "main" code has now been divided up between init() and # one_run() . sub one_run { # OK, let's time this thing #my $starttime= time ; #my($sutime,$sstime)= (times)[0,1] ; # This is needed to run an NPH script under mod_perl. # Other stuff needed for mod_perl: # must use at least Perl 5.004, or STDIN and STDOUT won't behave correctly; # cannot use exit(); # must initialize or reset all vars; # regex's with /o option retain state between calls, so be careful; # typeglobbing of *STDIN doesn't work, so must pass filehandles as strings. local($|)= 1 ; # In mod_perl, global variables are retained between calls, so they must # be initialized correctly. In this program, (most) UPPER_CASE variables # are persistent constants, i.e. they aren't changed after they're # initialized above (in the $HAS_BEGUN block). We also assume that no # lower_case variables are set before here. It's a little hacky and possibly # error-prone if user customizations don't follow these conventions, but it's # fast and simple. # So, if you're using mod_perl and you make changes to this script, don't # modify existing UPPER_CASE variables after the $HAS_BEGUN block above, # don't set lower_case variables before here, and don't use UPPER_CASE # variables for anything that will vary from run to run. reset 'a-z' ; $URL= '' ; # (almost) only uppercase variable that varies from run to run # Store $now rather than calling time() multiple times. $now= time ; # for (@goodmen) # Set $THIS_HOST to the best guess how this script was called-- use the # Host: request header if available; otherwise, use SERVER_NAME. 
# We don't bother with a $THIS_PORT, since it's more reliably set to the port # through which the script was called. SERVER_NAME is much more likely to # be different from the hostname that the user sees, since one server may # handle many domains or have many hostnames. # This has to be calculated every run, since there may be multiple hostnames. if ($ENV{'HTTP_HOST'} ne '') { ($THIS_HOST)= $ENV{'HTTP_HOST'}=~ m#^(?:[\w+.-]+://)?([^:/?]*)# ; $THIS_HOST= $ENV{'SERVER_NAME'} if $THIS_HOST eq '' ; } else { $THIS_HOST= $ENV{'SERVER_NAME'} ; } # Build the constant $THIS_SCRIPT_URL from environment variables. Only include # SERVER_PORT if it's not 80 (or 443 for SSL). $THIS_SCRIPT_URL= $RUNNING_ON_SSL_SERVER ? 'https://' . $THIS_HOST . ($ENV_SERVER_PORT==443 ? '' : ':' . $ENV_SERVER_PORT) . $ENV_SCRIPT_NAME : 'http://' . $THIS_HOST . ($ENV_SERVER_PORT==80 ? '' : ':' . $ENV_SERVER_PORT) . $ENV_SCRIPT_NAME ; # This script uses whatever version of HTTP the client is using. So far # only 1.0 and 1.1 are supported. ($HTTP_VERSION)= $ENV{'SERVER_PROTOCOL'}=~ m#^HTTP/(\d+\.\d+)#i ; $HTTP_VERSION= '1.0' unless $HTTP_VERSION=~ /^1\.[01]$/ ; # Hack to support non-NPH installation-- luckily, the format of a # non-NPH response is almost exactly the same as an NPH response. # The main difference is the first word in the status line-- something # like "HTTP/1.x 200 OK" can be simulated with "Status: 200 OK", as # long as the server supports the Status: CGI response header. So, # we set that first word to either "HTTP/1.x" or "Status:", and use # it for all responses throughout the script. # NOTE: This is not the only difference between an NPH and a non-NPH # response. For example, the Location: header has different semantics # between the two types of responses. This hack is only an approximation # that we hope works most of the time. It's better to install the script # as an NPH script if possible (which it almost always is). # Technically, the HTTP version in the response is supposed to be the highest # version supported by the server, even though the rest of the response may # be in the format of an earlier version. Unfortunately, CGI scripts do # not have access to that value; it's a hole in the CGI standard. $HTTP_1_X= $NOT_RUNNING_AS_NPH ? 'Status:' : "HTTP/$HTTP_VERSION" ; # Fix submitted by Alex Freed: Under some unidentified conditions, # instances of nph-proxy.cgi can hang around for many hours and drag the # system. So until we figure out why that is, here's a 10-minute timeout. # Please write me with any insight into this, since I can't reproduce the # problem. Under what conditions, on what systems, does it happen? # 9-9-1999: One theory is that it's a bug in older Apaches, and is fixed by # upgrading to Apache 1.3.6 or better. Julian Haight reports seeing the # same problem with other scripts on Apache 1.3.3, and it cleared up when # he upgraded to Apache 1.3.6. Let me know if you can confirm this. # alarm() is missing on some systems (such as Windows), so use eval{} to # avoid failing when alarm() isn't available. # As of version 2.1: We now only do this if we're running on Apache that is # earlier than version 1.3.6, to allow large downloads for everyone else. if ($ENV{'SERVER_SOFTWARE'}=~ m#^Apache/(\d+)\.(\d+)(?:\.(\d+))?#i) { if (($1<=>1 or $2<=>3 or $3<=>6) < 0) { $SIG{'ALRM'} = \&timeexit ; eval { alarm(600) } ; # use where it works, ignore where it doesn't } } # Exit upon timeout. If you wish, add code to clean up and log an error. 
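# For example (an illustrative sketch only-- the log path and message are
#   placeholders, and the stock version below just jumps to EXIT), a timeout
#   handler that records the event before exiting might look like:
#
#   sub timeexit {
#       if (open(my $log, '>>', '/tmp/nph-proxy-timeouts.log')) {
#           print $log scalar(localtime), " timed out: $ENV{REQUEST_METHOD} $ENV{SCRIPT_NAME}$ENV{PATH_INFO}\n" ;
#           close($log) ;
#       }
#       goto EXIT ;
#   }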
sub timeexit { goto EXIT } # Fix any environment variables that the server may have set wrong. # Note that some constant environment variables are copied to variables above, # and fixed there. # The IIS server doesn't set PATH_INFO correctly-- it sets it to the entire # request URI, rather than just the part after the script name. So fix it # here if we're running on IIS. Thanks to Dave Moscovitz for the info! $ENV{'PATH_INFO'} =~ s/^$ENV_SCRIPT_NAME// if $RUNNING_ON_IIS ; # The nginx server also doesn't set PATH_INFO, or even SCRIPT_NAME, correctly-- # it sets SCRIPT_NAME to the entire request URI, and PATH_INFO to nothing. So fix it. # $ENV_SCRIPT_NAME has earlier been set correctly. ($ENV{PATH_INFO}= $ENV{SCRIPT_NAME})=~ s/^\Q$ENV_SCRIPT_NAME\E// if $ENV{SERVER_SOFTWARE}=~ /^nginx\b/i ; # PATH_INFO may or may not be URL-encoded when we get it; it seems to vary # by server. This script assumes it's still encoded. Thus, if it's not, # we need to re-encode it. # The only time this seems to come up is when spaces are in URLs, correctly # represented in the URL as %20 but decoded to " " in PATH_INFO. Thus, # this hack only focuses on space characters. It's a hack that I'm not at # all comfortable with. :P # Very yucky business, this encoding thing. if ($ENV{'PATH_INFO'}=~ / /) { $ENV{'PATH_INFO'} =~ s/%/%25/g ; $ENV{'PATH_INFO'} =~ s/ /%20/g ; } # Protect with $SECRET_PATH when appropriate. if ($RUN_METHOD eq 'embedded' and !($ENV{'PATH_INFO'}=~ s#^/\Q$SECRET_PATH\E($|/)#$1#)) { select((select($STDOUT), $|=1)[0]) ; # unbuffer the socket print $STDOUT "HTTP/1.1 404 Not Found\015\012\015\012" ; die "exiting" ; } # Copy often-used environment vars into scalars, for efficiency $env_accept= $ENV{'HTTP_ACCEPT'} || '*/*' ; # may be modified later # PATH_INFO consists of a path segment of flags, followed by the encoded # target URL. For example, PATH_INFO might be something like # "/010100A/http/www.example.com". The actual format of the flag segment # is defined in the routine pack_flags(). # Thanks to Mike Harding for the idea of using another flag for the # $is_in_frame parameter, instead of using two parallel scripts. # Extract flags and encoded URL from PATH_INFO. ($packed_flags, $encoded_URL)= $ENV{'PATH_INFO'}=~ m#^/([^/]*)/?(.*)# ; # Set all $e_xxx variables ("effective-xxx") and anything else from flag # segment of PATH_INFO. If user config is not allowed or if flag segment # is not present, then set $e_xxx variables from hard-coded config variables # instead (but still set anything else as needed from PATH_INFO). if ( $ALLOW_USER_CONFIG && ($packed_flags ne '') ) { ($e_remove_cookies, $e_remove_scripts, $e_filter_ads, $e_hide_referer, $e_insert_entry_form, $is_in_frame, $expected_type)= &unpack_flags($packed_flags) ; } else { # $is_in_frame is set in any case. It indicates whether the current # request will be placed in a frame. ($e_remove_cookies, $e_remove_scripts, $e_filter_ads, $e_hide_referer, $e_insert_entry_form, $is_in_frame, $expected_type)= ($REMOVE_COOKIES, $REMOVE_SCRIPTS, $FILTER_ADS, $HIDE_REFERER, $INSERT_ENTRY_FORM, (&unpack_flags($packed_flags))[5..6] ) ; } # Set any other $e_xxx variables not from flag segment [none currently]. # Flags are now set, and $encoded_URL now contains only the encoded target URL. # Create a one-flag test for whether we're inserting anything into THIS page. # This must happen after user flags are read, just above. 
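# To make the PATH_INFO handling above concrete: for a PATH_INFO of
#   "/010100A/http/www.example.com", the regex m#^/([^/]*)/?(.*)# sets
#   $packed_flags to "010100A" and $encoded_URL to "http/www.example.com";
#   unpack_flags() then turns "010100A" into the individual $e_xxx settings,
#   the frame flag, and the expected MIME type.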
$doing_insert_here= !$is_in_frame && ( $e_insert_entry_form || ($INSERT_FILE ne '') || ($INSERT_HTML ne '') ) ; # One user reported problems with binary files on certain other OS's, and # this seemed to fix it. Supposedly, either this or the "binmode S" # statements below the newsocketto() calls work, or all; I'm putting all in. # Tell me anything new you figure out about this. binmode $STDOUT ; #-------------------------------------------------------------------------- # parse URL, make checks, and set various globals #-------------------------------------------------------------------------- # Calculate $url_start for use later in &full_url() and elsewhere. It's an # integral part of &full_url(), placed here for speed, similar to the # variables set in &fix_base_vars. # $url_start is the first part of every proxified URL. A complete proxified # URL is made by appending &wrap_proxy_encode(URL) (and possibly a #fragment) to # $url_start. $url_start normally consists of the current script's URL # (or one from @PROXY_GROUP), plus a flag segment in PATH_INFO, complete # with trailing slash. For example, a complete $url_start might be # "http://www.example.com/path/nph-proxy.cgi/010110A/" . # $url_start_inframe and $url_start_noframe are used to force the frame flag # on or off, for example when proxifying a link that causes frames to be # entered or exited. Otherwise, most links inherit the current frame state. # $script_url is used later for Referer: support, and whenever a temporary # copy of $url_start has to be generated. # In earlier versions of CGIProxy, $url_start was called $this_url, which is # really what it was originally. Its semantics had drifted somewhat since # then, so they have been cleaned up, and $url_start is now more descriptive. # Set $url_start to a random element of @PROXY_GROUP, if that is set. if (@PROXY_GROUP) { # srand is automatically called in Perl 5.004 and later. It might be # desirable to seed based on the URL, so that multiple requests for # the same URL go through the same proxy, and may thus be cached. #srand( unpack('%32L*', $ENV{'PATH_INFO'}) ) ; # seed with URL+flags $script_url= $PROXY_GROUP[ rand(scalar @PROXY_GROUP) ] ; } else { $script_url= $THIS_SCRIPT_URL ; } # Create $url_start and any needed variants: "$script_url/flags/" $url_start_inframe= $script_url . '/' . &pack_flags($e_remove_cookies, $e_remove_scripts, $e_filter_ads, $e_hide_referer, $e_insert_entry_form, 1, '') . '/' ; $url_start_noframe= $script_url . '/' . &pack_flags($e_remove_cookies, $e_remove_scripts, $e_filter_ads, $e_hide_referer, $e_insert_entry_form, 0, '') . '/' ; $url_start= $is_in_frame ? $url_start_inframe : $url_start_noframe ; # If there's no $encoded_URL, then start a browsing session. &show_start_form() if $encoded_URL eq '' ; # Decode the URL. $URL= &wrap_proxy_decode($encoded_URL) ; # Set the query string correctly, from $ENV{QUERY_STRING} and what's already # in $URL. # The query string may exist either within the encoded URL or in the containing # URL, as $ENV{QUERY_STRING}. If the former, then the query string was # (definitely?) in a referenced URL, while the latter most likely implies a # GET form input. # With Flash apps adding e.g. "?range=100-1000" to proxified URLs, both # query strings may be valid, so append $ENV{'QUERY_STRING'} to the end # of the URL appropriately. # Note that Netscape does not pass any query string data that is part of the # URL in the
attribute, which is probably correct behaviour. # For this program to act exactly the same, it would need to strip the # query string when updating all URLs, way below. $URL.= ($URL=~ /\?/ ? '&' : '?') . $ENV{'QUERY_STRING'} if $ENV{'QUERY_STRING'} ne '' ; # Parse the URL, using a regex modelled from the one in RFC 2396 (URI syntax), # appendix B. # This assumes a hierarchical scheme; it won't work for e.g. mailto: # "authority" is the combination of host, port, and possibly other info. # Note that $path here will also contain any query component; it's more like # the request URI. # Note that $URL is guaranteed to be an absolute URL with no "#" fragment, # though this does little error-checking. Note also that the old ";" # parameters are now included in the path component. ($scheme, $authority, $path)= ($URL=~ m#^([\w+.-]+)://([^/?]*)(.*)$#i) ; $scheme= lc($scheme) ; $path= "/$path" if $path!~ m#^/# ; # if path is '' or contains only query # If so configured, handle session cookies. # This all has to be done before calling xproxy() below, because some is # used for cookie management. if ($USE_DB_FOR_COOKIES) { # Attempt to get session cookies from HTTP_COOKIE . get_session_cookies() ; # Now that we're using a database, we need session IDs. 20 random alphanumeric # characters means one collision in roughly 10^18 simultaneous uses. # One session ID is itself a session-length cookie, and is used to store # session cookies and anything else we need to expire when the session ends; # the other cookie is persistent, and is used to store all persistent cookies. $session_id_persistent= random_string(20) unless $session_id_persistent=~ /^[\dA-Za-z]{20}$/ ; my $secure_clause= $RUNNING_ON_SSL_SERVER ? ' secure;' : '' ; # The persistent session ID lasts one hour after last use (should time be configurable?), # so a Set-Cookie: header will be sent with every response. $session_cookies= "Set-Cookie: S2=$session_id_persistent; expires=" . &rfc1123_date($now+3600, 1) . "; domain=$THIS_HOST; path=$ENV_SCRIPT_NAME/;$secure_clause HttpOnly\015\012" ; # Create and return non-persistent session cookie, if needed. if (!($session_id=~ /^[\dA-Za-z]{20}$/)) { $session_id= random_string(20) ; $session_cookies.= "Set-Cookie: S=$session_id; " . "domain=$THIS_HOST; path=$ENV_SCRIPT_NAME/;$secure_clause HttpOnly\015\012" ; } # Set $DBH, creating database if needed. connect_to_db() ; # Insert or update session records. update_session_record($session_id) ; update_session_record($session_id_persistent) ; # Verify their IP address hasn't changed. &HTMLdie("Connecting from wrong IP address.") unless verify_ip_address($session_id) ; &HTMLdie("Connecting from wrong IP address.") unless verify_ip_address($session_id_persistent) ; } # Magic here-- if $URL uses special scheme "x-proxy", immediately call the # general-purpose xproxy() routine. &xproxy($URL) if $scheme eq 'x-proxy' ; # Set $is_html if $path (minus query) ends in .htm or .html . # MSIE has a bug (and privacy hole) whereby URLs with QUERY_STRING ending # in .htm or .html are mistakenly treated as HTML, and thus could have # untranslated links, # or tags. This is most likely what the HTML author expects # anyway, though it violates the HTML spec. In this script, we should # over-proxify rather than under-proxify, so we'll end those blocks on # those end tags as browsers (erroneously) do. # Worse, Konqueror allows the string "" inside JS literal strings, # i.e. doesn't end the script block on them. 
Netscape does end the block # there, and both browsers end style blocks on embedded strings. # Because it's a given that we can't anonymize scripts completely, but # we do want to anonymize HTML completely, we'd rather accidentally # treat script content as HTML than the other way around. So err on # ending the " regardless of whether it's in a string. # (We'd end on " blocks, conditional comments, # intrinsic event attributes ("on___" attributes), script macros, and # the MSIE-specific "dynamic properties". These can be removed or # proxified, depending on the settings of $scripts_are_banned_here and # $PROXIFY_SCRIPTS. # Script content can also exist elsewhere when its MIME type is explicitly # given (for example, in a ') ; # Handle any declarations. # Declarations can contain URLs, such as for DTD's. Most legitimate # declarations would be safe if left unconverted, but if we don't # convert URLs then a malicious document could use this mechanism # to break privacy. Here we use a simple method to handle virtually # all existing cases and close all privacy holes. } elsif ($decl_bang) { my($inside, @words, $q, $rebuild) ; ($inside)= $decl_bang=~ /^]*)/ ; @words= $inside=~ /\s*("[^">]*"?|'[^'>]*'?|[^'"][^\s>]*)/g ; # Instead of handling all SGML declarations, the quick hack here is # to convert any "word" in it that looks like an absolute URL. It # handles virtually all existing cases well enough, and closes any # privacy hole regardless of the declaration. foreach (@words) { # Don't hammer on W3C's poor servers. next if m#^['"]?http://www\.w3\.org/#i ; if (m#^["']?[\w+.-]+://#) { if (/^"/) { $q= '"' ; s/^"|"$//g } elsif (/^'/) { $q= "'" ; s/^'|'$//g } else { $q= '' } $_= $q . &HTMLescape(&full_url(&HTMLunescape($_))) . $q ; $rebuild= 1 ; } } $decl_bang= '' if $rebuild ; push(@out, $decl_bang) ; # Handle any declarations, such as XML declarations. } elsif ($decl_question) { # Nothing needs to be done to these. push(@out, $decl_question) ; } # end of main if comment/script/style/declaration/tag block } continue { $first_script_pos= $out_start if $needs_jslib && !defined($first_script_pos) ; } # end of main while loop # @out now has proxified HTML # Finally, a few things might be inserted into the page, if we're proxifying # a full page and not just an HTML fragment. if ($is_full_page) { # Inserting anything (even a comment) before initial or # declarations confuses some browsers (like MSIE 6.0), so any # insertion should go after initial declarations. Thus, find # the point right after any such declarations. # Note that comments may be included in an XML prolog, so they're # matched here too. my($after_decl, $i) ; for ($i= 0; $i<@out; $i++) { next unless $out[$i]=~ /^ tag if available, else right after # the tag, else at the beginning. # Don't insert anything if there was no (non-whitespace) content, or # else tags won't work. splice(@out, ($body_pos || $html_pos || $after_decl), 0, $full_insertion) if $doing_insert_here && $has_content ; # If needed, insert " # strings within literal strings in JavaScript blocks. :P # NOTE: IF YOU MODIFY THIS ROUTINE, then be sure to review and possibly # modify the corresponding routine _proxy_jslib_proxify_block() in the # JavaScript library, far below in the routine return_jslib(). It is # a Perl-to-JavaScript translation of this routine. # ALSO: Depending what you change here, the routine _proxy_jslib_proxify_css() # may be affected. 
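# For a rough idea of what proxify_block() does to CSS (the hostnames and flag
#   segment below are made up), a declaration like
#       background: url("http://www.example.com/bg.png");
#   comes back as something like
#       background: url("http://www.proxy.example/nph-proxy.cgi/010110A/http/www.example.com/bg.png");
#   i.e. the URL inside url(...) is run through full_url() so the browser
#   fetches it back through this script.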
sub proxify_block { my($s, $type)= @_ ; if ($scripts_are_banned_here) { return undef if $type=~ /^$SCRIPT_TYPE_REGEX$/io ; } if ($type eq 'text/css') { # The only URIs in CSS2 are invoked with "url(...)" or "@import". # (Are there any more?) # Ugly regex, but gets virtually all real matches and is privacy-safe. # Hard part is handling "\"-escaping. See # http://www.w3.org/TR/REC-CSS2/syndata.html#uri # Hopefully we'll use a whole different approach in the new rewrite. $s=~ s/\burl\s*\(\s*(([^)]*\\\))*[^)]*)(\)|$)/ 'url(' . &css_full_url($1) . ')' /gie ; $s=~ s#\@import\s*("[^"]*"|'[^']*'|(?!url\s*\()[^;\s<]*)# '@import ' . &css_full_url($1) #gie ; # Proxify any strings inside "expression()" or "function()". $s= &proxify_expressions_in_css($s) if $s=~ /\b(?:expression|function)\s*\(/i ; return ($s, '') ; # JavaScript can be identified by any of these MIME types. :P The # "ecma" ones are the standard, the "javascript" and "livescript" ones # refer to Netscape's implementations, and the "jscript" one refers to # Microsoft's implementation. Until we need to differentiate, let's # treat them all the same here. } elsif ($type=~ m#^(application/x-javascript|application/x-ecmascript|application/javascript|application/ecmascript|text/javascript|text/ecmascript|text/livescript|text/jscript)$#i) { # Slight hack-- verify $PROXIFY_SCRIPTS is true, since this may be # called even when it's not true (e.g. style sheets of script type). return ($s, '') unless $PROXIFY_SCRIPTS ; return &proxify_js($s, 1) ; # ... which returns two values # Handle ShockWave Flash resources. } elsif ($type eq 'application/x-shockwave-flash') { return (&proxify_swf($s), '') if $PROXIFY_SWF ; # Remove if not $ALLOW_UNPROXIFIED_SCRIPTS . return ($s, '') if $ALLOW_UNPROXIFIED_SCRIPTS ; return ('', '') ; # For any non-supported script type, either remove it or pass it unchanged. } elsif ($type=~ /^$SCRIPT_TYPE_REGEX$/io) { return $ALLOW_UNPROXIFIED_SCRIPTS ? ($s, '') : ('', '') ; } else { # If we don't understand the type, return the block unchanged. # This would be a privacy hole, if we didn't check for script types # when $scripts_are_banned_here above. If later we want the option # of returning undef for an unknown type, we can add a parameter to # specify that. return ($s, '') ; } } # For CSS only: takes entire contents between parentheses in "url(...)", # extracts the URL therein (accounting for quotes, "\"-escaped chars, etc.), # and returns the full_url() of that, suitable for placing back inside # "url(...)", including all "\"-escaping, quotes, etc. :P # Preserve correct quotes, because this may be embedded in a larger quoted # context. # In external style sheets, relative URLs are resolved relative to the style # sheet, not the source HTML document. This makes it easy for us-- no # special $base_url handling. # NOTE: IF YOU MODIFY THIS ROUTINE, then be sure to review and possibly # modify the corresponding routine _proxy_jslib_css_full_url() in the # JavaScript library, far below in the routine return_jslib(). It is # (almost) a Perl-to-JavaScript translation of this routine. sub css_full_url { my($url)= @_ ; my($q) ; $url=~ s/\s+$// ; # leading spaces already stripped above if ($url=~ /^"/) { $q= '"' ; $url=~ s/^"|"$//g } # strip quotes elsif ($url=~ /^'/) { $q= "'" ; $url=~ s/^'|'$//g } $url=~ s/\\(.)/$1/g ; # "\"-unescape $url=~ s/^\s+|\s+$//g ; # finally, strip spaces once more $url= &full_url($url) ; $url=~ s/([(),\s'"\\])/\\$1/g ; # put "\"-escaping back in return $q . $url . $q ; } # Some CSS (MSIE-only?) 
may use the "expression" or "function" constructs, # whose contents inside "()" are to be interpreted and executed as # JavaScript. We have to handle nested parentheses, so we utilize the # already-existing get_next_js_expr() to read the JS code inside the "()". # jsm-- this may need to be done in JS too. sub proxify_expressions_in_css { my($s)= @_ ; my(@out) ; while ($s=~ /(\G.*?(?:expression|function)\s*\()/gcis) { push(@out, $1) ; push(@out, (&proxify_js(&get_next_js_expr(\$s, 1)))[0]) ; return undef unless $s=~ /\G\)/gc ; push(@out, ')') ; } return join('', @out, substr($s, pos($s))) ; } # This is a hack for supporting Flash apps that use Adobe's shared/cached .swz # libraries, which are downloaded from Adobe's site but which we don't know # how to parse yet. So proxify any fields in the flashvars string that might # be URLs. To start with, "src" and "poster" are used by Adobe's Strobe. # jsm-- must do this in JS too.... sub proxify_flashvars { my($fv)= @_ ; return $fv unless $fv ne '' ; my %fv= getformvars($fv) ; my $rebuild ; $fv{src}= full_url($fv{src}), $rebuild= 1 if defined $fv{src} ; $fv{poster}= full_url($fv{poster}), $rebuild= 1 if defined $fv{poster} ; return $fv unless $rebuild ; my($name, $value, @ret) ; while (($name, $value)= each %fv) { $name=~ s/(\W)/ '%' . sprintf('%02x',ord($1)) /ge ; $value=~ s/(\W)/ '%' . sprintf('%02x',ord($1)) /ge ; push(@ret, "$name=$value") ; } return join('&', @ret) ; # should use ";" but not all apps read that correctly } #-------------------------------------------------------------------------- # Scheme-specific routines #-------------------------------------------------------------------------- # # _get: get resource at URL and set globals $status, $headers, $body, # and $is_html. Optionally, set $response_sent to signal that the response # has already been sent. These are all globals for speed, to prevent # unneeded copying of huge strings. # # http_get: actually supports both GET and POST. Also, it is used for # https:// (SSL) URLs in addition to normal http:// URLs. sub http_get { my($default_port, $portst, $realhost, $realport, $request_uri, $realm, $tried_realm, $auth, $proxy_auth_header, $accept_language_header, $content_type, $lefttoget, $postblock, @postbody, $body_too_big, $rin, $status_code, $footers) ; local($/)= "\012" ; # Localize filehandles-- safer for when using mod_perl, early exits, etc. # But unfortunately, it doesn't work well with tied variables. :( local(*S, *S_PLAIN) ; # If using SSL, then verify that we're set up for it. if ($scheme eq 'https') { eval { require Net::SSLeay } ; # don't check during compilation &no_SSL_warning($URL) if $@ ; # Fail if we're being asked to use SSL, and we're not on an SSL server. # Do NOT remove this code; instead, see note above where # $OVERRIDE_SECURITY is set. &insecure_die if !$RUNNING_ON_SSL_SERVER && !$OVERRIDE_SECURITY ; } $default_port= $scheme eq 'https' ? 443 : 80 ; $port= $default_port if $port eq '' ; # Some servers don't like default port in a Host: header, so use $portst. $portst= ($port==$default_port) ? '' : ":$port" ; $realhost= $host ; $realport= $port ; $request_uri= $path ; $request_uri=~ s/ /%20/g ; # URL-encode spaces for now; maybe more in the future # there must be a smoother way to handle proxies.... 
if ($scheme eq 'http' && $HTTP_PROXY) { my($dont_proxy) ; foreach (@NO_PROXY) { $dont_proxy= 1, last if $host=~ /\Q$_\E$/i ; } unless ($dont_proxy) { ($realhost, $realport)= $HTTP_PROXY=~ m#^(?:http://)?([^/?:]*):?([^/?]*)#i ; $realport= 80 if $realport eq '' ; $request_uri= "$scheme://$authority$request_uri" ; # rebuild to include encoded path $proxy_auth_header= "Proxy-Authorization: Basic $PROXY_AUTH\015\012" if $PROXY_AUTH ne '' ; } } # Apparently, some servers don't handle a blank Accept-Language: header, # so only include it in the request if it's not blank. $accept_language_header= "Accept-Language: $ENV{HTTP_ACCEPT_LANGUAGE}\015\012" if $ENV{HTTP_ACCEPT_LANGUAGE} ne '' ; #------ Connect socket to host; send request; wait with select() ------ # To be able to retry on a 401 Unauthorized response, put the whole thing # in a labeled block. Note that vars have to be reinitialized. HTTP_GET: { # Open socket(s) as needed, taking into account possible SSL, proxy, etc. # Whatever the situation, S will be the socket to handle the plaintext # HTTP exchange (which may be encrypted by a lower level). # If using SSL, then open a plain socket S_PLAIN to the server and # create an SSL socket handle S tied to the plain socket, such that # whatever we write to S will be written encrypted to S_PLAIN (and # similar for reads). If using an SSL proxy, then connect to that # instead and establish an encrypted tunnel to the destination server # using the CONNECT method. if ($scheme eq 'https') { my($dont_proxy) ; if ($SSL_PROXY) { foreach (@NO_PROXY) { $dont_proxy= 1, last if $host=~ /\Q$_\E$/i ; } } # If using an SSL proxy, then connect to it and use the CONNECT # method to establish an encrypted tunnel. The CONNECT method # is an HTTP extension, documented in RFC 2817. # This block is modelled after code sent in by Grant DeGraw. if ($SSL_PROXY && !$dont_proxy) { ($realhost, $realport)= $SSL_PROXY=~ m#^(?:http://)?([^/?:]*):?([^/?]*)#i ; $realport= 80 if $realport eq '' ; &newsocketto('S_PLAIN', $realhost, $realport) ; # Send CONNECT request. print S_PLAIN "CONNECT $host:$port HTTP/$HTTP_VERSION\015\012", 'Host: ', $host, $portst, "\015\012" ; print S_PLAIN "Proxy-Authorization: Basic $SSL_PROXY_AUTH\015\012" if $SSL_PROXY_AUTH ne '' ; print S_PLAIN "\015\012" ; # Wait a minute for the response to start vec($rin= '', fileno(S_PLAIN), 1)= 1 ; select($rin, undef, undef, 60) || &HTMLdie("No response from SSL proxy") ; # Read response to CONNECT. All we care about is the status # code, but we have to read the whole response. my($response, $status_code) ; do { $response= '' ; do { $response.= $_= <S_PLAIN> ; } until (/^(\015\012|\012)$/) ; #lines end w/ LF or CRLF ($status_code)= $response=~ m#^HTTP/\d+\.\d+\s+(\d+)# ; } until $status_code ne '100' ; # Any 200-level response is OK; fail otherwise. &HTMLdie("SSL proxy error; response was:

<pre>
$response
</pre>
") unless $status_code=~ /^2/ ; # If not using a proxy, then open a socket directly to the server. } else { &newsocketto('S_PLAIN', $realhost, $realport) ; } # Either way, make an SSL socket S tied to the plain socket S_PLAIN. my $ssl_obj= tie(*S, 'SSL_Handle', \*S_PLAIN) ; Net::SSLeay::connect($ssl_obj->{SSL}) or &HTMLdie("Can't SSL connect: $!") ; # If not using SSL, then just open a normal socket. Any proxy is # already set in $realhost and $realport, above. } else { &newsocketto('S', $realhost, $realport) ; } binmode S ; # see note with "binmode STDOUT", above # Send the request. # The Host: header is required in HTTP 1.1 requests. Also include # Accept: and User-Agent: because they affect results. # We're anonymously browsing, so don't include the From: header. The # User-Agent: header is a very teensy privacy risk, but some pages # load differently with different browsers. Referer: is handled # below, depending on the user option. # Ultimately, we may want to check ALL possible request headers-- see # if they're provided in $ENV{HTTP_xxx}, and include them in our # request if appropriate as per the HTTP spec regarding proxies, and # if they don't violate our goals here (e.g. privacy); some may need # to be appropriately modified to pass through this proxy. Each # request header would have to be considered and handled individually. # That's probably not all necessary, but we can take that approach as # priorities dictate. # Note that servers are NOT required to provide request header values # to CGI scripts! Some do, but it must not be relied on. Apache does # provide them, and even provides unknown headers-- e.g. a "Foo: bar" # request header will literally set HTTP_FOO to "bar". (But some # headers are explicitly discouraged from being given to CGI scripts, # such as Authorization:, because that would be a security hole.) print S $ENV{'REQUEST_METHOD'}, ' ', $request_uri, " HTTP/$HTTP_VERSION\015\012", 'Host: ', $host, $portst, "\015\012", # needed for multi-homed servers 'Accept: ', $env_accept, "\015\012", # possibly modified 'User-Agent: ', $USER_AGENT || $ENV{'HTTP_USER_AGENT'}, "\015\012", $accept_language_header, $proxy_auth_header ; # empty if not needed # Handle potential gzip encoding and the Accept-Encoding: header. # Currently, we only handle the gzip encoding, not compress or deflate. # A blank Accept-Encoding: header indicates that we don't support any # encoding (like gzip). Unfortunately, though, at least one server # (Boa) chokes on an empty Accept-Encoding: header, so let's make it # a "," here. That effectively still means an empty value, according # to the rules of HTTP header values. if ($ENV{HTTP_ACCEPT_ENCODING}=~ /\bgzip\b/i) { eval { require IO::Uncompress::Gunzip } ; # don't check during compilation print S ($@ ? "Accept-Encoding: ,\015\012" : "Accept-Encoding: gzip\015\012") ; } else { print S "Accept-Encoding: ,\015\012" ; } # Create Referer: header if so configured. # Only include Referer: if we successfully remove $script_url+flags from # start of referring URL. Note that flags may not always be there. # If using @PROXY_GROUP, loop through them until one fits. This could # only be ambiguous if one proxy in @PROXY_GROUP is called through # another proxy in @PROXY_GROUP, which you really shouldn't do anyway. # Do not send Referer: beginning with "https" unless the requested # URL also begins with "https"! Security hole otherwise. 
if (!$e_hide_referer) { my($referer)= $ENV{'HTTP_REFERER'} ; if (@PROXY_GROUP) { foreach (@PROXY_GROUP) { if ($referer=~ s#^$_(/[^/]*/?)?## && $referer ne '') { my $decoded_referer= &wrap_proxy_decode($referer) ; print S 'Referer: ', $decoded_referer, "\015\012" unless $decoded_referer=~ /^https\b/i && $scheme eq 'http'; last ; } last if $referer eq '' ; } } else { if ($referer=~ s#^$THIS_SCRIPT_URL(/[^/]*/?)?## && ($referer ne '')) { my $decoded_referer= &wrap_proxy_decode($referer) ; print S 'Referer: ', $decoded_referer, "\015\012" unless $decoded_referer=~ /^https\b/i && $scheme eq 'http'; } } } # Add "Connection: close" header if we're using HTTP 1.1 and aren't running as a daemon. print S "Connection: close\015\012" if $HTTP_VERSION eq '1.1' and ($RUN_METHOD eq 'mod_perl' or $RUN_METHOD eq 'cgi') ; # Add the cookie if it exists and cookies aren't banned here. print S 'Cookie: ', $cookie_to_server, "\015\012" if !$cookies_are_banned_here && ($cookie_to_server ne '') ; # Add Pragma: and Cache-Control: headers if they were given in the # request, to allow caches to behave properly. These two headers # need no modification. # As explained above, we can't rely on request headers being provided # to the script via environment variables. print S "Pragma: $ENV{HTTP_PRAGMA}\015\012" if $ENV{HTTP_PRAGMA} ne '' ; print S "Cache-Control: $ENV{HTTP_CACHE_CONTROL}\015\012" if $ENV{HTTP_CACHE_CONTROL} ne '' ; # Add Authorization: header if we've had a challenge. if ($realm ne '') { # If we get here, we know $realm has a defined $auth and has not # been tried. print S 'Authorization: Basic ', $auth{$realm}, "\015\012" ; $tried_realm= $realm ; } else { # If we have auth information for this server, what the hey, let's # try one, it may save us a request/response cycle. # First case is for rare case when auth info is in URL. Related # block 100 lines down needs no changes. if ($username ne '') { print S 'Authorization: Basic ', &base64($username . ':' . $password), "\015\012" ; } elsif ( ($tried_realm,$auth)= each %auth ) { print S 'Authorization: Basic ', $auth, "\015\012" ; } } # Some old XMLHTTPRequest server apps require this non-standard header. # Thanks to Devesh Parekh for the patch. print S "X-Requested-With: $ENV{HTTP_X_REQUESTED_WITH}\015\012" if $expected_type eq 'x-proxy/xhr' and $ENV{HTTP_X_REQUESTED_WITH} eq 'XMLHttpRequest' ; # Another non-standard HTTP request header. print S "X-Do-Not-Track: 1\015\012" if $ENV{HTTP_X_DO_NOT_TRACK} eq '1' ; # Another non-standard HTTP request header. print S "DNT: 1\015\012" if $ENV{HTTP_DNT} eq '1' ; # A little problem with authorization and POST requests: If auth # is required, we won't know which realm until after we make the # request and get part of the response. But to make the request, # we have to send the entire POST body, because some servers # mistakenly require that before returning even an error response. # So this means we have to send the entire POST body, and be # prepared to send it a second time, thus we have to store it # locally. Either that, or fail to send the POST body a second # time. Here, we let the owner of this proxy set $MAX_REQUEST_SIZE: # store and post a second time if a request is smaller, or else # die with 413 the second time through. # If request method is POST, copy content headers and body to request. # The first time through here, save body to @postbody, if the body's # not too big. if ($ENV{'REQUEST_METHOD'} eq 'POST') { if ($body_too_big) { # Quick 'n' dirty response for an unlikely occurrence. 
# 413 is not actually an HTTP/1.0 response... &HTMLdie("Sorry, this proxy can't handle a request larger " . "than $MAX_REQUEST_SIZE bytes at a password-protected" . " URL. Try reducing your submission size, or submit " . "it to an unprotected URL.", 'Submission too large', '413 Request Entity Too Large') ; } # Otherwise... $lefttoget= $ENV{'CONTENT_LENGTH'} ; print S 'Content-type: ', $ENV{'CONTENT_TYPE'}, "\015\012", 'Content-length: ', $lefttoget, "\015\012\015\012" ; if (@postbody) { print S @postbody ; } else { $body_too_big= ($lefttoget > $MAX_REQUEST_SIZE) ; # Loop to guarantee all is read from $STDIN. do { $lefttoget-= read($STDIN, $postblock, $lefttoget) ; print S $postblock ; # efficient-- only doing test when input is slow anyway. push(@postbody, $postblock) unless $body_too_big ; } while $lefttoget && ($postblock ne '') ; } # For GET or HEAD requests, just add extra blank line. } else { print S "\015\012" ; } # Wait a minute for the response to start vec($rin= '', fileno(S), 1)= 1 ; select($rin, undef, undef, 60) || &HTMLdie("No response from $realhost:$realport") ; #------ Read full response into $status, $headers, and $body ---- # Support both HTTP 1.x and HTTP 0.9 $status= <S> ; # first line, which is the status line in HTTP 1.x # HTTP 0.9 # Ignore possibility of HEAD, since it's not defined in HTTP 0.9. # Do any HTTP 0.9 servers really exist anymore? unless ($status=~ m#^HTTP/#) { $is_html= 1 ; # HTTP 0.9 by definition implies an HTML response $content_type= 'text/html' ; local($/)= undef ; $body= $status . <S> ; $status= '' ; close(S) ; untie(*S) if $scheme eq 'https' ; return ; } # After here, we know we're using HTTP 1.x # Be sure to handle case when server doesn't send blank line! It's # rare and erroneous, but a couple servers out there do that when # responding with a redirection. This can cause some processes to # linger and soak up resources, particularly under mod_perl. # To handle this, merely check for eof(S) in until clause below. # ... except that for some reason invoking eof() on a tied SSL_Handle # makes later read()'s fail with unlikely error messages. :( # So instead of eof(S), test "$_ eq ''". # Loop to get $status and $headers until we get a non-100 response. do { ($status_code)= $status=~ m#^HTTP/\d+\.\d+\s+(\d+)# ; $headers= '' ; # could have been set by first attempt do { $headers.= $_= <S> ; # $headers includes last blank line # } until (/^(\015\012|\012)$/) || eof(S) ; # lines end w/ LF or CRLF } until (/^(\015\012|\012)$/) || $_ eq '' ; #lines end w/ LF or CRLF $status= <S> if $status_code == 100 ; # re-read for next iteration } until $status_code != 100 ; # Unfold long header lines, a la RFC 822 section 3.1.1 $headers=~ s/(\015\012|\012)[ \t]+/ /g ; # Check for 401 Unauthorized response if ($status=~ m#^HTTP/\d+\.\d+\s+401\b#) { ($realm)= $headers=~ /^WWW-Authenticate:\s*Basic\s+realm="([^"\n]*)/mi ; &HTMLdie("Error by target server: no WWW-Authenticate header.") unless $realm ne '' ; if ($auth{$realm} eq '') { &get_auth_from_user("$host$portst", $realm, $URL) ; } elsif ($realm eq $tried_realm) { &get_auth_from_user("$host$portst", $realm, $URL, 1) ; } # so now $realm exists, has defined $auth, and has not been tried close(S) ; untie(*S) if $scheme eq 'https' ; redo HTTP_GET ; } # Extract $content_type, used in several places ($content_type, $charset)= $headers=~ m#^Content-Type:\s*([\w/.+\$-]*)\s*;?\s*(?:charset\s*=\s*([\w-]+))?#mi ; $content_type= lc($content_type) ; # If we're text only, then cut off non-text responses (but allow # unspecified types).
if ($TEXT_ONLY) { if ( ($content_type ne '') && ($content_type!~ m#^text/#i) ) { &non_text_die ; } } # If we're removing scripts, then disallow script MIME types. if ($scripts_are_banned_here) { &script_content_die if $content_type=~ /^$SCRIPT_TYPE_REGEX$/io ; # Note that the non-standard Link: header, which may link to a # style sheet, is handled in http_fix(). } # If URL matches one of @BANNED_IMAGE_URL_PATTERNS, then skip the # resource unless it's clearly a text type. if ($images_are_banned_here) { &skip_image unless $content_type=~ m#^text/#i ; } # Keeping $base_url and its related variables up-to-date is an # ongoing job. Here, we look in appropriate headers. Note that if # Content-Base: doesn't exist, Content-Location: is an absolute URL. if ($headers=~ m#^Content-Base:\s*([\w+.-]+://\S+)#mi) { $base_url= $1, &fix_base_vars ; } elsif ($headers=~ m#^Content-Location:\s*([\w+.-]+://\S+)#mi) { $base_url= $1, &fix_base_vars ; } elsif ($headers=~ m#^Location:\s*([\w+.-]+://\S+)#mi) { $base_url= $1, &fix_base_vars ; } # Now, fix the headers with &http_fix(). It uses &full_url(), and # may modify the headers we just extracted the base URL from. # This also includes cookie support. &http_fix ; # If configured, make this response as non-cacheable as possible. # This means remove any Expires: and Pragma: headers (the latter # could be using extensions), strip Cache-Control: headers of any # unwanted directives and add the "no-cache" directive, and add back # to $headers the new Cache-Control: header and a "Pragma: no-cache" # header. # A lot of this is documented in the HTTP 1.1 spec, sections 13 as a # whole, 13.1.3, 13.4, 14.9, 14.21, and 14.32. The Cache-Control: # response header has eight possible directives, plus extensions; # according to section 13.4, all except "no-cache", "no-store", and # "no-transform" might indicate cacheability, so remove them. Remove # extensions for the same reason. Remove any parameter from # "no-cache", because that would limit its effect. This effectively # means preserve only "no-store" and "no-transform" if they exist # (neither have parameters), and add "no-cache". # We use a quick method here that works for all but cases both faulty # and obscure, but opens no privacy holes; in the future we may fully # parse the header value(s) into its comma-separated list of # directives. if ($MINIMIZE_CACHING) { my($new_value)= 'no-cache' ; $new_value.= ', no-store' if $headers=~ /^Cache-Control:.*?\bno-store\b/mi ; $new_value.= ', no-transform' if $headers=~ /^Cache-Control:.*?\bno-transform\b/mi ; my($no_cache_headers)= "Cache-Control: $new_value\015\012Pragma: no-cache\015\012" ; $headers=~ s/^Cache-Control:[^\012]*\012?//mig ; $headers=~ s/^Pragma:[^\012]*\012?//mig ; $headers=~ s/^Expires:[^\012]*\012?//mig ; $headers= $no_cache_headers . $headers ; } # Add the 1-2 session cookies if so configured. $headers= $session_cookies . $headers if $session_cookies ; # Set $is_html if headers indicate HTML response. # Question: are there any other HTML-like MIME types, including x-... ? $is_html= 1 if $content_type eq 'text/html' or $content_type eq 'application/xhtml+xml' ; # Some servers return HTML content without the Content-Type: header. # These MUST be caught, because Netscape displays them as HTML, and # a user could lose their anonymity on these pages. # According to the HTTP 1.1 spec, section. 7.2.1, browsers can choose # how to deal with HTTP bodies with no Content-Type: header. 
See # http://www.ietf.org/rfc/rfc2616.txt # In such a case, Netscape seems to always assume "text/html". # Konqueror seems to guess the MIME type by using the Unix "file" # utility on the first 1024 bytes, and possibly other clues (e.g. # resource starts with "<html>
"). # In any case, we must interpret as HTML anything that *may* be # interpreted as HTML by the browser. So if there is no # Content-Type: header, set $is_html=1 . The worst that would # happen would be the occasional content mangled by modified URLs, # which is better than a privacy hole. $is_html= 1 if ($content_type eq '') ; # If the expected type is "x-proxy/xhr", then the resource is being # downloaded via a JS XMLHttpRequest object and should not be # proxified, even if it's HTML data (it would be proxified later # when the data is written to or inserted in a document). To # indicate this, we set $is_html to false. $is_html= 0 if ($expected_type eq 'x-proxy/xhr') ; # To support non-NPH hack, replace first part of $status with # "Status:" if needed. $status=~ s#^\S+#Status:# if $NOT_RUNNING_AS_NPH ; # A bug in some Sun servers returns "text/plain" for SWF files when # responding to certain SWF method calls. my $may_be_swf= ($content_type eq 'text/plain' and $headers=~ /^Server:\s*Sun-ONE/mi) ; # To support streaming media and large files, read the data from # the server and send it immediately to the client. The exception # is HTML content, which still must be read fully to be converted # in the main block. HTML content is not normally streaming or # very large. # This requires $status and $headers to be returned now, which is # OK since headers have been completely cleaned up by now. This # also means that changes after this point to $body won't # have any effect, which in fact is fine in the case of non-HTML # resources. Set $response_sent to prevent the main block from # sending a response. # Also, handle any non-HTML types here which must be proxified. # This is a bit sloppy now, just a quick hack to get rudimentary # handling of multiple types working and released. It will be # rewritten more cleanly at some point, when the whole proxifying # of different types is modularized better. # Only read body if the request method is not HEAD if ($ENV{'REQUEST_METHOD'} ne 'HEAD') { # Because of the erroneous way some browsers use the expected # MIME type instead of the actual Content-Type: header, check # $expected_type first. # Since style sheets tend to be automatically loaded, whereas other # types (like scripts) are more user-selected, plus the fact that # CSS can be safely proxified and scripts cannot, we treat a # resource as CSS if it *may* be treated as CSS by the browser. # This is relevant when $expected_type and Content-Type: differ. # Again, anything retrieved via a JS XMLHttpRequest object should # not be proxified, regardless of $content_type . if ( ($expected_type ne 'x-proxy/xhr') && ( ($expected_type=~ /^$TYPES_TO_HANDLE_REGEX$/io) || ($content_type=~ /^$TYPES_TO_HANDLE_REGEX$/io) || $may_be_swf ) ) { my($type) ; if ( ($expected_type eq 'text/css') || ($content_type eq 'text/css') ) { $type= 'text/css' ; } elsif ($expected_type=~ /^$TYPES_TO_HANDLE_REGEX$/io) { $type= $expected_type ; } else { $type= $content_type ; } # If response is chunked, then dechunk it before processing. # Not perfect (it loses the benefit of chunked encoding), but it # works and will seldom be a problem. # Append $footers into $headers, and remove any Transfer-Encoding: header. 
if ($headers=~ /^Transfer-Encoding:[ \t]*chunked\b/mi) { ($body, $footers)= &get_chunked_body('S') ; &HTMLdie(&HTMLescape("Error reading chunked response from $URL .")) unless defined($body) ; $headers=~ s/^Transfer-Encoding:[^\012]*\012?//mig ; $headers=~ s/^(\015\012|\012)/$footers$1/m ; # Handle explicitly sized response. } elsif ($headers=~ /^Content-Length:[ \t]*(\d+)/mi) { $body= &read_socket('S', $1) ; # If not chunked or sized, read entire input into $body. } else { local($/)= undef ; $body= ; } shutdown(S, 0) if $RUNNING_ON_IIS ; # without this, IIS+MSIE hangs # If $body is gzipped, then gunzip it. # Change $headers to maintain consistency, even though it will # probably just be compressed again later. if ($headers=~ /^Content-Encoding:.*\bgzip\b/mi) { eval { require IO::Uncompress::Gunzip } ; &no_gzip_die if $@ ; my $zout ; IO::Uncompress::Gunzip::gunzip(\$body => \$zout) or HTMLdie("Couldn't gunzip: $IO::Uncompress::Gunzip::GunzipError") ; $body= $zout ; $headers=~ s/^Content-Encoding:.*?\012//gims ; } # A body starting with "\xEF\xBB\xBF" (non-standardly) indicates # a UTF-8 resource. We can only know this after reading # $body, thus it's done here and not above. # The string "\xEF\xBB\xBF" is sort of like a non-standard BOM # for UTF-8, though UTF-8 doesn't need a BOM. Some systems # don't handle it, so remove it if found. $charset= 'UTF-8' if $body=~ s/^\xef\xbb\xbf// ; # Decode $body if $charset is defined. if (defined $charset) { eval { $body= decode($charset, $body) } ; &malformed_unicode_die($charset) if $@ ; } # If $body looks like it's in UTF-16 encoding, then convert it # to UTF-8 before proxifying. un_utf16(\$body), $charset= 'UTF-8' if ($body=~ /^(?:\376\377|\377\376)/) ; # Part of workaround for Sun servers (see $may_be_swf above). if ($may_be_swf && $body=~ /^[FC]WS[\x01-\x09]/) { $type= 'application/x-shockwave-flash' ; } # If Content-Type: is "text/html" and body looks like HTML, # then treat it as HTML. This helps with sites that play # fast and loose with MIME types (e.g. hotmail). Hacky. # Remove leading HTML comments before testing for text/html; # e.g. hotmail puts HTML comments at start of JS resources, # and even gives Content-Type as text/html . :P my($leading_html_comments)= $body=~ /^(\s*(?:\s*)*)/ ; $body= substr($body, length($leading_html_comments)) if $leading_html_comments ; if (($content_type eq 'text/html') and $body=~ /^\s*<(?:\!(?!--\s*\n)|html)/) { $type= 'text/html' ; $is_html= 1 ; $body= $leading_html_comments . $body ; } else { $body= (&proxify_block($body, $type))[0] ; # Re-enbyte $body if needed. if (defined $charset) { eval { $body= encode($charset, $body) } ; &malformed_unicode_die($charset) if $@ ; } # gzip the response body if we're allowed and able. if ($ENV{HTTP_ACCEPT_ENCODING}=~ /\bgzip\b/i) { eval { require IO::Compress::Gzip } ; if (!$@) { my $zout ; IO::Compress::Gzip::gzip(\$body, \$zout) or HTMLdie("Couldn't gzip: $IO::Compress::Gzip::GzipError"); $body= $zout ; $headers= "Content-Encoding: gzip\015\012" . $headers ; } } $headers=~ s/^Content-Length:.*/ 'Content-Length: ' . length($body) /mie ; print $STDOUT $status, $headers, $body ; $response_sent= 1 ; } } elsif ($is_html) { my($transmit_in_parts) ; foreach (@TRANSMIT_HTML_IN_PARTS_URLS) { $transmit_in_parts= 1, last if $URL=~ /$_/ ; } # Transmit the HTML in parts if so configured. if ($transmit_in_parts) { print $STDOUT $status ; &transmit_html_in_parts($headers, 'S') ; $response_sent= 1 ; } else { # If response is chunked, handle as above; see comments there. 
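# ----------------------------------------------------------------------
# Illustrative sketch of the compression step used just above: gzip a
# (possibly rewritten) body with IO::Compress::Gzip and keep the
# Content-Length: header honest.  $some_body and $some_headers are
# hypothetical stand-ins, not variables from the surrounding code.
#
#   use IO::Compress::Gzip ;
#   my $some_body=    "<html>...</html>" ;
#   my $some_headers= "Content-Type: text/html\015\012Content-Length: 17\015\012" ;
#   my $zipped ;
#   if (IO::Compress::Gzip::gzip(\$some_body, \$zipped)) {
#       $some_body= $zipped ;
#       $some_headers= "Content-Encoding: gzip\015\012" . $some_headers ;
#   }
#   $some_headers=~ s/^Content-Length:.*/'Content-Length: ' . length($some_body)/mie ;
# ----------------------------------------------------------------------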
if ($headers=~ /^Transfer-Encoding:[ \t]*chunked\b/mi) { ($body, $footers)= &get_chunked_body('S') ; &HTMLdie(&HTMLescape("Error reading chunked response from $URL .")) unless defined($body) ; $headers=~ s/^Transfer-Encoding:[^\012]*\012?//mig ; $headers=~ s/^(\015\012|\012)/$footers$1/m ; # Handle explicitly sized response. } elsif ($headers=~ /^Content-Length:[ \t]*(\d+)/mi) { $body= &read_socket('S', $1) ; # If not chunked or sized, read entire input into $body. } else { undef $/ ; $body= ; } shutdown(S, 0) if $RUNNING_ON_IIS ; # without this, IIS+MSIE hangs # If $body is gzipped, then gunzip it. # Change $headers to maintain consistency, even though it will # probably just be compressed again later. if ($headers=~ /^Content-Encoding:.*\bgzip\b/mi) { eval { require IO::Uncompress::Gunzip } ; # don't check during compilation &no_gzip_die if $@ ; my $zout ; IO::Uncompress::Gunzip::gunzip(\$body => \$zout) or HTMLdie("Couldn't gunzip: $IO::Uncompress::Gunzip::GunzipError") ; $body= $zout ; $headers=~ s/^Content-Encoding:.*?\012//gims ; } # Due to a bug in (at least some) captcha systems, where they label # the test image as "text/html", we test for the image here by # examining the first 1000 chars for non-printable chars. if ($env_accept=~ m#^\s*image/#i) { my $binchars= substr($body, 0, 1000)=~ tr/\x00-\x08\x0b\x0c\x0e-\x1b\x80-\xff/\x00-\x08\x0b\x0c\x0e-\x1b\x80-\xff/ ; if ($binchars > ( (length($body)<1000) ? length($body)*0.25 : 250 )) { print $STDOUT $status, $headers, $body ; $response_sent= 1 ; close(S) ; untie(*S) if $scheme eq 'https' ; return ; } } if (defined $charset) { eval { $body= decode($charset, $body) } ; &malformed_unicode_die($charset) if $@ ; } # If $body looks like it's in UTF-16 encoding, then convert # it to UTF-8 before proxifying. un_utf16(\$body), $charset= 'UTF-8' if ($body=~ /^(?:\376\377|\377\376)/) ; } # This is for when the resource is passed straight through without # modification. # We don't care whether it's chunked or not here, or gzipped or not. # Except: some servers leave a persistent connection open even when # we send "Connection: close", so we must close the connection # after reading the response, so we still must be careful to read the # correct number of bytes, so we respect Content-Length: and chunked # encoding for this. # Ideally, we'd use recv() to get "read all available but block until # something available" behavior, but that fails because of mixing # buffered and non-buffered input. Also, select() doesn't work well # on buffered input, and is unreliable even on unbuffered input on # some systems. So, the best we can do is set up a read() loop. # Note that read() blocks until the entire requested input is read, # or at EOF. } else { my($buf) ; print $STDOUT $status, $headers ; # Use Content-Length: if available. if ($headers=~ /^Content-Length:[ \t]*(\d+)/mi) { my $lefttoget= $1 ; my $thisread ; while ($lefttoget>0 and $thisread= read(S, $buf, ($lefttoget<16384) ? $lefttoget : 16384)) { &HTMLdie("read() error: $!") unless defined $thisread ; print $STDOUT $buf ; $lefttoget-= $thisread ; } # Pass through response if chunked. } elsif ($headers=~ /^Transfer-Encoding:[ \t]*chunked\b/mi) { # Get chunks. my $hex_size ; while ($hex_size= ) { print $STDOUT $hex_size ; no warnings 'digit' ; # to let hex() operate without warnings last unless $lefttoget= hex($hex_size) ; my $thisread ; while ($lefttoget>0 and $thisread= read(S, $buf, ($lefttoget<16384) ? 
$lefttoget : 16384)) { &HTMLdie("chunked read() error: $!") unless defined $thisread ; print $STDOUT $buf ; $lefttoget-= $thisread ; } print $STDOUT scalar ; # clear CRLF after chunk } # Get footers. while () { print $STDOUT $_ ; last if /^(\015\012|\012)/ || $_ eq '' ; # lines end w/ LF or CRLF } # If no indication of response length, just pass all socket data through. } else { # If using SSL, read() could return 0 and truncate data. :P print $STDOUT $buf while read(S, $buf, 16384) ; } close(S) ; $response_sent= 1 ; } } else { $body= '' ; } close(S) ; untie(*S) if $scheme eq 'https' ; } # HTTP_GET: } # sub http_get() # This package defines a SSL filehandle, complete with all the functions # needed to tie a filehandle to. This lets us use the routine http_get() # above for SSL (https) communication too, which means we only have one # routine to maintain instead of two-- big win. # The idea was taken from Net::SSLeay::Handle, which is a great idea, but the # current implementation of that module isn't suitable for this application. # This implementation uses an input buffer, which lets us write a moderately # efficient READLINE() routine here. Net::SSLeay::ssl_read_until() would be # the natural function to use for that, but it reads and tests all input one # character at a time. # This is in a BEGIN block to make sure any initialization is done. "use" # would effectively do a BEGIN block too. # These are all socket functions used by http_get(): print(), read(), <>, # close(), fileno() for select(), eof(), binmode() BEGIN { package SSL_Handle ; use vars qw($SSL_CONTEXT $DEFAULT_READ_SIZE) ; $DEFAULT_READ_SIZE= 512 ; # Only used for <> style input, so doesn't need to be big. # Create an SSL socket with e.g. "tie(*S_SSL, 'SSL_Handle', \*S_PLAIN)", # where S_PLAIN is an existing open socket to be used by S_SSL. # S_PLAIN must remain in scope for the duration of the use of S_SSL, or # else you'll get OpenSSL errors like "bad write retry". # If $unbuffered is set, then the socket input will be read one character # at a time (probably slower). sub TIEHANDLE { my($class, $socket, $is_server, $unbuffered)= @_ ; my($ssl) ; create_SSL_CONTEXT($is_server) ; $ssl = Net::SSLeay::new($SSL_CONTEXT) or &main::HTMLdie("Can't create SSL connection: $!"); Net::SSLeay::set_fd($ssl, fileno($socket)) or &main::HTMLdie("Can't set_fd: $!") ; bless { SSL => $ssl, socket => $socket, readsize => ($unbuffered ? 0 : $DEFAULT_READ_SIZE), buf => '', eof => '', }, $class ; # returns reference } sub create_SSL_CONTEXT { my($is_server)= @_ ; # $SSL_CONTEXT only needs to be created once (e.g. with mod_perl or daemon). unless ($SSL_CONTEXT) { # load_error_strings() isn't worth the effort if running as a CGI script. Net::SSLeay::load_error_strings() if $main::RUN_METHOD ne 'cgi' ; Net::SSLeay::SSLeay_add_ssl_algorithms() ; Net::SSLeay::randomize() ; # Create the reusable SSL context $SSL_CONTEXT= Net::SSLeay::CTX_new() or &main::HTMLdie("Can't create SSL context: $!") ; # Need this to cope with bugs in some other SSL implementations. Net::SSLeay::CTX_set_options($SSL_CONTEXT, &Net::SSLeay::OP_ALL) ; # Makes life easier if using blocking IO. Flag 0x04 is SSL_MODE_AUTO_RETRY . Net::SSLeay::CTX_set_mode($SSL_CONTEXT, 4) ; } # Set SSL key and certificate for server socket handles. # jsm-- must make UI for keys.... 
if ($is_server) { Net::SSLeay::CTX_use_RSAPrivateKey_file($SSL_CONTEXT, File::Spec->catfile($main::PROXY_DIR, $main::PRIVATE_KEY_FILE), &Net::SSLeay::FILETYPE_PEM) or Net::SSLeay::die_if_ssl_error("error with private key: $!") ; Net::SSLeay::CTX_use_certificate_file($SSL_CONTEXT, File::Spec->catfile($main::PROXY_DIR, $main::CERTIFICATE_FILE), &Net::SSLeay::FILETYPE_PEM) or Net::SSLeay::die_if_ssl_error("error with certificate: $!") ; } } # For the print() function. Respect $, and $\ settings. sub PRINT { my($self)= shift ; my($written, $errs)= Net::SSLeay::ssl_write_all($self->{SSL}, join($, , @_) . $\ ) ; # jsm-- following line generates OpenSSL warnings... need to debug. # die "Net::SSLeay::ssl_write_all error: $errs" if $errs ne '' ; return 1 ; # to keep consistent with standard print() } # For read() and sysread() functions. # Note that unlike standard read() or sysread(), this function can return # 0 even when not at EOF, and when select() on the underlying socket # indicates there is data to be read. :( This is because of SSL # buffering issues: OpenSSL processes data in chunks (records), so a # socket may have some data available but not enough for a full record, # i.e. enough to release decrypted data to the reader. # So how can an application distinguish between an empty read() and EOF? # Note that eof() is problematic too (see notes there). :( # jsm-- may be possible to handle this by looking for SSL_ERROR_WANT_READ # in the error code; http://www.openssl.org/docs/ssl/SSL_get_error.html # has some info, then look in the source code of Net::SSLeay. sub READ { my($self)= shift ; return 0 if $self->{eof} ; # Can't use my(undef) in some old versions of Perl, so use $dummy. my($dummy, $len, $offset)= @_ ; # $_[0] is handled explicitly below my($read, $errs) ; # this could be cleaned up.... if ($len > length($self->{buf})) { if ( $offset || ($self->{buf} ne '') ) { $len-= length($self->{buf}) ; #$read= Net::SSLeay::ssl_read_all($self->{SSL}, $len) ; ($read, $errs)= &ssl_read_all_fixed($self->{SSL}, $len) ; &main::HTMLdie("ssl_read_all_fixed() error: $errs") if $errs ne '' ; return undef unless defined($read) ; $self->{eof}= 1 if length($read) < $len ; my($buflen)= length($_[0]) ; $_[0].= "\0" x ($offset-$buflen) if $offset>$buflen ; substr($_[0], $offset)= $self->{buf} . $read ; $self->{buf}= '' ; return length($_[0])-$offset ; } else { # Streamlined block for the most common case. #$_[0]= Net::SSLeay::ssl_read_all($self->{SSL}, $len) ; ($_[0], $errs)= &ssl_read_all_fixed($self->{SSL}, $len) ; &main::HTMLdie("ssl_read_all_fixed() error: $errs") if $errs ne '' ; return undef unless defined($_[0]) ; $self->{eof}= 1 if length($_[0]) < $len ; return length($_[0]) ; } } else { # Here the ?: operator returns an lvar. ($offset ? substr($_[0], $offset) : $_[0])= substr($self->{buf}, 0, $len) ; substr($self->{buf}, 0, $len)= '' ; return $len ; } } # For <> style input. # In Perl, $/ as the input delimiter can have two special values: undef # reads all input as one record, and "" means match on multiple blank # lines, like the regex "\n{2,}". Net::SSLeay doesn't support these, # but here we support the undef value (though not the "" value). # See the note with READ(), above, about possible SSL buffering issues. # It's not as big a problem here, since <> returns undef at EOF. Note # that ssl_read_all() blocks until all requested data is read. 
# Net::SSLeay::ssl_read_until() would normally be the natural function for # this, but it reads and tests all input one character at a time, which # is potentially very inefficient. Thus we implement this package with # an input buffer. sub READLINE { my($self)= shift ; my($read, $errs) ; if (defined($/)) { if (wantarray) { return () if $self->{eof} ; ($read, $errs)= &ssl_read_all_fixed($self->{SSL}) ; &main::HTMLdie("ssl_read_all_fixed() error: $errs") if $errs ne '' ; # Prepend current buffer, and split to end items on $/ or EOS; # this regex prevents final '' element. $self->{eof}= 1 ; return ($self->{buf} . $read)=~ m#(.*?\Q$/\E|.+?\Z(?!\n))#sg ; } else { return '' if $self->{eof} ; my($pos, $read, $ret) ; while ( ($pos= index($self->{buf}, $/)) == -1 ) { $read= Net::SSLeay::read($self->{SSL}, $self->{readsize} || 1 ) ; #return undef if $errs = Net::SSLeay::print_errs('SSL_read') ; &main::HTMLdie("Net::SSLeay::read error: $errs") if $errs = Net::SSLeay::print_errs('SSL_read') ; $self->{eof}= 1, return $self->{buf} if $read eq '' ; $self->{buf}.= $read ; } $pos+= length($/) ; $ret= substr($self->{buf}, 0, $pos) ; substr($self->{buf}, 0, $pos)= '' ; return $ret ; } } else { return '' if $self->{eof} ; ($read, $errs)= &ssl_read_all_fixed($self->{SSL}) ; &main::HTMLdie("ssl_read_all_fixed() error: $errs") if $errs ne '' ; $self->{eof}= 1 ; return $self->{buf} . $read ; } } # Used when closing socket, or from UNTIE() or DESTROY() if needed. # Calling Net::SSLeay::free() twice on the same object causes a crash, # so be careful not to do that. sub CLOSE { my($self)= shift ; my($errs) ; $self->{eof}= 1 ; $self->{buf}= '' ; if (defined($self->{SSL})) { Net::SSLeay::free($self->{SSL}) ; delete($self->{SSL}) ; # to detect later if we've free'd it or not &main::HTMLdie("Net::SSLeay::free error: $errs") if $errs= Net::SSLeay::print_errs('SSL_free') ; close($self->{socket}) ; } } # In case the SSL filehandle is not closed correctly, this will deallocate # as needed. Without this, memory could be eaten up under mod_perl. # Some versions of Perl seem to have trouble with the scoping of tied # variables and their objects, so define both UNTIE() and DESTROY() here. sub UNTIE { my($self)= shift ; $self->CLOSE ; } sub DESTROY { my($self)= shift ; $self->CLOSE ; } # FILENO we define to be the fileno() of the underlying socket. # This is our best guess as to what will work with select(), which is # the only thing fileno() is used for here. # See the note with READ(), above, about possible issues with select(). sub FILENO { my($self)= shift ; return fileno($self->{socket}) ; } # For EOF we first check the fields we set ({eof} and {buf}), then test the # eof() value of the underlying socket. # Note that there may still be data coming through the socket even # though a read() returns nothing; see the note with READ() above. # It may be more accurate here to try "Net::SSLeay::read($self->{SSL},1)" # into {buf} before using eof(). # This routine causes a weird problem: If Perl's eof() is used on a tied # SSL_Handle, it causes later read()'s on that filehandle to fail with # "SSL3_GET_RECORD:wrong version number", which seems inappropriate. # So, avoid use of eof(). :( Maybe test a read result against ''. sub EOF { my($self)= shift ; return 1 if $self->{eof} ; # overrides anything left in {buf} return 0 if $self->{buf} ne '' ; return eof($self->{socket}) ; } # BINMODE we define to be the same as binmode() on the underlying socket. # Only ever relevant on non-Unix machines. 
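# ----------------------------------------------------------------------
# Illustrative sketch of the buffered-readline idea used by READLINE()
# above: keep an input buffer, refill it in blocks, and only return once
# the record separator $/ is found.  This is a generic stand-in, not the
# package's own code; $fill->() is a hypothetical routine returning the
# next block of decrypted input ('' at EOF), and a defined $/ is assumed.
#
#   sub buffered_readline {
#       my($state, $fill)= @_ ;                  # $state->{buf} persists across calls
#       $state->{buf}= '' unless defined $state->{buf} ;
#       my $pos ;
#       while ( ($pos= index($state->{buf}, $/)) == -1 ) {
#           my $more= $fill->() ;
#           return $state->{buf} if $more eq '' ;          # EOF: return what's left
#           $state->{buf}.= $more ;
#       }
#       return substr($state->{buf}, 0, $pos + length($/), '') ;  # extract one record
#   }
# ----------------------------------------------------------------------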
sub BINMODE { my($self)= shift ; binmode($self->{socket}) ; } # In older versions of Net::SSLeay, there was a bug in ssl_read_all() # and ssl_read_until() where pages were truncated on any "0" character. # To work with those versions, here we use a fixed copy of ssl_read_all(). # Earlier versions of CGIProxy had older copies of the two routines but # fixed; now we just copy ssl_read_all() in from the new Net::SSLeay # module and tweak it as needed. (ssl_read_until() is no longer needed # now that this package uses an input buffer.) sub ssl_read_all_fixed { my ($ssl,$how_much) = @_; $how_much = 2000000000 unless $how_much; my ($got, $errs); my $reply = ''; while ($how_much > 0) { # read($ssl, 2000000000) would eat up memory. $got = Net::SSLeay::read($ssl, ($how_much>32768) ? 32768 : $how_much); last if $errs = Net::SSLeay::print_errs('SSL_read'); $how_much -= Net::SSLeay::blength($got); last if $got eq ''; # EOF $reply .= $got; } return wantarray ? ($reply, $errs) : $reply; } # end of package SSL_Handle } # ftp_get: sub ftp_get { my($is_dir, $rcode, @r, $dataport, $remote_addr, $ext, $content_type, %content_type, $content_length, $enc_URL, @welcome, @cwdmsg) ; local($/)= "\012" ; $port= 21 if $port eq '' ; # List of file extensions and associated MIME types, or at least the ones # a typical browser distinguishes from a nondescript file. # I'm open to suggestions for improving this. One option is to read the # file mime.types if it's available. %content_type= ('txt', 'text/plain', 'text', 'text/plain', 'htm', 'text/html', 'html', 'text/html', 'css', 'text/css', 'png', 'image/png', 'jpg', 'image/jpeg', 'jpeg', 'image/jpeg', 'jpe', 'image/jpeg', 'gif', 'image/gif', 'xbm', 'image/x-bitmap', 'mpg', 'video/mpeg', 'mpeg', 'video/mpeg', 'mpe', 'video/mpeg', 'qt', 'video/quicktime', 'mov', 'video/quicktime', 'aiff', 'audio/aiff', 'aif', 'audio/aiff', 'au', 'audio/basic', 'snd', 'audio/basic', 'wav', 'audio/x-wav', 'mp2', 'audio/x-mpeg', 'mp3', 'audio/mpeg', 'ram', 'audio/x-pn-realaudio', 'rm', 'audio/x-pn-realaudio', 'ra', 'audio/x-pn-realaudio', 'gz', 'application/x-gzip', 'zip', 'application/zip', ) ; $is_dir= $path=~ m#/$# ; $is_html= 0 if $is_dir ; # for our purposes, do not treat dirs as HTML # Set $content_type based on file extension. # Hmm, still unsure how best to handle unknown file types. This labels # them as text/plain, so that README's, etc. will display right. ($ext)= $path=~ /\.(\w+)$/ ; # works for FTP, not for URLs with query etc. $content_type= ($is_html || $is_dir) ? 'text/html' : $content_type{lc($ext)} || 'text/plain' ; # If we're removing scripts, then disallow script MIME types. if ($scripts_are_banned_here) { &script_content_die if $content_type=~ /^$SCRIPT_TYPE_REGEX$/io ; } # Hack to help handle spaces in pathnames. :P # $path should be delivered to us here with spaces encoded as "%20". # But that's not what the FTP server wants (or what we should display), # so translate them back to spaces in a temporary copy of $path. # Hopefully the FTP server will allow spaces in the FTP commands below, # like "CWD path with spaces". local($path)= $path ; $path=~ s/%20/ /g ; # Create $status and $headers, and leave $body and $is_html as is. # Directories use an HTML response, though $is_html is false when $is_dir. $status= "$HTTP_1_X 200 OK\015\012" ; $headers= $session_cookies . $NO_CACHE_HEADERS . "Date: " . &rfc1123_date($now,0) . "\015\012" . ($content_type ? "Content-type: $content_type\015\012" : '') . 
"\015\012" ; # Open the control connection to the FTP server &newsocketto('S', $host, $port) ; binmode S ; # see note with "binmode STDOUT", above # Luckily, RFC 959 (FTP) has a really good list of all possible response # codes to all possible commands, on pages 50-53. # Connection establishment ($rcode)= &ftp_command('', '120|220') ; &ftp_command('', '220') if $rcode==120 ; # Login ($rcode, @welcome)= &ftp_command("USER $username\015\012", '230|331') ; ($rcode, @welcome)= &ftp_command("PASS $password\015\012", '230|202') if $rcode==331 ; # Set transfer parameters &ftp_command("TYPE I\015\012", '200') ; # If using passive FTP, send PASV command and parse response. RFC 959 # isn't clear on the response format, but here we assume that the first # six integers separated by commas are the host and port. if ($USE_PASSIVE_FTP_MODE) { my(@p) ; ($rcode, @r)= &ftp_command("PASV\015\012", '227') ; @p= (join('',@r))=~ /(\d+),\s*(\d+),\s*(\d+),\s*(\d+),\s*(\d+),\s*(\d+)/ ; $dataport= ($p[4]<<8) + $p[5] ; # Open the data socket to $dataport. This is conceptually paired # with the accept() for non-passive mode below, but we have to # open the socket here first to allow for 125/150 responses to # LIST and RETR commands in passive mode. &newsocketto('DATA_XFER', $host, $dataport) ; binmode DATA_XFER ; # see note with "binmode STDOUT", above # If not using passive FTP, listen on open port and send PORT command. # See notes by newsocketto() about replacing pack('S n a4 x8') usage. } else { # Create and listen on data socket socket(DATA_LISTEN, AF_INET, SOCK_STREAM, (getprotobyname('tcp'))[2]) || &HTMLdie("Couldn't create FTP data socket: $!") ; # bind(DATA_LISTEN, pack('S n a4 x8', AF_INET, 0, "\0\0\0\0") ) bind(DATA_LISTEN, pack_sockaddr_in(0, INADDR_ANY)) || &HTMLdie("Couldn't bind FTP data socket: $!") ; # $dataport= (unpack('S n a4 x8', getsockname(DATA_LISTEN)))[1] ; $dataport= (unpack_sockaddr_in(getsockname(DATA_LISTEN)))[0] ; listen(DATA_LISTEN,1) || &HTMLdie("Couldn't listen on FTP data socket: $!") ; select((select(DATA_LISTEN), $|=1)[0]) ; # unbuffer the socket # Tell FTP server which port to connect to &ftp_command( sprintf("PORT %d,%d,%d,%d,%d,%d\015\012", unpack('C4', substr(getsockname(S),4,4)), $dataport>>8, $dataport & 255), '200') ; } # Do LIST for directories, RETR for files. # Unfortunately, the FTP spec in RFC 959 doesn't define a standard format # for the response to LIST, but most servers use the equivalent of # Unix's "ls -l". Response to the NLST command is designed to be # machine-readable, but it has nothing but file names. So we use # LIST and parse it as best we can later. if ($is_dir) { # If we don't CWD first, then symbolic links won't be followed. ($rcode, @cwdmsg)= &ftp_command("CWD $path\015\012", '250') ; ($rcode, @r)= &ftp_command("LIST\015\012", '125|150') ; # was: ($rcode, @r)= &ftp_command("LIST $path\015\012", '125|150') ; } else { ($rcode, @r)= &ftp_command("RETR $path\015\012", '125|150|550') ; # If 550 response, it may be a symlink to a directory. # Try to CWD to it; if successful, do a redirect, else die with the # original error response. Note that CWD is required by RFC 1123 # (section 4.1.2.13), which updates RFC 959. 
    if ($rcode==550) {
        ($rcode)= &ftp_command("CWD $path\015\012", '') ;
        &ftp_error(550,@r) unless $rcode==250 ;
        ($enc_URL= $URL)=~ s/ /%20/g ;   # URL-encode any spaces
        # Redirect the browser to the same URL with a trailing slash
        print $STDOUT "$HTTP_1_X 301 Moved Permanently\015\012",
                      $session_cookies,
                      $NO_CACHE_HEADERS,
                      "Date: ", &rfc1123_date($now,0), "\015\012",
                      "Location: ", $url_start, &wrap_proxy_encode($enc_URL . '/'),
                      "\015\012\015\012" ;
        close(S) ; close(DATA_LISTEN) ; close(DATA_XFER) ;
        goto ONE_RUN_EXIT ;
    }
}

# If not using passive FTP, accept the connection.
if (!$USE_PASSIVE_FTP_MODE) {
    ($remote_addr= accept(DATA_XFER, DATA_LISTEN))
        || &HTMLdie("Error accepting FTP data socket: $!") ;
    select((select(DATA_XFER), $|=1)[0]) ;   # unbuffer the socket
    close(DATA_LISTEN) ;
    &HTMLdie("Intruder Alert! Someone other than the server is trying "
           . "to send you data.")
        unless (substr($remote_addr,4,4) eq substr(getpeername(S),4,4)) ;
}

# Read the data into $body.
# Streaming support added in 1.3.  For notes about streaming, look near
#   the end of the http_get() routine.  Basically, as long as a resource
#   isn't HTML (or a directory listing, in the case of FTP), we can pass
#   the data immediately to the client, since it won't be modified.  Be
#   sure to set $response_sent here.

# This first block is for the rare case when an FTP resource is a special
#   type that needs to be converted, e.g. a style sheet.  The block is
#   copied in from http_get() and modified.  It will be cleaner and
#   handled differently in a future version.
if ( !$is_dir && !$is_html
     && ( ($expected_type=~ /^$TYPES_TO_HANDLE_REGEX$/io)
          || ($content_type=~ /^$TYPES_TO_HANDLE_REGEX$/io) ) )
{
    my($type) ;
    if ( ($expected_type eq 'text/css') || ($content_type eq 'text/css') ) {
        $type= 'text/css' ;
    } elsif ($expected_type=~ /^$TYPES_TO_HANDLE_REGEX$/io) {
        $type= $expected_type ;
    } else {
        $type= $content_type ;
    }
    undef $/ ;
    $body= <DATA_XFER> ;
    $body= (&proxify_block($body, $type))[0] ;
    $headers= "Content-Length: " . length($body) . "\015\012" . $headers ;
    print $STDOUT $status, $headers, $body ;
    $response_sent= 1 ;

} elsif ($is_html) {
    undef $/ ;
    $body= <DATA_XFER> ;

} elsif ($is_dir) {
    undef $/ ;              # This was used for all non-HTML before streaming
    $body= <DATA_XFER> ;    #   was supported.

} else {
    # Stick a Content-Length: header into the headers if appropriate (often
    #   there's a "(xxx bytes)" string in a 125 or 150 response line).
    # Be careful about respecting previous value of $headers, which may
    #   already end in a blank line.
    foreach (grep(/^(125|150)/, @r)) {
        if ( ($content_length)= /\((\d+)[ \t]+bytes\)/ ) {
            $headers= "Content-Length: $content_length\015\012" . $headers ;
            last ;
        }
    }

    # This is the primary change to support streaming media.
    my($buf) ;
    print $STDOUT $status, $headers ;
    print $STDOUT $buf while read(DATA_XFER, $buf, 16384) ;
    $response_sent= 1 ;
}

close(DATA_XFER) ;

# Get the final completion response
&ftp_command('', '226|250') ;

&ftp_command("QUIT\015\012") ;   # don't care how they answer
close(S) ;

# Make a user-friendly directory listing.  Add Content-Length: header.
if ($is_dir) {
    &ftp_dirfix(\@welcome, \@cwdmsg) ;
    $headers= "Content-Length: " . length($body) . "\015\012" . $headers ;
}

} # sub ftp_get()


# Send $cmd and return response code followed by full lines of FTP response.
# Die if response doesn't match the regex $ok_response.
# Assumes the FTP control connection is in socket S.
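# ----------------------------------------------------------------------
# Illustrative usage of ftp_command(), matching the calls made above; the
# command and expected-response regex here are just examples.
#
#   my($rcode, @reply)= &ftp_command("PASV\015\012", '227') ;
#   # $rcode is the 3-digit response code; @reply holds the full response
#   # lines.  If the code doesn't match the regex '227', ftp_error() is
#   # called and this never returns.
# ----------------------------------------------------------------------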
sub ftp_command {
    my($cmd, $ok_response)= @_ ;
    my(@r, $rcode) ;
    local($/)= "\012" ;

    print S $cmd ;
    $_= $r[0]= <S> ;
    $rcode= substr($r[0],0,3) ;
    until (/^$rcode /) {    # this catches single- and multi-line responses
        push(@r, $_= <S>) ;
    }
    &ftp_error($rcode,@r) if $ok_response ne '' && $rcode!~ /$ok_response/ ;
    return $rcode, @r ;
}


# Convert a directory listing to user-friendly HTML.
# The text in $body is the output of the FTP LIST command, which is *usually*
#   the equivalent of Unix's "ls -l" command.  See notes in ftp_get() about
#   why we use LIST instead of NLST.
# A couple of tangles here to handle spaces in filenames.  We should probably
#   handle spaces in other protocols too, but URLs normally prohibit spaces--
#   it's only relative paths within a scheme (like FTP) that would have them.
sub ftp_dirfix {
    my($welcome_ref, $cwdmsg_ref)= @_ ;
    my($newbody, $parent_link, $max_namelen, @f, $is_dir, $is_link, $link,
       $name, $size, $size_type, $file_type, $welcome, $cwdmsg, $insertion,
       $enc_path) ;

    # Set minimum name column width; longer names will widen the column
    $max_namelen= 16 ;

    # each file should have name/, size, date
    my(@body)= split(/\015?\012/, $body) ;
    foreach (@body) {
        # Hack to handle leading spaces in filenames-- only allow a single
        #   space after the 8th field before filename starts.
        # @f= split(" ", $_, 9) ;   # Note special use of " " pattern.
        # next unless $#f>=8 ;
        @f= split(" ", $_, 8) ;     # Note special use of " " pattern.
        next unless $#f>=7 ;
        @f[7,8]= $f[7]=~ /^(\S*) (.*)/ ;   # handle leading spaces in filenames
        next if $f[8]=~ /^\.\.?$/ ;
        $file_type= '' ;
        $is_dir=  $f[0]=~ /^d/i ;
        $is_link= $f[0]=~ /^l/i ;
        $file_type= $is_dir ? 'Directory' : $is_link ? 'Symbolic link' : '' ;
        $name= $f[8] ;
        $name=~ s/^(.*) ->.*$/$1/ if $is_link ;   # remove symlink's " -> xxx"
        $name.= '/' if $is_dir ;
        $max_namelen= length($name) if length($name)>$max_namelen ;
        if ($is_dir || $is_link) {
            ($size, $size_type)= () ;
        } else {
            ($size, $size_type)= ($f[4], 'bytes') ;
            ($size, $size_type)= ($size>>10, 'Kb') if $size > 10240 ;
        }

        # Easy absolute URL calculation, because we know it's a relative path.
        ($enc_path= $base_path . $name)=~ s/ /%20/g ;   # URL-encode any spaces

        $link= &HTMLescape( $url_start . &wrap_proxy_encode($enc_path) ) ;
        $newbody.= sprintf(" %s%s %5s %-5s %3s %2s %5s %s\012",
                           $link, $name, "\0".length($name),
                           $size, $size_type, @f[5..7], $file_type) ;
    }

    # A little hack to get filenames to line up right-- replace embedded
    #   "\0"-plus-length with correct number of spaces.
    $newbody=~ s/\0(\d+)/ ' ' x ($max_namelen-$1) /ge ;

    if ($path eq '/') {
        $parent_link= '' ;
    } else {
        ($enc_path= $base_path)=~ s#[^/]*/$## ;
        $enc_path=~ s/ /%20/g ;   # URL-encode any spaces
        $link= &HTMLescape( $url_start . &wrap_proxy_encode($enc_path) ) ;
        $parent_link= "Up to higher level directory" ;
    }

    if ($SHOW_FTP_WELCOME && $welcome_ref) {
        $welcome= &HTMLescape(join('', grep(s/^230-//, @$welcome_ref))) ;
        # Make links of any URLs in $welcome.  Imperfect regex, but does OK.
        $welcome=~ s#\b([\w+.-]+://[^\s"']+[\w/])(\W)# '$1$2" #ge ;
        $welcome.= "
" if $welcome ne '' ; } else { $welcome= '' ; } # If CWD returned a message about this directory, display it. Make links # a la $welcome, above. if ($cwdmsg_ref) { $cwdmsg= &HTMLescape(join('', grep(s/^250-//, @$cwdmsg_ref))) ; $cwdmsg=~ s#\b([\w+.-]+://[^\s"']+[\w/])(\W)# '$1$2" #ge ; $cwdmsg.= "
" if $cwdmsg ne '' ; } # Create the top insertion if needed. $insertion= &full_insertion($URL,0) if $doing_insert_here ; $body= < FTP directory of $URL $insertion

FTP server at $host

Current directory is $path


$welcome$cwdmsg
$parent_link
$newbody

EOS } # Return a generalized FTP error page. # For now, respond with 200. In the future, give more appropriate codes. sub ftp_error { my($rcode,@r)= @_ ; close(S) ; close(DATA_LISTEN) ; close(DATA_XFER) ; my($date_header)= &rfc1123_date($now, 0) ; my $response= < FTP Error

FTP Error

The FTP server at $host returned the following error response:

EOR
    $response.= join('', @r, "
\n") . footer() ; my $cl= length($response) ; print $STDOUT <&', \*SSL2CLIENT) or die "rewire STDOUT failed: $!\n" ; $STDOUT= \*SSL2CLIENT ; # Accept SSL connection. #Net::SSLeay::accept($ssl_obj->{SSL}) or Net::SSLeay::die_if_ssl_error("SSL accept() error: $!"); #$Net::SSLeay::trace= 1 ; my $rv ; while (1) { # jsm-- busy loop here-- fix. $rv= Net::SSLeay::accept($ssl_obj->{SSL}) ; last if $rv>0 ; my $err= Net::SSLeay::get_error($ssl_obj->{SSL}, $rv) ; next if $err==Net::SSLeay::ERROR_WANT_READ() or $err==Net::SSLeay::ERROR_WANT_WRITE() ; return if $rv==0 and $err==Net::SSLeay::ERROR_SYSCALL() ; # EOF that violates protocol die "Net::SSLeay::accept() failed; err=[$err]\n" ; } # SSL connection is now set up. # Else we're using unencrypted pipes. } else { $STDIN= $SS ; $STDOUT= $out_fh || $SS ; } local($/)= "\012" ; # Support HTTP/1.1 pipelining. while (1) { my $request_line= <$STDIN> ; return unless defined $request_line ; # If line starts with a digit, it's the remote IP address. # A valid HTTP request line doesn't start with a digit. chomp($remote_address= $request_line), next if $request_line=~ /^\d/ ; my($method, $request_uri, $client_http_version)= $request_line=~ /^(\w+)\s+(.*)\s+(HTTP\/[\d.]+)\015?\012\z/s ; $request_uri=~ s#(?:^[\w+.-]+:)?//(?:[^/]*)## ; # strip leading scheme and host:port if there # Read headers into @headers . my($header, @headers) ; while (($header= <$STDIN>)!~ /^\015?\012\z/) { $header=~ s/\015?\012\z// ; # remove trailing CRLF last unless $header ne '' ; # Unfold long headers as needed. if ($header=~ s/^\s+/ / and @headers) { $headers[$#headers].= $header ; } else { push(@headers, $header) ; } } # For now, don't return any favicon.ico . if ($request_uri eq '/favicon.ico') { my($date_header)= &rfc1123_date($now, 0) ; print $STDOUT "HTTP/1.1 404 Not Found\015\012Date: $date_header\015\012\015\012" ; next ; } # Set %ENV . %ENV= %ENV_UNCHANGING ; my($name, $value) ; foreach (@headers) { ($name, $value)= split(/:\s*/, $_, 2) ; $name=~ s/-/_/g ; $ENV{'HTTP_' . uc($name)}= $value ; } foreach (qw(CONTENT_LENGTH CONTENT_TYPE)) { $ENV{$_}= $ENV{"HTTP_$_"} ; delete $ENV{"HTTP_$_"} ; } my $auth= $ENV{HTTP_AUTHORIZATION} ; delete $ENV{HTTP_AUTHORIZATION} ; # Set AUTH_TYPE, and authenticate! my($up64, $u, $p) ; if ($auth) { ($ENV{AUTH_TYPE}, $up64)= split(/\s+/, $auth) ; ($u, $p)= split(/:/, unbase64($up64)) if defined $up64 ; } else { $ENV{AUTH_TYPE}= '' ; } return_401_response($client_http_version), last unless &daemon_authenticate($ENV{AUTH_TYPE}, $u, $p) ; $ENV{PATH_INFO}= $request_uri ; # Skip PATH_TRANSLATED; it's messy and we don't use it. $ENV{REMOTE_ADDR}= $remote_address ; # Skip REMOTE_HOST; it's expensive and we don't use it. $ENV{REMOTE_USER}= $u ; $ENV{REQUEST_METHOD}= $method ; $ENV{SERVER_PROTOCOL}= $client_http_version ; # Run it! eval { one_run() } ; if ($@=~ /^exiting\b/) { close(S) ; untie(*S) ; eval { alarm(0) } ; # use eval{} to avoid failing where alarm() is missing last ; } # Return if not pipelining. last if $ENV{HTTP_CONNECTION} eq 'close' ; } return 1 ; } # These are the CGI environment variables that don't change from run to run. sub set_ENV_UNCHANGING { my($port)= @_ ; $ENV_UNCHANGING{GATEWAY_INTERFACE}= 'CGI/1.1' ; ($ENV_UNCHANGING{SERVER_NAME}= hostfqdn()) =~ s/\.$// ; # bug in hostfqdn() may leave trailing dot $ENV_UNCHANGING{SERVER_PORT}= $port ; $ENV_UNCHANGING{SERVER_SOFTWARE}= 'Embedded' ; $ENV_UNCHANGING{QUERY_STRING}= '' ; # it's in PATH_INFO instead. $ENV_UNCHANGING{SCRIPT_NAME}= '' ; } # Very simple for now, but may be expanded later. 
sub daemon_authenticate { my($authtype, $u, $p)= @_ ; return 1 unless $EMB_USERNAME ne '' or $EMB_PASSWORD ne '' ; return ($u eq $EMB_USERNAME and $p eq $EMB_PASSWORD) ; } # This should be passed as a parameter to spawn_generic_server() (which # handles all listening, forking, etc.) to spawn our RTMP proxy. This # routine should handle one RTMP connection and then exit. # This routine tries to handle each chunk and message as quickly as possible, # including handling partial messages as the parts arrive. # jsm-- structure of this isn't the cleanest. sub rtmp_proxy { my($SS, $listen_port)= @_ ; $RTMP_SERVER_PORT= $listen_port ; # hacky... # First, do the handshake with the client. # Store our epoch. my $t0_SS= [gettimeofday] ; my $t0_SC ; # Read C0 (RTMP version). my $c0= read_socket($SS, 1) ; # Send S0. print $SS "\x03" ; # Read C1 (timestamp, zero, and 1528 random bytes). my $c1= read_socket($SS, 1536) ; my $c1_read_t= int(tv_interval($t0_SS)*1000) ; # in milliseconds my $client_t0= unpack('N', substr($c1, 0, 4)) ; my $remote1528= substr($c1, 8) ; # Send S1. my $local1528= join('', map {chr(int(rand 256))} 1..1528) ; print $SS pack('N', int(tv_interval($t0_SS)*1000)), "\0\0\0\0", $local1528 ; # Read C2 (mostly echo of S1 ). my $c2= read_socket($SS, 1536) ; die "Bad RTMP handshake" unless substr($c2, 0, 4) eq "\0\0\0\0" and substr($c2, 8) eq $local1528 ; # Send S2 (mostly echo of C1). print $SS substr($c1, 0, 4), pack('N', $c1_read_t), substr($c1, 8) ; # RTMP handshake with client complete. # Use parent process to handle client-to-server communication, and child # to handle server-to-client communication. fork() is later, inside # the chunk-handling while loop. Not the most efficient and a bit hacky, # but works for now. my($SC, $SR, $SW) ; # client socket, reading socket, and writing socket. $SR= $SS ; # Next, read each chunk, proxify/unproxify messages if needed, and write # to other side. my $chunk_size= 128 ; # default my($win_ack_size, $peer_win_ack_size) ; # default??? my $received_bytes= 0 ; # for Acknowledgement messages my($cin, $b1, $b2, $b3, $b23, $fmt, $csid, $cmh, $ts, $ext_ts, $msg_len, $msg_type, $msg_stream_id, $is_parent) ; my($c, $m)= ({}, {}) ; # hashes of chunks and messages while (1) { # Read chunk basic header. $b1= ord(read_socket($SR, 1)) ; $cin= chr($b1) ; ($fmt, $csid)= ($b1>>6, $b1&0x3f) ; if ($csid==0) { $cin.= $b2= read_socket($SR, 1) ; $csid= ord($b2) + 64 ; } elsif ($csid==1) { $cin.= $b23= read_socket($SR, 2) ; my($b2, $b3)= unpack('C2', $b23) ; $csid= $b3*256 + $b2 + 64 ; } # Create chunk list if not already created. $c->{$csid}{chunks}= [] unless $c->{$csid}{chunks} ; # Read chunk message header (none for $fmt==3). if ($fmt==0) { $cin.= $cmh= read_socket($SR, 11) ; $ts= substr($cmh, 0, 3) ; @{$c->{$csid}}{qw(mlen mtype msid)}= (unpack('N', "\0".substr($cmh, 3, 3)), ord(substr($cmh, 6, 1)), unpack('V', substr($cmh, 7)) ) ; } elsif ($fmt==1) { $cin.= $cmh= read_socket($SR, 7) ; $ts= substr($cmh, 0, 3) ; @{$c->{$csid}}{qw(mlen mtype)}= (unpack('N', "\0".substr($cmh, 3, 3)), ord(substr($cmh, 6)) ) ; } elsif ($fmt==2) { $cin.= $ts= $cmh= read_socket($SR, 3) ; } my $msid= $c->{$csid}{msid} ; # To multiplex messages within one chunk stream, must save mleft for # each message stream. $c->{$csid}{mleft}{$msid}= $c->{$csid}{mlen} unless defined $m->{$msid}{type} ; # Read extended timestamp, if needed. if ($ts eq "\xff\xff\xff") { # Extended timestamp seems to be uint32, though is undocumented. 
$cin.= $ext_ts= read_socket($SR, 4) ; } # Done reading chunk header; next, read data into message buffer or # message payload. my $cpayload= read_socket($SR, $c->{$csid}{mleft}{$msid} <= $chunk_size ? $c->{$csid}{mleft}{$msid} : $chunk_size ) ; $cin.= $cpayload ; $c->{$csid}{mleft}{$msid}-= length($cpayload) ; $m->{$msid}{complete}= 1 if $c->{$csid}{mleft}{$msid}==0 ; # Send acknowledgement if needed. # jsm-- can we count on getting complete chunks? $received_bytes+= length($cin) ; if ($received_bytes>=$win_ack_size) { send_acknowledgement($SR, $received_bytes, $t0_SS) ; $received_bytes= 0 ; # jsm-- do we send total bytes or bytes since last ack? } # End processing and print chunk if passthru. print $SW ($cin), next if $m->{$msid}{passthru} ; if (!defined $m->{$msid}{type}) { $m->{$msid}{mbuf}.= $cpayload ; } else { $m->{$msid}{payload}.= $cpayload ; } # Save complete chunks, in case we just need a pass-through. push(@{$c->{$csid}{chunks}}, $cin) ; # Initialize $m element if we have full message header. if (!defined $m->{$msid}{type} and length($m->{$msid}{mbuf})>=11) { @{$m->{$msid}}{qw(type len ts msid payload)}= (unpack('C', substr($m->{$msid}{mbuf}, 0, 1)), unpack('N', "\0".substr($m->{$msid}{mbuf}, 1, 3)), unpack('N', substr($m->{$msid}{mbuf}, 4, 4)), unpack('N', "\0".substr($m->{$msid}{mbuf}, 8, 3)), substr($m->{$msid}{mbuf}, 11) ) ; delete $m->{$msid}{mbuf} ; } # Chunk stream ID==2 means a protocol control message. if ($csid==2) { # Require a complete message to process protocol control messages. if ($m->{$msid}{complete}) { die("Invalid message stream ID [$msid] in RTMP stream") unless $msid==0 ; my($mtype, $payload)= @{$m->{0}}{qw(type payload)} ; # Set chunk size if ($mtype==1) { $chunk_size= unpack('N', $payload) ; # Abort message } elsif ($mtype==2) { delete $m->{$c->{unpack('N', $payload)}{msid}} ; # jsm-- need to delete part of %$c too? # Acknowledgement } elsif ($mtype==3) { my $seqno= unpack('N', $payload) ; # User control message can pass through } elsif ($mtype==4) { if (defined $SW) { print $SW @{$c->{2}{chunks}} ; $c->{2}{chunks}= [] ; } # Window acknowledgement size # Done by server after successful connect request from client, # or by either after receiving Set Peer Bandwidth message. # Must handle this separately for client and server, since we change data length. # Pass through these messages, since window size should be similar # for both connections. } elsif ($mtype==5) { $win_ack_size= unpack('N', $payload) ; if (defined $SW) { print $SW @{$c->{2}{chunks}} ; $c->{2}{chunks}= [] ; } # Set peer bandwidth # Pass through these messages, since window size should be similar # for both connections. } elsif ($mtype==6) { my($new_peer_was, $limit_type)= (unpack('N', substr($payload, 0, 4)), unpack('C', substr($payload, 4)) ) ; if ($new_peer_was!=$peer_win_ack_size) { $peer_win_ack_size= $new_peer_was ; send_win_ack_size($SR, $peer_win_ack_size, $is_parent ? $t0_SC : $t0_SS) ; } if (defined $SW) { print $SW @{$c->{2}{chunks}} ; $c->{2}{chunks}= [] ; } } else { die("Illegal PCM message type [$mtype] in RTMP stream") ; } delete $m->{0} ; delete $c->{2} ; } # Otherwise, handle message piece depending on its type. All are just # pass-through except command messages, and possibly a submessage # within an aggregate message. } else { my $mtype= $m->{$msid}{type} ; # Command message using AMF0 or AMF3 if ($mtype==20 or $mtype==17) { if ($m->{$msid}{complete}) { ($host, $port)= ('', '') ; # hacky # Note use of $reverse parameter, true when client-to-server. 
my $newmpl= ($mtype==20) ? proxify_RTMP_command_AMF0(\$m->{$msid}{payload}, $is_parent) : proxify_RTMP_command_AMF3(\$m->{$msid}{payload}, $is_parent) ; if (defined $newmpl) { # If $host set and in parent process, then connect to # the destination server and do the handshake. # This is hacky, but we can only start the server connection # after we've started processing messages from the client. if ($host and !defined $SC) { $SC= rtmp_connect_to($host, $port) ; $t0_SC= [gettimeofday] ; $is_parent= fork() ; ($SR, $SW)= $is_parent ? ($SS, $SC) : ($SC, $SS) ; ($c, $m)= ({}, {}), next unless $is_parent ; # restart loop if new child } my($newcbh, $newcmh0, $i) ; my $newm= chr($mtype) . substr(pack('N', length($newmpl)), 1, 3) . pack('N', $m->{$msid}{ts}) . substr(pack('N', $msid), 1, 3) . $newmpl ; # Build chunk basic header. if ($csid<=63) { $newcbh= chr($csid) ; } elsif ($csid<=319) { $newcbh= "\0" . chr($csid-64) ; } else { $newcbh= "\x01" . chr(($csid-64) & 0xff) . chr(($csid-64)>>8) ; } # Build chunk message header, possibly including extended timestamp. $newcmh0= $ts . substr(pack('N', length($newm)), 1, 3) . chr($mtype) . pack('V', $msid) ; $newcmh0.= $ext_ts if $ts eq "\xff\xff\xff" ; # Print new chunk(s) from $newm, a 0-type followed by 3-types. print $SW $newcbh, $newcmh0, substr($newm, 0, $chunk_size) ; substr($newcbh, 0, 1)||= "\xc0" ; # set chunk fmt to 3 henceforth print $SW $newcbh, substr($newm, $_*$chunk_size, $chunk_size) for 1..int((length($newm)-1)/$chunk_size) ; # Perl doesn't like line below.... # for ($i= $chunk_size ; $i{$csid}{chunks}} ; $c->{$csid}{chunks}= [] ; } delete $m->{$msid} ; } # Aggregate message } elsif ($mtype==22) { # jsm-- must implement # Data message using AMF0 or AMF3, shared object message using # AMF0 or AMF3, audio message, or video message } elsif (chr($mtype)=~ /[\x12\x0f\x13\x10\x08\x09]/) { print $SW @{$c->{$csid}{chunks}} ; $c->{$csid}{chunks}= [] ; $m->{$msid}{passthru}= 1 ; } else { die("Illegal message type [$mtype] in RTMP stream") ; } } } exit(0) ; } # rtmp_proxy # Open an RTMP connection to the given host and port, and perform the handshake. # Returns the open socket. sub rtmp_connect_to { my($host, $port)= @_ ; $port= 1935 if $port eq '' ; my $S ; # filehandle for socket &newsocketto($S, $host, $port) ; # Send C0 and C1 chunks. print $S "\x03" ; # C0 is RTMP version # C1 is timestamp, zero, and 1528 bytes of random data. my $local1528= join('', map {chr(int(rand 256))} 1..1528) ; my $t0= [gettimeofday] ; print $S "\0\0\0\0\0\0\0\0", $local1528 ; # Read S0 and S1 chunks. my $s0s1= read_socket($S, 1537) ; my $s0s1_time= pack('N', int(tv_interval($t0)*1000)) ; my $remote1528= substr($s0s1, 9) ; # Send C2 chunk. print $S substr($s0s1, 1, 4), $s0s1_time, $remote1528 ; # Read S2 chunk. my $s2= read_socket($S, 1536) ; die "Bad RTMP handshake" unless $local1528 eq substr($s2, 8) ; return $S ; } sub send_win_ack_size { my($S, $win_ack_size, $t0)= @_ ; my $ts= int(tv_interval($t0)*1000) ; my $ext_ts ; my $msg= "\x05\0\0\x04" . pack('N', $ts) . "\0\0\0" . pack('N', $win_ack_size) ; if ($ts>=0xffffff) { $ext_ts= pack('N', $ts-0xffffff) ; $ts= "\xff\xff\xff" ; } else { $ts= substr(pack('N', $ts), 1, 3) ; $ext_ts= '' ; } print $S "\x02" . $ts . "\0\0\x0f\x05\0\0\0\0" . $ext_ts . $msg ; # chunk header plus message } # Identical to send_win_ack_size() except for message type byte (in two places). sub send_acknowledgement { my($S, $seqno, $t0)= @_ ; my $ts= int(tv_interval($t0)*1000) ; my $ext_ts ; my $msg= "\x03\0\0\x04" . pack('N', $ts) . "\0\0\0" . 
pack('N', $seqno) ; if ($ts>=0xffffff) { $ext_ts= pack('N', $ts-0xffffff) ; $ts= "\xff\xff\xff" ; } else { $ts= substr(pack('N', $ts), 1, 3) ; $ext_ts= '' ; } print $S "\x02" . $ts . "\0\0\x0f\x03\0\0\0\0" . $ext_ts . $msg ; # chunk header plus message } # The next two routines follow the AMF0 and AMF3 specs at: # http://opensource.adobe.com/wiki/download/attachments/1114283/amf0_spec_121207.pdf # http://opensource.adobe.com/wiki/download/attachments/1114283/amf3_spec_05_05_08.pdf # Returns the proxified (or unproxified if $reverse) command object record, # or undef if unchanged. # Proxifying the app value is tricky, since it requires the value of tcUrl to # get the host and port. Save the original tcUrl, then proxify app at the # end, inserting it into @out. Hacky. sub proxify_RTMP_command_AMF0 { my($in, $reverse)= @_ ; my(@out, $len, $segstart, $tcUrl_orig, $appvalpos) ; # Proxify connect command, and nothing else. return unless $$in=~ /\G\x02\0\x07connect\0\x3f\xf0\0\0\0\0\0\0\x03/gc ; while ($$in=~ /G(..)/gcs && ($len= unpack('n', $1))) { my $name= get_next_substring($in, $len) ; # would normally UTF-decode name, but we're only worried about ASCII values if ($name=~ /^(?:app|swfUrl|tcUrl|pageUrl)$/) { push(@out, substr($in, $segstart, pos($$in)-$segstart)) ; $$in=~ /\G\x02(..)/gcs or die "connect.$name has wrong AMF0 type" ; my $value= get_next_substring($in, unpack('n', $1)) ; $tcUrl_orig= $value if $name eq 'tcUrl' ; $value= proxify_RTMP_value($name, $value, $reverse) ; $appvalpos= @out if $name eq 'app' ; push(@out, "\x02" . pack('n', length($value)) . $value) ; # must be one element $segstart= pos($$in) ; } else { skip_value_AMF0($in) ; } } # After all the others, proxify app value. Not needed when unproxifying. if (!$reverse and $tcUrl_orig ne '' and $appvalpos ne '') { my $papp= proxify_RTMP_value('app', undef, $reverse, $tcUrl_orig) ; splice(@out, $appvalpos, 1, "\x02" . pack('n', length($papp)) . $papp) ; } # As part of fork() hack in rtmp_proxy(), set $host and $port here. ($host, $port)= $tcUrl_orig=~ m#rtmp://([^/:])(?::([^/]))?#i if $reverse and $tcUrl_orig ne '' ; die "no AMF0 object end marker" unless $$in=~ /\G\x09$/ ; return unless @out ; # i.e. command is unchanged push(@out, substr($in, $segstart)) ; return join('', @out) ; } # Returns the proxified or unproxified if $reverse) command object record, # or undef if unchanged. # jsm-- this is mostly complete, but don't fully understand the AMF3 object # format. Should compare with actual AMF3 examples. sub proxify_RTMP_command_AMF3 { my($in, $reverse)= @_ ; my(@out, $segstart, @srefs, $tcUrl_orig, $appvalpos) ; # Proxify connect command, and nothing else. # jsm-- what if non-canonical U29 values are used? Or string reference? if ($$in=~ /\G\x06\x47connect\x04\x01\x0a([\x60-\x6f\xe0-\xef])/gc) { my($class_name, $byte1, $name, $value, $flag, $u28) ; $byte1= ord($1) ; # Traits # These apparently include a regular array of sealed trait member # names as well as an associative array of dynamic members. Store # it all in one hash, like the Array type. # jsm-- what is the difference between an object and a set of traits? my $is_dynamic= ($byte1 & 0x08)!=0 ; my $tcount= get_Uxx($in, 4, $byte1) ; for (1..$tcount) { ($flag, $u28)= get_flag_U28($in) ; pos($$in)+= $u28 if $flag ; # skip string if it's not a reference # jsm-- could sealed traits hold values we want to proxify? } skip_value_AMF3($in) for 1..$tcount ; if ($is_dynamic) { do { ($flag, $u28)= get_flag_U28($in) ; $name= $flag ? 
get_next_substring($in, $u28) : $srefs[$u28] ; if ($name=~ /^(?:app|swfUrl|tcUrl|pageUrl)$/) { $$in=~ /\G\x06/ or die "connect.$name has wrong AMF3 type" ; push(@out, substr($in, $segstart, pos($$in)-$segstart)) ; ($flag, $u28)= get_flag_U28($in) ; $value= $flag ? get_next_substring($in, $u28) : $srefs[$u28] ; $tcUrl_orig= $value if $name eq 'tcUrl' ; $value= proxify_RTMP_value($name, $value, $reverse) ; $appvalpos= @out if $name eq 'app' ; push(@out, U28(length($value)) . $value) ; # must be one element $segstart= pos($$in) ; } else { skip_value_AMF3($in) ; } } until $name eq '' ; } # After all the others, proxify app value. Not needed when unproxifying. if (!$reverse) { my $papp= proxify_RTMP_value('app', undef, $reverse, $tcUrl_orig) ; splice(@out, $appvalpos, 1, U28(length($papp)) . $papp) ; } return unless @out ; # i.e. command is unchanged push(@out, substr($in, $segstart)) ; return join('', @out) ; } else { return ; } } # Proxify (or unproxify, if $reverse) a value in an RTMP "connect" command object. # The format for a proxified "rtmp://host:port/app/instance" is hereby # "rtmp://proxy_host:proxy_port/host%3aport%2fapp/instance" . # The $tcUrl_orig parameter is part of a hack to proxify app . sub proxify_RTMP_value { my($name, $value, $reverse, $tcUrl_orig)= @_ ; if ($reverse) { if ($name eq 'app') { $value=~ s/%(..)/chr(hex($1))/ge ; $value=~ m#^[^/]*/(.*)#s ; return $1 ; } elsif ($name eq 'swfUrl') { $value=~ s#^\Q$THIS_SCRIPT_URL/[^/]*/## ; # jsm-- doesn't work with @PROXY_GROUP return wrap_proxy_decode($value) ; } elsif ($name eq 'tcUrl') { my($app, $instance)= $value=~ m#^rtmp://[^/]*/([^/]*)/(.*)#is ; $app=~ s/%(..)/chr(hex($1))/ge ; return "rtmp://$app/$instance" ; } elsif ($name eq 'pageUrl') { $value=~ s#^\Q$THIS_SCRIPT_URL/[^/]*/## ; # jsm-- doesn't work with @PROXY_GROUP return wrap_proxy_decode($value) ; } } else { if ($name eq 'app') { return $value unless $tcUrl_orig ; # skip proxifying until later-- part of hack my($papp)= $tcUrl_orig=~ m#rtmp://([^/]*/[^/]*)/#i ; die "invalid tcUrl value '$value' (doesn't support http:// URLs yet)" unless defined $papp ; $papp=~ s/([^\w.-])/ '%' . sprintf('%02x',ord($1)) /ge ; return $papp ; } elsif ($name eq 'swfUrl') { return full_url($value) ; } elsif ($name eq 'tcUrl') { my($papp, $instance)= $value=~ m#^rtmp://([^/]*/[^/]*)/(.*)#is ; die "invalid tcUrl value '$value' (doesn't support http:// URLs yet)" unless defined $papp ; $papp=~ s/([^\w.-])/ '%' . sprintf('%02x',ord($1)) /ge ; my $portst= $RTMP_SERVER_PORT==1935 ? '' : ':'.$RTMP_SERVER_PORT ; return "rtmp://$THIS_HOST$portst/$papp/$instance" ; } elsif ($name eq 'pageUrl') { return full_url($value) ; } } die "proxify_RTMP_value() called for '$name'" ; } # Convenience function to get substr() and advance pos(). sub get_next_substring { my($in, $len)= @_ ; my $ret= substr($$in, pos($$in), $len) ; pos($$in)+= $len ; return $ret ; } # Get a U29 value from $$in. U29 values are 1-4 bytes, have first bit set on # all bytes but the last, and each byte contributes 7 bits to the value, # except the possible fourth byte which contributes all 8 bits. sub get_U29 { my($in)= @_ ; $$in=~ /\G([\x80-\xff]{0,3})(.)/gcs ; return ord($2) unless $1 ; # shortcut for most common case my($last, @in)= ($2, split(//, $1)) ; my $ret= 0 ; $ret= ($ret<<7) + (ord($_)&0x7f) foreach @in ; return +($ret<<8) + ord($last) ; } # Like get_U29, but skip the first $skip_bits of the first byte. # Include optional leading byte $byte1, if it's been read. 
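# ----------------------------------------------------------------------
# Illustrative example of the tcUrl rewriting rule described above; the
# hosts and ports here are made up.
#
#   my $tcUrl= 'rtmp://media.example.com:1935/vod/movie1' ;    # hypothetical input
#   # proxify_RTMP_value('tcUrl', $tcUrl) would yield something like
#   #   rtmp://<this-proxy-host>:<rtmp-port>/media.example.com%3a1935%2fvod/movie1
#   # i.e. the original "host:port/app" collapses into one URL-encoded path
#   # segment on the proxy, and the instance ("movie1") stays as-is, so the
#   # reverse direction can recover the original app, host, and port.
# ----------------------------------------------------------------------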
sub get_Uxx { my($in, $skip_bits, $byte1)= @_ ; $$in=~ /\G(.)/gcs, $byte1= ord($1) unless defined $byte1 ; my $ret= $byte1 & ((1<<(7-$skip_bits))-1) ; return $ret unless $byte1 & 0x80 ; $$in=~ /\G([\x80-\xff]{0,2})(.)/gcs ; my($last, @in)= ($2, split(//, $1)) ; $ret= 0 ; $ret= ($ret<<7) + (ord($_)&0x7f) foreach @in ; return +($ret<<8) + ord($last) ; } # Get a U29 value from $$in, and split it into a 1-bit flag in front followed # by a U28. Returns (flag, U28). sub get_flag_U28 { my($in)= @_ ; $$in=~ /\G([\x80-\xff]{0,3})(.)/gcs ; return (ord($2) & 0x40, ord($2) & 0x3f) unless $1 ; # most common case my($last, @in)= (ord($2), map {ord} split(//, $1)) ; my($flag, $ret)= ($in[0]&0x40, $in[0]&0x3f) ; shift(@in) ; $ret= ($ret<<7) + ($_&0x7f) foreach @in ; return ($flag, ($ret<<8) + $last) ; } sub U29 { my($value)= @_ ; return chr($value) if $value <= 0x7f ; return chr(($value>>7) | 0x80) . chr($value & 0x7f) if $value <= 0x3fff ; return chr(($value>>14) | 0x80) . chr((($value>>7) & 0x7f) | 0x80) . chr($value & 0x7f) if $value <= 0x1fffff ; return chr(($value>>22) | 0x80) . chr((($value>>15) & 0x7f) | 0x80) . chr((($value>>8) & 0x7f) | 0x80) . chr($value & 0xff) ; } # This assumes a 1st bit of 1 (e.g. indicating a string literal, not a reference). sub U28 { my($value)= @_ ; return chr($value | 0x40) if $value <= 0x3f ; return chr(($value>>7) | 0xc0) . chr($value & 0x7f) if $value <= 0x1fff ; return chr(($value>>14) | 0xc0) . chr((($value>>7) & 0x7f) | 0x80) . chr($value & 0x7f) if $value <= 0xfffff ; return chr(($value>>22) | 0xc0) . chr((($value>>15) & 0x7f) | 0x80) . chr((($value>>8) & 0x7f) | 0x80) . chr($value & 0xff) ; } # Skip past an AMF0 value in $in. No return value. sub skip_value_AMF0 { my($in)= @_ ; $$in=~ /\G(.)/gcs ; my $marker= ord($1) ; # Number if ($marker==0) { pos($$in)+= 8 ; # String } elsif ($marker==2) { $$in=~ /\G(..)/gcs ; pos($$in)+= unpack('n', $1) ; # Object } elsif ($marker==3) { while ($$in=~ /\G(..)/gcs) { pos($$in)+= unpack('n', $1) ; skip_value_AMF0($in) ; } die "no AMF0 object end marker" unless $$in=~ /\G\x09/gc ; # Reference } elsif ($marker==7) { pos($$in)+= 2 ; # ECMA array } elsif ($marker==8) { pos($$in)+= 4 ; while ($$in=~ /\G(..)/gcs) { pos($$in)+= unpack('n', $1) ; skip_value_AMF0($in) ; } die "no AMF0 object end marker" unless $$in=~ /\G\x09/gc ; # Object end # These should only happen as part of another value, so ignore here. #} elsif ($marker==9) { # Strict Array } elsif ($marker==0x0a) { $$in=~ /\G(....)/gcs ; skip_value_AMF0($in) for 1..unpack('N', $1) ; # Date } elsif ($marker==0x0b) { pos($$in)+= 10 ; # Long String } elsif ($marker==0x0c) { $$in=~ /\G(....)/gcs ; pos($$in)+= unpack('N', $1) ; # XML document } elsif ($marker==0x0f) { $$in=~ /\G(....)/gcs ; pos($$in)+= unpack('N', $1) ; # Typed object } elsif ($marker==0x10) { $$in=~ /\G(..)/gcs ; pos($$in)+= unpack('n', $1) ; while ($$in=~ /\G(..)/gcs) { pos($$in)+= unpack('n', $1) ; skip_value_AMF0($in) ; } die "no AMF0 object end marker" unless $$in=~ /\G\x09/gc ; # AVMplus object, i.e. use AMF3 } elsif ($marker==0x11) { skip_value_AMF3($in) ; # all other types are either 0-length or unsupported. } elsif ($marker>0x11) { die "unrecognized AVM0 marker: [$marker]" ; } } # Skip past an AMF3 value in $in. No return value. 
sub skip_value_AMF3 { my($in)= @_ ; my($flag, $u28) ; $$in=~ /\G(.)/gcs ; my $marker= ord($1) ; # Integer if ($marker==4) { $$in=~ /\G([\x80-\xff]{0,3})(.)/gcs ; # Double } elsif ($marker==5) { pos($$in)+= 8 ; # String } elsif ($marker==6) { ($flag, $u28)= get_flag_U28($in) ; pos($$in)+= $u28 if $flag ; # XMLDocument } elsif ($marker==7) { ($flag, $u28)= get_flag_U28($in) ; pos($$in)+= $u28 if $flag ; # Date } elsif ($marker==8) { ($flag)= get_flag_U28($in) ; pos($$in)+= 8 if $flag ; # Array } elsif ($marker==9) { ($flag, $u28)= get_flag_U28($in) ; if ($flag) { # First, skip associative array. while (!$$in=~ /\G\x01/gc) { ($flag, $u28)= get_flag_U28($in) ; pos($$in)+= $u28 if $flag ; skip_value_AMF3($in) ; } # Then, skip normal array, sized by first $u28. skip_value_AMF3($in) for 1..$u28 ; } # Object } elsif ($marker==0x0a) { $$in=~ /\G(.)/gcs ; my $byte1= ord($1) ; pos($$in)-- ; # Object reference if (($byte1 & 0x40)==0) { $$in=~ /\G([\x80-\xff]{0,3})(.)/gcs ; # Trait reference } elsif (($byte1 & 0x20)==0) { $$in=~ /\G([\x80-\xff]{0,3})(.)/gcs ; # Traits # These apparently include a regular array of sealed trait member # names as well as an associative array of dynamic members. Store # it all in one hash, like the Array type. # jsm-- what is the difference between an object and a set of traits? } elsif (($byte1 & 0x10)==0) { my $is_dynamic= ($byte1 & 0x08)!=0 ; my $tcount= get_Uxx($in, 4) ; ($flag, $u28)= get_flag_U28($in) ; pos($$in)+= $u28 if $flag ; for (1..$tcount) { ($flag, $u28)= get_flag_U28($in) ; pos($$in)+= $u28 if $flag ; } skip_value_AMF3($in) for 1..$tcount ; if ($is_dynamic) { do { ($flag, $u28)= get_flag_U28($in) ; pos($$in)+= $u28 if $flag ; skip_value_AMF3($in) unless $u28==0 ; } until $u28==0 ; # jsm-- is this right? Spec says 0x01.... } # Externalizable trait (not supported; handled by client/server agreement) } elsif (($byte1 & 0x10)!=0) { die "externalizable trait not supported" ; } # XML } elsif ($marker==0x0b) { my($flag, $u28)= get_flag_U28($in) ; pos($$in)+= $u28 if $flag ; # ByteArray } elsif ($marker==0x0c) { my($flag, $u28)= get_flag_U28($in) ; pos($$in)+= $u28 if $flag ; # all other types are either 0-length or unsupported. } elsif ($marker>0x11) { die "unrecognized AVM0 marker: [$marker]" ; } } # Fork off and start a generic listening TCP server, one that in turn forks # off client connections. # Invokes &$coderef($NEW_SOCKET_HANDLE) in each child process, after accept(). # Takes the listening socket, the lock filehandle, a code reference, a timeout, # and any additional arguments to the code reference as params. The timeout, # in seconds, applies to the daemon process; 0 means no timeout. # Returns daemon PID on success. # Be very careful to get rid of all instances that are started! # This routine used to include create_server_lock() and new_server_socket(), # but the port number returned from new_server_socket() is needed before # calling this. # This routine is liberal about die'ing. Consider using eval{} to trap those. # This is actually much more complicated than it needs to be for the HTTP # server, when run from the command line. We don't really need to # double-fork (and more) in that case, since the parent process is almost # immediately exiting. # jsm-- should we maintain a list of running daemons? 
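# Illustrative sketch of how a caller is expected to combine the routines
#   below (the handler name, lock filename, and timeout are hypothetical;
#   the real callers elsewhere in this script differ in detail):
#
#       my($LOCK, $port, $pid)= create_server_lock('example-daemon.lock') ;
#       if ($LOCK) {                                  # not already running
#           my($LISTEN) ;
#           ($LISTEN, $port)= new_server_socket(0) ;  # 0 means "any free port"
#           spawn_generic_server($LISTEN, $LOCK, \&handle_example_client, 300) ;
#       }
#       # either way, $port now tells us where the daemon is listening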
sub spawn_generic_server { my($LISTEN, $LOCK_FH, $coderef, $timeout, @args)= @_ ; my $new_pid= double_fork_daemon($LOCK_FH, $LISTEN) ; return $new_pid if $new_pid ; my $port= (unpack_sockaddr_in(getsockname($LISTEN)))[0] ; # get the port bound to # Record port and PID in lockfile. select((select($LOCK_FH), $|=1)[0]) ; # make $LOCK_FH autoflush output seek($LOCK_FH, 0, 0) ; print $LOCK_FH "$port,$$\n" ; # Clear permissions mask, for easier file-handling. umask 0 ; $SIG{CHLD} = \&REAPER; # Daemon dies if not used for $timeout seconds. $SIG{ALRM}= sub {exit} ; eval { alarm($timeout) } ; # use eval{} to avoid failing where alarm() is missing # jsm-- should allow stopping process via x-proxy://admin/stop-daemon ? my $paddr ; while (1) { my($SS) ; $paddr= accept($SS, $LISTEN) ; next if !$paddr and $!==EINTR ; die "failed accept: $!" unless $paddr ; # Restart timer upon each incoming connection. eval { alarm($timeout) } ; # use eval{} to avoid failing where alarm() is missing my $pid= fork() ; die "failed fork: $!" unless defined($pid) ; close($SS), next if $pid ; # parent daemon process # After here is the per-connection process. # Processes handling connections don't have a timeout. eval { alarm(0) } ; # use eval{} to avoid failing where alarm() is missing # They also shouldn't hold the lock. close($LOCK_FH) ; exit(&$coderef($SS, $port, @args)) ; } # Kill zombie children spawned by the daemon's fork. sub REAPER { local $! ; 1 while waitpid(-1, WNOHANG)>0 and WIFEXITED($?) ; $SIG{CHLD} = \&REAPER; } } # Open and lock a file, creating it if needed. # Returns either: the lockfile handle, or (undef, port, pid) if the the file # is already locked (indicating that the server is already running). # Uses lock on $lock_file to ensure one instance only. Thus, use the same # $lock_file for all calls that spawn the same daemon. $lock_file also stores # the port and PID of the final daemon process. sub create_server_lock { my($lock_file)= @_ ; my($LOCK) ; # First, open and get lock on $lock_file, to avoid duplicates daemons. die "illegal lock_file name: [$lock_file]" if $lock_file=~ /\.\./ or $lock_file=~ m#^/# or $lock_file=~ /[^\w.-]/ or $lock_file eq '' ; -d $PROXY_DIR or mkdir($PROXY_DIR, 0755) or die "mkdir [$PROXY_DIR]: $!" ; open($LOCK, (-s "$PROXY_DIR/$lock_file" ? '+<' : '+>'), "$PROXY_DIR/$lock_file") || die "open: $!" ; if (!flock($LOCK, LOCK_EX|LOCK_NB)) { # daemon already started my($port, $pid)= ((scalar <$LOCK>)=~ /(\d+)/g) ; close($LOCK) ; return (undef, $port, $pid) ; } return ($LOCK) ; } # Opens a generic server socket and starts listening. Use $port if possible, # else use any available port. Returns (listening socket, port used). # This routine is liberal about die'ing. Consider using eval{} to trap those, # or returning undef. sub new_server_socket { my($port)= @_ ; # Create and listen on server socket. my($LISTEN) ; socket($LISTEN, AF_INET, SOCK_STREAM, (getprotobyname('tcp'))[2]) or die "socket: $!" ; setsockopt($LISTEN, SOL_SOCKET, SO_REUSEADDR, 1) or die "setsockopt: $!" ; bind($LISTEN, sockaddr_in($port, INADDR_ANY)) or bind($LISTEN, sockaddr_in(0, INADDR_ANY)) or die "bind: $!" ; $port= (unpack_sockaddr_in(getsockname($LISTEN)))[0] ; # get the port bound to listen($LISTEN, SOMAXCONN) or die "listen: $!" ; return ($LISTEN, $port) ; } # Double-forks a daemon process. Returns the resulting PID in the parent, # or 0 in the resulting grandchild daemon. sub double_fork_daemon { my($LOCK_FH, $LISTEN)= @_ ; # Open pipe to communicate PID back to caller. 
my($PIPE_P, $PIPE_C) ; pipe($PIPE_P, $PIPE_C) ; # First fork... my $pid= fork() ; die "fork: $!" unless defined($pid) ; # First parent process returns. if ($pid) { close($PIPE_C) ; close($LISTEN) ; close($LOCK_FH) if $LOCK_FH ; my $finalpid= <$PIPE_P> ; close($PIPE_P) ; return $finalpid ; } # Child process continues. close($PIPE_P) ; # Close filehandles in child process. close(S) ; # in case it's open from somewhere # This is required for a daemon, to disconnect from controlling terminal # and current process group. setsid() || die "setsid: $!" unless $^O=~ /win/i ; # Fork again to guarantee no controlling terminal. $pid= fork() ; die "fork: $!" unless defined($pid) ; # Send the PID to the parent process. print $PIPE_C "$pid\n" if $pid; close($PIPE_C) ; # Exit second parent process. exit(0) if $pid ; # Second child process continues. This is the daemon process. return 0 ; } #-------------------------------------------------------------------------- # # _fix: modify response as appropriate for given protocol (scheme). # # http_fix: modify headers as needed, including cookie support. # Note that headers have already been unfolded, when they were read in. # Some HTTP headers are defined as comma-separated lists of values, and they # should be split before being processed. According to the HTTP spec in # RFC 2616, such headers are: # Accept|Accept-Charset|Accept-Encoding|Accept-Language|Accept-Ranges| # Allow|Cache-Control|Connection|Content-Encoding|Content-Language| # If-Match|If-None-Match|Pragma|Public|Transfer-Encoding|Upgrade|Vary| # Via|Warning|WWW-Authenticate # As it turns out, none need to be handled in new_header_value(). Thus, we # don't need to split any standard headers before processing. See section # 4.2 of RFC 2616, plus the header definitions, for more info. # Conceivably, Via: and Warning: could be exceptions to this, since they # do contain hostnames. But a) these are primarily for diagnostic info and # not used to connect to those hosts, and b) we couldn't distinguish the # hostnames from pseudonyms anyway. # Unfortunately, the non-standard Link: and URI: headers may be lists, and # we *do* have to process them. Because of their unusual format and rarity, # these are handled as lists directly in new_header_value(). sub http_fix { my($name, $value, $new_value) ; my(@headers)= $headers=~ /^([^\012]*\012?)/mg ; # split into lines foreach (@headers) { next unless ($name, $value)= /^([\w.-]+):\s*([^\015\012]*)/ ; $new_value= &new_header_value($name, $value) ; $_= defined($new_value) ? "$name: $new_value\015\012" : '' ; } $headers= join('', @headers) ; } # Returns the value of an updated header, e.g. with URLs transformed to point # back through this proxy. Returns undef if the header should be removed. # This is used to translate both real headers and headers. # Special case for URI: and Link: -- these headers can be lists of values # (see the HTTP spec, and comments above in http_fix()). Thus, we must # process these headers as lists, i.e. transform each URL in the header. sub new_header_value { my($name, $value)= @_ ; $name= lc($name) ; # sanity check return undef if $name eq '' ; # These headers consist simply of a URL. # Note that all these are absolute URIs, except possibly Content-Location:, # which may be relative to Content-Base or the request URI-- notably, NOT # relative to anything in the content, like a tag. 
return &full_url($value) if $name eq 'content-base' || $name eq 'content-location' ; # Location: header should carry forward the expected type, since some sites # (e.g.. hotmail) may 302 forward to another URL and use the wrong # Content-Type:, and that retrieved resource may still be treated by the # browser as of the expected type. Here we just carry forward the entire # flag segment. if ($name eq 'location') { local($url_start)= $script_url . '/' . $packed_flags . '/' ; return &full_url($value) ; } # Modify cookies to point back through the script, or they won't work. # If they're banned from this server, or if $NO_COOKIE_WITH_IMAGE or # $e_filter_ads is set and the current resource isn't text, then filter # them all out. # We guess whether the current resource is text or not by using both # the Content-Type: response header and the Accept: header in the # original request. Content-Type: can be something text, something # non-text, or it can be absent; Accept: can either accept something # text or not. Our test here is that the resource is non-text either # if Accept: accepts no text, or if Content-Type: indicates non-text. # Put another way, it's text if Accept: can accept text, and # Content-Type: is either a text type, or is absent. # This test handles some cases that failed with earlier simpler tests. # One site had a cookie in a 302 response for a text page that didn't # include a Content-Type: header. Another site was sneakier-- # http://zdnet.com returns an erroneous response that surgically # bypassed an earlier text/no-text test here: a redirection # response to an image contains cookies along with a meaningless # "Content-Type: text/plain" header. They only do this on images that # look like Web bugs. So basically that means we can't trust # Content-Type: alone, because a malicious server has full control over # that header, whereas the Accept: header comes from the client. if ($name eq 'set-cookie') { return undef if $cookies_are_banned_here ; if ($NO_COOKIE_WITH_IMAGE || $e_filter_ads) { return undef if ($headers=~ m#^Content-Type:\s*(\S*)#mi && $1!~ m#^text/#i) || ! grep(m#^(text|\*)/#i, split(/\s*,\s*/, $env_accept)) ; } return &cookie_to_client($value, $path, $host) ; } # Extract $default_style_type as needed. # Strictly speaking, a MIME type is "token/token", where token is # ([^\x00-\x20\x7f-\xff()<>@,;:\\"/[\]?=]+) (RFCs 1521 and 822), # but this below covers all existing and likely future MIME types. if ($name eq 'content-style-type') { $default_style_type= lc($1) if $value=~ m#^\s*([/\w.+\$-]+)# ; return $value ; } # Extract $default_script_type as needed. # Same deal about "token/token" as above. if ($name eq 'content-script-type') { $default_script_type= lc($1) if $value=~ m#^\s*([/\w.+\$-]+)# ; return $value ; } # Handle P3P: header. P3P info may also exist in a tag (or # conceivably a Link: header), but those are already handled correctly # where tags (or Link: headers) are handled. if ($name eq 'p3p') { $value=~ s/\bpolicyref\s*=\s*['"]?([^'"\s]*)['"]?/ 'policyref="' . &full_url($1) . '"' /gie ; return $value ; } # And the non-standard Refresh: header... any others? $value=~ s/(;\s*URL\s*=)\s*((?>['"]?))(\S*)\2/ $1 . &full_url($3) /ie, return $value if $name eq 'refresh' ; # The deprecated URI: header may contain several URI's, inside <> brackets. $value=~ s/<(\s*[^>\015\012]*)>/ '<'.&full_url($1).'>' /gie, return $value if $name eq 'uri' ; # The non-standard Link: header is a little problematic. 
It's described # in the HTTP 1.1 spec, section 19.6.2.4, but it is not standard. Among # other things, it can be used to link to style sheets, but the mechanism # for indicating the style sheet type (=language, which could be a script # MIME type) is not defined. # The HTML 4.0 spec (section 14.6) gives a little more detail regarding # its use of the Link: header, but is still ambiguous-- e.g. their # examples don't specify the type, though elsewhere it's implied that's # required. # Generally speaking, we handle this like a tag. For notes about # this block, see the block above that handles tags. For a # description of the unusual format of this header, see the HTTP spec. # Note that this may be a list of values, and all URIs in it must be # handled. This gets a little messy, because we split on commas, but # don't split on commas that are inside <> brackets, because that's # the URL. if ($name eq 'link') { my($v, @new_values) ; my(@values)= $value=~ /(<[^>]*>[^,]*)/g ; foreach $v (@values) { my($type)= $v=~ m#[^\w.\/?&-]type\s*=\s*["']?\s*([/\w.+\$-]+)#i ; $type= lc($type) ; if ($type eq '') { my($rel) ; $rel= $+ if $v=~ /[^\w.\/?&-]rel\s*=\s*("([^"]*)"|'([^']*)'|([^'"][^\s]*))/i ; $type= 'text/css' if $rel=~ /\bstylesheet\b/i ; } return undef if $scripts_are_banned_here && $type=~ /^$SCRIPT_TYPE_REGEX$/io ; local($url_start)= $url_start ; if ($type ne '') { $url_start= $script_url . '/' . &pack_flags($e_remove_cookies, $e_remove_scripts, $e_filter_ads, $e_hide_referer, $e_insert_entry_form, $is_in_frame, $type) . '/' ; } $v=~ s/<(\s*[^>\015\012]*)>/ '<' . &full_url($1) . '>' /gie ; push(@new_values, $v) ; } return join(', ', @new_values) ; } # For all non-special headers, return $value return $value ; } #-------------------------------------------------------------------------- # Special admin routines, when called via the scheme type "x-proxy://" #-------------------------------------------------------------------------- #-------------------------------------------------------------------------- # # I took the liberty of creating a general mechanism to let this proxy do # whatever tricks it needs to do, via the magic URL scheme "x-proxy://". # It was required to support HTTP Basic Authentication, and it's useful # for other things too. The mechanism uses a heirarchical URL space: a # function family is in the normal "hostname" location, then the functions # and subfunctions are where the path segments would be. A query string # is allowed on the end. # # Don't add functions to this that may compromise security, since anyone # can request a URL beginning with x-proxy://. For that matter, malicious # Web pages can automatically invoke these URLs, which could be annoying # if e.g. they clear your cookies without warning or other acts. # # Which URLs map to which functions should really be documented here. So, # # //auth/make_auth_cookie # receives the authorization form data, sends a formatted auth # cookie to the user, and redirects the user to the desired URL. # # //start # initiates a browsing session. # # //cookies/clear # clears all of a user's cookies. # # //cookies/manage # present the user with a page to manage her/his cookies # # //cookies/update # process whatever actions are requested from the //cookies/manage # page (currently only deletion of cookies). # # //cookies/set-cookie # set the cookie from the query string # # //frames/topframe # returns the special top frame with the entry form and/or the # other insertion. 
# # //frames/framethis # given a URL, returns a page that frames that URL in the lower # frame with the top frame above (not currently used). # # //scripts/jslib # returns the JavaScript library used when rewriting JavaScript. # Normally, this can be cached for efficiency. # #-------------------------------------------------------------------------- # A general-purpose routine to handle all x-proxy requests. # This is expected to exit when completed, so make sure any called routines # exit if needed. (By "exit", I mean "die 'exiting'".) sub xproxy { my($URL)= @_ ; $URL=~ s/^x-proxy://i ; # $qs will contain the query string in $URL, whether it was encoded with # the URL or came from QUERY_STRING. my($family, $function, $qs)= $URL=~ m#^//(\w+)(/?[^?]*)\??(.*)#i ; if ($family eq 'auth') { # For //auth/make_auth_cookie, return an auth cookie and redirect user # to the desired URL. The URL is already encoded in $in{'l'}. if ($function eq '/make_auth_cookie') { my(%in)= &getformvars() ; # must use () or will pass current @_! my($location)= $url_start . $in{'l'} ; # was already encoded my($cookie)= &auth_cookie(@in{'u', 'p', 'r', 's'}) ; &redirect_to($location, "Set-Cookie: $cookie\015\012") ; } } elsif ($family eq 'start') { &startproxy ; } elsif ($family eq 'cookies') { # Store in the database a cookie sent encoded in the query string. if ($function eq 'set-cookie') { # This does checks, then stores cookie in database. &cookie_to_client(cookie_decode($qs), $path, $host) if $USE_DB_FOR_COOKIES ; print $STDOUT "$HTTP_1_X 204 No Content\015\012", "Cache-Control: no-cache\015\012", "Pragma: no-cache\015\012\015\012" ; die 'exiting' ; # If pages could link to x-proxy:// URLs directly, this would be a # security hole in that malicious pages could clear or update one's # cookies. But full_url() prevents that. If that changes, then we # should consider requiring POST in /cookie/clear and /cookie/update # to minimize this risk. } elsif ($function eq '/clear') { my($location)= $url_start . &wrap_proxy_encode('x-proxy://cookies/manage') ; $location.= '?' . $qs if $qs ne '' ; &redirect_to($location, &cookie_clearer($ENV{'HTTP_COOKIE'})) ; } elsif ($function eq '/manage') { &manage_cookies($qs) ; # For //cookies/update, clear selected cookies and go to manage screen. } elsif ($function eq '/update') { my(%in)= &getformvars() ; # must use () or will pass current @_! my($location)= $url_start . &wrap_proxy_encode('x-proxy://cookies/manage') ; # Add encoded "from" parameter to URL if available. if ($in{'from'} ne '') { my($from_param)= $in{'from'} ; $from_param=~ s/([^\w.-])/ '%' . sprintf('%02x',ord($1)) /ge ; $location.= '?from=' . $from_param ; } # "delete=" input fields are in form &base64(&cookie_encode($name)). my(@cookies_to_delete)= map {cookie_decode(&unbase64($_))} split(/\0/, $in{'delete'}) ; if ($USE_DB_FOR_COOKIES) { &delete_cookies_from_db(@cookies_to_delete) ; &redirect_to($location) ; } else { &redirect_to($location, &cookie_clearer(@cookies_to_delete)) ; } } } elsif ($family eq 'frames') { my(%in)= &getformvars($qs) ; # Send the top proxy frame when a framed page is reframed. if ($function eq '/topframe') { &return_top_frame($in{'URL'}) ; # Not currently used } elsif ($function eq '/framethis') { &return_frame_doc($in{'URL'}, &HTMLescape(&wrap_proxy_decode($in{'URL'}))) ; } } elsif ($family eq 'scripts') { # Return the library needed for JavaScript rewriting. Normally, this # can be cached. 
if ($function eq '/jslib') { &return_jslib ; } } warn "no such function as x-proxy://$family$function\n" ; &HTMLdie("Sorry, no such function as //". &HTMLescape("$family$function."), '', '404 Not Found') ; } sub return_flash_vars { my($s)= @_ ; my($len)= length($s) ; my($date_header)= &rfc1123_date($now, 0) ; warn "in return_flash_vars($s)" ; # this indicates success... :? print $STDOUT <>24 & 255, $host>>16 & 255, $host>>8 & 255, $host & 255) if $host=~ /^\d+$/ ; # Allow shorthand for hostnames-- if no "." is in it, then add "www"+"com" # or "ftp"+"com". Don't do it if the host already exists on the LAN. if ($scheme eq 'http') { $host= "www.$host.com" if ($host!~ /\./) && !gethostbyname($host) ; } elsif ($scheme eq 'ftp') { # If there's username/password embedded (which you REALLY shouldn't do), # then don't risk sending that to an unintended host. $host= "ftp.$host.com" if ($auth eq '') && ($host!~ /\./) && !gethostbyname($host) ; } # Force $portst to ":" followed by digits, or ''. ($portst)= $portst=~ /^(:\d+)/ ; # Reassemble $authority after all changes are complete. $authority= $auth . $host . $portst ; # Prepend flag segment of PATH_INFO # This "erroneously" sets flags to "000000" when user config is not # allowed, but it doesn't really affect anything. $url_start=~ s#[^/]*/$## ; # remove old flag segment from $url_start $url_start.= &pack_flags(@in{'rc', 'rs', 'fa', 'br', 'if'}, $is_in_frame, '') . '/' ; &redirect_to( $url_start . &wrap_proxy_encode("$scheme://$authority$path$query") . $fragment ) ; } # Create the flag segment of PATH_INFO from the given flags, not including # slashes. Result should be a valid path segment (i.e. alphanumeric and # certain punctuation OK, but no slashes or white space). # This routine defines the structure of the flag segment. # Note that an $expected_type of '' explicitly means that no type in particular # is expected, which will be the case for almost all resources. # Note that any unrecognized MIME type (i.e. no element in %MIME_TYPE_ID) # is treated the same as '', i.e. element #0 -> "0" . # NOTE: IF YOU MODIFY THIS ROUTINE, then be sure to review and possibly # modify the corresponding routine _proxy_jslib_pack_flags() in the # JavaScript library, far below in the routine return_jslib(). It is # (almost) a Perl-to-JavaScript translation of this routine. sub pack_flags { my($remove_cookies, $remove_scripts, $filter_ads, $hide_referer, $insert_entry_form, $is_in_frame, $expected_type)= @_ ; my $total= !!$remove_cookies *32 + !!$remove_scripts *16 + !!$filter_ads *8 + !!$hide_referer *4 + !!$insert_entry_form *2 + !!$is_in_frame ; my $ret= chr($total).chr($MIME_TYPE_ID{lc($expected_type)}) ; $ret=~ tr/\x00-\x3f/0-9A-Za-z\-_/ ; return $ret ; } # The reverse of pack_flags()-- given a flag segment from PATH_INFO, break # out all flag info. The return list should match the input list for # pack_flags(). # NOTE: IF YOU MODIFY THIS ROUTINE, then be sure to review and possibly # modify the corresponding routine _proxy_jslib_unpack_flags() in the # JavaScript library, far below in the routine return_jslib(). It is # (almost) a Perl-to-JavaScript translation of this routine. sub unpack_flags { my($flags)= @_ ; my($remove_cookies, $remove_scripts, $filter_ads, $hide_referer, $insert_entry_form, $is_in_frame, $expected_type) ; $flags=~ tr/0-9A-Za-z\-_/\x00-\x3f/ ; ($flags, $expected_type)= map {ord} split(//, $flags) ; $remove_cookies= ($flags & 32) ? 1 : 0 ; $remove_scripts= ($flags & 16) ? 1 : 0 ; $filter_ads= ($flags & 8) ? 
1 : 0 ; $hide_referer= ($flags & 4) ? 1 : 0 ; $insert_entry_form= ($flags & 2) ? 1 : 0 ; $is_in_frame= ($flags & 1) ? 1 : 0 ; # Extract expected MIME type from final one-character flag $expected_type= $ALL_TYPES[$expected_type] ; return ($remove_cookies, $remove_scripts, $filter_ads, $hide_referer, $insert_entry_form, $is_in_frame, $expected_type) ; } #-------------------------------------------------------------------------- # Cookie routines #-------------------------------------------------------------------------- # As of version 1.3, cookies are now a general mechanism for sending various # data to the proxy. So far that's only authentication info and actual # cookies, but more functions could be added. The new scheme essentially # divides up the cookie name space to accommodate many categories. # Explanation: Normally, a cookie is uniquely identified ("keyed") by the # domain, path, and name, but for us the domain and path will always be # that of the proxy script, so we need to embed all "key" information into # the cookie's name. Here, the general format for a cookie's name is # several fields, joined by ";". The first field is always a cookie type # identifier, like "AUTH" or "COOKIE", and the remaining fields vary # according to cookie type. This compound string is then URL-encoded as # necessary (cookie names and values can't contain semicolons, commas, or # white space). The cookie's value contains whatever you need to store, # also URL-encoded as necessary. # A general bug in cookie routines-- ports are not considered, which may # matter for both AUTH and COOKIE cookies. It only matters when two ports # on the same server are being used. # Returns all info we need from cookies. Right now, that means one composite # cookie with all cookies that match the domain and path (and no others!), # and an %auth hash to look up auth info by server and realm. Essentially, # this undoes the transformation done by the cookie creation routines. # @auth is used instead of %auth for slight speedup. # See notes where the various cookies are created for descriptions of their # format; currently, that's in cookie_to_client() and auth_cookie(). # NOTE: IF YOU MODIFY THIS ROUTINE, then be sure to review and possibly # modify the corresponding routine _proxy_jslib_cookie_from_client() in the # JavaScript library, far below in the routine return_jslib(). It is # a Perl-to-JavaScript translation of part of this routine. sub parse_cookie { my($cookie, $target_path, $target_server, $target_port, $target_scheme)= @_ ; my($name, $value, $type, $subtype, @n, $cname, $path, $domain, $cvalue, $secure, @matches, %pathlen, $realm, $server, @auth) ; foreach ( split(/\s*;\s*/, $cookie) ) { ($name, $value)= split(/=/, $_, 2) ; # $value may contain "=" # Set $session_id and $session_id_persistent from S and S2 cookies. if ($USE_DB_FOR_COOKIES) { $session_id= $value, next if $name eq 'S' ; $session_id_persistent= $value, next if $name eq 'S2' ; } $name= &cookie_decode($name) ; $value= &cookie_decode($value) ; ($type, @n)= split(/;/, $name) ; if ($type eq 'COOKIE') { ($cname, $path, $domain)= @n ; $domain= lc($domain) ; ($cvalue, $secure)= split(/;/, $value) ; next if $secure && ($target_scheme ne 'https') ; # According to the cookie spec, a cookie domain equal to a "." # plus the target domain should not match, but browsers treat # it as if it does, so we do the same here. 
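# Illustrative example (hypothetical cookie): a stored proxy cookie named
#   "COOKIE;id;/;.example.com" (i.e. $cname="id", $path="/",
#   $domain=".example.com") passes the test below for a request to
#   http://www.example.com/anything , and-- because of the lenient matching
#   just described-- also for http://example.com/ itself.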
if ( ($target_server=~ /\Q$domain\E$/i or (lc('.'.$target_server) eq lc($domain)) ) && $target_path=~ /^\Q$path\E/ ) { # Cookies are always supposed to have a name, but some servers # don't follow this, and at least one browser treats it as # cookie with only "value" instead of "name=value". So, # we follow that here, for these errant cookies. push(@matches, ($cname ne '' ? $cname.'='.$cvalue : $cvalue)) ; $pathlen{$matches[$#matches]}= length($path) ; } } elsif ($type eq 'AUTH') { # format of auth cookie's name is AUTH;$enc_realm;$enc_server ($realm, $server)= @n ; $realm=~ s/%([\da-fA-F]{2})/ pack('C', hex($1)) /ge ; $server=~ s/%([\da-fA-F]{2})/ pack('C', hex($1)) /ge ; my($portst)= ($target_port eq '') ? '' : ":$target_port" ; push(@auth, $realm, $value) if $server eq "$target_server$portst" ; } } # More specific path mappings (i.e. longer paths) should be sent first. $cookie= join('; ', sort { $pathlen{$b} <=> $pathlen{$a} } @matches) ; return $cookie, @auth ; } # Old notes: # # Cookie support: The trick is how to send a cookie back to the client that # it will return for appropriate hosts. Given that the target URL may be # encoded, and the client can't always tell where the target URL is, the # only way to do that is to get *all* the cookies from the client and send # along the matching ones. If the client has a lot of cookies through the # proxy, this could conceivably be a problem. Oh well, it works for the # limited amount I've tested. # Here, we transform the cookie from the server into something the client # will always send back to us, and embed the real server/path info in the # name of the name-value pair, since the cookie is uniquely identified by # the domain, path, and name. Upon return from the client, we split the # name back into its original fields. # One way to get around *some* of the all-cookies-all-the-time problem, # *sometimes*, may be possible to program with the following approach: # First, the target URL must be "encoded" (in proxy_encode()) in a way # that it resembles a path. For example, the default "://" --> "/" # encoding does this. Then, let the cookies go back to the client with # the target paths still intact. This would only work when the cookie # domain is the default, i.e. the source host. Check other possibilities # carefully, too, but I think you could get it to work somehow. # Question-- is the port supposed to be used in the domain field? # Everything here assumes not, which is conceivably a security risk. # Transform one cookie into something the client will send back through # the script, but still has all the needed info. Returns a transformed # cookie, or undef if the cookie is invalid (e.g. comes from # the wrong host). # A cookie is uniquely identified by the domain, path, and name, so this # transformation embeds the path and domain info into the "name". # If $USE_DB_FOR_COOKIES is true, then store cookie in database instead, # and return undef to clear any Set-Cookie: header. # This doesn't handle multiple comma-separated cookies-- possible, but # which seems a slight contradiction between the HTTP spec (section 4.2 # of both HTTP 1.0 and 1.1 specs) and the cookie spec at # http://www.netscape.com/newsref/std/cookie_spec.html. # NOTE: IF YOU MODIFY THIS ROUTINE, then be sure to review and possibly # modify the corresponding routine _proxy_jslib_cookie_to_client() in the # JavaScript library, far below in the routine return_jslib(). It is # a Perl-to-JavaScript translation of this routine. 
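# Illustrative example (hypothetical values): if http://www.example.com/cgi/
#   responds with "Set-Cookie: id=42; path=/cgi/", the routine below returns
#   roughly
#       COOKIE;id;/cgi/;www.example.com=42; path=$ENV_SCRIPT_NAME/
#   i.e. the original name, path, and domain are folded into the new cookie's
#   name (with name and value cookie_encode()'d, and "; secure" appended when
#   $RUNNING_ON_SSL_SERVER is set), so the browser always returns it to this
#   script and parse_cookie() can recover the original key later.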
sub cookie_to_client { my($cookie, $source_path, $source_server)= @_ ; my($name, $value, $expires_clause, $path, $domain, $secure_clause, $httponly_clause) ; my($new_name, $new_value, $new_cookie) ; # Start last four regexes with ";" to avoid extracting from name=value. # Cookie values aren't supposed to have commas, per the spec, but at least # one site (go.com, using the Barista server) violates this. So for now, # allow commas in $value. # Cookie values aren't supposed to have spaces, either, but some sites # have spaces in cookie values. Thus, we allow spaces too. :P #($name, $value)= $cookie=~ /^\s*([^=;,\s]*)\s*=?\s*([^;,\s]*)/ ; ($name, $value)= $cookie=~ /^\s*([^=;,\s]*)\s*=?\s*([^;]*)/ ; ($expires_clause)= $cookie=~ /;\s*(expires\s*=[^;]*)/i ; ($path)= $cookie=~ /;\s*path\s*=\s*([^;,\s]*)/i ; # clash w/ ;-params? ($domain)= $cookie=~ /;\s*domain\s*=\s*([^;,\s]*)/i ; ($secure_clause)= $cookie=~ /;\s*(secure\b)/i ; ($httponly_clause)= $cookie=~ /;\s*(HttpOnly\b)/i ; # Path defaults to either the path of the URL that sent the cookie, or '/'. # See comments above $COOKIE_PATH_FOLLOWS_SPEC for more details. $path= $COOKIE_PATH_FOLLOWS_SPEC ? $source_path : '/' if $path eq '' ; # Domain must be checked for validity: defaults to the server that sent # the cookie; otherwise, must match end of that server name, and must # contain at least two dots if in one of these seven top-level domains, # three dots otherwise. # As it turns out, hostnames ending in extraneous dots, like # "slashdot.org.." resolve to the hostname without the dots. So we # need to guard against malicious cookie servers getting around the # two/three-dot requirement this way. # Unfortunately, the three-dot rule is not always followed; consider # for example the domain "google.de". Probably because of such domains, # browsers seem to only require two dots. Thus, do the same here, # unless $RESPECT_THREE_DOT_RULE is set. # Browsers also allow domains such as "example.com", i.e. missing the # leading dot. :P So, prepend a dot in such situations; only do this # if the 3-dot rule is already relaxed. if ($domain eq '') { $domain= $source_server ; } else { $domain=~ s/\.*$//g ; # removes trailing dots! $domain=~ tr/././s ; # ... and double dots for good measure. # Allow $domain to match domain-minus-leading-dot (erroneously), # because that's how browsers do it. return undef if ($source_server!~ /\Q$domain\E$/) and ('.'.$source_server ne $domain) ; if ($RESPECT_THREE_DOT_RULE) { return(undef) unless ( ( ($domain=~ tr/././) >= 3 ) || ( ($domain=~ tr/././) >= 2 && $domain=~ /\.(com|edu|net|org|gov|mil|int)$/i ) ) ; } else { if (($domain=~ tr/././) < 2) { return undef if $domain=~ /^\./ ; $domain= '.' . $domain ; return undef if ($domain=~ tr/././) < 2 ; } } } # Change $expires_clause to make it a session cookie if so configured. # Don't do so if the cookie expires in the past, which means a deleted cookie. if ($SESSION_COOKIES_ONLY && $expires_clause ne '') { my($expires_date)= $expires_clause=~ /^expires\s*=\s*(.*)$/i ; $expires_clause= '' if &date_is_after($expires_date, $now) ; } # If we're using a server-side database to store cookies, then store it and # return undef to clear the existing Set-Cookie: header. return undef if $USE_DB_FOR_COOKIES and store_cookie_in_db($name, $value, $expires_clause, $path, $domain, $secure_clause, $httponly_clause) ; # This is hereby the transformed format: name is COOKIE;$name;$path;$domain # (the three values won't already have semicolons in them); value is # $value;$secure_clause . 
Both name and value are then cookie_encode()'d. # The name contains everything that identifies the cookie, and the value # contains all info we might care about later. $new_name= &cookie_encode("COOKIE;$name;$path;$domain") ; # New value is "$value;$secure_clause", then cookie_encode()'d. $new_value= &cookie_encode("$value;$secure_clause") ; # Create the new cookie from its components, removing the empty ones. # The new domain is this proxy server, which is the default if it is not # specified. $new_cookie= join('; ', grep(length, $new_name . '=' . $new_value, $expires_clause, 'path=' . $ENV_SCRIPT_NAME . '/', ($RUNNING_ON_SSL_SERVER ? ('secure') : () ), $httponly_clause )) ; return $new_cookie ; } # Returns a cookie that contains authentication information for a particular # realm and server. The format of the cookie is: The name is # AUTH;$URL_encoded_realm;$URL_encoded_server, and the value is the # base64-encoded "$username:$password" needed for the Authorization: header. # On top of that, both name and value are cookie_encode()'d. # Leave the "expires" clause out, which means the cookie lasts as long as # the session, which is what we want. # Note that auth cookies are NOT stored in a server-side database, for security # reasons. Chances are there will never be enough auth cookies to overflow # the HTTP requests. sub auth_cookie { my($username, $password, $realm, $server)= @_ ; $realm=~ s/(\W)/ '%' . sprintf('%02x',ord($1)) /ge ; $server=~ s/(\W)/ '%' . sprintf('%02x',ord($1)) /ge ; return join('', &cookie_encode("AUTH;$realm;$server"), '=', &cookie_encode(&base64("$username:$password")), '; path=' . $ENV_SCRIPT_NAME . '/' ) ; } # Generates a set of cookies that will delete the cookies contained in the # given cookie strings (e.g. from HTTP_COOKIE). This is done by giving # each cookie an expiration time in the past, and setting their values # to "" for good measure. # The input @cookies can each be a list of cookies separated by ";" . The # cookies themselves can be either "name=value" or just "name". # The return value is one long string of multiple "Set-Cookie:" headers. # Slight quirk in Netscape and other browsers-- if cookie expiration is # set to the epoch time of "01-Jan-1970 00:00:00 GMT" (meaning second #0), # the cookie is treated as a session cookie instead of a deleted cookie. # Using second #1, i.e. "01-Jan-1970 00:00:01 GMT", causes the cookies to # be correctly deleted. sub cookie_clearer { my(@cookies)= @_ ; # may be one or more lists of cookies my($ret, $cname) ; foreach (@cookies) { foreach $cname ( split(/\s*;\s*/) ) { $cname=~ s/=.*// ; # change "name=value" to "name" $ret.= "Set-Cookie: $cname=; expires=Thu, 01-Jan-1970 00:00:01 GMT; " . "path=$ENV_SCRIPT_NAME/\015\012" ; } } return $ret ; } # Reads $session_id and $session_id_persistent from HTTP_COOKIE . sub get_session_cookies { my($name, $value) ; foreach ( split(/\s*;\s*/, $ENV{HTTP_COOKIE}) ) { ($name, $value)= split(/=/, $_) ; $session_id= $value, next if $name eq 'S' ; $session_id_persistent= $value, next if $name eq 'S2' ; } } #-------------------------------------------------------------------------- # Utility routines #-------------------------------------------------------------------------- # The following subroutine looks messy, but can be used to open any # TCP/IP socket in any Perl program. Except for the &HTMLdie() part. # Typeglobbing has trouble with mod_perl and tied filehandles, so pass socket # handle as a string instead (e.g. 'S'), or as a variable. 
# Older versions created the packet structure with the old "pack('S n a4 x8')" # method. However, some OS's (such as BSDI) vary from this, and it wouldn't # work with IPv6 either. So now we use the more general functions, like # pack_sockaddr_in() from Socket.pm. (IPv6 support may require other # changes too.) sub newsocketto { my($S, $host, $port)= @_ ; my($hostaddr, $remotehost) ; # If $host is long integer like 3467251275, break it into a.b.c.d format. # This is for big-endian; reverse the list for little-endian. $host= join('.', $host>>24 & 255, $host>>16 & 255, $host>>8 & 255, $host & 255) if $host=~ /^\d+$/ ; # Create the remote host data structure, from host name or IP address. # Note that inet_aton() handles both alpha names and IP addresses. $hostaddr= inet_aton($host) || &HTMLdie("Couldn't find address for $host: $!") ; # $remotehost= pack('S n a4 x8', AF_INET, $port, $hostaddr) ; $remotehost= pack_sockaddr_in($port, $hostaddr) ; # If the target IP address is a banned host or network, die appropriately. # This assumes that IP address structs have the most significant byte first. # This is a quick addition that will be fleshed out in a later version. # This may not work with IPv6, depending on what inet_aton() returns then. for (@BANNED_NETWORK_ADDRS) { &banned_server_die() if $hostaddr=~ /^$_/ ; # No URL forces a die } # Create the socket and connect to the remote host no strict 'refs' ; # needed to use $S as filehandle socket($S, AF_INET, SOCK_STREAM, (getprotobyname('tcp'))[2]) || &HTMLdie("Couldn't create socket: $!") ; connect($S, $remotehost) || &HTMLdie("Couldn't connect to $host:$port: $!") ; select((select($S), $|=1)[0]) ; # unbuffer the socket } # Read a specific number of bytes from a socket, looping if necessary. # Returns all bytes read (possibly less than $length), or undef on error. # Typeglobbing *STDIN into *S doesn't seem to work with mod_perl 1.21, so # pass socket handle as a string instead (e.g. 'STDIN'), or as a variable. # Using *S, the read() below immediately fails under mod_perl. sub read_socket { # local(*S, $length)= @_ ; my($S, $length)= @_ ; my($ret, $numread, $thisread) ; #$numread= 0 ; no strict 'refs' ; # needed to use $S as filehandle while ( ($numread<$length) # && ($thisread= read(S, $ret, $length-$numread, $numread) ) ) && ($thisread= read($S, $ret, $length-$numread, $numread) ) ) { $numread+= $thisread ; } return undef unless defined($thisread) ; return $ret ; } # Read a chunked body and footers from a socket; assumes that the # Transfer-Encoding: is indeed chunked. # Returns the body and footers (which should then be appended to any # previous headers), or undef on error. # For details of chunked encoding, see the HTTP 1.1 spec, e.g. RFC 2616 # section 3.6.1 . sub get_chunked_body { my($S)= @_ ; my($body, $footers, $chunk_size, $chunk) ; local($_) ; local($/)= "\012" ; # Read one chunk at a time and append to $body. # Note that hex() will automatically ignore a semicolon and beyond. no strict 'refs' ; # needed to use $S as filehandle $body= '' ; # to distinguish it from undef no warnings 'digit' ; # to let hex() operate without warnings while ($chunk_size= hex(<$S>) ) { $body.= $chunk= &read_socket($S, $chunk_size) ; return undef unless length($chunk) == $chunk_size ; # implies defined() $_= <$S> ; # clear CRLF after chunk } # After all chunks, read any footers, NOT including the final blank line. 
while (<$S>) { last if /^(\015\012|\012)/ || $_ eq '' ; # lines end w/ LF or CRLF $footers.= $_ ; } $footers=~ s/(\015\012|\012)[ \t]+/ /g ; # unwrap long footer lines return wantarray ? ($body, $footers) : $body ; } # This is a minimal routine that reads URL-encoded variables from a string, # presumably from something like QUERY_STRING. If no string is passed, # it will read from either QUERY_STRING or STDIN, depending on # REQUEST_METHOD. STDIN can't be read more than once for POST requests. # It returns a hash. In the event of multiple variables with the same name, # it concatenates the values into one hash element, delimiting with "\0". # Returns undef on error. sub getformvars { my($in)= @_ ; my(%in, $name, $value) ; # If no string is passed, read it from the usual channels. unless (defined($in)) { if ( ($ENV{'REQUEST_METHOD'} eq 'GET') || ($ENV{'REQUEST_METHOD'} eq 'HEAD') ) { $in= $ENV{'QUERY_STRING'} ; } elsif ($ENV{'REQUEST_METHOD'} eq 'POST') { return undef unless lc($ENV{'CONTENT_TYPE'}) eq 'application/x-www-form-urlencoded'; return undef unless defined($ENV{'CONTENT_LENGTH'}) ; $in= &read_socket($STDIN, $ENV{'CONTENT_LENGTH'}) ; # should we return undef if not all bytes were read? } else { return undef ; # unsupported REQUEST_METHOD } } foreach (split(/[&;]/, $in)) { s/\+/ /g ; ($name, $value)= split('=', $_, 2) ; $name=~ s/%([\da-fA-F]{2})/ pack('C', hex($1)) /ge ; $value=~ s/%([\da-fA-F]{2})/ pack('C', hex($1)) /ge ; $in{$name}.= "\0" if defined($in{$name}) ; # concatenate multiple vars $in{$name}.= $value ; } return %in ; } # For a given timestamp, returns a date in one of the following two forms, # depending on the setting of $use_dash: # "Wdy, DD Mon YYYY HH:MM:SS GMT" # "Wdy, DD-Mon-YYYY HH:MM:SS GMT" # The first form is used in HTTP dates, and the second in Netscape's cookie # spec (although cookies sometimes use the first form, which seems to be # handled OK by most recipients). # The first form is basically the date format in RFC 822 as updated in RFC # 1123, except GMT is always used here. sub rfc1123_date { my($time, $use_dash)= @_ ; my($s) = $use_dash ? '-' : ' ' ; my(@t)= gmtime($time) ; return sprintf("%s, %02d$s%s$s%04d %02d:%02d:%02d GMT", $WEEKDAY[$t[6]], $t[3], $MONTH[$t[4]], $t[5]+1900, $t[2], $t[1], $t[0] ) ; } # Returns true if $date1 is later than $date2. Both parameters can be in # either rfc1123_date() format or the total-seconds format from time(). # rfc1123_date() format is "Wdy, DD-Mon-YYYY HH:MM:SS GMT", possibly using # spaces instead of dashes. # Returns undef if either date is invalid. # A more general function would be un_rfc1123_date(), to take an RFC 1123 date # and return total seconds. sub date_is_after { my($date1, $date2)= @_ ; my(@d1, @d2) ; # Trivial case when both are numeric. return ($date1>$date2) if $date1=~ /^\d+$/ && $date2=~ /^\d+$/ ; # Get date components, depending on formats if ($date1=~ /^\d+$/) { @d1= (gmtime($date1))[3,4,5,2,1,0] ; } else { @d1= $date1=~ /^\w+,\s*(\d+)[ -](\w+)[ -](\d+)\s+(\d+):(\d+):(\d+)/ ; return undef unless @d1 ; $d1[1]= $UN_MONTH{lc($d1[1])} ; $d1[2]-= 1900 ; } if ($date2=~ /^\d+$/) { @d2= (gmtime($date2))[3,4,5,2,1,0] ; } else { @d2= $date2=~ /^\w+,\s*(\d+)[ -](\w+)[ -](\d+)\s+(\d+):(\d+):(\d+)/ ; return undef unless @d2 ; $d2[1]= $UN_MONTH{lc($d2[1])} ; $d2[2]-= 1900 ; } # Compare year, month, day, hour, minute, second in order. 
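# Illustrative example (hypothetical dates): with the comparison below,
#   &date_is_after('Fri, 02-Jan-2037 00:00:00 GMT', 'Thu, 01 Jan 2037 00:00:00 GMT')
#   is true, and either argument could equally be a numeric time() value.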
    return ( ( $d1[2]<=>$d2[2] or $d1[1]<=>$d2[1] or $d1[0]<=>$d2[0]
            or $d1[3]<=>$d2[3] or $d1[4]<=>$d2[4] or $d1[5]<=>$d2[5] ) > 0 ) ;
}


# Escape any &"<> chars to &xxx; and return resulting string.
# Also converts chars>127 to "&#nnn;" entities.
# NOTE: IF YOU MODIFY THIS ROUTINE, then be sure to review and possibly
#   modify the corresponding routine _proxy_jslib_html_escape() in the
#   JavaScript library, far below in the routine return_jslib().  It is
#   a Perl-to-JavaScript translation of this routine.
sub HTMLescape {
    my($s)= @_ ;
    $s=~ s/&/&amp;/g ;        # must be before all others
    $s=~ s/([^\x00-\x7f])/'&#' . ord($1) . ';'/ge ;
    $s=~ s/"/&quot;/g ;
    $s=~ s/</&lt;/g ;
    $s=~ s/>/&gt;/g ;
    return $s ;
}

# Unescape any &xxx; codes back to &"<> and return resulting string.
# Simplified version here; only includes &"<> and "&#nnn"-type entities.
# Some people accidentally leave off final ";", and some browsers support that
#   if the word ends there, so make the final ";" optional.
# NOTE: IF YOU MODIFY THIS ROUTINE, then be sure to review and possibly
#   modify the corresponding routine _proxy_jslib_html_unescape() in the
#   JavaScript library, far below in the routine return_jslib().  It is
#   a Perl-to-JavaScript translation of this routine.
sub HTMLunescape {
    my($s)= @_ ;
    $s=~ s/&quot\b;?/"/g ;
    $s=~ s/&lt\b;?/</g ;
    $s=~ s/&gt\b;?/>/g ;
    $s=~ s/&#(x)?(\w+);?/ $1 ? chr(hex($2)) : chr($2) /ge ;
    $s=~ s/&amp\b;?/&/g ;     # must be after all others
    return $s ;
}


# Base64-encode a string, except not inserting line breaks.
sub base64 {
    my($s)= @_ ;
    my($ret, $p, @c, $t) ;
    # Base64 padding is done with "=", but that's in the first 64 characters.
    #   So, use "@" as a placeholder for it until the tr/// statement.
    # For each 3 bytes, build a 24-bit integer and split it into 6-bit chunks.
    #   Insert one or two padding chars if final substring is less than 3 bytes.
    $p= 0 ;
    while ($p<length($s)) {
        @c= map {ord} split(//, substr($s, $p, 3)) ;
        $p+= 3 ;
        $t= ($c[0]<<16) + (($c[1]||0)<<8) + ($c[2]||0) ;
        $ret.= join('', map {chr} $t>>18, ($t>>12)%64,
                                  (@c>1) ? ($t>>6) %64 : 64,
                                  (@c>2) ? $t %64      : 64 ) ;   # "@" is chr(64)
    }
    # Translate from bottom 64 chars into base64 chars, plus @ to = conversion.
    $ret=~ tr#\x00-\x3f@#A-Za-z0-9+/=# ;
    return $ret ;
}

# Opposite of base64() .
sub unbase64 {
    my($s)= @_ ;
    my($ret, $p, @c, $t, $pad) ;
    $pad++ if $s=~ /=$/ ;
    $pad++ if $s=~ /==$/ ;
    $s=~ tr#A-Za-z0-9+/##cd ;          # remove non-allowed characters
    $s=~ tr#A-Za-z0-9+/#\x00-\x3f# ;   # for speed, translate to \x00-\x3f
    # For each 4 chars, build a 24-bit integer and split it into 8-bit bytes.
    #   Remove one or two chars from result if input had padding chars.
    $p= 0 ;
    while ($p<length($s)) {
        @c= map {ord} split(//, substr($s, $p, 4)) ;
        $p+= 4 ;
        $t= ($c[0]<<18) + (($c[1]||0)<<12) + (($c[2]||0)<<6) + ($c[3]||0) ;
        $ret.= join('', map {chr} $t>>16, ($t>>8) % 256, $t % 256 ) ;
    }
    chop($ret) if $pad>=1 ;
    chop($ret) if $pad>=2 ;
    return $ret ;
}


# Convert a string from UTF-16 encoding to UTF-8.
sub un_utf16 {
    my($s)= @_ ;
    Encode::from_to($$s, "utf-16", "utf-8") ;   # converts in-place
}


# Read an entire file into a string and return it; return undef on error.
# Does NOT check for any security holes in $fname!
sub readfile {
    my($fname)= @_ ;
    my($ret) ;
    local(*F, $/) ;
    open(F, "<$fname") || return undef ;
    undef $/ ;
    $ret= <F> ;
    close(F) ;
    return $ret ;
}


sub random_string {
    my($len)= @_ ;
    my @chars= (0..9, 'a'..'z', 'A'..'Z') ;
    return join('', map { $chars[rand(scalar @chars)] } 1..$len) ;
}


# Simple, general-purpose HTTP client.  The HTTP client in http_get() is too
#   specialized and non-modular to use for anything but the primary resource.
# This leaves the connection open, i.e. a persistent connection, because that's
#   needed for the purpose this routine was written for (the external tests).
# This routine expects a pointer to a hash containing "host", "port", "socket",
#   and "open" elements, plus a $request_uri string.
In the hash, iff "open" # is false, then a new socket is opened, in the interest of persistent # connections. "host", "port", and "socket" (a string name of a filehandle) # are assumed to be unchanging. # Note that this HTTP client is missing many features, such as proxy support, # SSL support, and authentication. Eventually, http_get() may be restructured # to be more modular and support what we need here. # This is partially copied from http_get(). For more commenting, see that # routine, in the similar sections as below. sub http_get2 { my($c, $request_uri)= @_ ; my($s, $status, $status_code, $headers, $body, $footers, $rin, $win, $num_tries) ; local($/)= "\012" ; no strict 'refs' ; # needed for symbolic references # Using "$c->{socket}" causes syntax errors in some places, so alias it to $s. $s= $c->{socket} ; # For some reason, under mod_perl, occasionally the socket response is # empty. It may have something to do with the scope of the filehandles. # Work around it with this hack-- if such occurs, retry the routine up # to three times. RESTART: { # Create a new socket if a persistent one isn't lingering from last time. # Ideally we'd test eof() on the socket at the end of this routine, but # that may only fail after many seconds. So, here we assume the socket # is still usable if it's not '' and if we can write to it. vec($win= '', fileno($s), 1)= 1 if defined(fileno($s)) ; if (!$c->{open} || !select(undef, $win, undef, 0)) { &newsocketto($c->{socket}, $c->{host}, $c->{port}) ; $c->{open}= 1 ; } # Print the simple request. print $s 'GET ', $request_uri, " HTTP/1.1\015\012", 'Host: ', $c->{host}, (($c->{port}==80) ? '' : ":$c->{port}"), "\015\012", "\015\012" ; vec($rin= '', fileno($s), 1)= 1 ; select($rin, undef, undef, 60) || &HTMLdie("No response from $c->{host}:$c->{port}") ; $status= <$s> ; # hack hack.... unless ($status=~ m#^HTTP/#) { $c->{open}= 0 ; redo RESTART if ++$num_tries<3 ; &HTMLdie("Invalid response from $c->{host}: [$status]") ; } } # Loop to get $status and $headers until we get a non-100 response. # See comments in http_get(), above the similar block. do { ($status_code)= $status=~ m#^HTTP/\d+\.\d+\s+(\d+)# ; $headers= '' ; do { $headers.= $_= <$s> ; # $headers includes last blank line } until (/^(\015\012|\012)$/) || $_ eq '' ; #lines end w/ LF or CRLF $status= <$s> if $status_code == 100 ; # re-read for next iteration } until $status_code != 100 ; # Unfold long header lines, a la RFC 822 section 3.1.1 $headers=~ s/(\015\012|\012)[ \t]+/ /g ; # Read socket body depending on how length is determined; see RFC 2616 (the # HTTP 1.1 spec), section 4.4. if ($headers=~ /^Transfer-Encoding:[ \t]*chunked\b/mi) { ($body, $footers)= &get_chunked_body($s) ; &HTMLdie(&HTMLescape("Error reading chunked response from $c->{host} .")) unless defined($body) ; $headers=~ s/^Transfer-Encoding:[^\012]*\012?//mig ; $headers=~ s/^(\015\012|\012)/$footers$1/m ; } elsif ($headers=~ /^Content-Length:[ \t]*(\d+)/mi) { $body= &read_socket($s, $1) ; } else { undef $/ ; $body= <$s> ; # ergo won't be persistent connection close($s) ; $c->{open}= 0 ; } # If server doesn't support persistent connections, then close the socket. # We would test eof($s) here, but that causes a long wait. 
if ($headers=~ /^Connection:.*\bclose\b/mi || $status=~ m#^HTTP/1\.0#) { close($s) ; $c->{open}= 0 ; } return $body ; } #-------------------------------------------------------------------------- # Output routines #-------------------------------------------------------------------------- # Returns the complete HTML to be inserted at the top of a page, which may # consist of the URL entry form and/or a custom insertion in $INSERT_HTML # or $INSERT_FILE. # [Actually, this is only the insertion in the -- the URL form and # possibly the user's insertion-- not the JS insertion in the .] # As an important side effect, both %IN_CUSTOM_INSERTION and %in_mini_start_form # are set in set_custom_insertion() and mini_start_form(), respectively. # These are used later to handle certain JavaScript. # Note that any insertion should not have any relative URLs in it, because # there's no good base URL to resolve them with. See the comments where # $INSERT_HTML and $INSERT_FILE are set. # Use the global, persistent variable $CUSTOM_INSERTION to hold the custom # insertion from $INSERT_HTML or $INSERT_FILE. Set it the first time it's # needed (every time for a CGI script, once for a mod_perl script). This # minimizes how often an inserted file is opened and read. # $INSERT_HTML takes precedence over $INSERT_FILE. # The inserted entry form is never anonymized. sub full_insertion { my($URL, $in_top_frame)= @_ ; my($ret, $form, $insertion) ; $form= &mini_start_form($URL, $in_top_frame) if $e_insert_entry_form ; if (($INSERT_HTML ne '') || ($INSERT_FILE ne '')) { &set_custom_insertion if $CUSTOM_INSERTION eq '' ; # The insertion should not have relative URLs, but in case it does # provide a base URL of this script for lack of anything better. # It's erroneous, but it avoids unpredictable behavior. $url_start # is also required for proxify_html(), but it has already been set. # We can't do this only once to initialize, we must do this for each # run, because user config flags might change from run to run. # NOTE! If we don't use 0 in &proxify_html() here we'll recurse! if ($ANONYMIZE_INSERTION) { local($base_url)= $script_url ; &fix_base_vars ; $insertion= &proxify_html(\$CUSTOM_INSERTION,0) ; } else { $insertion= $CUSTOM_INSERTION ; } } $ret= $FORM_AFTER_INSERTION ? $insertion . $form : $form . $insertion ; my(%inc_by)= %in_mini_start_form ; foreach (keys %IN_CUSTOM_INSERTION) { $inc_by{$_}+= $IN_CUSTOM_INSERTION{$_} ; } $ret.= "\n" if %inc_by ; $ret= "\n
\n$ret
\n\n
\n" ; return $ret ; } # Returns the HTML needed for JavaScript support, the insertion into the # of the document. sub js_insertion { my($base_url_jsq, $default_script_type_jsq, $default_style_type_jsq, $p_cookies_are_banned_here, $p_doing_insert_here, $p_session_cookies_only, $p_cookie_path_follows_spec, $p_respect_three_dot_rule, $p_allow_unproxified_scripts, $p_use_db_for_cookies, $cookies_from_db_jsq) ; # Create JS double-quoted string of base URL and other vars. ($base_url_jsq= $base_url )=~ s/(["\\])/\\$1/g ; ($default_script_type_jsq= $default_script_type)=~ s/(["\\])/\\$1/g ; ($default_style_type_jsq= $default_style_type )=~ s/(["\\])/\\$1/g ; ($cookies_from_db_jsq= $USE_DB_FOR_COOKIES ? get_cookies_from_db($path, $host, $port, $scheme, 1) : '')=~ s/(["\\])/\\$1/g ; $p_cookies_are_banned_here= $cookies_are_banned_here ? 'true' : 'false' ; $p_doing_insert_here= $doing_insert_here ? 'true' : 'false' ; $p_session_cookies_only= $SESSION_COOKIES_ONLY ? 'true' : 'false' ; $p_cookie_path_follows_spec= $COOKIE_PATH_FOLLOWS_SPEC ? 'true' : 'false' ; $p_respect_three_dot_rule= $RESPECT_THREE_DOT_RULE ? 'true' : 'false' ; $p_allow_unproxified_scripts= $ALLOW_UNPROXIFIED_SCRIPTS ? 'true' : 'false' ; $p_use_db_for_cookies= $USE_DB_FOR_COOKIES ? 'true' : 'false' ; return '\n" . qq(\n) ; } # Set $CUSTOM_INSERTION from the correct source. Also set %IN_CUSTOM_INSERTION # according to its contents. This is needed for JavaScript handling, to # handle arrays like document.forms[] etc. that reference page elements in # order. Insertions at the top of the page throw these arrays off, so we # must compensate by incrementing those subscripts by the number of forms, # links, etc. in the top insertion. The counts in %IN_CUSTOM_INSERTION are # used for the custom insertion; elements in the inserted entry form are # handled elsewhere. # The relevant arrays in the document object are applets[], embeds[], forms[], # ids[], layers[], anchors[], images[], and links[]. The first five # correspond directly to HTML tag names; the last three must be handled # individually. The patterns below to detect and aren't # exact, but should work in almost all cases. The pattern to detect tags # isn't even perfect-- it fails on script blocks, etc. However, errors would # be rare and fairly harmless, and this whole situation is pretty rare anyway. sub set_custom_insertion { return if $CUSTOM_INSERTION ne '' ; return unless ($INSERT_HTML ne '') || ($INSERT_FILE ne '') ; # Read $CUSTOM_INSERTION from the appropriate source. $CUSTOM_INSERTION= ($INSERT_HTML ne '') ? $INSERT_HTML : &readfile($INSERT_FILE) ; # Now, set counts in %IN_CUSTOM_INSERTION. %IN_CUSTOM_INSERTION= () ; foreach (qw(applet embed form id layer)) { $IN_CUSTOM_INSERTION{$_.'s'}++ while $CUSTOM_INSERTION=~ /<\s*$_\b/gi ; } $IN_CUSTOM_INSERTION{anchors}++ while $CUSTOM_INSERTION=~ /<\s*a\b[^>]*\bname\s*=/gi ; $IN_CUSTOM_INSERTION{links}++ while $CUSTOM_INSERTION=~ /<\s*a\b[^>]*\bhref\s*=/gi ; $IN_CUSTOM_INSERTION{images}++ while $CUSTOM_INSERTION=~ /<\s*img\b/gi ; } # Print the footer common to most error responses sub footer { my($rightlink)= $NO_LINK_TO_START ? '' : qq(Restart) ; my $proxified_homepage= &HTMLescape(full_url('http://www.jmarshall.com/tools/cgiproxy/')) ; my $download_link= &HTMLescape(full_url("http://www.jmarshall.com/tools/cgiproxy/releases/cgiproxy.latest.tar.gz")) ; return <
CGIProxy $PROXY_VERSION (download) $rightlink

EOF } # Return the contents of the top frame, i.e. the one with whatever insertion # we have-- the entry form and/or the inserted HTML or file. sub return_top_frame { my($enc_URL)= @_ ; my($body, $insertion) ; my($date_header)= &rfc1123_date($now, 0) ; # Redirect any links to the top frame. Make sure any called routines know # this by setting $base_unframes. Also use $url_start_noframe to make # sure any links with a "target" attribute that are followed from an # anonymized insertion have the frame flag unset, and therefore have # their own correct insertion. local($base_unframes)= 1 ; local($url_start)= $url_start_noframe ; $body= &full_insertion(&wrap_proxy_decode($enc_URL), 1) ; my $response= < $body EOR my $cl= length($response) ; print $STDOUT <\n" ; my $encode_prefix= $ENV{HTTP_USER_AGENT}=~ /Chrome|Safari/ ? "\\x7f" : "\\x01" ; $onsubmit= qq( onsubmit="if (!this.URL.value.match(/^$encode_prefix/)) this.URL.value= '$encode_prefix'+_proxy_jslib_wrap_proxy_encode(this.URL.value) ; return true") ; $onload= qq( onload="document.URLform.URL.focus() ; if (document.URLform.URL.value.match(/^$encode_prefix/)) document.URLform.URL.value= _proxy_jslib_wrap_proxy_decode(document.URLform.URL.value.replace(/$encode_prefix/, ''))") ; } else { $jslib_block= $onsubmit= '' ; $onload= ' onload="document.URLform.URL.focus()"' ; } # Include checkboxes if user config is allowed. if ($ALLOW_USER_CONFIG) { my($rc_on)= $e_remove_cookies ? ' checked' : '' ; my($rs_on)= $e_remove_scripts ? ' checked' : '' ; my($fa_on)= $e_filter_ads ? ' checked' : '' ; my($br_on)= $e_hide_referer ? ' checked' : '' ; my($if_on)= $e_insert_entry_form ? ' checked' : '' ; $flags= <



EOF } my $response= < $jslib_block Start Using CGIProxy $msg

CGIProxy

Start browsing through this CGI-based proxy by entering a URL below. Only HTTP and FTP URLs are supported. Not all functions will work (e.g. some Java applets), but most pages will be fine. $flags

Manage cookies

EOR my $cl= length($response) ; print $STDOUT <[ UP ]) : '' ; # Alter various HTML depending on whether we're in the top frame or not. ($table_open, $table_close)= $in_top_frame ? ('', '') : ('
', '
') ; # Set global hash %in_mini_start_form according to how many each of applets, # embeds, form, ids, layers, anchors, images, and links there are in this # form. It's used for handling certain JavaScript, later. # This isn't a persistent variable because it could vary from run to run. %in_mini_start_form= ('forms', 1, 'links', (($up_path ne '') ? 2 : 1)) ; # Encode the URL before submitting, if so configured. Start it with "\x01" # or "\x7f" (depending on the browser) to indicate that it's encoded. # Possible clash when a page has another element named "URL"; revisit if needed. if ($ENCODE_URL_INPUT) { $needs_jslib= 1 ; my $encode_prefix= $ENV{HTTP_USER_AGENT}=~ /Chrome|Safari/ ? "\\x7f" : "\\x01" ; $onsubmit= qq( onsubmit="if (!this.URL.value.match(/^$encode_prefix/)) this.URL.value= '$encode_prefix'+_proxy_jslib_wrap_proxy_encode(this.URL.value) ; return true") ; $onfocus= qq( onfocus="if (this.value.match(/^$encode_prefix/)) this.value= _proxy_jslib_wrap_proxy_decode(this.value.replace(/\\$encode_prefix/, ''))") ; } else { $onsubmit= '' ; } # Display one of two forms, depending on whether user config is allowed. if ($ALLOW_USER_CONFIG) { my($rc_on)= $e_remove_cookies ? ' checked=""' : '' ; my($rs_on)= $e_remove_scripts ? ' checked=""' : '' ; my($fa_on)= $e_filter_ads ? ' checked=""' : '' ; my($br_on)= $e_hide_referer ? ' checked=""' : '' ; my($if_on)= $e_insert_entry_form ? ' checked=""' : '' ; # jsm-- remove for production release, plus in form below. my($safe_URL2) ; ($safe_URL2= $URL)=~ s/([^\w.-])/ '%' . sprintf('%02x',ord($1)) /ge ; $safe_URL2= "http://jmarshall.com/bugs/report.cgi?URL=$safe_URL2&version=$PROXY_VERSION&rm=$RUN_METHOD" ; $safe_URL2= &HTMLescape(&full_url($safe_URL2)) ; return <
$table_open   Location via proxy: $up_link  
[Report a bug]   [Manage cookies]                 $table_close
EOF # If user config isn't allowed, then show a different form. } else { return <
$table_open Location via proxy: $up_link   [Manage cookies] $table_close
EOF } } # Display cookies to the user and let user selectively delete them. # No expiration date is displayed because to make that available would # require embedding it in every cookie. sub manage_cookies { my($qs)= @_ ; my($return_url, $action, $clear_cookies_url, $cookie_rows, $auth_rows, $cookie_header_row, $from_tag) ; my(@cookies, @auths, $name, $value, $type, @n, $delete_cb, $cname, $path, $domain, $cvalue, $secure, $realm, $server, $username) ; my($date_header)= &rfc1123_date($now, 0) ; my(%in)= &getformvars($qs) ; # $in{'from'} is already proxy_encoded $return_url= &HTMLescape( $url_start . $in{'from'} ) ; $action= &HTMLescape( $url_start . &wrap_proxy_encode('x-proxy://cookies/update') ) ; # Create "clear cookies" link, preserving any query string. $clear_cookies_url= $url_start . &wrap_proxy_encode('x-proxy://cookies/clear') ; $clear_cookies_url.= '?' . $qs if $qs ne '' ; $clear_cookies_url= &HTMLescape($clear_cookies_url) ; # probably never necessary # Include from-URL in form if it's available. $from_tag= '' if $in{'from'} ne ''; # First, create $cookie_rows and $auth_rows from $ENV{'HTTP_COOKIE'}. # Note that the "delete" checkboxes use the encoded name as their identifier. # With minor rewriting, this could sort cookies e.g. by server. Is that # preferred? Note that the order of cookies in $ENV{'HTTP_COOKIE'} has # meaning. foreach ( split(/\s*;\s*/, $ENV{'HTTP_COOKIE'}) ) { ($name, $value)= split(/=/, $_, 2) ; # $value may contain "=" $delete_cb= '' ; $name= &cookie_decode($name) ; $value= &cookie_decode($value) ; ($type, @n)= split(/;/, $name) ; if ($type eq 'COOKIE') { next if $USE_DB_FOR_COOKIES ; ($cname, $path, $domain)= @n ; ($cvalue, $secure)= split(/;/, $value) ; push(@cookies, {delete_cb => $delete_cb, domain => $domain, path => $path, name => $cname, value => $cvalue, secure => $secure}) ; } elsif ($type eq 'AUTH') { # format of auth cookie's name is AUTH;$enc_realm;$enc_server ($realm, $server)= @n ; $realm=~ s/%([\da-fA-F]{2})/ pack('C', hex($1)) /ge ; $server=~ s/%([\da-fA-F]{2})/ pack('C', hex($1)) /ge ; ($username)= split(/:/, &unbase64($value)) ; push(@auths, {delete_cb => $delete_cb, server => $server, username => $username, realm => $realm}) ; } } # Grab cookies from the database if using it for cookies. if ($USE_DB_FOR_COOKIES) { @cookies= get_all_cookies_from_db() ; $_->{delete_cb}= '{domain};$_->{path};$_->{name}") . '">' foreach @cookies ; } @cookies= sort {$a->{domain} cmp $b->{domain} or $a->{path} cmp $b->{path} or $a->{name} cmp $b->{name}} @cookies ; @auths= sort {$a->{server} cmp $b->{server} or $a->{realm} cmp $b->{realm} or $a->{username} cmp $b->{username}} @auths ; # Set $cookie_rows and $auth_rows, with defaults as needed. if ($USE_DB_FOR_COOKIES) { $cookie_header_row= <Delete this cookie? For server names ending in: ... and a path starting with: Expires Secure? HTTP only? Cookie name Value EOH $cookie_rows= join('', map {sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n", $_->{delete_cb}, &HTMLescape($_->{domain}), &HTMLescape($_->{path}), &HTMLescape($_->{expires}) || '(session)', $_->{secure} ? 'Yes' : 'No', $_->{httponly} ? 'Yes' : 'No', &HTMLescape($_->{name}), &HTMLescape($_->{value}) )} @cookies) ; # If $cookie_rows is empty, set appropriate message. $cookie_rows= " 
You are not currently sending any cookies through this proxy.
 \n" if $cookie_rows eq '' ; } else { $cookie_header_row= <Delete this cookie? For server names ending in: ... and a path starting with: Secure? Cookie name Value EOH $cookie_rows= join('', map {sprintf("%s\n%s\n%s\n%s\n%s\n%s\n", $_->{delete_cb}, &HTMLescape($_->{domain}), &HTMLescape($_->{path}), $_->{secure} ? 'Yes' : 'No', &HTMLescape($_->{name}), &HTMLescape($_->{value}) )} @cookies) ; # If $cookie_rows is empty, set appropriate message. $cookie_rows= " 
You are not currently sending any cookies through this proxy.
 \n" if $cookie_rows eq '' ; } $auth_rows= join('', map {sprintf("%s\n%s\n%s\n%s\n", $_->{delete_cb}, &HTMLescape($_->{server}), &HTMLescape($_->{realm}), &HTMLescape($_->{username}) )} @auths) ; $auth_rows= " 
You are not currently authenticated to any sites through this proxy.
 \n" if $auth_rows eq '' ; my $response= < CGIProxy Cookie Management

Return to browsing

Delete all cookies

Here are the cookies you're using through CGIProxy:

$from_tag

$cookie_header_row $cookie_rows

Authentication cookies:

$auth_rows
Delete this cookie? Server User Realm

EOR my $cl= length($response) ; print $STDOUT < Enter username and password for $realm at $server

Authorization Required

$msg
Enter username and password for $realm at $server:
Username:
Password:    

This requires cookie support turned on in your browser.

Note: Anytime you use a proxy, you're trusting the owner of that proxy with all information you enter, including your name and password here. This is true for any proxy, not just this one. EOR my $cl= length($response) ; print $STDOUT < WARNING: Entering non-anonymous area!

WARNING: Entering non-anonymous area!

This proxy only supports HTTP and FTP. Any browsing to another URL will be directly from your browser, and no longer anonymous.

Follow the link below to exit your anonymous browsing session, and continue to the URL non-anonymously.

$URL
EOR my $cl= length($response) ; print $STDOUT < WARNING: SSL not supported, entering non-anonymous area!

WARNING: SSL not supported, entering non-anonymous area!

This proxy as installed does not support SSL, i.e. URLs that start with "https://". To support SSL, the proxy administrator needs to install the Net::SSLeay Perl module (perhaps by running "nph-proxy.cgi install-modules"), and then this proxy will automatically support SSL (the CGIProxy site has more info). In the meantime, any browsing to an "https://" URL will be directly from your browser, and no longer anonymous.

Follow the link below to exit your anonymous browsing session, and continue to the URL non-anonymously.

$URL
EOR my $cl= length($response) ; print $STDOUT < Compressed content not supported, but was sent by server.

Compressed content not supported, but was sent by server.

The server at $host:$port replied with compressed content, even though it was told not to. That server either is misconfigured or has a bug.

To support compressed content, the proxy administrator needs to install the IO::Compress::Gzip Perl package-- perhaps by running "nph-proxy.cgi install-modules"-- and then this proxy will automatically support it. (Note that the IO::Compress::Gzip package is already included in Perl 5.9.4 or later.) EOR my $cl= length($response) ; print $STDOUT < The proxy can't access that server, sorry.

The proxy can't access that server, sorry.

The owner of this proxy has restricted which servers it can access, presumably for security or bandwidth reasons. The server you just tried to access is not on the list of allowed servers. EOR my $cl= length($response) ; print $STDOUT < You are not allowed to use this proxy, sorry.

You are not allowed to use this proxy, sorry.

The owner of this proxy has restricted which users are allowed to use it. Based on your IP address, you are not an authorized user. EOR my $cl= length($response) ; print $STDOUT < Proxy cannot loop back through itself

Proxy cannot loop back through itself

The URL you tried to access would cause this proxy to access itself, which is redundant and probably a waste of resources. The owner of this proxy has configured it to disallow such looping.

Rather than telling the proxy to access the proxy to access the desired resource, try telling the proxy to access the resource directly. The link below may do this.

$URL
EOR my $cl= length($response) ; print $STDOUT < Retrieval of secure URLs through a non-secure proxy is forbidden.

Retrieval of secure URLs through a non-secure proxy is forbidden.

This proxy is running on a non-secure server, which means that retrieval of pages from secure servers is not permitted. The danger is that the user and the end server may believe they have a secure connection between them, while in fact the link between the user and this proxy is insecure and eavesdropping may occur. That's why we have secure servers, after all.

This proxy must run on a secure server before being allowed to retrieve pages from other secure servers. EOR my $cl= length($response) ; print $STDOUT < Script content blocked

Script content blocked

The resource you requested (or were redirected to without your knowledge) is apparently an executable script. Such resources have been blocked by this proxy, presumably for your own protection.

Even if you're sure you want the script, you can't get it through this proxy the way it's configured. If permitted, try browsing through this proxy without removing scripts. Otherwise, you'll need to reconfigure the proxy or find another way to get the resource. EOR my $cl= length($response) ; print $STDOUT < Proxy cannot forward non-text files

Proxy cannot forward non-text files

Due to bandwidth limitations, the owner of this particular proxy is forwarding only text files. For best results, turn off automatic image loading if your browser lets you.

If you need access to images or other binary data, route your browser through another proxy (or install one yourself-- it's easy). EOR my $cl= length($response) ; print $STDOUT < Page uses UTF-16 encoding, which is unsupported by this version of Perl

Page uses UTF-16 encoding, which is unsupported by this version of Perl

The page you requested appears to be in Unicode's UTF-16 format. This is not supported by the version of Perl running on this server (more exactly, the "Encode" Perl module could not be found).

To support UTF-16, please upgrade to Perl version 5.8.0 or later. EOR my $cl= length($response) ; print $STDOUT < Page has malformed Unicode

Page has malformed Unicode

This page says it's using the charset "$charset", but the content could not be correctly decoded as that charset. Please notify the owner of the page in question. EOR my $cl= length($response) ; print $STDOUT < $title

$title

$msg

EOR my $cl= length($response) ; print $STDOUT < blocks, which # browsers try to work around. :P For now, remove one-line HTML # comments and declarations from the start of a script block. 1 while ($in=~ s/^\s*(?:\s*)+// or $in=~ s/^\s*(?:\s*)+// ) ; # MSIE fails when uncommented "-->" is encountered in the middle of a # script, like when we insert "_proxy_jslib_flush_write_buffers()" at # the end. Thus, remove leading "". # Also remove the remainder of the first line after the "\s*$/$1/s ; # Note that these patterns contain an embedded set of parentheses that # only match if the input element is a token. # Correction: Because of Perl's long-string-literal bug, there are two # additional sets of embedded parentheses, which may match /'/ or /"/ . OUTER: while ($div_ok ? $in=~ /\G($RE_JS_INPUT_ELEMENT_DIV)/gco : $in=~ /\G($RE_JS_INPUT_ELEMENT_REG_EXP)/gco) { ($element, $token, $closequote1, $closequote2)= ($1, $2, $3, $4) ; # To work around Perl's long-string-literal bug, read in rest of # string literal if needed. if ($token=~ /^['"]/ && !$closequote1 && !$closequote2) { last unless &get_string_literal_remainder(\$in, \$token) ; $element= $token ; } # If a token was gotten, then set $div_ok according to the token. # Until we get a more complete parser, this is a pretty good guess. # Note that here, "token" also includes DivPunctuator and # RegularExpressionLiteral. # DivPunctuator may come after: certain reserved words, identifiers, # the four punctuators ") ] ++ --", numeric and string literals, # and regular expression literals. To match identifiers but not # the wrong reserved words, it's probably easier to include all # identifiers, then just exclude those reserved words which may # precede RegularExpressionLiteral. The last line of the pattern # below tests the start of the token for several possible token # types, combined into one pattern. # Reserved words that may precede DivPunctuator are qw(this null true false); # reserved words that may precede RegularExpressionLiteral are # qw(case delete do else in instanceof new return throw typeof void). # NOTE: We no longer use this regex here, but instead set $div_ok # in each appropriate block of code below. This saves about 5% # of the entire call to proxify_js(). (We still use the regex in # get_next_js_expr(), however.) #if (defined($token)) { # $div_ok= $token=~ m#^(?:\)|\]|\+\+|--)$| # ^(?!(?:case|delete|do|else|in|instanceof|new|return|throw|typeof|void)$) # (?:\pL|[\$_\\0-9'"]|\.\d|/..)#x ; #} $newline_since_last_token= 1 if $element=~ /^$RE_JS_LINE_TERMINATOR$/o ; $new_last_token= '' ; # Keep track of whether we're in a function, to correctly handle returns. $in_braces++ if $token eq '{' ; $in_braces-- if $token eq '}' ; $in_func= 0 if $in_braces==0 ; # Now, handle cases depending on value of $token. # Only allow whitespace within a term, not comments, or else removing # the final "." gets messy later. Don't remove white space # altogether, since it's needed to separate tokens correctly. Line # terminators also have to be preserved, for the sake of automatic # semicolon insertion and other syntactic constructs. if ($token eq '') { if ($term_so_far ne '') { if ($element=~ /$RE_JS_LINE_TERMINATOR/o) { $term_so_far.= "\n" ; } else { $term_so_far.= ' ' ; } } else { push(@out, $element) ; } # Increment identifiers from other libraries, to allow chaining of # multiple proxies and to close a privacy hole. } elsif ($token=~ s/^_proxy(\d*)_/'_proxy'.($1+1).'_'/e) { $term_so_far.= $token ; $div_ok= 1 ; # Treat these as beginning a term. 
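    # For example (an illustration of the substitution in the block just above,
    # using routine names that appear later in this script):
    #     _proxy_jslib_handle(...)    becomes    _proxy1_jslib_handle(...)
    #     _proxy1_jslib_handle(...)   becomes    _proxy2_jslib_handle(...)
    # so that when a page that has already been proxified by another copy of
    # this script is proxified again, the two copies' inserted identifiers
    # can't collide.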
# Due to Perl's long-string-literal bug, string literals are matched # by /^['"]/ rather than by $RE_JS_STRING_LITERAL. #} elsif ($token=~ /^(?:$RE_JS_NUMERIC_LITERAL|$RE_JS_STRING_LITERAL|$RE_JS_REGULAR_EXPRESSION_LITERAL)$/o) { } elsif ($token=~ /^(?:$RE_JS_NUMERIC_LITERAL|$RE_JS_REGULAR_EXPRESSION_LITERAL)$/o or $token=~ /^['"]/) { push(@out, $prefix, $term_so_far) ; $prefix= '' ; $term_so_far= $token ; $div_ok= 1 ; # Now all input elements are handled except identifiers (including # reserved words) and all punctuators (including DivPunctuator). # All punctuators end a term except for .[(, which each need a special # block here to handle them; all punctuators that are # AssignmentOperator or ++/-- must also be handled specially. # Handle increment and decrement operators, and "delete", using this # simplification: ++/-- is post- if there's a term so far and # not a newline since the last token, and pre- otherwise. # Pre- operators become the "prefix" parameter in the call to # _proxy_jslib_assign(); with post- operators, $prefix and # $term_so_far are pushed onto @out, then the operator itself. # Note that $term_so_far may have already been transformed during # the processing of a previous token. # Handle case when parentheses surround the term, e.g. "delete(a.b)" . } elsif ($token eq '++' or $token eq '--' or $token eq 'delete') { # Handle "-->" instead of "--" if needed. if ($token eq '--' and $in=~ /\G\s*>/gco) { push(@out, $prefix, $term_so_far, '-->') ; $prefix= $term_so_far= '' ; } elsif (($term_so_far ne '') and !$newline_since_last_token) { push(@out, $prefix, $term_so_far, $token) ; $prefix= $term_so_far= '' ; $div_ok= 1 ; } else { push(@out, $prefix, $term_so_far) ; $prefix= $term_so_far= '' ; my $start_paren= $in=~ /\G$RE_JS_SKIP*\(/gco ; my($o, $p)= &get_next_js_term(\$in) ; last unless defined($p) ; last if $start_paren and !($in=~ /\G$RE_JS_SKIP*\)/gco) ; if ($o ne '') { push(@out, " _proxy_jslib_assign('$token', (" . (&proxify_js($o, 0, $with_level))[0] . "), (" . (&proxify_js($p, 0, $with_level))[0] . "), '')" ) ; } else { # Note that $p is guaranteed to be a quoted identifier here. $p=~ s/^'|'$//g ; if ($token eq 'delete') { push(@out, "delete $p"); } else { push(@out, "($p= _proxy_jslib_assign_rval('$token', '$p', '', '', (typeof $p=='undefined' ? void 0 : $p)))") ; } } $div_ok= 1 ; # $prefix= $token ; } # eval() is a special case. It should normally be followed by a # parenthesis, in which case we transform "eval(expr)" into # "eval(_proxy_jslib_proxify_js(expr))". # If it's not followed by a parenthesis, then that means the code # is probably trying to assign something to the eval function itself. # By spec, this may be treated as an error. We handle it in the # next block using _proxy_jslib_handle(), though imperfectly (e.g. # when eval is replaced by a function, local variables are no longer # in scope). # When its argument is not a primitive string, eval() returns its # argument unchanged, which mucks this code up a bit. As an imperfect # solution, this is handled in _proxy_jslib_proxify_js(), by having it # return its argument unchanged if it's not a string. } elsif (($token eq 'eval') && $in=~ /\G($RE_JS_SKIP*\()/gco) { $needs_jslib= 1 ; $term_so_far.= $token . $1 . '_proxy_jslib_proxify_js((' . (&proxify_js(&get_next_js_expr(\$in,1), 0, $with_level))[0] . "), 0, $with_level) )" ; last unless $in=~ /\G\)/gc ; $div_ok= 1 ; # Here, explicitly handle all properties and methods that need special # treatment. 
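    # For example (a rough sketch of the rewrite made by the eval() branch
    # above; the argument expression is itself run through proxify_js() first):
    #     eval(someCode)
    # becomes, when $with_level is 0, something like
    #     eval( _proxy_jslib_proxify_js((someCode), 0, 0) )
    # so whatever string reaches eval() at run time is proxified before it
    # runs; a non-string argument is returned unchanged by
    # _proxy_jslib_proxify_js(), as noted above.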
Property names only are listed, and sorted out in the # all-purpose routines _proxy_jslib_handle(), _proxy_jslib_assign(), # and _proxy_jslib_assign_rval(). # For document.write() and document.writeln(), note that the writing of # one e.g. tag can be split among several write() statements. So for # the parsing of its output to happen correctly, for each JS block we # accumulate a buffer of all write() output and then proxify and flush # it as much as possible, leaving the remainder (e.g. a partial tag or # an incomplete