-rw-r--r--   .gitmodules                        6
-rw-r--r--   Reaktor/Makefile                   8
m---------   Reaktor/repos/dnsrecon             0
m---------   Reaktor/repos/gxfr                 0
-rw-r--r--   Reaktor/repos/gxfr/gxfr.py       235
-rw-r--r--   Reaktor/repos/gxfr/here.csv        5
-rw-r--r--   punani/Makefile                    6
-rwxr-xr-x   punani/autostart/punani-debian   102
8 files changed, 121 insertions, 241 deletions
diff --git a/.gitmodules b/.gitmodules
index 2823cad5..296d7099 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -19,3 +19,9 @@
 [submodule "submodules/github/jbalogh/python-irclib"]
 	path = submodules/github/jbalogh/python-irclib
 	url = https://github.com/jbalogh/python-irclib.git
+[submodule "Reaktor/repos/gxfr"]
+	path = Reaktor/repos/gxfr
+	url = https://github.com/makefu/gxfr.git
+[submodule "Reaktor/repos/dnsrecon"]
+	path = Reaktor/repos/dnsrecon
+	url = https://github.com/makefu/dnsrecon.git
diff --git a/Reaktor/Makefile b/Reaktor/Makefile
index 2b121435..f608db90 100644
--- a/Reaktor/Makefile
+++ b/Reaktor/Makefile
@@ -1,4 +1,12 @@
+init:
+	cd ..;git submodule init; git submodule update
+	cd repos/gxfr/; git checkout master; git pull
+	cd repos/dnsrecon; git checkout master; git pull
+
+
+
 debian-autostart:
+	useradd reaktor ||:
 	cp autostart/reaktor-debian /etc/init.d/reaktor
 	cp autostart/reaktor /etc/default/
 	update-rc.d reaktor defaults
diff --git a/Reaktor/repos/dnsrecon b/Reaktor/repos/dnsrecon
new file mode 160000
+Subproject 31de30e4f6674585676c841c5612a330c22de94
diff --git a/Reaktor/repos/gxfr b/Reaktor/repos/gxfr
new file mode 160000
+Subproject 4606858e7814189c527ba912e1d8575248f719d
diff --git a/Reaktor/repos/gxfr/gxfr.py b/Reaktor/repos/gxfr/gxfr.py
deleted file mode 100644
index 819f0b11..00000000
--- a/Reaktor/repos/gxfr/gxfr.py
+++ /dev/null
@@ -1,235 +0,0 @@
-#!/usr/bin/python -tt
-
-# gxfr replicates dns zone transfers by enumerating subdomains using advanced search engine queries and conducting dns lookups.
-# By Tim Tomes (LaNMaSteR53)
-# Available for download at http://LaNMaSteR53.com or http://code.google.com/p/gxfr/
-
-import sys, os.path, urllib, urllib2, re, time, socket, random, socket
-
-
-def help():
-  print """  Syntax: ./gxfr.py domain [options]
-
-  -h, --help               this screen
-  -v                       enable verbose mode
-  -t [num of seconds]      set number of seconds to wait between queries (default=15)
-  -q [max num of queries]  restrict to maximum number of queries (default=0, indefinite)
-  --dns-lookup             enable dns lookups of all subdomains
-  --proxy [file|ip:port|-] use a proxy or list of open proxies to send queries (@random w/list)
-                             - [file] must consist of 1 or more ip:port pairs
-                             - replace filename with '-' (dash) to accept stdin
-  --user-agent ['string']  set custom user-agent string
-  --timeout [seconds]      set socket timeout (default=system default)
-  --csv [file]
-
-  Examples:
-  $ ./gxfr.py foxnews.com --dns-lookup -v
-  $ ./gxfr.py foxnews.com --dns-lookup --proxy open_proxies.txt --timeout 10
-  $ ./gxfr.py foxnews.com --dns-lookup -t 5 -q 5 -v --proxy 127.0.0.1:8080
-  $ curl http://rmccurdy.com/scripts/proxy/good.txt | ./gxfr.py website.com -v -t 3 --proxy -
-  """
-  sys.exit(2)
-
-if len(sys.argv) < 2:
-  help()
-
-if '-h' in sys.argv or '--help' in sys.argv:
-  help()
-
-# declare vars and process arguments
-query_cnt = 0
-csvname = False
-domain = sys.argv[1]
-sys.argv = sys.argv[2:]
-lookup = False
-encrypt = True
-base_url = 'https://www.google.com'
-base_uri = '/m/search?'
-base_query = 'site:' + domain
-pattern = '>([\.\w-]*)\.%s.+?<' % (domain)
-proxy = False
-user_agent = 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)'
-verbose = False
-secs = 15
-max_queries = 10 # default = 10 queries
-# process command line arguments
-if len(sys.argv) > 0:
-  if '--dns-lookup' in sys.argv:
-    lookup = True
-  if '--csv' in sys.argv:
-    csvname = sys.argv[sys.argv.index('--csv') + 1]
-  if '--proxy' in sys.argv:
-    proxy = True
-    filename = sys.argv[sys.argv.index('--proxy') + 1]
-    if filename == '-':
-      proxies = sys.stdin.read().split()
-    elif os.path.exists(filename):
-      content = open(filename).read()
-      proxies = re.findall('\d+\.\d+\.\d+\.\d+:\d+', content)
-    elif re.match(r'^\d+\.\d+\.\d+\.\d+:\d+$', filename):
-      proxies = [filename]
-    else:
-      help()
-  if '--timeout' in sys.argv:
-    timeout = int(sys.argv[sys.argv.index('--timeout') + 1])
-    socket.setdefaulttimeout(timeout)
-  if '--user-agent' in sys.argv:
-    user_agent = sys.argv[sys.argv.index('--user-agent') + 1]
-  if '-v' in sys.argv:
-    verbose = True
-  if '-t' in sys.argv:
-    secs = int(sys.argv[sys.argv.index('-t') + 1])
-  if '-q' in sys.argv:
-    max_queries = int(sys.argv[sys.argv.index('-q') + 1])
-subs = []
-new = True
-page = 0
-
-# --begin--
-print '[-] domain:', domain
-print '[-] user-agent:', user_agent
-# execute search engine queries and scrape results storing subdomains in a list
-print '[-] querying search engine, please wait...'
-# loop until no new subdomains are found
-while new == True:
-  try:
-    query = ''
-    # build query based on results of previous results
-    for sub in subs:
-      query += ' -site:%s.%s' % (sub, domain)
-    full_query = base_query + query
-    start_param = '&start=%s' % (str(page*10))
-    query_param = 'q=%s' % (urllib.quote_plus(full_query))
-    if len(base_uri) + len(query_param) + len(start_param) < 2048:
-      last_query_param = query_param
-      params = query_param + start_param
-    else:
-      params = last_query_param[:2047-len(start_param)-len(base_uri)] + start_param
-    full_url = base_url + base_uri + params
-    # note: query character limit is passive in mobile, but seems to be ~794
-    # note: query character limit seems to be 852 for desktop queries
-    # note: typical URI max length is 2048 (starts after top level domain)
-    if verbose: print '[+] using query: %s...' % (full_url)
-    # build web request and submit query
-    request = urllib2.Request(full_url)
-    # spoof user-agent string
-    request.add_header('User-Agent', user_agent)
-    # if proxy is enabled, use the correct handler
-    if proxy == True:
-      # validate proxies at runtime
-      while True:
-        try:
-          # select a proxy from list at random
-          num = random.randint(0,len(proxies)-1)
-          host = proxies[num]
-          opener = urllib2.build_opener(urllib2.ProxyHandler({'http': host}))
-          if verbose: print '[+] sending query to', host
-          # send query to proxy server
-          result = opener.open(request).read()
-          # exit while loop if successful
-          break
-        except Exception as inst:
-          print '[!] %s failed: %s' % (host, inst)
-          if len(proxies) == 1:
-            # exit of no proxy servers from list are valid
-            print '[-] valid proxy server not found'
-            sys.exit(2)
-          else:
-            # remove host from list of proxies and try again
-            del proxies[num]
-    else:
-      opener = urllib2.build_opener(urllib2.HTTPHandler(), urllib2.HTTPSHandler())
-      # send query to search engine
-      try:
-        result = opener.open(request).read()
-      except Exception as inst:
-        print '[!] {0}'.format(inst)
-        if str(inst).index('503') != -1: print '[!] possible shun: use --proxy or find something else to do for 24 hours :)'
-        sys.exit(2)
-    if not verbose: sys.stdout.write('.'); sys.stdout.flush()
-    #if not verbose: sys.stdout.write('\n'); sys.stdout.flush()
-    # iterate query count
-    query_cnt += 1
-    sites = re.findall(pattern, result)
-    # create a uniq list
-    sites = list(set(sites))
-    new = False
-    # add subdomain to list if not already exists
-    for site in sites:
-      if site not in subs:
-        if verbose: print '[!] subdomain found:', site
-        subs.append(site)
-        new = True
-    # exit if maximum number of queries has been made
-    if query_cnt == max_queries:
-      print '[-] maximum number of queries made...'
-      break
-    # start going through all pages if querysize is maxed out
-    if new == False:
-      # exit if all subdomains have been found
-      if not 'Next page' in result:
-        #import pdb; pdb.set_trace() # curl to stdin breaks pdb
-        print '[-] all available subdomains found...'
-        break
-      else:
-        page += 1
-        new = True
-        if verbose: print '[+] no new subdomains found on page. jumping to result %d.' % (page*10)
-    # sleep script to avoid lock-out
-    if verbose: print '[+] sleeping to avoid lock-out...'
-    time.sleep(secs)
-  except KeyboardInterrupt:
-    # catch keyboard interrupt and gracefull complete script
-    break
-
-# print list of subdomains
-print '[-] successful queries made:', str(query_cnt)
-if verbose:
-  # rebuild and display final query if in verbose mode
-  #final_query = ''
-  #for sub in subs:
-  #  final_query += '+-site:%s.%s' % (sub, domain)
-  #print '[+] final query string: %sstart=%s&%s%s' % (base_url, str(page*10), base_query, query)
-  print '[+] final query string: %s' % (full_url)
-print ' '
-print '[subdomains] -', str(len(subs))
-csvwriter = False
-try:
-  if csvname:
-    import csv
-    csvwriter = csv.writer(open(csvname,'wb'))
-except:
-  print "[!] Cannot open CSV"
-for sub in subs:
-  dom = '%s.%s' % (sub, domain )
-  hostname,aliases,ips = socket.gethostbyname_ex(dom)
-  #print hostname,aliases,ip
-  print dom,",".join(ips)
-  try:
-    line = [dom] + ips
-    csvwriter.writerow([dom] + ips)
-  except: pass
-
-
-# conduct dns lookup if argument is present
-if lookup == True:
-  print ' '
-  print '[-] querying dns, please wait...'
-  dict = {}
-  # create a dictionary where the subdomain is the key and a list of all associated ips is the value
-  for sub in subs:
-    sub = '%s.%s' % (sub, domain)
-    if verbose: print '[+] querying dns for %s...' % (sub)
-    # dns query and dictionary assignment
-    try:
-      dict[sub] = list(set([item[4][0] for item in socket.getaddrinfo(sub, 80)]))
-    except socket.gaierror:
-      # dns lookup failure
-      dict[sub] = list(set(['no entry']))
-  # print table of subdomains and ips
-  print ' '
-  print '[ip]'.ljust(16, ' ') + '[subdomain]'
-  for key in dict.keys():
-    for ip in dict[key]:
-      print ip.ljust(16, ' ') + key
-# --end--
diff --git a/Reaktor/repos/gxfr/here.csv b/Reaktor/repos/gxfr/here.csv
deleted file mode 100644
index 95faaa9c..00000000
--- a/Reaktor/repos/gxfr/here.csv
+++ /dev/null
@@ -1,5 +0,0 @@
-mobile.foxnews.com,72.5.158.94
-video.foxnews.com,2.20.180.43,2.20.180.96
-www.foxnews.com,2.20.180.96,2.20.180.34
-latino.foxnews.com,2.20.180.72,2.20.180.26
-ureport.foxnews.com,69.90.218.153
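
The two deleted files above are the vendored gxfr checkout; after this change the same code is pulled in through the makefu/gxfr submodule declared in .gitmodules and fetched by the new init target in Reaktor/Makefile. A minimal sketch of checking the swap from a fresh clone (not part of the commit; the command sequence is an assumption based on the hunks above):

# hedged sketch: fetch the submodules and confirm the pins (run from the repo root)
make -C Reaktor init            # git submodule init/update, then checkout/pull master in each repo
git submodule status Reaktor/repos/gxfr Reaktor/repos/dnsrecon   # prints the pinned commits
ls Reaktor/repos/gxfr/gxfr.py   # now provided by the submodule, no longer vendored
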
diff --git a/punani/Makefile b/punani/Makefile
index 26057d00..f444b1fc 100644
--- a/punani/Makefile
+++ b/punani/Makefile
@@ -1,5 +1,9 @@
 install: ../bin/punani
-
+	
 ../bin/punani:
 	ln -snvf ../punani/bin/punani ../bin/punani
+debian:
+	useradd punani||:
+	cp autostart/punani-debian /etc/init.d/punani
+	update-rc.d punani defaults
 
diff --git a/punani/autostart/punani-debian b/punani/autostart/punani-debian
new file mode 100755
index 00000000..53db0336
--- /dev/null
+++ b/punani/autostart/punani-debian
@@ -0,0 +1,102 @@
+#!/bin/sh
+# uses template from /etc/init.d/skeleton
+### BEGIN INIT INFO
+# Provides:          punani
+# Required-Start:
+# Required-Stop:
+# Default-Start:     2 3 4 5
+# Default-Stop:      0 1 6
+# Short-Description: punani
+# Description:       starts punani daemon
+#
+### END INIT INFO
+
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+NAME=punani
+USER=punani
+DESC="$NAME daemon"
+DAEMON=/usr/bin/python
+DAEMON_DIR="/krebs/punani"
+DAEMON_ARGS="${DAEMON_DIR}/index.py"
+PIDFILE=/var/run/$NAME.pid
+SCRIPTNAME=/etc/init.d/$NAME
+
+[ -x "$DAEMON" ] || exit 0
+[ -r /etc/default/$NAME ] && . /etc/default/$NAME
+. /lib/init/vars.sh
+. /lib/lsb/init-functions
+
+do_start()
+{
+  #   0 if daemon has been started
+  #   1 if daemon was already running
+  #   2 if daemon could not be started
+  start-stop-daemon -b -d $DAEMON_DIR -c $USER --start --quiet --make-pidfile --pidfile $PIDFILE --exec $DAEMON --test > /dev/null \
+          || return 1
+  start-stop-daemon -b -d $DAEMON_DIR -c $USER --start --quiet --make-pidfile --pidfile $PIDFILE --exec $DAEMON -- \
+      $DAEMON_ARGS \
+      || return 2
+}
+
+do_stop()
+{
+  #   0 if daemon has been stopped
+  #   1 if daemon was already stopped
+  #   2 if daemon could not be stopped
+  start-stop-daemon --stop --retry=TERM/30/KILL/5 --pidfile $PIDFILE
+  RETVAL="$?"
+  [ "$RETVAL" = 2 ] && return 2
+  rm -f $PIDFILE
+  return "$RETVAL"
+}
+
+do_reload() {
+  start-stop-daemon --stop --signal 1 --quiet --pidfile $PIDFILE
+  return 0
+}
+
+case "$1" in
+  start)
+  [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
+  do_start
+  case "$?" in
+      0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
+      2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
+  esac
+  ;;
+  stop)
+  [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
+  do_stop
+  case "$?" in
+      0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
+      2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
+  esac
+  ;;
+  status)
+       status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
+       ;;
+  restart|force-reload)
+  log_daemon_msg "Restarting $DESC" "$NAME"
+  do_stop
+  case "$?" in
+    0|1)
+      do_start
+      case "$?" in
+          0) log_end_msg 0 ;;
+          1) log_end_msg 1 ;;
+          *) log_end_msg 1 ;;
+      esac
+      ;;
+    *)
+      # Failed to stop
+      log_end_msg 1
+      ;;
+  esac
+  ;;
+  *)
+  echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
+  exit 3
+  ;;
esac

+esac
+
+:
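
Together with the Reaktor targets, the new debian target and init script hook punani into a Debian boot. A minimal sketch of the intended flow (not part of the commit; it assumes the repository lives at /krebs, which the DAEMON_DIR="/krebs/punani" setting above implies, and that the commands run as root):

# hedged sketch: install and start the punani daemon on Debian
cd /krebs/punani
make debian                # useradd punani, install /etc/init.d/punani, update-rc.d punani defaults
/etc/init.d/punani start   # start-stop-daemon launches /usr/bin/python index.py as user punani
/etc/init.d/punani status  # status_of_proc reports whether the daemon is running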
