Starting the migration from Python 2 to Python 3.7

This commit is contained in:
NotoriousRebel 2018-11-14 22:13:52 -05:00
parent 617eb58699
commit 2a20d3ff47
28 changed files with 1227 additions and 1129 deletions

8
Dockerfile Normal file
View file

@ -0,0 +1,8 @@
# theHarvester container image.
# The codebase is being migrated to Python 3.7 (see commit message), so base
# on a 3.7 interpreter instead of the end-of-life Python 2 image.
FROM python:3.7-alpine
RUN mkdir /app
# requests: HTTP client used by the search engines;
# beautifulsoup4: HTML parsing for the censys parser (imported as bs4).
RUN pip install requests beautifulsoup4
WORKDIR /app
COPY . /app
RUN chmod +x *.py
ENTRYPOINT ["/app/theHarvester.py"]
CMD ["--help"]

View file

@ -63,7 +63,7 @@ The sources are:
* shodan: Shodan Computer search engine, will search for ports and banner of the
discovered hosts (http://www.shodanhq.com/)
* hunter: Hunter search engine
* hunter: Hunter search engine (you need to add your Key in the discovery/huntersearch.py file)
Active:
-------
@ -104,6 +104,8 @@ https://github.com/laramies/theHarvester
Thanks:
-------
* Matthew Brown @NotoriousRebel
* Janos Zold @Jzod
* John Matherly - SHODAN project
* Lee Baird for suggestions and bugs reporting
* Ahmed Aboul Ela - subdomain names dictionary (big and small)

39
censysparser.py Normal file
View file

@ -0,0 +1,39 @@
from bs4 import BeautifulSoup
import re
class parser:
    """Parse a Censys HTML search-results page.

    `results` is the originating search object; its `.results` attribute
    holds the raw HTML returned by censys.io.
    """

    def __init__(self, results):
        self.results = results            # the originating search object
        self.ipaddresses = []             # collected IP address strings
        self.soup = BeautifulSoup(results.results, features="html.parser")
        self.hostnames = []               # collected hostname strings
        self.numberofpages = 0            # pagination links counted so far

    def search_hostnames(self):
        """Return every hostname found in a <tt> tag, or None on error."""
        try:
            for hostnameitem in self.soup.findAll('tt'):
                self.hostnames.append(hostnameitem.text)
            return self.hostnames
        except Exception as e:  # was py2 `except Exception,e:` -- a SyntaxError on Python 3
            # str(e): concatenating str + Exception raises TypeError
            print("Error occurred: " + str(e))

    def search_ipaddresses(self):
        """Return every IP address found in result-title anchors, or None on error."""
        try:
            for ipaddressitem in self.soup.findAll('a', 'SearchResult__title-text'):
                self.ipaddresses.append(ipaddressitem.text.strip())
            return self.ipaddresses
        except Exception as e:
            print("Error occurred: " + str(e))

    def search_numberofpages(self):
        """Count pagination links (excluding the 'next' control), or None on error."""
        try:
            for item in self.soup.findAll(href=re.compile("page")):
                if item.text != 'next':  # filter out the 'next' pagination control
                    self.numberofpages += 1
            return self.numberofpages
        except Exception as e:
            print("Error occurred: " + str(e))

View file

@ -13,12 +13,11 @@
import string
import types
import time
import Type
import Class
import Opcode
import discovery.DNS.Type as Type
import discovery.DNS.Class as Class
import discovery.DNS.Opcode as Opcode
import asyncore
class DNSError(Exception):
pass
@ -33,7 +32,7 @@ def ParseResolvConf(resolv_path):
try:
lines = open(resolv_path).readlines()
except:
print "error in path" + resolv_path
print("error in path" + resolv_path)
for line in lines:
line = string.strip(line)
if not line or line[0] == ';' or line[0] == '#':
@ -68,7 +67,9 @@ class DnsRequest:
def __init__(self, *name, **args):
self.donefunc = None
self.async = None
#fix maybe?
self.asyn= False
#self.async = None #TODO FIX async is a keyword
self.defaults = {}
self.argparse(name, args)
self.defaults = self.args
@ -172,11 +173,11 @@ def req(self, *name, **args):
else:
qtype = self.args['qtype']
if 'name' not in self.args:
print self.args
print(self.args)
raise DNSError('nothing to lookup')
qname = self.args['name']
if qtype == Type.AXFR:
print 'Query type AXFR, protocol forced to TCP'
print('Query type AXFR, protocol forced to TCP')
protocol = 'tcp'
# print 'QTYPE %d(%s)' % (qtype, Type.typestr(qtype))
m = Lib.Mpacker()
@ -193,7 +194,7 @@ def req(self, *name, **args):
self.sendTCPRequest(server)
except socket.error as reason:
raise DNSError(reason)
if self.async:
if self.asyn:
return None
else:
return self.response
@ -208,7 +209,7 @@ def sendUDPRequest(self, server):
#self.s.connect((self.ns, self.port))
self.conn()
self.time_start = time.time()
if not self.async:
if not self.asyn:
self.s.send(self.request)
self.response = self.processUDPReply()
# except socket.error:
@ -216,7 +217,7 @@ def sendUDPRequest(self, server):
continue
break
if not self.response:
if not self.async:
if not self.asyn:
raise DNSError('no working nameservers found')
def sendTCPRequest(self, server):
@ -253,7 +254,7 @@ def __init__(self, *name, **args):
else:
self.donefunc = self.showResult
# self.realinit(name,args) # XXX todo
self.async = 1
self.asyn = 1
def conn(self):
import time

View file

@ -26,12 +26,12 @@
import string
import types
import Type
import Class
import Opcode
import Status
import discovery.DNS.Type as Type
import discovery.DNS.Class as Class
import discovery.DNS.Opcode as Opcode
import discovery.DNS.Status as Status
from Base import DNSError
from discovery.DNS.Base import DNSError
class UnpackError(DNSError):
@ -116,14 +116,15 @@ def addname(self, name):
# The case of the first occurrence of a name is preserved.
# Redundant dots are ignored.
list = []
for label in string.splitfields(name, '.'):
for label in name.split('.'):
if label:
if len(label) > 63:
raise PackError('label too long')
list.append(label)
keys = []
s = ''
for i in range(len(list)):
key = string.upper(string.joinfields(list[i:], '.'))
key = str.upper((s.join(list[i:])))
keys.append(key)
if key in self.index:
pointer = self.index[key]
@ -142,8 +143,8 @@ def addname(self, name):
if offset + len(buf) < 0x3FFF:
index.append((keys[j], offset + len(buf)))
else:
print 'DNS.Lib.Packer.addname:',
print 'warning: pointer too big'
print('DNS.Lib.Packer.addname:',)
print('warning: pointer too big')
buf = buf + (chr(n) + label)
if pointer:
buf = buf + pack16bit(pointer | 0xC000)
@ -155,26 +156,26 @@ def addname(self, name):
def dump(self):
keys = sorted(self.index.keys())
print '-' * 40
print('-' * 40)
for key in keys:
print '%20s %3d' % (key, self.index[key])
print '-' * 40
print('%20s %3d' % (key, self.index[key]))
print('-' * 40)
space = 1
for i in range(0, len(self.buf) + 1, 2):
if self.buf[i:i + 2] == '**':
if not space:
print
print()
space = 1
continue
space = 0
print '%4d' % i,
print('%4d' % i,)
for c in self.buf[i:i + 2]:
if ' ' < c < '\177':
print ' %c' % c,
print(' %c' % c,)
else:
print '%2d' % ord(c),
print
print '-' * 40
print('%2d' % ord(c),)
print()
print('-' * 40)
# Unpacking class
@ -257,8 +258,8 @@ def testpacker():
p.addbytes('*' * 26)
p.addname('')
timing.finish()
print timing.milli(), "ms total for packing"
print round(timing.milli() / i, 4), 'ms per packing'
print(timing.milli(), "ms total for packing")
print(round(timing.milli() / i, 4), 'ms per packing')
# p.dump()
u = Unpacker(p.buf)
u.getaddr()
@ -284,8 +285,8 @@ def testpacker():
u.getbytes(26),
u.getname())
timing.finish()
print timing.milli(), "ms total for unpacking"
print round(timing.milli() / i, 4), 'ms per unpacking'
print(timing.milli(), "ms total for unpacking")
print(round(timing.milli() / i, 4), 'ms per unpacking')
# for item in res: print item
@ -379,7 +380,7 @@ def addSOA(self, name, klass, ttl,
def addTXT(self, name, klass, ttl, list):
self.addRRheader(name, Type.TXT, klass, ttl)
if isinstance(list, types.StringType):
if isinstance(list, str):
list = [list]
for txtdata in list:
self.addstring(txtdata)
@ -555,29 +556,29 @@ class Munpacker(RRunpacker, Qunpacker, Hunpacker):
# These affect the unpacker's current position!
def dumpM(u):
print 'HEADER:',
print('HEADER:',)
(id, qr, opcode, aa, tc, rd, ra, z, rcode,
qdcount, ancount, nscount, arcount) = u.getHeader()
print 'id=%d,' % id,
print 'qr=%d, opcode=%d, aa=%d, tc=%d, rd=%d, ra=%d, z=%d, rcode=%d,' \
% (qr, opcode, aa, tc, rd, ra, z, rcode)
print('id=%d,' % id,)
print('qr=%d, opcode=%d, aa=%d, tc=%d, rd=%d, ra=%d, z=%d, rcode=%d,' \
% (qr, opcode, aa, tc, rd, ra, z, rcode))
if tc:
print '*** response truncated! ***'
print('*** response truncated! ***')
if rcode:
print '*** nonzero error code! (%d) ***' % rcode
print ' qdcount=%d, ancount=%d, nscount=%d, arcount=%d' \
% (qdcount, ancount, nscount, arcount)
print('*** nonzero error code! (%d) ***' % rcode)
print(' qdcount=%d, ancount=%d, nscount=%d, arcount=%d' \
% (qdcount, ancount, nscount, arcount))
for i in range(qdcount):
print 'QUESTION %d:' % i,
print('QUESTION %d:' % i,)
dumpQ(u)
for i in range(ancount):
print 'ANSWER %d:' % i,
print('ANSWER %d:' % i,)
dumpRR(u)
for i in range(nscount):
print 'AUTHORITY RECORD %d:' % i,
print('AUTHORITY RECORD %d:' % i,)
dumpRR(u)
for i in range(arcount):
print 'ADDITIONAL RECORD %d:' % i,
print('ADDITIONAL RECORD %d:' % i,)
dumpRR(u)
@ -594,44 +595,44 @@ def __init__(self, u, args):
def show(self):
import time
print '; <<>> PDG.py 1.0 <<>> %s %s' % (self.args['name'],
self.args['qtype'])
print('; <<>> PDG.py 1.0 <<>> %s %s' % (self.args['name'],
self.args['qtype']))
opt = ""
if self.args['rd']:
opt = opt + 'recurs '
h = self.header
print ';; options: ' + opt
print ';; got answer:'
print ';; ->>HEADER<<- opcode %s, status %s, id %d' % (
h['opcode'], h['status'], h['id'])
print(';; options: ' + opt)
print(';; got answer:')
print(';; ->>HEADER<<- opcode %s, status %s, id %d' % (
h['opcode'], h['status'], h['id']))
flags = filter(lambda x, h=h: h[x], ('qr', 'aa', 'rd', 'ra', 'tc'))
print ';; flags: %s; Ques: %d, Ans: %d, Auth: %d, Addit: %d' % (
string.join(flags), h['qdcount'], h['ancount'], h['nscount'],
h['arcount'])
print ';; QUESTIONS:'
print(';; flags: %s; Ques: %d, Ans: %d, Auth: %d, Addit: %d' % (
''.join(map(str,flags)), h['qdcount'], h['ancount'], h['nscount'],
h['arcount']))
print(';; QUESTIONS:')
for q in self.questions:
print ';; %s, type = %s, class = %s' % (q['qname'], q['qtypestr'],
q['qclassstr'])
print
print ';; ANSWERS:'
print(';; %s, type = %s, class = %s' % (q['qname'], q['qtypestr'],
q['qclassstr']))
print()
print(';; ANSWERS:')
for a in self.answers:
print '%-20s %-6s %-6s %s' % (a['name'], repr(a['ttl']), a['typename'],
a['data'])
print
print ';; AUTHORITY RECORDS:'
print('%-20s %-6s %-6s %s' % (a['name'], repr(a['ttl']), a['typename'],
a['data']))
print()
print(';; AUTHORITY RECORDS:')
for a in self.authority:
print '%-20s %-6s %-6s %s' % (a['name'], repr(a['ttl']), a['typename'],
a['data'])
print
print ';; ADDITIONAL RECORDS:'
print('%-20s %-6s %-6s %s' % (a['name'], repr(a['ttl']), a['typename'],
a['data']))
print()
print(';; ADDITIONAL RECORDS:')
for a in self.additional:
print '%-20s %-6s %-6s %s' % (a['name'], repr(a['ttl']), a['typename'],
a['data'])
print
print('%-20s %-6s %-6s %s' % (a['name'], repr(a['ttl']), a['typename'],
a['data']))
print()
if 'elapsed' in self.args:
print ';; Total query time: %d msec' % self.args['elapsed']
print ';; To SERVER: %s' % (self.args['server'])
print ';; WHEN: %s' % time.ctime(time.time())
print(';; Total query time: %d msec' % self.args['elapsed'])
print(';; To SERVER: %s' % (self.args['server']))
print(';; WHEN: %s' % time.ctime(time.time()))
def storeM(self, u):
(self.header['id'], self.header['qr'], self.header['opcode'],
@ -682,25 +683,25 @@ def storeRR(self, u):
def dumpQ(u):
qname, qtype, qclass = u.getQuestion()
print 'qname=%s, qtype=%d(%s), qclass=%d(%s)' \
print('qname=%s, qtype=%d(%s), qclass=%d(%s)' \
% (qname,
qtype, Type.typestr(qtype),
qclass, Class.classstr(qclass))
qclass, Class.classstr(qclass)))
def dumpRR(u):
name, type, klass, ttl, rdlength = u.getRRheader()
typename = Type.typestr(type)
print 'name=%s, type=%d(%s), class=%d(%s), ttl=%d' \
print('name=%s, type=%d(%s), class=%d(%s), ttl=%d' \
% (name,
type, typename,
klass, Class.classstr(klass),
ttl)
ttl))
mname = 'get%sdata' % typename
if hasattr(u, mname):
print ' formatted rdata:', getattr(u, mname)()
print(' formatted rdata:', getattr(u, mname)())
else:
print ' binary rdata:', u.getbytes(rdlength)
print(' binary rdata:', u.getbytes(rdlength))
if __name__ == "__main__":
testpacker()
@ -753,10 +754,10 @@ def dumpRR(u):
# added identifying header to top of each file
#
# Revision 1.7 2001/07/19 07:50:44 anthony
# Added SRV (RFC 2782) support. Code from Michael Ströder.
# Added SRV (RFC 2782) support. Code from Michael Ströder.
#
# Revision 1.6 2001/07/19 07:39:18 anthony
# 'type' -> 'rrtype' in getRRheader(). Fix from Michael Ströder.
# 'type' -> 'rrtype' in getRRheader(). Fix from Michael Ströder.
#
# Revision 1.5 2001/07/19 07:34:19 anthony
# oops. glitch in storeRR (fixed now).

View file

@ -11,16 +11,16 @@
__version__ = '2.3.1'
import Type
import Opcode
import Status
import Class
from Base import DnsRequest, DNSError
from Lib import DnsResult
from Base import *
from Lib import *
from discovery.DNS import Type
from discovery.DNS import Opcode
from discovery.DNS import Status
from discovery.DNS import Class
from discovery.DNS.Base import DnsRequest, DNSError
from discovery.DNS.Lib import DnsResult
from discovery.DNS.Base import *
from discovery.DNS.Lib import *
Error = DNSError
from lazy import *
from discovery.DNS.lazy import *
Request = DnsRequest
Result = DnsResult

View file

@ -7,7 +7,7 @@
#
# routines for lazy people.
import Base
import discovery.DNS.Base as Base
import string

View file

@ -19,23 +19,21 @@
License analog to the current Python license
"""
import string
import re
import _winreg
import winreg
def binipdisplay(s):
"convert a binary array of ip adresses to a python list"
if len(s) % 4 != 0:
raise EnvironmentError # well ...
ol = []
for i in range(len(s) / 4):
s = '.'
for i in range(int(len(s) / 4)):
s1 = s[:4]
s = s[4:]
ip = []
for j in s1:
ip.append(str(ord(j)))
ol.append(string.join(ip, '.'))
ol.append(s.join(ip))
return ol
@ -49,75 +47,75 @@ def stringdisplay(s):
def RegistryResolve():
nameservers = []
x = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
x = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
try:
y = _winreg.OpenKey(x,
y = winreg.OpenKey(x,
r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters")
except EnvironmentError: # so it isn't NT/2000/XP
# windows ME, perhaps?
try: # for Windows ME
y = _winreg.OpenKey(x,
y = winreg.OpenKey(x,
r"SYSTEM\CurrentControlSet\Services\VxD\MSTCP")
nameserver, dummytype = _winreg.QueryValueEx(y, 'NameServer')
nameserver, dummytype = winreg.QueryValueEx(y, 'NameServer')
if nameserver and not (nameserver in nameservers):
nameservers.extend(stringdisplay(nameserver))
except EnvironmentError:
pass
return nameservers # no idea
try:
nameserver = _winreg.QueryValueEx(y, "DhcpNameServer")[0].split()
nameserver = winreg.QueryValueEx(y, "DhcpNameServer")[0].split()
except:
nameserver = _winreg.QueryValueEx(y, "NameServer")[0].split()
nameserver = winreg.QueryValueEx(y, "NameServer")[0].split()
if nameserver:
nameservers = nameserver
nameserver = _winreg.QueryValueEx(y, "NameServer")[0]
_winreg.CloseKey(y)
nameserver = winreg.QueryValueEx(y, "NameServer")[0]
winreg.CloseKey(y)
try: # for win2000
y = _winreg.OpenKey(x,
y = winreg.OpenKey(x,
r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters\DNSRegisteredAdapters")
for i in range(1000):
try:
n = _winreg.EnumKey(y, i)
z = _winreg.OpenKey(y, n)
dnscount, dnscounttype = _winreg.QueryValueEx(z,
n = winreg.EnumKey(y, i)
z = winreg.OpenKey(y, n)
dnscount, dnscounttype = winreg.QueryValueEx(z,
'DNSServerAddressCount')
dnsvalues, dnsvaluestype = _winreg.QueryValueEx(z,
dnsvalues, dnsvaluestype = winreg.QueryValueEx(z,
'DNSServerAddresses')
nameservers.extend(binipdisplay(dnsvalues))
_winreg.CloseKey(z)
winreg.CloseKey(z)
except EnvironmentError:
break
_winreg.CloseKey(y)
winreg.CloseKey(y)
except EnvironmentError:
pass
#
try: # for whistler
y = _winreg.OpenKey(x,
y = winreg.OpenKey(x,
r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters\Interfaces")
for i in range(1000):
try:
n = _winreg.EnumKey(y, i)
z = _winreg.OpenKey(y, n)
n = winreg.EnumKey(y, i)
z = winreg.OpenKey(y, n)
try:
nameserver, dummytype = _winreg.QueryValueEx(
nameserver, dummytype = winreg.QueryValueEx(
z, 'NameServer')
if nameserver and not (nameserver in nameservers):
nameservers.extend(stringdisplay(nameserver))
except EnvironmentError:
pass
_winreg.CloseKey(z)
winreg.CloseKey(z)
except EnvironmentError:
break
_winreg.CloseKey(y)
winreg.CloseKey(y)
except EnvironmentError:
# print "Key Interfaces not found, just do nothing"
pass
#
_winreg.CloseKey(x)
winreg.CloseKey(x)
return nameservers
if __name__ == "__main__":
print "Name servers:", RegistryResolve()
print("Name servers:", RegistryResolve())
#
# $Log: win32dns.py,v $

View file

@ -1,23 +1,3 @@
__all__ = ["bingsearch",
"googlesearch",
"googleplussearch",
"pgpsearch",
"linkedinsearch",
"exaleadsearch",
"yandexsearch",
"googlesets",
"dnssearch",
"shodansearch",
"jigsaw",
"twittersearch",
"dogpilesearch",
"baidusearch",
"yahoosearch",
"netcraft",
"crtsh",
"virustotal",
"threatcrowd",
"wfuzz_search",
"port_scanner",
"takeover",
"googleCSE"]
from api import WebAPI
from client import Shodan
from exception import APIError

View file

@ -1,5 +1,5 @@
import string
import httplib
import http.client
import sys
import myparser
import re
@ -22,7 +22,7 @@ def __init__(self, word, limit, start):
self.counter = start
def do_search(self):
h = httplib.HTTP(self.server)
h = http.client.HTTP(self.server)
h.putrequest('GET', "/search?q=%40" + self.word +
"&count=50&first=" + str(self.counter))
h.putheader('Host', self.hostname)
@ -35,7 +35,7 @@ def do_search(self):
self.totalresults += self.results
def do_search_api(self):
h = httplib.HTTP(self.apiserver)
h = http.client.HTTP(self.apiserver)
h.putrequest('GET', "/xml.aspx?Appid=" + self.bingApi + "&query=%40" +
self.word + "&sources=web&web.count=40&web.offset=" + str(self.counter))
h.putheader('Host', "api.search.live.net")
@ -74,7 +74,7 @@ def get_allhostnames(self):
def process(self, api):
if api == "yes":
if self.bingApi == "":
print "Please insert your API key in the discovery/bingsearch.py"
print("Please insert your API key in the discovery/bingsearch.py")
sys.exit()
while (self.counter < self.limit):
if api == "yes":
@ -84,7 +84,7 @@ def process(self, api):
self.do_search()
time.sleep(1)
self.counter += 50
print "\tSearching " + str(self.counter) + " results..."
print("\tSearching " + str(self.counter) + " results...")
def process_vhost(self):
# Maybe it is good to use other limit for this.

71
discovery/censys.py Normal file
View file

@ -0,0 +1,71 @@
import httplib
import sys
import random
import requests
import censysparser
class search_censys:
    """Scrape censys.io IPv4 search results for a given word/domain."""

    def __init__(self, word):
        self.word = word            # term to search for
        self.url = ""               # built lazily in process()
        self.page = ""              # page number (as string) for paginated requests
        self.results = ""           # raw HTML of the most recent response
        self.total_results = ""     # accumulated HTML across all fetched pages
        self.server = "censys.io"
        # Pool of User-Agent strings; one is picked at random per request.
        self.userAgent = ["(Mozilla/5.0 (Windows; U; Windows NT 6.0;en-US; rv:1.9.2) Gecko/20100115 Firefox/3.6",
                          "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36"
                          , ("Mozilla/5.0 (Linux; Android 7.0; SM-G892A Build/NRD90M; wv) " +
                             "AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/60.0.3112.107 Mobile Safari/537.36"),
                          ("Mozilla/5.0 (Windows Phone 10.0; Android 6.0.1; Microsoft; RM-1152) " +
                           "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Mobile Safari/537.36 Edge/15.15254"),
                          "Mozilla/5.0 (SMART-TV; X11; Linux armv7l) AppleWebKit/537.42 (KHTML, like Gecko) Chromium/25.0.1349.2 Chrome/25.0.1349.2 Safari/537.42"
                          , "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36 OPR/43.0.2442.991"
                          , "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36 OPR/48.0.2685.52"
                          , "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko"
                          , "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"
                          , "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)"]

    def do_search(self):
        """Fetch self.url and append the response HTML to total_results."""
        try:
            headers = {'user-agent': random.choice(self.userAgent), 'Accept': '*/*', 'Referer': self.url}
            response = requests.get(self.url, headers=headers)
            # .text (str), not .content (bytes): total_results is a str and
            # `str += bytes` raises TypeError on Python 3.
            self.results = response.text
            self.total_results += self.results
        except Exception as e:  # was py2 `except Exception,e:` -- a SyntaxError on Python 3
            print(e)

    def process(self, morepage=None):
        """Build the search URL (optionally for page `morepage`) and run the search."""
        try:
            if morepage is not None:
                self.page = str(morepage)
                self.url = self.url + "&page=" + self.page
            else:
                self.url = "https://" + self.server + "/ipv4/_search?q=" + self.word
            self.do_search()
            print("\tSearching Censys results..")  # was a py2 print statement
        except Exception as e:
            # str(e): concatenating str + Exception raises TypeError
            print("Error occurred: " + str(e))

    def get_hostnames(self):
        """Return hostnames parsed from the accumulated results, or None on error."""
        try:
            return censysparser.parser(self).search_hostnames()
        except Exception as e:
            print("Error occurred: " + str(e))

    def get_ipaddresses(self):
        """Return IP addresses parsed from the accumulated results, or None on error."""
        try:
            return censysparser.parser(self).search_ipaddresses()
        except Exception as e:
            print("Error occurred: " + str(e))

    def get_totalnumberofpages(self):
        """Return the number of result pages found, or None on error."""
        try:
            return censysparser.parser(self).search_numberofpages()
        except Exception as e:
            print("Error occurred: " + str(e))

View file

@ -1,5 +1,5 @@
import IPy
import DNS
from discovery import IPy
import discovery.DNS as DNS
import string
import socket
import sys
@ -18,7 +18,7 @@ def __init__(self, range, verbose=True):
DNS.ParseResolvConf("/etc/resolv.conf")
nameserver = DNS.defaults['server'][0]
except:
print "Error in DNS resolvers"
print("Error in DNS resolvers")
sys.exit()
def run(self, host):
@ -42,7 +42,7 @@ def get_ip_list(self, ips):
try:
list = IPy.IP(ips)
except:
print "Error in IP format, check the input and try again. (Eg. 192.168.1.0/24)"
print("Error in IP format, check the input and try again. (Eg. 192.168.1.0/24)")
sys.exit()
name = []
for x in list:
@ -75,12 +75,12 @@ def __init__(self, domain, dnsserver, verbose=False):
res_path = os.path.join(fileDir,'lib/resolvers.txt')
with open(res_path) as f:
self.resolvers = f.read().splitlines()
except Exception, e:
print "Resolvers file can't be open"
except Exception as e:
print("Resolvers file can't be open")
try:
f = open(self.file, "r")
except:
print "Error opening dns dictionary file"
print("Error opening dns dictionary file")
sys.exit()
self.list = f.readlines()
@ -105,10 +105,10 @@ def getdns(self, domain):
server=primary,
aa=1).req()
except Exception as e:
print e
print(e)
if test.header['status'] != "NOERROR":
print "Error"
print("Error")
sys.exit()
self.nameserver = test.answers[0]['data']
elif self.nameserver == "local":
@ -118,7 +118,7 @@ def getdns(self, domain):
def run(self, host):
if self.nameserver == "":
self.nameserver = self.getdns(self.domain)
print "\n\033[94m[-] Using DNS server: " + self.nameserver + "\033[1;33;40m\n"
print("\n\033[94m[-] Using DNS server: " + self.nameserver + "\033[1;33;40m\n")
#secure_random = random.SystemRandom()
#self.nameserver = secure_random.choice(self.resolvers)
@ -144,7 +144,7 @@ def process(self):
for x in self.list:
host = self.run(x)
if host is not None:
print " : " + host.split(":")[1]
print(" : " + host.split(":")[1])
results.append(host)
return results
@ -199,7 +199,7 @@ def getdns(self, domain):
0]['data']
test = DNS.Request(rootdom, qtype='NS', server=primary, aa=1).req()
if test.header['status'] != "NOERROR":
print "Error"
print("Error")
sys.exit()
self.nameserver = test.answers[0]['data']
elif self.nameserver == "local":

View file

@ -1,5 +1,5 @@
import string
import httplib
import http.client
import sys
import myparser
import re
@ -7,7 +7,6 @@
class search_exalead:
def __init__(self, word, limit, start):
self.word = word
self.files = "pdf"
@ -20,7 +19,7 @@ def __init__(self, word, limit, start):
self.counter = start
def do_search(self):
h = httplib.HTTP(self.server)
h = http.client.HTTPConnection(self.server)
h.putrequest('GET', "/search/web/results/?q=%40" + self.word +
"&elements_per_page=50&start_index=" + str(self.counter))
h.putheader('Host', self.hostname)
@ -37,7 +36,7 @@ def do_search(self):
self.totalresults += self.results
def do_search_files(self, files):
h = httplib.HTTP(self.server)
h = http.client.HTTPConnection(self.server)
h.putrequest(
'GET',
"search/web/results/?q=" +
@ -58,7 +57,7 @@ def check_next(self):
nextres = renext.findall(self.results)
if nextres != []:
nexty = "1"
print str(self.counter)
print(str(self.counter))
else:
nexty = "0"
return nexty
@ -79,7 +78,7 @@ def process(self):
while self.counter <= self.limit:
self.do_search()
self.counter += 50
print "\tSearching " + str(self.counter) + " results..."
print("\ tSearching " + str(self.counter) + " results...")
def process_files(self, files):
while self.counter < self.limit:

View file

@ -21,13 +21,13 @@ def __init__(self, word, limit):
def do_search(self):
try:
urly="https://" + self.server + "/search?num=100&start=" + str(self.counter) + "&hl=en&meta=&q=site%3Aplus.google.com%20intext%3A%22Works%20at%22%20" + self.word+ "%20-inurl%3Aphotos%20-inurl%3Aabout%20-inurl%3Aposts%20-inurl%3Aplusones"
except Exception, e:
print e
except Exception as e:
print(e)
try:
headers = {'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:34.0) Gecko/20100101 Firefox/34.0'}
r=requests.get(urly,headers=headers)
except Exception,e:
print e
except Exception as e:
print(e)
self.results = r.content
self.totalresults += self.results
@ -39,4 +39,4 @@ def process(self):
while (self.counter < self.limit):
self.do_search()
self.counter += 100
print "\tSearching " + str(self.counter) + " results.."
print("\tSearching " + str(self.counter) + " results..")

View file

@ -32,25 +32,25 @@ def __init__(self, word, limit, start):
def do_search(self):
try: #do normal scraping
urly="http://" + self.server + "/search?num=" + self.quantity + "&start=" + str(self.counter) + "&hl=en&meta=&q=%40\"" + self.word + "\""
except Exception, e:
print e
except Exception as e:
print(e)
try:
params = {'User-Agent': random.choice(self.userAgent)} #select random user agent
r=requests.get(urly,params= params)
except Exception,e:
print e
except Exception as e:
print(e)
self.results = r.content
self.totalresults += self.results
def do_search_profiles(self):
try:
urly="http://" + self.server + "/search?num=" + self.quantity + "&start=" + str(self.counter) + "&hl=en&meta=&q=site:www.google.com%20intitle:\"Google%20Profile\"%20\"Companies%20I%27ve%20worked%20for\"%20\"at%20" + self.word + "\""
except Exception, e:
print e
except Exception as e:
print(e)
try:
r=requests.get(urly)
except Exception,e:
print e
except Exception as e:
print (e)
self.results = r.content
#'&hl=en&meta=&q=site:www.google.com%20intitle:"Google%20Profile"%20"Companies%20I%27ve%20worked%20for"%20"at%20' + self.word + '"')
self.totalresults += self.results
@ -77,17 +77,17 @@ def process(self,google_dorking):
self.do_search()
#more = self.check_next()
time.sleep(1)
print "\tSearching " + str(self.counter) + " results..."
print("\tSearching " + str(self.counter) + " results...")
self.counter += 100
else: #google dorking is true
self.counter = 0 #reset counter
print '\n'
print "[-] Searching with Google Dorks: "
print('\n')
print("[-] Searching with Google Dorks: ")
while self.counter <= self.limit and self.counter <= 200: # only 200 dorks in list
self.googledork() #call google dorking method if user wanted it!
# more = self.check_next()
time.sleep(.1)
print "\tSearching " + str(self.counter) + " results..."
time.sleep(.5)
print("\tSearching " + str(self.counter) + " results...")
self.counter += 100
def process_profiles(self):
@ -95,7 +95,7 @@ def process_profiles(self):
self.do_search_profiles()
time.sleep(0.15)
self.counter += 100
print "\tSearching " + str(self.counter) + " results..."
print("\tSearching " + str(self.counter) + " results...")
def append_dorks(self):
try: # wrap in try-except incase filepaths are messed up

View file

@ -1,5 +1,5 @@
import string
import httplib
import http.client
import sys
import myparser
import re
@ -24,7 +24,7 @@ def __init__(self, list):
self.set = self.set + "&q" + str(id) + "=" + str(x)
def do_search(self):
h = httplib.HTTP(self.server)
h = http.client.HTTPConnection(self.server)
h.putrequest('GET', "/sets?hl=en&" + self.set)
h.putheader('Host', self.hostname)
h.putheader('User-agent', self.userAgent)

View file

@ -7,28 +7,28 @@ class search_hunter:
def __init__(self, word, limit, start):
self.word = word
self.limit = limit
self.limit = 100
self.start = start
self.key = ""
if self.key == "":
print "You need an API key in order to use the Hunter search engine. You can get one here: https://hunter.io"
print("You need an API key in order to use the Hunter search engine. You can get one here: https://hunter.io")
sys.exit()
self.results = ""
self.totalresults = ""
self.counter = start
self.database = "https://api.hunter.io/v2/domain-search?domain=" + word + "&api_key=" + self.key
self.database = "https://api.hunter.io/v2/domain-search?domain=" + word + "&api_key=" + self.key +"&limit=" + str(self.limit)
def do_search(self):
try:
r = requests.get(self.database)
except Exception,e:
print e
except Exception as e:
print(e)
self.results = r.content
self.totalresults += self.results
def process(self):
self.do_search() #only need to do it once
print '\tDone Searching Results'
print('\tDone Searching Results')
def get_emails(self):
rawres = myparser.parser(self.totalresults, self.word)

View file

@ -20,12 +20,12 @@ def __init__(self, word, limit):
def do_search(self):
try:
urly="http://"+ self.server + "/search?num=100&start=" + str(self.counter) + "&hl=en&meta=&q=site%3Alinkedin.com/in%20" + self.word
except Exception, e:
print e
except Exception as e:
print(e)
try:
r=requests.get(urly)
except Exception,e:
print e
except Exception as e:
print(e)
self.results = r.content
self.totalresults += self.results
@ -37,4 +37,4 @@ def process(self):
while (self.counter < self.limit):
self.do_search()
self.counter += 100
print "\tSearching " + str(self.counter) + " results.."
print("\tSearching " + str(self.counter) + " results..")

View file

@ -1,5 +1,5 @@
import string
import httplib
import http.client
import sys
import myparser
@ -15,18 +15,18 @@ def __init__(self, word):
self.userAgent = "(Mozilla/5.0 (Windows; U; Windows NT 6.0;en-US; rv:1.9.2) Gecko/20100115 Firefox/3.6"
def process(self):
print "\tSearching PGP results..."
print("\tSearching PGP results...")
try:
h = httplib.HTTP(self.server)
h = http.client.HTTP(self.server)
h.putrequest('GET', "/pks/lookup?search=" + self.word + "&op=index")
h.putheader('Host', self.hostname)
h.putheader('User-agent', self.userAgent)
h.endheaders()
returncode, returnmsg, headers = h.getreply()
self.results = h.getfile().read()
except Exception, e:
print "Unable to connect to PGP server: ",str(e)
except Exception as e:
print("Unable to connect to PGP server: ",str(e))
pass
def get_emails(self):

View file

@ -30,7 +30,7 @@
# Define a basestring type if necessary for Python3 compatibility
try:
basestring
str
except NameError:
basestring = str

View file

@ -1,4 +1,4 @@
from shodan import Shodan
from discovery.shodan import Shodan
import sys
import json
@ -9,7 +9,7 @@ def __init__(self, host):
self.host = host
self.key = "oCiMsgM6rQWqiTvPxFHYcExlZgg7wvTt"
if self.key == "":
print "You need an API key in order to use SHODAN database. You can get one here: http://www.shodanhq.com/"
print("You need an API key in order to use SHODAN database. You can get one here: http://www.shodanhq.com/")
sys.exit()
self.api = Shodan(self.key)
@ -17,7 +17,7 @@ def run(self):
try:
host = self.api.host(self.host)
return host['data']
except Exception, e:
print "SHODAN empty reply or error in the call"
print e
except Exception as e:
print("SHODAN empty reply or error in the call")
print(e)
return "error"

View file

@ -1,5 +1,5 @@
import string
import httplib
import http.client
import sys
import myparser
import re
@ -19,7 +19,7 @@ def __init__(self, word, limit, start):
self.counter = start
def do_search(self):
h = httplib.HTTP(self.server)
h = http.client.HTTPConnection(self.server)
h.putrequest('GET', "/search?text=%40" + self.word +
"&numdoc=50&lr=" + str(self.counter))
h.putheader('Host', self.hostname)
@ -28,10 +28,10 @@ def do_search(self):
returncode, returnmsg, headers = h.getreply()
self.results = h.getfile().read()
self.totalresults += self.results
print self.results
print(self.results)
def do_search_files(self, files): # TODO
h = httplib.HTTP(self.server)
h = http.client.HTTPConnection(self.server)
h.putrequest('GET', "/search?text=%40" + self.word +
"&numdoc=50&lr=" + str(self.counter))
h.putheader('Host', self.hostname)
@ -46,7 +46,7 @@ def check_next(self):
nextres = renext.findall(self.results)
if nextres != []:
nexty = "1"
print str(self.counter)
print(str(self.counter))
else:
nexty = "0"
return nexty
@ -67,7 +67,7 @@ def process(self):
while self.counter <= self.limit:
self.do_search()
self.counter += 50
print "Searching " + str(self.counter) + " results..."
print("Searching " + str(self.counter) + " results...")
def process_files(self, files):
while self.counter < self.limit:

View file

@ -776,4 +776,4 @@ def _number_format(val, dec):
return dec and ('%.' + str(dec) + 'f') % val or int(round(val))
if __name__ == '__main__':
print __doc__
print(__doc__)

View file

@ -1 +1,2 @@
requests==2.18.4
bs4==0.0.1

View file

@ -29,8 +29,8 @@ def store(self,domain, resource,res_type,source):
c.execute ('INSERT INTO results (domain,resource, type, find_date, source) VALUES (?,?,?,?,?)',(self.domain,self.resource,self.type,self.date,self.source))
conn.commit()
conn.close()
except Exception, e:
print e
except Exception as e:
print(e)
return
def store_all(self,domain,all,res_type,source):
@ -46,6 +46,6 @@ def store_all(self,domain,all,res_type,source):
c.execute ('INSERT INTO results (domain,resource, type, find_date, source) VALUES (?,?,?,?,?)',(self.domain,x,self.type,self.date,self.source))
conn.commit()
conn.close()
except Exception, e:
print e
except Exception as e:
print(e)
return

View file

@ -12,7 +12,7 @@ def test_emails(self):
results = '@domain.com***a@domain***banotherdomain.com***c@domain.com***d@sub.domain.com***'
p = myparser.parser(results, word)
emails = sorted(p.emails())
self.assertEquals(emails, [ 'c@domain.com', 'd@sub.domain.com' ])
self.assertEqual(emails, [ 'c@domain.com', 'd@sub.domain.com' ])
if __name__ == '__main__':
unittest.main()

View file

@ -1,10 +1,8 @@
#!/usr/bin/env python
import string
import httplib
import sys
import os
from socket import *
import re
import getopt
import stash
@ -13,25 +11,25 @@
try:
import requests
except:
print "Request library not found, please install it before proceeding\n"
print("Request library not found, please install it before proceeding\n")
sys.exit()
from discovery import *
from lib import htmlExport
from lib import hostchecker
print "\n \033[92m *******************************************************************"
print "* *"
print "* | |_| |__ ___ /\ /\__ _ _ ____ _____ ___| |_ ___ _ __ *"
print "* | __| '_ \ / _ \ / /_/ / _` | '__\ \ / / _ \/ __| __/ _ \ '__| *"
print "* | |_| | | | __/ / __ / (_| | | \ V / __/\__ \ || __/ | *"
print "* \__|_| |_|\___| \/ /_/ \__,_|_| \_/ \___||___/\__\___|_| *"
print "* *"
print "* TheHarvester Ver. 3.0.0 *"
print "* Coded by Christian Martorella *"
print "* Edge-Security Research *"
print "* cmartorella@edge-security.com *"
print "*******************************************************************\033[94m\n\n"
print("\n \033[92m *******************************************************************")
print("* *")
print("* | |_| |__ ___ /\ /\__ _ _ ____ _____ ___| |_ ___ _ __ *")
print("* | __| '_ \ / _ \ / /_/ / _` | '__\ \ / / _ \/ __| __/ _ \ '__| *")
print("* | |_| | | | __/ / __ / (_| | | \ V / __/\__ \ || __/ | *")
print("* \__|_| |_|\___| \/ /_/ \__,_|_| \_/ \___||___/\__\___|_| *")
print("* *")
print("* TheHarvester Ver. 3.0.0 *")
print("* Coded by Christian Martorella *")
print("* Edge-Security Research *")
print("* cmartorella@edge-security.com *")
print("*******************************************************************\033[94m\n\n")
def usage():
@ -41,29 +39,29 @@ def usage():
if os.path.dirname(sys.argv[0]) == os.getcwd():
comm = "./" + comm
print "Usage: theharvester options \n"
print " -d: Domain to search or company name"
print """ -b: data source: baidu, bing, bingapi, dogpile, google, googleCSE,
print("Usage: theharvester options \n")
print(" -d: Domain to search or company name")
print(""" -b: data source: baidu, bing, bingapi, dogpile, google, googleCSE,
googleplus, google-profiles, linkedin, pgp, twitter, vhost,
virustotal, threatcrowd, crtsh, netcraft, yahoo, hunter, all\n"""
print " -g: use google dorking instead of normal google search"
print " -s: start in result number X (default: 0)"
print " -v: verify host name via dns resolution and search for virtual hosts"
print " -f: save the results into an HTML and XML file (both)"
print " -n: perform a DNS reverse query on all ranges discovered"
print " -c: perform a DNS brute force for the domain name"
print " -t: perform a DNS TLD expansion discovery"
print " -e: use this DNS server"
print " -p: port scan the detected hosts and check for Takeovers (80,443,22,21,8080)"
print " -l: limit the number of results to work with(bing goes from 50 to 50 results,"
print " google 100 to 100, and pgp doesn't use this option)"
print " -h: use SHODAN database to query discovered hosts"
print "\nExamples:"
print " " + comm + " -d microsoft.com -l 500 -b google -h myresults.html"
print " " + comm + " -d microsoft.com -b pgp"
print " " + comm + " -d microsoft -l 200 -b linkedin"
print " " + comm + " -d microsoft.com -l 200 -g -b google"
print " " + comm + " -d apple.com -b googleCSE -l 500 -s 300\n"
virustotal, threatcrowd, crtsh, netcraft, yahoo, hunter, all\n""")
print(" -g: use google dorking instead of normal google search")
print(" -s: start in result number X (default: 0)")
print(" -v: verify host name via dns resolution and search for virtual hosts")
print(" -f: save the results into an HTML and XML file (both)")
print(" -n: perform a DNS reverse query on all ranges discovered")
print(" -c: perform a DNS brute force for the domain name")
print(" -t: perform a DNS TLD expansion discovery")
print(" -e: use this DNS server")
print(" -p: port scan the detected hosts and check for Takeovers (80,443,22,21,8080)")
print(" -l: limit the number of results to work with(bing goes from 50 to 50 results,")
print(" google 100 to 100, and pgp doesn't use this option)")
print(" -h: use SHODAN database to query discovered hosts")
print("\nExamples:")
print((" " + comm + " -d microsoft.com -l 500 -b google -h myresults.html"))
print((" " + comm + " -d microsoft.com -b pgp"))
print((" " + comm + " -d microsoft -l 200 -b linkedin"))
print((" " + comm + " -d microsoft.com -l 200 -g -b google"))
print((" " + comm + " -d apple.com -b googleCSE -l 500 -s 300\n"))
def start(argv):
@ -78,7 +76,7 @@ def start(argv):
try:
db=stash.stash_manager()
db.do_init()
except Exception, e:
except Exception as e:
pass
start = 0
host_ip = []
@ -124,11 +122,11 @@ def start(argv):
engines = set(arg.split(','))
supportedengines = set(["baidu","bing","crtsh","bingapi","dogpile","google","googleCSE","virustotal","threatcrowd","googleplus","google-profiles","linkedin","pgp","twitter","vhost","yahoo","netcraft","hunter","all"])
if set(engines).issubset(supportedengines):
print "found supported engines"
print "[-] Starting harvesting process for domain: " + word + "\n"
print("found supported engines")
print(("[-] Starting harvesting process for domain: " + word + "\n"))
for engineitem in engines:
if engineitem == "google":
print "[-] Searching in Google:"
print("[-] Searching in Google:")
search = googlesearch.search_google(word, limit, start)
search.process(google_dorking)
all_emails = search.get_emails()
@ -137,11 +135,11 @@ def start(argv):
try:
db=stash.stash_manager()
db.store(word,x,'host','google')
except Exception, e:
print e
except Exception as e:
print(e)
if engineitem == "netcraft":
print "[-] Searching in Netcraft:"
print("[-] Searching in Netcraft:")
search = netcraft.search_netcraft(word)
search.process()
all_hosts = search.get_hostnames()
@ -151,7 +149,7 @@ def start(argv):
if engineitem == "threatcrowd":
print "[-] Searching in Threatcrowd:"
print("[-] Searching in Threatcrowd:")
search = threatcrowd.search_threatcrowd(word)
search.process()
all_hosts = search.get_hostnames()
@ -160,7 +158,7 @@ def start(argv):
db.store_all(word,all_hosts,'host','threatcrowd')
if engineitem == "virustotal":
print "[-] Searching in Virustotal:"
print("[-] Searching in Virustotal:")
search = virustotal.search_virustotal(word)
search.process()
all_hosts = search.get_hostnames()
@ -170,7 +168,7 @@ def start(argv):
if engineitem == "crtsh":
print "[-] Searching in CRT.sh:"
print("[-] Searching in CRT.sh:")
search = crtsh.search_crtsh(word)
search.process()
all_hosts = search.get_hostnames()
@ -179,7 +177,7 @@ def start(argv):
db.store_all(word,all_hosts,'host','CRTsh')
if engineitem == "googleCSE":
print "[-] Searching in Google Custom Search:"
print("[-] Searching in Google Custom Search:")
search = googleCSE.search_googleCSE(word, limit, start)
search.process()
search.store_results()
@ -191,7 +189,7 @@ def start(argv):
db.store_all(word,all_hosts,'host','googleCSE')
elif engineitem == "bing" or engineitem == "bingapi":
print "[-] Searching in Bing:"
print("[-] Searching in Bing:")
search = bingsearch.search_bing(word, limit, start)
if engineitem == "bingapi":
bingapi = "yes"
@ -202,14 +200,14 @@ def start(argv):
all_hosts = search.get_hostnames()
elif engineitem == "dogpile":
print "[-] Searching in Dogpilesearch.."
print("[-] Searching in Dogpilesearch..")
search = dogpilesearch.search_dogpile(word, limit)
search.process()
all_emails = search.get_emails()
all_hosts = search.get_hostnames()
elif engineitem == "pgp":
print "[-] Searching in PGP key server.."
print("[-] Searching in PGP key server..")
search = pgpsearch.search_pgp(word)
search.process()
all_emails = search.get_emails()
@ -220,65 +218,65 @@ def start(argv):
db.store_all(word,all_emails,'emails','pgp')
elif engineitem == "yahoo":
print "[-] Searching in Yahoo.."
print("[-] Searching in Yahoo..")
search = yahoosearch.search_yahoo(word, limit)
search.process()
all_emails = search.get_emails()
all_hosts = search.get_hostnames()
# elif engineitem == "baidu":
print "[-] Searching in Baidu.."
elif engineitem == "baidu":
print("[-] Searching in Baidu..")
search = baidusearch.search_baidu(word, limit)
search.process()
all_emails = search.get_emails()
all_hosts = search.get_hostnames()
elif engineitem == "googleplus":
print "[-] Searching in Google+ .."
print("[-] Searching in Google+ ..")
search = googleplussearch.search_googleplus(word, limit)
search.process()
people = search.get_people()
print "Users from Google+:"
print "===================="
print("Users from Google+:")
print("====================")
for user in people:
print user
print(user)
sys.exit()
elif engineitem == "twitter":
print "[-] Searching in Twitter .."
print("[-] Searching in Twitter ..")
search = twittersearch.search_twitter(word, limit)
search.process()
people = search.get_people()
print "Users from Twitter:"
print "-------------------"
print("Users from Twitter:")
print("-------------------")
for user in people:
print user
print(user)
sys.exit()
elif engineitem == "linkedin":
print "[-] Searching in Linkedin.."
print("[-] Searching in Linkedin..")
search = linkedinsearch.search_linkedin(word, limit)
search.process()
people = search.get_people()
print "Users from Linkedin:"
print "-------------------"
print("Users from Linkedin:")
print("-------------------")
for user in people:
print user
print(user)
sys.exit()
elif engineitem == "google-profiles":
print "[-] Searching in Google profiles.."
print("[-] Searching in Google profiles..")
search = googlesearch.search_google(word, limit, start)
search.process_profiles()
people = search.get_profiles()
print "Users from Google profiles:"
print "---------------------------"
print("Users from Google profiles:")
print("---------------------------")
for users in people:
print users
print(users)
sys.exit()
elif engineitem == "hunter":
print "[-] Searching in Hunter:"
print("[-] Searching in Hunter:")
from discovery import huntersearch
#import locally or won't work
search = huntersearch.search_hunter(word, limit, start)
@ -287,11 +285,11 @@ def start(argv):
all_hosts = search.get_hostnames()
elif engineitem == "all":
print "Full harvest on " + word
print(("Full harvest on " + word))
all_emails = []
all_hosts = []
print "[-] Searching in Google.."
print("[-] Searching in Google..")
search = googlesearch.search_google(word, limit, start)
search.process(google_dorking)
emails = search.get_emails()
@ -303,7 +301,7 @@ def start(argv):
db=stash.stash_manager()
db.store_all(word,all_hosts,'host','google')
print "[-] Searching in PGP Key server.."
print("[-] Searching in PGP Key server..")
search = pgpsearch.search_pgp(word)
search.process()
emails = search.get_emails()
@ -315,7 +313,7 @@ def start(argv):
db=stash.stash_manager()
db.store_all(word,all_hosts,'email','PGP')
print "[-] Searching in Netcraft server.."
print("[-] Searching in Netcraft server..")
search = netcraft.search_netcraft(word)
search.process()
hosts = search.get_hostnames()
@ -323,7 +321,7 @@ def start(argv):
db=stash.stash_manager()
db.store_all(word,all_hosts,'host','netcraft')
print "[-] Searching in ThreatCrowd server.."
print("[-] Searching in ThreatCrowd server..")
try:
search = threatcrowd.search_threatcrowd(word)
search.process()
@ -334,7 +332,7 @@ def start(argv):
db.store_all(word,all_hosts,'host','threatcrowd')
except Exception: pass
print "[-] Searching in CRTSH server.."
print("[-] Searching in CRTSH server..")
search = crtsh.search_crtsh(word)
search.process()
hosts = search.get_hostnames()
@ -342,7 +340,7 @@ def start(argv):
db=stash.stash_manager()
db.store_all(word,all_hosts,'host','CRTsh')
print "[-] Searching in Virustotal server.."
print("[-] Searching in Virustotal server..")
search = virustotal.search_virustotal(word)
search.process()
hosts = search.get_hostnames()
@ -350,7 +348,7 @@ def start(argv):
db=stash.stash_manager()
db.store_all(word,all_hosts,'host','virustotal')
print "[-] Searching in Bing.."
print("[-] Searching in Bing..")
bingapi = "no"
search = bingsearch.search_bing(word, limit, start)
search.process(bingapi)
@ -363,7 +361,7 @@ def start(argv):
#Clean up email list, sort and uniq
all_emails=sorted(set(all_emails))
print "[-] Searching in Hunter:"
print("[-] Searching in Hunter:")
from discovery import huntersearch
#import locally
search = huntersearch.search_hunter(word, limit, start)
@ -379,34 +377,34 @@ def start(argv):
else:
#if engine not in ("baidu", "bing", "crtsh","bingapi","dogpile","google", "googleCSE","virustotal","threatcrowd", "googleplus", "google-profiles","linkedin", "pgp", "twitter", "vhost", "yahoo","netcraft","all"):
usage()
print "Invalid search engine, try with: baidu, bing, bingapi, crtsh, dogpile, google, googleCSE, virustotal, netcraft, googleplus, google-profiles, linkedin, pgp, twitter, vhost, yahoo, hunter, all"
print("Invalid search engine, try with: baidu, bing, bingapi, crtsh, dogpile, google, googleCSE, virustotal, netcraft, googleplus, google-profiles, linkedin, pgp, twitter, vhost, yahoo, hunter, all")
sys.exit()
#else:
# pass
#Results############################################################
print("\n\033[1;32;40m Harvesting results")
print "\n\n[+] Emails found:"
print "------------------"
print("\n\n[+] Emails found:")
print("------------------")
if all_emails == []:
print "No emails found"
print("No emails found")
else:
print "\n".join(all_emails)
print(("\n".join(all_emails)))
print("\033[1;33;40m \n[+] Hosts found in search engines:")
print "------------------------------------"
print("------------------------------------")
if all_hosts == []:
print "No hosts found"
print("No hosts found")
else:
total = len(all_hosts)
print "\nTotal hosts: " + str(total) + "\n"
print(("\nTotal hosts: " + str(total) + "\n"))
all_hosts=sorted(set(all_hosts))
print "\033[94m[-] Resolving hostnames IPs...\033[1;33;40m \n "
print("\033[94m[-] Resolving hostnames IPs...\033[1;33;40m \n ")
full_host = hostchecker.Checker(all_hosts)
full = full_host.check()
for host in full:
ip = host.split(':')[1]
print host
print(host)
if ip != "empty":
if host_ip.count(ip.lower()):
pass
@ -416,13 +414,13 @@ def start(argv):
#DNS Brute force####################################################
dnsres = []
if dnsbrute == True:
print "\n\033[94m[-] Starting DNS brute force: \033[1;33;40m"
print("\n\033[94m[-] Starting DNS brute force: \033[1;33;40m")
a = dnssearch.dns_force(word, dnsserver, verbose=True)
res = a.process()
print "\n\033[94m[-] Hosts found after DNS brute force:"
print "---------------------------------------"
print("\n\033[94m[-] Hosts found after DNS brute force:")
print("---------------------------------------")
for y in res:
print y
print(y)
dnsres.append(y.split(':')[0])
if y not in full:
full.append(y)
@ -436,35 +434,35 @@ def start(argv):
host = x.split(':')[1]
domain = x.split(':')[0]
if host != "empty" :
print "- Scanning : " + host
print(("- Scanning : " + host))
ports = [80,443,22,8080,21]
try:
scan = port_scanner.port_scan(host,ports)
openports = scan.process()
if len(openports) > 1:
print "\t\033[91m Detected open ports: " + ','.join(str(e) for e in openports) + "\033[1;32;40m"
print(("\t\033[91m Detected open ports: " + ','.join(str(e) for e in openports) + "\033[1;32;40m"))
takeover_check = 'True'
if takeover_check == 'True':
if len(openports) > 0:
search_take = takeover.take_over(domain)
search_take.process()
except Exception, e:
print e
except Exception as e:
print(e)
#DNS reverse lookup#################################################
dnsrev = []
if dnslookup == True:
print "\n[+] Starting active queries:"
print("\n[+] Starting active queries:")
analyzed_ranges = []
for x in host_ip:
print x
print(x)
ip = x.split(":")[0]
range = ip.split(".")
range[3] = "0/24"
range = string.join(range, '.')
if not analyzed_ranges.count(range):
print "\033[94m[-]Performing reverse lookup in : " + range + "\033[1;33;40m"
print(("\033[94m[-]Performing reverse lookup in : " + range + "\033[1;33;40m"))
a = dnssearch.dns_reverse(range, True)
a.list()
res = a.process()
@ -476,29 +474,29 @@ def start(argv):
dnsrev.append(x)
if x not in full:
full.append(x)
print "Hosts found after reverse lookup (in target domain):"
print "---------------------------------"
print("Hosts found after reverse lookup (in target domain):")
print("---------------------------------")
for xh in dnsrev:
print xh
print(xh)
#DNS TLD expansion###################################################
dnstldres = []
if dnstld == True:
print "[-] Starting DNS TLD expansion:"
print("[-] Starting DNS TLD expansion:")
a = dnssearch.dns_tld(word, dnsserver, verbose=True)
res = a.process()
print "\n[+] Hosts found after DNS TLD expansion:"
print "------------------------------------------"
print("\n[+] Hosts found after DNS TLD expansion:")
print("------------------------------------------")
for y in res:
print y
print(y)
dnstldres.append(y)
if y not in full:
full.append(y)
#Virtual hosts search###############################################
if virtual == "basic":
print "\n[+] Virtual hosts:"
print "------------------"
print("\n[+] Virtual hosts:")
print("------------------")
for l in host_ip:
search = bingsearch.search_bing(l, limit, start)
search.process_vhost()
@ -507,7 +505,7 @@ def start(argv):
x = re.sub(r'[[\<\/?]*[\w]*>]*','',x)
x = re.sub('<','',x)
x = re.sub('>','',x)
print l + "\t" + x
print((l + "\t" + x))
vhost.append(l + ":" + x)
full.append(l + ":" + x)
vhost=sorted(set(vhost))
@ -522,7 +520,7 @@ def start(argv):
try:
ip = x.split(":")[1]
if not shodanvisited.count(ip):
print "\tSearching for: " + ip
print(("\tSearching for: " + ip))
a = shodansearch.search_shodan(ip)
shodanvisited.append(ip)
results = a.run()
@ -532,12 +530,12 @@ def start(argv):
res['info'] = ''
shodanres.append(
x + "SAPO" + str(res['info']) + "SAPO" + str(res['data']))
except:
except Exception:
pass
print "\n [+] Shodan results:"
print "------------------"
print("\n [+] Shodan results:")
print("------------------")
for x in shodanres:
print x.split("SAPO")[0] + ":" + x.split("SAPO")[1]
print((x.split("SAPO")[0] + ":" + x.split("SAPO")[1]))
else:
pass
@ -552,15 +550,15 @@ def start(argv):
search.process(google_dorking)
emails = search.get_emails()
hosts = search.get_hostnames()
print emails
print hosts
print(emails)
print(hosts)
else:
pass
#Reporting#######################################################
if filename != "":
try:
print "[+] Saving files..."
print("[+] Saving files...")
html = htmlExport.htmlExport(
all_emails,
full,
@ -573,8 +571,8 @@ def start(argv):
dnstldres)
save = html.writehtml()
except Exception as e:
print e
print "Error creating the file"
print(e)
print("Error creating the file")
try:
filename = filename.split(".")[0] + ".xml"
file = open(filename, 'w')
@ -629,15 +627,15 @@ def start(argv):
file.write('</theHarvester>')
file.flush()
file.close()
print "Files saved!"
print("Files saved!")
except Exception as er:
print "Error saving XML file: " + er
print(("Error saving XML file: " + er))
sys.exit()
if __name__ == "__main__":
try:
start(sys.argv[1:])
except KeyboardInterrupt:
print "Search interrupted by user.."
print("Search interrupted by user..")
except:
sys.exit()