Removed unused directory.

NotoriousRebel 2019-08-08 16:35:22 -04:00
parent 0ee2241cdc
commit 8df15e3f84
40 changed files with 0 additions and 5203 deletions

discovery/DNS/Base.py
@@ -1,363 +0,0 @@
"""
$Id: Base.py,v 1.12.2.4 2007/05/22 20:28:31 customdesigned Exp $
This file is part of the pydns project.
Homepage: http://pydns.sourceforge.net
This code is covered by the standard Python License.
Base functionality. Request and Response classes, that sort of thing.
"""
import socket
import time
from discovery.DNS import Type, Class, Opcode
import asyncore
class DNSError(Exception):
pass
defaults = {'protocol': 'udp', 'port': 53, 'opcode': Opcode.QUERY,
'qtype': Type.A, 'rd': 1, 'timing': 1, 'timeout': 30}
defaults['server'] = []
def ParseResolvConf(resolv_path='/etc/resolv.conf'):
    global defaults
    try:
        lines = open(resolv_path).readlines()
    except OSError:
        print('error in path ' + resolv_path)
        return  # 'lines' would be unbound below
for line in lines:
line = line.strip()
if not line or line[0] == ';' or line[0] == '#':
continue
fields = line.split()
if len(fields) < 2:
continue
if fields[0] == 'domain' and len(fields) > 1:
defaults['domain'] = fields[1]
if fields[0] == 'search':
pass
if fields[0] == 'options':
pass
if fields[0] == 'sortlist':
pass
if fields[0] == 'nameserver':
defaults['server'].append(fields[1])
def DiscoverNameServers():
import sys
if sys.platform in ('win32', 'nt'):
        from discovery.DNS import win32dns  # package-relative; a bare 'import win32dns' fails under Python 3
defaults['server'] = win32dns.RegistryResolve()
else:
return ParseResolvConf()
class DnsRequest:
""" high level Request object """
def __init__(self, *name, **args):
self.donefunc = None
#fix maybe?
self.asyn = False
#self.async = None #TODO FIX async is a keyword
self.defaults = {}
self.argparse(name, args)
self.defaults = self.args
def argparse(self, name, args):
if not name and 'name' in self.defaults:
args['name'] = self.defaults['name']
if isinstance(name, str):
args['name'] = name
else:
if len(name) == 1:
if name[0]:
args['name'] = name[0]
for i in defaults.keys():
if i not in args:
if i in self.defaults:
args[i] = self.defaults[i]
else:
args[i] = defaults[i]
if isinstance(args['server'], str):
args['server'] = [args['server']]
self.args = args
def socketInit(self, a, b):
self.s = socket.socket(a, b)
def processUDPReply(self):
import time
import select
if self.args['timeout'] > 0:
r, w, e = select.select([self.s], [], [], self.args['timeout'])
if not len(r):
raise DNSError('Timeout')
self.reply = self.s.recv(1024)
self.time_finish = time.time()
self.args['server'] = self.ns
return self.processReply()
def processTCPReply(self):
import time
from discovery.DNS import Lib
        self.f = self.s.makefile('rb')  # binary mode: unpack16bit() needs bytes
header = self.f.read(2)
if len(header) < 2:
raise DNSError('EOF')
count = Lib.unpack16bit(header)
self.reply = self.f.read(count)
if len(self.reply) != count:
raise DNSError('incomplete reply')
self.time_finish = time.time()
self.args['server'] = self.ns
return self.processReply()
def processReply(self):
from discovery.DNS import Lib
self.args['elapsed'] = (self.time_finish - self.time_start) * 1000
u = Lib.Munpacker(self.reply)
r = Lib.DnsResult(u, self.args)
r.args = self.args
# self.args=None # mark this DnsRequest object as used.
return r
#### TODO TODO TODO ####
# if protocol == 'tcp' and qtype == Type.AXFR:
# while 1:
# header = f.read(2)
# if len(header) < 2:
# print '========== EOF =========='
# break
# count = Lib.unpack16bit(header)
# if not count:
# print '========== ZERO COUNT =========='
# break
# print '========== NEXT =========='
# reply = f.read(count)
# if len(reply) != count:
# print '*** Incomplete reply ***'
# break
# u = Lib.Munpacker(reply)
# Lib.dumpM(u)
def conn(self):
self.s.connect((str(self.ns), self.port))
def req(self, *name, **args):
" needs a refactoring "
import time
from discovery.DNS import Lib
self.argparse(name, args)
# if not self.args:
# raise DNSError,'reinitialize request before reuse'
protocol = self.args['protocol']
self.port = self.args['port']
opcode = self.args['opcode']
rd = self.args['rd']
server = self.args['server']
if isinstance(self.args['qtype'], str):
try:
qtype = getattr(Type, str.upper(self.args['qtype']))
except AttributeError:
raise DNSError('unknown query type')
else:
qtype = self.args['qtype']
if 'name' not in self.args:
print(self.args)
raise DNSError('nothing to lookup')
qname = self.args['name']
if qtype == Type.AXFR:
print('Query type AXFR, protocol forced to TCP')
protocol = 'tcp'
# print 'QTYPE %d(%s)' % (qtype, Type.typestr(qtype))
m = Lib.Mpacker()
# jesus. keywords and default args would be good. TODO.
m.addHeader(0,
0, opcode, 0, 0, rd, 0, 0, 0,
1, 0, 0, 0)
m.addQuestion(qname, qtype, Class.IN)
self.request = m.getbuf()
try:
if protocol == 'udp':
self.sendUDPRequest(server)
else:
self.sendTCPRequest(server)
except socket.error as reason:
raise DNSError(reason)
if self.asyn:
return None
else:
return self.response
def sendUDPRequest(self, server):
"refactor me"
self.response = None
self.socketInit(socket.AF_INET, socket.SOCK_DGRAM)
for self.ns in server:
try:
# TODO. Handle timeouts &c correctly (RFC)
#self.s.connect((self.ns, self.port))
self.conn()
self.time_start = time.time()
if not self.asyn:
self.s.send(self.request)
self.response = self.processUDPReply()
# except socket.error:
except Exception as e:
print(e)
continue
break
if not self.response:
if not self.asyn:
raise DNSError('no working nameservers found')
def sendTCPRequest(self, server):
" do the work of sending a TCP request "
import time
import discovery.DNS.Lib as Lib
self.response = None
for self.ns in server:
try:
self.socketInit(socket.AF_INET, socket.SOCK_STREAM)
self.time_start = time.time()
self.conn()
self.s.send(Lib.pack16bit(len(self.request)) + self.request)
self.s.shutdown(1)
self.response = self.processTCPReply()
except socket.error:
continue
break
if not self.response:
raise DNSError('no working nameservers found')
# class DnsAsyncRequest(DnsRequest):
class DnsAsyncRequest(DnsRequest, asyncore.dispatcher_with_send):
" an asynchronous request object. out of date, probably broken "
def __init__(self, *name, **args):
DnsRequest.__init__(self, *name, **args)
# XXX todo
if 'done' in args and args['done']:
self.donefunc = args['done']
else:
self.donefunc = self.showResult
# self.realinit(name,args) # XXX todo
self.asyn = True
def conn(self):
import time
self.connect((self.ns, self.port))
self.time_start = time.time()
if 'start' in self.args and self.args['start']:
asyncore.dispatcher.go(self)
def socketInit(self, a, b):
self.create_socket(a, b)
asyncore.dispatcher.__init__(self)
self.s = self
def handle_read(self):
if self.args['protocol'] == 'udp':
self.response = self.processUDPReply()
if self.donefunc:
self.donefunc(*(self,))
def handle_connect(self):
self.send(self.request)
def handle_write(self):
pass
def showResult(self, *s):
self.response.show()
#
# $Log: Base.py,v $
# Revision 1.12.2.4 2007/05/22 20:28:31 customdesigned
# Missing import Lib
#
# Revision 1.12.2.3 2007/05/22 20:25:52 customdesigned
# Use socket.inetntoa,inetaton.
#
# Revision 1.12.2.2 2007/05/22 20:21:46 customdesigned
# Trap socket error
#
# Revision 1.12.2.1 2007/05/22 20:19:35 customdesigned
# Skip bogus but non-empty lines in resolv.conf
#
# Revision 1.12 2002/04/23 06:04:27 anthonybaxter
# attempt to refactor the DNSRequest.req method a little. after doing a bit
# of this, I've decided to bite the bullet and just rewrite the puppy. will
# be checkin in some design notes, then unit tests and then writing the sod.
#
# Revision 1.11 2002/03/19 13:05:02 anthonybaxter
# converted to class based exceptions (there goes the python1.4 compatibility :)
#
# removed a quite gross use of 'eval()'.
#
# Revision 1.10 2002/03/19 12:41:33 anthonybaxter
# tabnannied and reindented everything. 4 space indent, no tabs.
# yay.
#
# Revision 1.9 2002/03/19 12:26:13 anthonybaxter
# death to leading tabs.
#
# Revision 1.8 2002/03/19 10:30:33 anthonybaxter
# first round of major bits and pieces. The major stuff here (summarised
# from my local, off-net CVS server :/ this will cause some oddities with
# the
#
# tests/testPackers.py:
# a large slab of unit tests for the packer and unpacker code in DNS.Lib
#
# DNS/Lib.py:
# placeholder for addSRV.
# added 'klass' to addA, make it the same as the other A* records.
# made addTXT check for being passed a string, turn it into a length 1 list.
# explicitly check for adding a string of length > 255 (prohibited).
# a bunch of cleanups from a first pass with pychecker
# new code for pack/unpack. the bitwise stuff uses struct, for a smallish
# (disappointly small, actually) improvement, while addr2bin is much
# much faster now.
#
# DNS/Base.py:
# added DiscoverNameServers. This automatically does the right thing
# on unix/ win32. No idea how MacOS handles this. *sigh*
# Incompatible change: Don't use ParseResolvConf on non-unix, use this
# function, instead!
# a bunch of cleanups from a first pass with pychecker
#
# Revision 1.5 2001/08/09 09:22:28 anthonybaxter
# added what I hope is win32 resolver lookup support. I'll need to try
# and figure out how to get the CVS checkout onto my windows machine to
# make sure it works (wow, doing something other than games on the
# windows machine :)
#
# Code from Wolfgang.Strobl@gmd.de
# win32dns.py from
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66260
#
# Really, ParseResolvConf() should be renamed "FindNameServers" or
# some such.
#
# Revision 1.4 2001/08/09 09:08:55 anthonybaxter
# added identifying header to top of each file
#
# Revision 1.3 2001/07/19 07:20:12 anthony
# Handle blank resolv.conf lines.
# Patch from Bastian Kleineidam
#
# Revision 1.2 2001/07/19 06:57:07 anthony
# cvs keywords added
#
#
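A minimal sketch of how this removed DnsRequest class was typically driven (assuming the package is importable as discovery.DNS; the query target is made up):

from discovery.DNS import Base

Base.DiscoverNameServers()        # fills defaults['server'] from resolv.conf or the registry
req = Base.DnsRequest(name='example.com', qtype='A', timeout=10)
res = req.req()                   # a Lib.DnsResult; raises DNSError if no nameserver answers
for ans in res.answers:
    print(ans['name'], ans['data'])   # each answer is a dict built by storeRR()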

discovery/DNS/Class.py
@@ -1,58 +0,0 @@
"""
$Id: Class.py,v 1.6 2002/04/23 12:52:19 anthonybaxter Exp $
This file is part of the pydns project.
Homepage: http://pydns.sourceforge.net
This code is covered by the standard Python License.
CLASS values (section 3.2.4)
"""
IN = 1 # the Internet
CS = 2 # the CSNET class (Obsolete - used only for examples in
# some obsolete RFCs)
CH = 3 # the CHAOS class. When someone shows me python running on
# a Symbolics Lisp machine, I'll look at implementing this.
HS = 4 # Hesiod [Dyer 87]
# QCLASS values (section 3.2.5)
ANY = 255 # any class
# Construct reverse mapping dictionary
_names = dir()
classmap = {}
for _name in _names:
if _name[0] != '_':
classmap[eval(_name)] = _name
def classstr(klass):
if klass in classmap:
return classmap[klass]
else:
return repr(klass)
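The _names = dir() / eval() loop above builds the value-to-name map by evaluating every public identifier in the module. A sketch of an eval-free equivalent using globals() (an alternative idiom, not what the removed file did):

classmap = {value: name
            for name, value in globals().items()
            if not name.startswith('_') and isinstance(value, int)}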
#
# $Log: Class.py,v $
# Revision 1.6 2002/04/23 12:52:19 anthonybaxter
# cleanup whitespace.
#
# Revision 1.5 2002/03/19 12:41:33 anthonybaxter
# tabnannied and reindented everything. 4 space indent, no tabs.
# yay.
#
# Revision 1.4 2002/03/19 12:26:13 anthonybaxter
# death to leading tabs.
#
# Revision 1.3 2001/08/09 09:08:55 anthonybaxter
# added identifying header to top of each file
#
# Revision 1.2 2001/07/19 06:57:07 anthony
# cvs keywords added
#
#

discovery/DNS/Lib.py
@@ -1,764 +0,0 @@
# -*- encoding: utf-8 -*-
"""
$Id: Lib.py,v 1.11.2.3 2007/05/22 20:27:40 customdesigned Exp $
This file is part of the pydns project.
Homepage: http://pydns.sourceforge.net
This code is covered by the standard Python License.
Library code. Largely this is packers and unpackers for various types.
"""
#
#
# See RFC 1035:
# ------------------------------------------------------------------------
# Network Working Group P. Mockapetris
# Request for Comments: 1035 ISI
# November 1987
# Obsoletes: RFCs 882, 883, 973
#
# DOMAIN NAMES - IMPLEMENTATION AND SPECIFICATION
# ------------------------------------------------------------------------
from discovery.DNS import Type, Class, Opcode, Status
from discovery.DNS.Base import DNSError
class UnpackError(DNSError):
pass
class PackError(DNSError):
pass
# Low-level 16 and 32 bit integer packing and unpacking
from struct import pack as struct_pack
from struct import unpack as struct_unpack
from socket import inet_ntoa, inet_aton
def pack16bit(n):
return struct_pack('!H', n)
def pack32bit(n):
return struct_pack('!L', n)
def unpack16bit(s):
return struct_unpack('!H', s)[0]
def unpack32bit(s):
return struct_unpack('!L', s)[0]
def addr2bin(addr):
    # '!L' (unsigned) so the result round-trips through pack32bit()
    return struct_unpack('!L', inet_aton(addr))[0]
def bin2addr(n):
return inet_ntoa(struct_pack('!L', n))
# Packing class
class Packer:
" packer base class. supports basic byte/16bit/32bit/addr/string/name "
def __init__(self):
self.buf = b''
self.index = {}
def getbuf(self):
return self.buf
    def addbyte(self, c):
        if len(c) != 1:
            raise TypeError('one character expected')
        if isinstance(c, str):
            c = bytes([ord(c)])  # callers pass chr(n); buf is bytes
        self.buf = self.buf + c
def addbytes(self, bytes):
self.buf = self.buf + bytes
def add16bit(self, n):
self.buf = self.buf + pack16bit(n)
def add32bit(self, n):
self.buf = self.buf + pack32bit(n)
def addaddr(self, addr):
n = addr2bin(addr)
self.buf = self.buf + pack32bit(n)
    def addstring(self, s):
        if isinstance(s, str):
            s = bytes(s, encoding='utf-8')  # buf is bytes
        if len(s) > 255:
            raise ValueError("Can't encode string of length "
                             "%s (> 255)" % (len(s)))
        self.addbyte(chr(len(s)))
        self.addbytes(s)
def addname(self, name):
# Domain name packing (section 4.1.4)
# Add a domain name to the buffer, possibly using pointers.
# The case of the first occurrence of a name is preserved.
# Redundant dots are ignored.
list = []
for label in name.split('.'):
if label:
if len(label) > 63:
raise PackError('label too long')
list.append(label)
keys = []
s = ''
for i in range(len(list)):
key = str.upper((s.join(list[i:])))
keys.append(key)
if key in self.index:
pointer = self.index[key]
break
else:
i = len(list)
pointer = None
        # Do it into temporaries first so exceptions don't
        # mess up self.index and self.buf.
        # Build the temporary as bytes: mixing str into the bytes
        # buffer would raise a TypeError under Python 3.
        buf = b''
        offset = len(self.buf)
        index = []
        for j in range(i):
            label = list[j]
            n = len(label)
            if offset + len(buf) < 0x3FFF:
                index.append((keys[j], offset + len(buf)))
            else:
                print('DNS.Lib.Packer.addname:')
                print('warning: pointer too big')
            buf = buf + bytes([n]) + bytes(label, encoding='utf-8')
        if pointer:
            buf = buf + pack16bit(pointer | 0xC000)
        else:
            buf = buf + b'\0'
        self.buf = self.buf + buf
for key, value in index:
self.index[key] = value
def dump(self):
keys = sorted(self.index.keys())
print('-' * 40)
for key in keys:
print('%20s %3d' % (key, self.index[key]))
print('-' * 40)
space = 1
        for i in range(0, len(self.buf) + 1, 2):
            if self.buf[i:i + 2] == b'**':
                if not space:
                    print()
                space = 1
                continue
            space = 0
            print('%4d' % i)
            for c in self.buf[i:i + 2]:
                # iterating bytes yields ints in Python 3
                if ord(' ') < c < 0o177:
                    print(' %c' % c)
                else:
                    print('%2d' % c)
            print()
print('-' * 40)
# Unpacking class
class Unpacker:
def __init__(self, buf):
self.buf = buf
self.offset = 0
def getbyte(self):
if self.offset >= len(self.buf):
raise UnpackError("Ran off end of data")
c = self.buf[self.offset]
self.offset = self.offset + 1
return c
def getbytes(self, n):
s = self.buf[self.offset: self.offset + n]
if len(s) != n:
raise UnpackError('not enough data left')
self.offset = self.offset + n
return s
def get16bit(self):
return unpack16bit(self.getbytes(2))
def get32bit(self):
return unpack32bit(self.getbytes(4))
def getaddr(self):
return bin2addr(self.get32bit())
    def getstring(self):
        return self.getbytes(self.getbyte())  # length byte is already an int
def getname(self):
# Domain name unpacking (section 4.1.4)
        i = self.getbyte()  # already an int; ord(chr(...)) was a redundant round-trip
        if i & 0xC0 == 0xC0:
            j = self.getbyte()
            pointer = ((i << 8) | j) & ~0xC000
save_offset = self.offset
try:
self.offset = pointer
domain = self.getname()
finally:
self.offset = save_offset
return domain
if i == 0:
return ''
domain = self.getbytes(i).decode('UTF-8')
remains = self.getname()
if not remains:
return domain
else:
return domain + '.' + remains
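A round-trip illustration of the section 4.1.4 name compression implemented by addname() and getname() (a sketch, assuming the fixed Packer/Unpacker above are imported from discovery.DNS.Lib):

p = Packer()
p.addname('f.ISI.ARPA')       # written in full; suffix offsets recorded in p.index
p.addname('Foo.F.ISI.ARPA')   # emits 'Foo' plus a 0xC000 pointer to the suffix above
u = Unpacker(p.getbuf())
print(u.getname())            # f.ISI.ARPA
print(u.getname())            # Foo.f.ISI.ARPA -- the pointer reuses the first spelling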
# Test program for packing/unpacking (section 4.1.4)
def testpacker():
    N = 2500
    R = range(N)
    import time  # the old py2-only 'timing' module is gone; use wall-clock time
    # See section 4.1.4 of RFC 1035
    t0 = time.time()
    for i in R:
        p = Packer()
        p.addaddr('192.168.0.1')
        p.addbytes(b'*' * 20)
        p.addname('f.ISI.ARPA')
        p.addbytes(b'*' * 8)
        p.addname('Foo.F.isi.arpa')
        p.addbytes(b'*' * 18)
        p.addname('arpa')
        p.addbytes(b'*' * 26)
        p.addname('')
    ms = (time.time() - t0) * 1000
    print(ms, "ms total for packing")
    print(round(ms / N, 4), 'ms per packing')
    # p.dump()
    u = Unpacker(p.buf)
    u.getaddr()
    u.getbytes(20)
    u.getname()
    u.getbytes(8)
    u.getname()
    u.getbytes(18)
    u.getname()
    u.getbytes(26)
    u.getname()
    t0 = time.time()
    for i in R:
        u = Unpacker(p.buf)
        res = (u.getaddr(),
               u.getbytes(20),
               u.getname(),
               u.getbytes(8),
               u.getname(),
               u.getbytes(18),
               u.getname(),
               u.getbytes(26),
               u.getname())
    ms = (time.time() - t0) * 1000
    print(ms, "ms total for unpacking")
    print(round(ms / N, 4), 'ms per unpacking')
    # for item in res: print(item)
# Pack/unpack RR toplevel format (section 3.2.1)
class RRpacker(Packer):
def __init__(self):
Packer.__init__(self)
self.rdstart = None
def addRRheader(self, name, type, klass, ttl, *rest):
self.addname(name)
self.add16bit(type)
self.add16bit(klass)
self.add32bit(ttl)
if rest:
if rest[1:]:
raise TypeError('too many args')
rdlength = rest[0]
else:
rdlength = 0
self.add16bit(rdlength)
self.rdstart = len(self.buf)
def patchrdlength(self):
rdlength = unpack16bit(self.buf[self.rdstart - 2:self.rdstart])
if rdlength == len(self.buf) - self.rdstart:
return
rdata = self.buf[self.rdstart:]
save_buf = self.buf
ok = 0
try:
self.buf = self.buf[:self.rdstart - 2]
self.add16bit(len(rdata))
self.buf = self.buf + rdata
ok = 1
finally:
if not ok:
self.buf = save_buf
def endRR(self):
if self.rdstart is not None:
self.patchrdlength()
self.rdstart = None
def getbuf(self):
if self.rdstart is not None:
self.patchrdlength()
return Packer.getbuf(self)
# Standard RRs (section 3.3)
def addCNAME(self, name, klass, ttl, cname):
self.addRRheader(name, Type.CNAME, klass, ttl)
self.addname(cname)
self.endRR()
def addHINFO(self, name, klass, ttl, cpu, os):
self.addRRheader(name, Type.HINFO, klass, ttl)
self.addstring(cpu)
self.addstring(os)
self.endRR()
def addMX(self, name, klass, ttl, preference, exchange):
self.addRRheader(name, Type.MX, klass, ttl)
self.add16bit(preference)
self.addname(exchange)
self.endRR()
def addNS(self, name, klass, ttl, nsdname):
self.addRRheader(name, Type.NS, klass, ttl)
self.addname(nsdname)
self.endRR()
def addPTR(self, name, klass, ttl, ptrdname):
self.addRRheader(name, Type.PTR, klass, ttl)
self.addname(ptrdname)
self.endRR()
def addSOA(self, name, klass, ttl,
mname, rname, serial, refresh, retry, expire, minimum):
self.addRRheader(name, Type.SOA, klass, ttl)
self.addname(mname)
self.addname(rname)
self.add32bit(serial)
self.add32bit(refresh)
self.add32bit(retry)
self.add32bit(expire)
self.add32bit(minimum)
self.endRR()
def addTXT(self, name, klass, ttl, list):
self.addRRheader(name, Type.TXT, klass, ttl)
if isinstance(list, str):
list = [list]
for txtdata in list:
self.addstring(txtdata)
self.endRR()
# Internet specific RRs (section 3.4) -- class = IN
def addA(self, name, klass, ttl, address):
self.addRRheader(name, Type.A, klass, ttl)
self.addaddr(address)
self.endRR()
def addWKS(self, name, ttl, address, protocol, bitmap):
self.addRRheader(name, Type.WKS, Class.IN, ttl)
self.addaddr(address)
self.addbyte(chr(protocol))
self.addbytes(bitmap)
self.endRR()
def addSRV(self):
raise NotImplementedError
def prettyTime(seconds):
if seconds < 60:
return seconds, "%d seconds" % (seconds)
if seconds < 3600:
return seconds, "%d minutes" % (seconds / 60)
if seconds < 86400:
return seconds, "%d hours" % (seconds / 3600)
if seconds < 604800:
return seconds, "%d days" % (seconds / 86400)
else:
return seconds, "%d weeks" % (seconds / 604800)
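prettyTime() picks the first bucket whose bound exceeds the value and integer-formats the quotient, for example:

print(prettyTime(45))      # (45, '45 seconds')
print(prettyTime(7200))    # (7200, '2 hours')
print(prettyTime(86400))   # (86400, '1 days') -- no singular/plural handling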
class RRunpacker(Unpacker):
def __init__(self, buf):
Unpacker.__init__(self, buf)
self.rdend = None
def getRRheader(self):
name = self.getname()
rrtype = self.get16bit()
klass = self.get16bit()
ttl = self.get32bit()
rdlength = self.get16bit()
self.rdend = self.offset + rdlength
return (name, rrtype, klass, ttl, rdlength)
def endRR(self):
if self.offset != self.rdend:
raise UnpackError('end of RR not reached')
def getCNAMEdata(self):
return self.getname()
def getHINFOdata(self):
return self.getstring(), self.getstring()
def getMXdata(self):
return self.get16bit(), self.getname()
def getNSdata(self):
return self.getname()
def getPTRdata(self):
return self.getname()
def getSOAdata(self):
return self.getname(), \
self.getname(), \
('serial',) + (self.get32bit(),), \
('refresh ',) + prettyTime(self.get32bit()), \
('retry',) + prettyTime(self.get32bit()), \
('expire',) + prettyTime(self.get32bit()), \
('minimum',) + prettyTime(self.get32bit())
def getTXTdata(self):
list = []
while self.offset != self.rdend:
list.append(self.getstring())
return list
def getAdata(self):
return self.getaddr()
    def getWKSdata(self):
        address = self.getaddr()
        protocol = self.getbyte()  # already an int; ord() would raise here
        bitmap = self.getbytes(self.rdend - self.offset)
        return address, protocol, bitmap
def getSRVdata(self):
"""
_Service._Proto.Name TTL Class SRV Priority Weight Port Target
"""
priority = self.get16bit()
weight = self.get16bit()
port = self.get16bit()
target = self.getname()
# print '***priority, weight, port, target', priority, weight, port,
# target
return priority, weight, port, target
# Pack/unpack Message Header (section 4.1)
class Hpacker(Packer):
def addHeader(self, id, qr, opcode, aa, tc, rd, ra, z, rcode,
qdcount, ancount, nscount, arcount):
self.add16bit(id)
self.add16bit((qr & 1) << 15 | (opcode & 0xF) << 11 | (aa & 1) << 10
| (tc & 1) << 9 | (rd & 1) << 8 | (ra & 1) << 7
| (z & 7) << 4 | (rcode & 0xF))
self.add16bit(qdcount)
self.add16bit(ancount)
self.add16bit(nscount)
self.add16bit(arcount)
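A worked example of the flag word addHeader() lays out: the recursive QUERY sent by DnsRequest.req() (qr=0, opcode=0, rd=1, everything else 0) packs to 0x0100, i.e. only bit 8 (RD) is set:

flags = (0 & 1) << 15 | (0 & 0xF) << 11 | (0 & 1) << 10 \
    | (0 & 1) << 9 | (1 & 1) << 8 | (0 & 1) << 7 \
    | (0 & 7) << 4 | (0 & 0xF)
assert flags == 0x0100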
class Hunpacker(Unpacker):
def getHeader(self):
id = self.get16bit()
flags = self.get16bit()
qr, opcode, aa, tc, rd, ra, z, rcode = (
(flags >> 15) & 1,
(flags >> 11) & 0xF,
(flags >> 10) & 1,
(flags >> 9) & 1,
(flags >> 8) & 1,
(flags >> 7) & 1,
(flags >> 4) & 7,
(flags >> 0) & 0xF)
qdcount = self.get16bit()
ancount = self.get16bit()
nscount = self.get16bit()
arcount = self.get16bit()
return (id, qr, opcode, aa, tc, rd, ra, z, rcode,
qdcount, ancount, nscount, arcount)
# Pack/unpack Question (section 4.1.2)
class Qpacker(Packer):
def addQuestion(self, qname, qtype, qclass):
self.addname(qname)
self.add16bit(qtype)
self.add16bit(qclass)
class Qunpacker(Unpacker):
def getQuestion(self):
return self.getname(), self.get16bit(), self.get16bit()
# Pack/unpack Message(section 4)
# NB the order of the base classes is important for __init__()!
class Mpacker(RRpacker, Qpacker, Hpacker):
pass
class Munpacker(RRunpacker, Qunpacker, Hunpacker):
pass
# Routines to print an unpacker to stdout, for debugging.
# These affect the unpacker's current position!
def dumpM(u):
print('HEADER:',)
(id, qr, opcode, aa, tc, rd, ra, z, rcode,
qdcount, ancount, nscount, arcount) = u.getHeader()
print('id=%d,' % id,)
print('qr=%d, opcode=%d, aa=%d, tc=%d, rd=%d, ra=%d, z=%d, rcode=%d,' \
% (qr, opcode, aa, tc, rd, ra, z, rcode))
if tc:
print('*** response truncated! ***')
if rcode:
print('*** nonzero error code! (%d) ***' % rcode)
print(' qdcount=%d, ancount=%d, nscount=%d, arcount=%d' \
% (qdcount, ancount, nscount, arcount))
for i in range(qdcount):
print('QUESTION %d:' % i,)
dumpQ(u)
for i in range(ancount):
print('ANSWER %d:' % i,)
dumpRR(u)
for i in range(nscount):
print('AUTHORITY RECORD %d:' % i,)
dumpRR(u)
for i in range(arcount):
print('ADDITIONAL RECORD %d:' % i,)
dumpRR(u)
class DnsResult:
def __init__(self, u, args):
self.header = {}
self.questions = []
self.answers = []
self.authority = []
self.additional = []
self.args = args
self.storeM(u)
def show(self):
import time
print('; <<>> PDG.py 1.0 <<>> %s %s' % (self.args['name'],
self.args['qtype']))
opt = ""
if self.args['rd']:
opt = opt + 'recurs '
h = self.header
print(';; options: ' + opt)
print(';; got answer:')
print(';; ->>HEADER<<- opcode %s, status %s, id %d' % (
h['opcode'], h['status'], h['id']))
flags = filter(lambda x, h=h: h[x], ('qr', 'aa', 'rd', 'ra', 'tc'))
print(';; flags: %s; Ques: %d, Ans: %d, Auth: %d, Addit: %d' % (
''.join(map(str,flags)), h['qdcount'], h['ancount'], h['nscount'],
h['arcount']))
print(';; QUESTIONS:')
for q in self.questions:
print(';; %s, type = %s, class = %s' % (q['qname'], q['qtypestr'],
q['qclassstr']))
print()
print(';; ANSWERS:')
for a in self.answers:
print('%-20s %-6s %-6s %s' % (a['name'], repr(a['ttl']), a['typename'],
a['data']))
print()
print(';; AUTHORITY RECORDS:')
for a in self.authority:
print('%-20s %-6s %-6s %s' % (a['name'], repr(a['ttl']), a['typename'],
a['data']))
print()
print(';; ADDITIONAL RECORDS:')
for a in self.additional:
print('%-20s %-6s %-6s %s' % (a['name'], repr(a['ttl']), a['typename'],
a['data']))
print()
if 'elapsed' in self.args:
print(';; Total query time: %d msec' % self.args['elapsed'])
print(';; To SERVER: %s' % (self.args['server']))
print(';; WHEN: %s' % time.ctime(time.time()))
def storeM(self, u):
(self.header['id'], self.header['qr'], self.header['opcode'],
self.header['aa'], self.header['tc'], self.header['rd'],
self.header['ra'], self.header['z'], self.header['rcode'],
self.header['qdcount'], self.header['ancount'],
self.header['nscount'], self.header['arcount']) = u.getHeader()
self.header['opcodestr'] = Opcode.opcodestr(self.header['opcode'])
self.header['status'] = Status.statusstr(self.header['rcode'])
for i in range(self.header['qdcount']):
# print 'QUESTION %d:' % i,
self.questions.append(self.storeQ(u))
for i in range(self.header['ancount']):
# print 'ANSWER %d:' % i,
self.answers.append(self.storeRR(u))
for i in range(self.header['nscount']):
# print 'AUTHORITY RECORD %d:' % i,
self.authority.append(self.storeRR(u))
for i in range(self.header['arcount']):
# print 'ADDITIONAL RECORD %d:' % i,
self.additional.append(self.storeRR(u))
def storeQ(self, u):
q = {}
q['qname'], q['qtype'], q['qclass'] = u.getQuestion()
q['qtypestr'] = Type.typestr(q['qtype'])
q['qclassstr'] = Class.classstr(q['qclass'])
return q
def storeRR(self, u):
r = {}
r['name'], r['type'], r['class'], r[
'ttl'], r['rdlength'] = u.getRRheader()
r['typename'] = Type.typestr(r['type'])
r['classstr'] = Class.classstr(r['class'])
# print 'name=%s, type=%d(%s), class=%d(%s), ttl=%d' \
# % (name,
# type, typename,
# klass, Class.classstr(class),
# ttl)
mname = 'get%sdata' % r['typename']
if hasattr(u, mname):
r['data'] = getattr(u, mname)()
else:
r['data'] = u.getbytes(r['rdlength'])
return r
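storeRR() dispatches on the record's type name: 'get%sdata' % typename selects the matching RRunpacker method, and unknown types fall back to raw rdata bytes. For instance:

u = RRunpacker(b'')
print(hasattr(u, 'get%sdata' % 'MX'))   # True: MX rdata decodes to (preference, exchange)
print(hasattr(u, 'getAAAAdata'))        # False: AAAA answers come back as raw bytes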
def dumpQ(u):
qname, qtype, qclass = u.getQuestion()
print('qname=%s, qtype=%d(%s), qclass=%d(%s)' \
% (qname,
qtype, Type.typestr(qtype),
qclass, Class.classstr(qclass)))
def dumpRR(u):
name, type, klass, ttl, rdlength = u.getRRheader()
typename = Type.typestr(type)
print('name=%s, type=%d(%s), class=%d(%s), ttl=%d' \
% (name,
type, typename,
klass, Class.classstr(klass),
ttl))
mname = 'get%sdata' % typename
if hasattr(u, mname):
print(' formatted rdata:', getattr(u, mname)())
else:
print(' binary rdata:', u.getbytes(rdlength))
if __name__ == "__main__":
testpacker()
#
# $Log: Lib.py,v $
# Revision 1.11.2.3 2007/05/22 20:27:40 customdesigned
# Fix unpacker underflow.
#
# Revision 1.11.2.2 2007/05/22 20:25:53 customdesigned
# Use socket.inetntoa,inetaton.
#
# Revision 1.11.2.1 2007/05/22 20:20:39 customdesigned
# Mark utf-8 encoding
#
# Revision 1.11 2002/03/19 13:05:02 anthonybaxter
# converted to class based exceptions (there goes the python1.4 compatibility :)
#
# removed a quite gross use of 'eval()'.
#
# Revision 1.10 2002/03/19 12:41:33 anthonybaxter
# tabnannied and reindented everything. 4 space indent, no tabs.
# yay.
#
# Revision 1.9 2002/03/19 10:30:33 anthonybaxter
# first round of major bits and pieces. The major stuff here (summarised
# from my local, off-net CVS server :/ this will cause some oddities with
# the
#
# tests/testPackers.py:
# a large slab of unit tests for the packer and unpacker code in DNS.Lib
#
# DNS/Lib.py:
# placeholder for addSRV.
# added 'klass' to addA, make it the same as the other A* records.
# made addTXT check for being passed a string, turn it into a length 1 list.
# explicitly check for adding a string of length > 255 (prohibited).
# a bunch of cleanups from a first pass with pychecker
# new code for pack/unpack. the bitwise stuff uses struct, for a smallish
# (disappointly small, actually) improvement, while addr2bin is much
# much faster now.
#
# DNS/Base.py:
# added DiscoverNameServers. This automatically does the right thing
# on unix/ win32. No idea how MacOS handles this. *sigh*
# Incompatible change: Don't use ParseResolvConf on non-unix, use this
# function, instead!
# a bunch of cleanups from a first pass with pychecker
#
# Revision 1.8 2001/08/09 09:08:55 anthonybaxter
# added identifying header to top of each file
#
# Revision 1.7 2001/07/19 07:50:44 anthony
# Added SRV (RFC 2782) support. Code from Michael Ströder.
#
# Revision 1.6 2001/07/19 07:39:18 anthony
# 'type' -> 'rrtype' in getRRheader(). Fix from Michael Ströder.
#
# Revision 1.5 2001/07/19 07:34:19 anthony
# oops. glitch in storeRR (fixed now).
# Reported by Bastian Kleineidam and by greg lin.
#
# Revision 1.4 2001/07/19 07:16:42 anthony
# Changed (opcode&0xF)<<11 to (opcode*0xF)<<11.
# Patch from Timothy J. Miller.
#
# Revision 1.3 2001/07/19 06:57:07 anthony
# cvs keywords added
#
#

discovery/DNS/Opcode.py
@@ -1,52 +0,0 @@
"""
$Id: Opcode.py,v 1.6 2002/04/23 10:51:43 anthonybaxter Exp $
This file is part of the pydns project.
Homepage: http://pydns.sourceforge.net
This code is covered by the standard Python License.
Opcode values in message header. RFC 1035, 1996, 2136.
"""
QUERY = 0
IQUERY = 1
STATUS = 2
NOTIFY = 4
UPDATE = 5
# Construct reverse mapping dictionary
_names = dir()
opcodemap = {}
for _name in _names:
if _name[0] != '_':
opcodemap[eval(_name)] = _name
def opcodestr(opcode):
if opcode in opcodemap:
return opcodemap[opcode]
else:
return repr(opcode)
#
# $Log: Opcode.py,v $
# Revision 1.6 2002/04/23 10:51:43 anthonybaxter
# Added UPDATE, NOTIFY.
#
# Revision 1.5 2002/03/19 12:41:33 anthonybaxter
# tabnannied and reindented everything. 4 space indent, no tabs.
# yay.
#
# Revision 1.4 2002/03/19 12:26:13 anthonybaxter
# death to leading tabs.
#
# Revision 1.3 2001/08/09 09:08:55 anthonybaxter
# added identifying header to top of each file
#
# Revision 1.2 2001/07/19 06:57:07 anthony
# cvs keywords added
#
#

discovery/DNS/Status.py
@@ -1,67 +0,0 @@
"""
$Id: Status.py,v 1.7 2002/04/23 12:52:19 anthonybaxter Exp $
This file is part of the pydns project.
Homepage: http://pydns.sourceforge.net
This code is covered by the standard Python License.
Status values in message header
"""
NOERROR = 0 # No Error [RFC 1035]
FORMERR = 1 # Format Error [RFC 1035]
SERVFAIL = 2 # Server Failure [RFC 1035]
NXDOMAIN = 3 # Non-Existent Domain [RFC 1035]
NOTIMP = 4 # Not Implemented [RFC 1035]
REFUSED = 5 # Query Refused [RFC 1035]
YXDOMAIN = 6 # Name Exists when it should not [RFC 2136]
YXRRSET = 7 # RR Set Exists when it should not [RFC 2136]
NXRRSET = 8 # RR Set that should exist does not [RFC 2136]
NOTAUTH = 9 # Server Not Authoritative for zone [RFC 2136]
NOTZONE = 10 # Name not contained in zone [RFC 2136]
BADVERS = 16 # Bad OPT Version [RFC 2671]
BADSIG = 16 # TSIG Signature Failure [RFC 2845]
BADKEY = 17 # Key not recognized [RFC 2845]
BADTIME = 18 # Signature out of time window [RFC 2845]
BADMODE = 19 # Bad TKEY Mode [RFC 2930]
BADNAME = 20 # Duplicate key name [RFC 2930]
BADALG = 21 # Algorithm not supported [RFC 2930]
# Construct reverse mapping dictionary
_names = dir()
statusmap = {}
for _name in _names:
if _name[0] != '_':
statusmap[eval(_name)] = _name
def statusstr(status):
if status in statusmap:
return statusmap[status]
else:
return repr(status)
#
# $Log: Status.py,v $
# Revision 1.7 2002/04/23 12:52:19 anthonybaxter
# cleanup whitespace.
#
# Revision 1.6 2002/04/23 10:57:57 anthonybaxter
# update to complete the list of response codes.
#
# Revision 1.5 2002/03/19 12:41:33 anthonybaxter
# tabnannied and reindented everything. 4 space indent, no tabs.
# yay.
#
# Revision 1.4 2002/03/19 12:26:13 anthonybaxter
# death to leading tabs.
#
# Revision 1.3 2001/08/09 09:08:55 anthonybaxter
# added identifying header to top of each file
#
# Revision 1.2 2001/07/19 06:57:07 anthony
# cvs keywords added
#
#

discovery/DNS/Type.py
@@ -1,80 +0,0 @@
# -*- encoding: utf-8 -*-
"""
$Id: Type.py,v 1.6.2.1 2007/05/22 20:20:39 customdesigned Exp $
This file is part of the pydns project.
Homepage: http://pydns.sourceforge.net
This code is covered by the standard Python License.
TYPE values (section 3.2.2)
"""
A = 1 # a host address
NS = 2 # an authoritative name server
MD = 3 # a mail destination (Obsolete - use MX)
MF = 4 # a mail forwarder (Obsolete - use MX)
CNAME = 5 # the canonical name for an alias
SOA = 6 # marks the start of a zone of authority
MB = 7 # a mailbox domain name (EXPERIMENTAL)
MG = 8 # a mail group member (EXPERIMENTAL)
MR = 9 # a mail rename domain name (EXPERIMENTAL)
NULL = 10 # a null RR (EXPERIMENTAL)
WKS = 11 # a well known service description
PTR = 12 # a domain name pointer
HINFO = 13 # host information
MINFO = 14 # mailbox or mail list information
MX = 15 # mail exchange
TXT = 16 # text strings
AAAA = 28 # IPv6 AAAA records (RFC 1886)
SRV = 33 # DNS RR for specifying the location of services (RFC 2782)
# Additional TYPE values from host.c source
UNAME = 110
MP = 240
# QTYPE values (section 3.2.3)
AXFR = 252 # A request for a transfer of an entire zone
MAILB = 253 # A request for mailbox-related records (MB, MG or MR)
MAILA = 254 # A request for mail agent RRs (Obsolete - see MX)
ANY = 255 # A request for all records
# Construct reverse mapping dictionary
_names = dir()
typemap = {}
for _name in _names:
if _name[0] != '_':
typemap[eval(_name)] = _name
def typestr(type):
if type in typemap:
return typemap[type]
else:
return repr(type)
#
# $Log: Type.py,v $
# Revision 1.6.2.1 2007/05/22 20:20:39 customdesigned
# Mark utf-8 encoding
#
# Revision 1.6 2002/03/19 12:41:33 anthonybaxter
# tabnannied and reindented everything. 4 space indent, no tabs.
# yay.
#
# Revision 1.5 2002/03/19 12:26:13 anthonybaxter
# death to leading tabs.
#
# Revision 1.4 2001/08/09 09:08:55 anthonybaxter
# added identifying header to top of each file
#
# Revision 1.3 2001/07/19 07:38:28 anthony
# added type code for SRV. From Michael Ströder.
#
# Revision 1.2 2001/07/19 06:57:07 anthony
# cvs keywords added
#
#

discovery/DNS/__init__.py
@@ -1,60 +0,0 @@
# -*- encoding: utf-8 -*-
# $Id: __init__.py,v 1.8.2.2 2007/05/22 21:06:52 customdesigned Exp $
#
# This file is part of the pydns project.
# Homepage: http://pydns.sourceforge.net
#
# This code is covered by the standard Python License.
#
# __init__.py for DNS class.
__version__ = '2.3.1'
from discovery.DNS import Type
from discovery.DNS import Opcode
from discovery.DNS import Status
from discovery.DNS import Class
from discovery.DNS.Base import DnsRequest, DNSError
from discovery.DNS.Lib import DnsResult
from discovery.DNS.Base import *
from discovery.DNS.Lib import *
Error = DNSError
from discovery.DNS.lazy import *
Request = DnsRequest
Result = DnsResult
#
# $Log: __init__.py,v $
# Revision 1.8.2.2 2007/05/22 21:06:52 customdesigned
# utf-8 in __init__.py
#
# Revision 1.8.2.1 2007/05/22 20:39:20 customdesigned
# Release 2.3.1
#
# Revision 1.8 2002/05/06 06:17:49 anthonybaxter
# found that the old README file called itself release 2.2. So make
# this one 2.3...
#
# Revision 1.7 2002/05/06 06:16:15 anthonybaxter
# make some sort of reasonable version string. releasewards ho!
#
# Revision 1.6 2002/03/19 13:05:02 anthonybaxter
# converted to class based exceptions (there goes the python1.4 compatibility :)
#
# removed a quite gross use of 'eval()'.
#
# Revision 1.5 2002/03/19 12:41:33 anthonybaxter
# tabnannied and reindented everything. 4 space indent, no tabs.
# yay.
#
# Revision 1.4 2001/11/26 17:57:51 stroeder
# Added __version__
#
# Revision 1.3 2001/08/09 09:08:55 anthonybaxter
# added identifying header to top of each file
#
# Revision 1.2 2001/07/19 06:57:07 anthony
# cvs keywords added
#
#

discovery/DNS/lazy.py
@@ -1,53 +0,0 @@
# $Id: lazy.py,v 1.5.2.1 2007/05/22 20:23:38 customdesigned Exp $
#
# This file is part of the pydns project.
# Homepage: http://pydns.sourceforge.net
#
# This code is covered by the standard Python License.
#
# routines for lazy people.
from discovery.DNS import Base
def revlookup(name):
"convenience routine for doing a reverse lookup of an address"
if Base.defaults['server'] == []:
Base.DiscoverNameServers()
a = name.split('.')
a.reverse()
s = '.'
b = s.join(a) + '.in-addr.arpa'
# this will only return one of any records returned.
return Base.DnsRequest(b, qtype='ptr').req().answers[0]['data']
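The name revlookup() builds, step by step (a documentation address used as an example):

a = '192.0.2.10'.split('.')
a.reverse()
print('.'.join(a) + '.in-addr.arpa')   # 10.2.0.192.in-addr.arpa, queried with qtype='ptr'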
def mxlookup(name):
"""
convenience routine for doing an MX lookup of a name. returns a
sorted list of (preference, mail exchanger) records
"""
if Base.defaults['server'] == []:
Base.DiscoverNameServers()
a = Base.DnsRequest(name, qtype='mx').req().answers
l = sorted(map(lambda x: x['data'], a))
return l
#
# $Log: lazy.py,v $
# Revision 1.5.2.1 2007/05/22 20:23:38 customdesigned
# Lazy call to DiscoverNameServers
#
# Revision 1.5 2002/05/06 06:14:38 anthonybaxter
# reformat, move import to top of file.
#
# Revision 1.4 2002/03/19 12:41:33 anthonybaxter
# tabnannied and reindented everything. 4 space indent, no tabs.
# yay.
#
# Revision 1.3 2001/08/09 09:08:55 anthonybaxter
# added identifying header to top of each file
#
# Revision 1.2 2001/07/19 06:57:07 anthony
# cvs keywords added
#
#

discovery/DNS/win32dns.py
@@ -1,143 +0,0 @@
"""
$Id: win32dns.py,v 1.3.2.1 2007/05/22 20:26:49 customdesigned Exp $
Extract a list of TCP/IP name servers from the registry 0.1
0.1 Strobl 2001-07-19
Usage:
RegistryResolve() returns a list of ip numbers (dotted quads), by
scouring the registry for addresses of name servers
Tested on Windows NT4 Server SP6a, Windows 2000 Pro SP2 and
Whistler Pro (XP) Build 2462 and Windows ME
... all having a different registry layout wrt name servers :-/
Todo:
Program doesn't check whether an interface is up or down
(c) 2001 Copyright by Wolfgang Strobl ws@mystrobl.de,
License analog to the current Python license
"""
import winreg
def binipdisplay(s):
    "convert a binary array of ip addresses to a python list"
    if len(s) % 4 != 0:
        raise EnvironmentError  # well ...
    ol = []
    for i in range(int(len(s) / 4)):
        # the original assigned s = '.' here, clobbering the input parameter
        s1 = s[:4]
        s = s[4:]
        ip = []
        for j in s1:
            ip.append(str(ord(j)))
        ol.append('.'.join(ip))
    return ol
def stringdisplay(s):
'''convert "d.d.d.d,d.d.d.d" to ["d.d.d.d","d.d.d.d"].
also handle u'd.d.d.d d.d.d.d', as reporting on SF
'''
import re
return map(str, re.split("[ ,]", s))
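Both separator styles stringdisplay() handles (the addresses are made-up documentation values):

print(list(stringdisplay('192.0.2.1,192.0.2.2')))   # ['192.0.2.1', '192.0.2.2']
print(list(stringdisplay('192.0.2.1 192.0.2.2')))   # same result for the space-separated form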
def RegistryResolve():
nameservers = []
x = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
try:
y = winreg.OpenKey(x,
r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters")
except EnvironmentError: # so it isn't NT/2000/XP
# windows ME, perhaps?
try: # for Windows ME
y = winreg.OpenKey(x,
r"SYSTEM\CurrentControlSet\Services\VxD\MSTCP")
nameserver, dummytype = winreg.QueryValueEx(y, 'NameServer')
if nameserver and not (nameserver in nameservers):
nameservers.extend(stringdisplay(nameserver))
except EnvironmentError:
pass
return nameservers # no idea
    try:
        nameserver = winreg.QueryValueEx(y, "DhcpNameServer")[0].split()
    except EnvironmentError:  # a bare except would also swallow KeyboardInterrupt
        nameserver = winreg.QueryValueEx(y, "NameServer")[0].split()
if nameserver:
nameservers = nameserver
nameserver = winreg.QueryValueEx(y, "NameServer")[0]
winreg.CloseKey(y)
try: # for win2000
y = winreg.OpenKey(x,
r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters\DNSRegisteredAdapters")
for i in range(1000):
try:
n = winreg.EnumKey(y, i)
z = winreg.OpenKey(y, n)
dnscount, dnscounttype = winreg.QueryValueEx(z,
'DNSServerAddressCount')
dnsvalues, dnsvaluestype = winreg.QueryValueEx(z,
'DNSServerAddresses')
nameservers.extend(binipdisplay(dnsvalues))
winreg.CloseKey(z)
except EnvironmentError:
break
winreg.CloseKey(y)
except EnvironmentError:
pass
#
try: # for whistler
y = winreg.OpenKey(x,
r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters\Interfaces")
for i in range(1000):
try:
n = winreg.EnumKey(y, i)
z = winreg.OpenKey(y, n)
try:
nameserver, dummytype = winreg.QueryValueEx(
z, 'NameServer')
if nameserver and not (nameserver in nameservers):
nameservers.extend(stringdisplay(nameserver))
except EnvironmentError:
pass
winreg.CloseKey(z)
except EnvironmentError:
break
winreg.CloseKey(y)
except EnvironmentError:
# print "Key Interfaces not found, just do nothing."
pass
#
winreg.CloseKey(x)
return nameservers
if __name__ == "__main__":
print('Name servers:', RegistryResolve())
# $Log: win32dns.py,v $
# Revision 1.3.2.1 2007/05/22 20:26:49 customdesigned
# Fix win32 nameserver discovery.
#
# Revision 1.3 2002/05/06 06:15:31 anthonybaxter
# apparently some versions of windows return servers as unicode
# string with space sep, rather than strings with comma sep.
# *sigh*
#
# Revision 1.2 2002/03/19 12:41:33 anthonybaxter
# tabnannied and reindented everything. 4 space indent, no tabs. yay.
#
# Revision 1.1 2001/08/09 09:22:28 anthonybaxter
# added what I hope is win32 resolver lookup support. I'll need to try
# and figure out how to get the CVS checkout onto my windows machine to
# make sure it works (wow, doing something other than games on the
# windows machine :)
#
# Code from Wolfgang.Strobl@gmd.de
# win32dns.py from
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66260
#
# Really, ParseResolvConf() should be renamed "FindNameServers" or some such.

File diff suppressed because it is too large

discovery/__init__.py
@@ -1,25 +0,0 @@
__all__ = ['baidusearch',
'bingsearch',
'censys',
'crtsh',
'cymon',
'dnssearch',
'dogpilesearch',
'duckduckgosearch',
'exaleadsearch',
'googlecertificates',
'googlesearch',
'huntersearch',
'intelxsearch',
'linkedinsearch',
'netcraft',
'port_scanner',
'securitytrailssearch',
'shodansearch',
'takeover',
'threatcrowd',
'trello',
'twittersearch',
'virustotal',
'yahoosearch',
'yandexsearch']

discovery/baidusearch.py
@@ -1,41 +0,0 @@
from discovery.constants import *
from lib.core import *
from parsers import myparser
import requests
import time
class SearchBaidu:
def __init__(self, word, limit):
self.word = word
self.total_results = ""
self.server = 'www.baidu.com'
self.hostname = 'www.baidu.com'
self.limit = limit
self.counter = 0
def do_search(self):
        url = f'https://{self.server}/s?wd=%40{self.word}&pn={self.counter}&oq={self.word}'
headers = {
'Host': self.hostname,
'User-agent': Core.get_user_agent()
}
h = requests.get(url=url, headers=headers)
time.sleep(getDelay())
self.total_results += h.text
def process(self):
while self.counter <= self.limit and self.counter <= 1000:
self.do_search()
print(f'\tSearching {self.counter} results.')
self.counter += 10
def get_emails(self):
rawres = myparser.Parser(self.total_results, self.word)
return rawres.emails()
def get_hostnames(self):
rawres = myparser.Parser(self.total_results, self.word)
return rawres.hostnames()
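The process() loop above walks Baidu's pn= offset in steps of 10 and stops at 1000, so even an unbounded limit caps the module at 101 requests:

offsets = list(range(0, 1001, 10))   # pn= values visited when limit >= 1000
print(len(offsets))                  # 101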

discovery/bingsearch.py
@@ -1,89 +0,0 @@
from discovery.constants import *
from lib.core import *
from parsers import myparser
import requests
import time
class SearchBing:
def __init__(self, word, limit, start):
self.word = word.replace(' ', '%20')
self.results = ""
self.totalresults = ""
self.server = 'www.bing.com'
self.apiserver = 'api.search.live.net'
self.hostname = 'www.bing.com'
self.quantity = '50'
self.limit = int(limit)
self.bingApi = Core.bing_key()
self.counter = start
def do_search(self):
headers = {
'Host': self.hostname,
'Cookie': 'SRCHHPGUSR=ADLT=DEMOTE&NRSLT=50',
'Accept-Language': 'en-us,en',
'User-agent': Core.get_user_agent()
}
h = requests.get(url=('https://' + self.server + '/search?q=%40"' + self.word + '"&count=50&first=' + str(self.counter)), headers=headers)
self.results = h.text
self.totalresults += self.results
def do_search_api(self):
url = 'https://api.cognitive.microsoft.com/bing/v7.0/search?'
params = {
'q': self.word,
'count': str(self.limit),
'offset': '0',
'mkt': 'en-us',
'safesearch': 'Off'
}
headers = {'User-Agent': Core.get_user_agent(), 'Ocp-Apim-Subscription-Key': self.bingApi}
h = requests.get(url=url, headers=headers, params=params)
self.results = h.text
self.totalresults += self.results
def do_search_vhost(self):
headers = {
'Host': self.hostname,
'Cookie': 'mkt=en-US;ui=en-US;SRCHHPGUSR=NEWWND=0&ADLT=DEMOTE&NRSLT=50',
'Accept-Language': 'en-us,en',
'User-agent': Core.get_user_agent()
}
url = 'http://' + self.server + '/search?q=ip:' + self.word + '&go=&count=50&FORM=QBHL&qs=n&first=' + str(self.counter)
h = requests.get(url=url, headers=headers)
self.results = h.text
self.totalresults += self.results
def get_emails(self):
rawres = myparser.Parser(self.totalresults, self.word)
return rawres.emails()
def get_hostnames(self):
rawres = myparser.Parser(self.totalresults, self.word)
return rawres.hostnames()
def get_allhostnames(self):
rawres = myparser.Parser(self.totalresults, self.word)
return rawres.hostnames_all()
def process(self, api):
if api == 'yes':
if self.bingApi is None:
raise MissingKey(True)
while self.counter < self.limit:
if api == 'yes':
self.do_search_api()
time.sleep(getDelay())
else:
self.do_search()
time.sleep(getDelay())
self.counter += 50
print(f'\tSearching {self.counter} results.')
def process_vhost(self):
# Maybe it is good to use other limit for this.
while self.counter < self.limit:
self.do_search_vhost()
self.counter += 50

discovery/censys.py
@@ -1,130 +0,0 @@
from lib.core import *
from parsers import censysparser
import requests
class SearchCensys:
def __init__(self, word, limit):
self.word = word
self.urlhost = ""
self.urlcert = ""
self.page = ""
self.resultshosts = ""
self.resultcerts = ""
self.total_resultshosts = ""
self.total_resultscerts = ""
self.server = 'censys.io'
self.ips = []
self.hostnamesall = []
self.limit = limit
def do_searchhosturl(self):
try:
headers = {'user-agent': Core.get_user_agent(), 'Accept': '*/*', 'Referer': self.urlhost}
responsehost = requests.get(self.urlhost, headers=headers)
self.resultshosts = responsehost.text
self.total_resultshosts += self.resultshosts
except Exception as e:
            print(f'Error occurred in the Censys module downloading pages from Censys - IP search: {e}')
def do_searchcertificateurl(self):
try:
headers = {'user-agent': Core.get_user_agent(), 'Accept': '*/*', 'Referer': self.urlcert}
responsecert = requests.get(self.urlcert, headers=headers)
self.resultcerts = responsecert.text
self.total_resultscerts += self.resultcerts
except Exception as e:
print(f'Error occurred in the Censys module downloading pages from Censys - certificates search: {e}')
def process(self):
try:
self.urlhost = 'https://' + self.server + '/ipv4/_search?q=' + str(self.word) + '&page=1'
self.urlcert = 'https://' + self.server + '/certificates/_search?q=' + str(self.word) + '&page=1'
self.do_searchhosturl()
self.do_searchcertificateurl()
counter = 2
pages = censysparser.Parser(self)
totalpages = pages.search_totalpageshosts()
pagestosearch = int(self.limit / 25) # 25 results/page
if totalpages is None:
totalpages = 0
if totalpages <= pagestosearch:
while counter <= totalpages:
try:
self.page = str(counter)
self.urlhost = 'https://' + self.server + '/ipv4/_search?q=' + str(self.word) + '&page=' + str(
self.page)
print('\tSearching IP results page ' + self.page + '.')
self.do_searchhosturl()
counter += 1
except Exception as e:
print(f'Error occurred in the Censys module requesting the pages: {e}')
else:
while counter <= pagestosearch:
try:
self.page = str(counter)
self.urlhost = 'https://' + self.server + '/ipv4/_search?q=' + str(self.word) + '&page=' + str(
self.page)
print(f'\tSearching results page {self.page}.')
self.do_searchhosturl()
counter += 1
except Exception as e:
print(f'Error occurred in the Censys module requesting the pages: {e}')
counter = 2
totalpages = pages.search_totalpagescerts()
if totalpages is None:
totalpages = 0
            # both branches must target the certificates endpoint; the
            # original reused the ipv4 URL and host fetcher here
            if totalpages <= pagestosearch:
                while counter <= totalpages:
                    try:
                        self.page = str(counter)
                        self.urlcert = 'https://' + self.server + '/certificates/_search?q=' + str(
                            self.word) + '&page=' + str(self.page)
                        print(f'\tSearching certificates results page {self.page}.')
                        self.do_searchcertificateurl()
                        counter += 1
                    except Exception as e:
                        print(f'Error occurred in the Censys module requesting the pages: {e}')
            else:
                while counter <= pagestosearch:
                    try:
                        self.page = str(counter)
                        self.urlcert = 'https://' + self.server + '/certificates/_search?q=' + str(
                            self.word) + '&page=' + str(self.page)
                        print(f'\tSearching certificates results page {self.page}.')
                        self.do_searchcertificateurl()
                        counter += 1
                    except Exception as e:
                        print(f'Error occurred in the Censys module requesting the pages: {e}')
except Exception as e:
print(f'Error occurred in the main Censys module: {e}')
def get_hostnames(self):
try:
ips = self.get_ipaddresses()
headers = {'user-agent': Core.get_user_agent(), 'Accept': '*/*', 'Referer': self.urlcert}
response = requests.post('https://censys.io/ipv4/getdns', json={'ips': ips}, headers=headers)
responsejson = response.json()
domainsfromcensys = []
for key, jdata in responsejson.items():
if jdata is not None:
domainsfromcensys.append(jdata)
else:
pass
matchingdomains = [s for s in domainsfromcensys if str(self.word) in s]
self.hostnamesall.extend(matchingdomains)
hostnamesfromcerts = censysparser.Parser(self)
self.hostnamesall.extend(hostnamesfromcerts.search_hostnamesfromcerts())
return self.hostnamesall
except Exception as e:
print(f'Error occurred in the Censys module - hostname search: {e}')
def get_ipaddresses(self):
try:
ips = censysparser.Parser(self)
self.ips = ips.search_ipaddresses()
return self.ips
except Exception as e:
print(f'Error occurred in the main Censys module - IP address search: {e}')
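The pagination math used by process() above: Censys serves 25 results per page, so the page budget is the integer quotient of the limit:

limit = 120
print(int(limit / 25))   # 4 pages crawled at most for this limit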

discovery/constants.py
@@ -1,50 +0,0 @@
import random
googleUA = 'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1464.0 Safari/537.36'
def filter(lst):
"""
Method that filters list
:param lst: list to be filtered
:return: new filtered list
"""
lst = set(lst) # Remove duplicates.
new_lst = []
for item in lst:
item = str(item)
if (item[0].isalpha() or item[0].isdigit()) and ('xxx' not in item) and ('..' not in item):
if '252f' in item:
item = item.replace('252f', '')
if '2F' in item:
item = item.replace('2F', '')
if '2f' in item:
item = item.replace('2f', '')
new_lst.append(item.lower())
return new_lst
def getDelay():
return random.randint(1, 3) - .5
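A quirk of filter() worth noting: the set() dedupe runs before lowercasing, so duplicates that differ only in case survive, while getDelay() yields 0.5 to 2.5 seconds for request throttling. For example:

emails = ['Bob@Example.com', 'bob@example.com', 'xxx@spam.com']
print(sorted(filter(emails)))    # ['bob@example.com', 'bob@example.com'] -- 'xxx' entry dropped
print(0.5 <= getDelay() <= 2.5)  # True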
def search(text):
# Helper function to check if Google has blocked traffic.
for line in text.strip().splitlines():
if 'This page appears when Google automatically detects requests coming from your computer network' in line:
print('\tGoogle is blocking your IP due to too many automated requests, wait or change your IP')
return True
return False
class MissingKey(Exception):
def __init__(self, identity_flag):
if identity_flag:
self.message = '\n\033[93m[!] Missing API key. \033[0m'
else:
self.message = '\n\033[93m[!] Missing CSE id. \033[0m'
def __str__(self):
return self.message

discovery/crtsh.py
@@ -1,67 +0,0 @@
from discovery.constants import *
from lib.core import *
from parsers import myparser
import requests
import time
class search_crtsh:
def __init__(self, word):
self.word = word.replace(' ', '%20')
self.results = ""
self.totalresults = ""
self.server = 'https://crt.sh/?q='
self.quantity = '100'
self.counter = 0
    def do_search(self):
        urly = self.server + self.word  # plain concatenation cannot raise
        try:
            params = {'User-Agent': Core.get_user_agent()}
            r = requests.get(urly, headers=params)
        except Exception as e:
            print(e)
            return  # without a response there is nothing to parse
links = self.get_info(r.text)
for link in links:
params = {'User-Agent': Core.get_user_agent()}
r = requests.get(link, headers=params)
time.sleep(getDelay())
self.results = r.text
self.totalresults += self.results
"""
Function goes through text from base request and parses it for links
@param text requests text
@return list of links
"""
def get_info(self, text):
lines = []
for line in str(text).splitlines():
line = line.strip()
if 'id=' in line:
lines.append(line)
links = []
for i in range(len(lines)):
if i % 2 == 0: # Way html is formatted only care about every other one.
current = lines[i]
current = current[43:] # 43 is not an arbitrary number, the id number always starts at 43rd index.
link = ''
for ch in current:
if ch == '"':
break
else:
link += ch
links.append(('https://crt.sh?id=' + str(link)))
return links
def get_hostnames(self):
rawres = myparser.Parser(self.totalresults, self.word)
return rawres.hostnames()
def process(self):
self.do_search()
print('\tSearching results.')

discovery/cymon.py
@@ -1,38 +0,0 @@
from discovery.constants import *
from lib.core import *
from parsers import cymonparser
import requests
import time
class search_cymon:
def __init__(self, word):
self.word = word
self.url = ""
self.results = ""
self.server = 'cymon.io'
def do_search(self):
try:
headers = {'user-agent': Core.get_user_agent(), 'Accept': '*/*', 'Referer': self.url}
response = requests.get(self.url, headers=headers)
time.sleep(getDelay())
self.results = response.content
except Exception as e:
print(e)
def process(self):
try:
self.url = 'https://' + self.server + '/domain/' + str(self.word)
print('\tSearching results.')
self.do_search()
except Exception as e:
print(f'Error occurred: {e}')
def get_ipaddresses(self):
try:
ips = cymonparser.Parser(self)
return ips.search_ipaddresses()
except Exception as e:
print(f'Error occurred: {e}')

discovery/dnsdumpster.py
@@ -1,43 +0,0 @@
from lib.core import *
from parsers import myparser
import requests
class search_dnsdumpster:
def __init__(self, word):
self.word = word.replace(' ', '%20')
self.results = ""
self.totalresults = ""
self.server = 'dnsdumpster.com'
def do_search(self):
try:
agent = Core.get_user_agent()
headers = {'User-Agent': agent}
session = requests.session()
# create a session to properly verify
url = f'https://{self.server}'
request = session.get(url, headers=headers)
cookies = str(request.cookies)
# extract csrftoken from cookies
csrftoken = ''
for ch in cookies.split("=")[1]:
if ch == ' ':
break
csrftoken += ch
data = {
'Cookie': f'csfrtoken={csrftoken}', 'csrfmiddlewaretoken': csrftoken, 'targetip': self.word}
headers['Referer'] = url
post_req = session.post(url, headers=headers, data=data)
self.results = post_req.text
except Exception as e:
            print(f'An exception occurred: {e}')
self.totalresults += self.results
def get_hostnames(self):
rawres = myparser.Parser(self.totalresults, self.word)
return rawres.hostnames()
def process(self):
self.do_search() # Only need to do it once.
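The csrftoken scrape in do_search() walks the printed cookie jar up to the first space; a sketch against a made-up token value:

cookies = '<RequestsCookieJar[<Cookie csrftoken=AbC123 for dnsdumpster.com/>]>'
csrftoken = ''
for ch in cookies.split('=')[1]:
    if ch == ' ':
        break
    csrftoken += ch
print(csrftoken)   # AbC123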

discovery/dnssearch.py
@@ -1,233 +0,0 @@
import discovery.DNS as DNS
import discovery.IPy as IPy
import os
import sys
class dns_reverse():
def __init__(self, range, verbose=True):
self.range = range
self.iplist = ''
self.results = []
self.verbose = verbose
        try:
            DNS.ParseResolvConf('/etc/resolv.conf')
            nameserver = DNS.defaults['server'][0]
        except Exception:  # a bare except would also swallow SystemExit
            print('Error in DNS resolvers')
            sys.exit()
def run(self, host):
a = host.split('.')
a.reverse()
s = '.'
b = s.join(a) + '.in-addr.arpa'
nameserver = DNS.defaults['server'][0]
if self.verbose:
ESC = chr(27)
sys.stdout.write(ESC + '[2K' + ESC + '[G')
sys.stdout.write('\r\t' + host)
sys.stdout.flush()
        try:
            name = DNS.Base.DnsRequest(b, qtype='ptr').req().answers[0]['data']
            return host + ':' + name
        except Exception:  # no PTR record; fall through and return None
            pass
def get_ip_list(self, ips):
"""Generates the list of IPs to reverse"""
        try:
            list = IPy.IP(ips)
        except Exception:
            print('Error in IP format, check the input and try again. (Eg. 192.168.1.0/24)')
            sys.exit()
name = []
for x in list:
name.append(str(x))
return name
def list(self):
self.iplist = self.get_ip_list(self.range)
return self.iplist
def process(self):
for x in self.iplist:
host = self.run(x)
if host is not None:
self.results.append(host)
return self.results
class dns_force():
def __init__(self, domain, dnsserver, verbose=False):
self.domain = domain
self.nameserver = dnsserver
self.file = 'wordlists/dns-big.txt'
self.subdo = False
self.verbose = verbose
try:
fileDir = os.path.dirname(os.path.realpath('__file__'))
res_path = os.path.join(fileDir,'lib/resolvers.txt')
with open(res_path) as f:
self.resolvers = f.read().splitlines()
except Exception:
print("Resolvers file can't be open.")
        try:
            f = open(self.file, 'r')
        except OSError:
            print('Error opening DNS dictionary file.')
            sys.exit()
self.list = f.readlines()
def getdns(self, domain):
DNS.ParseResolvConf('/etc/resolv.conf')
dom = domain
if self.subdo is True:
dom = domain.split('.')
dom.pop(0)
rootdom = '.'.join(dom)
else:
rootdom = dom
if self.nameserver == "":
try:
r = DNS.Request(rootdom, qtype='SOA').req()
primary, email, serial, refresh, retry, expire, minimum = r.answers[
0]['data']
test = DNS.Request(
rootdom,
qtype='NS',
server=primary,
aa=1).req()
except Exception as e:
print(e)
try:
# Check if variable is defined.
test
except NameError:
print('Error, test is not defined.')
sys.exit()
if test.header['status'] != 'NOERROR':
print('[!] Error')
sys.exit()
self.nameserver = test.answers[0]['data']
        elif self.nameserver == 'local':
            # 'nameserver' was never defined in this scope; use the resolver parsed above
            self.nameserver = DNS.defaults['server'][0]
return self.nameserver
def run(self, host):
if self.nameserver == "":
self.nameserver = self.getdns(self.domain)
print('\n\033[94m[-] Using DNS server: ' + self.nameserver + '\033[1;33;40m\n')
hostname = str(host.split('\n')[0]) + '.' + str(self.domain)
if self.verbose:
ESC = chr(27)
sys.stdout.write(ESC + '[2K' + ESC + '[G')
sys.stdout.write('\r' + hostname)
sys.stdout.flush()
try:
test = DNS.Request(
hostname,
qtype='a',
server=self.nameserver).req(
)
# TODO FIX test is sometimes not getting answers and leads to an indexing error.
hostip = test.answers[0]['data']
return hostname + ':' + hostip
except Exception:
pass
def process(self):
results = []
for x in self.list:
host = self.run(x)
if host is not None:
print(' : ' + host.split(':')[1])
results.append(host)
return results
class dns_tld():
def __init__(self, domain, dnsserver, verbose=False):
self.domain = domain
self.nameserver = dnsserver
self.subdo = False
self.verbose = verbose
# Updated from http://data.iana.org/TLD/tlds-alpha-by-domain.txt
self.tlds = [
'ac', 'academy', 'ad', 'ae', 'aero', 'af', 'ag', 'ai', 'al', 'am', 'an', 'ao', 'aq', 'ar', 'arpa', 'as',
'asia', 'at', 'au', 'aw', 'ax', 'az', 'ba', 'bb', 'bd', 'be', 'bf', 'bg', 'bh', 'bi', 'bike', 'biz', 'bj',
'bm', 'bn', 'bo', 'br', 'bs', 'bt', 'builders', 'buzz', 'bv', 'bw', 'by', 'bz', 'ca', 'cab', 'camera',
'camp', 'careers', 'cat', 'cc', 'cd', 'center', 'ceo', 'cf', 'cg', 'ch', 'ci', 'ck', 'cl', 'clothing',
'cm', 'cn', 'co', 'codes', 'coffee', 'com', 'company', 'computer', 'construction', 'contractors', 'coop',
'cr', 'cu', 'cv', 'cw', 'cx', 'cy', 'cz', 'de', 'diamonds', 'directory', 'dj', 'dk', 'dm', 'do',
'domains', 'dz', 'ec', 'edu', 'education', 'ee', 'eg', 'email', 'enterprises', 'equipment', 'er', 'es',
'estate', 'et', 'eu', 'farm', 'fi', 'fj', 'fk', 'florist', 'fm', 'fo', 'fr', 'ga', 'gallery', 'gb', 'gd',
'ge', 'gf', 'gg', 'gh', 'gi', 'gl', 'glass', 'gm', 'gn', 'gov', 'gp', 'gq', 'gr', 'graphics', 'gs', 'gt',
'gu', 'guru', 'gw', 'gy', 'hk', 'hm', 'hn', 'holdings', 'holiday', 'house', 'hr', 'ht', 'hu', 'id', 'ie',
'il', 'im', 'immobilien', 'in', 'info', 'institute', 'int', 'international', 'io', 'iq', 'ir', 'is', 'it',
'je', 'jm', 'jo', 'jobs', 'jp', 'kaufen', 'ke', 'kg', 'kh', 'ki', 'kitchen', 'kiwi', 'km', 'kn', 'kp',
'kr', 'kw', 'ky', 'kz', 'la', 'land', 'lb', 'lc', 'li', 'lighting', 'limo', 'lk', 'lr', 'ls', 'lt', 'lu',
'lv', 'ly', 'ma', 'management', 'mc', 'md', 'me', 'menu', 'mg', 'mh', 'mil', 'mk', 'ml', 'mm', 'mn', 'mo',
'mobi', 'mp', 'mq', 'mr', 'ms', 'mt', 'mu', 'museum', 'mv', 'mw', 'mx', 'my', 'mz', 'na', 'name', 'nc',
'ne', 'net', 'nf', 'ng', 'ni', 'ninja', 'nl', 'no', 'np', 'nr', 'nu', 'nz', 'om', 'onl', 'org', 'pa', 'pe',
'pf', 'pg', 'ph', 'photography', 'photos', 'pk', 'pl', 'plumbing', 'pm', 'pn', 'post', 'pr', 'pro', 'ps',
'pt', 'pw', 'py', 'qa', 're', 'recipes', 'repair', 'ro', 'rs', 'ru', 'ruhr', 'rw', 'sa', 'sb', 'sc', 'sd',
'se', 'sexy', 'sg', 'sh', 'shoes', 'si', 'singles', 'sj', 'sk', 'sl', 'sm', 'sn', 'so', 'solar',
'solutions', 'sr', 'st', 'su', 'support', 'sv', 'sx', 'sy', 'systems', 'sz', 'tattoo', 'tc', 'td',
'technology', 'tel', 'tf', 'tg', 'th', 'tips', 'tj', 'tk', 'tl', 'tm', 'tn', 'to', 'today', 'tp', 'tr',
'training', 'travel', 'tt', 'tv', 'tw', 'tz', 'ua', 'ug', 'uk', 'uno', 'us', 'uy', 'uz', 'va', 'vc',
've', 'ventures', 'vg', 'vi', 'viajes', 'vn', 'voyage', 'vu', 'wang', 'wf', 'wien', 'ws', 'xxx', 'ye',
'yt', 'za', 'zm', 'zw']
def getdns(self, domain):
dom = domain
if self.subdo is True:
dom = domain.split('.')
dom.pop(0)
rootdom = '.'.join(dom)
else:
rootdom = dom
if self.nameserver is False:
r = DNS.Request(rootdom, qtype='SOA').req()
primary, email, serial, refresh, retry, expire, minimum = r.answers[
0]['data']
test = DNS.Request(rootdom, qtype='NS', server=primary, aa=1).req()
if test.header['status'] != 'NOERROR':
print('Error')
sys.exit()
self.nameserver = test.answers[0]['data']
elif self.nameserver == 'local':
# fall back to the first system resolver (assumes DNS.ParseResolvConf ran earlier)
self.nameserver = DNS.defaults['server'][0]
return self.nameserver
def run(self, tld):
self.nameserver = self.getdns(self.domain)
hostname = self.domain.split('.')[0] + '.' + tld
if self.verbose:
ESC = chr(27)
sys.stdout.write(ESC + '[2K' + ESC + '[G')
sys.stdout.write('\r\tSearching for: ' + hostname)
sys.stdout.flush()
try:
test = DNS.Request(
hostname,
qtype='a',
server=self.nameserver).req(
)
hostip = test.answers[0]['data']
return hostip + ':' + hostname
except Exception:
pass
def process(self):
results = []
for x in self.tlds:
host = self.run(x)
if host is not None:
results.append(host)
return results

View file

@@ -1,48 +0,0 @@
from discovery.constants import *
from lib.core import *
from parsers import myparser
import requests
import time
class SearchDogpile:
def __init__(self, word, limit):
self.word = word
self.total_results = ""
self.server = 'www.dogpile.com'
self.hostname = 'www.dogpile.com'
self.limit = limit
self.counter = 0
def do_search(self):
#import ssl
#ssl._create_default_https_context = ssl._create_unverified_context
# Dogpile is hardcoded to return 10 results.
url = 'https://' + self.server + "/search/web?qsi=" + str(self.counter) \
+ "&q=\"%40" + self.word + "\""
headers = {
'Host': self.hostname,
'User-agent': Core.get_user_agent()
}
try:
h = requests.get(url=url, headers=headers, verify=False)
#print(h.text)
self.total_results += h.text
except Exception as e:
print(f'Error Occurred: {e}')
def process(self):
while self.counter <= self.limit and self.counter <= 1000:
self.do_search()
time.sleep(getDelay())
print(f'\tSearching {self.counter} results.')
self.counter += 10
def get_emails(self):
rawres = myparser.Parser(self.total_results, self.word)
return rawres.emails()
def get_hostnames(self):
rawres = myparser.Parser(self.total_results, self.word)
return rawres.hostnames()

View file

@@ -1,93 +0,0 @@
from discovery.constants import *
from lib.core import *
from parsers import myparser
import json
import requests
import time
class SearchDuckDuckGo:
def __init__(self, word, limit):
self.word = word
self.results = ""
self.totalresults = ""
self.dorks = []
self.links = []
self.database = 'https://duckduckgo.com/?q='
self.api = 'https://api.duckduckgo.com/?q=x&format=json&pretty=1' # Currently using API.
self.quantity = '100'
self.limit = limit
def do_search(self):
try:  # Query the DuckDuckGo API.
url = self.api.replace('x', self.word)
headers = {'User-Agent': googleUA}
r = requests.get(url, headers=headers)
except Exception as e:
print(e)
time.sleep(getDelay())
self.results = r.text
self.totalresults += self.results
urls = self.crawl(self.results)
for url in urls:
try:
self.totalresults += requests.get(url, headers={'User-Agent': Core.get_user_agent()}).text
time.sleep(getDelay())
except Exception:
continue
def crawl(self, text):
"""
Function parses json and returns URLs.
:param text: formatted json
:return: set of URLs
"""
urls = set()
try:
load = json.loads(text)
for key in load.keys(): # Iterate through keys of dict.
val = load.get(key)
if isinstance(val, int) or isinstance(val, dict) or val is None:
continue
if isinstance(val, list):
if len(val) == 0: # Make sure not indexing an empty list.
continue
val = val[0] # First value should be dict.
if isinstance(val, dict):  # Sanity check.
for key in val.keys():
value = val.get(key)
if isinstance(value, str) and value != '' and ('https://' in value or 'http://' in value):
urls.add(value)
if isinstance(val, str) and val != '' and ('https://' in val or 'http://' in val):
urls.add(val)
tmp = set()
for url in urls:
if '<' in url and 'href=' in url: # Format is <href="https://www.website.com"/>
equal_index = url.index('=')
true_url = ''
for ch in url[equal_index + 1:]:
if ch == '"':
tmp.add(true_url)
break
true_url += ch
else:
if url != '':
tmp.add(url)
return tmp
except Exception as e:
print(f'Exception occurred: {e}')
import traceback as t
t.print_exc()  # print_exc() prints and returns None, so don't wrap it in print()
return set()
def get_emails(self):
rawres = myparser.Parser(self.totalresults, self.word)
return rawres.emails()
def get_hostnames(self):
rawres = myparser.Parser(self.totalresults, self.word)
return rawres.hostnames()
def process(self):
self.do_search() # Only need to search once since using API.

View file

@@ -1,81 +0,0 @@
from discovery.constants import *
from lib.core import *
from parsers import myparser
import re
import requests
import time
class search_exalead:
def __init__(self, word, limit, start):
self.word = word
self.files = 'pdf'
self.results = ""
self.totalresults = ""
self.server = 'www.exalead.com'
self.hostname = 'www.exalead.com'
self.limit = limit
self.counter = start
def do_search(self):
url = 'http://' + self.server + '/search/web/results/?q=%40' + self.word \
+ '&elements_per_page=50&start_index=' + str(self.counter)
headers = {
'Host': self.hostname,
'Referer': ('http://' + self.hostname + '/search/web/results/?q=%40' + self.word),
'User-agent': Core.get_user_agent()
}
h = requests.get(url=url, headers=headers)
self.results = h.text
self.totalresults += self.results
def do_search_files(self, files):
url = 'http://' + self.server + '/search/web/results/?q=%40' + self.word \
+ '%20filetype%3A' + self.files + '&elements_per_page=50&start_index=' + str(self.counter)
headers = {
'Host': self.hostname,
'Referer': ('http://' + self.hostname + '/search/web/results/?q=%40' + self.word),
'User-agent': Core.get_user_agent()
}
h = requests.get(url=url, headers=headers)
self.results = h.text
self.totalresults += self.results
def check_next(self):
renext = re.compile('topNextUrl')
nextres = renext.findall(self.results)
if nextres != []:
nexty = '1'
print(str(self.counter))
else:
nexty = '0'
return nexty
def get_emails(self):
rawres = myparser.Parser(self.totalresults, self.word)
return rawres.emails()
def get_hostnames(self):
rawres = myparser.Parser(self.totalresults, self.word)
return rawres.hostnames()
def get_files(self):
rawres = myparser.Parser(self.totalresults, self.word)
return rawres.fileurls(self.files)
def process(self):
while self.counter <= self.limit:
self.do_search()
self.counter += 50
print(f'\tSearching {self.counter} results.')
def process_files(self, files):
while self.counter < self.limit:
self.do_search_files(files)
time.sleep(getDelay())
more = self.check_next()
if more == '1':
self.counter += 50
else:
break

View file

@@ -1,38 +0,0 @@
from lib.core import *
import json
import requests
class SearchGoogleCertificates:
# https://www.google.com/transparencyreport/api/v3/httpsreport/ct/certsearch?include_expired=true&include_subdomains=true&domain=
def __init__(self, word, limit, start):
self.word = word
self.results = ""
self.totalresults = ""
self.server = 'www.google.com'
self.quantity = '100'
self.limit = limit
self.counter = start
def do_search(self):
try:
urly = 'https://' + self.server + '/transparencyreport/api/v3/httpsreport/ct/certsearch?include_expired=true&include_subdomains=true&domain=' + self.word
except Exception as e:
print(e)
try:
headers = {'User-Agent': Core.get_user_agent()}
r = requests.get(urly, headers=headers)
except Exception as e:
print(e)
self.results = r.text
self.totalresults += self.results
def get_domains(self):
domains = []
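# The transparency-report endpoint prepends two non-JSON lines (an anti-XSSI
# prefix), hence the split that discards the first two lines before parsing.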
rawres = json.loads(self.totalresults.split('\n', 2)[2])
for array in rawres[0][1]:
domains.append(array[1])
return list(set(domains))
def process(self):
self.do_search()

View file

@@ -1,147 +0,0 @@
from discovery.constants import *
from parsers import myparser
import requests
import time
class search_google:
def __init__(self, word, limit, start):
self.word = word
self.results = ""
self.totalresults = ""
self.server = 'www.google.com'
self.dorks = []
self.links = []
self.database = 'https://www.google.com/search?q='
self.quantity = '100'
self.limit = limit
self.counter = start
def do_search(self):
try: # Do normal scraping.
urly = 'http://' + self.server + '/search?num=' + self.quantity + '&start=' + str(
self.counter) + '&hl=en&meta=&q=%40\"' + self.word + '\"'
except Exception as e:
print(e)
try:
headers = {'User-Agent': googleUA}
r = requests.get(urly, headers=headers)
except Exception as e:
print(e)
self.results = r.text
if search(self.results):
time.sleep(getDelay() * 5) # Sleep for a longer time.
else:
time.sleep(getDelay())
self.totalresults += self.results
def do_search_profiles(self):
try:
urly = 'http://' + self.server + '/search?num=' + self.quantity + '&start=' + str(
self.counter) + '&hl=en&meta=&q=site:www.google.com%20intitle:\"Google%20Profile\"%20\"Companies%20I%27ve%20worked%20for\"%20\"at%20' + self.word + '\"'
except Exception as e:
print(e)
try:
headers = {'User-Agent': googleUA}
r = requests.get(urly, headers=headers)
except Exception as e:
print(e)
self.results = r.text
if search(self.results):
time.sleep(getDelay() * 5) # Sleep for a longer time.
else:
time.sleep(getDelay())
self.totalresults += self.results
def get_emails(self):
rawres = myparser.Parser(self.totalresults, self.word)
return rawres.emails()
def get_hostnames(self):
rawres = myparser.Parser(self.totalresults, self.word)
return rawres.hostnames()
def get_files(self):
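# NOTE: self.files is never set anywhere in search_google; assign it before calling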
rawres = myparser.Parser(self.totalresults, self.word)
return rawres.fileurls(self.files)
def get_profiles(self):
rawres = myparser.Parser(self.totalresults, self.word)
return rawres.profiles()
def process(self, google_dorking):
if google_dorking is False:
while self.counter <= self.limit and self.counter <= 1000:
self.do_search()
print(f'\tSearching {self.counter} results.')
self.counter += 100
else: # Google dorking is true.
self.counter = 0 # Reset counter.
print('\n')
print('[-] Searching with Google Dorks: ')
self.googledork() # Call Google dorking method if user wanted it!
def process_profiles(self):
while self.counter < self.limit:
self.do_search_profiles()
time.sleep(getDelay())
self.counter += 100
print(f'\tSearching {self.counter} results.')
def append_dorks(self):
# Wrap in try-except in case the file path is wrong.
try:
with open('wordlists/dorks.txt', mode='r') as fp:
self.dorks = [dork.strip() for dork in fp]
except FileNotFoundError as error:
print(error)
def construct_dorks(self):
# Format is: site:targetwebsite.com + space + inurl:admindork
colon = '%3A'
plus = '%2B'
space = '+'
period = '%2E'
double_quote = '%22'
asterisk = '%2A'
left_bracket = '%5B'
right_bracket = '%5D'
question_mark = '%3F'
slash = '%2F'
single_quote = '%27'
ampersand = '%26'
left_paren = '%28'
right_paren = '%29'
pipe = '%7C'
# Format is google.com/search?q=dork+space+self.word
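# e.g. a hypothetical dork 'inurl:admin' with word 'example.com' becomes
# 'https://www.google.com/search?q=inurl%3Aadmin+example.com'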
self.links = tuple(self.database +
str(dork).replace(':', colon).replace('+', plus).replace('.', period).replace('"', double_quote)
.replace('*', asterisk).replace('[', left_bracket).replace(']', right_bracket)
.replace('?', question_mark).replace(' ', space).replace('/', slash).replace("'",single_quote)
.replace('&', ampersand).replace('(', left_paren).replace(')', right_paren).replace('|', pipe)
+ space + self.word
for dork in self.dorks)
def googledork(self):
self.append_dorks() # Call functions to create list.
self.construct_dorks()
self.send_dorks()
def send_dorks(self):  # Helper function to minimize code duplication.
headers = {'User-Agent': googleUA}
# Get random user agent to try and prevent google from blocking IP.
for num in range(len(self.links)):
try:
if num % 10 == 0 and num > 0:
print(f'\tSearching through {num} results')
link = self.links[num]
req = requests.get(link, headers=headers)
self.results = req.text
if search(self.results):
time.sleep(getDelay() * 5) # Sleep for a longer time.
else:
time.sleep(getDelay())
self.totalresults += self.results
except Exception as e:
print(f'\tException occurred: {e}')

View file

@@ -1,42 +0,0 @@
from discovery.constants import *
from lib.core import *
from parsers import myparser
import requests
class SearchHunter:
def __init__(self, word, limit, start):
self.word = word
self.limit = 100
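# NOTE: the limit argument is ignored; Hunter queries here are capped at 100 results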
self.start = start
self.key = Core.hunter_key()
if self.key is None:
raise MissingKey(True)
self.results = ""
self.totalresults = ""
self.counter = start
self.database = "https://api.hunter.io/v2/domain-search?domain=" + word + "&api_key=" + self.key + "&limit=" + str(self.limit)
def do_search(self):
try:
r = requests.get(self.database)
except Exception as e:
print(e)
self.results = r.text
self.totalresults += self.results
def process(self):
self.do_search() # Only need to do it once.
def get_emails(self):
rawres = myparser.Parser(self.totalresults, self.word)
return rawres.emails()
def get_hostnames(self):
rawres = myparser.Parser(self.totalresults, self.word)
return rawres.hostnames()
def get_profiles(self):
rawres = myparser.Parser(self.totalresults, self.word)
return rawres.profiles()

View file

@@ -1,56 +0,0 @@
from discovery.constants import *
from lib.core import *
from parsers import intelxparser
import requests
import time
class SearchIntelx:
def __init__(self, word, limit):
self.word = word
# default key is public key
self.key = Core.intelx_key()
if self.key is None:
raise MissingKey(True)
self.database = 'https://public.intelx.io/'
self.results = None
self.info = ()
self.limit = limit
def do_search(self):
try:
user_agent = Core.get_user_agent()
headers = {'User-Agent': user_agent, 'x-key': self.key}
# data is json that corresponds to what we are searching for, sort:2 means sort by most relevant
data = f'{{"term": "{self.word}", "maxresults": {self.limit}, "media": 0, "sort": 2 , "terminate": []}}'
r = requests.post(f'{self.database}phonebook/search', data=data, headers=headers)
if r.status_code == 400:
raise Exception('Invalid json was passed in.')
time.sleep(1)
# grab uuid to send get request to fetch data
uuid = r.json()['id']
url = f'{self.database}phonebook/search/result?id={uuid}&offset=0&limit={self.limit}'
r = requests.get(url, headers=headers)
time.sleep(1)
# TODO: add in future grab status from r.text and check if more results can be gathered
if r.status_code != 200:
raise Exception('Error occurred while searching intelx.')
self.results = r.json()
except Exception as e:
print(f'An exception has occurred: {e}')
def process(self):
self.do_search()
intelx_parser = intelxparser.Parser()
self.info = intelx_parser.parse_dictionaries(self.results)
# Create parser and set self.info to tuple returned from parsing text.
def get_emails(self):
return self.info[0]
def get_hostnames(self):
return self.info[1]

View file

@@ -1,42 +0,0 @@
from discovery.constants import *
from lib.core import *
from parsers import myparser
import requests
import time
class SearchLinkedin:
def __init__(self, word, limit):
self.word = word.replace(' ', '%20')
self.results = ""
self.totalresults = ""
self.server = 'www.google.com'
self.userAgent = 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.2) Gecko/20100115 Firefox/3.6'
self.quantity = '100'
self.limit = int(limit)
self.counter = 0
def do_search(self):
try:
urly = 'http://' + self.server + '/search?num=100&start=' + str(self.counter) + '&hl=en&meta=&q=site%3Alinkedin.com/in%20' + self.word
except Exception as e:
print(e)
try:
headers = {'User-Agent': Core.get_user_agent()}
r = requests.get(urly, headers=headers)
except Exception as e:
print(e)
self.results = r.text
self.totalresults += self.results
def get_people(self):
rawres = myparser.Parser(self.totalresults, self.word)
return rawres.people_linkedin()
def process(self):
while self.counter < self.limit:
self.do_search()
time.sleep(getDelay())
self.counter += 100
print(f'\tSearching {self.counter} results.')

View file

@@ -1,72 +0,0 @@
from lib.core import *
from parsers import myparser
import requests
import hashlib
import urllib.parse as urllib
import re
class SearchNetcraft:
# this module was inspired by sublist3r's netcraft module
def __init__(self, word):
self.word = word.replace(' ', '%20')
self.totalresults = ""
self.server = 'netcraft.com'
self.base_url = 'https://searchdns.netcraft.com/?restriction=site+ends+with&host={domain}'
self.session = requests.session()
self.headers = {
'User-Agent': Core.get_user_agent()
}
self.timeout = 25
self.domain = f"https://searchdns.netcraft.com/?restriction=site+ends+with&host={self.word}"
def request(self, url, cookies=None):
cookies = cookies or {}
try:
resp = self.session.get(url, headers=self.headers, timeout=self.timeout, cookies=cookies)
except Exception as e:
print(e)
resp = None
return resp
def get_next(self, resp):
link_regx = re.compile('<A href="(.*?)"><b>Next page</b></a>')
link = link_regx.findall(resp)
link = re.sub(f'host=.*?{self.word}', f'host={self.word}', link[0])  # keep the host parameter pointed at the target word
url = f'http://searchdns.netcraft.com{link}'
return url
def create_cookies(self, cookie):
cookies = dict()
cookies_list = cookie[0:cookie.find(';')].split("=")
cookies[cookies_list[0]] = cookies_list[1]
# get js verification response
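# sketch, assuming a Set-Cookie like 'netcraft_js_verification_challenge=abc%3D; path=/':
# cookies = {'netcraft_js_verification_challenge': 'abc%3D',
#            'netcraft_js_verification_response': sha1(unquote('abc%3D')).hexdigest()}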
cookies['netcraft_js_verification_response'] = hashlib.sha1(
urllib.unquote(cookies_list[1]).encode('utf-8')).hexdigest()
return cookies
def get_cookies(self, headers):
if 'set-cookie' in headers:
cookies = self.create_cookies(headers['set-cookie'])
else:
cookies = {}
return cookies
def do_search(self):
start_url = self.base_url.format(domain=self.word)  # drop the hardcoded test domain
resp = self.request(start_url)
cookies = self.get_cookies(resp.headers)
url = start_url
while True:
response = self.request(url, cookies)
if response is None:
break
resp = response.text
self.totalresults += resp
if 'Next page' not in resp:
break
url = self.get_next(resp)
def get_hostnames(self):
rawres = myparser.Parser(self.totalresults, self.word)
return rawres.hostnames()
def process(self):
self.do_search()

View file

@@ -1,32 +0,0 @@
import socket
import threading
class PortScan:
def __init__(self, host, ports):
self.threads = 25
self.host = host
self.ports = ports
self.lock = threading.BoundedSemaphore(value=self.threads)
def port_scanner(self, host, ports):
openports = []
self.lock.acquire()
for port in ports:
try:
connect = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connect.settimeout(2)
result = connect.connect_ex((host, int(port)))
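# connect_ex returns 0 on a successful TCP handshake (an errno code otherwise)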
if result == 0:
openports.append(port)
connect.close()
except Exception as e:
print(e)
pass
self.lock.release()
return openports
def process(self):
ports = self.port_scanner(self.host, self.ports)
return ports

View file

@@ -1,46 +0,0 @@
import re
import requests
class s3_scanner:
def __init__(self, host):
self.host = host
self.results = ""
self.totalresults = ""
self.fingerprints = [
'www.herokucdn.com/error-pages/no-such-app.html',
'<title>Squarespace - No Such Account</title>',
"<p> If you're trying to publish one, <a href=\"https://help.github.com/pages/\">read the full documentation</a> to learn how to set up <strong>GitHub Pages</strong> for your repository, organization, or user account. </p>",
"<span class=\"title\">Bummer. It looks like the help center that you are trying to reach no longer exists.</span>",
"<head> <title>The page you're looking for could not be found (404)</title> <style> body { color: #666; text-align: center; font-family: \"Helvetica Neue\", Helvetica, Arial, sans-serif; margin: 0; width: 800px; margin: auto; font-size: 14px; } h1 { font-size: 56px; line-height: 100px; font-weight: normal; color: #456; } h2 { font-size: 24px; color: #666; line-height: 1.5em; } h3 { color: #456; font-size: 20px; font-weight: normal; line-height: 28px; } hr { margin: 18px 0; border: 0; border-top: 1px solid #EEE; border-bottom: 1px solid white; } </style> </head>"]
def __check_http(self, bucket_url):
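# NOTE: leftover helper; self.session, S3_URL, ARGS, KEYWORDS and self.__log are
# never defined in this class, so this method is not callable as written.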
check_response = self.session.head(
S3_URL, timeout=3, headers={'Host': bucket_url})
# if not ARGS.ignore_rate_limiting\
# and (check_response.status_code == 503 and check_response.reason == 'Slow Down'):
# self.q.rate_limited = True
# Add it back to the bucket for re-processing.
# self.q.put(bucket_url)
if check_response.status_code == 307: # valid bucket, lets check if its public
new_bucket_url = check_response.headers['Location']
bucket_response = requests.request(
'GET' if ARGS.only_interesting else 'HEAD', new_bucket_url, timeout=3)
if bucket_response.status_code == 200\
and (not ARGS.only_interesting or
(ARGS.only_interesting and any(keyword in bucket_response.text for keyword in KEYWORDS))):
print(f"Found bucket '{new_bucket_url}'")
self.__log(new_bucket_url)
def do_s3(self):
try:
print('\t Searching takeovers for ' + self.host)
r = requests.get('https://' + self.host, verify=False)
for x in self.fingerprints:
take_reg = re.compile(x)
self.temp = take_reg.findall(r.text)
if self.temp != []:
print('\t\033[91m Takeover detected! - ' + self.host + '\033[1;32;40m')
except Exception as e:
print(e)
def process(self):
self.do_s3()

View file

@@ -1,62 +0,0 @@
from discovery.constants import *
from lib.core import *
from parsers import securitytrailsparser
import requests
import sys
import time
class search_securitytrail:
def __init__(self, word):
self.word = word
self.key = Core.security_trails_key()
if self.key is None:
raise MissingKey(True)
self.results = ""
self.totalresults = ""
self.database = "https://api.securitytrails.com/v1/"
self.info = ()
def authenticate(self):
# Method to authenticate API key before sending requests.
headers = {'APIKEY': self.key}
url = self.database + 'ping'
r = requests.get(url, headers=headers).text
if 'False' in r or 'Invalid authentication' in r:
print('\tKey could not be authenticated; exiting program.')
sys.exit(-2)
time.sleep(2)
def do_search(self):
url = ''
headers = {}
try:
# https://api.securitytrails.com/v1/domain/domain.com
url = self.database + 'domain/' + self.word
headers = {'APIKEY': self.key}
r = requests.get(url, headers=headers)
time.sleep(2) # Not random delay because 2 seconds is required due to rate limit.
except Exception as e:
print(e)
self.results = r.text
self.totalresults += self.results
url += '/subdomains' # Get subdomains now.
r = requests.get(url, headers=headers)
time.sleep(2)
self.results = r.text
self.totalresults += self.results
def process(self):
self.authenticate()
self.do_search()
parser = securitytrailsparser.Parser(word=self.word, text=self.totalresults)
self.info = parser.parse_text()
# Create parser and set self.info to tuple returned from parsing text.
print('\tDone Searching Results')
def get_ips(self):
return self.info[0]
def get_hostnames(self):
return self.info[1]

View file

@@ -1,43 +0,0 @@
from discovery.constants import *
from lib.core import *
from shodan import exception
from shodan import Shodan
class SearchShodan:
def __init__(self):
self.key = Core.shodan_key()
if self.key is None:
raise MissingKey(True)
self.api = Shodan(self.key)
self.hostdatarow = []
def search_ip(self, ip):
try:
ipaddress = ip
results = self.api.host(ipaddress)
technologies = []
servicesports = []
for result in results['data']:
try:
for key in result['http']['components'].keys():
technologies.append(key)
except KeyError:
pass
port = str(result.get('port'))
product = str(result.get('product'))
servicesports.append(str(product)+':'+str(port))
technologies = list(set(technologies))
self.hostdatarow = [
str(results.get('ip_str')), str(results.get('hostnames')).strip('[]\''),
str(results.get('org')), str(servicesports).replace('\'', '').strip('[]'),
str(technologies).replace('\'', '').strip('[]')]
except exception.APIError:
print(f'{ipaddress}: Not in Shodan')
self.hostdatarow = [ipaddress, "Not in Shodan", "Not in Shodan", "Not in Shodan", "Not in Shodan"]
except Exception as e:
print(f'Error occurred in the Shodan IP search module: {e}')
finally:
return self.hostdatarow

View file

@@ -1,44 +0,0 @@
import re
import requests
class take_over:
def __init__(self, host):
self.host = host
self.results = ""
self.totalresults = ""
self.fingerprints = ["<title>Squarespace - Domain Not Claimed</title>",
'www.herokucdn.com/error-pages/no-such-app.html',
'<title>Squarespace - No Such Account</title>',
"<p> If you're trying to publish one, <a href=\"https://help.github.com/pages/\">read the full documentation</a> to learn how to set up <strong>GitHub Pages</strong> for your repository, organization, or user account. </p>",
"<p> If you\'re trying to publish one, <a href=\"https://help.github.com/pages/\">read the full documentation</a> to learn how to set up <strong>GitHub Pages</strong> for your repository, organization, or user account. </p>",
"<span class=\"title\">Bummer. It looks like the help center that you are trying to reach no longer exists.</span>",
"<head> <title>The page you\'re looking for could not be found (404)</title> <style> body { color: #666; text-align: center; font-family: \"Helvetica Neue\", Helvetica, Arial, sans-serif; margin: 0; width: 800px; margin: auto; font-size: 14px; } h1 { font-size: 56px; line-height: 100px; font-weight: normal; color: #456; } h2 { font-size: 24px; color: #666; line-height: 1.5em; } h3 { color: #456; font-size: 20px; font-weight: normal; line-height: 28px; } hr { margin: 18px 0; border: 0; border-top: 1px solid #EEE; border-bottom: 1px solid white; } </style> </head>",
'The specified bucket does not exist',
'Bad Request: ERROR: The request could not be satisfied',
'Fastly error: unknown domain:',
"There isn't a Github Pages site here.",
'No such app',
'Unrecognized domain',
'Sorry, this shop is currently unavailable.',
"Whatever you were looking for doesn't currently exist at this address",
'The requested URL was not found on this server.',
'This UserVoice subdomain is currently available!',
'Do you want to register *.wordpress.com?',
'Help Center Closed']
def do_take(self):
try:
print('\t Searching takeovers for ' + self.host)
r = requests.get('https://' + self.host, verify=False)
for x in self.fingerprints:
take_reg = re.compile(x)
self.temp = take_reg.findall(r.text)
if self.temp != []:
print(f'\t\033[91m Takeover detected! - {self.host} \033[1;32;40m')
except Exception as e:
print(e)
def process(self):
self.do_take()

View file

@@ -1,36 +0,0 @@
from lib.core import *
from parsers import myparser
import requests
class search_threatcrowd:
def __init__(self, word):
self.word = word.replace(' ', '%20')
self.results = ""
self.totalresults = ""
self.server = 'www.google.com'
self.hostname = 'www.google.com'
self.quantity = '100'
self.counter = 0
def do_search(self):
try:
urly = 'https://www.threatcrowd.org/searchApi/v2/domain/report/?domain=' + self.word
except Exception as e:
print(e)
headers = {'User-Agent': Core.get_user_agent()}
try:
r = requests.get(urly, headers=headers)
except Exception as e:
print(e)
self.results = r.text
self.totalresults += self.results
def get_hostnames(self):
rawres = myparser.Parser(self.results, self.word)
return rawres.hostnames()
def process(self):
self.do_search()
print('\tSearching results.')

View file

@@ -1,61 +0,0 @@
from discovery.constants import *
from parsers import myparser
import requests
import time
class search_trello:
def __init__(self, word, limit):
self.word = word.replace(' ', '%20')
self.results = ""
self.totalresults = ""
self.server = 'www.google.com'
self.hostname = 'www.google.com'
self.quantity = '100'
self.limit = limit
self.counter = 0
def do_search(self):
try:
urly = 'https://' + self.server + '/search?num=100&start=' + str(
self.counter) + '&hl=en&q=site%3Atrello.com%20' + self.word
except Exception as e:
print(e)
headers = {'User-Agent': googleUA}
try:
r = requests.get(urly, headers=headers)
time.sleep(getDelay())
except Exception as e:
print(e)
self.results = r.text
self.totalresults += self.results
def get_emails(self):
rawres = myparser.Parser(self.totalresults, self.word)
return rawres.emails()
def get_urls(self):
try:
rawres = myparser.Parser(self.totalresults, 'trello.com')
trello_urls = rawres.urls()
visited = set()
for url in trello_urls:
# Iterate through Trello URLs gathered and visit them, append text to totalresults.
if url not in visited: # Make sure visiting unique URLs.
visited.add(url)
self.totalresults += requests.get(url=url, headers={'User-Agent': googleUA}).text
rawres = myparser.Parser(self.totalresults, self.word)
return rawres.hostnames(), trello_urls
except Exception as e:
print(f'Error occurred: {e}')
def process(self):
while self.counter < self.limit:
self.do_search()
if search(self.results):
time.sleep(getDelay() * 5)
else:
time.sleep(getDelay())
self.counter += 100
print(f'\tSearching {self.counter} results.')

View file

@@ -1,64 +0,0 @@
from discovery.constants import *
from lib.core import *
from parsers import myparser
import requests
import time
class search_twitter:
def __init__(self, word, limit):
self.word = word.replace(' ', '%20')
self.results = ""
self.totalresults = ""
self.server = 'www.google.com'
self.hostname = 'www.google.com'
self.quantity = '100'
self.limit = int(limit)
self.counter = 0
def do_search(self):
try:
urly = 'https://' + self.server + '/search?num=100&start=' + str(self.counter) + '&hl=en&meta=&q=site%3Atwitter.com%20intitle%3A%22on+Twitter%22%20' + self.word
except Exception as e:
print(e)
headers = {'User-Agent': Core.get_user_agent()}
try:
r = requests.get(urly, headers=headers)
except Exception as e:
print(e)
self.results = r.text
self.totalresults += self.results
def get_people(self):
rawres = myparser.Parser(self.totalresults, self.word)
to_parse = rawres.people_twitter()
# fix invalid handles that look like @user other_output
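# e.g. '@user I agree' -> '@user', '@handle.' -> '@handle', '@who...else' -> '@who'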
handles = set()
for handle in to_parse:
handle = str(handle).strip()
if len(handle) > 2:
if ' ' in handle:
handle = handle.split(' ')[0]
# strip off period at the end if exists
if handle[len(handle) - 1] == '.':
handle = handle[:len(handle) - 1]
# strip periods if contains three of them
if '...' in handle:
handle = handle[:handle.index('.')]
if '-' == handle[0]:
handle = handle[1:]
if '-' == handle[1]:
handle = handle[0] + handle[2:]
handles.add(handle)
if '@' in handles:
handles.remove('@')
return handles
def process(self):
while self.counter < self.limit:
self.do_search()
time.sleep(getDelay())
self.counter += 100
print(f'\tSearching {self.counter} results.')

View file

@@ -1,36 +0,0 @@
from lib.core import *
from parsers import myparser
import requests
class SearchVirustotal:
def __init__(self, word):
self.word = word.replace(' ', '%20')
self.results = ""
self.totalresults = ""
self.server = 'www.google.com'
self.hostname = 'www.google.com'
self.quantity = '100'
self.counter = 0
def do_search(self):
try:
urly = 'https://www.virustotal.com/en/domain/' + self.word + '/information/'
except Exception as e:
print(e)
headers = {'User-Agent': Core.get_user_agent()}
try:
r = requests.get(urly, headers=headers)
except Exception as e:
print(e)
self.results = r.text
self.totalresults += self.results
def get_hostnames(self):
rawres = myparser.Parser(self.results, self.word)
return rawres.hostnames()
def process(self):
self.do_search()
print('\tSearching results.')

View file

@@ -1,32 +0,0 @@
try:
import wfuzz
except ImportError:
pass
class search_wfuzz:
def __init__(self, host):
self.host = host
self.results = ""
self.totalresults = ""
def do_search(self):
try:
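# wfuzz substitutes each wordlist entry for the literal FUZZ token in the URL;
# hc=[404] hides 404 responses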
for r in wfuzz.fuzz(url='https://'+self.host+'/FUZZ', hc=[404], payloads=[('file', dict(fn='wordlists/general/common.txt'))]):
print(r)
self.results += str(r)  # fuzz results are objects; coerce to str before concatenating
except Exception as e:
print(e)
self.totalresults += self.results
def get_results(self):
return self.totalresults
def do_check(self):
return
def process(self):
self.do_search()
print('\tSearching Wfuzz')

View file

@@ -1,49 +0,0 @@
from discovery.constants import *
from lib.core import *
from parsers import myparser
import requests
import time
class search_yahoo:
def __init__(self, word, limit):
self.word = word
self.total_results = ""
self.server = 'search.yahoo.com'
self.hostname = 'search.yahoo.com'
self.limit = limit
self.counter = 0
def do_search(self):
url = 'http://' + self.server + '/search?p=\"%40' + self.word + '\"&b=' + str(self.counter) + '&pz=10'
headers = {
'Host': self.hostname,
'User-agent': Core.get_user_agent()
}
h = requests.get(url=url, headers=headers)
self.total_results += h.text
def process(self):
while self.counter <= self.limit and self.counter <= 1000:
self.do_search()
time.sleep(getDelay())
print(f'\tSearching {self.counter} results.')
self.counter += 10
def get_emails(self):
rawres = myparser.Parser(self.total_results, self.word)
toparse_emails = rawres.emails()
emails = set()
# strip out numbers and dashes for emails that look like xxx-xxx-xxxemail@host.tld
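# e.g. '800-555-0100jdoe@example.com' -> 'jdoe@example.com'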
for email in toparse_emails:
email = str(email)
if '-' in email and email[0].isdigit() and email.index('-') <= 9:
while email[0] == '-' or email[0].isdigit():
email = email[1:]
emails.add(email)
return list(emails)
def get_hostnames(self):
rawres = myparser.Parser(self.total_results, self.word)
return rawres.hostnames()

View file

@@ -1,73 +0,0 @@
from discovery.constants import *
from lib.core import *
from parsers import myparser
import re
import requests
import time
class search_yandex:
def __init__(self, word, limit, start):
self.word = word
self.results = ""
self.totalresults = ""
self.server = 'yandex.com'
self.hostname = 'yandex.com'
self.limit = limit
self.counter = start
def do_search(self):
url = 'http://' + self.server + '/search?text=%40' + self.word + '&numdoc=50&lr=' + str(self.counter)
headers = {
'Host': self.hostname,
'User-agent': Core.get_user_agent()
}
h = requests.get(url=url, headers=headers)
self.results = h.text
self.totalresults += self.results
def do_search_files(self, files): # TODO
url = 'http://' + self.server + '/search?text=%40' + self.word + '&numdoc=50&lr=' + str(self.counter)
headers = {
'Host': self.hostname,
'User-agent': Core.get_user_agent()
}
h = requests.get(url=url, headers=headers)
self.results = h.text
self.totalresults += self.results
def check_next(self):
renext = re.compile('topNextUrl')
nextres = renext.findall(self.results)
if nextres != []:
nexty = '1'
print(str(self.counter))
else:
nexty = '0'
return nexty
def get_emails(self):
rawres = myparser.Parser(self.totalresults, self.word)
return rawres.emails()
def get_hostnames(self):
rawres = myparser.Parser(self.totalresults, self.word)
return rawres.hostnames()
def get_files(self):
rawres = myparser.Parser(self.totalresults, self.word)
return rawres.fileurls(self.files)  # NOTE: self.files is never initialized in this class
def process(self):
while self.counter <= self.limit:
self.do_search()
self.counter += 50
print(f'\tSearching {self.counter} results.')
def process_files(self, files):
while self.counter < self.limit:
self.do_search_files(files)
time.sleep(getDelay())
self.counter += 50