theHarvester/discovery/googlesearch.py

from discovery.constants import *
from parsers import myparser
import requests
import time


class search_google:

    def __init__(self, word, limit, start):
        self.word = word
        self.results = ""
        self.totalresults = ""
        self.server = 'www.google.com'
        self.dorks = []
        self.links = []
        self.database = 'https://www.google.com/search?q='
        self.quantity = '100'
        self.limit = limit
        self.counter = start

    def do_search(self):
        try:  # Do normal scraping.
            urly = 'http://' + self.server + '/search?num=' + self.quantity + '&start=' + str(
                self.counter) + '&hl=en&meta=&q=%40\"' + self.word + '\"'
        except Exception as e:
            print(e)
        try:
            headers = {'User-Agent': googleUA}
            r = requests.get(urly, headers=headers)
        except Exception as e:
            print(e)
        self.results = r.text
        if search(self.results):
            time.sleep(getDelay() * 5)  # Sleep for a longer time.
        else:
            time.sleep(getDelay())
        self.totalresults += self.results
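
    # Illustrative note (placeholder target, not from the original source): with word='example.com',
    # counter=0 and the defaults set in __init__, do_search requests a URL of roughly this shape:
    #   http://www.google.com/search?num=100&start=0&hl=en&meta=&q=%40"example.com"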

    def do_search_profiles(self):
        try:
            urly = 'http://' + self.server + '/search?num=' + self.quantity + '&start=' + str(
                self.counter) + '&hl=en&meta=&q=site:www.google.com%20intitle:\"Google%20Profile\"%20\"Companies%20I%27ve%20worked%20for\"%20\"at%20' + self.word + '\"'
        except Exception as e:
            print(e)
        try:
            headers = {'User-Agent': googleUA}
            r = requests.get(urly, headers=headers)
        except Exception as e:
            print(e)
        self.results = r.text
        if search(self.results):
            time.sleep(getDelay() * 5)  # Sleep for a longer time.
        else:
            time.sleep(getDelay())
        self.totalresults += self.results

    def get_emails(self):
        rawres = myparser.Parser(self.totalresults, self.word)
        return rawres.emails()

    def get_hostnames(self):
        rawres = myparser.Parser(self.totalresults, self.word)
        return rawres.hostnames()

    def get_files(self):
        # NOTE: self.files is not initialised in __init__; the caller is expected to set it
        # (a list of file extensions) before calling this method.
        rawres = myparser.Parser(self.totalresults, self.word)
        return rawres.fileurls(self.files)

    def get_profiles(self):
        rawres = myparser.Parser(self.totalresults, self.word)
        return rawres.profiles()

    def process(self, google_dorking):
        if google_dorking is False:
            while self.counter <= self.limit and self.counter <= 1000:
                self.do_search()
                print(f'\tSearching {self.counter} results.')
                self.counter += 100
        else:  # Google dorking is true.
            self.counter = 0  # Reset the counter.
            print('\n')
            print('[-] Searching with Google Dorks: ')
            while self.counter <= self.limit and self.counter <= 200:  # Only 200 dorks in the list.
                self.googledork()  # Run the Google dorking search requested by the user.
                print(f'\tSearching {self.counter} results.')
                self.counter += 100

    def process_profiles(self):
        while self.counter < self.limit:
            self.do_search_profiles()
            time.sleep(getDelay())
            self.counter += 100
            print(f'\tSearching {self.counter} results.')

    def append_dorks(self):
        # Wrap in try-except in case the wordlist path is wrong or missing.
        try:
            with open('wordlists/dorks.txt', mode='r') as fp:
                self.dorks = [dork.strip() for dork in fp]
        except FileNotFoundError as error:
            print(error)

    def construct_dorks(self):
        # Format is: site:targetwebsite.com + space + inurl:admindork
        colon = '%3A'
        plus = '%2B'
        space = '+'
        period = '%2E'
        double_quote = '%22'
        asterisk = '%2A'
        left_bracket = '%5B'
        right_bracket = '%5D'
        question_mark = '%3F'
        slash = '%2F'
        single_quote = '%27'
        ampersand = '%26'
        left_paren = '%28'
        right_paren = '%29'
        pipe = '%7C'
        # Percent-encode the special characters in each dork before appending it to the base search URL.
        self.links = [self.database + space + self.word + space +
                      str(dork).replace(':', colon).replace('+', plus).replace('.', period).replace('"', double_quote)
                      .replace('*', asterisk).replace('[', left_bracket).replace(']', right_bracket)
                      .replace('?', question_mark).replace(' ', space).replace('/', slash).replace("'", single_quote)
                      .replace('&', ampersand).replace('(', left_paren).replace(')', right_paren).replace('|', pipe)
                      for dork in self.dorks]
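
    # Illustrative note (placeholder values, not from the original source): with word='example.com'
    # and a dork such as 'inurl:admin', construct_dorks appends a link of roughly this shape
    # to self.links:
    #   https://www.google.com/search?q=+example.com+inurl%3Aadmin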

    def googledork(self):
        self.append_dorks()  # Load the dork wordlist into self.dorks.
        self.construct_dorks()  # Build the encoded search links.
        if 0 <= self.counter <= 100:
            self.send_dork(start=0, end=100)
        elif 100 <= self.counter <= 200:
            self.send_dork(start=101, end=200)
        else:  # Only 200 dorks are used, to prevent Google from blocking the IP.
            pass

    def send_dork(self, start, end):  # Helper method to reduce code duplication.
        # Set the user agent to try to keep Google from blocking the IP.
        headers = {'User-Agent': googleUA}
        for i in range(start, end):
            try:
                link = self.links[i]  # Get the link from the dork list.
                req = requests.get(link, headers=headers)
                self.results = req.text
                if search(self.results):
                    time.sleep(getDelay() * 5)  # Sleep for a longer time.
                else:
                    time.sleep(getDelay())
                self.totalresults += self.results
            except Exception:
                continue
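

if __name__ == '__main__':
    # Minimal usage sketch, assuming the module is run inside theHarvester's package layout
    # (so 'discovery.constants' and 'parsers.myparser' resolve); the target domain, limit and
    # start values below are placeholders, not values taken from the original code.
    google = search_google('example.com', limit=200, start=0)
    google.process(google_dorking=False)
    print(google.get_emails())
    print(google.get_hostnames())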