Implement concurrent page fetching with grequests in exaleadsearch and fix the module.

This commit is contained in:
NotoriousRebel 2019-08-17 23:18:36 -04:00
parent 0c3dac58a4
commit 3478aa3b15

View file

@ -2,9 +2,9 @@
from theHarvester.lib.core import * from theHarvester.lib.core import *
from theHarvester.parsers import myparser from theHarvester.parsers import myparser
import re import re
import requests
import time import time
import grequests
import requests
class search_exalead: class search_exalead:
@ -12,27 +12,33 @@ def __init__(self, word, limit, start):
self.word = word self.word = word
self.files = 'pdf' self.files = 'pdf'
self.results = "" self.results = ""
self.totalresults = "" self.total_results = ""
self.server = 'www.exalead.com' self.server = 'www.exalead.com'
self.hostname = 'www.exalead.com' self.hostname = 'www.exalead.com'
self.limit = limit self.limit = limit
self.counter = start self.counter = start
def do_search(self):
    """Fetch all result pages for ``self.word`` concurrently with grequests.

    Builds one URL per 50-result page from ``self.counter`` up to
    ``self.limit`` and appends each decoded response body to
    ``self.total_results``.
    """
    base_url = f'https://{self.server}/search/web/results/?q=%40{self.word}&elements_per_page=50&start_index=xx'
    headers = {
        'Host': self.hostname,
        'Referer': ('http://' + self.hostname + '/search/web/results/?q=%40' + self.word),
        'User-agent': Core.get_user_agent()
    }
    # range() already stops before self.limit, so the old
    # `if num <= self.limit` filter was redundant and is dropped.
    urls = [base_url.replace("xx", str(num)) for num in range(self.counter, self.limit, 50)]
    # Prepare all requests up front; grequests.imap sends them concurrently.
    reqs = [grequests.get(url, headers=headers, timeout=5) for url in urls]
    time.sleep(3)  # brief pause — presumably to soften rate limiting; TODO confirm
    responses = grequests.imap(tuple(reqs), size=3)
    for response in responses:
        # TODO if decoded content contains information about solving captcha print message to user to visit website
        # TODO to solve it or use a vpn as it appears to be ip based
        self.total_results += response.content.decode('UTF-8')
def do_search_files(self, files):
    """Run a single filetype-restricted Exalead search and accumulate the HTML.

    NOTE(review): the *files* argument is not used; the query is built from
    ``self.files`` — confirm whether callers expect it to be applied.
    """
    # The previous f-string ended with a stray trailing space
    # ('...start_index={self.counter} '), which was sent as part of the
    # request URL; it is removed here.
    url = f'https://{self.server}/search/web/results/?q=%40{self.word}filetype:{self.files}' \
          f'&elements_per_page=50&start_index={self.counter}'
    headers = {
        'Host': self.hostname,
        'Referer': ('http://' + self.hostname + '/search/web/results/?q=%40' + self.word),
        'User-agent': Core.get_user_agent()
    }
    h = requests.get(url=url, headers=headers)
    self.results = h.text
    self.total_results += self.results
def check_next(self): def check_next(self):
renext = re.compile('topNextUrl') renext = re.compile('topNextUrl')
@ -53,22 +59,20 @@ def check_next(self):
return nexty return nexty
def get_emails(self):
    """Parse e-mail addresses out of the accumulated result pages."""
    parser = myparser.Parser(self.total_results, self.word)
    return parser.emails()
def get_hostnames(self):
    """Parse hostnames out of the accumulated result pages."""
    parser = myparser.Parser(self.total_results, self.word)
    return parser.hostnames()
def get_files(self):
    """Parse file URLs matching ``self.files`` out of the accumulated pages."""
    parser = myparser.Parser(self.total_results, self.word)
    return parser.fileurls(self.files)
def process(self): def process(self):
while self.counter <= self.limit: print('Searching 0 results')
self.do_search() self.do_search()
self.counter += 50
print(f'\tSearching {self.counter} results.')
def process_files(self, files): def process_files(self, files):
while self.counter < self.limit: while self.counter < self.limit: