mirror of
https://github.com/laramies/theHarvester.git
synced 2024-09-20 15:26:31 +08:00
Added Trello Search
New plugin to search public Trello boards
This commit is contained in:
parent
bd67719f49
commit
e1ef6288ba
|
@ -8,6 +8,7 @@ def __init__(self, results):
|
|||
self.ipaddresses = []
|
||||
self.soup = BeautifulSoup(results.results,features="html.parser")
|
||||
self.hostnames = []
|
||||
self.urls = []
|
||||
self.numberofpages = 0
|
||||
|
||||
def search_hostnames(self):
|
||||
|
@ -36,4 +37,7 @@ def search_numberofpages(self):
|
|||
self.numberofpages+=1
|
||||
return self.numberofpages
|
||||
except Exception as e:
|
||||
print("Error occurred: " + str(e))
|
||||
print("Error occurred: " + str(e))
|
||||
|
||||
|
||||
|
|
@ -22,4 +22,5 @@
|
|||
"takeover",
|
||||
"googlecertificates",
|
||||
"googleCSE",
|
||||
"censys"]
|
||||
"censys",
|
||||
"trello"]
|
|
@ -40,6 +40,7 @@ def do_search(self):
|
|||
links = self.get_info(r.text)
|
||||
for link in links:
|
||||
params = {'User-Agent': random.choice(self.userAgent)}
|
||||
print ("\t\tSearching " + link)
|
||||
r = requests.get(link, headers=params)
|
||||
time.sleep(1)
|
||||
self.results = r.text
|
||||
|
|
47
discovery/trello.py
Normal file
47
discovery/trello.py
Normal file
|
@ -0,0 +1,47 @@
|
|||
import requests
|
||||
import myparser
|
||||
import censysparser
|
||||
|
||||
class search_trello:
    """Search Google for public Trello boards that mention a word.

    Pages through Google results for ``site:trello.com <word>``,
    accumulating the raw HTML so that e-mail addresses and board URLs
    can be extracted afterwards with myparser.
    """

    def __init__(self, word, limit):
        # Spaces must be URL-encoded before the word is embedded in the
        # Google query string.
        self.word = word.replace(' ', '%20')
        self.results = ""        # HTML of the most recently fetched page
        self.totalresults = ""   # concatenation of every page fetched
        self.server = "www.google.com"
        self.hostname = "www.google.com"
        self.userAgent = "(Mozilla/5.0 (Windows; U; Windows NT 6.0;en-US; rv:1.9.2) Gecko/20100116 Firefox/3.7"
        self.quantity = "100"
        self.limit = limit
        self.counter = 0         # start offset for Google pagination

    def do_search(self):
        """Fetch one page of Google results and append it to totalresults.

        On a network error the page is skipped instead of crashing: the
        original code referenced ``r`` after a failed request and died
        with a NameError.
        """
        # Building the URL cannot raise for str inputs, so the original
        # try/except around this concatenation was dead weight.
        urly = ("https://" + self.server + "/search?num=100&start="
                + str(self.counter)
                + "&hl=en&meta=&q=site%3Atrello.com%20" + self.word)
        headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:34.0) Gecko/20100101 Firefox/34.0'}
        try:
            r = requests.get(urly, headers=headers)
        except Exception as e:
            print(e)
            return  # skip this page; do not touch self.results
        self.results = r.text
        self.totalresults += self.results

    def get_emails(self):
        """Return the e-mail addresses found in the accumulated results."""
        rawres = myparser.parser(self.totalresults, self.word)
        return rawres.emails()

    def get_urls(self):
        """Return the unique Trello URLs found, or an empty list on error."""
        try:
            urls = myparser.parser(self.totalresults, "trello.com")
            return urls.urls()
        except Exception as e:
            print("Error occurred: " + str(e))
            # Return an empty list rather than None so callers can
            # iterate over the result unconditionally.
            return []

    def process(self):
        """Page through Google results until ``limit`` results are covered."""
        while self.counter < self.limit:
            self.do_search()
            self.counter += 100
            print("\tSearching " + str(self.counter) + " results..")
|
10
myparser.py
10
myparser.py
|
@ -138,6 +138,16 @@ def hostnames(self):
|
|||
hostnames = self.unique()
|
||||
return hostnames
|
||||
|
||||
def urls(self):
    """Extract the unique Trello board URLs from the harvested results.

    Fixes over the original: the regex is a raw string (the old
    ``'\-'``/``'\.'`` escapes are invalid in non-raw literals on newer
    Pythons) and the dot in ``trello.com`` is escaped so the pattern no
    longer matches e.g. ``trelloXcom``.
    """
    found = re.finditer(r'https://(www\.)?trello\.com/([a-zA-Z0-9\-_.]+/?)*', self.results)
    for match in found:
        self.temp.append(match.group())
    urls = self.unique()
    return urls
|
||||
|
||||
def set(self):
|
||||
reg_sets = re.compile('>[a-zA-Z0-9]*</a></font>')
|
||||
self.temp = reg_sets.findall(self.results)
|
||||
|
|
|
@ -133,7 +133,7 @@ def start(argv):
|
|||
dnstld = True
|
||||
elif opt == '-b':
|
||||
engines = set(arg.split(','))
|
||||
supportedengines = set(["baidu","bing","crtsh","censys","bingapi","dogpile","google","googleCSE","virustotal","threatcrowd","googleplus","google-profiles",'google-certificates',"linkedin","pgp","twitter","vhost","yahoo","netcraft","hunter","all"])
|
||||
supportedengines = set(["baidu","bing","crtsh","censys","bingapi","dogpile","google","googleCSE","virustotal","threatcrowd","googleplus","google-profiles",'google-certificates',"linkedin","pgp","twitter","trello","vhost","yahoo","netcraft","hunter","all"])
|
||||
if set(engines).issubset(supportedengines):
|
||||
print("found supported engines")
|
||||
print(("[-] Starting harvesting process for domain: " + word + "\n"))
|
||||
|
@ -313,7 +313,18 @@ def start(argv):
|
|||
search.process(5)
|
||||
all_emails = []
|
||||
all_hosts = search.get_hostnames()
|
||||
|
||||
|
||||
elif engineitem == "trello":
|
||||
print("[-] Searching in Trello:")
|
||||
from discovery import trello
|
||||
#import locally or won't work
|
||||
search = trello.search_trello(word,limit)
|
||||
search.process()
|
||||
all_emails = search.get_emails()
|
||||
all_hosts = search.get_urls()
|
||||
for x in all_hosts:
|
||||
print (x)
|
||||
sys.exit()
|
||||
|
||||
elif engineitem == "all":
|
||||
print(("Full harvest on " + word))
|
||||
|
|
Loading…
Reference in a new issue