mirror of
https://github.com/laramies/theHarvester.git
synced 2024-09-22 00:06:30 +08:00
Fix Intelx module
This commit is contained in:
parent
c9ddb1620d
commit
5aae4510cb
|
@ -2,47 +2,63 @@
|
|||
from theHarvester.lib.core import *
|
||||
from theHarvester.parsers import intelxparser
|
||||
import asyncio
|
||||
import json
|
||||
import requests
|
||||
|
||||
|
||||
class SearchIntelx:
    """Search the Intelligence X phonebook API for items related to a term.

    Submits a phonebook search for ``word``, waits for the API to populate
    results, fetches them, and hands the raw JSON to
    ``intelxparser.Parser`` which fills ``self.info``.
    """

    def __init__(self, word):
        # Term (typically a domain) to search the phonebook for.
        self.word = word
        # default key is public key
        self.key = Core.intelx_key()
        if self.key is None:
            raise MissingKey('Intelx')
        # API endpoint; request paths below are joined with a leading '/'.
        self.database = 'https://2.intelx.io'
        # Raw JSON response from the result-fetch request (None until do_search runs).
        self.results = None
        # Tuple produced by intelxparser.Parser.parse_dictionaries.
        self.info = ()
        # Maximum number of phonebook results requested.
        self.limit = 10000
        self.proxy = False
        # -1 means "no offset" for the result-fetch request.
        self.offset = -1

    async def do_search(self):
        """Run a phonebook search and store the raw JSON in ``self.results``.

        Any failure is printed and swallowed, leaving ``self.results`` as-is
        (best-effort semantics matching the other theHarvester modules).
        """
        try:
            # Based on: https://github.com/IntelligenceX/SDK/blob/master/Python/intelxapi.py
            # API requests self identification
            # https://intelx.io/integrations
            headers = {'x-key': self.key, 'User-Agent': f'{Core.get_user_agent()}-theHarvester'}
            # sort: 2 means sort by most relevant; media: 0 means all media types.
            data = {
                "term": self.word,
                "buckets": [],
                "lookuplevel": 0,
                "maxresults": self.limit,
                "timeout": 5,
                "datefrom": "",
                "dateto": "",
                "sort": 2,
                "media": 0,
                "terminate": [],
                "target": 0
            }

            # NOTE(review): requests.post is a synchronous call inside a coroutine
            # and blocks the event loop for the duration of the HTTP round trip;
            # consider AsyncFetcher.post_fetch or run_in_executor — confirm why the
            # blocking client was chosen for this endpoint before changing it.
            total_resp = requests.post(f'{self.database}/phonebook/search', headers=headers, json=data)
            # Decode via Response.json() rather than json.loads(total_resp.text)
            # so requests handles the response encoding.
            phonebook_id = total_resp.json()['id']
            # Give the API time to populate results for the returned search id.
            await asyncio.sleep(2)

            # Fetch results from phonebook based on ID
            resp = await AsyncFetcher.fetch_all(
                [f'{self.database}/phonebook/search/result?id={phonebook_id}&limit={self.limit}&offset={self.offset}'],
                headers=headers, json=True, proxy=self.proxy)
            resp = resp[0]
            # TODO: Check if more results can be gathered depending on status
            self.results = resp
        except Exception as e:
            print(f'An exception has occurred in Intelx: {e}')

    async def process(self, proxy=False):
        """Run the search and parse the raw results into ``self.info``.

        :param proxy: when truthy, route the async result fetch through the
                      configured proxy.
        """
        self.proxy = proxy
        await self.do_search()
        intelx_parser = intelxparser.Parser()
        self.info = await intelx_parser.parse_dictionaries(self.results)

    async def get_emails(self):
        # First element of the tuple produced by parse_dictionaries.
        return self.info[0]
|
||||
|
|
Loading…
Reference in a new issue