import re

import requests
import urllib3

# Suppress the InsecureRequestWarning raised by the verify=False request below.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# Module-level configuration for the S3 bucket check. These names were
# referenced but never defined in the original; the values here are
# placeholders so the module runs standalone.
S3_URL = 'http://s3-1-w.amazonaws.com'  # assumed S3 wildcard endpoint
KEYWORDS = []  # keywords that mark a bucket as "interesting"


class _Args:
    """Placeholder for the argparse namespace the original code expected."""
    only_interesting = False
    ignore_rate_limiting = False


ARGS = _Args()


class s3_scanner:
    def __init__(self, host):
        self.host = host
        self.results = ""
        self.totalresults = ""
        self.session = requests.Session()
        # Response fingerprints that indicate a dangling DNS record pointing
        # at a deprovisioned third-party service (subdomain takeover).
        self.fingerprints = [
            'www.herokucdn.com/error-pages/no-such-app.html',
            'Squarespace - No Such Account',
            "If you're trying to publish one, read the full documentation "
            'to learn how to set up GitHub Pages for your repository, '
            'organization, or user account.',
            'Bummer. It looks like the help center that you are trying to '
            'reach no longer exists.',
            "The page you're looking for could not be found (404)",
        ]

    def __log(self, url):
        # Minimal logger: accumulate findings for later retrieval.
        self.results += url + '\n'

    def __check_http(self, bucket_url):
        # HEAD the S3 wildcard endpoint with the candidate bucket name as the
        # Host header; a 307 redirect means the bucket exists.
        check_response = self.session.head(
            S3_URL, timeout=3, headers={'Host': bucket_url})

        # if not ARGS.ignore_rate_limiting \
        #         and (check_response.status_code == 503 and check_response.reason == 'Slow Down'):
        #     self.q.rate_limited = True
        #     # Add it back to the queue for re-processing.
        #     self.q.put(bucket_url)

        if check_response.status_code == 307:  # valid bucket; check if it's public
            new_bucket_url = check_response.headers['Location']
            # Only fetch the body (GET) when it must be scanned for keywords;
            # otherwise a HEAD is enough to confirm public access.
            bucket_response = requests.request(
                'GET' if ARGS.only_interesting else 'HEAD', new_bucket_url, timeout=3)

            if bucket_response.status_code == 200 \
                    and (not ARGS.only_interesting
                         or any(keyword in bucket_response.text for keyword in KEYWORDS)):
                print(f"Found bucket '{new_bucket_url}'")
                self.__log(new_bucket_url)

    def do_s3(self):
        try:
            print('\t Searching takeovers for ' + self.host)
            r = requests.get('https://' + self.host, verify=False)
            for fingerprint in self.fingerprints:
                # The fingerprints are literal strings, so a plain substring
                # test replaces the original re.compile/findall, which
                # mis-handled metacharacters such as the parentheses in '(404)'.
                if fingerprint in r.text:
                    print('\t\033[91m Takeover detected! - ' + self.host + '\033[0m')
                    break
        except Exception as e:
            print(e)

    def process(self):
        self.do_s3()  # was self.do_take(), a method that does not exist in this class