Multiple updates -> 3.1.3
- Removed browser engine emulation (closes #220, closes #217, closes #200)
- Fixed a few bugs
- Added a plugin to scan for outdated JS libraries
- Improved crawling and DOM scanning
s0md3v authored Apr 6, 2019
2 parents e66cfdd + 3d7fbca commit 7684889
Showing 15 changed files with 1,835 additions and 96 deletions.
7 changes: 0 additions & 7 deletions .travis.yml
@@ -1,7 +1,5 @@
 language: python
 cache: pip
-addons:
-  firefox: "45.4.0esr"
 os:
 - linux
 python:
@@ -10,11 +8,6 @@ install:
 - pip install -r requirements.txt
 - pip install flake8
 before_script:
-# download and extract geckodrive to /usr/local/bin
-- wget https://github.com/mozilla/geckodriver/releases/download/v0.23.0/geckodriver-v0.23.0-linux64.tar.gz
-- mkdir geckodriver
-- tar -xzf geckodriver-v0.23.0-linux64.tar.gz -C geckodriver
-- export PATH=$PATH:$PWD/geckodriver
 # stop the build if there are Python syntax errors or undefined names
 - flake8 . --count --select=E901,E999,F821,F822,F823 --show-source --statistics
 # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
 - flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
6 changes: 6 additions & 0 deletions CHANGELOG.md
@@ -1,3 +1,9 @@
+### 3.1.3
+- Removed browser engine emulation
+- Fixed a few bugs
+- Added a plugin to scan for outdated JS libraries
+- Improved crawling and DOM scanning
+
 ### 3.1.2
 - Fixed POST data handling
 - Support for JSON POST data
6 changes: 3 additions & 3 deletions README.md
@@ -49,7 +49,7 @@ Apart from that, XSStrike has crawling, fuzzing, parameter discovery, WAF detect
 - Context analysis
 - Configurable core
 - WAF detection & evasion
-- Browser engine integration for zero false positive rate
+- Outdated JS lib scanning
 - Intelligent payload generator
 - Handmade HTML & JavaScript parser
 - Powerful fuzzing engine
@@ -65,7 +65,6 @@ Apart from that, XSStrike has crawling, fuzzing, parameter discovery, WAF detect
 - [Compatibility & Dependencies](https://github.com/s0md3v/XSStrike/wiki/Compatibility-&-Dependencies)
 
 ### FAQ
-- [There's some error related to `geckodriver`.](https://github.com/s0md3v/XSStrike/wiki/FAQ#theres-some-error-related-to-geckodriver)
 - [It says fuzzywuzzy isn't installed but it is.](https://github.com/s0md3v/XSStrike/wiki/FAQ#it-says-fuzzywuzzy-is-not-installed-but-its)
 - [What's up with Blind XSS?](https://github.com/s0md3v/XSStrike/wiki/FAQ#whats-up-with-blind-xss)
 - [Why XSStrike boasts that it is the most advanced XSS detection suite?](https://github.com/s0md3v/XSStrike/wiki/FAQ#why-xsstrike-boasts-that-it-is-the-most-advanced-xss-detection-suite)
@@ -103,4 +102,5 @@ Ways to contribute

 Licensed under the GNU GPLv3, see [LICENSE](LICENSE) for more information.
 
-The WAF signatures in `/db/wafSignatures.json` are taken & modified from [sqlmap](https://github.com/sqlmapproject/sqlmap). I extracted them from sqlmap's waf detection modules which can found [here](https://github.com/sqlmapproject/sqlmap/blob/master/waf/) and converted them to JSON.
+The WAF signatures in `/db/wafSignatures.json` are taken & modified from [sqlmap](https://github.com/sqlmapproject/sqlmap). I extracted them from sqlmap's waf detection modules which can found [here](https://github.com/sqlmapproject/sqlmap/blob/master/waf/) and converted them to JSON.\
+`/plugins/retireJS.py` is a modified version of [retirejslib](https://github.com/FallibleInc/retirejslib/).
28 changes: 0 additions & 28 deletions core/browserEngine.py

This file was deleted.

2 changes: 1 addition & 1 deletion core/config.py
@@ -1,4 +1,4 @@
-changes = '''better dom xss scanning;add headers from command line;many bug fixes'''
+changes = '''Removed browser engine emulation;Fixed a few bugs;Added a plugin to scan for outdated JS libraries;Improved crawling and DOM scanning'''
 globalVariables = {} # it holds variables during runtime for collaboration across modules
 
 defaultEditor = 'nano'
2 changes: 1 addition & 1 deletion core/dom.py
@@ -7,7 +7,7 @@ def dom(response):
     highlighted = []
     sources = r'''document\.(URL|documentURI|URLUnencoded|baseURI|cookie|referrer)|location\.(href|search|hash|pathname)|window\.name|history\.(pushState|replaceState)(local|session)Storage'''
     sinks = r'''eval|evaluate|execCommand|assign|navigate|getResponseHeaderopen|showModalDialog|Function|set(Timeout|Interval|Immediate)|execScript|crypto.generateCRMFRequest|ScriptElement\.(src|text|textContent|innerText)|.*?\.onEventName|document\.(write|writeln)|.*?\.innerHTML|Range\.createContextualFragment|(document|window)\.location'''
-    scripts = re.findall(r'(?i)(?s)<scrip[^>]*(.*?)</script>', response)
+    scripts = re.findall(r'(?i)(?s)<script[^>]*>(.*?)</script>', response)
     for script in scripts:
         script = script.split('\n')
         num = 1
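The one-line change above fixes a real bug: the old pattern dropped both the `t` in `script` and the closing `>` of the opening tag, so the lazy capture group began inside the tag and swallowed the leftover `>` along with the script body. A quick standalone check of both patterns (not part of the commit):

```python
import re

html = '<script type="text/javascript">var q = location.hash;</script>'

# Old pattern: [^>]* consumes the attributes but nothing consumes the
# closing '>', so the capture group picks it up along with the body.
old = re.findall(r'(?i)(?s)<scrip[^>]*(.*?)</script>', html)
print(old)  # ['>var q = location.hash;']

# Fixed pattern: the opening tag is fully consumed before capturing,
# so only the script body is returned for source/sink matching.
new = re.findall(r'(?i)(?s)<script[^>]*>(.*?)</script>', html)
print(new)  # ['var q = location.hash;']
```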
6 changes: 5 additions & 1 deletion core/photon.py
@@ -3,6 +3,7 @@
 from urllib.parse import urlparse
 
 
+from plugins.retireJs import retireJs
 from core.utils import getUrl, getParams
 from core.requester import requester
 from core.zetanize import zetanize
@@ -36,6 +37,7 @@ def rec(target):
                 inps.append({'name': name, 'value': value})
             forms.append({0: {'action': url, 'method': 'get', 'inputs': inps}})
         response = requester(url, params, headers, True, delay, timeout).text
+        retireJs(url, response)
         forms.append(zetanize(response))
         matches = findall(r'<[aA].*href=["\']{0,1}(.*?)["\']', response)
         for link in matches: # iterate over the matches
@@ -53,9 +55,11 @@ def rec(target):
             storage.add(main_url + '/' + link)
     for x in range(level):
         urls = storage - processed  # urls to crawl = all urls - urls that have been crawled
+        # for url in urls:
+        #     rec(url)
         threadpool = concurrent.futures.ThreadPoolExecutor(
             max_workers=threadCount)
         futures = (threadpool.submit(rec, url) for url in urls)
-        for i, _ in enumerate(concurrent.futures.as_completed(futures)):
+        for i in concurrent.futures.as_completed(futures):
             pass
     return [forms, processed]
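This hunk is the heart of photon's level-by-level crawl: each pass takes every URL seen so far minus those already crawled, fans them out to a thread pool, and the `as_completed` loop simply blocks until the level is drained (the commented-out sequential loop is left in as a reference). A minimal standalone sketch of the same pattern — `fetch` and `extract_links` are hypothetical stand-ins for `requester` and the href regex:

```python
import concurrent.futures
import re

def fetch(url):           # hypothetical stand-in for core.requester.requester
    return '<a href="%s/next">next</a>' % url

def extract_links(html):  # hypothetical stand-in for the href regex above
    return re.findall(r'<[aA].*?href=["\']?(.*?)["\']', html)

def crawl(seed, level=2, threadCount=10):
    storage = {seed}    # every URL discovered so far
    processed = set()   # URLs that have already been crawled

    def rec(url):
        processed.add(url)
        for link in extract_links(fetch(url)):
            storage.add(link)

    for _ in range(level):
        urls = storage - processed  # urls to crawl = all urls - crawled urls
        threadpool = concurrent.futures.ThreadPoolExecutor(max_workers=threadCount)
        futures = (threadpool.submit(rec, url) for url in urls)
        for _ in concurrent.futures.as_completed(futures):
            pass  # drain: block until every URL in this level is done
    return processed

print(crawl('http://example.com'))  # {'http://example.com', 'http://example.com/next'}
```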
9 changes: 4 additions & 5 deletions core/requester.py
@@ -5,8 +5,7 @@
 import warnings
 
 import core.config
-from core.config import globalVariables
-from core.utils import converter
+from core.utils import converter, getVar
 from core.log import setup_logger
 
 logger = setup_logger(__name__)
@@ -15,9 +14,9 @@


 def requester(url, data, headers, GET, delay, timeout):
-    if core.config.globalVariables['jsonData']:
+    if getVar('jsonData'):
         data = converter(data)
-    elif core.config.globalVariables['path']:
+    elif getVar('path'):
         url = converter(data, url)
         data = []
         GET, POST = True, False
@@ -37,7 +36,7 @@ def requester(url, data, headers, GET, delay, timeout):
     if GET:
         response = requests.get(url, params=data, headers=headers,
                                 timeout=timeout, verify=False, proxies=core.config.proxies)
-    elif core.config.globalVariables['jsonData']:
+    elif getVar('jsonData'):
         response = requests.get(url, json=data, headers=headers,
                                 timeout=timeout, verify=False, proxies=core.config.proxies)
     else:
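A side note on the `getVar('jsonData')` branch: it leans on requests' `json=` keyword, which serializes the payload and sets the `Content-Type: application/json` header on its own, so no manual `json.dumps` is needed. A quick illustration — httpbin.org is just a convenient echo service here, not part of XSStrike:

```python
import requests

# The json= keyword serializes the dict and sets the JSON content type.
r = requests.post('https://httpbin.org/post', json={'q': '<svg/onload=alert()>'})
print(r.request.headers['Content-Type'])  # application/json
print(r.json()['json'])                   # {'q': '<svg/onload=alert()>'}
```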
49 changes: 47 additions & 2 deletions core/utils.py
@@ -163,7 +163,7 @@ def getParams(url, data, GET):
         if data[:1] == '?':
             data = data[1:]
     elif data:
-        if core.config.globalVariables['jsonData'] or core.config.globalVariables['path']:
+        if getVar('jsonData') or getVar('path'):
             params = data
         else:
             try:
@@ -197,6 +197,51 @@ def writer(obj, path):

 def reader(path):
     with open(path, 'r') as f:
-        result = [line.strip(
+        result = [line.rstrip(
             '\n').encode('utf-8').decode('utf-8') for line in f]
     return result
+
+def js_extractor(response):
+    """Extract js files from the response body"""
+    scripts = []
+    matches = re.findall(r'<(?:script|SCRIPT).*?(?:src|SRC)=([^\s>]+)', response)
+    for match in matches:
+        match = match.replace('\'', '').replace('"', '').replace('`', '')
+        scripts.append(match)
+    return scripts
+
+
+def handle_anchor(parent_url, url):
+    if parent_url.count('/') > 2:
+        replacable = re.search(r'/[^/]*?$', parent_url).group()
+        if replacable != '/':
+            parent_url = parent_url.replace(replacable, '')
+    scheme = urlparse(parent_url).scheme
+    if url[:4] == 'http':
+        return url
+    elif url[:2] == '//':
+        return scheme + ':' + url
+    elif url[:1] == '/':
+        return parent_url + url
+    else:
+        if parent_url.endswith('/') or url.startswith('/'):
+            return parent_url + url
+        else:
+            return parent_url + '/' + url
+
+
+def deJSON(data):
+    return data.replace('\\\\', '\\')
+
+
+def getVar(name):
+    return core.config.globalVariables[name]
+
+def updateVar(name, data, mode=None):
+    if mode:
+        if mode == 'append':
+            core.config.globalVariables[name].append(data)
+        elif mode == 'add':
+            core.config.globalVariables[name].add(data)
+    else:
+        core.config.globalVariables[name] = data
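Of the new helpers, `handle_anchor` deserves a closer look: it strips the last path segment off `parent_url`, then dispatches on the link's prefix. A few worked calls, traced from the code above (assumes XSStrike's root is on `sys.path`); note that root-relative links join onto the stripped parent, where `urllib.parse.urljoin` would resolve them against the host root:

```python
from core.utils import handle_anchor  # the helper added in this commit

print(handle_anchor('http://example.com/a/b.html', 'c.html'))
# http://example.com/a/c.html   (relative: joined to the parent's directory)
print(handle_anchor('http://example.com/a/b.html', '//cdn.example.com/x.js'))
# http://cdn.example.com/x.js   (protocol-relative: parent scheme prepended)
print(handle_anchor('http://example.com/a/b.html', 'https://other.tld/p'))
# https://other.tld/p           (absolute: returned unchanged)
print(handle_anchor('http://example.com/a/b.html', '/x'))
# http://example.com/a/x        (root-relative: joined to the stripped parent,
#                                unlike urljoin, which would give http://example.com/x)
```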