path: root/scripts/http-devframework.nse
description = [[

Tries to find out the technology behind the target website.

The script checks for certain defaults that might not have been changed, such
as common headers, URLs, or HTML content.

While the script does some guessing, note that there is no foolproof way to
determine what technologies a given site is using.

You can help improve this script by adding new entries to
nselib/data/http-devframework-fingerprints.lua

Each entry must have:
* <code>rapidDetect</code> - Callback function that is called at the beginning
of the detection process. It takes the host and port of the target website as
arguments.
* <code>consumingDetect</code> - Callback function that is called for each
spidered page. It takes the body of the response (HTML code) and the requested
path as arguments.

Note that the <code>consumingDetect</code> callbacks will not run if the
<code>rapid</code> script argument is enabled. A sketched example entry is
shown below.
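
As a rough illustration (the framework name, header, and match patterns below
are invented; only the <code>tools</code> table name and the two callback
signatures come from this script), a fingerprint entry could look like this:

<code>
local http = require "http"

tools = {

  ["ExampleFramework"] = {

    -- Quick check: a single request to the front page, looking for a
    -- tell-tale response header (hypothetical value).
    rapidDetect = function(host, port)
      local response = http.get(host, port, "/")
      if response and response.header and response.header["x-powered-by"]
          and response.header["x-powered-by"]:match("ExampleFramework") then
        return "ExampleFramework detected through the X-Powered-By header."
      end
    end,

    -- Slower check: run against the body of every spidered page.
    consumingDetect = function(body, path)
      if body and body:match("exampleframework") then
        return ("ExampleFramework detected on %s."):format(path)
      end
    end,

  },

}
</code>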

]]

---
-- @usage nmap -p80 --script http-devframework.nse <target>
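-- @usage nmap -p80 --script http-devframework.nse --script-args http-devframework.rapid=true <target>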
--
-- @args http-devframework.rapid boolean value that determines whether a rapid
--       detection should take place. The main difference between a rapid and a
--       lengthy detection is that the latter also crawls the website. Default: false
--       (lengthy detection is performed)
-- @args http-devframework.fingerprintfile File containing fingerprints. Default: nselib/data/http-devframework-fingerprints.lua
--
-- @output
-- PORT   STATE SERVICE REASON
-- 80/tcp open  http    syn-ack
-- |_http-devframework: Django detected. Found Django admin login page on /admin/
---

categories = {"discovery", "intrusive"}
author = "George Chatzisofroniou"
license = "Same as Nmap--See https://nmap.org/book/man-legal.html"

local nmap = require "nmap"
local shortport = require "shortport"
local stdnse = require "stdnse"
local httpspider = require "httpspider"
local _G = require "_G"

portrule = shortport.port_or_service( {80, 443}, {"http", "https"}, "tcp", "open")

local function loadFingerprints(filename)

  local file, fingerprints

  -- Find the file
  filename = nmap.fetchfile('nselib/data/' .. filename) or filename

  -- Load the file
  stdnse.debug1("Loading fingerprints: %s", filename)
  local env = setmetatable({fingerprints = {}}, {__index = _G});
  file = loadfile(filename, "t", env)

  if( not(file) ) then
    stdnse.debug1("Couldn't load the file: %s", filename)
    return
  end

  -- Execute the chunk; the fingerprint file is expected to define a global
  -- table named "tools" inside the sandboxed environment.
  file()
  fingerprints = env.tools

  return fingerprints

end

action = function(host, port)

  local filename = stdnse.get_script_args("http-devframework.fingerprintfile") or "http-devframework-fingerprints.lua"
  local tools = loadFingerprints(filename)
  if not tools then
    stdnse.debug1("Failed to load fingerprints")
    return nil
  end
  local rapid = stdnse.get_script_args("http-devframework.rapid")
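  -- Note that stdnse.get_script_args() returns the raw argument value, so any
  -- non-nil value (even the string "false") enables rapid mode.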

  local d

  -- Run rapidDetect() callbacks.
  for f, method in pairs(tools) do
    d = method["rapidDetect"](host, port)
    if d then
      return d
    end
  end

  -- In rapid mode we rely on the rapidDetect() callbacks only and skip the
  -- (slower) crawling phase.
  if rapid then
    return "Couldn't determine the underlying framework or CMS. Try turning off 'rapid' mode."
  end

  local crawler = httpspider.Crawler:new(host, port, '/', { scriptname = SCRIPT_NAME,
      maxpagecount = 40,
      maxdepth = -1,
      withinhost = 1
    })

  -- Only scrape pages that live on the target host and are not static
  -- javascript or css resources.
  crawler.options.doscraping = function(url)
    if crawler:iswithinhost(url)
      and not crawler:isresource(url, "js")
      and not crawler:isresource(url, "css") then
      return true
    end
  end

  crawler:set_timeout(10000)

  while (true) do

    local response, path

    local status, r = crawler:crawl()
    -- if the crawler fails it can be due to a number of different reasons
    -- most of them are "legitimate" and should not be reason to abort
    if (not(status)) then
      if (r.err) then
        return stdnse.format_output(false, r.reason)
      else
        break
      end
    end

    response = r.response
    path = tostring(r.url)

    if (response.body) then

      -- Run consumingDetect() callbacks.
      for f, method in pairs(tools) do
        d = method["consumingDetect"](response.body, path)
        if d then
          return d
        end
      end
    end

  end

  -- None of the consumingDetect() callbacks matched on any crawled page.
  return "Couldn't determine the underlying framework or CMS. Try increasing 'httpspider.maxpagecount' value to spider more pages."

end