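"""Tests for cuddlefish.docs.generate: building the static docs tarball,
link-checking the generated HTML, and verifying the digest-based
regenerate/skip logic."""
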
import os
import shutil
import unittest
import StringIO
import tarfile
import HTMLParser
import urlparse
import urllib

from cuddlefish.docs import generate
from cuddlefish.tests import env_root

INITIAL_FILESET = [ ["static-files", "base.html"],
                    ["dev-guide", "welcome.html"],
                    ["packages", "aardvark", "aardvark.html"] ]

EXTENDED_FILESET = [ ["static-files", "base.html"],
                     ["dev-guide", "extra.html"],
                     ["dev-guide", "welcome.html"],
                     ["packages", "aardvark", "aardvark.html"] ]

EXTRAFILE = ["dev-guide", "extra.html"]

def get_test_root():
    return os.path.join(env_root, "python-lib", "cuddlefish", "tests", "static-files")

def get_sdk_docs_root():
    return os.path.join(get_test_root(), "sdk-docs")

def get_base_url_path():
    return os.path.join(get_sdk_docs_root(), "doc")

def get_base_url():
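    # Convert the docs directory path into a "file://" URL (forward slashes,
    # trailing slash) so that relative links can be resolved against it.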
    base_url_path = get_base_url_path().lstrip("/")
    return "file://"+"/"+"/".join(base_url_path.split(os.sep))+"/"

class Link_Checker(HTMLParser.HTMLParser):
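    """HTML parser that inspects every <a href> in a generated page and
    fails the owning test if a relative link does not resolve under
    base_url."""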
    def __init__(self, tester, filename, base_url):
        HTMLParser.HTMLParser.__init__(self)
        self.tester = tester
        self.filename = filename
        self.base_url = base_url

    def handle_starttag(self, tag, attrs):
        if tag == "a":
            href = dict(attrs).get('href', '')
            if href:
                self.validate_href(href)

    def validate_href(self, href):
        parsed = urlparse.urlparse(href)
        # there should not be any file:// URLs
        self.tester.assertNotEqual(parsed.scheme, "file")
        # any other absolute URLs will not be checked
        if parsed.scheme:
            return
        # otherwise try to open the file at: baseurl + path
        absolute_url = self.base_url + parsed.path
        try:
            urllib.urlopen(absolute_url)
        except IOError:
            self.tester.fail(absolute_url + " link in " + self.filename + " is broken.")

class Generate_Docs_Tests(unittest.TestCase):
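    """End-to-end tests for the docs generator: generate and link-check the
    static docs tarball, and verify the status.md5 regenerate/skip logic."""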

    def test_generate_static_docs(self):
        # make sure we start clean
        if os.path.exists(get_base_url_path()):
            shutil.rmtree(get_base_url_path())
        # generate a doc tarball, and extract it
        base_url = get_base_url()
        tar_filename = generate.generate_static_docs(env_root, base_url)
        tgz = tarfile.open(tar_filename)
        tgz.extractall(get_sdk_docs_root())
        # get each HTML file...
        for root, subFolders, filenames in os.walk(get_sdk_docs_root()):
            for filename in filenames:
                if not filename.endswith(".html"):
                    continue
                filename = os.path.join(root, filename)
                # ...and feed it to the link checker
                linkChecker = Link_Checker(self, filename, base_url)
                with open(filename, "r") as html_file:
                    linkChecker.feed(html_file.read())
        # clean up
        shutil.rmtree(get_base_url_path())
        tgz.close()
        os.remove(tar_filename)
        generate.clean_generated_docs(os.path.join(env_root, "doc"))

    def test_generate_docs(self):
        test_root = get_test_root()
        docs_root = os.path.join(test_root, "doc")
        generate.clean_generated_docs(docs_root)
        new_digest = self.check_generate_regenerate_cycle(test_root, INITIAL_FILESET)
        # touching an MD file under packages **does** cause a regenerate
        os.utime(os.path.join(test_root, "packages", "aardvark", "doc", "main.md"), None)
        new_digest = self.check_generate_regenerate_cycle(test_root, INITIAL_FILESET, new_digest)
        # touching a non-MD file under packages **does not** cause a regenerate
        os.utime(os.path.join(test_root, "packages", "aardvark", "lib", "main.js"), None)
        self.check_generate_is_skipped(test_root, INITIAL_FILESET, new_digest)
        # touching a non-MD file under static-files **does not** cause a regenerate
        os.utime(os.path.join(docs_root, "static-files", "another.html"), None)
        self.check_generate_is_skipped(test_root, INITIAL_FILESET, new_digest)
        # touching an MD file under dev-guide **does** cause a regenerate
        os.utime(os.path.join(docs_root, "dev-guide-source", "welcome.md"), None)
        new_digest = self.check_generate_regenerate_cycle(test_root, INITIAL_FILESET, new_digest)
        # adding a file **does** cause a regenerate
        with open(os.path.join(docs_root, "dev-guide-source", "extra.md"), "w") as extra_file:
            extra_file.write("some content")
        new_digest = self.check_generate_regenerate_cycle(test_root, EXTENDED_FILESET, new_digest)
        # deleting a file **does** cause a regenerate
        os.remove(os.path.join(docs_root, "dev-guide-source", "extra.md"))
        new_digest = self.check_generate_regenerate_cycle(test_root, INITIAL_FILESET, new_digest)
        # remove the files
        generate.clean_generated_docs(docs_root)

    def check_generate_is_skipped(self, test_root, files_to_expect, initial_digest):
        generate.generate_docs(test_root, stdout=StringIO.StringIO())
        docs_root = os.path.join(test_root, "doc")
        for file_to_expect in files_to_expect:
            self.assertTrue(os.path.exists(os.path.join(docs_root, *file_to_expect)))
        self.assertEqual(initial_digest, open(os.path.join(docs_root, "status.md5"), "r").read())

    def check_generate_regenerate_cycle(self, test_root, files_to_expect, initial_digest=None):
        # test that if we generate, files are getting generated
        generate.generate_docs(test_root, stdout=StringIO.StringIO())
        docs_root = os.path.join(test_root, "doc")
        for file_to_expect in files_to_expect:
            self.assertTrue(os.path.exists(os.path.join(docs_root, *file_to_expect)))
        if initial_digest:
            self.assertNotEqual(initial_digest, open(os.path.join(docs_root, "status.md5"), "r").read())
        # and that if we regenerate, nothing changes...
        new_digest = open(os.path.join(docs_root, "status.md5"), "r").read()
        self.check_generate_is_skipped(test_root, files_to_expect, new_digest)
        return new_digest

if __name__ == '__main__':
    unittest.main()