path: management/commands/missingtimetables.py
#    Copyright (C) 2017  Alban Gruin
#
#    celcatsanitizer is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as published
#    by the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    celcatsanitizer is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with celcatsanitizer.  If not, see <http://www.gnu.org/licenses/>.
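
"""List the group timetables that scraptimetables does not pick up.

For every year and section advertised on the Celcat finder page, the
per-group timetable links are collected, unless a combined "toutes
sections et semestres confondus" timetable exists for that section.
The result is printed as JSON.

Usage: ./manage.py missingtimetables --url <finder page URL>
"""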

import json
import re

from django.core.management.base import BaseCommand

from ...models import Year
from ._private import get_from_db_or_create, get_xml


class Command(BaseCommand):
    help = "List timetables not scrapped by scraptimetables"

    def add_arguments(self, parser):
        parser.add_argument("--url", type=str, required=True)

    def handle(self, *args, **options):
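        # gdict maps "<year> <section name>" to the list of
        # (group name, group URL) pairs that lack a combined timetable.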
        gdict = {}
        for year, name, finder in self.__get_finders(options["url"]):
            soup = get_xml(finder)
            handled = False
            groups = []

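            # Collect every per-group timetable link on this finder page.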
            for link in soup.find_all("a"):
                if link.text.startswith("Groupe: "):
                    url_base = finder.rsplit("/", 1)[0]
                    xml_link = link.parent.parent.find(
                        "a", attrs={"class": "xmllink"})
                    group_url = "/".join([url_base, xml_link["href"]])

                    if link.text[9:].startswith(year.name):
                        group_name = link.text[9:]
                    else:
                        group_name = year.name + " " + link.text[9:]

                    groups.append((group_name, group_url))

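                # A combined "toutes sections et semestres confondus"
                # timetable exists: consider this year/section handled.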
                if "toutes sections et semestres confondus" in link.text:
                    handled = True
                    break

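            # No combined timetable was found: report the individual groups.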
            if not handled and groups:
                gdict["{} {}".format(year, name)] = groups

        self.stdout.write(json.dumps(gdict, indent=4))

    def __get_finders(self, url):
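        """Yield (year, section name, finder2 URL) tuples for every
        section option found on the finder page at `url`."""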
        soup = get_xml(url)
        choose_regex = re.compile(r"^- Choisissez votre ([\w ]+) -$")

        for form in soup.find_all("form"):
            for i, option in enumerate(form.find_all("option")):
                if i == 0 and option.text == "- Choisissez le niveau -":
                    break

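                # "- Choisissez votre ... -" options introduce a new
                # year; every other option is a section of that year.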
                search = choose_regex.search(option.text)
                if search is not None:
                    current_year = get_from_db_or_create(
                        Year, name=search.group(1))
                else:
                    finder = option["value"].replace("finder", "finder2")
                    if option.text.startswith(current_year.name):
                        name = option.text[len(current_year.name):].strip()
                    else:
                        name = option.text

                    yield current_year, name, finder