path: root/management/commands/scraptimetables.py
#    Copyright (C) 2017-2018  Alban Gruin
#
#    celcatsanitizer is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as published
#    by the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    celcatsanitizer is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with celcatsanitizer.  If not, see <http://www.gnu.org/licenses/>.

import re

import requests
from bs4 import BeautifulSoup
from django.core.management.base import BaseCommand
from django.db import transaction

from edt.models import Source, Timetable, Year


class Command(BaseCommand):
    help = "Fetch timetables from a specified URL"

    def add_arguments(self, parser):
        parser.add_argument("--url", type=str, required=True)

    @transaction.atomic
    def handle(self, *args, **options):
        for year, name, source_url in self.__get_timetables(options["url"]):
            source, _ = Source.objects.get_or_create(url=source_url)
            timetable = Timetable(year=year, name=name, source=source)
            timetable.save()

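    # Yields (year, group name, timetable URL) tuples scraped from the
    # Celcat index page at the given url.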
    def __get_timetables(self, url):
        req = requests.get(url)
        req.raise_for_status()
        soup = BeautifulSoup(req.content, "html.parser")
        choose_regex = re.compile(r"^- Choisissez votre ([\w ]+) -$")
        base_url = ("https://edt.univ-tlse3.fr/calendar/default.aspx"
                    "?View=month&Type=group&ResourceName=formation_")

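        # Forms whose first option is the level-selector placeholder are
        # skipped; in the remaining forms, options are either year headers
        # (matched by choose_regex) or group entries pointing to a timetable.
        # Year headers are assumed to precede the group entries they introduce.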
        for form in soup.find_all("form"):
            for i, option in enumerate(form.find_all("option")):
                if i == 0 and option.text == "- Choisissez le niveau -":
                    break

                search = choose_regex.search(option.text)
                if search is not None:
                    current_year, _ = Year.objects.get_or_create(
                        name=search.group(1))
                else:
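                    # The option value is assumed to end in a 5-character
                    # extension (presumably ".html"): keep the last
                    # "-"-separated token, drop the extension, and strip a
                    # trailing "s1" semester marker before building the URL.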
                    g = option["value"].split("-")[-1][:-5]
                    if g.endswith("s1"):
                        g = g[:-2]

                    url = base_url + g.upper()
                    if option.text.startswith(current_year.name):
                        name = option.text[len(current_year.name):].strip()
                    else:
                        name = option.text
                    yield current_year, name, url