author | Alban Gruin | 2018-04-25 21:27:28 +0200
---|---|---
committer | Alban Gruin | 2018-04-25 21:27:28 +0200
commit | 080606776d14aa5fdac21d43d028b01ed3a2ed4f (patch) |
tree | cb85f61343d1e561ee5080724e949d801c9839f3 /management/commands/missingtimetables.py |
parent | 54c5dbb98293acea9b470808e5a35e99c004f265 (diff) |
Add a script to list the timetables that were not retrieved (refs: v0.13.0-pa1ch, prod/pa1ch/0.13.z)
Diffstat (limited to 'management/commands/missingtimetables.py')
-rw-r--r-- | management/commands/missingtimetables.py | 76 |
1 file changed, 76 insertions, 0 deletions
diff --git a/management/commands/missingtimetables.py b/management/commands/missingtimetables.py
new file mode 100644
index 0000000..1b38896
--- /dev/null
+++ b/management/commands/missingtimetables.py
@@ -0,0 +1,76 @@
+# Copyright (C) 2017 Alban Gruin
+#
+# celcatsanitizer is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# celcatsanitizer is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with celcatsanitizer. If not, see <http://www.gnu.org/licenses/>.
+
+from django.core.management.base import BaseCommand
+from ...models import Year
+from ._private import get_from_db_or_create, get_xml
+
+import json
+import re
+
+
+class Command(BaseCommand):
+    help = "List timetables not scrapped by scraptimetables"
+
+    def add_arguments(self, parser):
+        parser.add_argument("--url", type=str, required=True)
+
+    def handle(self, *args, **options):
+        gdict = {}
+        for year, name, finder in self.__get_finders(options["url"]):
+            soup = get_xml(finder)
+            handled = False
+            groups = []
+
+            for link in soup.find_all("a"):
+                if link.text.startswith("Groupe: "):
+                    url_base = finder.rsplit("/", 1)
+                    group_url = "/".join([url_base[0], link.parent.parent.find("a", attrs={"class": "xmllink"})["href"]])
+                    if link.text[9:].startswith(year.name):
+                        group_name = link.text[9:]
+                    else:
+                        group_name = year.name + " " + link.text[9:]
+
+                    groups.append((group_name, group_url))
+
+                if "toutes sections et semestres confondus" in link.text:
+                    handled = True
+                    break
+
+            if not handled and len(groups) > 0:
+                gdict["{} {}".format(year, name)] = groups
+
+        print(json.dumps(gdict, indent=4))
+
+    def __get_finders(self, url):
+        soup = get_xml(url)
+        choose_regex = re.compile("^- Choisissez votre ([\w ]+) -$")
+
+        for form in soup.find_all("form"):
+            for i, option in enumerate(form.find_all("option")):
+                if i == 0 and option.text == "- Choisissez le niveau -":
+                    break
+
+                search = choose_regex.search(option.text)
+                if search is not None:
+                    current_year = get_from_db_or_create(Year, name=search.groups(0)[0])
+                else:
+                    finder = option["value"].replace("finder", "finder2")
+                    if option.text.startswith(current_year.name):
+                        name = option.text[len(current_year.name):].strip()
+                    else:
+                        name = option.text
+
+                    yield current_year, name, finder
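
For context: the new `missingtimetables` command walks a Celcat "finder" page, collects every "Groupe: …" link for each year, and prints as indented JSON the groups whose year is not already covered by the regular scraper (i.e. years with no "toutes sections et semestres confondus" entry). The snippet below is only a minimal sketch of that link-extraction step, run against a made-up HTML fragment; the markup, the sample URL and the `list_groups` helper are assumptions for illustration and are not part of this commit, which uses the project's own `get_xml` helper instead of BeautifulSoup directly.

```python
# Illustrative sketch only -- NOT part of the commit. The HTML below is a
# made-up fragment that mimics the structure the command expects: a table row
# holding a "Groupe: ..." label next to an <a class="xmllink"> element.
import json

from bs4 import BeautifulSoup

SAMPLE_FINDER_URL = "https://celcat.example.edu/finder2.html"  # hypothetical
SAMPLE_HTML = """
<table>
  <tr>
    <td><a href="#">Groupe: L1 Informatique s1 TD1</a></td>
    <td><a class="xmllink" href="g123.xml">XML</a></td>
  </tr>
  <tr>
    <td><a href="#">Groupe: s2 TD2</a></td>
    <td><a class="xmllink" href="g124.xml">XML</a></td>
  </tr>
</table>
"""


def list_groups(soup, finder_url, year_name):
    """Collect (group name, XML url) pairs, much like Command.handle()."""
    base = finder_url.rsplit("/", 1)[0]
    groups = []
    for link in soup.find_all("a"):
        if link.text.startswith("Groupe: "):
            label = link.text[len("Groupe: "):]
            # Prefix the year name when the label does not already repeat it.
            name = label if label.startswith(year_name) else year_name + " " + label
            xml_link = link.parent.parent.find("a", attrs={"class": "xmllink"})
            groups.append((name, "/".join([base, xml_link["href"]])))
        if "toutes sections et semestres confondus" in link.text:
            # The real command treats such a year as already scraped and
            # drops its groups from the output.
            return []
    return groups


if __name__ == "__main__":
    soup = BeautifulSoup(SAMPLE_HTML, "html.parser")
    groups = list_groups(soup, SAMPLE_FINDER_URL, "L1 Informatique")
    print(json.dumps({"L1 Informatique": groups}, indent=4))
```

Once deployed, the committed command itself would presumably be run like any other Django management command, along the lines of `python manage.py missingtimetables --url <celcat finder URL>`, and would print the resulting year-to-groups mapping as indented JSON.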