#    guppy Copyright (C) 2010-2011 guppy team members.
#
#    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
#    This is free software, and you are welcome to redistribute it
#    under certain conditions; type `show c' for details.
#  This program is free software; you can redistribute it and/or modify
#  it under the terms of the GNU General Public License as published by
#  the Free Software Foundation; either version 2 of the License, or
#  (at your option) any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program; if not, write to the Free Software
#  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
#  MA 02110-1301, USA.

import time, threading
import json
import urllib.request
import urllib
import datetime
import traceback
import re
import urllib.parse
#import time


@plugin
class wikinews(object):
    """Announce English Wikinews activity on IRC.

    Three daemon threads poll the MediaWiki API:
      * every 24 h, list the whole review queue (`listAllFeeds`),
      * every minute, announce newly submitted articles (`checkNewEntries`),
      * every minute, announce freshly published articles (`checkPublished`).

    The plugin also expands ``[[wiki links]]`` seen in channel messages into
    full article URLs (`handle_message`).
    """
    def __init__(self, server):
        self.server = server
        self.commands = []
        # Cleared by destroy() to make the poller threads exit.
        self.loop = True
        self.channel = "#wikinews-spam"
        self.timeListAllFeeds = 24*60*60 # 24h
        self.timeCheckNewEntries = 1*60 # 1 minutes
        self.timeCheckPublished = 1*60 # 1 minute
        # pageids seen on the previous review-queue poll; used to detect new entries.
        self.lastEntries = []
        self.url='http://en.wikinews.org/w/api.php?action=query&list=categorymembers&format=json&cmtitle=Category%3AReview&cmprop=ids%7Ctitle%7Ctimestamp&cmnamespace=0&cmlimit=10&cmsort=timestamp'
        def poller(interval, action):
            # Generic polling loop shared by all three threads (replaces the
            # three near-identical loop/loop2/loop3 closures). Wakes every
            # 10 s so destroy() can stop the thread within ~10 s; `action`
            # first fires after a full `interval` has elapsed, as before.
            timeWaited = 0
            while self.loop:
                if timeWaited >= interval:
                    action()
                    timeWaited = 0
                time.sleep(10)
                timeWaited += 10
        self.t1 = threading.Thread(target=poller,
                                   args=(self.timeListAllFeeds, self.listAllFeeds))
        self.t2 = threading.Thread(target=poller,
                                   args=(self.timeCheckNewEntries, self.checkNewEntries))
        self.t3 = threading.Thread(target=poller,
                                   args=(self.timeCheckPublished, self.checkPublished))
        self.t1.daemon = True
        self.t2.daemon = True
        self.t3.daemon = True
        self.t1.start()
        self.t2.start()
        self.t3.start()
        server.handle("message", self.handle_message)
    def _fetch_json(self, url):
        """Fetch *url* and return the decoded JSON body, or None on any error.

        Network/parse failures are logged and swallowed so a flaky API call
        never kills a poller thread (same best-effort policy as before, but
        narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        still propagate).
        """
        try:
            response = urllib.request.urlopen(url).read()
            return json.loads(response.decode("utf8"))
        except Exception:
            self.server.prnt("Exception occured, ignoring -- START")
            traceback.print_exc()
            self.server.prnt("Exception occured, ignoring -- END")
            return None
    def handle_message(self, channel, nick, message):
        """Expand every ``[[Title]]`` in *message* into a full URL and post them.

        Silently gives up on the whole message if any lookup fails or yields
        neither a local page nor an interwiki link.
        """
        # Raw string: "\[\[" in a plain literal is an invalid escape sequence
        # (SyntaxWarning in modern Python).
        titles = re.findall(r"\[\[(.*?)\]\]", message)
        if not titles:
            return
        urlsList = []
        for title in titles:
            url = "https://en.wikinews.org/w/api.php?action=query&prop=info&format=json&inprop=url&iwurl=1&titles=" + urllib.parse.quote(title)
            try:
                # ['query'] stays inside the try so a malformed response is
                # swallowed too, as in the original.
                data = json.loads(urllib.request.urlopen(url).read().decode("utf8"))['query']
            except Exception:
                self.server.prnt("Exception occured, ignoring -- START")
                traceback.print_exc()
                self.server.prnt("Exception occured, ignoring -- END")
                return
            if 'pages' in data:
                # Single-title query: popitem() grabs the only page entry.
                pageUrl = data['pages'].popitem()[1]['fullurl']
            elif 'interwiki' in data:
                pageUrl = data['interwiki'][0]['url']
            else:
                return
            # NOTE(review): presumably undoes \uXXXX escapes in the API URL;
            # unicode_escape mangles genuine non-ASCII characters — TODO confirm.
            pageUrl = bytes(pageUrl, "utf-8").decode("unicode_escape")
            urlsList.append(pageUrl)
        self.server.doMessage(channel, " ".join(urlsList))
    def checkNewEntries(self):
        """Diff the review queue against the previous poll; announce new pageids."""
        j = self._fetch_json(self.url)
        if j is None:
            return
        previous = self.lastEntries
        self.lastEntries = []
        for entry in j['query']['categorymembers']:
            self.lastEntries.append(entry['pageid'])
            if entry['pageid'] not in previous:
                self.listEntry(entry, "submitted for review")
    def listAllFeeds(self):
        """Announce the entire review queue and rebuild ``lastEntries`` from it."""
        # Reset before fetching (as before): a failed fetch leaves the seen-set
        # empty, so the next checkNewEntries() re-announces everything.
        self.lastEntries = []
        # Response shape:
        # {'query': {'categorymembers': [{'timestamp': ..., 'ns': 0,
        #                                 'pageid': ..., 'title': ...}]}}
        j = self._fetch_json(self.url)
        if j is None:
            return
        members = j['query']['categorymembers']
        if members:
            self.server.doMessage(self.channel, "Review queue:")
            for entry in members:
                self.lastEntries.append(entry['pageid'])
                self.listEntry(entry, "submitted for review")
    def listEntry(self, entry, comment):
        """Announce one category member: shortened URL, *comment*, age, title.

        Best-effort: any failure while resolving or shortening the URL is
        logged and the entry is skipped.
        """
        url2 = "https://en.wikinews.org/w/api.php?action=query&prop=info&format=json&inprop=url&pageids=" + str(entry['pageid'])
        try:
            fullurl = json.loads(urllib.request.urlopen(url2).read().decode("utf8"))['query']['pages'][str(entry['pageid'])]['fullurl']
            # Shorten via tinyurl; the API answers with a single line.
            fullurl = urllib.request.urlopen("http://tinyurl.com/api-create.php?url=" + fullurl).readline().decode('utf8')
        except Exception:
            self.server.prnt("Exception occured, ignoring -- START")
            traceback.print_exc()
            self.server.prnt("Exception occured, ignoring -- END")
            return
        hoursAgo = datetime.datetime.utcnow() - datetime.datetime.strptime(entry['timestamp'], "%Y-%m-%dT%H:%M:%SZ")
        # Rebuild the timedelta from days+seconds to drop microseconds for display.
        hoursAgo = datetime.timedelta(hoursAgo.days, hoursAgo.seconds)
        self.server.doMessage(self.channel, "%s %s *%s* ago - %s" % (fullurl, comment, hoursAgo, entry['title']))
    def checkPublished(self):
        """Announce articles published within the last poll interval.

        The query is sorted newest-first (cmdir=desc), so we stop at the first
        entry older than ``timeCheckPublished``.
        """
        url = 'https://en.wikinews.org/w/api.php?action=query&list=categorymembers&format=json&cmtitle=Category%3APublished&cmprop=ids%7Ctitle%7Ctimestamp&cmnamespace=0&cmlimit=10&cmsort=timestamp&cmdir=desc'
        j = self._fetch_json(url)
        if j is None:
            return
        for entry in j['query']['categorymembers']:
            age = datetime.datetime.utcnow() - datetime.datetime.strptime(entry['timestamp'], "%Y-%m-%dT%H:%M:%SZ")
            if age.days == 0 and age.seconds < self.timeCheckPublished:
                self.listEntry(entry, "published")
            else:
                # Everything after this entry is older still.
                break
    def destroy(self):
        """Stop the poller threads and wait for them to exit (up to ~10 s each)."""
        self.loop = False
        self.t1.join()
        self.t2.join()
        # BUG FIX: t3 was started in __init__ but never joined here.
        self.t3.join()