__license__ = 'GPL v3'
__copyright__ = '2008-2010, Darko Miletic <darko.miletic at gmail.com>'
'''
mondediplo.com
'''

import urllib
from calibre import strftime
from calibre.web.feeds.news import BasicNewsRecipe

class LeMondeDiplomatiqueEn(BasicNewsRecipe):
    title = 'Le Monde diplomatique - English edition'
    __author__ = 'Darko Miletic'
    description = 'Real journalism making sense of the world around us'
    publisher = 'Le Monde diplomatique'
    category = 'news, politics, world'
    no_stylesheets = True
    oldest_article = 31
    delay = 1
    encoding = 'utf-8'
    needs_subscription = True
    masthead_url = 'http://mondediplo.com/squelettes/pics/logo-30.gif'
    publication_type = 'magazine'
    PREFIX = 'http://mondediplo.com/'
    LOGIN = PREFIX + '2009/09/02congo'
    INDEX = PREFIX + strftime('%Y/%m/')
    use_embedded_content = False
    language = 'en'
    extra_css = """
        body{font-family: "Luxi sans","Lucida sans","Lucida Grande",Lucida,"Lucida Sans Unicode",sans-serif}
        .surtitre{font-size: 1.2em; font-variant: small-caps; margin-bottom: 0.5em}
        .chapo{font-size: 1.2em; font-weight: bold; margin: 1em 0 0.5em}
        .texte{font-family: Georgia,"Times New Roman",serif}
        h1{color: #990000}
        .notes{border-top: 1px solid #CCCCCC; font-size: 0.9em; line-height: 1.4em}
    """

    conversion_options = {
        'comment': description,
        'tags': category,
        'publisher': publisher,
        'language': language
    }

    def get_browser(self):
        br = BasicNewsRecipe.get_browser()
        # Open the login page first, then POST the subscription credentials
        # back to the same URL as the site's login form does
        br.open(self.LOGIN)
        if self.username is not None and self.password is not None:
            data = urllib.urlencode({'login': self.username,
                                     'pass': self.password,
                                     'enter': 'enter'})
            br.open(self.LOGIN, data)
        return br

    keep_only_tags = [
        dict(name='div', attrs={'id': 'contenu'}),
        dict(name='div', attrs={'class': 'notes surlignable'})
    ]
    remove_tags = [dict(name=['object', 'link', 'script', 'iframe', 'base'])]
    remove_attributes = ['height', 'width']

    def parse_index(self):
        articles = []
        soup = self.index_to_soup(self.INDEX)
        # The monthly table of contents lists every article as an <li> inside div.som_num
        cnt = soup.find('div', attrs={'class': 'som_num'})
        for item in cnt.findAll('li'):
            description = ''
            feed_link = item.find('a')
            desc = item.find('div', attrs={'class': 'chapo'})
            if desc:
                description = desc.string
            if feed_link and feed_link.has_key('href'):
                # Index links are relative: drop everything up to the first '/../'
                # and prepend PREFIX to build an absolute article URL
                url = self.PREFIX + feed_link['href'].partition('/../')[2]
                title = self.tag_to_string(feed_link)
                date = strftime(self.timefmt)
                articles.append({
                    'title': title,
                    'date': date,
                    'url': url,
                    'description': description
                })
        return [(self.title, articles)]
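
# Testing note (not part of the original recipe): a recipe that needs a
# subscription can be exercised from the command line with calibre's
# ebook-convert tool, passing the credentials explicitly, e.g.:
#
#   ebook-convert lemonde_diplomatique_en.recipe output.epub \
#       --username YOUR_LOGIN --password YOUR_PASSWORD
#
# The file name 'lemonde_diplomatique_en.recipe' is only illustrative; save
# this file under any name with a .recipe extension.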