#!/usr/bin/env python
__license__   = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'

'''
www.guardian.co.uk
'''
from calibre import strftime
from calibre.web.feeds.news import BasicNewsRecipe
from datetime import date

class Guardian(BasicNewsRecipe):

    title = u'The Guardian / The Observer'
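    # The Observer is the Guardian's Sunday sister paper, so on Sundays
    # (date.today().weekday() == 6) the recipe indexes the Observer
    # edition instead.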
    if date.today().weekday() == 6:
        base_url = "http://www.guardian.co.uk/theobserver"
    else:
        base_url = "http://www.guardian.co.uk/theguardian"

    __author__ = 'Seabound and Sujata Raman'
    language = 'en_GB'

    oldest_article = 7
    max_articles_per_feed = 100
    remove_javascript = True

    # List of section titles to ignore
    # For example: ['Sport']
    ignore_sections = []

    timefmt = ' [%a, %d %b %Y]'
    keep_only_tags = [
        dict(name='div', attrs={'id':['content', 'article_header', 'main-article-info']}),
    ]
    remove_tags = [
        dict(name='div', attrs={'class':['video-content', 'videos-third-column']}),
        dict(name='div', attrs={'id':['article-toolbox', 'subscribe-feeds']}),
        dict(name='ul', attrs={'class':['pagination']}),
        dict(name='ul', attrs={'id':['content-actions']}),
        #dict(name='img'),
    ]
    use_embedded_content = False

    no_stylesheets = True
    extra_css = '''
        .article-attributes{font-size:x-small; font-family:Arial,Helvetica,sans-serif;}
        .h1{font-size:large; font-family:georgia,serif; font-weight:bold;}
        .stand-first-alone{color:#666666; font-size:small; font-family:Arial,Helvetica,sans-serif;}
        .caption{color:#666666; font-size:x-small; font-family:Arial,Helvetica,sans-serif;}
        #article-wrapper{font-size:small; font-family:Arial,Helvetica,sans-serif; font-weight:normal;}
        .main-article-info{font-family:Arial,Helvetica,sans-serif;}
        #full-contents{font-size:small; font-family:Arial,Helvetica,sans-serif; font-weight:normal;}
        #match-stats-summary{font-size:small; font-family:Arial,Helvetica,sans-serif; font-weight:normal;}
    '''

    def get_article_url(self, article):
        # Multimedia and interactive items have no useful article body;
        # returning None tells calibre to skip the entry.
        url = article.get('guid', None)
        if url is None:
            return None
        if ('/video/' in url or '/flyer/' in url or '/quiz/' in url
                or '/gallery/' in url or 'ivebeenthere' in url
                or 'pickthescore' in url or 'audioslideshow' in url):
            return None
        return url

    def preprocess_html(self, soup):
        # Strip inline style and font attributes so extra_css wins.
        for item in soup.findAll(style=True):
            del item['style']
        for item in soup.findAll(face=True):
            del item['face']
        # Flatten list markup into divs for a cleaner e-book layout.
        for tag in soup.findAll(name=['ul', 'li']):
            tag.name = 'div'
        return soup

    def find_sections(self):
        soup = self.index_to_soup(self.base_url)
        # Use the digital edition promo image as the cover, if present.
        img = soup.find('img', attrs={'alt':'Guardian digital edition'})
        if img is not None:
            self.cover_url = img['src']
        # Each section of the paper is linked from the page index as
        # <strong class="book"><a href=...>.
        idx = soup.find('div', id='book-index')
        for s in idx.findAll('strong', attrs={'class':'book'}):
            a = s.find('a', href=True)
            if a is not None:
                yield (self.tag_to_string(a), a['href'])

    def find_articles(self, url):
        soup = self.index_to_soup(url)
        div = soup.find('div', attrs={'class':'book-index'})
        for ul in div.findAll('ul', attrs={'class':'trailblock'}):
            for li in ul.findAll('li'):
                a = li.find(href=True)
                if not a:
                    continue
                title = self.tag_to_string(a)
                url = a['href']
                if not title or not url:
                    continue
                # Use the trail text as the description, minus any embedded
                # links; default to an empty string when there is none.
                desc = ''
                tt = li.find('div', attrs={'class':'trailtext'})
                if tt is not None:
                    for da in tt.findAll('a'):
                        da.extract()
                    desc = self.tag_to_string(tt).strip()
                yield {
                    'title': title, 'url': url, 'description': desc,
                    'date': strftime('%a, %d %b'),
                }

    def parse_index(self):
        try:
            feeds = []
            for title, href in self.find_sections():
                if title not in self.ignore_sections:
                    feeds.append((title, list(self.find_articles(href))))
            return feeds
        except:
            # Any scraping failure probably means the site layout changed;
            # give up cleanly rather than return a partial index.
            raise NotImplementedError
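
# Usage sketch (an illustration, not part of the original recipe): custom
# calibre recipes can be built from the command line with ebook-convert,
# e.g.
#
#   ebook-convert guardian.recipe guardian.epub
#
# where 'guardian.recipe' is a hypothetical name for this file saved with
# calibre's .recipe extension.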