#!/usr/bin/env python
__license__   = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'

from calibre.web.feeds.news import BasicNewsRecipe
import copy

# http://online.wsj.com/page/us_in_todays_paper.html

class WallStreetJournal(BasicNewsRecipe):

    title = 'The Wall Street Journal'
    __author__ = 'Kovid Goyal, Sujata Raman, and Joshua Oster-Morris'
    description = 'News and current affairs'
    needs_subscription = True
    language = 'en'

    max_articles_per_feed = 1000
    timefmt = ' [%a, %b %d, %Y]'
    no_stylesheets = True

    extra_css = '''h1{color:#093D72; font-size:large; font-family:Georgia,"Century Schoolbook","Times New Roman",Times,serif;}
                    h2{color:#474537; font-family:Georgia,"Century Schoolbook","Times New Roman",Times,serif; font-size:small; font-style:italic;}
                    .subhead{color:gray; font-family:Georgia,"Century Schoolbook","Times New Roman",Times,serif; font-size:small; font-style:italic;}
                    .insettipUnit{color:#666666; font-family:Arial,sans-serif; font-size:xx-small}
                    .targetCaption{font-size:x-small; color:#333333; font-family:Arial,Helvetica,sans-serif}
                    .article{font-family:Arial,Helvetica,sans-serif; font-size:x-small}
                    .tagline{color:#333333; font-size:xx-small}
                    .dateStamp{color:#666666; font-family:Arial,Helvetica,sans-serif}
                    h3{color:blue; font-family:Arial,Helvetica,sans-serif; font-size:xx-small}
                    .byline{color:blue; font-family:Arial,Helvetica,sans-serif; font-size:xx-small}
                    h6{color:#333333; font-family:Georgia,"Century Schoolbook","Times New Roman",Times,serif; font-size:small; font-style:italic;}
                    .paperLocation{color:#666666; font-size:xx-small}'''

    remove_tags_before = dict(name='h1')
    remove_tags = [
        dict(id=["articleTabs_tab_article", "articleTabs_tab_comments",
                 "articleTabs_tab_interactive", "articleTabs_tab_video",
                 "articleTabs_tab_map", "articleTabs_tab_slideshow"]),
        {'class': ['footer_columns', 'network', 'insetCol3wide', 'interactive',
                   'video', 'slideshow', 'map', 'insettip', 'insetClose',
                   'more_in', 'insetContent', 'articleTools_bottom', 'aTools',
                   'tooltip', 'adSummary', 'nav-inline']},
        dict(rel='shortcut icon'),
    ]
    remove_tags_after = [dict(id='article_story_body'), {'class': 'article story'}]

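    # Log in to wsj.com with the subscriber credentials configured in
    # calibre; the login form on commerce.wsj.com is the first form on
    # the page, and a response that does not contain 'Welcome,' is
    # treated as a failed login.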
    def get_browser(self):
        br = BasicNewsRecipe.get_browser()
        if self.username is not None and self.password is not None:
            br.open('http://commerce.wsj.com/auth/login')
            br.select_form(nr=0)
            br['user'] = self.username
            br['password'] = self.password
            res = br.submit()
            raw = res.read()
            if 'Welcome,' not in raw:
                raise ValueError('Failed to log in to wsj.com, check your '
                        'username and password')
        return br

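    # WSJ article pages use tables for layout; flatten them into divs
    # so the e-book output reflows cleanly, and drop the inline
    # thumbnail containers.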
    def postprocess_html(self, soup, first):
        for tag in soup.findAll(name=['table', 'tr', 'td']):
            tag.name = 'div'

        thumbnail_ids = ['articleThumbnail_%d' % i for i in range(1, 8)]
        for tag in soup.findAll('div', dict(id=thumbnail_ids)):
            tag.extract()

        return soup

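    # The "In Today's Paper" page lists every section of the day's
    # print edition and serves as the index for this recipe.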
    def wsj_get_index(self):
        return self.index_to_soup('http://online.wsj.com/itp')

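    # Fetch the articles for one section and append them to the feed
    # list; a section that fails to parse is skipped.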
    def wsj_add_feed(self, feeds, title, url):
        self.log('Found section:', title)
        try:
            if url.endswith('whatsnews'):
                articles = self.wsj_find_wn_articles(url)
            else:
                articles = self.wsj_find_articles(url)
        except Exception:
            articles = []
        if articles:
            feeds.append((title, articles))
        return feeds

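    # Build the feed list from the section tabs in the itp page header.
    # The front page ('pageone') also yields a separate "What's News"
    # pseudo-section by rewriting its URL.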
    def parse_index(self):
        soup = self.wsj_get_index()

        date = soup.find('span', attrs={'class':'date-date'})
        if date is not None:
            self.timefmt = ' [%s]' % self.tag_to_string(date)

        cov = soup.find('a', attrs={'class':'icon pdf'}, href=True)
        if cov is not None:
            self.cover_url = cov['href']

        feeds = []
        div = soup.find('div', attrs={'class':'itpHeader'})
        div = div.find('ul', attrs={'class':'tab'})
        for a in div.findAll('a', href=lambda x: x and '/itp/' in x):
            pageone = a['href'].endswith('pageone')
            if pageone:
                title = 'Front Section'
                url = 'http://online.wsj.com' + a['href']
                feeds = self.wsj_add_feed(feeds, title, url)
                title = "What's News"
                url = url.replace('pageone', 'whatsnews')
                feeds = self.wsj_add_feed(feeds, title, url)
            else:
                title = self.tag_to_string(a)
                url = 'http://online.wsj.com' + a['href']
                feeds = self.wsj_add_feed(feeds, title, url)
        return feeds

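    # "What's News" is a column of short, one-paragraph summaries;
    # every article link inside the whatsNews-simple block becomes an
    # entry whose description is the paragraph that contains it.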
    def wsj_find_wn_articles(self, url):
        soup = self.index_to_soup(url)
        articles = []

        whats_news = soup.find('div', attrs={'class':lambda x: x and 'whatsNews-simple' in x})
        if whats_news is not None:
            for a in whats_news.findAll('a', href=lambda x: x and '/article/' in x):
                container = a.findParent(['p'])
                meta = a.find(attrs={'class':'meta_sectionName'})
                if meta is not None:
                    meta.extract()
                title = self.tag_to_string(a).strip()
                url = a['href']
                desc = ''
                if container is not None:
                    desc = self.tag_to_string(container)

                articles.append({'title':title, 'url':url,
                    'description':desc, 'date':''})

                self.log('\tFound WN article:', title)

        return articles

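    # Regular sections: drop the "What's News" block (handled
    # separately), promote the 'ahed' flavor story to a normal article
    # link, then collect every mjLinkItem anchor on the page.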
    def wsj_find_articles(self, url):
        soup = self.index_to_soup(url)

        whats_news = soup.find('div', attrs={'class':lambda x: x and 'whatsNews-simple' in x})
        if whats_news is not None:
            whats_news.extract()

        articles = []

        flavorarea = soup.find('div', attrs={'class':lambda x: x and 'ahed' in x})
        if flavorarea is not None:
            flavorstory = flavorarea.find('a', href=lambda x: x and x.startswith('/article'))
            if flavorstory is not None:
                flavorstory['class'] = 'mjLinkItem'
                metapage = soup.find('span', attrs={'class':lambda x: x and 'meta_sectionName' in x})
                if metapage is not None:
                    # metapage should always be A1, as that should be first on the page
                    flavorstory.append(copy.copy(metapage))

        for a in soup.findAll('a', attrs={'class':'mjLinkItem'}, href=True):
            container = a.findParent(['li', 'div'])
            meta = a.find(attrs={'class':'meta_sectionName'})
            title = self.tag_to_string(a).strip()
            if meta is not None:
                meta.extract()
                title += ' [%s]' % self.tag_to_string(meta)
            url = 'http://online.wsj.com' + a['href']
            desc = ''
            if container is not None:
                p = container.find('p')
                if p is not None:
                    desc = self.tag_to_string(p)

            articles.append({'title':title, 'url':url,
                'description':desc, 'date':''})

            self.log('\tFound article:', title)

        return articles

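    # Log out when the download finishes so the subscriber session is
    # not left open on WSJ's servers.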
    def cleanup(self):
        self.browser.open('http://online.wsj.com/logout?url=http://online.wsj.com')