- # -*- coding: utf-8 -*-
-
- from calibre.web.feeds.recipes import BasicNewsRecipe
-
- class JournalofHospitalMedicine(BasicNewsRecipe):
-
-     title = 'Journal of Hospital Medicine'
-     __author__ = 'Krittika Goyal'
-     description = 'Medical news'
-     timefmt = ' [%d %b, %Y]'
-     needs_subscription = True
-     language = 'en'
-
-     no_stylesheets = True
-     #remove_tags_before = dict(name='div', attrs={'align':'center'})
-     #remove_tags_after = dict(name='ol', attrs={'compact':'COMPACT'})
-     remove_tags = [
-         dict(name='iframe'),
-         dict(name='div', attrs={'class':'subContent'}),
-         dict(name='div', attrs={'id':['contentFrame']}),
-         #dict(name='form', attrs={'onsubmit':"return verifySearch(this.w,'Keyword, citation, or author')"}),
-         #dict(name='table', attrs={'align':'RIGHT'}),
-     ]
-
-     # TO LOGIN
-     def get_browser(self):
-         br = BasicNewsRecipe.get_browser(self)
-         br.open('http://www3.interscience.wiley.com/cgi-bin/home')
-         br.select_form(name='siteLogin')
-         br['LoginName'] = self.username
-         br['Password'] = self.password
-         response = br.submit()
-         raw = response.read()
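-         # The returned page embeds a userName value; if it is still empty the
-         # login presumably did not take, so fail loudly.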
-         if 'userName = ""' in raw:
-             raise Exception('Login failed. Check your username and password')
-         return br
-
-     # TO GET ARTICLE TOC
-     def johm_get_index(self):
-         return self.index_to_soup('http://www3.interscience.wiley.com/journal/111081937/home')
-
-     # To parse article toc
-     def parse_index(self):
-         parse_soup = self.johm_get_index()
-
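-         # The issue listing appears to live inside the element with id 'contentCell'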
-         div = parse_soup.find(id='contentCell')
-
-         current_section = None
-         current_articles = []
-         feeds = []
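-         # Walk every tag under the TOC container: an <h4> starts a new section,
-         # while a <strong> inside a section carries an article title.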
-         for x in div.findAll(True):
-             if x.name == 'h4':
-                 # Section heading found
-                 if current_articles and current_section:
-                     feeds.append((current_section, current_articles))
-                 current_section = self.tag_to_string(x)
-                 current_articles = []
-                 self.log('\tFound section:', current_section)
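-             # An article title sits in a <strong>; the surrounding markup should
-             # also contain a link to the HTML full text ('/HTMLSTART').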
-             if current_section is not None and x.name == 'strong':
-                 title = self.tag_to_string(x)
-                 p = x.parent.parent.find('a', href=lambda href: href and '/HTMLSTART' in href)
-                 if p is None:
-                     continue
-                 url = p.get('href', False)
-                 if not url or not title:
-                     continue
-                 if url.startswith('/'):
-                     url = 'http://www3.interscience.wiley.com'+url
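-                 # Swap '/HTMLSTART' for '/main.html,ftx_abs', which appears to be
-                 # the full-text (plus abstract) view of the same article.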
-                 url = url.replace('/HTMLSTART', '/main.html,ftx_abs')
-                 self.log('\t\tFound article:', title)
-                 self.log('\t\t\t', url)
-                 current_articles.append({'title': title, 'url': url,
-                     'description': '', 'date': ''})
-
-         if current_articles and current_section:
-             feeds.append((current_section, current_articles))
-
-         return feeds
-
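-     # Rewrite figure image URLs from 'tfig' to 'nfig', which presumably swaps
-     # Wiley's thumbnail figures for the larger versions.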
-     def preprocess_html(self, soup):
-         for img in soup.findAll('img', src=True):
-             img['src'] = img['src'].replace('tfig', 'nfig')
-         return soup
-
-