pax_global_header 0000666 0000000 0000000 00000000064 14731101544 0014512 g ustar 00root root 0000000 0000000 52 comment=046084dc0f18e6ece7f720d8aa56e9ecd500dd93
python-bibtexparser-1.4.3/ 0000775 0000000 0000000 00000000000 14731101544 0015530 5 ustar 00root root 0000000 0000000 python-bibtexparser-1.4.3/.coveragerc 0000664 0000000 0000000 00000000143 14731101544 0017647 0 ustar 00root root 0000000 0000000 [run]
branch = True
source = bibtexparser
[report]
exclude_lines =
if __name__ == .__main__.:
python-bibtexparser-1.4.3/.editorconfig 0000775 0000000 0000000 00000000221 14731101544 0020203 0 ustar 00root root 0000000 0000000 root = true
[*]
charset=utf-8
indent_style = space
indent_size = 4
insert_final_newline = true
end_of_line = lf
[*.{yaml,yml}]
indent_size = 2
python-bibtexparser-1.4.3/.github/ 0000775 0000000 0000000 00000000000 14731101544 0017070 5 ustar 00root root 0000000 0000000 python-bibtexparser-1.4.3/.github/workflows/ 0000775 0000000 0000000 00000000000 14731101544 0021125 5 ustar 00root root 0000000 0000000 python-bibtexparser-1.4.3/.github/workflows/codeql-analysis.yml 0000664 0000000 0000000 00000005254 14731101544 0024746 0 ustar 00root root 0000000 0000000 # For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"
on:
push:
branches: [ "master" ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ "master" ]
schedule:
- cron: '25 22 * * 1'
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
security-events: write
strategy:
fail-fast: false
matrix:
language: [ 'python' ]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
# Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support
steps:
- name: Checkout repository
uses: actions/checkout@v3
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
# queries: security-extended,security-and-quality
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v2
# ℹ️ Command-line programs to run using the OS shell.
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
# If the Autobuild fails above, remove it and uncomment the following three lines.
# modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance.
# - run: |
# echo "Run, Build Application using script"
# ./location_of_script_within_repo/buildscript.sh
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2
python-bibtexparser-1.4.3/.github/workflows/tests.yml 0000664 0000000 0000000 00000001306 14731101544 0023012 0 ustar 00root root 0000000 0000000
name: Tests
on: [push, pull_request]
jobs:
test:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: [3.8, 3.9, "3.10", "3.11"]
steps:
- uses: actions/checkout@v2
- uses: actions/setup-node@v2-beta
with:
node-version: '12'
check-latest: true
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
pip install -r requirements.txt
- name: Run Tests
run: |
python -m unittest discover -s ./bibtexparser/tests
python-bibtexparser-1.4.3/.gitignore 0000664 0000000 0000000 00000000540 14731101544 0017517 0 ustar 00root root 0000000 0000000 *.py[cod]
# C extensions
*.so
# Packages
*.egg
*.egg-info
dist
build
eggs
parts
bin
var
sdist
develop-eggs
.installed.cfg
lib
lib64
# Installer logs
pip-log.txt
# Unit test / coverage reports
.coverage
.tox
nosetests.xml
# Translations
*.mo
# Mr Developer
.mr.developer.cfg
.project
.pydevproject
# Pycharm
.idea
# Vim.
*.swp
# Vscode
.vscode
python-bibtexparser-1.4.3/CHANGELOG 0000664 0000000 0000000 00000010471 14731101544 0016745 0 ustar 00root root 0000000 0000000 v1.4.0
======
Breaking Changes
----------------
* Using common strings in string interpolation is now the default (#311 by @MiWeiss).
See the PR for more details, and how to fall back to old behavior.
New Features / Improvements
---------------------------
* Add option to adjust alignment of text of multi-line values. (#290 by @michaelfruth)
* Raise warning if parser is used multiple times (#312 by @mrn97),
which leads to a merged library. Set `parser.expect_multiple_parse = True` to disable the warning.
* Allow preservation of existing order of entry fields in writer (#317 by @michaelfruth)
v1.3.0
======
Most of these changes come from @MiWeiss personal fork, as one,
hence have a commit id instead of a PR number in CHANGELOG.
* Support for python 3.10
* Removing unused dependencies (fb86407 and d4e3f42)
* No empty extra lines at end of generated files (f84e318)
* Allow capital AND when splitting author list (a8527ff)
* Fix problem in `homogenize_latex_encoding` when authors are lists (528714c)
* Long description in setup.py (#304)
* Typo fixes (2b2c8ee, 11340f3)
v1.2.0
======
* Support for python 3 only
v1.1.0
======
* Handles declarations on lines starting with spaces after comments (#199)
* accept extra characters in @string (#148)
* Add support for BibLaTeX annotations (#208)
* Strip the latex command str for latex_to_unicode() (#182)
* Add comma_last to BibTexWriter (#219)
* Feature: crossref Support (#216)
* BUGFIX: Fix for pyparsing 2.3.1 (#226)
* NEW: Add support for BibLaTeX annotations (#208)
* NEW: Feature: crossref Support (#216)
* ENH: Handles declarations on lines starting with spaces after comments (#199)
* ENH: Checks for empty citekeys and whitespaces (#213)
v1.0.1
======
* BUGFIX: future missing in setup.py
v1.0
====
* ENH: we use pyparsing (#64) by Olivier Magin.
* DOC: Refactoring of the tutorial
* DOC: include docs/ in manifest
* API: fix spelling "homogenize". Affects customization and bparser
* API: BibtexParser: homogenize_fields is now False by default (#94)
v0.6.2
======
* ENH: customization: handle various hyphens (#76).
* ENH: writer: all values according to this maximal key width (#83).
* ENH: writer: display_order allows to have custom ordering of the fields of
each entry instead of just alphabetical (#83) by cschaffner.
* FIX: bad support of braces in string (#90) by sangkilc.
v0.6.1
======
* API: Previous type and id keywords which are automatically added to
the dictionary are now ENTRYTYPE and ID, respectively (#42).
* ENH: comma first syntax support (#49) by Michal Grochmal.
v0.6.0
======
* DOC: clarify version number
* ENH: support for bibtex with leading spaces (#34)
* FIX: if title contained multiples words in braces
* ENH: code refactoring (#33)
* ENH: support for comment blocks (#32)
* ENH: Removed comma after last key-value pair by faph (#28)
* ENH: optional keys sanitising by faph (#29)
* FIX: missing coma at the end of a record (#24)
* DOC: clarify the usecase of to_bibtex
* FIX: raise exception for TypeError in to_bibtex (#22)
v0.5.5
======
* ENH: json output
* ENH: Add (optional) support for non-standard entry types by Georg C. Brückmann
* FIX: protect uppercase only on unprotected characters. #18
* ENH: string replacement by Uwe Schmidt (#13 #20)
v0.5.4
======
* ENH: json output
* API: enhance the naming choice for bwriter
v0.5.3
======
* ENH: add writer (#16), thanks to Lucas Verney
* MAINT: Remove non-standard --BREAK-- command detection
* FIX: missing strip() (#14) by Sebastien Diemer
* API breakage: the parser takes data instead of a filehandler
v0.5.2
======
* ENH: fix tests latex encoding
* ENH: support @comment @preambule (escaped)
* ENH: check that bibtype belongs to a known type
v0.5.1
======
* ENH: split keywords with various separators
* ENH: get_entry_dict make the dict once
* ENH: add messages with logging
* FIX: fix unittest related to braces detection
v0.5
====
* Permission from original authors and OKFN to use LGPLv3
* ENH: Python 2.7 support
* FIX: issue related to accents
v0.4
====
* ENH: Transformations on characters are now considered as a customization
* ENH: New customization: clean latex style
* FIX: issue related to name processing
v0.3
====
* DOC: moved to readsthedoc
* DOC: several improvements
* MAINT: separate customizations
v0.2
====
* TEST: initialized
* DOC: initialized
v0.1
====
* First preliminary release
python-bibtexparser-1.4.3/CODE_OF_CONDUCT.md 0000664 0000000 0000000 00000012204 14731101544 0020326 0 ustar 00root root 0000000 0000000 # Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
< various means of contact provided at mweiss.ch >.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
python-bibtexparser-1.4.3/CONTRIBUTING.md 0000664 0000000 0000000 00000001577 14731101544 0017773 0 ustar 00root root 0000000 0000000 Thanks heaps for being interested in contributing to python-bibtexparser.
We are always looking for people to improve the library. Contributions include, but are not limited to:
1. Opening well described issues for bugs or feature requests.
2. Providing bugfixing PRs
3. Implementing any of the issues or continuing any of the PRs labelled with `needs help` or `good first issue`.
Some guidelines:
1. Be nice! We're all doing this in our free time; no one is obligated to do anything.
2. Add sufficient tests to your PRs
3. Document your code.
4. Don't hesitate to ask questions.
Also note that there are currently two independent "default" branches:
First, `master`, where we maintain a stable `v1.x` and where we merge almost only backwards-compatible bugfixes.
Second, `v2` where we work on a full re-implementation of the library.
Issues targeting the `v2` branch are labelled `v2`.
python-bibtexparser-1.4.3/COPYING 0000664 0000000 0000000 00000022171 14731101544 0016566 0 ustar 00root root 0000000 0000000 Copyright (c) 2012-2018, François Boulogne and the python-bibtexparser contributors
All rights reserved.
The code is distributed under a dual license (at your choice).
#####################################################################
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
(1) Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
(2) Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
(3)The name of the author may not be used to
endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
#####################################################################
GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc.
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.
0. Additional Definitions.
As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.
"The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.
An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.
A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".
The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.
The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.
1. Exception to Section 3 of the GNU GPL.
You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.
2. Conveying Modified Versions.
If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:
a) under this License, provided that you make a good faith effort to
ensure that, in the event an Application does not supply the
function or data, the facility still operates, and performs
whatever part of its purpose remains meaningful, or
b) under the GNU GPL, with none of the additional permissions of
this License applicable to that copy.
3. Object Code Incorporating Material from Library Header Files.
The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:
a) Give prominent notice with each copy of the object code that the
Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the object code with a copy of the GNU GPL and this license
document.
4. Combined Works.
You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:
a) Give prominent notice with each copy of the Combined Work that
the Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the Combined Work with a copy of the GNU GPL and this license
document.
c) For a Combined Work that displays copyright notices during
execution, include the copyright notice for the Library among
these notices, as well as a reference directing the user to the
copies of the GNU GPL and this license document.
d) Do one of the following:
0) Convey the Minimal Corresponding Source under the terms of this
License, and the Corresponding Application Code in a form
suitable for, and under terms that permit, the user to
recombine or relink the Application with a modified version of
the Linked Version to produce a modified Combined Work, in the
manner specified by section 6 of the GNU GPL for conveying
Corresponding Source.
1) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (a) uses at run time
a copy of the Library already present on the user's computer
system, and (b) will operate properly with a modified version
of the Library that is interface-compatible with the Linked
Version.
e) Provide Installation Information, but only if you would otherwise
be required to provide such information under section 6 of the
GNU GPL, and only to the extent that such information is
necessary to install and execute a modified version of the
Combined Work produced by recombining or relinking the
Application with a modified version of the Linked Version. (If
you use option 4d0, the Installation Information must accompany
the Minimal Corresponding Source and Corresponding Application
Code. If you use option 4d1, you must provide the Installation
Information in the manner specified by section 6 of the GNU GPL
for conveying Corresponding Source.)
5. Combined Libraries.
You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:
a) Accompany the combined library with a copy of the same work based
on the Library, uncombined with any other library facilities,
conveyed under the terms of this License.
b) Give prominent notice with the combined library that part of it
is a work based on the Library, and explaining where to find the
accompanying uncombined form of the same work.
6. Revised Versions of the GNU Lesser General Public License.
The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.
If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.
python-bibtexparser-1.4.3/MANIFEST.in 0000664 0000000 0000000 00000000127 14731101544 0017266 0 ustar 00root root 0000000 0000000 include *.txt
include *.md
include COPYING
include docs/Makefile
include docs/source/*
python-bibtexparser-1.4.3/README.rst 0000664 0000000 0000000 00000004244 14731101544 0017223 0 ustar 00root root 0000000 0000000 python-bibtexparser
===================
Python 3 library to parse `bibtex <https://en.wikipedia.org/wiki/BibTeX>`_ files.
.. contents::
Documentation
-------------
Our documentation includes the installation procedure, a tutorial, the API and advices to report a bug:
`Documentation on readthedocs.io `_
Upgrading
---------
Please, read the changelog before upgrading regarding API modifications.
License
-------
Dual license (at your choice):
* LGPLv3.
* BSD
See COPYING for details.
History and evolutions
----------------------
The original source code was part of bibserver from `OKFN `_. This project is released under the AGPLv3. OKFN and the original authors kindly provided the permission to use a subpart of their project (ie the bibtex parser) under LGPLv3. Many thanks to them!
The parser evolved to a new core based on pyparsing.
Since 2022, after a long stale period, this library has a new maintainer (`@MiWeiss `_).
`v2` Announcement
-----------------
Version 1.x, is trusted and used by more than 1300 projects, with much of its code being ~10 years old. Our primary objective in maintaining v1.x is to provide stability and backwards compatibility to these projects - such that they can safely and easily migrate to new versions.
Still, there's much room for large-scale improvements and changes to modernize bibtexparser. Hence, we are working on a new version 2.0.0 which is a complete rewrite of the library, providing amongst other the following advantages:
- Order of magnitudes faster
- Type-Hints and extensive documentation
- Easily customizable parsing **and** writing
- Access to raw, unparsed bibtex.
- Fault-Tolerant: Able to parse files with syntax errors
- Massively simplified, more robust handling of de- and encoding (special chars, ...).
Check out the `v2 dev branch `_ and the `v2 coordination issue `_ to get a sneak preview. Also - if you're keen - we're always looking for contributors. Do not hesitate to get in contact with us.
python-bibtexparser-1.4.3/RELEASE 0000664 0000000 0000000 00000000744 14731101544 0016540 0 ustar 00root root 0000000 0000000 How to release
==============
* Update CHANGELOG
* Update version in __init__.py
* Commit and open PR on Github to see that all tests pass,
and then merge the PR.
* Check out master locally
* Build wheel
python setup.py sdist upload
* Check long description
twine check dist/*
* Upload to pypi-test
twine upload --repository testpypi dist/*
* Upload to pypi
twine upload dist/*
* Create release on Github
* Verify docs are automatically updated
python-bibtexparser-1.4.3/bibtexparser/ 0000775 0000000 0000000 00000000000 14731101544 0020222 5 ustar 00root root 0000000 0000000 python-bibtexparser-1.4.3/bibtexparser/__init__.py 0000664 0000000 0000000 00000006206 14731101544 0022337 0 ustar 00root root 0000000 0000000 """
`BibTeX `_ is a bibliographic data file format.
The :mod:`bibtexparser` module can parse BibTeX files and write them. The API is similar to the
:mod:`json` module. The parsed data is returned as a simple :class:`BibDatabase` object with the main attribute being
:attr:`entries` representing bibliographic sources such as books and journal articles.
The following functions provide a quick and basic way to manipulate a BibTeX file.
More advanced features are also available in this module.
Parsing a file is as simple as::
import bibtexparser
with open('bibtex.bib') as bibtex_file:
bibtex_database = bibtexparser.load(bibtex_file)
And writing::
import bibtexparser
with open('bibtex.bib', 'w') as bibtex_file:
bibtexparser.dump(bibtex_database, bibtex_file)
"""
__all__ = [
'loads', 'load', 'dumps', 'dump', 'bibdatabase',
'bparser', 'bwriter', 'bibtexexpression', 'latexenc', 'customization',
]
__version__ = '1.4.3'
from . import bibdatabase, bibtexexpression, bparser, bwriter, latexenc, customization
def loads(bibtex_str, parser=None):
    """
    Parse a BibTeX string into a :class:`BibDatabase` object.

    :param bibtex_str: BibTeX source to parse
    :type bibtex_str: str or unicode
    :param parser: parser instance to use instead of the default one (optional)
    :type parser: BibTexParser
    :returns: the parsed bibliographic database
    :rtype: BibDatabase
    """
    # Fall back to a default parser only when the caller did not supply one.
    active_parser = bparser.BibTexParser() if parser is None else parser
    return active_parser.parse(bibtex_str)
def load(bibtex_file, parser=None):
    """
    Parse an open BibTeX file into a :class:`BibDatabase` object.

    :param bibtex_file: open file handle containing BibTeX to parse
    :type bibtex_file: file
    :param parser: parser instance to use instead of the default one (optional)
    :type parser: BibTexParser
    :returns: the parsed bibliographic database
    :rtype: BibDatabase

    Example::

        import bibtexparser
        with open('bibtex.bib') as bibtex_file:
            bibtex_database = bibtexparser.load(bibtex_file)
    """
    # Fall back to a default parser only when the caller did not supply one.
    active_parser = parser if parser is not None else bparser.BibTexParser()
    return active_parser.parse_file(bibtex_file)
def dumps(bib_database, writer=None):
    """
    Serialize a :class:`BibDatabase` object to a BibTeX string.

    :param bib_database: bibliographic database object
    :type bib_database: BibDatabase
    :param writer: writer instance to use instead of the default one (optional)
    :type writer: BibTexWriter
    :returns: the database rendered as BibTeX
    :rtype: unicode
    """
    # Fall back to a default writer only when the caller did not supply one.
    active_writer = writer if writer is not None else bwriter.BibTexWriter()
    return active_writer.write(bib_database)
def dump(bib_database, bibtex_file, writer=None):
    """
    Serialize a :class:`BibDatabase` object and write it to a BibTeX file.

    :param bib_database: bibliographic database object
    :type bib_database: BibDatabase
    :param bibtex_file: open, writable file handle to receive the output
    :type bibtex_file: file
    :param writer: writer instance to use instead of the default one (optional)
    :type writer: BibTexWriter
    Example::

        import bibtexparser
        with open('bibtex.bib', 'w') as bibtex_file:
            bibtexparser.dump(bibtex_database, bibtex_file)
    """
    # Fall back to a default writer only when the caller did not supply one.
    active_writer = bwriter.BibTexWriter() if writer is None else writer
    serialized = active_writer.write(bib_database)
    bibtex_file.write(serialized)
python-bibtexparser-1.4.3/bibtexparser/bibdatabase.py 0000664 0000000 0000000 00000020461 14731101544 0023020 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
from collections import OrderedDict
import logging
logger = logging.getLogger(__name__)
#: Entry types defined by the standard BibTeX styles. Used to distinguish
#: standard entries from non-standard (e.g. BibLaTeX-specific) ones.
#: A set literal replaces the redundant ``set([...])`` wrapper.
STANDARD_TYPES = {
    'article',
    'book',
    'booklet',
    'conference',
    'inbook',
    'incollection',
    'inproceedings',
    'manual',
    'mastersthesis',
    'misc',
    'phdthesis',
    'proceedings',
    'techreport',
    'unpublished',
}
#: The month abbreviations predefined by standard BibTeX styles, in calendar
#: order. Each key is the first three letters of the month name, lower-cased.
COMMON_STRINGS = OrderedDict(
    (month[:3].lower(), month)
    for month in ('January', 'February', 'March', 'April', 'May', 'June',
                  'July', 'August', 'September', 'October', 'November',
                  'December')
)
class UndefinedString(KeyError):
    """Raised when a BibTeX string (``@string`` abbreviation) is referenced but not defined."""
    pass
class BibDatabase(object):
    """
    Bibliographic database object that follows the data structure of a BibTeX file.
    """

    def __init__(self):
        #: List of BibTeX entries, for example `@book{...}`, `@article{...}`, etc. Each entry is a simple dict with
        #: BibTeX field-value pairs, for example `'author': 'Bird, R.B. and Armstrong, R.C. and Hassager, O.'` Each
        #: entry will always have the following dict keys (in addition to other BibTeX fields):
        #:
        #: * `ID` (BibTeX key)
        #: * `ENTRYTYPE` (entry type in lowercase, e.g. `book`, `article` etc.)
        self.entries = []
        # Cache backing the `entries_dict` property; rebuilt on every access.
        self._entries_dict = {}
        #: List of BibTeX comment (`@comment{...}`) blocks.
        self.comments = []
        #: OrderedDict of BibTeX string definitions (`@string{...}`). In order of definition.
        self.strings = OrderedDict()  # Not sure if order is important, keep order just in case
        #: List of BibTeX preamble (`@preamble{...}`) blocks.
        self.preambles = []
        #: List of fields that should not be updated when resolving crossrefs
        self._not_updated_by_crossref = ['_FROM_CROSSREF']

    def load_common_strings(self):
        """Register the predefined common strings (month abbreviations)."""
        self.strings.update(COMMON_STRINGS)

    def get_entry_list(self):
        """Get a list of bibtex entries.

        :returns: BibTeX entries
        :rtype: list

        .. deprecated:: 0.5.6
           Use :attr:`entries` instead.
        """
        return self.entries

    @staticmethod
    def entry_sort_key(entry, fields):
        """Build a sort key for *entry* from the given *fields*.

        Missing fields sort as empty strings; comparison is done on the
        lowercase string form of each value.

        :param entry: BibTeX entry dict
        :param fields: iterable of field names
        :returns: tuple of lowercase string values
        """
        # Sorting always as string
        return tuple(str(entry.get(field, '')).lower() for field in fields)

    def get_entry_dict(self):
        """Return a dictionary of BibTeX entries, where dict key is the BibTeX entry key.

        If several entries share an ID, the last one wins.
        This method re-creates the dict every time it is called,
        hence subsequent calls should be avoided with large databases.
        """
        self._entries_dict = {entry['ID']: entry for entry in self.entries}
        return self._entries_dict

    entries_dict = property(get_entry_dict)

    def expand_string(self, name):
        """Return the expanded value of the string definition *name*.

        :raises UndefinedString: if *name* has no definition in :attr:`strings`.
        """
        try:
            return BibDataStringExpression.expand_if_expression(
                self.strings[name])
        except KeyError:
            raise UndefinedString(name)

    def _add_missing_from_crossref_entry(self, entry, dependencies=None):
        """Resolve the `_crossref` of a single entry, recursing through
        chained crossrefs and detecting circular references.

        :param entry: entry dict containing a `_crossref` key
        :param dependencies: set of entry IDs already on the resolution
            path (internal, used for cycle detection)
        """
        if dependencies is None:
            # Fresh set per top-level call: a mutable default argument
            # would be shared across calls.
            dependencies = set()
        if entry['ID'] in self._crossref_updated:
            return
        if entry['_crossref'] not in self.entries_dict:
            logger.error("Crossref reference %s for %s is missing.",
                         entry['_crossref'],
                         entry['ID'])
            return
        if entry['_crossref'] in dependencies:
            logger.error("Circular crossref dependency: %s->%s->%s.",
                         "->".join(dependencies),
                         entry['ID'],
                         entry['_crossref'])
            return
        crossref_entry = self.entries_dict[entry['_crossref']]
        if '_crossref' in crossref_entry:
            # Resolve the referenced entry first so its inherited fields
            # propagate through to this entry as well.
            dependencies.add(entry['ID'])
            self._add_missing_from_crossref_entry(crossref_entry, dependencies)
            dependencies.remove(entry['ID'])
        # Only copy fields absent from this entry and not blacklisted.
        from_crossref = {bibfield: bibvalue
                         for (bibfield, bibvalue) in crossref_entry.items()
                         if bibfield not in entry.keys() and
                         bibfield not in self._not_updated_by_crossref}
        entry.update(from_crossref)
        self._crossref_updated.append(entry['ID'])
        # Record which fields were inherited, then drop the marker.
        entry['_FROM_CROSSREF'] = sorted(from_crossref.keys())
        del entry['_crossref']

    def add_missing_from_crossref(self):
        """Resolve crossrefs and update entries accordingly.
        """
        self._crossref_updated = []
        for entry in self.entries:
            if "_crossref" in entry:
                self._add_missing_from_crossref_entry(entry)
class BibDataString(object):
    """
    Lazy reference to a named bibtex @string definition.

    The referenced text is only looked up in the owning BibDatabase when
    :meth:`get_value` is called, so string definitions may be interpolated
    after parsing. The name is stored lowercased.
    """

    def __init__(self, bibdatabase, name):
        self._bibdatabase = bibdatabase
        self.name = name.lower()

    def __eq__(self, other):
        if not isinstance(other, BibDataString):
            return False
        return other.name == self.name

    def __repr__(self):
        return "BibDataString({})".format(repr(self.name))

    def get_value(self):
        """
        Query value from string name.

        :returns: string
        """
        return self._bibdatabase.expand_string(self.name)

    def get_dependencies(self, known_dependencies=set()):
        """Recursively tracks strings on which the expression depends.

        :param known_dependencies: dependencies to ignore
        """
        raise NotImplementedError

    @staticmethod
    def expand_string(string_or_bibdatastring):
        """
        Eventually replaces a bibdatastring by its value.

        :param string_or_bibdatastring: the parsed token
        :type string_or_bibdatastring: string or BibDataString
        :returns: string
        """
        if not isinstance(string_or_bibdatastring, BibDataString):
            return string_or_bibdatastring
        return string_or_bibdatastring.get_value()
class BibDataStringExpression(object):
    """
    Represents a bibtex string expression.

    String expressions are sequences of regular strings and bibtex strings
    (concatenated with `#` in BibTeX). They are kept as a list of regular
    strings and BibDataStrings in :attr:`expr`, and can be interpolated
    from a Bibdatabase.

    BibDataStringExpression(e)

    :param e: list of strings and BibDataStrings
    """

    def __init__(self, expression):
        self.expr = expression

    def __eq__(self, other):
        if not isinstance(other, BibDataStringExpression):
            return False
        return other.expr == self.expr

    def __repr__(self):
        return "BibDataStringExpression({})".format(repr(self.expr))

    def get_value(self):
        """
        Replaces bibdatastrings by their values in the expression.

        :returns: string
        """
        return ''.join(BibDataString.expand_string(part)
                       for part in self.expr)

    def apply_on_strings(self, fun):
        """
        Maps a function on strings in expression, keeping unchanged
        BibDataStrings.

        :param fun: function from strings to strings
        """
        transformed = []
        for part in self.expr:
            if isinstance(part, BibDataString):
                transformed.append(part)
            else:
                transformed.append(fun(part))
        self.expr = transformed

    @staticmethod
    def expand_if_expression(string_or_expression):
        """
        Eventually replaces a BibDataStringExpression by its value.

        :param string_or_expression: the object to expand
        :type string_or_expression: string or BibDataStringExpression
        :returns: string
        """
        if isinstance(string_or_expression, BibDataStringExpression):
            return string_or_expression.get_value()
        return string_or_expression

    @staticmethod
    def expression_if_needed(tokens):
        """Build expression only if tokens are not a regular value.
        """
        if len(tokens) == 1 and not isinstance(tokens[0], BibDataString):
            return tokens[0]
        return BibDataStringExpression(tokens)
def as_text(text_string_or_expression):
    """Render a value as plain text.

    BibDataString and BibDataStringExpression instances are interpolated
    via their ``get_value()``; anything else is coerced with :func:`str`.
    """
    interpolatable = (BibDataString, BibDataStringExpression)
    if isinstance(text_string_or_expression, interpolatable):
        return text_string_or_expression.get_value()
    return str(text_string_or_expression)
python-bibtexparser-1.4.3/bibtexparser/bibtexexpression.py 0000664 0000000 0000000 00000023212 14731101544 0024171 0 ustar 00root root 0000000 0000000 import pyparsing as pp
from .bibdatabase import BibDataStringExpression
# General helpers
def _strip_after_new_lines(s):
"""Removes leading and trailing whitespaces in all but first line."""
lines = s.splitlines()
if len(lines) > 1:
lines = [lines[0]] + [l.lstrip() for l in lines[1:]]
return '\n'.join(lines)
def strip_after_new_lines(s):
    """Remove leading whitespace from all but the first line.

    For a plain string, returns a new stripped string. For a
    BibDataStringExpression, each plain-string part is stripped in place
    and the (mutated) expression itself is returned.

    :param s: string or BibDataStringExpression
    """
    if isinstance(s, BibDataStringExpression):
        s.apply_on_strings(_strip_after_new_lines)
        return s
    else:
        return _strip_after_new_lines(s)
def add_logger_parse_action(expr, log_func):
    """Register a callback on expression parsing with the adequate message.

    :param expr: pyparsing expression (its `resultsName` is used in the message)
    :param log_func: function called with the formatted message on each match
    """
    def on_match(s, l, t):
        log_func("Found {}: {}".format(expr.resultsName, t))
    expr.addParseAction(on_match)
# Parse action helpers
# Helpers for returning values from the parsed tokens. Shaped as pyparsing's
# parse actions. See pyparsing documentation for the arguments.
def first_token(string_, location, token):
    """Parse action returning the single parsed token.

    :args: see pyparsing documentation.
    """
    # TODO Handle the multi-token case correctly instead of asserting.
    assert len(token) == 1
    return token[0]
def remove_trailing_newlines(string_, location, token):
    """Parse action stripping trailing newlines from the first token.

    For falsy token values nothing is returned, so the token is left
    unchanged by pyparsing.
    """
    value = token[0]
    if not value:
        return None
    return value.rstrip('\n')
def remove_braces(string_, location, token):
    """Parse action stripping one level of surrounding braces, if present.

    A leading '{' and/or trailing '}' is dropped independently; an empty
    token yields the empty string.
    """
    value = token[0]
    if not value:
        return ''
    begin = 1 if value[0] == '{' else 0
    stop = -1 if value[-1] == '}' else None
    return value[begin:stop]
def field_to_pair(string_, location, token):
    """
    Looks for parsed element named 'Field'.

    :returns: (name, value).
    """
    field = token.get('Field')
    value = field.get('Value')
    # pyparsing >= 2.3.1 wraps the value in a ParseResults (see #225 and
    # the API change note in pyparsing's Changelog) - unwrap it.
    if isinstance(value, pp.ParseResults):
        value = value[0]
    name = field.get('FieldName')
    return (name, strip_after_new_lines(value))
# Expressions helpers
def in_braces_or_pars(exp):
    """
    Wrap *exp* so it matches either `{exp}` or `(exp)`.

    The surrounding braces/parentheses are suppressed from the result.

    exp -> (exp)|{exp}
    """
    return ((pp.Suppress('{') + exp + pp.Suppress('}')) |
            (pp.Suppress('(') + exp + pp.Suppress(')')))
class BibtexExpression(object):
    """Gives access to pyparsing expressions.

    Attributes are pyparsing expressions for the following elements:

    * main_expression: the bibtex file
    * string_def: a string definition
    * preamble_decl: a preamble declaration
    * explicit_comment: an explicit comment
    * entry: an entry definition
    * implicit_comment: an implicit comment
    """

    # Re-exported so callers can catch parse errors without importing pyparsing.
    ParseException = pp.ParseException

    def __init__(self):
        # Init parse action functions
        self.set_string_name_parse_action(lambda s, l, t: None)

        # Bibtex keywords
        string_def_start = pp.CaselessKeyword("@string")
        preamble_start = pp.CaselessKeyword("@preamble")
        comment_line_start = pp.CaselessKeyword('@comment')

        # String names
        string_name = pp.Word(pp.alphanums + '_-:')('StringName')
        self.set_string_name_parse_action(lambda s, l, t: None)
        string_name.addParseAction(self._string_name_parse_action)

        # Values inside bibtex fields
        # Values can be integer or string expressions. The latter may use
        # quoted or braced values.

        # Integer values
        integer = pp.Word(pp.nums)('Integer')

        # Braced values: braced values can contain nested (but balanced) braces
        braced_value_content = pp.CharsNotIn('{}')
        braced_value = pp.Forward()  # Recursive definition for nested braces
        braced_value <<= pp.originalTextFor(
            '{' + pp.ZeroOrMore(braced_value | braced_value_content) + '}'
        )('BracedValue')
        braced_value.setParseAction(remove_braces)
        # TODO add ignore for "\}" and "\{" ?
        # TODO @ are not parsed by bibtex in braces

        # Quoted values: may contain braced content with balanced braces
        brace_in_quoted = pp.nestedExpr('{', '}', ignoreExpr=None)
        text_in_quoted = pp.CharsNotIn('"{}')
        # (quotes should be escaped by braces in quoted value)
        quoted_value = pp.originalTextFor(
            '"' + pp.ZeroOrMore(text_in_quoted | brace_in_quoted) + '"'
        )('QuotedValue')
        quoted_value.addParseAction(pp.removeQuotes)

        # String expressions: values concatenated with '#'
        string_expr = pp.delimitedList(
            (quoted_value | braced_value | string_name), delim='#'
        )('StringExpression')
        string_expr.addParseAction(self._string_expr_parse_action)

        value = (integer | string_expr)('Value')

        # Entries

        # @EntryType { ...
        entry_type = (pp.Suppress('@') + pp.Word(pp.alphas))('EntryType')
        entry_type.setParseAction(first_token)

        # Entry key: any character up to a ',' without leading and trailing
        # spaces. Also exclude spaces and prevent it from being empty.
        key = pp.SkipTo(',')('Key')  # TODO Maybe also exclude @',\#}{~%

        def citekeyParseAction(string_, location, token):
            """Parse action for validating citekeys.

            It ensures citekey is not empty and has no space.

            :args: see pyparsing documentation.
            """
            key = first_token(string_, location, token).strip()
            if len(key) < 1:
                raise self.ParseException(
                    string_, loc=location, msg="Empty citekeys are not allowed.")
            for i, c in enumerate(key):
                if c.isspace():
                    raise self.ParseException(
                        string_, loc=(location + i),
                        msg="Whitespace not allowed in citekeys.")
            return key

        key.setParseAction(citekeyParseAction)

        # Field name: word of letters, digits, dashes and underscores
        field_name = pp.Word(pp.alphanums + '_-().+')('FieldName')
        field_name.setParseAction(first_token)

        # Field: field_name = value
        field = pp.Group(field_name + pp.Suppress('=') + value)('Field')
        field.setParseAction(field_to_pair)

        # List of fields: comma separated fields, optional trailing comma
        field_list = (pp.delimitedList(field) + pp.Suppress(pp.Optional(','))
                      )('Fields')
        # NOTE: `reversed` means the FIRST occurrence of a duplicated field
        # name wins when building the dict.
        field_list.setParseAction(
            lambda s, l, t: {k: v for (k, v) in reversed(t.get('Fields'))})

        # Entry: type, key, and fields
        self.entry = (entry_type +
                      in_braces_or_pars(key + pp.Suppress(',') + field_list)
                      )('Entry')

        # Other stuff: comments, string definitions, and preamble declarations

        # Explicit comments: @comment + everything up to next valid declaration
        # starting on new line.
        not_an_implicit_comment = (pp.LineEnd() + pp.Literal('@')
                                   ) | pp.StringEnd()
        self.explicit_comment = (
            pp.Suppress(comment_line_start) +
            pp.originalTextFor(pp.SkipTo(not_an_implicit_comment),
                               asString=True))('ExplicitComment')
        self.explicit_comment.addParseAction(remove_trailing_newlines)
        self.explicit_comment.addParseAction(remove_braces)
        # Previous implementation included comment until next '}'.
        # This is however not inline with bibtex behavior that is to only
        # ignore until EOL. Brace stipping is arbitrary here but avoids
        # duplication on bibtex write.

        # Empty implicit_comments lead to infinite loop of zeroOrMore
        def mustNotBeEmpty(t):
            if not t[0]:
                raise pp.ParseException("Match must not be empty.")

        # Implicit comments: not anything else
        self.implicit_comment = pp.originalTextFor(
            pp.SkipTo(not_an_implicit_comment).setParseAction(mustNotBeEmpty),
            asString=True)('ImplicitComment')
        self.implicit_comment.addParseAction(remove_trailing_newlines)

        # String definition
        self.string_def = (pp.Suppress(string_def_start) + in_braces_or_pars(
            string_name +
            pp.Suppress('=') +
            string_expr('StringValue')
        ))('StringDefinition')

        # Preamble declaration
        self.preamble_decl = (pp.Suppress(preamble_start) +
                              in_braces_or_pars(value))('PreambleDeclaration')

        # Main bibtex expression
        self.main_expression = pp.ZeroOrMore(
            self.string_def |
            self.preamble_decl |
            self.explicit_comment |
            self.entry |
            self.implicit_comment)

    def add_log_function(self, log_fun):
        """Add notice to logger on entry, comment, preamble, string definitions.

        :param log_fun: logger function
        """
        for e in [self.entry,
                  self.implicit_comment,
                  self.explicit_comment,
                  self.preamble_decl,
                  self.string_def]:
            add_logger_parse_action(e, log_fun)

    def set_string_name_parse_action(self, fun):
        """Set the parseAction for string name expression.

        .. Note::

            For some reason pyparsing duplicates the string_name
            expression so setting its parseAction a posteriori has no effect
            in the context of a string expression. This is why this function
            should be used instead.
        """
        self._string_name_parse_action_fun = fun

    def _string_name_parse_action(self, s, l, t):
        # Indirection so the action set via set_string_name_parse_action
        # is picked up even after the expression has been built.
        return self._string_name_parse_action_fun(s, l, t)

    def _string_expr_parse_action(self, s, l, t):
        # Collapse a single plain token to a string; otherwise keep an
        # expression object.
        return BibDataStringExpression.expression_if_needed(t)

    def parseFile(self, file_obj):
        """Parse a whole file-like object with the main expression."""
        return self.main_expression.parseFile(file_obj, parseAll=True)
python-bibtexparser-1.4.3/bibtexparser/bparser.py 0000664 0000000 0000000 00000026747 14731101544 0022252 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# Original source: github.com/okfn/bibserver
# Authors:
# markmacgillivray
# Etienne Posthumus (epoz)
# Francois Boulogne
import io
import logging
import warnings
from bibtexparser.bibdatabase import (BibDatabase, BibDataString, as_text,
BibDataStringExpression, STANDARD_TYPES)
from bibtexparser.bibtexexpression import BibtexExpression
logger = logging.getLogger(__name__)
__all__ = ['BibTexParser']
def parse(data, *args, **kwargs):
    """Parse a BibTeX string with a one-off :class:`BibTexParser`.

    Positional and keyword arguments are forwarded to the parser
    constructor.
    """
    return BibTexParser(*args, **kwargs).parse(data)
class BibTexParser(object):
    """
    A parser for reading BibTeX bibliographic data files.

    Example::

        from bibtexparser.bparser import BibTexParser

        bibtex_str = ...

        parser = BibTexParser()
        parser.ignore_nonstandard_types = False
        parser.homogenize_fields = False
        parser.common_strings = False
        bib_database = bibtexparser.loads(bibtex_str, parser)

    :param customization: function or None (default)
        Customization to apply to parsed entries.
    :param ignore_nonstandard_types: bool (default True)
        If True ignores non-standard bibtex entry types.
    :param homogenize_fields: bool (default False)
        Common field name replacements (as set in alt_dict attribute).
    :param interpolate_strings: bool (default True)
        If True, replace bibtex string by their value, else uses
        BibDataString objects.
    :param common_strings: bool (default True)
        Include common string definitions (e.g. month abbreviations) to
        the bibtex file.
    :param add_missing_from_crossref: bool (default False)
        Resolve BibTeX references set in the crossref field for BibTeX entries
        and add the fields from the referenced entry to the referencing entry.
    """

    def __new__(cls, data=None, **args):
        """
        To catch the old API structure in which creating the parser would
        immediately parse and return data.
        """
        if data is None:
            return super(BibTexParser, cls).__new__(cls)
        else:
            # For backwards compatibility: if data is given, parse
            # and return the `BibDatabase` object instead of the parser.
            return parse(data, **args)

    def __init__(self, data=None,
                 customization=None,
                 ignore_nonstandard_types=True,
                 homogenize_fields=False,
                 interpolate_strings=True,
                 common_strings=True,
                 add_missing_from_crossref=False):
        """
        Creates a parser for reading BibTeX files

        :return: parser
        :rtype: `BibTexParser`
        """
        self._parse_call_count = 0
        #: Parsers can be called multiple times, merging databases, but raising a warning.
        #: Set this to `True` to disable the warning. Default: `False`
        self.expect_multiple_parse = False
        self.bib_database = BibDatabase()
        #: Load common strings such as months abbreviation
        #: Default: `True` (new in 1.4.0 - was previously `False`)
        self.common_strings = common_strings
        if self.common_strings:
            self.bib_database.load_common_strings()
        #: Callback function to process BibTeX entries after parsing,
        #: for example to create a list from a string with multiple values.
        #: By default all BibTeX values are treated as simple strings.
        #: Default: `None`.
        self.customization = customization
        #: Ignore non-standard BibTeX types (`book`, `article`, etc).
        #: Default: `True`.
        self.ignore_nonstandard_types = ignore_nonstandard_types
        #: Sanitize BibTeX field names, for example change `url` to `link` etc.
        #: Field names are always converted to lowercase names.
        #: Default: `False`.
        self.homogenize_fields = homogenize_fields
        #: Interpolate Bibtex Strings or keep the structure
        self.interpolate_strings = interpolate_strings
        # On some sample data files, the character encoding detection simply
        # hangs We are going to default to utf8, and mandate it.
        self.encoding = 'utf8'
        # Add missing field from cross-ref
        self.add_missing_from_crossref = add_missing_from_crossref
        # pre-defined set of key changes (applied when homogenize_fields=True)
        self.alt_dict = {
            'keyw': u'keyword',
            'keywords': u'keyword',
            'authors': u'author',
            'editors': u'editor',
            'urls': u'url',
            'link': u'url',
            'links': u'url',
            'subjects': u'subject',
            'xref': u'crossref'
        }
        # Setup the parser expression
        self._init_expressions()

    def parse(self, bibtex_str, partial=False):
        """Parse a BibTeX string into an object

        :param bibtex_str: BibTeX string
        :type: str or unicode
        :param partial: If True, print errors only on parsing failures.
            If False, an exception is raised.
        :type: boolean
        :return: bibliographic database
        :rtype: BibDatabase
        """
        self._parse_call_count += 1
        if self._parse_call_count == 2 and not self.expect_multiple_parse:
            warnings.warn("The parser has been called more than once. "
                          "Subsequent parse calls lead to a combined BibTeX library. \n"
                          "To avoid the warning, set property `parser.expect_multiple_parse` to `True`.",
                          category=UserWarning, stacklevel=2)
        bibtex_file_obj = self._bibtex_file_obj(bibtex_str)
        try:
            # Parse actions registered in _init_expressions populate
            # self.bib_database as a side effect of parsing.
            self._expr.parseFile(bibtex_file_obj)
        except self._expr.ParseException as exc:
            logger.error("Could not parse properly, starting at %s", exc.line)
            if not partial:
                raise exc
        if self.add_missing_from_crossref:
            self.bib_database.add_missing_from_crossref()
        return self.bib_database

    def parse_file(self, file, partial=False):
        """Parse a BibTeX file into an object

        :param file: BibTeX file or file-like object
        :type: file
        :param partial: If True, print errors only on parsing failures.
            If False, an exception is raised.
        :type: boolean
        :return: bibliographic database
        :rtype: BibDatabase
        """
        return self.parse(file.read(), partial=partial)

    def _init_expressions(self):
        """
        Defines all parser expressions used internally.
        """
        self._expr = BibtexExpression()
        # Handle string as BibDataString object
        self._expr.set_string_name_parse_action(
            lambda s, l, t:
                BibDataString(self.bib_database, t[0]))
        # Add notice to logger
        self._expr.add_log_function(logger.debug)
        # Set actions: each grammar element feeds the database on match.
        self._expr.entry.addParseAction(
            lambda s, l, t: self._add_entry(
                t.get('EntryType'), t.get('Key'), t.get('Fields'))
            )
        self._expr.implicit_comment.addParseAction(
            lambda s, l, t: self._add_comment(t[0])
            )
        self._expr.explicit_comment.addParseAction(
            lambda s, l, t: self._add_comment(t[0])
            )
        self._expr.preamble_decl.addParseAction(
            lambda s, l, t: self._add_preamble(t[0])
            )
        self._expr.string_def.addParseAction(
            lambda s, l, t: self._add_string(t['StringName'].name,
                                             t['StringValue'])
            )

    def _bibtex_file_obj(self, bibtex_str):
        """Wrap the input in a StringIO, stripping a UTF-8 BOM if present.

        Accepts both `str` (decoded BOM char) and `bytes` (raw BOM bytes;
        decoded with self.encoding afterwards).
        """
        # Some files have Byte-order marks inserted at the start
        byte = b'\xef\xbb\xbf'
        if isinstance(bibtex_str, str):
            byte = str(byte, self.encoding, 'ignore')
            if len(bibtex_str) >= 1 and bibtex_str[0] == byte:
                bibtex_str = bibtex_str[1:]
        else:
            if len(bibtex_str) >= 3 and bibtex_str[:3] == byte:
                bibtex_str = bibtex_str[3:]
            bibtex_str = bibtex_str.decode(encoding=self.encoding)
        return io.StringIO(bibtex_str)

    def _clean_val(self, val):
        """ Clean instring before adding to dictionary

        :param val: a value
        :type val: string
        :returns: string -- value
        """
        if not val or val == "{}":
            return ''
        elif self.interpolate_strings:
            return as_text(val)
        else:
            return val

    def _clean_key(self, key):
        """ Lowercase a key and return as unicode.

        :param key: a key
        :type key: string
        :returns: (unicode) string -- value
        """
        key = key.lower()
        if not isinstance(key, str):
            return str(key, 'utf-8')
        else:
            return key

    def _clean_field_key(self, key):
        """ Clean a bibtex field key and homogenize alternative forms.

        :param key: a key
        :type key: string
        :returns: string -- value
        """
        key = self._clean_key(key)
        if self.homogenize_fields:
            if key in list(self.alt_dict.keys()):
                key = self.alt_dict[key]
        return key

    def _add_entry(self, entry_type, entry_id, fields):
        """ Adds a parsed entry.

        Includes checking type and fields, cleaning, applying customizations.

        :param entry_type: the entry type
        :type entry_type: string
        :param entry_id: the entry bibid
        :type entry_id: string
        :param fields: the fields and values
        :type fields: dictionary
        :returns: string -- value
        """
        d = {}
        entry_type = self._clean_key(entry_type)
        if self.ignore_nonstandard_types and entry_type not in STANDARD_TYPES:
            logger.warning('Entry type %s not standard. Not considered.',
                           entry_type)
            return
        for key in fields:
            d[self._clean_field_key(key)] = self._clean_val(fields[key])
        d['ENTRYTYPE'] = entry_type
        d['ID'] = entry_id
        # Keep the crossref reference for later resolution (only when
        # crossref resolution is enabled).
        crossref = d.get('crossref', None)
        if self.add_missing_from_crossref and crossref is not None:
            d['_crossref'] = crossref
        if self.customization is not None:
            logger.debug('Apply customizations and return dict')
            d = self.customization(d)
        self.bib_database.entries.append(d)

    def _add_comment(self, comment):
        """
        Stores a comment in the list of comment.

        :param comment: the parsed comment
        :type comment: string
        """
        logger.debug('Store comment in list of comments: ' +
                     comment.__repr__())
        self.bib_database.comments.append(comment)

    def _add_string(self, string_key, string):
        """
        Stores a new string in the string dictionary.

        :param string_key: the string key
        :type string_key: string
        :param string: the string value
        :type string: string
        """
        if string_key in self.bib_database.strings:
            logger.warning('Overwriting existing string for key: %s.',
                           string_key)
        logger.debug(u'Store string: {} -> {}'.format(string_key, string))
        self.bib_database.strings[string_key] = self._clean_val(string)

    def _add_preamble(self, preamble):
        """
        Stores a preamble.

        :param preamble: the parsed preamble
        :type preamble: string
        """
        logger.debug('Store preamble in list of preambles')
        self.bib_database.preambles.append(preamble)
python-bibtexparser-1.4.3/bibtexparser/bwriter.py 0000664 0000000 0000000 00000023040 14731101544 0022251 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# Author: Francois Boulogne
# License:
import logging
from enum import Enum, auto
from typing import Dict, Callable, Iterable, Union
from bibtexparser.bibdatabase import (BibDatabase, COMMON_STRINGS,
BibDataString,
BibDataStringExpression)
logger = logging.getLogger(__name__)
__all__ = ['BibTexWriter']
# A list of entries that should not be included in the content (key = value) of a BibTex entry
ENTRY_TO_BIBTEX_IGNORE_ENTRIES = ['ENTRYTYPE', 'ID']
class SortingStrategy(Enum):
    """
    Defines different strategies for sorting the entries not defined in :py:attr:`~.BibTexWriter.display_order` and that are added at the end.

    Used as the type of :py:attr:`~.BibTexWriter.display_order_sorting`.
    """
    # NOTE: member order matters - auto() assigns values in definition order.
    ALPHABETICAL_ASC = auto()
    """
    Alphabetical sorting in ascending order.
    """
    ALPHABETICAL_DESC = auto()
    """
    Alphabetical sorting in descending order.
    """
    PRESERVE = auto()
    """
    Preserves the order of the entries. Entries are not sorted.
    """
def _apply_sorting_strategy(strategy: SortingStrategy, items: Iterable[str]) -> Iterable[str]:
    """
    Sorts the items based on the given sorting strategy.

    :param strategy: member of :class:`SortingStrategy`
    :param items: field names to order
    :returns: the (possibly re-ordered) items
    :raises NotImplementedError: for an unknown strategy
    """
    if strategy == SortingStrategy.PRESERVE:
        return items
    if strategy == SortingStrategy.ALPHABETICAL_ASC:
        return sorted(items)
    if strategy == SortingStrategy.ALPHABETICAL_DESC:
        return reversed(sorted(items))
    raise NotImplementedError(f"The strategy {strategy.name} is not implemented.")
def to_bibtex(parsed):
    """
    Convenience function for backwards compatibility.

    Equivalent to writing *parsed* with a default :class:`BibTexWriter`.
    """
    writer = BibTexWriter()
    return writer.write(parsed)
def _str_or_expr_to_bibtex(e):
    """Serialize a field value to its BibTeX source form.

    Expressions join their serialized parts with ' # ', string references
    render as their bare name, and plain text is wrapped in braces.
    """
    if isinstance(e, BibDataStringExpression):
        parts = [_str_or_expr_to_bibtex(part) for part in e.expr]
        return ' # '.join(parts)
    if isinstance(e, BibDataString):
        return e.name
    return '{' + e + '}'
class BibTexWriter(object):
    """
    Writer to convert a :class:`BibDatabase` object to a string or file formatted as a BibTeX file.

    Example::

        from bibtexparser.bwriter import BibTexWriter

        bib_database = ...

        writer = BibTexWriter()
        writer.contents = ['comments', 'entries']
        writer.indent = '  '
        writer.order_entries_by = ('ENTRYTYPE', 'author', 'year')
        bibtex_str = bibtexparser.dumps(bib_database, writer)
    """

    _valid_contents = ['entries', 'comments', 'preambles', 'strings']

    def __init__(self, write_common_strings=False):
        #: List of BibTeX elements to write, valid values are `entries`, `comments`, `preambles`, `strings`.
        self.contents = ['comments', 'preambles', 'strings', 'entries']
        #: Character(s) for indenting BibTeX field-value pairs. Default: single space.
        self.indent = ' '
        #: Align values. Aligns all values according to a given length by padding with single spaces.
        #  If align_values is true, the maximum number of characters used in any field name is used as the length.
        #  If align_values is a number, the greater of the specified value or the number of characters used in the
        #  field name is used as the length.
        #  Default: False
        self.align_values: Union[int, bool] = False
        #: Align multi-line values. Formats a multi-line value such that the text is aligned exactly
        #  on top of each other. Default: False
        self.align_multiline_values = False
        #: Characters(s) for separating BibTeX entries. Default: new line.
        self.entry_separator = '\n'
        #: Tuple of fields for ordering BibTeX entries. Set to `None` to disable sorting. Default: BibTeX key `('ID', )`.
        self.order_entries_by = ('ID', )
        #: Tuple of fields for display order in a single BibTeX entry. Fields not listed here will be displayed at the
        #  end in the order defined by display_order_sorting. Default: '[]'
        self.display_order = []
        # Sorting strategy for entries not contained in display_order. Entries not defined in display_order are added
        # at the end in the order defined by this strategy. Default: SortingStrategy.ALPHABETICAL_ASC
        self.display_order_sorting: SortingStrategy = SortingStrategy.ALPHABETICAL_ASC
        #: BibTeX syntax allows comma first syntax
        #: (common in functional languages), use this to enable
        #: comma first syntax as the bwriter output
        self.comma_first = False
        #: BibTeX syntax allows the comma to be optional at the end of the last field in an entry.
        #: Use this to enable writing this last comma in the bwriter output. Defaults: False.
        self.add_trailing_comma = False
        #: internal variable used if self.align_values is True or an int
        self._max_field_width = 0
        #: Whether common strings are written
        self.common_strings = write_common_strings

    def write(self, bib_database):
        """
        Converts a bibliographic database to a BibTeX-formatted string.

        :param bib_database: bibliographic database to be converted to a BibTeX string
        :type bib_database: BibDatabase
        :return: BibTeX-formatted string
        :rtype: str or unicode
        """
        bibtex = ''
        for content in self.contents:
            try:
                # Add each element set (entries, comments)
                # NOTE(review): this except also swallows AttributeErrors
                # raised *inside* a writer method, not only missing-method
                # lookups - confirm intended.
                bibtex += getattr(self, '_' + content + '_to_bibtex')(bib_database)
            except AttributeError:
                logger.warning("BibTeX item '{}' does not exist and will not be written. Valid items are {}."
                               .format(content, self._valid_contents))
        return bibtex

    def _entries_to_bibtex(self, bib_database):
        """Render all entries, optionally sorted, joined by entry_separator."""
        if self.order_entries_by:
            # TODO: allow sort field does not exist for entry
            entries = sorted(bib_database.entries, key=lambda x: BibDatabase.entry_sort_key(x, self.order_entries_by))
        else:
            entries = bib_database.entries

        if self.align_values is True:
            # determine maximum field width to be used
            widths = [len(ele) for entry in entries for ele in entry if ele not in ENTRY_TO_BIBTEX_IGNORE_ENTRIES]
            self._max_field_width = max(widths)
        elif type(self.align_values) == int:
            # Use specified value
            # NOTE(review): `type(...) == int` (not isinstance) is what keeps
            # align_values=False from matching this branch (bool subclasses int).
            self._max_field_width = self.align_values

        return self.entry_separator.join(self._entry_to_bibtex(entry) for entry in entries)

    def _entry_to_bibtex(self, entry):
        """Render a single entry dict as a `@type{ID, field = {value}, ...}` block."""
        bibtex = ''
        # Write BibTeX key
        bibtex += '@' + entry['ENTRYTYPE'] + '{' + entry['ID']

        # create display_order of fields for this entry
        # first those keys which are both in self.display_order and in entry.keys
        display_order = [i for i in self.display_order if i in entry]
        # then all the other fields sorted alphabetically
        display_order += [i for i in _apply_sorting_strategy(self.display_order_sorting, entry) if i not in self.display_order]
        if self.comma_first:
            field_fmt = u"\n{indent}, {field:<{field_max_w}} = {value}"
        else:
            field_fmt = u",\n{indent}{field:<{field_max_w}} = {value}"
        # Write field = value lines
        for field in [i for i in display_order if i not in ENTRY_TO_BIBTEX_IGNORE_ENTRIES]:
            max_field_width = max(len(field), self._max_field_width)
            try:
                value = _str_or_expr_to_bibtex(entry[field])

                if self.align_multiline_values:
                    # Calculate indent of multi-line values. Text from a multiline string
                    # should be aligned, i.e., be exactly on top of each other.
                    # E.g.:       title = {Hello
                    #                      World}
                    # Calculate the indent of "World":
                    # Left of field (whitespaces before e.g. 'title')
                    value_indent = len(self.indent) + max_field_width
                    # Right of field ' = ' (<- 3 chars) + '{' (<- 1 char)
                    value_indent += 3 + 1

                    value = value.replace('\n', '\n' + value_indent * ' ')

                bibtex += field_fmt.format(
                    indent=self.indent,
                    field=field,
                    field_max_w=max_field_width,
                    value=value)
            except TypeError:
                raise TypeError(u"The field %s in entry %s must be a string"
                                % (field, entry['ID']))
        if self.add_trailing_comma:
            if self.comma_first:
                bibtex += '\n'+self.indent+','
            else:
                bibtex += ','
        bibtex += "\n}\n"
        return bibtex

    def _comments_to_bibtex(self, bib_database):
        """Render all comment blocks as `@comment{...}`."""
        return ''.join(['@comment{{{0}}}\n{1}'.format(comment, self.entry_separator)
                        for comment in bib_database.comments])

    def _preambles_to_bibtex(self, bib_database):
        """Render all preamble blocks as `@preamble{"..."}`."""
        return ''.join(['@preamble{{"{0}"}}\n{1}'.format(preamble, self.entry_separator)
                        for preamble in bib_database.preambles])

    def _strings_to_bibtex(self, bib_database):
        """Render string definitions, skipping unmodified common strings
        unless common_strings is set."""
        return ''.join([
            u'@string{{{name} = {value}}}\n{sep}'.format(
                name=name,
                value=_str_or_expr_to_bibtex(value),
                sep=self.entry_separator)
            for name, value in bib_database.strings.items()
            if (self.common_strings or
                name not in COMMON_STRINGS or  # user defined string
                value != COMMON_STRINGS[name]  # string has been updated
                )])
python-bibtexparser-1.4.3/bibtexparser/customization.py 0000664 0000000 0000000 00000051036 14731101544 0023511 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
"""
A set of functions useful for customizing bibtex fields.
You can find inspiration from these functions to design yours.
Each of them takes a record and returns the modified record.
"""
import logging
import re
import warnings
from builtins import str
from bibtexparser.latexenc import latex_to_unicode, string_to_latex, protect_uppercase
logger = logging.getLogger(__name__)
__all__ = ['splitname', 'getnames', 'author', 'editor', 'journal', 'keyword',
'link', 'page_double_hyphen', 'doi', 'type', 'convert_to_unicode',
'homogenize_latex_encoding', 'add_plaintext_fields']
class InvalidName(ValueError):
    """Raised by :py:func:`customization.splitname` when a malformed name is
    given and ``strict_mode`` is enabled.
    """
def splitname(name, strict_mode=True):
    """
    Break a name into its constituent parts: First, von, Last, and Jr.

    :param string name: a string containing a single name
    :param Boolean strict_mode: whether to use strict mode
    :returns: dictionary of constituent parts
    :raises `customization.InvalidName`: If an invalid name is given and
        ``strict_mode = True``.

    In BibTeX, a name can be represented in any of three forms:
        * First von Last
        * von Last, First
        * von Last, Jr, First

    This function attempts to split a given name into its four parts. The
    returned dictionary has keys of ``first``, ``last``, ``von`` and ``jr``.
    Each value is a list of the words making up that part; this may be an empty
    list.  If the input has no non-whitespace characters, a blank dictionary is
    returned.

    It is capable of detecting some errors with the input name.  If the
    ``strict_mode`` parameter is ``True``, which is the default, this results in
    a :class:`customization.InvalidName` exception being raised.  If it is
    ``False``, the function continues, working around the error as best it can.
    The errors that can be detected are listed below along with the handling
    for non-strict mode:

        * Name finishes with a trailing comma: delete the comma
        * Too many parts (e.g., von Last, Jr, First, Error): merge extra parts
          into First
        * Unterminated opening brace: add closing brace to end of input
        * Unmatched closing brace: add opening brace at start of word

    """
    # Useful references:
    # http://maverick.inria.fr/~Xavier.Decoret/resources/xdkbibtex/bibtex_summary.html#names
    # http://tug.ctan.org/info/bibtex/tamethebeast/ttb_en.pdf

    # Whitespace characters that can separate words.
    whitespace = set(' ~\r\n\t')

    # We'll iterate over the input once, dividing it into a list of words for
    # each comma-separated section. We'll also calculate the case of each word
    # as we work: braces and control sequences make BibTeX's "case of a word"
    # non-trivial, so it is tracked character-by-character below.
    sections = [[]]        # Sections of the name.
    cases = [[]]           # 1 = uppercase, 0 = lowercase, -1 = caseless.
    word = []              # Current word.
    case = -1              # Case of the current word.
    level = 0              # Current brace level.
    bracestart = False     # Will the next character be the first within a brace?
    controlseq = True      # Are we currently processing a control sequence?
    specialchar = None     # Are we currently processing a special character?

    # Using an iterator allows us to deal with escapes in a simple manner.
    nameiter = iter(name)
    for char in nameiter:
        # An escape.
        if char == '\\':
            escaped = next(nameiter)

            # BibTeX doesn't allow whitespace escaping. Copy the slash and fall
            # through to the normal case to handle the whitespace.
            if escaped in whitespace:
                word.append(char)
                char = escaped

            else:
                # Is this the first character in a brace?
                if bracestart:
                    bracestart = False
                    controlseq = escaped.isalpha()
                    specialchar = True

                # Can we use it to determine the case?
                elif (case == -1) and escaped.isalpha():
                    if escaped.isupper():
                        case = 1
                    else:
                        case = 0

                # Copy the escape to the current word and go to the next
                # character in the input.
                word.append(char)
                word.append(escaped)
                continue

        # Start of a braced expression.
        if char == '{':
            level += 1
            word.append(char)
            bracestart = True
            controlseq = False
            specialchar = False
            continue

        # All the below cases imply this (and don't test its previous value).
        bracestart = False

        # End of a braced expression.
        if char == '}':
            # Check and reduce the level.
            if level:
                level -= 1
            else:
                if strict_mode:
                    raise InvalidName("Unmatched closing brace in name {{{0}}}.".format(name))
                # Non-strict workaround: pretend the word opened with a brace.
                word.insert(0, '{')

            # Update the state, append the character, and move on.
            controlseq = False
            specialchar = False
            word.append(char)
            continue

        # Inside a braced expression.
        if level:
            # Is this the end of a control sequence?
            if controlseq:
                if not char.isalpha():
                    controlseq = False

            # If it's a special character, can we use it for a case?
            elif specialchar:
                if (case == -1) and char.isalpha():
                    if char.isupper():
                        case = 1
                    else:
                        case = 0

            # Append the character and move on.
            word.append(char)
            continue

        # End of a word.
        # NB. we know we're not in a brace here due to the previous case.
        if char == ',' or char in whitespace:
            # Don't add empty words due to repeated whitespace.
            if word:
                sections[-1].append(''.join(word))
                word = []
                cases[-1].append(case)
                case = -1
                controlseq = False
                specialchar = False

            # End of a section.
            if char == ',':
                if len(sections) < 3:
                    sections.append([])
                    cases.append([])
                elif strict_mode:
                    raise InvalidName("Too many commas in the name {{{0}}}.".format(name))
            continue

        # Regular character.
        word.append(char)
        if (case == -1) and char.isalpha():
            if char.isupper():
                case = 1
            else:
                case = 0

    # Unterminated brace?
    if level:
        if strict_mode:
            raise InvalidName("Unterminated opening brace in the name {{{0}}}.".format(name))
        # Non-strict workaround: close all open braces at the end.
        while level:
            word.append('}')
            level -= 1

    # Handle the final word.
    if word:
        sections[-1].append(''.join(word))
        cases[-1].append(case)

    # Get rid of trailing sections.
    if not sections[-1]:
        # Trailing comma?
        if (len(sections) > 1) and strict_mode:
            raise InvalidName("Trailing comma at end of name {{{0}}}.".format(name))
        sections.pop(-1)
        cases.pop(-1)

    # No non-whitespace input.
    if not sections or not any(bool(section) for section in sections):
        return {}

    # Initialise the output dictionary.
    parts = {'first': [], 'last': [], 'von': [], 'jr': []}

    # Form 1: "First von Last"
    if len(sections) == 1:
        p0 = sections[0]

        # One word only: last cannot be empty.
        if len(p0) == 1:
            parts['last'] = p0

        # Two words: must be first and last.
        elif len(p0) == 2:
            parts['first'] = p0[:1]
            parts['last'] = p0[1:]

        # Need to use the cases to figure it out.
        else:
            cases = cases[0]

            # First is the longest sequence of words starting with uppercase
            # that is not the whole string. von is then the longest sequence
            # whose last word starts with lowercase that is not the whole
            # string. Last is the rest. NB., this means last cannot be empty.

            # At least one lowercase letter.
            if 0 in cases:
                # Index from end of list of first and last lowercase word.
                firstl = cases.index(0) - len(cases)
                lastl = -cases[::-1].index(0) - 1
                if lastl == -1:
                    lastl -= 1  # Cannot consume the rest of the string.

                # Pull the parts out.
                parts['first'] = p0[:firstl]
                parts['von'] = p0[firstl:lastl + 1]
                parts['last'] = p0[lastl + 1:]

            # No lowercase: last is the last word, first is everything else.
            else:
                parts['first'] = p0[:-1]
                parts['last'] = p0[-1:]

    # Form 2 ("von Last, First") or 3 ("von Last, jr, First")
    else:
        # As long as there is content in the first name partition, use it as-is.
        first = sections[-1]
        if first and first[0]:
            parts['first'] = first

        # And again with the jr part.
        if len(sections) == 3:
            jr = sections[-2]
            if jr and jr[0]:
                parts['jr'] = jr

        # Last name cannot be empty; if there is only one word in the first
        # partition, we have to use it for the last name.
        last = sections[0]
        if len(last) == 1:
            parts['last'] = last

        # Have to look at the cases to figure it out.
        else:
            lcases = cases[0]

            # At least one lowercase: von is the longest sequence of whitespace
            # separated words whose last word does not start with an uppercase
            # word, and last is the rest.
            if 0 in lcases:
                split = len(lcases) - lcases[::-1].index(0)
                if split == len(lcases):
                    split = 0  # Last cannot be empty.
                parts['von'] = sections[0][:split]
                parts['last'] = sections[0][split:]

            # All uppercase => all last.
            else:
                parts['last'] = sections[0]

    # Done.
    return parts
def find_matching(
    text: str,
    opening: str,
    closing: str,
    ignore_escaped: bool = True,
) -> dict:
    r"""
    Find matching 'brackets'.

    :param text: The string to consider.
    :param opening: The opening bracket (e.g. "(", "[", "{").
    :param closing: The closing bracket (e.g. ")", "]", "}").
    :param ignore_escaped: Ignore escaped bracket (e.g. "\(", "\[", "\{", "\)", "\]", "\}").
    :return: Dictionary with ``{index_opening: index_closing}``
    :raises IndexError: If the brackets in ``text`` are unbalanced.
    """
    # Build literal-match patterns for the two bracket strings; with
    # ``ignore_escaped`` a negative lookbehind skips backslash-escaped ones.
    opening_re = re.escape(opening)
    closing_re = re.escape(closing)
    if ignore_escaped:
        opening_re = r"(?<!\\)" + opening_re
        closing_re = r"(?<!\\)" + closing_re

    # Collect every bracket occurrence as a (position, is_opening) event.
    # Using an explicit flag (instead of encoding closings as negative
    # positions) keeps a closing bracket at index 0 unambiguous.
    events = [(match.start(), True) for match in re.finditer(opening_re, text)]
    events += [(match.start(), False) for match in re.finditer(closing_re, text)]
    events.sort()

    # Classic stack-based matching of balanced delimiters.
    ret = {}
    stack = []
    for index, is_opening in events:
        if is_opening:
            stack.append(index)
        else:
            if not stack:
                raise IndexError(f"No opening {opening} for closing at: {index:d}")
            ret[stack.pop()] = index

    if stack:
        raise IndexError(f"No closing {closing} for opening at {stack.pop():d}")

    return ret
def getnames(names):
    """Convert people names as surname, firstnames
    or surname, initials.

    :param names: a list of names
    :type names: list
    :returns: list -- Correctly formated names

    .. Note::
        This function is known to be too simple to handle properly
        the complex rules. We would like to enhance this in forthcoming
        releases.
    """
    tidynames = []
    for namestring in names:
        namestring = namestring.strip()
        if len(namestring) < 1:
            continue
        if ',' in namestring:
            # "Last, First" form: the part before the first comma is the last
            # name; the remainder is split into first names.
            namesplit = namestring.split(',', 1)
            last = namesplit[0].strip()
            firsts = [i.strip() for i in namesplit[1].split()]
        else:
            if "{" in namestring and "}" in namestring:
                # Split on spaces while keeping brace-protected groups intact.
                try:
                    brackets = find_matching(namestring, "{", "}")
                except IndexError:
                    # Unbalanced braces: keep the name untouched.
                    tidynames.append(namestring)
                    continue
                namesplit = []
                start = 0
                i = 0
                while True:
                    if i in brackets:
                        # Jump from an opening brace to its matching closer so
                        # spaces inside the group are not split on.
                        i = brackets[i]
                    else:
                        i += 1
                        if i >= len(namestring):
                            break
                        if namestring[i] == " ":
                            namesplit.append(namestring[start:i])
                            start = i + 1
                        elif i == len(namestring) - 1:
                            namesplit.append(namestring[start:])
            else:
                namesplit = namestring.split()
            # "First Last" form: the final word is the last name.
            last = namesplit.pop()
            firsts = [i.replace('.', '. ').strip() for i in namesplit]
        if last in ['jnr', 'jr', 'junior']:
            # A bare suffix ended up in the last-name slot; take the previous
            # word as the actual last name instead.
            last = firsts.pop()
        for item in firsts:
            # Fold common name particles (von/van/de/...) into the last name.
            # NOTE(review): this pops from ``firsts`` while iterating over it,
            # so only alternating particles are folded -- behavior preserved,
            # confirm upstream intent before changing.
            if item in ['ben', 'van', 'der', 'de', 'la', 'le']:
                last = firsts.pop() + ' ' + last
        tidynames.append(last + ", " + ' '.join(firsts))
    return tidynames
def author(record):
    """
    Split author field into a list of "Name, Surname".

    :param record: the record.
    :type record: dict
    :returns: dict -- the modified record.
    """
    if "author" not in record:
        return record
    if not record["author"]:
        # An empty author field carries no information: drop it.
        del record["author"]
        return record
    # Authors are separated by a case-insensitive " and "; newlines inside
    # the field are treated as plain spaces before splitting.
    flattened = record["author"].replace('\n', ' ')
    raw_names = re.split(r"\ and\ ", flattened, flags=re.IGNORECASE)
    record["author"] = getnames([part.strip() for part in raw_names])
    return record
def editor(record):
    """
    Turn the editor field into a dict composed of the original editor name
    and a editor id (without coma or blank).

    :param record: the record.
    :type record: dict
    :returns: dict -- the modified record.
    """
    if "editor" not in record:
        return record
    if not record["editor"]:
        # An empty editor field carries no information: drop it.
        del record["editor"]
        return record
    # Editors are separated by a (case-sensitive) " and ".
    names = getnames([part.strip()
                      for part in record["editor"].replace('\n', ' ').split(" and ")])
    # Convert each editor into an object with a punctuation-free ID.
    record["editor"] = [
        {"name": name,
         "ID": name.replace(',', '').replace(' ', '').replace('.', '')}
        for name in names
    ]
    return record
def page_double_hyphen(record):
    """
    Separate pages by a double hyphen (--).

    :param record: the record.
    :type record: dict
    :returns: dict -- the modified record.
    """
    if "pages" not in record:
        return record
    # hyphen, non-breaking hyphen, en dash, em dash, hyphen-minus, minus sign
    for separator in (u'‐', u'‑', u'–', u'—', u'-', u'−'):
        if separator not in record["pages"]:
            continue
        # Normalize "first<sep>last" (whitespace tolerated) to "first--last".
        pieces = [piece.strip().strip(separator)
                  for piece in record["pages"].split(separator)]
        record["pages"] = pieces[0] + '--' + pieces[-1]
    return record
def type(record):
    """
    Put the type into lower case.

    :param record: the record.
    :type record: dict
    :returns: dict -- the modified record.
    """
    # NB: this intentionally shadows the builtin ``type``; the name is part
    # of the module's public API and cannot be changed.
    try:
        record["type"] = record["type"].lower()
    except KeyError:
        # No type field: nothing to normalize.
        pass
    return record
def journal(record):
    """
    Turn the journal field into a dict composed of the original journal name
    and a journal id (without coma or blank).

    :param record: the record.
    :type record: dict
    :returns: dict -- the modified record.
    """
    name = record.get("journal")
    if name:
        # switch journal to object with a punctuation-free ID
        record["journal"] = {
            "name": name,
            "ID": name.replace(',', '').replace(' ', '').replace('.', ''),
        }
    return record
def keyword(record, sep=',|;'):
    """
    Split keyword field into a list.

    :param record: the record.
    :type record: dict
    :param sep: pattern used for the splitting regexp.
    :type record: string, optional
    :returns: dict -- the modified record.
    """
    if "keyword" in record:
        # Drop newlines first, then split on the separator pattern.
        flattened = record["keyword"].replace('\n', '')
        record["keyword"] = [part.strip() for part in re.split(sep, flattened)]
    return record
def link(record):
    """
    Split the link field (one link per line) into a list of dicts holding
    ``url`` and, when present, ``anchor`` and ``format`` keys.

    :param record: the record.
    :type record: dict
    :returns: dict -- the modified record.
    """
    if "link" in record:
        # One link per line; each line is "url [anchor [format]]".
        # NOTE(review): the replace() below reads as a no-op (space -> space);
        # it may originally have normalized a non-breaking space -- confirm
        # against upstream before touching it.
        links = [i.strip().replace(" ", " ") for i in record["link"].split('\n')]
        record['link'] = []
        for link in links:
            parts = link.split(" ")
            linkobj = {"url": parts[0]}
            if len(parts) > 1:
                linkobj["anchor"] = parts[1]
            if len(parts) > 2:
                linkobj["format"] = parts[2]
            # Lines that produced an empty url are dropped.
            if len(linkobj["url"]) > 0:
                record["link"].append(linkobj)
    return record
def doi(record):
    """
    Ensure the record's DOI is mirrored into its link list.

    :param record: the record.
    :type record: dict
    :returns: dict -- the modified record.
    """
    if 'doi' not in record:
        return record
    record.setdefault('link', [])
    # Only add a link when no existing link object has a 'doi' key.
    already_present = any('doi' in item for item in record['link'])
    if not already_present:
        url = record['doi']
        if url.startswith('10'):
            # Bare DOI: turn it into a resolvable URL.
            url = 'https://doi.org/' + url
        record['link'].append({"url": url, "anchor": "doi"})
    return record
def convert_to_unicode(record):
    """
    Convert accent from latex to unicode style.

    :param record: the record.
    :type record: dict
    :returns: dict -- the modified record.
    """
    for key in record:
        value = record[key]
        if isinstance(value, list):
            record[key] = [latex_to_unicode(item) for item in value]
        elif isinstance(value, dict):
            record[key] = {subkey: latex_to_unicode(subval)
                           for subkey, subval in value.items()}
        else:
            record[key] = latex_to_unicode(value)
    return record
def homogenize_latex_encoding(record):
    """
    Homogenize the latex enconding style for bibtex

    This function is experimental.

    :param record: the record.
    :type record: dict
    :returns: dict -- the modified record.
    """
    # First, we convert everything to unicode
    record = convert_to_unicode(record)
    # And then, we fall back to latex for every field but the entry ID.
    for key in record:
        if key in ('ID',):
            continue
        logger.debug('Apply string_to_latex to: %s', key)
        value = record[key]
        if isinstance(value, list):
            record[key] = [string_to_latex(item) for item in value]
        elif isinstance(value, str):
            record[key] = string_to_latex(value)
        else:
            warnings.warn('Unable to homogenize latex encoding for %s: Expected string or list,' % key,
                          RuntimeWarning)
        if key == 'title':
            # Brace-protect capitals so BibTeX styles keep the casing.
            logger.debug('Protect uppercase in title')
            logger.debug('Before: %s', record[key])
            record[key] = protect_uppercase(record[key])
            logger.debug('After: %s', record[key])
    return record
def add_plaintext_fields(record):
    """
    For each field in the record, add a `plain_` field containing the
    plaintext, stripped from braces and similar. See
    https://github.com/sciunto-org/python-bibtexparser/issues/116.

    :param record: the record.
    :type record: dict
    :returns: dict -- the modified record.
    """
    def _strip_string(string):
        # Remove every brace character from the value.
        return string.replace("{", "").replace("}", "")

    # Snapshot the keys: we add new entries while looping.
    for key in list(record.keys()):
        value = record[key]
        plain_key = "plain_{}".format(key)
        if isinstance(value, str):
            record[plain_key] = _strip_string(value)
        elif isinstance(value, dict):
            record[plain_key] = {subkey: _strip_string(subval)
                                 for subkey, subval in value.items()}
        elif isinstance(value, list):
            record[plain_key] = [_strip_string(item) for item in value]
        else:
            # Unknown value types are copied through unchanged.
            record[plain_key] = value
    return record
python-bibtexparser-1.4.3/bibtexparser/latexenc.py 0000664 0000000 0000000 00000276536 14731101544 0022422 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# Original source: github.com/okfn/bibserver
# Authors:
# markmacgillivray
# Etienne Posthumus (epoz)
# Francois Boulogne
import itertools
import re
import unicodedata
__all__ = ['string_to_latex', 'latex_to_unicode', 'protect_uppercase',
'unicode_to_latex', 'unicode_to_crappy_latex1',
'unicode_to_crappy_latex2']
def string_to_latex(string):
    """
    Convert a string to its latex equivalent
    """
    # Spaces and braces pass through untouched; every other character is
    # looked up in the unicode->latex table, defaulting to itself.
    passthrough = {' ', '{', '}'}
    converted = [
        char if char in passthrough else unicode_to_latex_map.get(char, char)
        for char in string
    ]
    return ''.join(converted)
def _replace_latex(string, latex, unicod):
if latex in string:
if unicodedata.combining(unicod):
for m in re.finditer(re.escape(latex), string):
i, j = m.span()
# Insert after the following character,
if j < len(string):
string = ''.join([
string[:i], string[j], unicod, string[(j + 1):]])
else:
# except if not in last position (nothing to modify)
string = string[:i]
else:
# Just replace
string = string.replace(latex, unicod)
return string
def _replace_all_latex(string, replacements):
    """Apply every ``(unicode, latex)`` replacement pair to *string* in turn.

    Trailing whitespace on the latex token is stripped before matching.
    """
    for unicod, latex in replacements:
        string = _replace_latex(string, latex.rstrip(), unicod)
    return string
def latex_to_unicode(string):
    """
    Convert a LaTeX string to unicode equivalent.

    :param string: string to convert
    :returns: string
    """
    def _has_latex_markup(text):
        return '\\' in text or '{' in text

    if _has_latex_markup(string):
        string = _replace_all_latex(string, itertools.chain(
            unicode_to_crappy_latex1, unicode_to_latex))
        # TODO Shouldn't this preserve escaped braces instead?
        # Remove any left braces
        string = string.replace("{", "").replace("}", "")
        # If there is still very crappy items
        if _has_latex_markup(string):
            string = _replace_all_latex(string, unicode_to_crappy_latex2)
    # Normalize unicode characters
    # Also, when converting to unicode, we should return a normalized Unicode
    # string, that is always having only compound accentuated character (letter
    # + accent) or single accentuated character (letter with accent). We choose
    # to normalize to the latter.
    return unicodedata.normalize("NFC", u"".join(string))
def protect_uppercase(string):
    """
    Protect uppercase letters for bibtex

    :param string: string to convert
    :returns: string
    """
    # Wrap a capital letter in braces when it is not already preceded by '{'
    # and not already followed by '}'.
    pattern = re.compile(r'([^{]|^)([A-Z])([^}]|$)')
    return pattern.sub(r'\g<1>{\g<2>}\g<3>', string)
# list of latex conversions from
# https://gist.github.com/798549
# this list contains crappy accents
# like \`{e} which is not advised for bibtex
# http://tex.stackexchange.com/questions/57743/how-to-write-a-and-other-umlauts-and-accented-letters-in-bibliography/57745#57745
# Correct accent are in unicode_to_latex
#
# NOTE: these module-level lookup tables start empty and are populated
# in-place by prepare_unicode_to_latex() below.
unicode_to_latex = []
unicode_to_latex_map = {}
unicode_to_crappy_latex1 = []
unicode_to_crappy_latex2 = []
def prepare_unicode_to_latex():
global unicode_to_latex
global unicode_to_latex_map
global unicode_to_crappy_latex1
global unicode_to_crappy_latex2
to_crappy1 = (
("\u00C0", "\\`{A}"),
("\u00C1", "\\'{A}"),
("\u00C2", "\\^{A}"),
("\u00C3", "\\~{A}"),
("\u00C4", "\\\"{A}"),
("\u00C5", "\\AA "),
("\u00C6", "\\AE "),
("\u00C7", "\\c{C}"),
("\u00C8", "\\`{E}"),
("\u00C9", "\\'{E}"),
("\u00CA", "\\^{E}"),
("\u00CB", "\\\"{E}"),
("\u00CC", "\\`{I}"),
("\u00CD", "\\'{I}"),
("\u00CE", "\\^{I}"),
("\u00CF", "\\\"{I}"),
("\u00D0", "\\DH "),
("\u00D1", "\\~{N}"),
("\u00D2", "\\`{O}"),
("\u00D3", "\\'{O}"),
("\u00D4", "\\^{O}"),
("\u00D5", "\\~{O}"),
("\u00D6", "\\\"{O}"),
("\u00D7", "\\texttimes "),
("\u00D8", "\\O "),
("\u00D9", "\\`{U}"),
("\u00DA", "\\'{U}"),
("\u00DB", "\\^{U}"),
("\u00DC", "\\\"{U}"),
("\u00DD", "\\'{Y}"),
("\u00DE", "\\TH "),
("\u00DF", "\\ss "),
("\u00E0", "\\`{a}"),
("\u00E1", "\\'{a}"),
("\u00E2", "\\^{a}"),
("\u00E3", "\\~{a}"),
("\u00E4", "\\\"{a}"),
("\u00E5", "\\aa "),
("\u00E6", "\\ae "),
("\u00E7", "\\c{c}"),
("\u00E8", "\\`{e}"),
("\u00E9", "\\'{e}"),
("\u00EA", "\\^{e}"),
("\u00EB", "\\\"{e}"),
("\u00EC", "\\`{\\i}"),
("\u00ED", "\\'{\\i}"),
("\u00EE", "\\^{\\i}"),
("\u00EF", "\\\"{\\i}"),
("\u00F0", "\\dh "),
("\u00F1", "\\~{n}"),
("\u00F2", "\\`{o}"),
("\u00F3", "\\'{o}"),
("\u00F4", "\\^{o}"),
("\u00F5", "\\~{o}"),
("\u00F6", "\\\"{o}"),
("\u00F7", "\\div "),
("\u00F8", "\\o "),
("\u00F9", "\\`{u}"),
("\u00FA", "\\'{u}"),
("\u00FB", "\\^{u}"),
("\u00FC", "\\\"{u}"),
("\u00FD", "\\'{y}"),
("\u00FE", "\\th "),
("\u00FF", "\\\"{y}"),
("\u0100", "\\={A}"),
("\u0101", "\\={a}"),
("\u0102", "\\u{A}"),
("\u0103", "\\u{a}"),
("\u0104", "\\k{A}"),
("\u0105", "\\k{a}"),
("\u0106", "\\'{C}"),
("\u0107", "\\'{c}"),
("\u0108", "\\^{C}"),
("\u0109", "\\^{c}"),
("\u010A", "\\.{C}"),
("\u010B", "\\.{c}"),
("\u010C", "\\v{C}"),
("\u010D", "\\v{c}"),
("\u010E", "\\v{D}"),
("\u010F", "\\v{d}"),
("\u0110", "\\DJ "),
("\u0111", "\\dj "),
("\u0112", "\\={E}"),
("\u0113", "\\={e}"),
("\u0114", "\\u{E}"),
("\u0115", "\\u{e}"),
("\u0116", "\\.{E}"),
("\u0117", "\\.{e}"),
("\u0118", "\\k{E}"),
("\u0119", "\\k{e}"),
("\u011A", "\\v{E}"),
("\u011B", "\\v{e}"),
("\u011C", "\\^{G}"),
("\u011D", "\\^{g}"),
("\u011E", "\\u{G}"),
("\u011F", "\\u{g}"),
("\u0120", "\\.{G}"),
("\u0121", "\\.{g}"),
("\u0122", "\\c{G}"),
("\u0123", "\\c{g}"),
("\u0124", "\\^{H}"),
("\u0125", "\\^{h}"),
("\u0126", "{\\fontencoding{LELA}\\selectfont\\char40}"),
("\u0127", "\\Elzxh "),
("\u0128", "\\~{I}"),
("\u0129", "\\~{\\i}"),
("\u012A", "\\={I}"),
("\u012B", "\\={\\i}"),
("\u012C", "\\u{I}"),
("\u012D", "\\u{\\i}"),
("\u012E", "\\k{I}"),
("\u012F", "\\k{i}"),
("\u0130", "\\.{I}"),
("\u0131", "\\i "),
# (u"\u0132", "IJ"),
# (u"\u0133", "ij"),
("\u0134", "\\^{J}"),
("\u0135", "\\^{\\j}"),
("\u0136", "\\c{K}"),
("\u0137", "\\c{k}"),
("\u0138", "{\\fontencoding{LELA}\\selectfont\\char91}"),
("\u0139", "\\'{L}"),
("\u013A", "\\'{l}"),
("\u013B", "\\c{L}"),
("\u013C", "\\c{l}"),
("\u013D", "\\v{L}"),
("\u013E", "\\v{l}"),
("\u013F", "{\\fontencoding{LELA}\\selectfont\\char201}"),
("\u0140", "{\\fontencoding{LELA}\\selectfont\\char202}"),
("\u0141", "\\L "),
("\u0142", "\\l "),
("\u0143", "\\'{N}"),
("\u0144", "\\'{n}"),
("\u0145", "\\c{N}"),
("\u0146", "\\c{n}"),
("\u0147", "\\v{N}"),
("\u0148", "\\v{n}"),
("\u0149", "'n"),
("\u014A", "\\NG "),
("\u014B", "\\ng "),
("\u014C", "\\={O}"),
("\u014D", "\\={o}"),
("\u014E", "\\u{O}"),
("\u014F", "\\u{o}"),
("\u0150", "\\H{O}"),
("\u0151", "\\H{o}"),
("\u0152", "\\OE "),
("\u0153", "\\oe "),
("\u0154", "\\'{R}"),
("\u0155", "\\'{r}"),
("\u0156", "\\c{R}"),
("\u0157", "\\c{r}"),
("\u0158", "\\v{R}"),
("\u0159", "\\v{r}"),
("\u015A", "\\'{S}"),
("\u015B", "\\'{s}"),
("\u015C", "\\^{S}"),
("\u015D", "\\^{s}"),
("\u015E", "\\c{S}"),
("\u015F", "\\c{s}"),
("\u0160", "\\v{S}"),
("\u0161", "\\v{s}"),
("\u0162", "\\c{T}"),
("\u0163", "\\c{t}"),
("\u0164", "\\v{T}"),
("\u0165", "\\v{t}"),
("\u0166", "{\\fontencoding{LELA}\\selectfont\\char47}"),
("\u0167", "{\\fontencoding{LELA}\\selectfont\\char63}"),
("\u0168", "\\~{U}"),
("\u0169", "\\~{u}"),
("\u016A", "\\={U}"),
("\u016B", "\\={u}"),
("\u016C", "\\u{U}"),
("\u016D", "\\u{u}"),
("\u016E", "\\r{U}"),
("\u016F", "\\r{u}"),
("\u0170", "\\H{U}"),
("\u0171", "\\H{u}"),
("\u0172", "\\k{U}"),
("\u0173", "\\k{u}"),
("\u0174", "\\^{W}"),
("\u0175", "\\^{w}"),
("\u0176", "\\^{Y}"),
("\u0177", "\\^{y}"),
("\u0178", "\\\"{Y}"),
("\u0179", "\\'{Z}"),
("\u017A", "\\'{z}"),
("\u017B", "\\.{Z}"),
("\u017C", "\\.{z}"),
("\u017D", "\\v{Z}"),
("\u017E", "\\v{z}"),
("\u0195", "\\texthvlig "),
("\u019E", "\\textnrleg "),
("\u01AA", "\\eth "),
("\u01BA", "{\\fontencoding{LELA}\\selectfont\\char195}"),
("\u01C2", "\\textdoublepipe "),
("\u01F5", "\\'{g}"),
("\u0386", "\\'{A}"),
("\u0388", "\\'{E}"),
("\u0389", "\\'{H}"),
("\u03CC", "\\'{o}"),
)
# These are dangerous
# should not be used on
# {\'E} for instance!
to_crappy2 = (
("\u0300", "\\`"),
("\u0301", "\\'"),
("\u0302", "\\^"),
("\u0327", "\\c"),
)
# list of latex conversions from
# https://gist.github.com/798549
# Corrected \`{e} -> {\`e}
to_latex = (
("\u0020", "\\space "),
("\u0023", "\\#"),
("\u0024", "\\textdollar "),
("\u0025", "\\%"),
("\u0026", "\\&"),
("\u0027", "\\textquotesingle "),
("\u002A", "\\ast "),
("\u005C", "\\textbackslash "),
("\u005E", "\\^{}"),
("\u005F", "\\_"),
("\u0060", "\\textasciigrave "),
("\u007B", "\\lbrace "),
("\u007C", "\\vert "),
("\u007D", "\\rbrace "),
("\u007E", "\\textasciitilde "),
("\u00A1", "\\textexclamdown "),
("\u00A2", "\\textcent "),
("\u00A3", "\\textsterling "),
("\u00A4", "\\textcurrency "),
("\u00A5", "\\textyen "),
("\u00A6", "\\textbrokenbar "),
("\u00A7", "\\textsection "),
("\u00A8", "\\textasciidieresis "),
("\u00A9", "\\textcopyright "),
("\u00AA", "\\textordfeminine "),
("\u00AB", "\\guillemotleft "),
("\u00AC", "\\lnot "),
("\u00AD", "\\-"),
("\u00AE", "\\textregistered "),
("\u00AF", "\\textasciimacron "),
("\u00B0", "\\textdegree "),
("\u00B1", "\\pm "),
("\u00B2", "{^2}"),
("\u00B3", "{^3}"),
("\u00B4", "\\textasciiacute "),
("\u00B5", "\\mathrm{\\mu}"),
("\u00B6", "\\textparagraph "),
("\u00B7", "\\cdot "),
("\u00B8", "\\c{}"),
("\u00B9", "{^1}"),
("\u00BA", "\\textordmasculine "),
("\u00BB", "\\guillemotright "),
("\u00BC", "\\textonequarter "),
("\u00BD", "\\textonehalf "),
("\u00BE", "\\textthreequarters "),
("\u00BF", "\\textquestiondown "),
("\u00C0", "{\\`A}"),
("\u00C1", "{\\'A}"),
("\u00C2", "{\\^A}"),
("\u00C3", "{\\~A}"),
("\u00C4", "{\\\"A}"),
("\u00C5", "{\\AA}"),
("\u00C6", "{\\AE}"),
("\u00C7", "{\\c C}"),
("\u00C8", "{\\`E}"),
("\u00C9", "{\\'E}"),
("\u00CA", "{\\^E}"),
("\u00CB", "{\\\"E}"),
("\u00CC", "{\\`I}"),
("\u00CD", "{\\'I}"),
("\u00CE", "{\\^I}"),
("\u00CF", "{\\\"I}"),
("\u00D0", "{\\DH}"),
("\u00D1", "{\\~N}"),
("\u00D2", "{\\`O}"),
("\u00D3", "{\\'O}"),
("\u00D4", "{\\^O}"),
("\u00D5", "{\\~O}"),
("\u00D6", "{\\\"O}"),
("\u00D7", "\\texttimes "),
("\u00D8", "{\\O}"),
("\u00D9", "{\\`U}"),
("\u00DA", "{\\'U}"),
("\u00DB", "{\\^U}"),
("\u00DC", "{\\\"U}"),
("\u00DD", "{\\'Y}"),
("\u00DE", "{\\TH}"),
("\u00DF", "{\\ss}"),
("\u00E0", "{\\`a}"),
("\u00E1", "{\\'a}"),
("\u00E2", "{\\^a}"),
("\u00E3", "{\\~a}"),
("\u00E4", "{\\\"a}"),
("\u00E5", "{\\aa}"),
("\u00E6", "{\\ae}"),
("\u00E7", "{\\c c}"),
("\u00E8", "{\\`e}"),
("\u00E9", "{\\'e}"),
("\u00EA", "{\\^e}"),
("\u00EB", "{\\\"e}"),
("\u00EC", "{\\`\\i}"),
("\u00ED", "{\\'\\i}"),
("\u00EE", "{\\^\\i}"),
("\u00EF", "{\\\"\\i}"),
("\u00F0", "{\\dh }"),
("\u00F1", "{\\~n}"),
("\u00F2", "{\\`o}"),
("\u00F3", "{\\'o}"),
("\u00F4", "{\\^o}"),
("\u00F5", "{\\~o}"),
("\u00F6", "{\\\"o}"),
("\u00F7", "{\\div}"),
("\u00F8", "{\\o}"),
("\u00F9", "{\\`u}"),
("\u00FA", "{\\'u}"),
("\u00FB", "{\\^u}"),
("\u00FC", "{\\\"u}"),
("\u00FD", "{\\'y}"),
("\u00FE", "{\\th}"),
("\u00FF", "{\\\"y}"),
("\u0100", "{\\= A}"),
("\u0101", "{\\= a}"),
("\u0102", "{\\u A}"),
("\u0103", "{\\u a}"),
("\u0104", "{\\k A}"),
("\u0105", "{\\k a}"),
("\u0106", "{\\' C}"),
("\u0107", "{\\' c}"),
("\u0108", "{\\^ C}"),
("\u0109", "{\\^ c}"),
("\u010A", "{\\. C}"),
("\u010B", "{\\. c}"),
("\u010C", "{\\v C}"),
("\u010D", "{\\v c}"),
("\u010E", "{\\v D}"),
("\u010F", "{\\v d}"),
("\u0110", "{\\DJ}"),
("\u0111", "{\\dj}"),
("\u0112", "{\\= E}"),
("\u0113", "{\\= e}"),
("\u0114", "{\\u E}"),
("\u0115", "{\\u e}"),
("\u0116", "{\\.E}"),
("\u0117", "{\\.e}"),
("\u0118", "{\\k E}"),
("\u0119", "{\\k e}"),
("\u011A", "{\\v E}"),
("\u011B", "{\\v e}"),
("\u011C", "{\\^ G}"),
("\u011D", "{\\^ g}"),
("\u011E", "{\\u G}"),
("\u011F", "{\\u g}"),
("\u0120", "{\\. G}"),
("\u0121", "{\\. g}"),
("\u0122", "{\\c G}"),
("\u0123", "{\\c g}"),
("\u0124", "{\\^ H}"),
("\u0125", "{\\^ h}"),
("\u0126", "{\\fontencoding{LELA}\\selectfont\\char40}"),
("\u0127", "\\Elzxh "),
("\u0128", "{\\~ I}"),
("\u0129", "{\\~ \\i}"),
("\u012A", "{\\= I}"),
("\u012B", "{\\= \\i}"),
("\u012C", "{\\u I}"),
("\u012D", "{\\u \\i}"),
("\u012E", "{\\k I}"),
("\u012F", "{\\k i}"),
("\u0130", "{\\. I}"),
("\u0131", "{\\i}"),
# (u"\u0132", "IJ"),
# (u"\u0133", "ij"),
("\u0134", "{\\^J}"),
("\u0135", "{\\^\\j}"),
("\u0136", "{\\c K}"),
("\u0137", "{\\c k}"),
("\u0138", "{\\fontencoding{LELA}\\selectfont\\char91}"),
("\u0139", "{\\'L}"),
("\u013A", "{\\'l}"),
("\u013B", "{\\c L}"),
("\u013C", "{\\c l}"),
("\u013D", "{\\v L}"),
("\u013E", "{\\v l}"),
("\u013F", "{\\fontencoding{LELA}\\selectfont\\char201}"),
("\u0140", "{\\fontencoding{LELA}\\selectfont\\char202}"),
("\u0141", "{\\L}"),
("\u0142", "{\\l}"),
("\u0143", "{\\'N}"),
("\u0144", "{\\'n}"),
("\u0145", "{\\c N}"),
("\u0146", "{\\c n}"),
("\u0147", "{\\v N}"),
("\u0148", "{\\v n}"),
("\u0149", "'n"),
("\u014A", "{\\NG}"),
("\u014B", "{\\ng}"),
("\u014C", "{\\= O}"),
("\u014D", "{\\= o}"),
("\u014E", "{\\u O}"),
("\u014F", "{\\u o}"),
("\u0150", "{\\H O}"),
("\u0151", "{\\H o}"),
("\u0152", "{\\OE}"),
("\u0153", "{\\oe}"),
("\u0154", "{\\'R}"),
("\u0155", "{\\'r}"),
("\u0156", "{\\c R}"),
("\u0157", "{\\c r}"),
("\u0158", "{\\v R}"),
("\u0159", "{\\v r}"),
("\u015A", "{\\' S}"),
("\u015B", "{\\' s}"),
("\u015C", "{\\^ S}"),
("\u015D", "{\\^ s}"),
("\u015E", "{\\c S}"),
("\u015F", "{\\c s}"),
("\u0160", "{\\v S}"),
("\u0161", "{\\v s}"),
("\u0162", "{\\c T}"),
("\u0163", "{\\c t}"),
("\u0164", "{\\v T}"),
("\u0165", "{\\v t}"),
("\u0166", "{\\fontencoding{LELA}\\selectfont\\char47}"),
("\u0167", "{\\fontencoding{LELA}\\selectfont\\char63}"),
("\u0168", "{\\~ U}"),
("\u0169", "{\\~ u}"),
("\u016A", "{\\= U}"),
("\u016B", "{\\= u}"),
("\u016C", "{\\u U}"),
("\u016D", "{\\u u}"),
("\u016E", "{\\r U}"),
("\u016F", "{\\r u}"),
("\u0170", "{\\H U}"),
("\u0171", "{\\H u}"),
("\u0172", "{\\k U}"),
("\u0173", "{\\k u}"),
("\u0174", "{\\^ W}"),
("\u0175", "{\\^ w}"),
("\u0176", "{\\^ Y}"),
("\u0177", "{\\^ y}"),
("\u0178", "{\\\" Y}"),
("\u0179", "{\\' Z}"),
("\u017A", "{\\' z}"),
("\u017B", "{\\. Z}"),
("\u017C", "{\\. z}"),
("\u017D", "{\\v Z}"),
("\u017E", "{\\v z}"),
("\u0195", "{\\texthvlig}"),
("\u019E", "{\\textnrleg}"),
("\u01AA", "{\\eth}"),
("\u01BA", "{\\fontencoding{LELA}\\selectfont\\char195}"),
("\u01C2", "\\textdoublepipe "),
("\u01F5", "{\\' g}"),
("\u0250", "\\Elztrna "),
("\u0252", "\\Elztrnsa "),
("\u0254", "\\Elzopeno "),
("\u0256", "\\Elzrtld "),
("\u0258", "{\\fontencoding{LEIP}\\selectfont\\char61}"),
("\u0259", "\\Elzschwa "),
("\u025B", "\\varepsilon "),
("\u0263", "\\Elzpgamma "),
("\u0264", "\\Elzpbgam "),
("\u0265", "\\Elztrnh "),
("\u026C", "\\Elzbtdl "),
("\u026D", "\\Elzrtll "),
("\u026F", "\\Elztrnm "),
("\u0270", "\\Elztrnmlr "),
("\u0271", "\\Elzltlmr "),
("\u0272", "\\Elzltln "),
("\u0273", "\\Elzrtln "),
("\u0277", "\\Elzclomeg "),
("\u0278", "\\textphi "),
("\u0279", "\\Elztrnr "),
("\u027A", "\\Elztrnrl "),
("\u027B", "\\Elzrttrnr "),
("\u027C", "\\Elzrl "),
("\u027D", "\\Elzrtlr "),
("\u027E", "\\Elzfhr "),
("\u027F", "{\\fontencoding{LEIP}\\selectfont\\char202}"),
("\u0282", "\\Elzrtls "),
("\u0283", "\\Elzesh "),
("\u0287", "\\Elztrnt "),
("\u0288", "\\Elzrtlt "),
("\u028A", "\\Elzpupsil "),
("\u028B", "\\Elzpscrv "),
("\u028C", "\\Elzinvv "),
("\u028D", "\\Elzinvw "),
("\u028E", "\\Elztrny "),
("\u0290", "\\Elzrtlz "),
("\u0292", "\\Elzyogh "),
("\u0294", "\\Elzglst "),
("\u0295", "\\Elzreglst "),
("\u0296", "\\Elzinglst "),
("\u029E", "\\textturnk "),
("\u02A4", "\\Elzdyogh "),
("\u02A7", "\\Elztesh "),
("\u02C7", "\\textasciicaron "),
("\u02C8", "\\Elzverts "),
("\u02CC", "\\Elzverti "),
("\u02D0", "\\Elzlmrk "),
("\u02D1", "\\Elzhlmrk "),
("\u02D2", "\\Elzsbrhr "),
("\u02D3", "\\Elzsblhr "),
("\u02D4", "\\Elzrais "),
("\u02D5", "\\Elzlow "),
("\u02D8", "\\textasciibreve "),
("\u02D9", "\\textperiodcentered "),
("\u02DA", "\\r{}"),
("\u02DB", "\\k{}"),
("\u02DC", "\\texttildelow "),
("\u02DD", "\\H{}"),
("\u02E5", "\\tone{55}"),
("\u02E6", "\\tone{44}"),
("\u02E7", "\\tone{33}"),
("\u02E8", "\\tone{22}"),
("\u02E9", "\\tone{11}"),
("\u0303", "\\~"),
("\u0304", "\\="),
("\u0306", "\\u"),
("\u0307", "\\."),
("\u0308", "\\\""),
("\u030A", "\\r"),
("\u030B", "\\H"),
("\u030C", "\\v"),
("\u030F", "\\cyrchar\\C"),
("\u0311", "{\\fontencoding{LECO}\\selectfont\\char177}"),
("\u0318", "{\\fontencoding{LECO}\\selectfont\\char184}"),
("\u0319", "{\\fontencoding{LECO}\\selectfont\\char185}"),
("\u0321", "\\Elzpalh "),
("\u0322", "\\Elzrh "),
("\u0328", "\\k"),
("\u032A", "\\Elzsbbrg "),
("\u032B", "{\\fontencoding{LECO}\\selectfont\\char203}"),
("\u032F", "{\\fontencoding{LECO}\\selectfont\\char207}"),
("\u0335", "\\Elzxl "),
("\u0336", "\\Elzbar "),
("\u0337", "{\\fontencoding{LECO}\\selectfont\\char215}"),
("\u0338", "{\\fontencoding{LECO}\\selectfont\\char216}"),
("\u033A", "{\\fontencoding{LECO}\\selectfont\\char218}"),
("\u033B", "{\\fontencoding{LECO}\\selectfont\\char219}"),
("\u033C", "{\\fontencoding{LECO}\\selectfont\\char220}"),
("\u033D", "{\\fontencoding{LECO}\\selectfont\\char221}"),
("\u0361", "{\\fontencoding{LECO}\\selectfont\\char225}"),
("\u0386", "{\\' A}"),
("\u0388", "{\\' E}"),
("\u0389", "{\\' H}"),
("\u038A", "\\'{}{I}"),
("\u038C", "\\'{}O"),
("\u038E", "\\mathrm{'Y}"),
("\u038F", "\\mathrm{'\\Omega}"),
("\u0390", "\\acute{\\ddot{\\iota}}"),
("\u0391", "\\Alpha "),
("\u0392", "\\Beta "),
("\u0393", "\\Gamma "),
("\u0394", "\\Delta "),
("\u0395", "\\Epsilon "),
("\u0396", "\\Zeta "),
("\u0397", "\\Eta "),
("\u0398", "\\Theta "),
("\u0399", "\\Iota "),
("\u039A", "\\Kappa "),
("\u039B", "\\Lambda "),
("\u039E", "\\Xi "),
("\u03A0", "\\Pi "),
("\u03A1", "\\Rho "),
("\u03A3", "\\Sigma "),
("\u03A4", "\\Tau "),
("\u03A5", "\\Upsilon "),
("\u03A6", "\\Phi "),
("\u03A7", "\\Chi "),
("\u03A8", "\\Psi "),
("\u03A9", "\\Omega "),
("\u03AA", "\\mathrm{\\ddot{I}}"),
("\u03AB", "\\mathrm{\\ddot{Y}}"),
("\u03AC", "\\'{$\\alpha$}"),
("\u03AD", "\\acute{\\epsilon}"),
("\u03AE", "\\acute{\\eta}"),
("\u03AF", "\\acute{\\iota}"),
("\u03B0", "\\acute{\\ddot{\\upsilon}}"),
("\u03B1", "\\alpha "),
("\u03B2", "\\beta "),
("\u03B3", "\\gamma "),
("\u03B4", "\\delta "),
("\u03B5", "\\epsilon "),
("\u03B6", "\\zeta "),
("\u03B7", "\\eta "),
("\u03B8", "\\texttheta "),
("\u03B9", "\\iota "),
("\u03BA", "\\kappa "),
("\u03BB", "\\lambda "),
("\u03BC", "\\mu "),
("\u03BD", "\\nu "),
("\u03BE", "\\xi "),
("\u03C0", "\\pi "),
("\u03C1", "\\rho "),
("\u03C2", "\\varsigma "),
("\u03C3", "\\sigma "),
("\u03C4", "\\tau "),
("\u03C5", "\\upsilon "),
("\u03C6", "\\varphi "),
("\u03C7", "\\chi "),
("\u03C8", "\\psi "),
("\u03C9", "\\omega "),
("\u03CA", "\\ddot{\\iota}"),
("\u03CB", "\\ddot{\\upsilon}"),
("\u03CC", "{\\' o}"),
("\u03CD", "\\acute{\\upsilon}"),
("\u03CE", "\\acute{\\omega}"),
("\u03D0", "\\Pisymbol{ppi022}{87}"),
("\u03D1", "\\textvartheta "),
("\u03D2", "\\Upsilon "),
("\u03D5", "\\phi "),
("\u03D6", "\\varpi "),
("\u03DA", "\\Stigma "),
("\u03DC", "\\Digamma "),
("\u03DD", "\\digamma "),
("\u03DE", "\\Koppa "),
("\u03E0", "\\Sampi "),
("\u03F0", "\\varkappa "),
("\u03F1", "\\varrho "),
("\u03F4", "\\textTheta "),
("\u03F6", "\\backepsilon "),
("\u0401", "\\cyrchar\\CYRYO "),
("\u0402", "\\cyrchar\\CYRDJE "),
("\u0403", "\\cyrchar{\\'\\CYRG}"),
("\u0404", "\\cyrchar\\CYRIE "),
("\u0405", "\\cyrchar\\CYRDZE "),
("\u0406", "\\cyrchar\\CYRII "),
("\u0407", "\\cyrchar\\CYRYI "),
("\u0408", "\\cyrchar\\CYRJE "),
("\u0409", "\\cyrchar\\CYRLJE "),
("\u040A", "\\cyrchar\\CYRNJE "),
("\u040B", "\\cyrchar\\CYRTSHE "),
("\u040C", "\\cyrchar{\\'\\CYRK}"),
("\u040E", "\\cyrchar\\CYRUSHRT "),
("\u040F", "\\cyrchar\\CYRDZHE "),
("\u0410", "\\cyrchar\\CYRA "),
("\u0411", "\\cyrchar\\CYRB "),
("\u0412", "\\cyrchar\\CYRV "),
("\u0413", "\\cyrchar\\CYRG "),
("\u0414", "\\cyrchar\\CYRD "),
("\u0415", "\\cyrchar\\CYRE "),
("\u0416", "\\cyrchar\\CYRZH "),
("\u0417", "\\cyrchar\\CYRZ "),
("\u0418", "\\cyrchar\\CYRI "),
("\u0419", "\\cyrchar\\CYRISHRT "),
("\u041A", "\\cyrchar\\CYRK "),
("\u041B", "\\cyrchar\\CYRL "),
("\u041C", "\\cyrchar\\CYRM "),
("\u041D", "\\cyrchar\\CYRN "),
("\u041E", "\\cyrchar\\CYRO "),
("\u041F", "\\cyrchar\\CYRP "),
("\u0420", "\\cyrchar\\CYRR "),
("\u0421", "\\cyrchar\\CYRS "),
("\u0422", "\\cyrchar\\CYRT "),
("\u0423", "\\cyrchar\\CYRU "),
("\u0424", "\\cyrchar\\CYRF "),
("\u0425", "\\cyrchar\\CYRH "),
("\u0426", "\\cyrchar\\CYRC "),
("\u0427", "\\cyrchar\\CYRCH "),
("\u0428", "\\cyrchar\\CYRSH "),
("\u0429", "\\cyrchar\\CYRSHCH "),
("\u042A", "\\cyrchar\\CYRHRDSN "),
("\u042B", "\\cyrchar\\CYRERY "),
("\u042C", "\\cyrchar\\CYRSFTSN "),
("\u042D", "\\cyrchar\\CYREREV "),
("\u042E", "\\cyrchar\\CYRYU "),
("\u042F", "\\cyrchar\\CYRYA "),
("\u0430", "\\cyrchar\\cyra "),
("\u0431", "\\cyrchar\\cyrb "),
("\u0432", "\\cyrchar\\cyrv "),
("\u0433", "\\cyrchar\\cyrg "),
("\u0434", "\\cyrchar\\cyrd "),
("\u0435", "\\cyrchar\\cyre "),
("\u0436", "\\cyrchar\\cyrzh "),
("\u0437", "\\cyrchar\\cyrz "),
("\u0438", "\\cyrchar\\cyri "),
("\u0439", "\\cyrchar\\cyrishrt "),
("\u043A", "\\cyrchar\\cyrk "),
("\u043B", "\\cyrchar\\cyrl "),
("\u043C", "\\cyrchar\\cyrm "),
("\u043D", "\\cyrchar\\cyrn "),
("\u043E", "\\cyrchar\\cyro "),
("\u043F", "\\cyrchar\\cyrp "),
("\u0440", "\\cyrchar\\cyrr "),
("\u0441", "\\cyrchar\\cyrs "),
("\u0442", "\\cyrchar\\cyrt "),
("\u0443", "\\cyrchar\\cyru "),
("\u0444", "\\cyrchar\\cyrf "),
("\u0445", "\\cyrchar\\cyrh "),
("\u0446", "\\cyrchar\\cyrc "),
("\u0447", "\\cyrchar\\cyrch "),
("\u0448", "\\cyrchar\\cyrsh "),
("\u0449", "\\cyrchar\\cyrshch "),
("\u044A", "\\cyrchar\\cyrhrdsn "),
("\u044B", "\\cyrchar\\cyrery "),
("\u044C", "\\cyrchar\\cyrsftsn "),
("\u044D", "\\cyrchar\\cyrerev "),
("\u044E", "\\cyrchar\\cyryu "),
("\u044F", "\\cyrchar\\cyrya "),
("\u0451", "\\cyrchar\\cyryo "),
("\u0452", "\\cyrchar\\cyrdje "),
("\u0453", "\\cyrchar{\\'\\cyrg}"),
("\u0454", "\\cyrchar\\cyrie "),
("\u0455", "\\cyrchar\\cyrdze "),
("\u0456", "\\cyrchar\\cyrii "),
("\u0457", "\\cyrchar\\cyryi "),
("\u0458", "\\cyrchar\\cyrje "),
("\u0459", "\\cyrchar\\cyrlje "),
("\u045A", "\\cyrchar\\cyrnje "),
("\u045B", "\\cyrchar\\cyrtshe "),
("\u045C", "\\cyrchar{\\'\\cyrk}"),
("\u045E", "\\cyrchar\\cyrushrt "),
("\u045F", "\\cyrchar\\cyrdzhe "),
("\u0460", "\\cyrchar\\CYROMEGA "),
("\u0461", "\\cyrchar\\cyromega "),
("\u0462", "\\cyrchar\\CYRYAT "),
("\u0464", "\\cyrchar\\CYRIOTE "),
("\u0465", "\\cyrchar\\cyriote "),
("\u0466", "\\cyrchar\\CYRLYUS "),
("\u0467", "\\cyrchar\\cyrlyus "),
("\u0468", "\\cyrchar\\CYRIOTLYUS "),
("\u0469", "\\cyrchar\\cyriotlyus "),
("\u046A", "\\cyrchar\\CYRBYUS "),
("\u046C", "\\cyrchar\\CYRIOTBYUS "),
("\u046D", "\\cyrchar\\cyriotbyus "),
("\u046E", "\\cyrchar\\CYRKSI "),
("\u046F", "\\cyrchar\\cyrksi "),
("\u0470", "\\cyrchar\\CYRPSI "),
("\u0471", "\\cyrchar\\cyrpsi "),
("\u0472", "\\cyrchar\\CYRFITA "),
("\u0474", "\\cyrchar\\CYRIZH "),
("\u0478", "\\cyrchar\\CYRUK "),
("\u0479", "\\cyrchar\\cyruk "),
("\u047A", "\\cyrchar\\CYROMEGARND "),
("\u047B", "\\cyrchar\\cyromegarnd "),
("\u047C", "\\cyrchar\\CYROMEGATITLO "),
("\u047D", "\\cyrchar\\cyromegatitlo "),
("\u047E", "\\cyrchar\\CYROT "),
("\u047F", "\\cyrchar\\cyrot "),
("\u0480", "\\cyrchar\\CYRKOPPA "),
("\u0481", "\\cyrchar\\cyrkoppa "),
("\u0482", "\\cyrchar\\cyrthousands "),
("\u0488", "\\cyrchar\\cyrhundredthousands "),
("\u0489", "\\cyrchar\\cyrmillions "),
("\u048C", "\\cyrchar\\CYRSEMISFTSN "),
("\u048D", "\\cyrchar\\cyrsemisftsn "),
("\u048E", "\\cyrchar\\CYRRTICK "),
("\u048F", "\\cyrchar\\cyrrtick "),
("\u0490", "\\cyrchar\\CYRGUP "),
("\u0491", "\\cyrchar\\cyrgup "),
("\u0492", "\\cyrchar\\CYRGHCRS "),
("\u0493", "\\cyrchar\\cyrghcrs "),
("\u0494", "\\cyrchar\\CYRGHK "),
("\u0495", "\\cyrchar\\cyrghk "),
("\u0496", "\\cyrchar\\CYRZHDSC "),
("\u0497", "\\cyrchar\\cyrzhdsc "),
("\u0498", "\\cyrchar\\CYRZDSC "),
("\u0499", "\\cyrchar\\cyrzdsc "),
("\u049A", "\\cyrchar\\CYRKDSC "),
("\u049B", "\\cyrchar\\cyrkdsc "),
("\u049C", "\\cyrchar\\CYRKVCRS "),
("\u049D", "\\cyrchar\\cyrkvcrs "),
("\u049E", "\\cyrchar\\CYRKHCRS "),
("\u049F", "\\cyrchar\\cyrkhcrs "),
("\u04A0", "\\cyrchar\\CYRKBEAK "),
("\u04A1", "\\cyrchar\\cyrkbeak "),
("\u04A2", "\\cyrchar\\CYRNDSC "),
("\u04A3", "\\cyrchar\\cyrndsc "),
("\u04A4", "\\cyrchar\\CYRNG "),
("\u04A5", "\\cyrchar\\cyrng "),
("\u04A6", "\\cyrchar\\CYRPHK "),
("\u04A7", "\\cyrchar\\cyrphk "),
("\u04A8", "\\cyrchar\\CYRABHHA "),
("\u04A9", "\\cyrchar\\cyrabhha "),
("\u04AA", "\\cyrchar\\CYRSDSC "),
("\u04AB", "\\cyrchar\\cyrsdsc "),
("\u04AC", "\\cyrchar\\CYRTDSC "),
("\u04AD", "\\cyrchar\\cyrtdsc "),
("\u04AE", "\\cyrchar\\CYRY "),
("\u04AF", "\\cyrchar\\cyry "),
("\u04B0", "\\cyrchar\\CYRYHCRS "),
("\u04B1", "\\cyrchar\\cyryhcrs "),
("\u04B2", "\\cyrchar\\CYRHDSC "),
("\u04B3", "\\cyrchar\\cyrhdsc "),
("\u04B4", "\\cyrchar\\CYRTETSE "),
("\u04B5", "\\cyrchar\\cyrtetse "),
("\u04B6", "\\cyrchar\\CYRCHRDSC "),
("\u04B7", "\\cyrchar\\cyrchrdsc "),
("\u04B8", "\\cyrchar\\CYRCHVCRS "),
("\u04B9", "\\cyrchar\\cyrchvcrs "),
("\u04BA", "\\cyrchar\\CYRSHHA "),
("\u04BB", "\\cyrchar\\cyrshha "),
("\u04BC", "\\cyrchar\\CYRABHCH "),
("\u04BD", "\\cyrchar\\cyrabhch "),
("\u04BE", "\\cyrchar\\CYRABHCHDSC "),
("\u04BF", "\\cyrchar\\cyrabhchdsc "),
("\u04C0", "\\cyrchar\\CYRpalochka "),
("\u04C3", "\\cyrchar\\CYRKHK "),
("\u04C4", "\\cyrchar\\cyrkhk "),
("\u04C7", "\\cyrchar\\CYRNHK "),
("\u04C8", "\\cyrchar\\cyrnhk "),
("\u04CB", "\\cyrchar\\CYRCHLDSC "),
("\u04CC", "\\cyrchar\\cyrchldsc "),
("\u04D4", "\\cyrchar\\CYRAE "),
("\u04D5", "\\cyrchar\\cyrae "),
("\u04D8", "\\cyrchar\\CYRSCHWA "),
("\u04D9", "\\cyrchar\\cyrschwa "),
("\u04E0", "\\cyrchar\\CYRABHDZE "),
("\u04E1", "\\cyrchar\\cyrabhdze "),
("\u04E8", "\\cyrchar\\CYROTLD "),
("\u04E9", "\\cyrchar\\cyrotld "),
("\u2002", "\\hspace{0.6em}"),
("\u2003", "\\hspace{1em}"),
("\u2004", "\\hspace{0.33em}"),
("\u2005", "\\hspace{0.25em}"),
("\u2006", "\\hspace{0.166em}"),
("\u2007", "\\hphantom{0}"),
("\u2008", "\\hphantom{,}"),
("\u2009", "\\hspace{0.167em}"),
("\u2009-0200A-0200A", "\\;"),
("\u200A", "\\mkern1mu "),
("\u2013", "\\textendash "),
("\u2014", "\\textemdash "),
("\u2015", "\\rule{1em}{1pt}"),
("\u2016", "\\Vert "),
("\u201B", "\\Elzreapos "),
("\u201C", "\\textquotedblleft "),
("\u201D", "\\textquotedblright "),
("\u201E", ",,"),
("\u2020", "\\textdagger "),
("\u2021", "\\textdaggerdbl "),
("\u2022", "\\textbullet "),
# (u"\u2025", ".."),
("\u2026", "\\ldots "),
("\u2030", "\\textperthousand "),
("\u2031", "\\textpertenthousand "),
("\u2032", "{'}"),
("\u2033", "{''}"),
("\u2034", "{'''}"),
("\u2035", "\\backprime "),
("\u2039", "\\guilsinglleft "),
("\u203A", "\\guilsinglright "),
("\u2057", "''''"),
("\u205F", "\\mkern4mu "),
("\u2060", "\\nolinebreak "),
("\u20A7", "\\ensuremath{\\Elzpes}"),
("\u20AC", "\\mbox{\\texteuro} "),
("\u20DB", "\\dddot "),
("\u20DC", "\\ddddot "),
("\u2102", "\\mathbb{C}"),
("\u210A", "\\mathscr{g}"),
("\u210B", "\\mathscr{H}"),
("\u210C", "\\mathfrak{H}"),
("\u210D", "\\mathbb{H}"),
("\u210F", "\\hslash "),
("\u2110", "\\mathscr{I}"),
("\u2111", "\\mathfrak{I}"),
("\u2112", "\\mathscr{L}"),
("\u2113", "\\mathscr{l}"),
("\u2115", "\\mathbb{N}"),
("\u2116", "\\cyrchar\\textnumero "),
("\u2118", "\\wp "),
("\u2119", "\\mathbb{P}"),
("\u211A", "\\mathbb{Q}"),
("\u211B", "\\mathscr{R}"),
("\u211C", "\\mathfrak{R}"),
("\u211D", "\\mathbb{R}"),
("\u211E", "\\Elzxrat "),
("\u2122", "\\texttrademark "),
("\u2124", "\\mathbb{Z}"),
("\u2126", "\\Omega "),
("\u2127", "\\mho "),
("\u2128", "\\mathfrak{Z}"),
("\u2129", "\\ElsevierGlyph{2129}"),
("\u212B", "\\AA "),
("\u212C", "\\mathscr{B}"),
("\u212D", "\\mathfrak{C}"),
("\u212F", "\\mathscr{e}"),
("\u2130", "\\mathscr{E}"),
("\u2131", "\\mathscr{F}"),
("\u2133", "\\mathscr{M}"),
("\u2134", "\\mathscr{o}"),
("\u2135", "\\aleph "),
("\u2136", "\\beth "),
("\u2137", "\\gimel "),
("\u2138", "\\daleth "),
("\u2153", "\\textfrac{1}{3}"),
("\u2154", "\\textfrac{2}{3}"),
("\u2155", "\\textfrac{1}{5}"),
("\u2156", "\\textfrac{2}{5}"),
("\u2157", "\\textfrac{3}{5}"),
("\u2158", "\\textfrac{4}{5}"),
("\u2159", "\\textfrac{1}{6}"),
("\u215A", "\\textfrac{5}{6}"),
("\u215B", "\\textfrac{1}{8}"),
("\u215C", "\\textfrac{3}{8}"),
("\u215D", "\\textfrac{5}{8}"),
("\u215E", "\\textfrac{7}{8}"),
("\u2190", "\\leftarrow "),
("\u2191", "\\uparrow "),
("\u2192", "\\rightarrow "),
("\u2193", "\\downarrow "),
("\u2194", "\\leftrightarrow "),
("\u2195", "\\updownarrow "),
("\u2196", "\\nwarrow "),
("\u2197", "\\nearrow "),
("\u2198", "\\searrow "),
("\u2199", "\\swarrow "),
("\u219A", "\\nleftarrow "),
("\u219B", "\\nrightarrow "),
("\u219C", "\\arrowwaveright "),
("\u219D", "\\arrowwaveright "),
("\u219E", "\\twoheadleftarrow "),
("\u21A0", "\\twoheadrightarrow "),
("\u21A2", "\\leftarrowtail "),
("\u21A3", "\\rightarrowtail "),
("\u21A6", "\\mapsto "),
("\u21A9", "\\hookleftarrow "),
("\u21AA", "\\hookrightarrow "),
("\u21AB", "\\looparrowleft "),
("\u21AC", "\\looparrowright "),
("\u21AD", "\\leftrightsquigarrow "),
("\u21AE", "\\nleftrightarrow "),
("\u21B0", "\\Lsh "),
("\u21B1", "\\Rsh "),
("\u21B3", "\\ElsevierGlyph{21B3}"),
("\u21B6", "\\curvearrowleft "),
("\u21B7", "\\curvearrowright "),
("\u21BA", "\\circlearrowleft "),
("\u21BB", "\\circlearrowright "),
("\u21BC", "\\leftharpoonup "),
("\u21BD", "\\leftharpoondown "),
("\u21BE", "\\upharpoonright "),
("\u21BF", "\\upharpoonleft "),
("\u21C0", "\\rightharpoonup "),
("\u21C1", "\\rightharpoondown "),
("\u21C2", "\\downharpoonright "),
("\u21C3", "\\downharpoonleft "),
("\u21C4", "\\rightleftarrows "),
("\u21C5", "\\dblarrowupdown "),
("\u21C6", "\\leftrightarrows "),
("\u21C7", "\\leftleftarrows "),
("\u21C8", "\\upuparrows "),
("\u21C9", "\\rightrightarrows "),
("\u21CA", "\\downdownarrows "),
("\u21CB", "\\leftrightharpoons "),
("\u21CC", "\\rightleftharpoons "),
("\u21CD", "\\nLeftarrow "),
("\u21CE", "\\nLeftrightarrow "),
("\u21CF", "\\nRightarrow "),
("\u21D0", "\\Leftarrow "),
("\u21D1", "\\Uparrow "),
("\u21D2", "\\Rightarrow "),
("\u21D3", "\\Downarrow "),
("\u21D4", "\\Leftrightarrow "),
("\u21D5", "\\Updownarrow "),
("\u21DA", "\\Lleftarrow "),
("\u21DB", "\\Rrightarrow "),
("\u21DD", "\\rightsquigarrow "),
("\u21F5", "\\DownArrowUpArrow "),
("\u2200", "\\forall "),
("\u2201", "\\complement "),
("\u2202", "\\partial "),
("\u2203", "\\exists "),
("\u2204", "\\nexists "),
("\u2205", "\\varnothing "),
("\u2207", "\\nabla "),
("\u2208", "\\in "),
("\u2209", "\\not\\in "),
("\u220B", "\\ni "),
("\u220C", "\\not\\ni "),
("\u220F", "\\prod "),
("\u2210", "\\coprod "),
("\u2211", "\\sum "),
("\u2213", "\\mp "),
("\u2214", "\\dotplus "),
("\u2216", "\\setminus "),
("\u2217", "{_\\ast}"),
("\u2218", "\\circ "),
("\u2219", "\\bullet "),
("\u221A", "\\surd "),
("\u221D", "\\propto "),
("\u221E", "\\infty "),
("\u221F", "\\rightangle "),
("\u2220", "\\angle "),
("\u2221", "\\measuredangle "),
("\u2222", "\\sphericalangle "),
("\u2223", "\\mid "),
("\u2224", "\\nmid "),
("\u2225", "\\parallel "),
("\u2226", "\\nparallel "),
("\u2227", "\\wedge "),
("\u2228", "\\vee "),
("\u2229", "\\cap "),
("\u222A", "\\cup "),
("\u222B", "\\int "),
("\u222C", "\\int\\!\\int "),
("\u222D", "\\int\\!\\int\\!\\int "),
("\u222E", "\\oint "),
("\u222F", "\\surfintegral "),
("\u2230", "\\volintegral "),
("\u2231", "\\clwintegral "),
("\u2232", "\\ElsevierGlyph{2232}"),
("\u2233", "\\ElsevierGlyph{2233}"),
("\u2234", "\\therefore "),
("\u2235", "\\because "),
("\u2237", "\\Colon "),
("\u2238", "\\ElsevierGlyph{2238}"),
("\u223A", "\\mathbin{{:}\\!\\!{-}\\!\\!{:}}"),
("\u223B", "\\homothetic "),
("\u223C", "\\sim "),
("\u223D", "\\backsim "),
("\u223E", "\\lazysinv "),
("\u2240", "\\wr "),
("\u2241", "\\not\\sim "),
("\u2242", "\\ElsevierGlyph{2242}"),
("\u2242-00338", "\\NotEqualTilde "),
("\u2243", "\\simeq "),
("\u2244", "\\not\\simeq "),
("\u2245", "\\cong "),
("\u2246", "\\approxnotequal "),
("\u2247", "\\not\\cong "),
("\u2248", "\\approx "),
("\u2249", "\\not\\approx "),
("\u224A", "\\approxeq "),
("\u224B", "\\tildetrpl "),
("\u224B-00338", "\\not\\apid "),
("\u224C", "\\allequal "),
("\u224D", "\\asymp "),
("\u224E", "\\Bumpeq "),
("\u224E-00338", "\\NotHumpDownHump "),
("\u224F", "\\bumpeq "),
("\u224F-00338", "\\NotHumpEqual "),
("\u2250", "\\doteq "),
("\u2250-00338", "\\not\\doteq"),
("\u2251", "\\doteqdot "),
("\u2252", "\\fallingdotseq "),
("\u2253", "\\risingdotseq "),
("\u2254", ":="),
("\u2255", "=:"),
("\u2256", "\\eqcirc "),
("\u2257", "\\circeq "),
("\u2259", "\\estimates "),
("\u225A", "\\ElsevierGlyph{225A}"),
("\u225B", "\\starequal "),
("\u225C", "\\triangleq "),
("\u225F", "\\ElsevierGlyph{225F}"),
("\u2260", "\\not ="),
("\u2261", "\\equiv "),
("\u2262", "\\not\\equiv "),
("\u2264", "\\leq "),
("\u2265", "\\geq "),
("\u2266", "\\leqq "),
("\u2267", "\\geqq "),
("\u2268", "\\lneqq "),
("\u2268-0FE00", "\\lvertneqq "),
("\u2269", "\\gneqq "),
("\u2269-0FE00", "\\gvertneqq "),
("\u226A", "\\ll "),
("\u226A-00338", "\\NotLessLess "),
("\u226B", "\\gg "),
("\u226B-00338", "\\NotGreaterGreater "),
("\u226C", "\\between "),
("\u226D", "\\not\\kern-0.3em\\times "),
("\u226E", "\\not<"),
("\u226F", "\\not>"),
("\u2270", "\\not\\leq "),
("\u2271", "\\not\\geq "),
("\u2272", "\\lessequivlnt "),
("\u2273", "\\greaterequivlnt "),
("\u2274", "\\ElsevierGlyph{2274}"),
("\u2275", "\\ElsevierGlyph{2275}"),
("\u2276", "\\lessgtr "),
("\u2277", "\\gtrless "),
("\u2278", "\\notlessgreater "),
("\u2279", "\\notgreaterless "),
("\u227A", "\\prec "),
("\u227B", "\\succ "),
("\u227C", "\\preccurlyeq "),
("\u227D", "\\succcurlyeq "),
("\u227E", "\\precapprox "),
("\u227E-00338", "\\NotPrecedesTilde "),
("\u227F", "\\succapprox "),
("\u227F-00338", "\\NotSucceedsTilde "),
("\u2280", "\\not\\prec "),
("\u2281", "\\not\\succ "),
("\u2282", "\\subset "),
("\u2283", "\\supset "),
("\u2284", "\\not\\subset "),
("\u2285", "\\not\\supset "),
("\u2286", "\\subseteq "),
("\u2287", "\\supseteq "),
("\u2288", "\\not\\subseteq "),
("\u2289", "\\not\\supseteq "),
("\u228A", "\\subsetneq "),
("\u228A-0FE00", "\\varsubsetneqq "),
("\u228B", "\\supsetneq "),
("\u228B-0FE00", "\\varsupsetneq "),
("\u228E", "\\uplus "),
("\u228F", "\\sqsubset "),
("\u228F-00338", "\\NotSquareSubset "),
("\u2290", "\\sqsupset "),
("\u2290-00338", "\\NotSquareSuperset "),
("\u2291", "\\sqsubseteq "),
("\u2292", "\\sqsupseteq "),
("\u2293", "\\sqcap "),
("\u2294", "\\sqcup "),
("\u2295", "\\oplus "),
("\u2296", "\\ominus "),
("\u2297", "\\otimes "),
("\u2298", "\\oslash "),
("\u2299", "\\odot "),
("\u229A", "\\circledcirc "),
("\u229B", "\\circledast "),
("\u229D", "\\circleddash "),
("\u229E", "\\boxplus "),
("\u229F", "\\boxminus "),
("\u22A0", "\\boxtimes "),
("\u22A1", "\\boxdot "),
("\u22A2", "\\vdash "),
("\u22A3", "\\dashv "),
("\u22A4", "\\top "),
("\u22A5", "\\perp "),
("\u22A7", "\\truestate "),
("\u22A8", "\\forcesextra "),
("\u22A9", "\\Vdash "),
("\u22AA", "\\Vvdash "),
("\u22AB", "\\VDash "),
("\u22AC", "\\nvdash "),
("\u22AD", "\\nvDash "),
("\u22AE", "\\nVdash "),
("\u22AF", "\\nVDash "),
("\u22B2", "\\vartriangleleft "),
("\u22B3", "\\vartriangleright "),
("\u22B4", "\\trianglelefteq "),
("\u22B5", "\\trianglerighteq "),
("\u22B6", "\\original "),
("\u22B7", "\\image "),
("\u22B8", "\\multimap "),
("\u22B9", "\\hermitconjmatrix "),
("\u22BA", "\\intercal "),
("\u22BB", "\\veebar "),
("\u22BE", "\\rightanglearc "),
("\u22C0", "\\ElsevierGlyph{22C0}"),
("\u22C1", "\\ElsevierGlyph{22C1}"),
("\u22C2", "\\bigcap "),
("\u22C3", "\\bigcup "),
("\u22C4", "\\diamond "),
("\u22C5", "\\cdot "),
("\u22C6", "\\star "),
("\u22C7", "\\divideontimes "),
("\u22C8", "\\bowtie "),
("\u22C9", "\\ltimes "),
("\u22CA", "\\rtimes "),
("\u22CB", "\\leftthreetimes "),
("\u22CC", "\\rightthreetimes "),
("\u22CD", "\\backsimeq "),
("\u22CE", "\\curlyvee "),
("\u22CF", "\\curlywedge "),
("\u22D0", "\\Subset "),
("\u22D1", "\\Supset "),
("\u22D2", "\\Cap "),
("\u22D3", "\\Cup "),
("\u22D4", "\\pitchfork "),
("\u22D6", "\\lessdot "),
("\u22D7", "\\gtrdot "),
("\u22D8", "\\verymuchless "),
("\u22D9", "\\verymuchgreater "),
("\u22DA", "\\lesseqgtr "),
("\u22DB", "\\gtreqless "),
("\u22DE", "\\curlyeqprec "),
("\u22DF", "\\curlyeqsucc "),
("\u22E2", "\\not\\sqsubseteq "),
("\u22E3", "\\not\\sqsupseteq "),
("\u22E5", "\\Elzsqspne "),
("\u22E6", "\\lnsim "),
("\u22E7", "\\gnsim "),
("\u22E8", "\\precedesnotsimilar "),
("\u22E9", "\\succnsim "),
("\u22EA", "\\ntriangleleft "),
("\u22EB", "\\ntriangleright "),
("\u22EC", "\\ntrianglelefteq "),
("\u22ED", "\\ntrianglerighteq "),
("\u22EE", "\\vdots "),
("\u22EF", "\\cdots "),
("\u22F0", "\\upslopeellipsis "),
("\u22F1", "\\downslopeellipsis "),
("\u2305", "\\barwedge "),
("\u2306", "\\perspcorrespond "),
("\u2308", "\\lceil "),
("\u2309", "\\rceil "),
("\u230A", "\\lfloor "),
("\u230B", "\\rfloor "),
("\u2315", "\\recorder "),
("\u2316", "\\mathchar\"2208"),
("\u231C", "\\ulcorner "),
("\u231D", "\\urcorner "),
("\u231E", "\\llcorner "),
("\u231F", "\\lrcorner "),
("\u2322", "\\frown "),
("\u2323", "\\smile "),
("\u2329", "\\langle "),
("\u232A", "\\rangle "),
("\u233D", "\\ElsevierGlyph{E838}"),
("\u23A3", "\\Elzdlcorn "),
("\u23B0", "\\lmoustache "),
("\u23B1", "\\rmoustache "),
("\u2423", "\\textvisiblespace "),
("\u2460", "\\ding{172}"),
("\u2461", "\\ding{173}"),
("\u2462", "\\ding{174}"),
("\u2463", "\\ding{175}"),
("\u2464", "\\ding{176}"),
("\u2465", "\\ding{177}"),
("\u2466", "\\ding{178}"),
("\u2467", "\\ding{179}"),
("\u2468", "\\ding{180}"),
("\u2469", "\\ding{181}"),
("\u24C8", "\\circledS "),
("\u2506", "\\Elzdshfnc "),
("\u2519", "\\Elzsqfnw "),
("\u2571", "\\diagup "),
("\u25A0", "\\ding{110}"),
("\u25A1", "\\square "),
("\u25AA", "\\blacksquare "),
("\u25AD", "\\fbox{~~}"),
("\u25AF", "\\Elzvrecto "),
("\u25B1", "\\ElsevierGlyph{E381}"),
("\u25B2", "\\ding{115}"),
("\u25B3", "\\bigtriangleup "),
("\u25B4", "\\blacktriangle "),
("\u25B5", "\\vartriangle "),
("\u25B8", "\\blacktriangleright "),
("\u25B9", "\\triangleright "),
("\u25BC", "\\ding{116}"),
("\u25BD", "\\bigtriangledown "),
("\u25BE", "\\blacktriangledown "),
("\u25BF", "\\triangledown "),
("\u25C2", "\\blacktriangleleft "),
("\u25C3", "\\triangleleft "),
("\u25C6", "\\ding{117}"),
("\u25CA", "\\lozenge "),
("\u25CB", "\\bigcirc "),
("\u25CF", "\\ding{108}"),
("\u25D0", "\\Elzcirfl "),
("\u25D1", "\\Elzcirfr "),
("\u25D2", "\\Elzcirfb "),
("\u25D7", "\\ding{119}"),
("\u25D8", "\\Elzrvbull "),
("\u25E7", "\\Elzsqfl "),
("\u25E8", "\\Elzsqfr "),
("\u25EA", "\\Elzsqfse "),
("\u25EF", "\\bigcirc "),
("\u2605", "\\ding{72}"),
("\u2606", "\\ding{73}"),
("\u260E", "\\ding{37}"),
("\u261B", "\\ding{42}"),
("\u261E", "\\ding{43}"),
("\u263E", "\\rightmoon "),
("\u263F", "\\mercury "),
("\u2640", "\\venus "),
("\u2642", "\\male "),
("\u2643", "\\jupiter "),
("\u2644", "\\saturn "),
("\u2645", "\\uranus "),
("\u2646", "\\neptune "),
("\u2647", "\\pluto "),
("\u2648", "\\aries "),
("\u2649", "\\taurus "),
("\u264A", "\\gemini "),
("\u264B", "\\cancer "),
("\u264C", "\\leo "),
("\u264D", "\\virgo "),
("\u264E", "\\libra "),
("\u264F", "\\scorpio "),
("\u2650", "\\sagittarius "),
("\u2651", "\\capricornus "),
("\u2652", "\\aquarius "),
("\u2653", "\\pisces "),
("\u2660", "\\ding{171}"),
("\u2662", "\\diamond "),
("\u2663", "\\ding{168}"),
("\u2665", "\\ding{170}"),
("\u2666", "\\ding{169}"),
("\u2669", "\\quarternote "),
("\u266A", "\\eighthnote "),
("\u266D", "\\flat "),
("\u266E", "\\natural "),
("\u266F", "\\sharp "),
("\u2701", "\\ding{33}"),
("\u2702", "\\ding{34}"),
("\u2703", "\\ding{35}"),
("\u2704", "\\ding{36}"),
("\u2706", "\\ding{38}"),
("\u2707", "\\ding{39}"),
("\u2708", "\\ding{40}"),
("\u2709", "\\ding{41}"),
("\u270C", "\\ding{44}"),
("\u270D", "\\ding{45}"),
("\u270E", "\\ding{46}"),
("\u270F", "\\ding{47}"),
("\u2710", "\\ding{48}"),
("\u2711", "\\ding{49}"),
("\u2712", "\\ding{50}"),
("\u2713", "\\ding{51}"),
("\u2714", "\\ding{52}"),
("\u2715", "\\ding{53}"),
("\u2716", "\\ding{54}"),
("\u2717", "\\ding{55}"),
("\u2718", "\\ding{56}"),
("\u2719", "\\ding{57}"),
("\u271A", "\\ding{58}"),
("\u271B", "\\ding{59}"),
("\u271C", "\\ding{60}"),
("\u271D", "\\ding{61}"),
("\u271E", "\\ding{62}"),
("\u271F", "\\ding{63}"),
("\u2720", "\\ding{64}"),
("\u2721", "\\ding{65}"),
("\u2722", "\\ding{66}"),
("\u2723", "\\ding{67}"),
("\u2724", "\\ding{68}"),
("\u2725", "\\ding{69}"),
("\u2726", "\\ding{70}"),
("\u2727", "\\ding{71}"),
("\u2729", "\\ding{73}"),
("\u272A", "\\ding{74}"),
("\u272B", "\\ding{75}"),
("\u272C", "\\ding{76}"),
("\u272D", "\\ding{77}"),
("\u272E", "\\ding{78}"),
("\u272F", "\\ding{79}"),
("\u2730", "\\ding{80}"),
("\u2731", "\\ding{81}"),
("\u2732", "\\ding{82}"),
("\u2733", "\\ding{83}"),
("\u2734", "\\ding{84}"),
("\u2735", "\\ding{85}"),
("\u2736", "\\ding{86}"),
("\u2737", "\\ding{87}"),
("\u2738", "\\ding{88}"),
("\u2739", "\\ding{89}"),
("\u273A", "\\ding{90}"),
("\u273B", "\\ding{91}"),
("\u273C", "\\ding{92}"),
("\u273D", "\\ding{93}"),
("\u273E", "\\ding{94}"),
("\u273F", "\\ding{95}"),
("\u2740", "\\ding{96}"),
("\u2741", "\\ding{97}"),
("\u2742", "\\ding{98}"),
("\u2743", "\\ding{99}"),
("\u2744", "\\ding{100}"),
("\u2745", "\\ding{101}"),
("\u2746", "\\ding{102}"),
("\u2747", "\\ding{103}"),
("\u2748", "\\ding{104}"),
("\u2749", "\\ding{105}"),
("\u274A", "\\ding{106}"),
("\u274B", "\\ding{107}"),
("\u274D", "\\ding{109}"),
("\u274F", "\\ding{111}"),
("\u2750", "\\ding{112}"),
("\u2751", "\\ding{113}"),
("\u2752", "\\ding{114}"),
("\u2756", "\\ding{118}"),
("\u2758", "\\ding{120}"),
("\u2759", "\\ding{121}"),
("\u275A", "\\ding{122}"),
("\u275B", "\\ding{123}"),
("\u275C", "\\ding{124}"),
("\u275D", "\\ding{125}"),
("\u275E", "\\ding{126}"),
("\u2761", "\\ding{161}"),
("\u2762", "\\ding{162}"),
("\u2763", "\\ding{163}"),
("\u2764", "\\ding{164}"),
("\u2765", "\\ding{165}"),
("\u2766", "\\ding{166}"),
("\u2767", "\\ding{167}"),
("\u2776", "\\ding{182}"),
("\u2777", "\\ding{183}"),
("\u2778", "\\ding{184}"),
("\u2779", "\\ding{185}"),
("\u277A", "\\ding{186}"),
("\u277B", "\\ding{187}"),
("\u277C", "\\ding{188}"),
("\u277D", "\\ding{189}"),
("\u277E", "\\ding{190}"),
("\u277F", "\\ding{191}"),
("\u2780", "\\ding{192}"),
("\u2781", "\\ding{193}"),
("\u2782", "\\ding{194}"),
("\u2783", "\\ding{195}"),
("\u2784", "\\ding{196}"),
("\u2785", "\\ding{197}"),
("\u2786", "\\ding{198}"),
("\u2787", "\\ding{199}"),
("\u2788", "\\ding{200}"),
("\u2789", "\\ding{201}"),
("\u278A", "\\ding{202}"),
("\u278B", "\\ding{203}"),
("\u278C", "\\ding{204}"),
("\u278D", "\\ding{205}"),
("\u278E", "\\ding{206}"),
("\u278F", "\\ding{207}"),
("\u2790", "\\ding{208}"),
("\u2791", "\\ding{209}"),
("\u2792", "\\ding{210}"),
("\u2793", "\\ding{211}"),
("\u2794", "\\ding{212}"),
("\u2798", "\\ding{216}"),
("\u2799", "\\ding{217}"),
("\u279A", "\\ding{218}"),
("\u279B", "\\ding{219}"),
("\u279C", "\\ding{220}"),
("\u279D", "\\ding{221}"),
("\u279E", "\\ding{222}"),
("\u279F", "\\ding{223}"),
("\u27A0", "\\ding{224}"),
("\u27A1", "\\ding{225}"),
("\u27A2", "\\ding{226}"),
("\u27A3", "\\ding{227}"),
("\u27A4", "\\ding{228}"),
("\u27A5", "\\ding{229}"),
("\u27A6", "\\ding{230}"),
("\u27A7", "\\ding{231}"),
("\u27A8", "\\ding{232}"),
("\u27A9", "\\ding{233}"),
("\u27AA", "\\ding{234}"),
("\u27AB", "\\ding{235}"),
("\u27AC", "\\ding{236}"),
("\u27AD", "\\ding{237}"),
("\u27AE", "\\ding{238}"),
("\u27AF", "\\ding{239}"),
("\u27B1", "\\ding{241}"),
("\u27B2", "\\ding{242}"),
("\u27B3", "\\ding{243}"),
("\u27B4", "\\ding{244}"),
("\u27B5", "\\ding{245}"),
("\u27B6", "\\ding{246}"),
("\u27B7", "\\ding{247}"),
("\u27B8", "\\ding{248}"),
("\u27B9", "\\ding{249}"),
("\u27BA", "\\ding{250}"),
("\u27BB", "\\ding{251}"),
("\u27BC", "\\ding{252}"),
("\u27BD", "\\ding{253}"),
("\u27BE", "\\ding{254}"),
("\u27F5", "\\longleftarrow "),
("\u27F6", "\\longrightarrow "),
("\u27F7", "\\longleftrightarrow "),
("\u27F8", "\\Longleftarrow "),
("\u27F9", "\\Longrightarrow "),
("\u27FA", "\\Longleftrightarrow "),
("\u27FC", "\\longmapsto "),
("\u27FF", "\\sim\\joinrel\\leadsto"),
("\u2905", "\\ElsevierGlyph{E212}"),
("\u2912", "\\UpArrowBar "),
("\u2913", "\\DownArrowBar "),
("\u2923", "\\ElsevierGlyph{E20C}"),
("\u2924", "\\ElsevierGlyph{E20D}"),
("\u2925", "\\ElsevierGlyph{E20B}"),
("\u2926", "\\ElsevierGlyph{E20A}"),
("\u2927", "\\ElsevierGlyph{E211}"),
("\u2928", "\\ElsevierGlyph{E20E}"),
("\u2929", "\\ElsevierGlyph{E20F}"),
("\u292A", "\\ElsevierGlyph{E210}"),
("\u2933", "\\ElsevierGlyph{E21C}"),
("\u2933-00338", "\\ElsevierGlyph{E21D}"),
("\u2936", "\\ElsevierGlyph{E21A}"),
("\u2937", "\\ElsevierGlyph{E219}"),
("\u2940", "\\Elolarr "),
("\u2941", "\\Elorarr "),
("\u2942", "\\ElzRlarr "),
("\u2944", "\\ElzrLarr "),
("\u2947", "\\Elzrarrx "),
("\u294E", "\\LeftRightVector "),
("\u294F", "\\RightUpDownVector "),
("\u2950", "\\DownLeftRightVector "),
("\u2951", "\\LeftUpDownVector "),
("\u2952", "\\LeftVectorBar "),
("\u2953", "\\RightVectorBar "),
("\u2954", "\\RightUpVectorBar "),
("\u2955", "\\RightDownVectorBar "),
("\u2956", "\\DownLeftVectorBar "),
("\u2957", "\\DownRightVectorBar "),
("\u2958", "\\LeftUpVectorBar "),
("\u2959", "\\LeftDownVectorBar "),
("\u295A", "\\LeftTeeVector "),
("\u295B", "\\RightTeeVector "),
("\u295C", "\\RightUpTeeVector "),
("\u295D", "\\RightDownTeeVector "),
("\u295E", "\\DownLeftTeeVector "),
("\u295F", "\\DownRightTeeVector "),
("\u2960", "\\LeftUpTeeVector "),
("\u2961", "\\LeftDownTeeVector "),
("\u296E", "\\UpEquilibrium "),
("\u296F", "\\ReverseUpEquilibrium "),
("\u2970", "\\RoundImplies "),
("\u297C", "\\ElsevierGlyph{E214}"),
("\u297D", "\\ElsevierGlyph{E215}"),
("\u2980", "\\Elztfnc "),
("\u2985", "\\ElsevierGlyph{3018}"),
("\u2986", "\\Elroang "),
("\u2993", "<\\kern-0.58em("),
("\u2994", "\\ElsevierGlyph{E291}"),
("\u2999", "\\Elzddfnc "),
("\u299C", "\\Angle "),
("\u29A0", "\\Elzlpargt "),
("\u29B5", "\\ElsevierGlyph{E260}"),
("\u29B6", "\\ElsevierGlyph{E61B}"),
("\u29CA", "\\ElzLap "),
("\u29CB", "\\Elzdefas "),
("\u29CF", "\\LeftTriangleBar "),
("\u29CF-00338", "\\NotLeftTriangleBar "),
("\u29D0", "\\RightTriangleBar "),
("\u29D0-00338", "\\NotRightTriangleBar "),
("\u29DC", "\\ElsevierGlyph{E372}"),
("\u29EB", "\\blacklozenge "),
("\u29F4", "\\RuleDelayed "),
("\u2A04", "\\Elxuplus "),
("\u2A05", "\\ElzThr "),
("\u2A06", "\\Elxsqcup "),
("\u2A07", "\\ElzInf "),
("\u2A08", "\\ElzSup "),
("\u2A0D", "\\ElzCint "),
("\u2A0F", "\\clockoint "),
("\u2A10", "\\ElsevierGlyph{E395}"),
("\u2A16", "\\sqrint "),
("\u2A25", "\\ElsevierGlyph{E25A}"),
("\u2A2A", "\\ElsevierGlyph{E25B}"),
("\u2A2D", "\\ElsevierGlyph{E25C}"),
("\u2A2E", "\\ElsevierGlyph{E25D}"),
("\u2A2F", "\\ElzTimes "),
("\u2A34", "\\ElsevierGlyph{E25E}"),
("\u2A35", "\\ElsevierGlyph{E25E}"),
("\u2A3C", "\\ElsevierGlyph{E259}"),
("\u2A3F", "\\amalg "),
("\u2A53", "\\ElzAnd "),
("\u2A54", "\\ElzOr "),
("\u2A55", "\\ElsevierGlyph{E36E}"),
("\u2A56", "\\ElOr "),
("\u2A5E", "\\perspcorrespond "),
("\u2A5F", "\\Elzminhat "),
("\u2A63", "\\ElsevierGlyph{225A}"),
("\u2A6E", "\\stackrel{*}{=}"),
("\u2A75", "\\Equal "),
("\u2A7D", "\\leqslant "),
("\u2A7D-00338", "\\nleqslant "),
("\u2A7E", "\\geqslant "),
("\u2A7E-00338", "\\ngeqslant "),
("\u2A85", "\\lessapprox "),
("\u2A86", "\\gtrapprox "),
("\u2A87", "\\lneq "),
("\u2A88", "\\gneq "),
("\u2A89", "\\lnapprox "),
("\u2A8A", "\\gnapprox "),
("\u2A8B", "\\lesseqqgtr "),
("\u2A8C", "\\gtreqqless "),
("\u2A95", "\\eqslantless "),
("\u2A96", "\\eqslantgtr "),
("\u2A9D", "\\Pisymbol{ppi020}{117}"),
("\u2A9E", "\\Pisymbol{ppi020}{105}"),
("\u2AA1", "\\NestedLessLess "),
("\u2AA1-00338", "\\NotNestedLessLess "),
("\u2AA2", "\\NestedGreaterGreater "),
("\u2AA2-00338", "\\NotNestedGreaterGreater "),
("\u2AAF", "\\preceq "),
("\u2AAF-00338", "\\not\\preceq "),
("\u2AB0", "\\succeq "),
("\u2AB0-00338", "\\not\\succeq "),
("\u2AB5", "\\precneqq "),
("\u2AB6", "\\succneqq "),
("\u2AB7", "\\precapprox "),
("\u2AB8", "\\succapprox "),
("\u2AB9", "\\precnapprox "),
("\u2ABA", "\\succnapprox "),
("\u2AC5", "\\subseteqq "),
("\u2AC5-00338", "\\nsubseteqq "),
("\u2AC6", "\\supseteqq "),
("\u2AC6-00338", "\\nsupseteqq"),
("\u2ACB", "\\subsetneqq "),
("\u2ACC", "\\supsetneqq "),
("\u2AEB", "\\ElsevierGlyph{E30D}"),
("\u2AF6", "\\Elztdcol "),
("\u2AFD", "{{/}\\!\\!{/}}"),
("\u2AFD-020E5", "{\\rlap{\\textbackslash}{{/}\\!\\!{/}}}"),
("\u300A", "\\ElsevierGlyph{300A}"),
("\u300B", "\\ElsevierGlyph{300B}"),
("\u3018", "\\ElsevierGlyph{3018}"),
("\u3019", "\\ElsevierGlyph{3019}"),
("\u301A", "\\openbracketleft "),
("\u301B", "\\openbracketright "),
# (u"\uFB00", "ff"),
# (u"\uFB01", "fi"),
# (u"\uFB02", "fl"),
# (u"\uFB03", "ffi"),
# (u"\uFB04", "ffl"),
("\uD400", "\\mathbf{A}"),
("\uD401", "\\mathbf{B}"),
("\uD402", "\\mathbf{C}"),
("\uD403", "\\mathbf{D}"),
("\uD404", "\\mathbf{E}"),
("\uD405", "\\mathbf{F}"),
("\uD406", "\\mathbf{G}"),
("\uD407", "\\mathbf{H}"),
("\uD408", "\\mathbf{I}"),
("\uD409", "\\mathbf{J}"),
("\uD40A", "\\mathbf{K}"),
("\uD40B", "\\mathbf{L}"),
("\uD40C", "\\mathbf{M}"),
("\uD40D", "\\mathbf{N}"),
("\uD40E", "\\mathbf{O}"),
("\uD40F", "\\mathbf{P}"),
("\uD410", "\\mathbf{Q}"),
("\uD411", "\\mathbf{R}"),
("\uD412", "\\mathbf{S}"),
("\uD413", "\\mathbf{T}"),
("\uD414", "\\mathbf{U}"),
("\uD415", "\\mathbf{V}"),
("\uD416", "\\mathbf{W}"),
("\uD417", "\\mathbf{X}"),
("\uD418", "\\mathbf{Y}"),
("\uD419", "\\mathbf{Z}"),
("\uD41A", "\\mathbf{a}"),
("\uD41B", "\\mathbf{b}"),
("\uD41C", "\\mathbf{c}"),
("\uD41D", "\\mathbf{d}"),
("\uD41E", "\\mathbf{e}"),
("\uD41F", "\\mathbf{f}"),
("\uD420", "\\mathbf{g}"),
("\uD421", "\\mathbf{h}"),
("\uD422", "\\mathbf{i}"),
("\uD423", "\\mathbf{j}"),
("\uD424", "\\mathbf{k}"),
("\uD425", "\\mathbf{l}"),
("\uD426", "\\mathbf{m}"),
("\uD427", "\\mathbf{n}"),
("\uD428", "\\mathbf{o}"),
("\uD429", "\\mathbf{p}"),
("\uD42A", "\\mathbf{q}"),
("\uD42B", "\\mathbf{r}"),
("\uD42C", "\\mathbf{s}"),
("\uD42D", "\\mathbf{t}"),
("\uD42E", "\\mathbf{u}"),
("\uD42F", "\\mathbf{v}"),
("\uD430", "\\mathbf{w}"),
("\uD431", "\\mathbf{x}"),
("\uD432", "\\mathbf{y}"),
("\uD433", "\\mathbf{z}"),
("\uD434", "\\mathsl{A}"),
("\uD435", "\\mathsl{B}"),
("\uD436", "\\mathsl{C}"),
("\uD437", "\\mathsl{D}"),
("\uD438", "\\mathsl{E}"),
("\uD439", "\\mathsl{F}"),
("\uD43A", "\\mathsl{G}"),
("\uD43B", "\\mathsl{H}"),
("\uD43C", "\\mathsl{I}"),
("\uD43D", "\\mathsl{J}"),
("\uD43E", "\\mathsl{K}"),
("\uD43F", "\\mathsl{L}"),
("\uD440", "\\mathsl{M}"),
("\uD441", "\\mathsl{N}"),
("\uD442", "\\mathsl{O}"),
("\uD443", "\\mathsl{P}"),
("\uD444", "\\mathsl{Q}"),
("\uD445", "\\mathsl{R}"),
("\uD446", "\\mathsl{S}"),
("\uD447", "\\mathsl{T}"),
("\uD448", "\\mathsl{U}"),
("\uD449", "\\mathsl{V}"),
("\uD44A", "\\mathsl{W}"),
("\uD44B", "\\mathsl{X}"),
("\uD44C", "\\mathsl{Y}"),
("\uD44D", "\\mathsl{Z}"),
("\uD44E", "\\mathsl{a}"),
("\uD44F", "\\mathsl{b}"),
("\uD450", "\\mathsl{c}"),
("\uD451", "\\mathsl{d}"),
("\uD452", "\\mathsl{e}"),
("\uD453", "\\mathsl{f}"),
("\uD454", "\\mathsl{g}"),
("\uD456", "\\mathsl{i}"),
("\uD457", "\\mathsl{j}"),
("\uD458", "\\mathsl{k}"),
("\uD459", "\\mathsl{l}"),
("\uD45A", "\\mathsl{m}"),
("\uD45B", "\\mathsl{n}"),
("\uD45C", "\\mathsl{o}"),
("\uD45D", "\\mathsl{p}"),
("\uD45E", "\\mathsl{q}"),
("\uD45F", "\\mathsl{r}"),
("\uD460", "\\mathsl{s}"),
("\uD461", "\\mathsl{t}"),
("\uD462", "\\mathsl{u}"),
("\uD463", "\\mathsl{v}"),
("\uD464", "\\mathsl{w}"),
("\uD465", "\\mathsl{x}"),
("\uD466", "\\mathsl{y}"),
("\uD467", "\\mathsl{z}"),
("\uD468", "\\mathbit{A}"),
("\uD469", "\\mathbit{B}"),
("\uD46A", "\\mathbit{C}"),
("\uD46B", "\\mathbit{D}"),
("\uD46C", "\\mathbit{E}"),
("\uD46D", "\\mathbit{F}"),
("\uD46E", "\\mathbit{G}"),
("\uD46F", "\\mathbit{H}"),
("\uD470", "\\mathbit{I}"),
("\uD471", "\\mathbit{J}"),
("\uD472", "\\mathbit{K}"),
("\uD473", "\\mathbit{L}"),
("\uD474", "\\mathbit{M}"),
("\uD475", "\\mathbit{N}"),
("\uD476", "\\mathbit{O}"),
("\uD477", "\\mathbit{P}"),
("\uD478", "\\mathbit{Q}"),
("\uD479", "\\mathbit{R}"),
("\uD47A", "\\mathbit{S}"),
("\uD47B", "\\mathbit{T}"),
("\uD47C", "\\mathbit{U}"),
("\uD47D", "\\mathbit{V}"),
("\uD47E", "\\mathbit{W}"),
("\uD47F", "\\mathbit{X}"),
("\uD480", "\\mathbit{Y}"),
("\uD481", "\\mathbit{Z}"),
("\uD482", "\\mathbit{a}"),
("\uD483", "\\mathbit{b}"),
("\uD484", "\\mathbit{c}"),
("\uD485", "\\mathbit{d}"),
("\uD486", "\\mathbit{e}"),
("\uD487", "\\mathbit{f}"),
("\uD488", "\\mathbit{g}"),
("\uD489", "\\mathbit{h}"),
("\uD48A", "\\mathbit{i}"),
("\uD48B", "\\mathbit{j}"),
("\uD48C", "\\mathbit{k}"),
("\uD48D", "\\mathbit{l}"),
("\uD48E", "\\mathbit{m}"),
("\uD48F", "\\mathbit{n}"),
("\uD490", "\\mathbit{o}"),
("\uD491", "\\mathbit{p}"),
("\uD492", "\\mathbit{q}"),
("\uD493", "\\mathbit{r}"),
("\uD494", "\\mathbit{s}"),
("\uD495", "\\mathbit{t}"),
("\uD496", "\\mathbit{u}"),
("\uD497", "\\mathbit{v}"),
("\uD498", "\\mathbit{w}"),
("\uD499", "\\mathbit{x}"),
("\uD49A", "\\mathbit{y}"),
("\uD49B", "\\mathbit{z}"),
("\uD49C", "\\mathscr{A}"),
("\uD49E", "\\mathscr{C}"),
("\uD49F", "\\mathscr{D}"),
("\uD4A2", "\\mathscr{G}"),
("\uD4A5", "\\mathscr{J}"),
("\uD4A6", "\\mathscr{K}"),
("\uD4A9", "\\mathscr{N}"),
("\uD4AA", "\\mathscr{O}"),
("\uD4AB", "\\mathscr{P}"),
("\uD4AC", "\\mathscr{Q}"),
("\uD4AE", "\\mathscr{S}"),
("\uD4AF", "\\mathscr{T}"),
("\uD4B0", "\\mathscr{U}"),
("\uD4B1", "\\mathscr{V}"),
("\uD4B2", "\\mathscr{W}"),
("\uD4B3", "\\mathscr{X}"),
("\uD4B4", "\\mathscr{Y}"),
("\uD4B5", "\\mathscr{Z}"),
("\uD4B6", "\\mathscr{a}"),
("\uD4B7", "\\mathscr{b}"),
("\uD4B8", "\\mathscr{c}"),
("\uD4B9", "\\mathscr{d}"),
("\uD4BB", "\\mathscr{f}"),
("\uD4BD", "\\mathscr{h}"),
("\uD4BE", "\\mathscr{i}"),
("\uD4BF", "\\mathscr{j}"),
("\uD4C0", "\\mathscr{k}"),
("\uD4C1", "\\mathscr{l}"),
("\uD4C2", "\\mathscr{m}"),
("\uD4C3", "\\mathscr{n}"),
("\uD4C5", "\\mathscr{p}"),
("\uD4C6", "\\mathscr{q}"),
("\uD4C7", "\\mathscr{r}"),
("\uD4C8", "\\mathscr{s}"),
("\uD4C9", "\\mathscr{t}"),
("\uD4CA", "\\mathscr{u}"),
("\uD4CB", "\\mathscr{v}"),
("\uD4CC", "\\mathscr{w}"),
("\uD4CD", "\\mathscr{x}"),
("\uD4CE", "\\mathscr{y}"),
("\uD4CF", "\\mathscr{z}"),
("\uD4D0", "\\mathmit{A}"),
("\uD4D1", "\\mathmit{B}"),
("\uD4D2", "\\mathmit{C}"),
("\uD4D3", "\\mathmit{D}"),
("\uD4D4", "\\mathmit{E}"),
("\uD4D5", "\\mathmit{F}"),
("\uD4D6", "\\mathmit{G}"),
("\uD4D7", "\\mathmit{H}"),
("\uD4D8", "\\mathmit{I}"),
("\uD4D9", "\\mathmit{J}"),
("\uD4DA", "\\mathmit{K}"),
("\uD4DB", "\\mathmit{L}"),
("\uD4DC", "\\mathmit{M}"),
("\uD4DD", "\\mathmit{N}"),
("\uD4DE", "\\mathmit{O}"),
("\uD4DF", "\\mathmit{P}"),
("\uD4E0", "\\mathmit{Q}"),
("\uD4E1", "\\mathmit{R}"),
("\uD4E2", "\\mathmit{S}"),
("\uD4E3", "\\mathmit{T}"),
("\uD4E4", "\\mathmit{U}"),
("\uD4E5", "\\mathmit{V}"),
("\uD4E6", "\\mathmit{W}"),
("\uD4E7", "\\mathmit{X}"),
("\uD4E8", "\\mathmit{Y}"),
("\uD4E9", "\\mathmit{Z}"),
("\uD4EA", "\\mathmit{a}"),
("\uD4EB", "\\mathmit{b}"),
("\uD4EC", "\\mathmit{c}"),
("\uD4ED", "\\mathmit{d}"),
("\uD4EE", "\\mathmit{e}"),
("\uD4EF", "\\mathmit{f}"),
("\uD4F0", "\\mathmit{g}"),
("\uD4F1", "\\mathmit{h}"),
("\uD4F2", "\\mathmit{i}"),
("\uD4F3", "\\mathmit{j}"),
("\uD4F4", "\\mathmit{k}"),
("\uD4F5", "\\mathmit{l}"),
("\uD4F6", "\\mathmit{m}"),
("\uD4F7", "\\mathmit{n}"),
("\uD4F8", "\\mathmit{o}"),
("\uD4F9", "\\mathmit{p}"),
("\uD4FA", "\\mathmit{q}"),
("\uD4FB", "\\mathmit{r}"),
("\uD4FC", "\\mathmit{s}"),
("\uD4FD", "\\mathmit{t}"),
("\uD4FE", "\\mathmit{u}"),
("\uD4FF", "\\mathmit{v}"),
("\uD500", "\\mathmit{w}"),
("\uD501", "\\mathmit{x}"),
("\uD502", "\\mathmit{y}"),
("\uD503", "\\mathmit{z}"),
("\uD504", "\\mathfrak{A}"),
("\uD505", "\\mathfrak{B}"),
("\uD507", "\\mathfrak{D}"),
("\uD508", "\\mathfrak{E}"),
("\uD509", "\\mathfrak{F}"),
("\uD50A", "\\mathfrak{G}"),
("\uD50D", "\\mathfrak{J}"),
("\uD50E", "\\mathfrak{K}"),
("\uD50F", "\\mathfrak{L}"),
("\uD510", "\\mathfrak{M}"),
("\uD511", "\\mathfrak{N}"),
("\uD512", "\\mathfrak{O}"),
("\uD513", "\\mathfrak{P}"),
("\uD514", "\\mathfrak{Q}"),
("\uD516", "\\mathfrak{S}"),
("\uD517", "\\mathfrak{T}"),
("\uD518", "\\mathfrak{U}"),
("\uD519", "\\mathfrak{V}"),
("\uD51A", "\\mathfrak{W}"),
("\uD51B", "\\mathfrak{X}"),
("\uD51C", "\\mathfrak{Y}"),
("\uD51E", "\\mathfrak{a}"),
("\uD51F", "\\mathfrak{b}"),
("\uD520", "\\mathfrak{c}"),
("\uD521", "\\mathfrak{d}"),
("\uD522", "\\mathfrak{e}"),
("\uD523", "\\mathfrak{f}"),
("\uD524", "\\mathfrak{g}"),
("\uD525", "\\mathfrak{h}"),
("\uD526", "\\mathfrak{i}"),
("\uD527", "\\mathfrak{j}"),
("\uD528", "\\mathfrak{k}"),
("\uD529", "\\mathfrak{l}"),
("\uD52A", "\\mathfrak{m}"),
("\uD52B", "\\mathfrak{n}"),
("\uD52C", "\\mathfrak{o}"),
("\uD52D", "\\mathfrak{p}"),
("\uD52E", "\\mathfrak{q}"),
("\uD52F", "\\mathfrak{r}"),
("\uD530", "\\mathfrak{s}"),
("\uD531", "\\mathfrak{t}"),
("\uD532", "\\mathfrak{u}"),
("\uD533", "\\mathfrak{v}"),
("\uD534", "\\mathfrak{w}"),
("\uD535", "\\mathfrak{x}"),
("\uD536", "\\mathfrak{y}"),
("\uD537", "\\mathfrak{z}"),
("\uD538", "\\mathbb{A}"),
("\uD539", "\\mathbb{B}"),
("\uD53B", "\\mathbb{D}"),
("\uD53C", "\\mathbb{E}"),
("\uD53D", "\\mathbb{F}"),
("\uD53E", "\\mathbb{G}"),
("\uD540", "\\mathbb{I}"),
("\uD541", "\\mathbb{J}"),
("\uD542", "\\mathbb{K}"),
("\uD543", "\\mathbb{L}"),
("\uD544", "\\mathbb{M}"),
("\uD546", "\\mathbb{O}"),
("\uD54A", "\\mathbb{S}"),
("\uD54B", "\\mathbb{T}"),
("\uD54C", "\\mathbb{U}"),
("\uD54D", "\\mathbb{V}"),
("\uD54E", "\\mathbb{W}"),
("\uD54F", "\\mathbb{X}"),
("\uD550", "\\mathbb{Y}"),
("\uD552", "\\mathbb{a}"),
("\uD553", "\\mathbb{b}"),
("\uD554", "\\mathbb{c}"),
("\uD555", "\\mathbb{d}"),
("\uD556", "\\mathbb{e}"),
("\uD557", "\\mathbb{f}"),
("\uD558", "\\mathbb{g}"),
("\uD559", "\\mathbb{h}"),
("\uD55A", "\\mathbb{i}"),
("\uD55B", "\\mathbb{j}"),
("\uD55C", "\\mathbb{k}"),
("\uD55D", "\\mathbb{l}"),
("\uD55E", "\\mathbb{m}"),
("\uD55F", "\\mathbb{n}"),
("\uD560", "\\mathbb{o}"),
("\uD561", "\\mathbb{p}"),
("\uD562", "\\mathbb{q}"),
("\uD563", "\\mathbb{r}"),
("\uD564", "\\mathbb{s}"),
("\uD565", "\\mathbb{t}"),
("\uD566", "\\mathbb{u}"),
("\uD567", "\\mathbb{v}"),
("\uD568", "\\mathbb{w}"),
("\uD569", "\\mathbb{x}"),
("\uD56A", "\\mathbb{y}"),
("\uD56B", "\\mathbb{z}"),
("\uD56C", "\\mathslbb{A}"),
("\uD56D", "\\mathslbb{B}"),
("\uD56E", "\\mathslbb{C}"),
("\uD56F", "\\mathslbb{D}"),
("\uD570", "\\mathslbb{E}"),
("\uD571", "\\mathslbb{F}"),
("\uD572", "\\mathslbb{G}"),
("\uD573", "\\mathslbb{H}"),
("\uD574", "\\mathslbb{I}"),
("\uD575", "\\mathslbb{J}"),
("\uD576", "\\mathslbb{K}"),
("\uD577", "\\mathslbb{L}"),
("\uD578", "\\mathslbb{M}"),
("\uD579", "\\mathslbb{N}"),
("\uD57A", "\\mathslbb{O}"),
("\uD57B", "\\mathslbb{P}"),
("\uD57C", "\\mathslbb{Q}"),
("\uD57D", "\\mathslbb{R}"),
("\uD57E", "\\mathslbb{S}"),
("\uD57F", "\\mathslbb{T}"),
("\uD580", "\\mathslbb{U}"),
("\uD581", "\\mathslbb{V}"),
("\uD582", "\\mathslbb{W}"),
("\uD583", "\\mathslbb{X}"),
("\uD584", "\\mathslbb{Y}"),
("\uD585", "\\mathslbb{Z}"),
("\uD586", "\\mathslbb{a}"),
("\uD587", "\\mathslbb{b}"),
("\uD588", "\\mathslbb{c}"),
("\uD589", "\\mathslbb{d}"),
("\uD58A", "\\mathslbb{e}"),
("\uD58B", "\\mathslbb{f}"),
("\uD58C", "\\mathslbb{g}"),
("\uD58D", "\\mathslbb{h}"),
("\uD58E", "\\mathslbb{i}"),
("\uD58F", "\\mathslbb{j}"),
("\uD590", "\\mathslbb{k}"),
("\uD591", "\\mathslbb{l}"),
("\uD592", "\\mathslbb{m}"),
("\uD593", "\\mathslbb{n}"),
("\uD594", "\\mathslbb{o}"),
("\uD595", "\\mathslbb{p}"),
("\uD596", "\\mathslbb{q}"),
("\uD597", "\\mathslbb{r}"),
("\uD598", "\\mathslbb{s}"),
("\uD599", "\\mathslbb{t}"),
("\uD59A", "\\mathslbb{u}"),
("\uD59B", "\\mathslbb{v}"),
("\uD59C", "\\mathslbb{w}"),
("\uD59D", "\\mathslbb{x}"),
("\uD59E", "\\mathslbb{y}"),
("\uD59F", "\\mathslbb{z}"),
("\uD5A0", "\\mathsf{A}"),
("\uD5A1", "\\mathsf{B}"),
("\uD5A2", "\\mathsf{C}"),
("\uD5A3", "\\mathsf{D}"),
("\uD5A4", "\\mathsf{E}"),
("\uD5A5", "\\mathsf{F}"),
("\uD5A6", "\\mathsf{G}"),
("\uD5A7", "\\mathsf{H}"),
("\uD5A8", "\\mathsf{I}"),
("\uD5A9", "\\mathsf{J}"),
("\uD5AA", "\\mathsf{K}"),
("\uD5AB", "\\mathsf{L}"),
("\uD5AC", "\\mathsf{M}"),
("\uD5AD", "\\mathsf{N}"),
("\uD5AE", "\\mathsf{O}"),
("\uD5AF", "\\mathsf{P}"),
("\uD5B0", "\\mathsf{Q}"),
("\uD5B1", "\\mathsf{R}"),
("\uD5B2", "\\mathsf{S}"),
("\uD5B3", "\\mathsf{T}"),
("\uD5B4", "\\mathsf{U}"),
("\uD5B5", "\\mathsf{V}"),
("\uD5B6", "\\mathsf{W}"),
("\uD5B7", "\\mathsf{X}"),
("\uD5B8", "\\mathsf{Y}"),
("\uD5B9", "\\mathsf{Z}"),
("\uD5BA", "\\mathsf{a}"),
("\uD5BB", "\\mathsf{b}"),
("\uD5BC", "\\mathsf{c}"),
("\uD5BD", "\\mathsf{d}"),
("\uD5BE", "\\mathsf{e}"),
("\uD5BF", "\\mathsf{f}"),
("\uD5C0", "\\mathsf{g}"),
("\uD5C1", "\\mathsf{h}"),
("\uD5C2", "\\mathsf{i}"),
("\uD5C3", "\\mathsf{j}"),
("\uD5C4", "\\mathsf{k}"),
("\uD5C5", "\\mathsf{l}"),
("\uD5C6", "\\mathsf{m}"),
("\uD5C7", "\\mathsf{n}"),
("\uD5C8", "\\mathsf{o}"),
("\uD5C9", "\\mathsf{p}"),
("\uD5CA", "\\mathsf{q}"),
("\uD5CB", "\\mathsf{r}"),
("\uD5CC", "\\mathsf{s}"),
("\uD5CD", "\\mathsf{t}"),
("\uD5CE", "\\mathsf{u}"),
("\uD5CF", "\\mathsf{v}"),
("\uD5D0", "\\mathsf{w}"),
("\uD5D1", "\\mathsf{x}"),
("\uD5D2", "\\mathsf{y}"),
("\uD5D3", "\\mathsf{z}"),
("\uD5D4", "\\mathsfbf{A}"),
("\uD5D5", "\\mathsfbf{B}"),
("\uD5D6", "\\mathsfbf{C}"),
("\uD5D7", "\\mathsfbf{D}"),
("\uD5D8", "\\mathsfbf{E}"),
("\uD5D9", "\\mathsfbf{F}"),
("\uD5DA", "\\mathsfbf{G}"),
("\uD5DB", "\\mathsfbf{H}"),
("\uD5DC", "\\mathsfbf{I}"),
("\uD5DD", "\\mathsfbf{J}"),
("\uD5DE", "\\mathsfbf{K}"),
("\uD5DF", "\\mathsfbf{L}"),
("\uD5E0", "\\mathsfbf{M}"),
("\uD5E1", "\\mathsfbf{N}"),
("\uD5E2", "\\mathsfbf{O}"),
("\uD5E3", "\\mathsfbf{P}"),
("\uD5E4", "\\mathsfbf{Q}"),
("\uD5E5", "\\mathsfbf{R}"),
("\uD5E6", "\\mathsfbf{S}"),
("\uD5E7", "\\mathsfbf{T}"),
("\uD5E8", "\\mathsfbf{U}"),
("\uD5E9", "\\mathsfbf{V}"),
("\uD5EA", "\\mathsfbf{W}"),
("\uD5EB", "\\mathsfbf{X}"),
("\uD5EC", "\\mathsfbf{Y}"),
("\uD5ED", "\\mathsfbf{Z}"),
("\uD5EE", "\\mathsfbf{a}"),
("\uD5EF", "\\mathsfbf{b}"),
("\uD5F0", "\\mathsfbf{c}"),
("\uD5F1", "\\mathsfbf{d}"),
("\uD5F2", "\\mathsfbf{e}"),
("\uD5F3", "\\mathsfbf{f}"),
("\uD5F4", "\\mathsfbf{g}"),
("\uD5F5", "\\mathsfbf{h}"),
("\uD5F6", "\\mathsfbf{i}"),
("\uD5F7", "\\mathsfbf{j}"),
("\uD5F8", "\\mathsfbf{k}"),
("\uD5F9", "\\mathsfbf{l}"),
("\uD5FA", "\\mathsfbf{m}"),
("\uD5FB", "\\mathsfbf{n}"),
("\uD5FC", "\\mathsfbf{o}"),
("\uD5FD", "\\mathsfbf{p}"),
("\uD5FE", "\\mathsfbf{q}"),
("\uD5FF", "\\mathsfbf{r}"),
("\uD600", "\\mathsfbf{s}"),
("\uD601", "\\mathsfbf{t}"),
("\uD602", "\\mathsfbf{u}"),
("\uD603", "\\mathsfbf{v}"),
("\uD604", "\\mathsfbf{w}"),
("\uD605", "\\mathsfbf{x}"),
("\uD606", "\\mathsfbf{y}"),
("\uD607", "\\mathsfbf{z}"),
("\uD608", "\\mathsfsl{A}"),
("\uD609", "\\mathsfsl{B}"),
("\uD60A", "\\mathsfsl{C}"),
("\uD60B", "\\mathsfsl{D}"),
("\uD60C", "\\mathsfsl{E}"),
("\uD60D", "\\mathsfsl{F}"),
("\uD60E", "\\mathsfsl{G}"),
("\uD60F", "\\mathsfsl{H}"),
("\uD610", "\\mathsfsl{I}"),
("\uD611", "\\mathsfsl{J}"),
("\uD612", "\\mathsfsl{K}"),
("\uD613", "\\mathsfsl{L}"),
("\uD614", "\\mathsfsl{M}"),
("\uD615", "\\mathsfsl{N}"),
("\uD616", "\\mathsfsl{O}"),
("\uD617", "\\mathsfsl{P}"),
("\uD618", "\\mathsfsl{Q}"),
("\uD619", "\\mathsfsl{R}"),
("\uD61A", "\\mathsfsl{S}"),
("\uD61B", "\\mathsfsl{T}"),
("\uD61C", "\\mathsfsl{U}"),
("\uD61D", "\\mathsfsl{V}"),
("\uD61E", "\\mathsfsl{W}"),
("\uD61F", "\\mathsfsl{X}"),
("\uD620", "\\mathsfsl{Y}"),
("\uD621", "\\mathsfsl{Z}"),
("\uD622", "\\mathsfsl{a}"),
("\uD623", "\\mathsfsl{b}"),
("\uD624", "\\mathsfsl{c}"),
("\uD625", "\\mathsfsl{d}"),
("\uD626", "\\mathsfsl{e}"),
("\uD627", "\\mathsfsl{f}"),
("\uD628", "\\mathsfsl{g}"),
("\uD629", "\\mathsfsl{h}"),
("\uD62A", "\\mathsfsl{i}"),
("\uD62B", "\\mathsfsl{j}"),
("\uD62C", "\\mathsfsl{k}"),
("\uD62D", "\\mathsfsl{l}"),
("\uD62E", "\\mathsfsl{m}"),
("\uD62F", "\\mathsfsl{n}"),
("\uD630", "\\mathsfsl{o}"),
("\uD631", "\\mathsfsl{p}"),
("\uD632", "\\mathsfsl{q}"),
("\uD633", "\\mathsfsl{r}"),
("\uD634", "\\mathsfsl{s}"),
("\uD635", "\\mathsfsl{t}"),
("\uD636", "\\mathsfsl{u}"),
("\uD637", "\\mathsfsl{v}"),
("\uD638", "\\mathsfsl{w}"),
("\uD639", "\\mathsfsl{x}"),
("\uD63A", "\\mathsfsl{y}"),
("\uD63B", "\\mathsfsl{z}"),
("\uD63C", "\\mathsfbfsl{A}"),
("\uD63D", "\\mathsfbfsl{B}"),
("\uD63E", "\\mathsfbfsl{C}"),
("\uD63F", "\\mathsfbfsl{D}"),
("\uD640", "\\mathsfbfsl{E}"),
("\uD641", "\\mathsfbfsl{F}"),
("\uD642", "\\mathsfbfsl{G}"),
("\uD643", "\\mathsfbfsl{H}"),
("\uD644", "\\mathsfbfsl{I}"),
("\uD645", "\\mathsfbfsl{J}"),
("\uD646", "\\mathsfbfsl{K}"),
("\uD647", "\\mathsfbfsl{L}"),
("\uD648", "\\mathsfbfsl{M}"),
("\uD649", "\\mathsfbfsl{N}"),
("\uD64A", "\\mathsfbfsl{O}"),
("\uD64B", "\\mathsfbfsl{P}"),
("\uD64C", "\\mathsfbfsl{Q}"),
("\uD64D", "\\mathsfbfsl{R}"),
("\uD64E", "\\mathsfbfsl{S}"),
("\uD64F", "\\mathsfbfsl{T}"),
("\uD650", "\\mathsfbfsl{U}"),
("\uD651", "\\mathsfbfsl{V}"),
("\uD652", "\\mathsfbfsl{W}"),
("\uD653", "\\mathsfbfsl{X}"),
("\uD654", "\\mathsfbfsl{Y}"),
("\uD655", "\\mathsfbfsl{Z}"),
("\uD656", "\\mathsfbfsl{a}"),
("\uD657", "\\mathsfbfsl{b}"),
("\uD658", "\\mathsfbfsl{c}"),
("\uD659", "\\mathsfbfsl{d}"),
("\uD65A", "\\mathsfbfsl{e}"),
("\uD65B", "\\mathsfbfsl{f}"),
("\uD65C", "\\mathsfbfsl{g}"),
("\uD65D", "\\mathsfbfsl{h}"),
("\uD65E", "\\mathsfbfsl{i}"),
("\uD65F", "\\mathsfbfsl{j}"),
("\uD660", "\\mathsfbfsl{k}"),
("\uD661", "\\mathsfbfsl{l}"),
("\uD662", "\\mathsfbfsl{m}"),
("\uD663", "\\mathsfbfsl{n}"),
("\uD664", "\\mathsfbfsl{o}"),
("\uD665", "\\mathsfbfsl{p}"),
("\uD666", "\\mathsfbfsl{q}"),
("\uD667", "\\mathsfbfsl{r}"),
("\uD668", "\\mathsfbfsl{s}"),
("\uD669", "\\mathsfbfsl{t}"),
("\uD66A", "\\mathsfbfsl{u}"),
("\uD66B", "\\mathsfbfsl{v}"),
("\uD66C", "\\mathsfbfsl{w}"),
("\uD66D", "\\mathsfbfsl{x}"),
("\uD66E", "\\mathsfbfsl{y}"),
("\uD66F", "\\mathsfbfsl{z}"),
("\uD670", "\\mathtt{A}"),
("\uD671", "\\mathtt{B}"),
("\uD672", "\\mathtt{C}"),
("\uD673", "\\mathtt{D}"),
("\uD674", "\\mathtt{E}"),
("\uD675", "\\mathtt{F}"),
("\uD676", "\\mathtt{G}"),
("\uD677", "\\mathtt{H}"),
("\uD678", "\\mathtt{I}"),
("\uD679", "\\mathtt{J}"),
("\uD67A", "\\mathtt{K}"),
("\uD67B", "\\mathtt{L}"),
("\uD67C", "\\mathtt{M}"),
("\uD67D", "\\mathtt{N}"),
("\uD67E", "\\mathtt{O}"),
("\uD67F", "\\mathtt{P}"),
("\uD680", "\\mathtt{Q}"),
("\uD681", "\\mathtt{R}"),
("\uD682", "\\mathtt{S}"),
("\uD683", "\\mathtt{T}"),
("\uD684", "\\mathtt{U}"),
("\uD685", "\\mathtt{V}"),
("\uD686", "\\mathtt{W}"),
("\uD687", "\\mathtt{X}"),
("\uD688", "\\mathtt{Y}"),
("\uD689", "\\mathtt{Z}"),
("\uD68A", "\\mathtt{a}"),
("\uD68B", "\\mathtt{b}"),
("\uD68C", "\\mathtt{c}"),
("\uD68D", "\\mathtt{d}"),
("\uD68E", "\\mathtt{e}"),
("\uD68F", "\\mathtt{f}"),
("\uD690", "\\mathtt{g}"),
("\uD691", "\\mathtt{h}"),
("\uD692", "\\mathtt{i}"),
("\uD693", "\\mathtt{j}"),
("\uD694", "\\mathtt{k}"),
("\uD695", "\\mathtt{l}"),
("\uD696", "\\mathtt{m}"),
("\uD697", "\\mathtt{n}"),
("\uD698", "\\mathtt{o}"),
("\uD699", "\\mathtt{p}"),
("\uD69A", "\\mathtt{q}"),
("\uD69B", "\\mathtt{r}"),
("\uD69C", "\\mathtt{s}"),
("\uD69D", "\\mathtt{t}"),
("\uD69E", "\\mathtt{u}"),
("\uD69F", "\\mathtt{v}"),
("\uD6A0", "\\mathtt{w}"),
("\uD6A1", "\\mathtt{x}"),
("\uD6A2", "\\mathtt{y}"),
("\uD6A3", "\\mathtt{z}"),
("\uD6A8", "\\mathbf{\\Alpha}"),
("\uD6A9", "\\mathbf{\\Beta}"),
("\uD6AA", "\\mathbf{\\Gamma}"),
("\uD6AB", "\\mathbf{\\Delta}"),
("\uD6AC", "\\mathbf{\\Epsilon}"),
("\uD6AD", "\\mathbf{\\Zeta}"),
("\uD6AE", "\\mathbf{\\Eta}"),
("\uD6AF", "\\mathbf{\\Theta}"),
("\uD6B0", "\\mathbf{\\Iota}"),
("\uD6B1", "\\mathbf{\\Kappa}"),
("\uD6B2", "\\mathbf{\\Lambda}"),
("\uD6B5", "\\mathbf{\\Xi}"),
("\uD6B7", "\\mathbf{\\Pi}"),
("\uD6B8", "\\mathbf{\\Rho}"),
("\uD6B9", "\\mathbf{\\vartheta}"),
("\uD6BA", "\\mathbf{\\Sigma}"),
("\uD6BB", "\\mathbf{\\Tau}"),
("\uD6BC", "\\mathbf{\\Upsilon}"),
("\uD6BD", "\\mathbf{\\Phi}"),
("\uD6BE", "\\mathbf{\\Chi}"),
("\uD6BF", "\\mathbf{\\Psi}"),
("\uD6C0", "\\mathbf{\\Omega}"),
("\uD6C1", "\\mathbf{\\nabla}"),
("\uD6C2", "\\mathbf{\\Alpha}"),
("\uD6C3", "\\mathbf{\\Beta}"),
("\uD6C4", "\\mathbf{\\Gamma}"),
("\uD6C5", "\\mathbf{\\Delta}"),
("\uD6C6", "\\mathbf{\\Epsilon}"),
("\uD6C7", "\\mathbf{\\Zeta}"),
("\uD6C8", "\\mathbf{\\Eta}"),
("\uD6C9", "\\mathbf{\\theta}"),
("\uD6CA", "\\mathbf{\\Iota}"),
("\uD6CB", "\\mathbf{\\Kappa}"),
("\uD6CC", "\\mathbf{\\Lambda}"),
("\uD6CF", "\\mathbf{\\Xi}"),
("\uD6D1", "\\mathbf{\\Pi}"),
("\uD6D2", "\\mathbf{\\Rho}"),
("\uD6D3", "\\mathbf{\\varsigma}"),
("\uD6D4", "\\mathbf{\\Sigma}"),
("\uD6D5", "\\mathbf{\\Tau}"),
("\uD6D6", "\\mathbf{\\Upsilon}"),
("\uD6D7", "\\mathbf{\\Phi}"),
("\uD6D8", "\\mathbf{\\Chi}"),
("\uD6D9", "\\mathbf{\\Psi}"),
("\uD6DA", "\\mathbf{\\Omega}"),
("\uD6DB", "\\partial "),
("\uD6DC", "\\in"),
("\uD6DD", "\\mathbf{\\vartheta}"),
("\uD6DE", "\\mathbf{\\varkappa}"),
("\uD6DF", "\\mathbf{\\phi}"),
("\uD6E0", "\\mathbf{\\varrho}"),
("\uD6E1", "\\mathbf{\\varpi}"),
("\uD6E2", "\\mathsl{\\Alpha}"),
("\uD6E3", "\\mathsl{\\Beta}"),
("\uD6E4", "\\mathsl{\\Gamma}"),
("\uD6E5", "\\mathsl{\\Delta}"),
("\uD6E6", "\\mathsl{\\Epsilon}"),
("\uD6E7", "\\mathsl{\\Zeta}"),
("\uD6E8", "\\mathsl{\\Eta}"),
("\uD6E9", "\\mathsl{\\Theta}"),
("\uD6EA", "\\mathsl{\\Iota}"),
("\uD6EB", "\\mathsl{\\Kappa}"),
("\uD6EC", "\\mathsl{\\Lambda}"),
("\uD6EF", "\\mathsl{\\Xi}"),
("\uD6F1", "\\mathsl{\\Pi}"),
("\uD6F2", "\\mathsl{\\Rho}"),
("\uD6F3", "\\mathsl{\\vartheta}"),
("\uD6F4", "\\mathsl{\\Sigma}"),
("\uD6F5", "\\mathsl{\\Tau}"),
("\uD6F6", "\\mathsl{\\Upsilon}"),
("\uD6F7", "\\mathsl{\\Phi}"),
("\uD6F8", "\\mathsl{\\Chi}"),
("\uD6F9", "\\mathsl{\\Psi}"),
("\uD6FA", "\\mathsl{\\Omega}"),
("\uD6FB", "\\mathsl{\\nabla}"),
("\uD6FC", "\\mathsl{\\Alpha}"),
("\uD6FD", "\\mathsl{\\Beta}"),
("\uD6FE", "\\mathsl{\\Gamma}"),
("\uD6FF", "\\mathsl{\\Delta}"),
("\uD700", "\\mathsl{\\Epsilon}"),
("\uD701", "\\mathsl{\\Zeta}"),
("\uD702", "\\mathsl{\\Eta}"),
("\uD703", "\\mathsl{\\Theta}"),
("\uD704", "\\mathsl{\\Iota}"),
("\uD705", "\\mathsl{\\Kappa}"),
("\uD706", "\\mathsl{\\Lambda}"),
("\uD709", "\\mathsl{\\Xi}"),
("\uD70B", "\\mathsl{\\Pi}"),
("\uD70C", "\\mathsl{\\Rho}"),
("\uD70D", "\\mathsl{\\varsigma}"),
("\uD70E", "\\mathsl{\\Sigma}"),
("\uD70F", "\\mathsl{\\Tau}"),
("\uD710", "\\mathsl{\\Upsilon}"),
("\uD711", "\\mathsl{\\Phi}"),
("\uD712", "\\mathsl{\\Chi}"),
("\uD713", "\\mathsl{\\Psi}"),
("\uD714", "\\mathsl{\\Omega}"),
("\uD715", "\\partial "),
("\uD716", "\\in"),
("\uD717", "\\mathsl{\\vartheta}"),
("\uD718", "\\mathsl{\\varkappa}"),
("\uD719", "\\mathsl{\\phi}"),
("\uD71A", "\\mathsl{\\varrho}"),
("\uD71B", "\\mathsl{\\varpi}"),
("\uD71C", "\\mathbit{\\Alpha}"),
("\uD71D", "\\mathbit{\\Beta}"),
("\uD71E", "\\mathbit{\\Gamma}"),
("\uD71F", "\\mathbit{\\Delta}"),
("\uD720", "\\mathbit{\\Epsilon}"),
("\uD721", "\\mathbit{\\Zeta}"),
("\uD722", "\\mathbit{\\Eta}"),
("\uD723", "\\mathbit{\\Theta}"),
("\uD724", "\\mathbit{\\Iota}"),
("\uD725", "\\mathbit{\\Kappa}"),
("\uD726", "\\mathbit{\\Lambda}"),
("\uD729", "\\mathbit{\\Xi}"),
("\uD72B", "\\mathbit{\\Pi}"),
("\uD72C", "\\mathbit{\\Rho}"),
("\uD72D", "\\mathbit{O}"),
("\uD72E", "\\mathbit{\\Sigma}"),
("\uD72F", "\\mathbit{\\Tau}"),
("\uD730", "\\mathbit{\\Upsilon}"),
("\uD731", "\\mathbit{\\Phi}"),
("\uD732", "\\mathbit{\\Chi}"),
("\uD733", "\\mathbit{\\Psi}"),
("\uD734", "\\mathbit{\\Omega}"),
("\uD735", "\\mathbit{\\nabla}"),
("\uD736", "\\mathbit{\\Alpha}"),
("\uD737", "\\mathbit{\\Beta}"),
("\uD738", "\\mathbit{\\Gamma}"),
("\uD739", "\\mathbit{\\Delta}"),
("\uD73A", "\\mathbit{\\Epsilon}"),
("\uD73B", "\\mathbit{\\Zeta}"),
("\uD73C", "\\mathbit{\\Eta}"),
("\uD73D", "\\mathbit{\\Theta}"),
("\uD73E", "\\mathbit{\\Iota}"),
("\uD73F", "\\mathbit{\\Kappa}"),
("\uD740", "\\mathbit{\\Lambda}"),
("\uD743", "\\mathbit{\\Xi}"),
("\uD745", "\\mathbit{\\Pi}"),
("\uD746", "\\mathbit{\\Rho}"),
("\uD747", "\\mathbit{\\varsigma}"),
("\uD748", "\\mathbit{\\Sigma}"),
("\uD749", "\\mathbit{\\Tau}"),
("\uD74A", "\\mathbit{\\Upsilon}"),
("\uD74B", "\\mathbit{\\Phi}"),
("\uD74C", "\\mathbit{\\Chi}"),
("\uD74D", "\\mathbit{\\Psi}"),
("\uD74E", "\\mathbit{\\Omega}"),
("\uD74F", "\\partial "),
("\uD750", "\\in"),
("\uD751", "\\mathbit{\\vartheta}"),
("\uD752", "\\mathbit{\\varkappa}"),
("\uD753", "\\mathbit{\\phi}"),
("\uD754", "\\mathbit{\\varrho}"),
("\uD755", "\\mathbit{\\varpi}"),
("\uD756", "\\mathsfbf{\\Alpha}"),
("\uD757", "\\mathsfbf{\\Beta}"),
("\uD758", "\\mathsfbf{\\Gamma}"),
("\uD759", "\\mathsfbf{\\Delta}"),
("\uD75A", "\\mathsfbf{\\Epsilon}"),
("\uD75B", "\\mathsfbf{\\Zeta}"),
("\uD75C", "\\mathsfbf{\\Eta}"),
("\uD75D", "\\mathsfbf{\\Theta}"),
("\uD75E", "\\mathsfbf{\\Iota}"),
("\uD75F", "\\mathsfbf{\\Kappa}"),
("\uD760", "\\mathsfbf{\\Lambda}"),
("\uD763", "\\mathsfbf{\\Xi}"),
("\uD765", "\\mathsfbf{\\Pi}"),
("\uD766", "\\mathsfbf{\\Rho}"),
("\uD767", "\\mathsfbf{\\vartheta}"),
("\uD768", "\\mathsfbf{\\Sigma}"),
("\uD769", "\\mathsfbf{\\Tau}"),
("\uD76A", "\\mathsfbf{\\Upsilon}"),
("\uD76B", "\\mathsfbf{\\Phi}"),
("\uD76C", "\\mathsfbf{\\Chi}"),
("\uD76D", "\\mathsfbf{\\Psi}"),
("\uD76E", "\\mathsfbf{\\Omega}"),
("\uD76F", "\\mathsfbf{\\nabla}"),
("\uD770", "\\mathsfbf{\\Alpha}"),
("\uD771", "\\mathsfbf{\\Beta}"),
("\uD772", "\\mathsfbf{\\Gamma}"),
("\uD773", "\\mathsfbf{\\Delta}"),
("\uD774", "\\mathsfbf{\\Epsilon}"),
("\uD775", "\\mathsfbf{\\Zeta}"),
("\uD776", "\\mathsfbf{\\Eta}"),
("\uD777", "\\mathsfbf{\\Theta}"),
("\uD778", "\\mathsfbf{\\Iota}"),
("\uD779", "\\mathsfbf{\\Kappa}"),
("\uD77A", "\\mathsfbf{\\Lambda}"),
("\uD77D", "\\mathsfbf{\\Xi}"),
("\uD77F", "\\mathsfbf{\\Pi}"),
("\uD780", "\\mathsfbf{\\Rho}"),
("\uD781", "\\mathsfbf{\\varsigma}"),
("\uD782", "\\mathsfbf{\\Sigma}"),
("\uD783", "\\mathsfbf{\\Tau}"),
("\uD784", "\\mathsfbf{\\Upsilon}"),
("\uD785", "\\mathsfbf{\\Phi}"),
("\uD786", "\\mathsfbf{\\Chi}"),
("\uD787", "\\mathsfbf{\\Psi}"),
("\uD788", "\\mathsfbf{\\Omega}"),
("\uD789", "\\partial "),
("\uD78A", "\\in"),
("\uD78B", "\\mathsfbf{\\vartheta}"),
("\uD78C", "\\mathsfbf{\\varkappa}"),
("\uD78D", "\\mathsfbf{\\phi}"),
("\uD78E", "\\mathsfbf{\\varrho}"),
("\uD78F", "\\mathsfbf{\\varpi}"),
("\uD790", "\\mathsfbfsl{\\Alpha}"),
("\uD791", "\\mathsfbfsl{\\Beta}"),
("\uD792", "\\mathsfbfsl{\\Gamma}"),
("\uD793", "\\mathsfbfsl{\\Delta}"),
("\uD794", "\\mathsfbfsl{\\Epsilon}"),
("\uD795", "\\mathsfbfsl{\\Zeta}"),
("\uD796", "\\mathsfbfsl{\\Eta}"),
("\uD797", "\\mathsfbfsl{\\vartheta}"),
("\uD798", "\\mathsfbfsl{\\Iota}"),
("\uD799", "\\mathsfbfsl{\\Kappa}"),
("\uD79A", "\\mathsfbfsl{\\Lambda}"),
("\uD79D", "\\mathsfbfsl{\\Xi}"),
("\uD79F", "\\mathsfbfsl{\\Pi}"),
("\uD7A0", "\\mathsfbfsl{\\Rho}"),
("\uD7A1", "\\mathsfbfsl{\\vartheta}"),
("\uD7A2", "\\mathsfbfsl{\\Sigma}"),
("\uD7A3", "\\mathsfbfsl{\\Tau}"),
("\uD7A4", "\\mathsfbfsl{\\Upsilon}"),
("\uD7A5", "\\mathsfbfsl{\\Phi}"),
("\uD7A6", "\\mathsfbfsl{\\Chi}"),
("\uD7A7", "\\mathsfbfsl{\\Psi}"),
("\uD7A8", "\\mathsfbfsl{\\Omega}"),
("\uD7A9", "\\mathsfbfsl{\\nabla}"),
("\uD7AA", "\\mathsfbfsl{\\Alpha}"),
("\uD7AB", "\\mathsfbfsl{\\Beta}"),
("\uD7AC", "\\mathsfbfsl{\\Gamma}"),
("\uD7AD", "\\mathsfbfsl{\\Delta}"),
("\uD7AE", "\\mathsfbfsl{\\Epsilon}"),
("\uD7AF", "\\mathsfbfsl{\\Zeta}"),
("\uD7B0", "\\mathsfbfsl{\\Eta}"),
("\uD7B1", "\\mathsfbfsl{\\vartheta}"),
("\uD7B2", "\\mathsfbfsl{\\Iota}"),
("\uD7B3", "\\mathsfbfsl{\\Kappa}"),
("\uD7B4", "\\mathsfbfsl{\\Lambda}"),
("\uD7B7", "\\mathsfbfsl{\\Xi}"),
("\uD7B9", "\\mathsfbfsl{\\Pi}"),
("\uD7BA", "\\mathsfbfsl{\\Rho}"),
("\uD7BB", "\\mathsfbfsl{\\varsigma}"),
("\uD7BC", "\\mathsfbfsl{\\Sigma}"),
("\uD7BD", "\\mathsfbfsl{\\Tau}"),
("\uD7BE", "\\mathsfbfsl{\\Upsilon}"),
("\uD7BF", "\\mathsfbfsl{\\Phi}"),
("\uD7C0", "\\mathsfbfsl{\\Chi}"),
("\uD7C1", "\\mathsfbfsl{\\Psi}"),
("\uD7C2", "\\mathsfbfsl{\\Omega}"),
("\uD7C3", "\\partial "),
("\uD7C4", "\\in"),
("\uD7C5", "\\mathsfbfsl{\\vartheta}"),
("\uD7C6", "\\mathsfbfsl{\\varkappa}"),
("\uD7C7", "\\mathsfbfsl{\\phi}"),
("\uD7C8", "\\mathsfbfsl{\\varrho}"),
("\uD7C9", "\\mathsfbfsl{\\varpi}"),
("\uD7CE", "\\mathbf{0}"),
("\uD7CF", "\\mathbf{1}"),
("\uD7D0", "\\mathbf{2}"),
("\uD7D1", "\\mathbf{3}"),
("\uD7D2", "\\mathbf{4}"),
("\uD7D3", "\\mathbf{5}"),
("\uD7D4", "\\mathbf{6}"),
("\uD7D5", "\\mathbf{7}"),
("\uD7D6", "\\mathbf{8}"),
("\uD7D7", "\\mathbf{9}"),
("\uD7D8", "\\mathbb{0}"),
("\uD7D9", "\\mathbb{1}"),
("\uD7DA", "\\mathbb{2}"),
("\uD7DB", "\\mathbb{3}"),
("\uD7DC", "\\mathbb{4}"),
("\uD7DD", "\\mathbb{5}"),
("\uD7DE", "\\mathbb{6}"),
("\uD7DF", "\\mathbb{7}"),
("\uD7E0", "\\mathbb{8}"),
("\uD7E1", "\\mathbb{9}"),
("\uD7E2", "\\mathsf{0}"),
("\uD7E3", "\\mathsf{1}"),
("\uD7E4", "\\mathsf{2}"),
("\uD7E5", "\\mathsf{3}"),
("\uD7E6", "\\mathsf{4}"),
("\uD7E7", "\\mathsf{5}"),
("\uD7E8", "\\mathsf{6}"),
("\uD7E9", "\\mathsf{7}"),
("\uD7EA", "\\mathsf{8}"),
("\uD7EB", "\\mathsf{9}"),
("\uD7EC", "\\mathsfbf{0}"),
("\uD7ED", "\\mathsfbf{1}"),
("\uD7EE", "\\mathsfbf{2}"),
("\uD7EF", "\\mathsfbf{3}"),
("\uD7F0", "\\mathsfbf{4}"),
("\uD7F1", "\\mathsfbf{5}"),
("\uD7F2", "\\mathsfbf{6}"),
("\uD7F3", "\\mathsfbf{7}"),
("\uD7F4", "\\mathsfbf{8}"),
("\uD7F5", "\\mathsfbf{9}"),
("\uD7F6", "\\mathtt{0}"),
("\uD7F7", "\\mathtt{1}"),
("\uD7F8", "\\mathtt{2}"),
("\uD7F9", "\\mathtt{3}"),
("\uD7FA", "\\mathtt{4}"),
("\uD7FB", "\\mathtt{5}"),
("\uD7FC", "\\mathtt{6}"),
("\uD7FD", "\\mathtt{7}"),
("\uD7FE", "\\mathtt{8}"),
("\uD7FF", "\\mathtt{9}"),
)
# Public, backwards-compatible aliases for the conversion tables defined above.
# Downstream code historically imported these names, so they must stay intact.
unicode_to_latex = to_latex
unicode_to_crappy_latex1 = to_crappy1
unicode_to_crappy_latex2 = to_crappy2
# Dict view of the (unicode char, latex repr) pairs for O(1) lookups.
# Later duplicate keys override earlier ones, same as dict(pairs) would do.
unicode_to_latex_map = {uni_char: latex_repr for uni_char, latex_repr in unicode_to_latex}
# Build the derived reverse/lookup structures at import time.
prepare_unicode_to_latex()
python-bibtexparser-1.4.3/bibtexparser/tests/ 0000775 0000000 0000000 00000000000 14731101544 0021364 5 ustar 00root root 0000000 0000000 python-bibtexparser-1.4.3/bibtexparser/tests/data/ 0000775 0000000 0000000 00000000000 14731101544 0022275 5 ustar 00root root 0000000 0000000 python-bibtexparser-1.4.3/bibtexparser/tests/data/article.bib 0000664 0000000 0000000 00000000546 14731101544 0024403 0 ustar 00root root 0000000 0000000 @ARTICLE{Cesar2013,
author = {Jean César},
title = {An amazing title},
year = {2013},
month = "jan",
volume = {12},
pages = {12-23},
journal = {Nice Journal},
abstract = {This is an abstract. This line should be long enough to test
multilines... and with a french érudit word},
comments = {A comment},
keyword = {keyword1, keyword2},
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/article_comma_first.bib 0000664 0000000 0000000 00000000705 14731101544 0026763 0 ustar 00root root 0000000 0000000 @ARTICLE{Cesar2013
, author = {Jean Cesar}
, title = {An amazing title}
, year = {2013}
, volume = {12}
, journal = {Nice Journal}
, comments = {A comment}
, keyword = {keyword1, keyword2}
}
@ARTICLE{ Baltazar2013
, author = {Jean Baltazar}
, title = {An amazing title}
, year = {2013}
, volume = {12}
, journal = {Nice Journal}
, comments = {A comment}
, keyword = {keyword1, keyword2}}
python-bibtexparser-1.4.3/bibtexparser/tests/data/article_comma_first_and_trailing_comma_output.bib 0000664 0000000 0000000 00000000550 14731101544 0034270 0 ustar 00root root 0000000 0000000 @article{Cesar2013
, abstract = {This is an abstract. This line should be long enough to test
multilines... and with a french érudit word}
, author = {Jean César}
, comments = {A comment}
, journal = {Nice Journal}
, keyword = {keyword1, keyword2}
, month = {jan}
, pages = {12-23}
, title = {An amazing title}
, volume = {12}
, year = {2013}
,
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/article_comma_normal_multiple.bib 0000664 0000000 0000000 00000003147 14731101544 0031042 0 ustar 00root root 0000000 0000000 @article{FuMetalhalideperovskite2019,
author = {Yongping Fu and Haiming Zhu and Jie Chen and Matthew P. Hautzinger and X.-Y. Zhu and Song Jin},
doi = {10.1038/s41578-019-0080-9},
journal = {Nature Reviews Materials},
month = {feb},
number = {3},
pages = {169-188},
publisher = {Springer Science and Business Media {LLC}},
title = {Metal halide perovskite nanostructures for optoelectronic applications and the study of physical properties},
url = {https://www.nature.com/articles/s41578-019-0080-9},
volume = {4},
year = {2019}
}
@article{SunEnablingSiliconSolar2014,
author = {Ke Sun and Shaohua Shen and Yongqi Liang and Paul E. Burrows and Samuel S. Mao and Deli Wang},
doi = {10.1021/cr300459q},
journal = {Chemical Reviews},
month = {aug},
number = {17},
pages = {8662-8719},
publisher = {American Chemical Society ({ACS})},
title = {Enabling Silicon for Solar-Fuel Production},
url = {http://pubs.acs.org/doi/10.1021/cr300459q},
volume = {114},
year = {2014}
}
@article{LiuPhotocatalytichydrogenproduction2016,
author = {Maochang Liu and Yubin Chen and Jinzhan Su and Jinwen Shi and Xixi Wang and Liejin Guo},
doi = {10.1038/nenergy.2016.151},
impactfactor = {54.000},
journal = {Nature Energy},
month = {sep},
number = {11},
pages = {16151},
publisher = {Springer Science and Business Media {LLC}},
title = {Photocatalytic hydrogen production using twinned nanocrystals and an unanchored {NiSx} co-catalyst},
url = {http://www.nature.com/articles/nenergy2016151},
volume = {1},
year = {2016}
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/article_comma_normal_single.bib 0000664 0000000 0000000 00000001076 14731101544 0030467 0 ustar 00root root 0000000 0000000 @article{FuMetalhalideperovskite2019,
author = {Yongping Fu and Haiming Zhu and Jie Chen and Matthew P. Hautzinger and X.-Y. Zhu and Song Jin},
doi = {10.1038/s41578-019-0080-9},
journal = {Nature Reviews Materials},
month = {feb},
number = {3},
pages = {169-188},
publisher = {Springer Science and Business Media {LLC}},
title = {Metal halide perovskite nanostructures for optoelectronic applications and the study of physical properties},
url = {https://www.nature.com/articles/s41578-019-0080-9},
volume = {4},
year = {2019}
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/article_field_name_with_underscore.bib 0000664 0000000 0000000 00000000431 14731101544 0032023 0 ustar 00root root 0000000 0000000 @ARTICLE{Cesar2013,
author = {Jean César},
title = {An amazing title},
year = {2013},
volume = {12},
pages = {12-23},
journal = {Nice Journal},
comments = {A comment},
keyword = {keyword1, keyword2},
strange-field-name2 = {val2},
strange_field_name = {val},
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/article_homogenize.bib 0000664 0000000 0000000 00000000716 14731101544 0026626 0 ustar 00root root 0000000 0000000 @ARTICLE{Cesar2013,
authors = {Jean César},
title = {An amazing title},
year = {2013},
month = "jan",
volume = {12},
pages = {12-23},
journal = {Nice Journal},
abstract = {This is an abstract. This line should be long enough to test
multilines... and with a french érudit word},
comments = {A comment},
editors = {Edith Or},
keywords = {keyword1, keyword2},
links = {http://my.link/to-content},
subjects = "Some topic of interest",
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/article_missing_coma.bib 0000664 0000000 0000000 00000001465 14731101544 0027134 0 ustar 00root root 0000000 0000000 @ARTICLE{Cesar2013,
author = {Jean Cesar},
title = {An amazing title},
year = {2013},
volume = {12},
journal = {Nice Journal},
comments = {A comment},
keyword = {keyword1, keyword2}
}
@ARTICLE{Baltazar2013,
author = {Jean Baltazar},
title = {An amazing title},
year = {2013},
volume = {12},
journal = {Nice Journal},
comments = {A comment},
keyword = {keyword1, keyword2}}
@ARTICLE{Aimar2013,
author = {Jean Aimar},
title = {An amazing title},
year = {2013},
volume = {12},
journal = {Nice Journal},
comments = {A comment},
keyword = {keyword1, keyword2},
month = "january"
}
@ARTICLE{Doute2013,
author = {Jean Doute},
title = {An amazing title},
volume = {12},
journal = {Nice Journal},
comments = {A comment},
keyword = {keyword1, keyword2},
year = "2013"
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/article_multilines.bib 0000664 0000000 0000000 00000000751 14731101544 0026646 0 ustar 00root root 0000000 0000000 @ARTICLE{Cesar2013,
author = {Jean César},
title = {A mutline line title is very amazing. It should be
long enough to test multilines... with two lines or should we
even test three lines... What an amazing title.},
year = {2013},
journal = {Nice Journal},
abstract = {This is an abstract. This line should be long enough to test
multilines... and with a french érudit word},
comments = {A comment},
keyword = {keyword1, keyword2,
multiline-keyword1, multiline-keyword2},
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/article_no_braces.bib 0000664 0000000 0000000 00000000556 14731101544 0026417 0 ustar 00root root 0000000 0000000 @ARTICLE{Cesar2013,
author = "Jean C{\'e}sar{\"u}",
title = "An amazing title",
year = "2013",
month = "jan",
volume = "12",
pages = "12-23",
journal = "Nice Journal",
abstract = "This is an abstract. This line should be long enough to test
multilines... and with a french érudit word",
comments = "A comment",
keyword = "keyword1, keyword2",
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/article_oneline.bib 0000664 0000000 0000000 00000000557 14731101544 0026116 0 ustar 00root root 0000000 0000000 @ARTICLE{Cesar2013, author = {Jean Cesar}, title = {An amazing title}, year = {2013}, volume = {12}, journal = {Nice Journal}, comments = {A comment}, keyword = {keyword1, keyword2}}
@ARTICLE{ Baltazar2013,author = {Jean Baltazar},title = {An amazing title},year = {2013},volume = {12},journal = {Nice Journal},comments = {A comment},keyword = {keyword1, keyword2}}
python-bibtexparser-1.4.3/bibtexparser/tests/data/article_output.bib 0000664 0000000 0000000 00000000533 14731101544 0026017 0 ustar 00root root 0000000 0000000 @article{Cesar2013,
abstract = {This is an abstract. This line should be long enough to test
multilines... and with a french érudit word},
author = {Jean César},
comments = {A comment},
journal = {Nice Journal},
keyword = {keyword1, keyword2},
month = {jan},
pages = {12-23},
title = {An amazing title},
volume = {12},
year = {2013}
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/article_start_with_bom.bib 0000664 0000000 0000000 00000000552 14731101544 0027505 0 ustar 00root root 0000000 0000000
@ARTICLE{Cesar2013,
author = {Jean César},
title = {An amazing title},
year = {2013},
month = "jan",
volume = {12},
pages = {12-23},
journal = {Nice Journal},
abstract = {This is an abstract. This line should be long enough to test
multilines... and with a french érudit word},
comments = {A comment},
keyword = {keyword1, keyword2},
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/article_start_with_whitespace.bib 0000664 0000000 0000000 00000000431 14731101544 0031060 0 ustar 00root root 0000000 0000000 @ARTICLE{Cesar2013,
author = {Jean Cesar},
title = {An amazing title},
year = {2013},
volume = {12},
journal = {Nice Journal}
}
@ARTICLE{Cesar2014,
author = {Jean Cesar},
title = {An amazing title},
year = {2014},
volume = {12},
journal = {Nice Journal}
} python-bibtexparser-1.4.3/bibtexparser/tests/data/article_trailing_comma_output.bib 0000664 0000000 0000000 00000000534 14731101544 0031065 0 ustar 00root root 0000000 0000000 @article{Cesar2013,
abstract = {This is an abstract. This line should be long enough to test
multilines... and with a french érudit word},
author = {Jean César},
comments = {A comment},
journal = {Nice Journal},
keyword = {keyword1, keyword2},
month = {jan},
pages = {12-23},
title = {An amazing title},
volume = {12},
year = {2013},
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/article_with_annotation.bib 0000664 0000000 0000000 00000000603 14731101544 0027662 0 ustar 00root root 0000000 0000000 @ARTICLE{Cesar2013,
author = {Jean César},
author+an = {1=highlight},
title = {An amazing title},
year = {2013},
month = "jan",
volume = {12},
pages = {12-23},
journal = {Nice Journal},
abstract = {This is an abstract. This line should be long enough to test
multilines... and with a french érudit word},
comments = {A comment},
keyword = {keyword1, keyword2},
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/article_with_annotation_output.bib 0000664 0000000 0000000 00000000567 14731101544 0031313 0 ustar 00root root 0000000 0000000 @article{Cesar2013,
abstract = {This is an abstract. This line should be long enough to test
multilines... and with a french érudit word},
author = {Jean César},
author+an = {1=highlight},
comments = {A comment},
journal = {Nice Journal},
keyword = {keyword1, keyword2},
month = {jan},
pages = {12-23},
title = {An amazing title},
volume = {12},
year = {2013}
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/article_with_protection_braces.bib 0000664 0000000 0000000 00000000552 14731101544 0031220 0 ustar 00root root 0000000 0000000 @ARTICLE{Cesar2013,
author = {Jean César},
title = {{An amazing title}},
year = {2013},
month = "jan",
volume = {12},
pages = {12-23},
journal = {{Nice Journal}},
abstract = {This is an abstract. This line should be long enough to test
multilines... and with a french érudit word},
comments = {A comment},
keyword = {keyword1, keyword2},
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/article_with_special_characters.bib 0000664 0000000 0000000 00000000556 14731101544 0031336 0 ustar 00root root 0000000 0000000 @ARTICLE{Cesar2013,
author = {Jean C{\'e}sar{\"u}},
title = {An amazing title},
year = {2013},
month = "jan",
volume = {12},
pages = {12-23},
journal = {Nice Journal},
abstract = {This is an abstract. This line should be long enough to test
multilines... and with a french érudit word},
comments = {A comment},
keyword = {keyword1, keyword2},
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/article_with_strings.bib 0000664 0000000 0000000 00000000511 14731101544 0027177 0 ustar 00root root 0000000 0000000 @STRING{ nice_journal = "Nice Journal" }
@STRING ( jean={Jean} )
@STRING{cesar = {César}}
@ARTICLE{Cesar2013,
author = jean # " " # cesar,
title = {An amazing title},
year = {2013},
month = jan,
volume = {12},
pages = {12-23},
journal = nice_journal,
comments = {A comment},
keyword = {keyword1, keyword2},
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/article_with_strings_output.bib 0000664 0000000 0000000 00000000476 14731101544 0030631 0 ustar 00root root 0000000 0000000 @string{nice_journal = {Nice Journal}}
@string{jean = {Jean}}
@string{cesar = {César}}
@article{Cesar2013,
author = jean # { } # cesar,
comments = {A comment},
journal = nice_journal,
keyword = {keyword1, keyword2},
month = jan,
pages = {12-23},
title = {An amazing title},
volume = {12},
year = {2013}
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/biber-examples.bib 0000664 0000000 0000000 00000166371 14731101544 0025670 0 ustar 00root root 0000000 0000000 % From biber test data : t/tdata/examples.bib
@STRING{anch-ie = {Angew.~Chem. Int.~Ed.} }
@STRING{cup = {Cambridge University Press} }
@STRING{dtv = {Deutscher Taschenbuch-Verlag} }
@STRING{hup = {Harvard University Press} }
@STRING{jams = {J.~Amer. Math. Soc.} }
@STRING{jchph = {J.~Chem. Phys.} }
@STRING{jomch = {J.~Organomet. Chem.} }
@STRING{pup = {Princeton University Press} }
@InCollection{westfahl:space,
crossref = {westfahl:frontier},
langid = {english},
langidopts = {variant=american},
author = {Westfahl, Gary},
indextitle = {True Frontier, The},
title = {The True Frontier},
subtitle = {Confronting and Avoiding the Realities of Space in American Science Fiction
Films},
pages = {55--65},
annotation = {A cross-referenced article from a \texttt{collection}. This is an
\texttt{incollection} entry with a \texttt{crossref} field. Note the
\texttt{subtitle} and \texttt{indextitle} fields}
}
@Set{set,
crossref = {set:herrmann},
entryset = {set:herrmann,set:aksin,set:yoon},
annotation = {A \texttt{set} entry with three members. Note the \texttt{entryset} and
\texttt{crossref} fields. The cross-reference must point to the first member of
the set}
}
@Set{stdmodel,
crossref = {stdmodel:glashow},
entryset = {stdmodel:glashow,stdmodel:weinberg,stdmodel:salam},
annotation = {A \texttt{set} entry with three members discussing the standard model of
particle physics.}
}
@Set{stdmodel:ps_sc,
presort = {zs},
crossref = {stdmodel:glashow},
entryset = {stdmodel:glashow,stdmodel:weinberg,stdmodel:salam},
annotation = {A \texttt{set} entry with three members discussing the standard model of
particle physics. Note the \texttt{entryset} and \texttt{crossref} fields. The
cross-reference must point to the first member of the set}
}
@Article{angenendt,
langid = {german},
author = {Angenendt, Arnold},
indextitle = {In Honore Salvatoris},
title = {In Honore Salvatoris~-- Vom Sinn und Unsinn der Patrozinienkunde},
shorttitle = {In Honore Salvatoris},
journaltitle = {Revue d'Histoire Eccl{\'e}siastique},
volume = {97},
year = {2002},
pages = {431--456, 791--823},
annotation = {A German article in a French journal. Apart from that, a typical
\texttt{article} entry. Note the \texttt{indextitle} field}
}
@Article{angenendtsk,
sortkey = {AATESTKEY},
langid = {german},
author = {Angenendt, Arnold},
indextitle = {In Honore Salvatoris},
title = {In Honore Salvatoris~-- Vom Sinn und Unsinn der Patrozinienkunde},
shorttitle = {In Honore Salvatoris},
journaltitle = {Revue d'Histoire Eccl{\'e}siastique},
volume = {97},
year = {2002},
pages = {431--456, 791--823},
annotation = {A German article in a French journal. Apart from that, a typical
\texttt{article} entry. Note the \texttt{indextitle} field}
}
@Article{angenendtsa,
langid = {german},
shortauthor = {AA},
author = {Angenendt, Arnold},
indextitle = {In Honore Salvatoris},
title = {In Honore Salvatoris~-- Vom Sinn und Unsinn der Patrozinienkunde},
shorttitle = {In Honore Salvatoris},
journaltitle = {Revue d'Histoire Eccl{\'e}siastique},
volume = {97},
year = {2002},
pages = {431--456, 791--823},
annotation = {A German article in a French journal. Apart from that, a typical
\texttt{article} entry. Note the \texttt{indextitle} field}
}
@Article{baez/article,
langid = {english},
langidopts = {variant=american},
author = {Baez, John C. and Lauda, Aaron D.},
title = {Higher-Dimensional Algebra V: 2-Groups},
journaltitle = {Theory and Applications of Categories},
volume = {12},
version = {3},
date = {2004},
pages = {423--491},
eprint = {math/0307200v3},
eprinttype = {arxiv},
annotation = {An \texttt{article} with \texttt{eprint} and \texttt{eprinttype} fields. Note
that the arXiv reference is transformed into a clickable link if
\texttt{hyperref} support has been enabled. Compare \texttt{baez\slash online}
which is the same item given as an \texttt{online} entry}
}
@Article{bertram,
langid = {english},
langidopts = {variant=american},
author = {Bertram, Aaron and Wentworth, Richard},
title = {Gromov invariants for holomorphic maps on Riemann surfaces},
shorttitle = {Gromov invariants},
journaltitle = jams,
volume = {9},
number = {2},
year = {1996},
pages = {529--571},
annotation = {An \texttt{article} entry with a \texttt{volume} and a \texttt{number} field}
}
@Article{gillies,
langid = {english},
langidopts = {variant=british},
author = {Gillies, Alexander},
title = {Herder and the Preparation of Goethe's Idea of World Literature},
journaltitle = {Publications of the English Goethe Society},
volume = {9},
series = {newseries},
year = {1933},
pages = {46--67},
annotation = {An \texttt{article} entry with a \texttt{series} and a \texttt{volume} field.
Note that format of the \texttt{series} field in the database file}
}
@Article{kastenholz,
langid = {english},
langidopts = {variant=american},
author = {Kastenholz, M. A. and H{\"u}nenberger, Philippe H.},
indextitle = {Computation of ionic solvation free energies},
title = {Computation of methodology\hyphen independent ionic solvation free energies
from molecular simulations},
subtitle = {I. The electrostatic potential in molecular liquids},
journaltitle = jchph,
volume = {124},
eid = {124106},
year = {2006},
doi = {10.1063/1.2172593},
annotation = {An \texttt{article} entry with an \texttt{eid} and a \texttt{doi} field. Note
that the \textsc{doi} is transformed into a clickable link if \texttt{hyperref}
support has been enabled},
abstract = {The computation of ionic solvation free energies from atomistic simulations is
a surprisingly difficult problem that has found no satisfactory solution for
more than 15 years. The reason is that the charging free energies evaluated from
such simulations are affected by very large errors. One of these is related to
the choice of a specific convention for summing up the contributions of solvent
charges to the electrostatic potential in the ionic cavity, namely, on the basis
of point charges within entire solvent molecules (M scheme) or on the basis of
individual point charges (P scheme). The use of an inappropriate convention may
lead to a charge-independent offset in the calculated potential, which depends
on the details of the summation scheme, on the quadrupole-moment trace of the
solvent molecule, and on the approximate form used to represent electrostatic
interactions in the system. However, whether the M or P scheme (if any)
represents the appropriate convention is still a matter of on-going debate. The
goal of the present article is to settle this long-standing controversy by
carefully analyzing (both analytically and numerically) the properties of the
electrostatic potential in molecular liquids (and inside cavities within
them).}
}
@Article{murray,
langid = {english},
langidopts = {variant=american},
author = {Hostetler, Michael J. and Wingate, Julia E. and Zhong, Chuan-Jian and Harris,
Jay E. and Vachet, Richard W. and Clark, Michael R. and Londono, J. David and
Green, Stephen J. and Stokes, Jennifer J. and Wignall, George D. and Glish, Gary
L. and Porter, Marc D. and Evans, Neal D. and Murray, Royce W.},
indextitle = {Alkanethiolate gold cluster molecules},
title = {Alkanethiolate gold cluster molecules with core diameters from 1.5 to 5.2~nm},
subtitle = {Core and monolayer properties as a function of core size},
shorttitle = {Alkanethiolate gold cluster molecules},
journaltitle = {Langmuir},
volume = {14},
number = {1},
year = {1998},
pages = {17--30},
annotation = {An \texttt{article} entry with \arabic{author} authors. By default, long author
and editor lists are automatically truncated. This is configurable}
}
@Article{reese,
langid = {english},
langidopts = {variant=american},
author = {Reese, Trevor R.},
title = {Georgia in Anglo-Spanish Diplomacy, 1736-1739},
journaltitle = {William and Mary Quarterly},
volume = {15},
series = {3},
year = {1958},
pages = {168--190},
annotation = {An \texttt{article} entry with a \texttt{series} and a \texttt{volume} field.
Note the format of the series. If the value of the \texttt{series} field is an
integer, this number is printed as an ordinal and the string \enquote*{series}
is appended automatically}
}
@Article{set:aksin,
entryset = {set},
author = {Aks{\i}n, {\"O}zge and T{\"u}rkmen, Hayati and Artok, Levent and
{\k{C}}etinkaya, Bekir and Ni, Chaoying and B{\"u}y{\"u}kg{\"u}ng{\"o}r, Orhan
and {\"O}zkal, Erhan},
indextitle = {Effect of immobilization on catalytic characteristics},
title = {Effect of immobilization on catalytic characteristics of saturated
Pd-N-heterocyclic carbenes in Mizoroki-Heck reactions},
journaltitle = jomch,
volume = {691},
number = {13},
year = {2006},
month = {02},
pages = {3027--3036}
}
@Article{set:herrmann,
entryset = {set},
author = {Herrmann, Wolfgang A. and {\"O}fele, Karl and Schneider, Sabine K. and
Herdtweck, Eberhardt and Hoffmann, Stephan D.},
indextitle = {Carbocyclic carbene as an efficient catalyst, A},
title = {A carbocyclic carbene as an efficient catalyst ligand for C--C coupling
reactions},
journaltitle = anch-ie,
volume = {45},
number = {23},
year = {2006},
pages = {3859--3862}
}
@Article{set:yoon,
entryset = {set},
author = {Yoon, Myeong S. and Ryu, Dowook and Kim, Jeongryul and Ahn, Kyo Han},
indextitle = {Palladium pincer complexes},
title = {Palladium pincer complexes with reduced bond angle strain: efficient catalysts
for the Heck reaction},
journaltitle = {Organometallics},
volume = {25},
number = {10},
year = {2006},
pages = {2409--2411}
}
@Article{shore,
author = {Shore, Bradd},
title = {Twice-Born, Once Conceived},
subtitle = {Meaning Construction and Cultural Cognition},
journaltitle = {American Anthropologist},
volume = {93},
series = {newseries},
number = {1},
month = mar,
year = {1991},
pages = {9--27},
annotation = {An \texttt{article} entry with \texttt{series}, \texttt{volume}, and
\texttt{number} fields. Note the format of the \texttt{series} which is a
localization key}
}
@Article{sigfridsson,
langid = {english},
langidopts = {variant=american},
author = {Sigfridsson, Emma and Ryde, Ulf},
indextitle = {Methods for deriving atomic charges},
title = {Comparison of methods for deriving atomic charges from the electrostatic
potential and moments},
journaltitle = {Journal of Computational Chemistry},
volume = {19},
number = {4},
year = {1998},
doi = {10.1002/(SICI)1096-987X(199803)19:4<377::AID-JCC1>3.0.CO;2-P},
pages = {377--395},
annotation = {An \texttt{article} entry with \texttt{volume}, \texttt{number}, and
\texttt{doi} fields. Note that the \textsc{doi} is transformed into a clickable
link if \texttt{hyperref} support has been enabled},
abstract = { Four methods for deriving partial atomic charges from the quantum chemical
electrostatic potential (CHELP, CHELPG, Merz-Kollman, and RESP) have been
compared and critically evaluated. It is shown that charges strongly depend on
how and where the potential points are selected. Two alternative methods are
suggested to avoid the arbitrariness in the point-selection schemes and van der
Waals exclusion radii: CHELP-BOW, which also estimates the charges from the
electrostatic potential, but with potential points that are Boltzmann-weighted
after their occurrence in actual simulations using the energy function of the
program in which the charges will be used, and CHELMO, which estimates the
charges directly from the electrostatic multipole moments. Different criteria
for the quality of the charges are discussed.}
}
@Article{spiegelberg,
langid = {german},
sorttitle = {Intention und Intentionalitat in der Scholastik, bei Brentano und Husserl},
indexsorttitle = {Intention und Intentionalitat in der Scholastik, bei Brentano und Husserl},
author = {Spiegelberg, Herbert},
title = {\mkbibquote{Intention} und \mkbibquote{Intentionalit{\"a}t} in der Scholastik, bei
Brentano und Husserl},
shorttitle = {Intention und Intentionalit{\"a}t},
journaltitle = {Studia Philosophica},
volume = {29},
year = {1969},
pages = {189--216},
annotation = {An \texttt{article} entry. Note the \texttt{sorttitle} and
\texttt{indexsorttitle} fields and the markup of the quotes in the database file}
}
@Article{springer,
langid = {english},
langidopts = {variant=british},
author = {Springer, Otto},
title = {Mediaeval Pilgrim Routes from Scandinavia to Rome},
shorttitle = {Mediaeval Pilgrim Routes},
journaltitle = {Mediaeval Studies},
volume = {12},
year = {1950},
pages = {92--122},
annotation = {A plain \texttt{article} entry}
}
@Article{stdmodel:glashow,
entryset = {stdmodel},
author = {Glashow, Sheldon},
title = {Partial Symmetries of Weak Interactions},
journaltitle = {Nucl.~Phys.},
volume = {22},
year = {1961},
pages = {579\psqq}
}
@Article{stdmodel:weinberg,
entryset = {stdmodel},
author = {Weinberg, Steven},
title = {A Model of Leptons},
journaltitle = {Phys.~Rev.~Lett.},
volume = {19},
year = {1967},
pages = {1264\psqq}
}
@Book{aristotle:anima,
keywords = {primary},
langid = {english},
langidopts = {variant=british},
author = {Aristotle},
editor = {Hicks, Robert Drew},
title = {De Anima},
publisher = cup,
location = {Cambridge},
year = {1907},
annotation = {A \texttt{book} entry with an \texttt{author} and an \texttt{editor}}
}
@Book{aristotle:physics,
keywords = {primary},
langid = {english},
langidopts = {variant=american},
author = {Aristotle},
translator = {Wicksteed, P. H. and Cornford, F. M.},
title = {Physics},
shorttitle = {Physics},
publisher = {G. P. Putnam},
location = {New York},
year = {1929},
annotation = {A \texttt{book} entry with a \texttt{translator} field}
}
@Book{aristotle:poetics,
keywords = {primary},
langid = {english},
langidopts = {variant=british},
author = {Aristotle},
editor = {Lucas, D. W.},
title = {Poetics},
shorttitle = {Poetics},
series = {Clarendon Aristotle},
publisher = {Clarendon Press},
location = {Oxford},
year = {1968},
annotation = {A \texttt{book} entry with an \texttt{author} and an \texttt{editor} as well as
a \texttt{series} field}
}
@Book{aristotle:rhetoric,
keywords = {primary},
langid = {english},
langidopts = {variant=british},
sorttitle = {Rhetoric of Aristotle},
author = {Aristotle},
editor = {Cope, Edward Meredith},
commentator = {Cope, Edward Meredith},
indextitle = {Rhetoric of Aristotle, The},
title = {The Rhetoric of Aristotle with a commentary by the late Edward Meredith Cope},
shorttitle = {Rhetoric},
volumes = {3},
publisher = cup,
year = {1877},
annotation = {A commented edition. Note the concatenation of the \texttt{editor} and
\texttt{commentator} fields as well as the \texttt{volumes}, \texttt{sorttitle},
and \texttt{indextitle} fields}
}
@Book{augustine,
langid = {english},
langidopts = {variant=british},
author = {Augustine, Robert L.},
title = {Heterogeneous catalysis for the synthetic chemist},
shorttitle = {Heterogeneous catalysis},
publisher = {Marcel Dekker},
location = {New York},
year = {1995},
annotation = {A plain \texttt{book} entry}
}
@Book{averroes/bland,
keywords = {primary},
langid = {english},
langidopts = {variant=american},
author = {Averroes},
editor = {Bland, Kalman P.},
translator = {Bland, Kalman P.},
indextitle = {Epistle on the Possibility of Conjunction, The},
title = {The Epistle on the Possibility of Conjunction with the Active Intellect by Ibn
Rushd with the Commentary of Moses Narboni},
shorttitle = {Possibility of Conjunction},
series = {Moreshet: Studies in Jewish History, Literature and Thought},
number = {7},
publisher = {Jewish Theological Seminary of America},
location = {New York},
year = {1982},
annotation = {A \texttt{book} entry with a \texttt{series} and a \texttt{number}. Note the
concatenation of the \texttt{editor} and \texttt{translator} fields as well as
the \texttt{indextitle} field}
}
@Book{averroes/hannes,
keywords = {primary},
langid = {german},
sorttitle = {Uber die Moglichkeit der Conjunktion},
indexsorttitle = {Uber die Moglichkeit der Conjunktion},
author = {Averroes},
editor = {Hannes, Ludwig},
translator = {Hannes, Ludwig},
annotator = {Hannes, Ludwig},
indextitle = {{\"U}ber die M{\"o}glichkeit der Conjunktion},
title = {Des Averro{\"e}s Abhandlung: \mkbibquote{{\"U}ber die M{\"o}glichkeit der
Conjunktion} oder \mkbibquote{{\"U}ber den materiellen Intellekt}},
shorttitle = {M{\"o}glichkeit der Conjunktion},
publisher = {C.~A. Kaemmerer},
location = {Halle an der Saale},
year = {1892},
annotation = {An annotated edition. Note the concatenation of the \texttt{editor},
\texttt{translator}, and \texttt{annotator} fields. Also note the
\texttt{shorttitle}, \texttt{indextitle}, \texttt{sorttitle}, and
\texttt{indexsorttitle} fields}
}
@Book{averroes/hercz,
keywords = {primary},
langid = {german},
indexsorttitle = {Drei Abhandlungen uber die Conjunction},
author = {Averroes},
editor = {Hercz, J.},
translator = {Hercz, J.},
indextitle = {Drei Abhandlungen {\"u}ber die Conjunction},
title = {Drei Abhandlungen {\"u}ber die Conjunction des separaten Intellects mit dem
Menschen},
subtitle = {Von Averroes (Vater und Sohn), aus dem Arabischen {\"u}bersetzt von Samuel Ibn
Tibbon},
shorttitle = {Drei Abhandlungen},
publisher = {S.~Hermann},
location = {Berlin},
year = {1869},
annotation = {A \texttt{book} entry. Note the concatenation of the \texttt{editor} and
\texttt{translator} fields as well as the \texttt{indextitle} and
\texttt{indexsorttitle} fields}
}
@Book{cicero,
langid = {german},
author = {Cicero, Marcus Tullius},
editor = {Blank-Sangmeister, Ursula},
translator = {Blank-Sangmeister, Ursula},
afterword = {Thraede, Klaus},
indextitle = {De natura deorum},
title = {De natura deorum. {\"U}ber das Wesen der G{\"o}tter},
shorttitle = {De natura deorum},
language = {langlatin and langgerman},
publisher = {Reclam},
location = {Stuttgart},
year = {1995},
annotation = {A bilingual edition of Cicero's \emph{De natura deorum}, with a German
translation. Note the format of the \texttt{language} field in the database
file, the concatenation of the \texttt{editor} and \texttt{translator} fields,
and the \texttt{afterword} field}
}
@Book{coleridge,
langid = {english},
langidopts = {variant=british},
author = {Coleridge, Samuel Taylor},
editor = {Coburn, Kathleen and Engell, James and Bate, W. Jackson},
indextitle = {Biographia literaria},
title = {Biographia literaria, or Biographical sketches of my literary life and
opinions},
shorttitle = {Biographia literaria},
maintitle = {The collected works of Samuel Taylor Coleridge},
part = {2},
volume = {7},
series = {Bollingen Series},
number = {75},
publisher = {Routledge and Kegan Paul},
location = {London},
year = {1983},
annotation = {One (partial) volume of a multivolume book. This is a \texttt{book} entry with
a \texttt{volume} and a \texttt{part} field which explicitly refers to the
second (physical) part of the seventh (logical) volume. Also note the
\texttt{series} and \texttt{number} fields}
}
@Book{companion,
langid = {english},
langidopts = {variant=american},
sorttitle = {LaTeX Companion},
author = {Goossens, Michel and Mittelbach, Frank and Samarin, Alexander},
indextitle = {LaTeX Companion, The},
title = {The LaTeX Companion},
shorttitle = {LaTeX Companion},
edition = {1},
publisher = {Addison-Wesley},
location = {Reading, Mass.},
year = {1994},
annotation = {A book with three authors. Note the formatting of the author list. By default,
only the first name is reversed in the bibliography}
}
@Book{cotton,
langid = {english},
langidopts = {variant=british},
author = {Cotton, Frank Albert and Wilkinson, Geoffrey and Murillio, Carlos A. and
Bochmann, Manfred},
title = {Advanced inorganic chemistry},
edition = {6},
publisher = {Wiley},
location = {Chichester},
year = {1999},
annotation = {A \texttt{book} entry with \arabic{author} authors and an \texttt{edition}
field. By default, long \texttt{author} and \texttt{editor} lists are
automatically truncated. This is configurable}
}
@Book{gerhardt,
langid = {english},
langidopts = {variant=american},
sorttitle = {Federal Appointments Process},
author = {Gerhardt, Michael J.},
indextitle = {Federal Appointments Process, The},
title = {The Federal Appointments Process},
subtitle = {A Constitutional and Historical Analysis},
shorttitle = {Federal Appointments Process},
publisher = {Duke University Press},
location = {Durham and London},
year = {2000},
annotation = {This is a \texttt{book} entry. Note the format of the \texttt{location} field
as well as the \texttt{sorttitle} and \texttt{indextitle} fields}
}
@Book{gonzalez,
langid = {english},
langidopts = {variant=american},
sorttitle = {Ghost of John Wayne and Other Stories},
author = {Gonzalez, Ray},
indextitle = {Ghost of John Wayne and Other Stories, The},
title = {The Ghost of John Wayne and Other Stories},
shorttitle = {Ghost of John Wayne},
publisher = {The University of Arizona Press},
location = {Tucson},
year = {2001},
isbn = {0-816-52066-6},
annotation = {A collection of short stories. This is a \texttt{book} entry. Note the
\texttt{sorttitle} and \texttt{indextitle} fields in the database file. There's
also an \texttt{isbn} field}
}
@Book{hammond,
langid = {english},
langidopts = {variant=british},
sorttitle = {Basics of crystallography and diffraction},
author = {Hammond, Christopher},
indextitle = {Basics of crystallography and diffraction, The},
title = {The basics of crystallography and diffraction},
shorttitle = {Crystallography and diffraction},
publisher = {International Union of Crystallography and Oxford University Press},
location = {Oxford},
year = {1997},
annotation = {A \texttt{book} entry. Note the \texttt{sorttitle} and \texttt{indextitle}
fields as well as the format of the \texttt{publisher} field}
}
@Book{iliad,
langid = {german},
sorttitle = {Ilias},
author = {Homer},
translator = {Schadewaldt, Wolfgang},
introduction = {Latacz, Joachim},
indextitle = {Ilias, Die},
title = {Die Ilias},
shorttitle = {Ilias},
edition = {3},
publisher = {Artemis \& Winkler},
location = {D{\"u}sseldorf and Z{\"u}rich},
year = {2004},
annotation = {A German translation of the \emph{Iliad}. Note the \texttt{translator} and
\texttt{introduction} fields and the format of the \texttt{location} field in
the database file. Also note the \texttt{sorttitle} and \texttt{indextitle} fields}
}
@Book{knuth:ct,
langid = {english},
langidopts = {variant=american},
sortyear = {1984-0},
sorttitle = {Computers & Typesetting},
indexsorttitle = {Computers & Typesetting},
author = {Knuth, Donald E.},
title = {Computers \& Typesetting},
volumes = {5},
publisher = {Addison-Wesley},
location = {Reading, Mass.},
date = {1984/1986},
annotation = {A five-volume book cited as a whole. This is a \texttt{book} entry, note the
\texttt{volumes} field}
}
@Book{knuth:ct:a,
langid = {english},
langidopts = {variant=american},
sortyear = {1984-1},
sorttitle = {Computers & Typesetting A},
indexsorttitle = {The TeXbook},
author = {Knuth, Donald E.},
indextitle = {\TeX book, The},
title = {The \TeX book},
shorttitle = {\TeX book},
maintitle = {Computers \& Typesetting},
volume = {A},
publisher = {Addison-Wesley},
location = {Reading, Mass.},
year = {1984},
annotation = {The first volume of a five-volume book. Note the \texttt{sorttitle} and
\texttt{sortyear} fields. We want this volume to be listed after the entry
referring to the entire five-volume set. Also note the \texttt{indextitle} and
\texttt{indexsorttitle} fields}
}
@Book{knuth:ct:b,
langid = {english},
langidopts = {variant=american},
sortyear = {1984-2},
sorttitle = {Computers & Typesetting B},
indexsorttitle = {TeX: The Program},
author = {Knuth, Donald E.},
title = {\TeX: The Program},
shorttitle = {\TeX},
maintitle = {Computers \& Typesetting},
volume = {B},
publisher = {Addison-Wesley},
location = {Reading, Mass.},
year = {1986},
annotation = {The second volume of a five-volume book. Note the \texttt{sorttitle} and
\texttt{sortyear} fields. Also note the \texttt{indexsorttitle} field}
}
@Book{knuth:ct:c,
langid = {english},
langidopts = {variant=american},
sortyear = {1984-3},
sorttitle = {Computers & Typesetting C},
author = {Knuth, Donald E.},
indextitle = {METAFONTbook, The},
title = {The METAFONTbook},
shorttitle = {METAFONTbook},
maintitle = {Computers \& Typesetting},
volume = {C},
publisher = {Addison-Wesley},
location = {Reading, Mass.},
year = {1986},
annotation = {The third volume of a five-volume book. Note the \texttt{sorttitle} and
\texttt{sortyear} fields as well as the \texttt{indextitle} field}
}
@Book{knuth:ct:d,
langid = {english},
langidopts = {variant=american},
sortyear = {1984-4},
sorttitle = {Computers & Typesetting D},
author = {Knuth, Donald E.},
title = {METAFONT: The Program},
shorttitle = {METAFONT},
maintitle = {Computers \& Typesetting},
volume = {D},
publisher = {Addison-Wesley},
location = {Reading, Mass.},
year = {1986},
annotation = {The fourth volume of a five-volume book. Note the \texttt{sorttitle} and
\texttt{sortyear} fields}
}
@Book{knuth:ct:e,
langid = {english},
langidopts = {variant=american},
sortyear = {1984-5},
sorttitle = {Computers & Typesetting E},
author = {Knuth, Donald E.},
title = {Computer Modern Typefaces},
maintitle = {Computers \& Typesetting},
volume = {E},
publisher = {Addison-Wesley},
location = {Reading, Mass.},
year = {1986},
annotation = {The fifth volume of a five-volume book. Note the \texttt{sorttitle} and
\texttt{sortyear} fields}
}
@Book{malinowski,
langid = {english},
langidopts = {variant=british},
author = {Malinowski, Bronis{\l}aw},
title = {Argonauts of the Western Pacific},
subtitle = {An account of native enterprise and adventure in the Archipelagoes of
Melanesian New Guinea},
shorttitle = {Argonauts},
edition = {8},
publisher = {Routledge and Kegan Paul},
location = {London},
year = {1972},
annotation = {This is a \texttt{book} entry. Note the format of the \texttt{publisher} and
\texttt{edition} fields as well as the \texttt{subtitle} field}
}
@Book{maron,
langid = {english},
langidopts = {variant=american},
author = {Maron, Monika},
translator = {Brigitte Goldstein},
title = {Animal Triste},
shorttitle = {Animal Triste},
publisher = {University of Nebraska Press},
location = {Lincoln},
year = {2000},
origlanguage = {german},
annotation = {An English translation of a German novel with a French title. In other words: a
\texttt{book} entry with a \texttt{translator} field. Note the
\texttt{origlanguage} field which is concatenated with the \texttt{translator}}
}
@Book{massa,
langid = {english},
langidopts = {variant=british},
author = {Werner Massa},
title = {Crystal structure determination},
edition = {2},
publisher = {Spinger},
location = {Berlin},
year = {2004},
annotation = {A \texttt{book} entry with an \texttt{edition} field}
}
@Book{nietzsche:ksa,
langid = {german},
sortyear = {1988-00-000},
sorttitle = {Werke-00-000},
indexsorttitle = {Samtliche Werke},
author = {Nietzsche, Friedrich},
editor = {Colli, Giorgio and Montinari, Mazzino},
title = {S{\"a}mtliche Werke},
subtitle = {Kritische Studienausgabe},
volumes = {15},
edition = {2},
publisher = dtv # { and Walter de Gruyter},
location = {M{\"u}nchen and Berlin and New York},
year = {1988},
annotation = {The critical edition of Nietzsche's works. This is a \texttt{book} entry
referring to a 15-volume work as a whole. Note the \texttt{volumes} field and
the format of the \texttt{publisher} and \texttt{location} fields in the
database file. Also note the \texttt{sorttitle} and \texttt{sortyear} fields
which are used to fine-tune the sorting order of the bibliography. We want this
item listed first in the bibliography}
}
@Book{nietzsche:ksa1,
langid = {german},
sortyear = {1988-01-000},
sorttitle = {Werke-01-000},
indexsorttitle = {Samtliche Werke I},
author = {Nietzsche, Friedrich},
bookauthor = {Nietzsche, Friedrich},
editor = {Colli, Giorgio and Montinari, Mazzino},
indextitle = {S{\"a}mtliche Werke I},
title = {Die Geburt der Trag{\"o}die. Unzeitgem{\"a}{\ss}e Betrachtungen I--IV.
Nachgelassene Schriften 1870--1973},
shorttitle = {S{\"a}mtliche Werke I},
maintitle = {S{\"a}mtliche Werke},
mainsubtitle = {Kritische Studienausgabe},
volume = {1},
edition = {2},
publisher = dtv # { and Walter de Gruyter},
location = {M{\"u}nchen and Berlin and New York},
year = {1988},
annotation = {A single volume from the critical edition of Nietzsche's works. This
\texttt{book} entry explicitly refers to the first volume only. Note the
\texttt{title} and \texttt{maintitle} fields. Also note the \texttt{sorttitle}
and \texttt{sortyear} fields. We want this entry to be listed after the entry
referring to the entire edition}
}
@Book{nussbaum,
keywords = {secondary},
langid = {english},
langidopts = {variant=american},
sorttitle = {Aristotle's De Motu Animalium},
indexsorttitle = {Aristotle's De Motu Animalium},
author = {Nussbaum, Martha},
title = {Aristotle's \mkbibquote{De Motu Animalium}},
publisher = pup,
location = {Princeton},
year = {1978},
annotation = {A \texttt{book} entry. Note the \texttt{sorttitle} and \texttt{indexsorttitle}
fields and the markup of the quotes in the database file}
}
@Book{piccato,
langid = {english},
langidopts = {variant=american},
author = {Piccato, Pablo},
title = {City of Suspects},
subtitle = {Crime in Mexico City, 1900--1931},
shorttitle = {City of Suspects},
publisher = {Duke University Press},
location = {Durham and London},
year = {2001},
annotation = {This is a \texttt{book} entry. Note the format of the \texttt{location} field
in the database file}
}
@Book{vangennep,
options = {useprefix},
langid = {french},
sorttitle = {Rites de passage},
author = {van Gennep, Arnold},
indextitle = {Rites de passage, Les},
title = {Les rites de passage},
shorttitle = {Rites de passage},
publisher = {Nourry},
location = {Paris},
year = {1909},
annotation = {A \texttt{book} entry. Note the format of the printed name and compare the
\texttt{useprefix} option in the \texttt{options} field as well as
\texttt{brandt} and \texttt{geer}}
}
@Book{vangennepx,
langid = {french},
author = {van Gennep, Jean},
title = {Il a neigé à Port-au-Prince},
publisher = {Alhambra},
location = {Montréal},
year = {1999}
}
@Book{vazques-de-parga,
langid = {spanish},
sorttitle = {Peregrinaciones a Santiago de Compostela},
author = {V{\'a}zques{ de }Parga, Luis and Lacarra, Jos{\'e} Mar{\'i}a and Ur{\'i}a
R{\'i}u, Juan},
indextitle = {Peregrinaciones a Santiago de Compostela, Las},
title = {Las Peregrinaciones a Santiago de Compostela},
shorttitle = {Peregrinaciones},
volumes = {3},
publisher = {Iberdrola},
location = {Pamplona},
year = {1993},
note = {Ed. facs. de la realizada en 1948--49},
annotation = {A multivolume book cited as a whole. This is a \texttt{book} entry with
\texttt{volumes}, \texttt{note}, \texttt{sorttitle}, and \texttt{indextitle} fields}
}
@Book{worman,
langid = {english},
langidopts = {variant=american},
sorttitle = {Cast of Character},
author = {Worman, Nancy},
indextitle = {Cast of Character, The},
title = {The Cast of Character},
subtitle = {Style in Greek Literature},
shorttitle = {Cast of Character},
publisher = {University of Texas Press},
location = {Austin},
year = {2002},
annotation = {A \texttt{book} entry. Note the \texttt{sorttitle} and \texttt{indextitle}
fields}
}
@Book{wormanx,
langid = {english},
langidopts = {variant=american},
sorttitle = {Someone did it again},
author = {Worman, Nana},
indextitle = {Someone did it again},
title = {Someone did it again},
publisher = {University of Nowhere Press},
location = {Somewhere},
year = {2009}
}
@Collection{britannica,
options = {useeditor=false},
label = {EB},
langid = {english},
langidopts = {variant=british},
sorttitle = {Encyclop{\ae}dia Britannica},
editor = {Preece, Warren E.},
indextitle = {Encyclop{\ae}dia Britannica, The New},
title = {The New Encyclop{\ae}dia Britannica},
shorttitle = {Encyclop{\ae}dia Britannica},
volumes = {32},
edition = {15},
publisher = {Encyclop{\ae}dia Britannica},
location = {Chicago, Ill.},
year = {2003},
annotation = {This is a \texttt{collection} entry for an encyclopedia. Note the
\texttt{useeditor} option in the \texttt{options} field as well as the
\texttt{sorttitle} field. We want this entry to be cited and alphabetized by
title even though there is an editor. In addition to that, we want the title to
be alphabetized under \enquote*{E} rather than \enquote*{T}. Also note the
\texttt{label} field which is provided for author-year citation styles}
}
@Collection{gaonkar,
langid = {english},
langidopts = {variant=american},
editor = {Gaonkar, Dilip Parameshwar},
title = {Alternative Modernities},
publisher = {Duke University Press},
location = {Durham and London},
year = {2001},
isbn = {0-822-32714-7},
annotation = {This is a \texttt{collection} entry. Note the format of the \texttt{location}
field in the database file as well as the \texttt{isbn} field}
}
@Collection{jaffe,
editor = {Jaff{\'e}, Philipp},
editora = {Loewenfeld, Samuel and Kaltenbrunner, Ferdinand and Ewald, Paul},
editoratype = {redactor},
indextitle = {Regesta Pontificum Romanorum},
title = {Regesta Pontificum Romanorum ab condita ecclesia ad annum post Christum natum
\textsc{mcxcviii}},
shorttitle = {Regesta Pontificum Romanorum},
volumes = {2},
edition = {2},
location = {Leipzig},
date = {1885/1888},
annotation = {A \texttt{collection} entry with \texttt{edition} and \texttt{volumes} fields.
Note the \texttt{editortype} field handling the redactor}
}
@Collection{westfahl:frontier,
langid = {english},
langidopts = {variant=american},
editor = {Westfahl, Gary},
title = {Space and Beyond},
subtitle = {The Frontier Theme in Science Fiction},
booktitle = {Space and Beyond},
booksubtitle = {The Frontier Theme in Science Fiction},
publisher = {Greenwood},
location = {Westport, Conn. and London},
year = {2000},
annotation = {This is a \texttt{collection} entry. Note the format of the \texttt{location}
field as well as the \texttt{subtitle} and \texttt{booksubtitle} fields}
}
@InBook{kant:kpv,
shorthand = {KpV},
langid = {german},
author = {Kant, Immanuel},
bookauthor = {Kant, Immanuel},
title = {Kritik der praktischen Vernunft},
shorttitle = {Kritik der praktischen Vernunft},
booktitle = {Kritik der praktischen Vernunft. Kritik der Urtheilskraft},
maintitle = {Kants Werke. Akademie Textausgabe},
volume = {5},
publisher = {Walter de Gruyter},
location = {Berlin},
year = {1968},
pages = {1--163},
annotation = {An edition of Kant's \emph{Collected Works}, volume five. This is an
\texttt{inbook} entry which explicitly refers to the \emph{Critique of Practical
Reason} only, not to the entire fifth volume. Note the \texttt{author} and
\texttt{bookauthor} fields in the database file. By default, the
\texttt{bookauthor} is omitted if the values of the \texttt{author} and
\texttt{bookauthor} fields are identical}
}
@InBook{kant:ku,
shorthand = {KU},
langid = {german},
author = {Kant, Immanuel},
bookauthor = {Kant, Immanuel},
title = {Kritik der Urtheilskraft},
booktitle = {Kritik der praktischen Vernunft. Kritik der Urtheilskraft},
maintitle = {Kants Werke. Akademie Textausgabe},
volume = {5},
publisher = {Walter de Gruyter},
location = {Berlin},
year = {1968},
pages = {165--485},
annotation = {An edition of Kant's \emph{Collected Works}, volume five. This is an
\texttt{inbook} entry which explicitly refers to the \emph{Critique of Judgment}
only, not to the entire fifth volume}
}
@InBook{nietzsche:historie,
langid = {german},
sortyear = {1988-01-243},
sorttitle = {Werke-01-243},
indexsorttitle = {Vom Nutzen und Nachtheil der Historie fur das Leben},
author = {Nietzsche, Friedrich},
bookauthor = {Nietzsche, Friedrich},
editor = {Colli, Giorgio and Montinari, Mazzino},
indextitle = {Vom Nutzen und Nachtheil der Historie f{\"u}r das Leben},
title = {Unzeitgem{\"a}sse Betrachtungen. Zweites St{\"u}ck},
subtitle = {Vom Nutzen und Nachtheil der Historie f{\"u}r das Leben},
shorttitle = {Vom Nutzen und Nachtheil der Historie},
booktitle = {Die Geburt der Trag{\"o}die. Unzeitgem{\"a}{\ss}e Betrachtungen I--IV.
Nachgelassene Schriften 1870--1973},
maintitle = {S{\"a}mtliche Werke},
mainsubtitle = {Kritische Studienausgabe},
volume = {1},
publisher = dtv # { and Walter de Gruyter},
location = {M{\"u}nchen and Berlin and New York},
year = {1988},
pages = {243--334},
annotation = {A single essay from the critical edition of Nietzsche's works. This
\texttt{inbook} entry explicitly refers to an essay found in the first volume.
Note the \texttt{title}, \texttt{booktitle}, and \texttt{maintitle} fields. Also
note the \texttt{sorttitle} and \texttt{sortyear} fields. We want this entry to
be listed after the entry referring to the entire first volume}
}
@InCollection{brandt,
options = {useprefix=false},
langid = {german},
indexsorttitle = {Nordischen Lander von der Mitte des 11. Jahrhunderts bis 1448},
author = {von Brandt, Ahasver and Erich Hoffmann},
editor = {Ferdinand Seibt},
indextitle = {Nordischen L{\"a}nder von der Mitte des 11.~Jahrhunderts bis 1448, Die},
title = {Die nordischen L{\"a}nder von der Mitte des 11.~Jahrhunderts bis 1448},
shorttitle = {Die nordischen L{\"a}nder},
booktitle = {Europa im Hoch- und Sp{\"a}tmittelalter},
series = {Handbuch der europ{\"a}ischen Geschichte},
number = {2},
publisher = {Klett-Cotta},
location = {Stuttgart},
year = {1987},
pages = {884--917},
annotation = {An \texttt{incollection} entry with a \texttt{series} and a \texttt{number}.
Note the format of the printed name and compare the \texttt{useprefix} option in
the \texttt{options} field as well as \texttt{vangennep}. Also note the
\texttt{indextitle, and \texttt{indexsorttitle} fields}}
}
@InCollection{hyman,
keywords = {secondary},
langid = {english},
langidopts = {variant=american},
author = {Arthur Hyman},
editor = {O'Meara, Dominic J.},
indextitle = {Aristotle's Theory of the Intellect},
title = {Aristotle's Theory of the Intellect and its Interpretation by Averroes},
shorttitle = {Aristotle's Theory of the Intellect},
booktitle = {Studies in Aristotle},
series = {Studies in Philosophy and the History of Philosophy},
number = {9},
publisher = {The Catholic University of America Press},
location = {Washington, D.C.},
year = {1981},
pages = {161--191},
annotation = {An \texttt{incollection} entry with a \texttt{series} and \texttt{number} field}
}
@InCollection{pines,
keywords = {secondary},
langid = {english},
langidopts = {variant=american},
author = {Pines, Shlomo},
editor = {Twersky, Isadore},
indextitle = {Limitations of Human Knowledge According to Al-Farabi, ibn Bajja, and
Maimonides, The},
title = {The Limitations of Human Knowledge According to Al-Farabi, ibn Bajja, and
Maimonides},
shorttitle = {Limitations of Human Knowledge},
booktitle = {Studies in Medieval Jewish History and Literature},
publisher = hup,
location = {Cambridge, Mass.},
year = {1979},
pages = {82--109},
annotation = {A typical \texttt{incollection} entry. Note the \texttt{indextitle} field}
}
@InProceedings{moraux,
keywords = {secondary},
langid = {french},
indexsorttitle = {De Anima dans la tradition grecque},
author = {Moraux, Paul},
editor = {Lloyd, G. E. R. and Owen, G. E. L.},
indextitle = {\emph{De Anima} dans la tradition gr{\`e}cque, Le},
title = {Le \emph{De Anima} dans la tradition gr{\`e}cque},
subtitle = {Quelques aspects de l'interpretation du trait{\'e}, de Theophraste {\`a}
Themistius},
shorttitle = {\emph{De Anima} dans la tradition gr{\`e}cque},
booktitle = {Aristotle on Mind and the Senses},
booktitleaddon = {Proceedings of the Seventh Symposium Aristotelicum (1975)},
publisher = cup,
location = {Cambridge},
date = {1979-01-02/1980-04-08},
origdate = {1924-06-07/1924-07-09},
eventdate = {1924-02-03/1924-02-05},
urldate = {1979-03-03/1979-03-04},
url = {http://some/thing},
pages = {281--324},
annotation = {This is a typical \texttt{inproceedings} entry. Note the \texttt{booksubtitle},
\texttt{shorttitle}, \texttt{indextitle}, and \texttt{indexsorttitle} fields}
}
@InProceedings{stdmodel:salam,
entryset = {stdmodel},
author = {Salam, Abdus},
editor = {Svartholm, Nils},
title = {Weak and Electromagnetic Interactions},
booktitle = {Elementary particle theory},
booksubtitle = {Relativistic groups and analyticity},
booktitleaddon = {Proceedings of the Eighth Nobel Symposium, May 19--25, 1968},
venue = {Aspen{\"a}sgarden, Lerum},
publisher = {Almquist \& Wiksell},
location = {Stockholm},
year = {1968},
pages = {367\psqq}
}
@Manual{cms,
label = {CMS},
langid = {english},
langidopts = {variant=american},
sorttitle = {Chicago Manual of Style},
indextitle = {Chicago Manual of Style, The},
title = {The Chicago Manual of Style},
subtitle = {The Essential Guide for Writers, Editors, and Publishers},
shorttitle = {Chicago Manual of Style},
edition = {15},
publisher = {University of Chicago Press},
location = {Chicago, Ill.},
year = {2003},
isbn = {0-226-10403-6},
annotation = {This is a \texttt{manual} entry without an \texttt{author} or \texttt{editor}.
Note the \texttt{label} field in the database file which is provided for
author-year citation styles. Also note the \texttt{sorttitle} and
\texttt{indextitle} fields. By default, all entries without an \texttt{author}
or \texttt{editor} are alphabetized by \texttt{title} but we want this entry to
be alphabetized under \enquote*{C} rather than \enquote*{T}. There's also an
\texttt{isbn} field}
}
@Online{baez/online,
langid = {english},
langidopts = {variant=american},
author = {Baez, John C. and Lauda, Aaron D.},
title = {Higher-Dimensional Algebra V: 2-Groups},
version = {3},
date = {2004-10-27},
eprint = {math/0307200v3},
eprinttype = {arxiv},
annotation = {An \texttt{online} reference from arXiv. Note the \texttt{eprint} and
\texttt{eprinttype} fields. Also note that the arXiv reference is transformed
into a clickable link if \texttt{hyperref} support has been enabled. Compare
\texttt{baez\slash article} which is the same item given as an \texttt{article}
entry with eprint information}
}
@Online{ctan,
label = {CTAN},
langid = {english},
langidopts = {variant=american},
title = {CTAN},
subtitle = {The Comprehensive TeX Archive Network},
year = {2006},
url = {http://www.ctan.org},
urldate = {2006-10-01},
annotation = {This is an \texttt{online} entry. The \textsc{url}, which is given in the
\texttt{url} field, is transformed into a clickable link if \texttt{hyperref}
support has been enabled. Note the format of the \texttt{urldate} field
(\texttt{yyyy-mm-dd}) in the database file. It is also possible to use the
fields \texttt{urlday}\slash \texttt{urlmonth}\slash \texttt{urlyear} instead.
Also note the \texttt{label} field which may be used as a fallback by citation
styles which need an \texttt{author} and\slash or a \texttt{year}}
}
@Online{itzhaki,
langid = {english},
langidopts = {variant=american},
author = {Itzhaki, Nissan},
title = {Some remarks on 't Hooft's S-matrix for black holes},
version = {1},
date = {1996-03-11},
eprint = {hep-th/9603067},
eprinttype = {arxiv},
annotation = {An \texttt{online} reference from arXiv. Note the \texttt{eprint} and
\texttt{eprinttype} fields. Also note that the arXiv reference is transformed
into a clickable link if \texttt{hyperref} support has been enabled},
abstract = {We discuss the limitations of 't Hooft's proposal for the black hole S-matrix.
We find that the validity of the S-matrix implies violation of the
semi-classical approximation at scales large compared to the Planck scale. We
also show that the effect of the centrifugal barrier on the S-matrix is crucial
even for large transverse distances.}
}
@Online{markey,
langid = {english},
langidopts = {variant=american},
sorttitle = {Tame the Beast},
author = {Markey, Nicolas},
title = {Tame the BeaST},
subtitle = {The B to X of BibTeX},
version = {1.3},
url = {http://tug.ctan.org/tex-archive/info/bibtex/tamethebeast/ttb_en.pdf},
urldate = {2006-10-01},
annotation = {An \texttt{online} entry for a tutorial. Note the format of the \texttt{date}
field (\texttt{yyyy-mm-dd}) in the database file. It is also possible to use the
fields \texttt{day}\slash \texttt{month}\slash \texttt{year} instead.}
}
@Patent{almendro,
langid = {german},
author = {Almendro, Jos{\'e} L. and Mart{\'i}n, Jacinto and S{\'a}nchez, Alberto and
Nozal, Fernando},
title = {Elektromagnetisches Signalhorn},
number = {EU-29702195U},
location = {countryes and countryfr and countryuk and countryde},
year = {1998},
annotation = {This is a \texttt{patent} entry with a \texttt{location} field. The number is
given in the \texttt{number} field. Note the format of the \texttt{location}
field in the database file. Compare \texttt{laufenberg}, \texttt{sorace}, and
\texttt{kowalik}}
}
@Patent{kowalik,
langid = {french},
author = {Kowalik, F. and Isard, M.},
indextitle = {Estimateur d'un d{\'e}faut de fonctionnement},
title = {Estimateur d'un d{\'e}faut de fonctionnement d'un modulateur en quadrature et
{\'e}tage de modulation l'utilisant},
type = {patreqfr},
number = {9500261},
date = {1995-01-11},
annotation = {This is a \texttt{patent} entry for a French patent request with a full date.
The number is given in the \texttt{number} field. Note the format of the
\texttt{type} and \texttt{date} fields in the database file. Compare
\texttt{almendro}, \texttt{laufenberg}, and \texttt{sorace}}
}
@Patent{laufenberg,
langid = {german},
author = {Laufenberg, Xaver and Eynius, Dominique and Suelzle, Helmut and Usbeck, Stephan
and Spaeth, Matthias and Neuser-Hoffmann, Miriam and Myrzik, Christian and
Schmid, Manfred and Nietfeld, Franz and Thiel, Alexander and Braun, Harald and
Ebner, Norbert},
holder = {{Robert Bosch GmbH} and {Daimler Chrysler AG} and {Bayerische Motoren Werke
AG}},
title = {Elektrische Einrichtung und Betriebsverfahren},
type = {patenteu},
number = {1700367},
location = {countryde and countrywo},
date = {2006-09-13},
annotation = {This is a \texttt{patent} entry with a \texttt{holder} field. Note the format
of the \texttt{type} and \texttt{location} fields in the database file. Compare
\texttt{almendro}, \texttt{sorace}, and \texttt{kowalik}},
abstract = {The invention relates to an electric device comprising a generator, in
particular for use in the vehicle electric system of a motor vehicle and a
controller for controlling the generator voltage. The device is equipped with a
control zone, in which the voltage is controlled and zones, in which the torque
is controlled. The invention also relates to methods for operating a device of
this type.},
file = {http://v3.espacenet.com/textdoc?IDX=EP1700367}
}
@Patent{sorace,
langid = {english},
langidopts = {variant=american},
author = {Sorace, Ronald E. and Reinhardt, Victor S. and Vaughn, Steven A.},
holder = {{Hughes Aircraft Company}},
title = {High-Speed Digital-to-RF Converter},
type = {patentus},
number = {5668842},
date = {1997-09-16},
annotation = {This is a \texttt{patent} entry with a \texttt{holder} field. Note the format
of the \texttt{type} and \texttt{date} fields in the database file. Compare
\texttt{almendro}, \texttt{laufenberg}, and \texttt{kowalik}}
}
@Report{chiu,
langid = {english},
langidopts = {variant=american},
sorttitle = {Hybrid Hierarchical Model of a Multiple Virtual Storage (MVS) Operating
System},
author = {Chiu, Willy W. and Chow, We Min},
indextitle = {Hybrid Hierarchical Model, A},
title = {A Hybrid Hierarchical Model of a Multiple Virtual Storage (MVS) Operating
System},
institution = {IBM and HP and Sun and Sony},
type = {resreport},
number = {RC-6947},
year = {1978},
annotation = {This is a \texttt{report} entry for a research report. Note the format of the
\texttt{type} field in the database file which uses a localization key. The
number of the report is given in the \texttt{number} field. Also note the
\texttt{sorttitle} and \texttt{indextitle} fields}
}
@Report{padhye,
langid = {english},
langidopts = {variant=american},
sorttitle = {A Stochastic Model of TCP Reno Congestion Avoidance and Control},
author = {Padhye, Jitendra and Firoiu, Victor and Towsley, Don},
indextitle = {Stochastic Model of TCP Reno Congestion Avoidance and Control, A},
title = {A Stochastic Model of TCP Reno Congestion Avoidance and Control},
institution = {University of Massachusetts},
type = {techreport},
number = {99-02},
location = {Amherst, Mass.},
year = {1999},
annotation = {This is a \texttt{report} entry for a technical report. Note the format of the
\texttt{type} field in the database file which uses a localization key. The
number of the report is given in the \texttt{number} field. Also note the
\texttt{sorttitle} and \texttt{indextitle} fields},
abstract = {The steady state performance of a bulk transfer TCP flow (i.e. a flow with a
large amount of data to send, such as FTP transfers) may be characterized by
three quantities. The first is the send rate, which is the amount of data sent
by the sender in unit time. The second is the throughput, which is the amount of
data received by the receiver in unit time. Note that the throughput will always
be less than or equal to the send rate due to losses. Finally, the number of
non-duplicate packets received by the receiver in unit time gives us the goodput
of the connection. The goodput is always less than or equal to the throughput,
since the receiver may receive two copies of the same packet due to
retransmissions by the sender. In a previous paper, we presented a simple model
for predicting the steady state send rate of a bulk transfer TCP flow as a
function of loss rate and round trip time. In this paper, we extend that work in
two ways. First, we analyze the performance of bulk transfer TCP flows using
more precise, stochastic analysis. Second, we build upon the previous analysis
to provide both an approximate formula as well as a more accurate stochastic
model for the steady state throughput of a bulk transfer TCP flow.},
file = {ftp://gaia.cs.umass.edu/pub/Padhey99-markov.ps}
}
@Thesis{geer,
options = {useprefix=false},
langid = {english},
langidopts = {variant=british},
author = {de Geer, Ingrid},
title = {Earl, Saint, Bishop, Skald~-- and Music},
subtitle = {The Orkney Earldom of the Twelfth Century. A Musicological Study},
institution = {Uppsala Universitet},
type = {phdthesis},
location = {Uppsala},
year = {1985},
annotation = {This is a typical \texttt{thesis} entry for a PhD thesis. Note the
\texttt{type} field in the database file which uses a localization key. Also
note the format of the printed name and compare the \texttt{useprefix} option in
the \texttt{options} field as well as \texttt{vangennep}}
}
@Thesis{loh,
langid = {english},
langidopts = {variant=american},
author = {Loh, Nin C.},
title = {High-Resolution Micromachined Interferometric Accelerometer},
institution = {Massachusetts Institute of Technology},
type = {mathesis},
location = {Cambridge, Mass.},
year = {1992},
annotation = {This is a typical \texttt{thesis} entry for an MA thesis. Note the
\texttt{type} field in the database file which uses a localization key}
}
@Thesis{Pimentel00,
author = {Pimentel, Jr.,Joseph J.},
title = {Sociolinguistic Reflections of Privatization and Globalization: The {Arabic} of {Egyptian} newspaper advertisements},
school = {University of Michigan},
year = {2000},
}
@Book{luzzatto,
author = {Luzzatto, Moshe Ḥayyim},
title = {ha-Lashon la-Ramḥal: u-vo sheloshah ḥiburim},
location = {Yerushalayim},
publisher = {Makhon Ramḥal},
year = {2000}
}
% Without braces round this prefixed lastname, T::B::N fails to get
% the lastname on Windows for some reason
@BOOK{hasan,
author = {{al-Hasan}, ʿAlī},
editor = {{al-Hasan}, ʿAlī},
translator = {{al-Hasan}, ʿAlī},
title = {Some title},
publisher = {Some press},
year = {2000},
}
% This also tests auto-escaping of some LaTeX specials chars like "%"
@MISC{t1,
KEYWORDS = { primary, something,somethingelse},
AUTHOR = {Bill Brown},
TITLE = {10\% of [100] and 90% of $Normal_2$ | \& # things {$^{3}$}},
YEAR = 1992,
PAGES = {100--}
}
% SORTNAME should not be output
@MISC{t2,
AUTHOR = {Bill Brown},
SORTNAME = {Bill Brown},
TITLE = {Signs of W$\frac{o}{a}$nder},
YEAR = {1994},
PAGES = {100--108}
}
@MISC{tvonb,
AUTHOR = {Terrence von Bobble},
TITLE = {Things},
YEAR = {1997}
}
% fullhash and namehash should be different for these two
@UNPUBLISHED{anon1,
AUTHOR = {AnonymousX},
TITLE = {Title1},
YEAR = {1835},
SHORTAUTHOR = {XAnony},
SHORTTITLE = {Shorttitle},
KEYWORDS = {arc},
PAGES = {111--118},
LANGID = {english},
LANGIDOPTS = {variant=american},
}
@UNPUBLISHED{anon2,
AUTHOR = {AnonymousY},
TITLE = {Title2},
YEAR = {1839},
PAGES = {1176--1276},
SHORTAUTHOR = {YAnony},
SHORTTITLE = {Shorttitle},
KEYWORDS = {arc},
LANGID = {en},
LANGIDOPTS = {variant=american},
}
% Testing uniquelist
@MISC{u1,
AUTHOR = {AAA and BBB and CCC and DDD},
TITLE = {A title},
DATE = {2000}
}
@MISC{u2,
AUTHOR = {AAA and BBB and CCC and DDD and EEE},
TITLE = {A title},
DATE = {2003}
}
% Testing ignore
@UNPUBLISHED{i1,
ABSTRACT = {Some abstract %50 of which is useless},
AUTHOR = {AAA and BBB and CCC and DDD and EEE},
TITLE = {A title},
DATE = {2003},
USERB = {test},
LISTA = {list test},
LISTB = {late and early},
LISTC = {late and early},
LISTD = {æøå},
}
@MISC{i2,
USERB = {Some text & some bad chars},
USERF = {Something to lose in an alsoset},
AUTHOR = {AAA and BBB and CCC},
TITLE = {A title},
DATE = {2005}
}
% Testing per_type and per_entry max/min*names
@MISC{tmn1,
AUTHOR = {AAA and BBB and CCC},
TITLE = {A title},
INSTITUTION = {A and B and C},
DATE = {2005}
}
@BOOK{tmn2,
OPTIONS = {maxalphanames = 2},
AUTHOR = {AAA and BBB and CCC},
TITLE = {A title},
DATE = {2005}
}
@UNPUBLISHED{tmn3,
OPTIONS = {minitems=2},
AUTHOR = {AAA and BBB and CCC},
INSTITUTION = {A and B and C},
TITLE = {A title},
DATE = {2005}
}
@UNPUBLISHED{tmn4,
OPTIONS = {maxbibnames=4},
AUTHOR = {AAA and BBB and CCC},
TITLE = {A title},
DATE = {2005}
}
% Test of duplicate skipping
@UNPUBLISHED{tmn4,
AUTHOR = {ZZZ}
}
% Testing exotic labelnames
@MISC{lne1,
AUTHOR = {AAA and BBB and CCC},
NAMEA = {ZZZ},
TITLE = {A title},
DATE = {2005}
}
% Testing citekey aliases
@MISC{alias1,
AUTHOR = {Alan Alias},
DATE = {2005},
IDS = {alias2, alias3}
}
@MISC{alias2,
AUTHOR = {Alan Alias},
DATE = {2005},
IDS = {alias4}
}
@MISC{alias5,
AUTHOR = {Alan Alias},
DATE = {2005},
IDS = {alias6}
}
% Testing url encoding
@MISC{url1,
AUTHOR = {Alan Alias},
DATE = {2005},
URL = {http://www.something.com/q=á\'{e}\'aŠ},
URLS = {http://www.something.com/q=á\'{e}\'aŠ and http://www.sun.com}
}
@MISC{url2,
AUTHOR = {Alan Alias},
DATE = {2005},
URL = {http://www.something.com/q=one\%20two}
}
@ONLINE{ol1,
AUTHOR = {Oliver Online},
TITLE = {Online1},
NOTE = {A note}
}
@MISC{pages1,
PAGES = {23--24}
}
@MISC{pages2,
PAGES = {23}
}
@MISC{pages3,
PAGES = {{I-II}---{III-IV}}
}
@MISC{pages4,
PAGES = { 3 -- 5 },
}
@MISC{pages5,
PAGES = {42--},
}
@MISC{pages6,
PAGES = {\bibstring{number} 42},
}
@MISC{pages7,
PAGES = {\bibstring{number} 42, 3 --- 6, {I-II}--5},
}
@MISC{pages8,
PAGES = {10-15, ⅥⅠ-ⅻ},
}
@MISC{pages9,
PAGES = {M-1--M-4},
}
% Testing user->style maps
@CONVERSATION{us1,
AUTHOR = {Test},
}
@BOOK{labelstest,
AUTHOR = {AAA and BBB and CCC},
TITLE = {A title},
DATE = {2005-03-02}
}
@BOOK{list1,
LOCATION = {AAA and BBB and others}
}
@BOOK{sn1,
SORTNAME = {ZZZ},
AUTHOR = {AAA}
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/book.bib 0000664 0000000 0000000 00000000313 14731101544 0023702 0 ustar 00root root 0000000 0000000 @BOOK{Bird1987,
title = {Dynamics of Polymeric Liquid},
publisher = {Wiley Edition},
year = {1987},
author = {Bird, R.B. and Armstrong, R.C. and Hassager, O.},
volume = {1},
edition = {2},
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/book_bom.bib 0000664 0000000 0000000 00000000316 14731101544 0024542 0 ustar 00root root 0000000 0000000 @BOOK{Bird1987,
title = {Dynamics of Polymeric Liquid},
publisher = {Wiley Edition},
year = {1987},
author = {Bird, R.B. and Armstrong, R.C. and Hassager, O.},
volume = {1},
edition = {2},
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/book_capital_AND.bib 0000664 0000000 0000000 00000000313 14731101544 0026061 0 ustar 00root root 0000000 0000000 @BOOK{Bird1987,
title = {Dynamics of Polymeric Liquid},
publisher = {Wiley Edition},
year = {1987},
author = {Bird, R.B. and Armstrong, R.C. AND Hassager, O.},
volume = {1},
edition = {2},
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/book_comma_first.bib 0000664 0000000 0000000 00000000326 14731101544 0026271 0 ustar 00root root 0000000 0000000 @book{Bird1987
, author = {Bird, R.B. and Armstrong, R.C. and Hassager, O.}
, edition = {2}
, publisher = {Wiley Edition}
, title = {Dynamics of Polymeric Liquid}
, volume = {1}
, year = {1987}
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/book_output.bib 0000664 0000000 0000000 00000000304 14731101544 0025322 0 ustar 00root root 0000000 0000000 @book{Bird1987,
author = {Bird, R.B. and Armstrong, R.C. and Hassager, O.},
edition = {2},
publisher = {Wiley Edition},
title = {Dynamics of Polymeric Liquid},
volume = {1},
year = {1987}
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/comments_only.bib 0000664 0000000 0000000 00000000140 14731101544 0025634 0 ustar 00root root 0000000 0000000 @comment{ignore this line!}
@Comment{ignore this line too!}
@COMMENT{and ignore this line too!}
python-bibtexparser-1.4.3/bibtexparser/tests/data/comments_only_output.bib 0000664 0000000 0000000 00000000143 14731101544 0027257 0 ustar 00root root 0000000 0000000 @comment{ignore this line!}
@comment{ignore this line too!}
@comment{and ignore this line too!}
python-bibtexparser-1.4.3/bibtexparser/tests/data/comments_percentage.bib 0000664 0000000 0000000 00000000637 14731101544 0027003 0 ustar 00root root 0000000 0000000 @ARTICLE{Cesar2013,
author = {Jean Cesar},
title = {An amazing title},
year = {2013},
volume = {12},
journal = {Nice Journal},
comments = {A comment},
keyword = {keyword1, keyword2},
}
% comment.
@ARTICLE{Baltazar2013,
author = {Jean Baltazar},
title = {An amazing title},
year = {2013},
volume = {12},
journal = {Nice Journal},
comments = {A comment},
keyword = {keyword1, keyword2},
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/comments_percentage_nolastcoma.bib 0000664 0000000 0000000 00000000635 14731101544 0031221 0 ustar 00root root 0000000 0000000 @ARTICLE{Cesar2013,
author = {Jean Cesar},
title = {An amazing title},
year = {2013},
volume = {12},
journal = {Nice Journal},
comments = {A comment},
keyword = {keyword1, keyword2}
}
% comment.
@ARTICLE{Baltazar2013,
author = {Jean Baltazar},
title = {An amazing title},
year = {2013},
volume = {12},
journal = {Nice Journal},
comments = {A comment},
keyword = {keyword1, keyword2}
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/comments_spaces_and_declarations.bib 0000664 0000000 0000000 00000000716 14731101544 0031514 0 ustar 00root root 0000000 0000000 % a comment
@preamble{ "Blah blah" }
Another comment
@string{title = {A great title} }
and one more comment
@ARTICLE{Cesar2013,
author = {Jean César},
title = title,
year = {2013},
month = "jan",
volume = {12},
pages = {12-23},
journal = {Nice Journal},
abstract = {This is an abstract. This line should be long enough to test
multilines... and with a french érudit word},
comments = {A comment},
keyword = {keyword1, keyword2},
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/common_strings.bib 0000664 0000000 0000000 00000000456 14731101544 0026021 0 ustar 00root root 0000000 0000000 @string{jan = {January}}
@string{feb = {February}}
@string{mar = {March}}
@string{apr = {April}}
@string{may = {May}}
@string{jun = {June}}
@string{jul = {July}}
@string{aug = {August}}
@string{sep = {September}}
@string{oct = {October}}
@string{nov = {November}}
@string{dec = {December}}
python-bibtexparser-1.4.3/bibtexparser/tests/data/crossref_cascading.bib 0000664 0000000 0000000 00000000534 14731101544 0026577 0 ustar 00root root 0000000 0000000 % From biber test data : t/tdata/crossrefs.bib
% Kept initial comment but not for our purpose
% Test of dependency calculations for non-cited entries
@BOOK{r1,
DATE = {1911},
CROSSREF = {r2}
}
@BOOK{r2,
DATE = {1911},
CROSSREF = {r3}
}
@BOOK{r3,
DATE = {1911},
CROSSREF = {r4}
}
@BOOK{r4,
DATE = {1911},
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/crossref_cascading_aliases.bib 0000664 0000000 0000000 00000000773 14731101544 0030305 0 ustar 00root root 0000000 0000000 % From biber test data : t/tdata/crossrefs.bib
% Kept initial comment but not for our purpose
% Testing cascading crossrefs
@MVBOOK{ccr1,
IDS = {ccr1alias},
AUTHOR = {Vince Various},
EDITOR = {Edward Editor},
TITLE = {Stuff Concerning Varia},
DATE = {1934}
}
% using alias
@BOOK{ccr2,
TITLE = {Misc etc.},
DATE = {1923},
CROSSREF = {ccr1alias}
}
@INBOOK{ccr3,
TITLE = {Perhaps, Perchance, Possibilities?},
DATE = {1911},
CROSSREF = {ccr2}
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/crossref_cascading_cycle.bib 0000664 0000000 0000000 00000000371 14731101544 0027755 0 ustar 00root root 0000000 0000000 % From biber test data : t/tdata/crossrefs.bib
% Kept initial comment but not for our purpose
% Testing circular refs detection
@BOOK{circ1,
DATE = {1911},
CROSSREF = {circ2}
}
@BOOK{circ2,
DATE = {1911},
CROSSREF = {circ1}
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/crossref_entries.bib 0000664 0000000 0000000 00000006175 14731101544 0026343 0 ustar 00root root 0000000 0000000 % From biber test data : t/tdata/crossrefs.bib
% Kept initial comment but not for our purpose
% Testing mincrossrefs. cr1 and cr2 crossrefs should trigger inclusion of cr_m and also
% the crossref fields in both of them
% Also a test of some aliases
@INBOOK{cr1,
AUTHOR = {Graham Gullam},
TITLE = {Great and Good Graphs},
ORIGDATE = {1955},
ARCHIVEPREFIX = {SomEPrFiX},
PRIMARYCLASS = {SOMECLASS},
CROSSREF = {cr_m}
}
@INBOOK{cr2,
AUTHOR = {Frederick Fumble},
TITLE = {Fabulous Fourier Forms},
SCHOOL = {School},
INSTITUTION = {Institution},
ORIGDATE = {1943},
CROSSREF = {cr_m}
}
@BOOK{cr_m,
EDITOR = {Edgar Erbriss},
TITLE = {Graphs of the Continent},
PUBLISHER = {Grimble},
YEAR = {1974}
}
% Testing explicit cite of crossref parent. Should trigger inclusion of child crossref field
@INBOOK{cr3,
AUTHOR = {Arthur Aptitude},
TITLE = {Arrangements of All Articles},
ORIGDATE = {1934},
ARCHIVEPREFIX = {SomEPrFiX},
EPRINTTYPE = {sometype},
CROSSREF = {crt}
}
@BOOK{crt,
EDITOR = {Mark Monkley},
TITLE = {Beasts of the Burbling Burns},
PUBLISHER = {Rancour},
YEAR = {1996}
}
% Testing mincrossrefs not reached. cr4 is cited, cr5 isn't, therefore mincrossrefs (2) for
% crn not reached
@INBOOK{cr4,
AUTHOR = {Morris Mumble},
TITLE = {Enterprising Entities},
ORIGDATE = {1911},
CROSSREF = {crn}
}
@INBOOK{cr5,
AUTHOR = {Oliver Ordinary},
TITLE = {Questionable Quidities},
ORIGDATE = {1919},
CROSSREF = {crn}
}
@BOOK{crn,
EDITOR = {Jeremy Jermain},
TITLE = {Vanquished, Victor, Vandal},
PUBLISHER = {Pillsbury},
YEAR = {1945}
}
% Testing inheritance of event information
@PROCEEDINGS{cr6i,
AUTHOR = {Spurious Author},
ADDRESS = {Address},
TITLE = {Title of proceeding},
EDITOR = {Editor},
PUBLISHER = {Publisher of proceeding},
EVENTDATE = {2009-08-21/2009-08-24},
EVENTTITLE = {Title of the event},
VENUE = {Location of event},
YEAR = {2009}
}
@INPROCEEDINGS{cr6,
AUTHOR = {Author, Firstname},
CROSSREF = {cr6i},
PAGES = {123--},
TITLE = {Title of inproceeding},
BOOKTITLE = {Manual booktitle},
YEAR = {2009},
}
% Testing inheritance of special fields (booktitle, bookauthor etc.)
@BOOK{cr7i,
AUTHOR = {Brian Bookauthor},
TITLE = {Book Title},
SUBTITLE = {Book Subtitle},
TITLEADDON = {Book Titleaddon},
PUBLISHER = {Publisher of proceeding},
YEAR = {2009},
VERBA = {String},
}
@INBOOK{cr7,
AUTHOR = {Author, Firstname},
CROSSREF = {cr7i},
PAGES = {123--126},
TITLE = {Title of Book bit},
YEAR = {2010}
}
% Testing suppression of default inheritance
@COLLECTION{cr8i,
EDITOR = {Brian Editor},
TITLE = {Book Title},
SUBTITLE = {Book Subtitle},
TITLEADDON = {Book Titleaddon},
PUBLISHER = {Publisher of Collection},
YEAR = {2009}
}
@INCOLLECTION{cr8,
AUTHOR = {Smith, Firstname},
CROSSREF = {cr8i},
PAGES = {1--12},
TITLE = {Title of Collection bit},
YEAR = {2010}
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/crossref_missing_entries.bib 0000664 0000000 0000000 00000000234 14731101544 0030062 0 ustar 00root root 0000000 0000000 % Testing missing crossref
@INBOOK{mcr,
AUTHOR = {Megan Mistrel},
TITLE = {Lumbering Lunatics},
ORIGDATE = {1933},
CROSSREF = {missing1}
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/empty.bib 0000664 0000000 0000000 00000000000 14731101544 0024077 0 ustar 00root root 0000000 0000000 python-bibtexparser-1.4.3/bibtexparser/tests/data/encoding.bib 0000664 0000000 0000000 00000000556 14731101544 0024547 0 ustar 00root root 0000000 0000000 @ARTICLE{Cesar_2013,
author = {Jean César},
title = {An amazing title: à},
year = {2013},
month = "jan",
volume = {12},
pages = {12-23},
journal = {Elémentaire},
abstract = {This is an abstract. This line should be long enough to test
multilines... and with a french érudit word},
comments = {A comment},
keywords = {keyword1, keyword2},
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/features.bib 0000664 0000000 0000000 00000000645 14731101544 0024576 0 ustar 00root root 0000000 0000000 @comment{ignore this line!}
@Comment{ignore this line too!}
@COMMENT{and ignore this line too!}
@preamble{ "\makeatletter" }
@preamble{ "\@ifundefined{url}{\def\url#1{\texttt{#1}}}{}" }
@preamble{ "\makeatother" }
@string{mystring = "Hello"}
@string{myconf = "My International Conference"}
@string{myname = "Doe"}
@inproceedings{mykey,
author = "John",
title = {Cool Stuff},
booktitle = myconf,
year = 2014,
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/features2.bib 0000664 0000000 0000000 00000001101 14731101544 0024644 0 ustar 00root root 0000000 0000000 @string{CoOl = "Cool"}
@string{stuff = "Stuff"}
@string{myTitle = cool # " " # stuff}
@string{int = "International"}
@string{myconf = "My "#int#" Conference"}
@string{myname = "Doe"}
@String {firstname = "John"}
@String {lastname = myname}
@String {domain = "example"}
@String {tld = "com"}
@String {foo = "1--10"}
@String {BaR = FOO}
@String {pages = baR}
@inproceedings{mykey,
author = "John " # mynamE,
title = mytitle,
booktitle = myconf,
pages = pages,
year = 2014,
note = "Email: " # firstname # "." # lastname #
"@" # domain # "." # tld,
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/features_output.bib 0000664 0000000 0000000 00000000700 14731101544 0026206 0 ustar 00root root 0000000 0000000 @comment{ignore this line!}
@comment{ignore this line too!}
@comment{and ignore this line too!}
@preamble{ "\makeatletter" }
@preamble{ "\@ifundefined{url}{\def\url#1{\texttt{#1}}}{}" }
@preamble{ "\makeatother" }
@string{mystring = "Hello"}
@string{myconf = "My International Conference"}
@string{myname = "Doe"}
@inproceedings{mykey,
author = {John},
booktitle = {My International Conference},
title = {Cool Stuff},
year = {2014}
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/fieldname.bib 0000664 0000000 0000000 00000000051 14731101544 0024673 0 ustar 00root root 0000000 0000000 @BOOK{Bird1987,
Dc.Date = {2004-01},
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/multiline_comments.bib 0000664 0000000 0000000 00000000767 14731101544 0026674 0 ustar 00root root 0000000 0000000 @comment{Lorem ipsum dolor sit amet,
consectetur adipisicing elit}
@comment{
Sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.
Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.
Excepteur sint occaecat cupidatat non proident.
,
}
@comment{
Sunt in culpa qui officia deserunt mollit anim id est laborum.
}
@comment{}
python-bibtexparser-1.4.3/bibtexparser/tests/data/multiple_entries.bib 0000664 0000000 0000000 00000001436 14731101544 0026343 0 ustar 00root root 0000000 0000000 @Book{Yablon2005,
Title = {Optical fiber fusion slicing},
Author = {Yablon, A.D.},
Publisher = {Springer},
Year = {2005},
}
@Article{Wigner1938,
Title = {The transition state method},
Author = {Wigner, E.},
Journal = {Trans. Faraday Soc.},
Year = {1938},
Pages = {29--41},
Volume = {34},
Doi = {10.1039/TF9383400029},
ISSN = {0014-7672},
Owner = {fr},
Publisher = {The Royal Society of Chemistry},
}
@Book{Toto3000,
Title = {A title},
Author = {Toto, A and Titi, B},
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/multiple_entries_and_comments.bib 0000664 0000000 0000000 00000001500 14731101544 0031062 0 ustar 00root root 0000000 0000000 @Book{Yablon2005,
Title = {Optical fiber fusion slicing},
Author = {Yablon, A.D.},
Publisher = {Springer},
Year = {2005},
}
@Article{Wigner1938,
Title = {The transition state method},
Author = {Wigner, E.},
Journal = {Trans. Faraday Soc.},
Year = {1938},
Pages = {29--41},
Volume = {34},
Doi = {10.1039/TF9383400029},
ISSN = {0014-7672},
Owner = {fr},
Publisher = {The Royal Society of Chemistry},
}
@Book{Toto3000,
Title = {A title},
Author = {Toto, A and Titi, B},
}
@Comment{}
@Comment{A comment}
python-bibtexparser-1.4.3/bibtexparser/tests/data/multiple_entries_and_comments_output.bib 0000664 0000000 0000000 00000001005 14731101544 0032502 0 ustar 00root root 0000000 0000000 @comment{}
@comment{A comment}
@book{Toto3000,
author = {Toto, A and Titi, B},
title = {A title}
}
@article{Wigner1938,
author = {Wigner, E.},
doi = {10.1039/TF9383400029},
issn = {0014-7672},
journal = {Trans. Faraday Soc.},
owner = {fr},
pages = {29--41},
publisher = {The Royal Society of Chemistry},
title = {The transition state method},
volume = {34},
year = {1938}
}
@book{Yablon2005,
author = {Yablon, A.D.},
publisher = {Springer},
title = {Optical fiber fusion slicing},
year = {2005}
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/multiple_entries_output.bib 0000664 0000000 0000000 00000000744 14731101544 0027764 0 ustar 00root root 0000000 0000000 @book{Toto3000,
author = {Toto, A and Titi, B},
title = {A title}
}
@article{Wigner1938,
author = {Wigner, E.},
doi = {10.1039/TF9383400029},
issn = {0014-7672},
journal = {Trans. Faraday Soc.},
owner = {fr},
pages = {29--41},
publisher = {The Royal Society of Chemistry},
title = {The transition state method},
volume = {34},
year = {1938}
}
@book{Yablon2005,
author = {Yablon, A.D.},
publisher = {Springer},
title = {Optical fiber fusion slicing},
year = {2005}
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/string.bib 0000664 0000000 0000000 00000000451 14731101544 0024261 0 ustar 00root root 0000000 0000000 @STRING{oakland = {Proceedings of the {IEEE} Symposium on Security and Privacy}}
@INPROCEEDINGS{cha:oakland15,
author = {Sang Kil Cha and Maverick Woo and David Brumley},
title = {{Program-Adaptive Mutational Fuzzing}},
booktitle = oakland,
year = {2015},
pages = {725--741}
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/traps.bib 0000664 0000000 0000000 00000000601 14731101544 0024101 0 ustar 00root root 0000000 0000000 @ARTICLE{Laide2013,
author = {Jean Laid{\'e},
Ben Loaeb},
title = {{An} amazing {title}},
year = {2013},
month = "jan",
volume = {n.s.~2},
pages = {12-23},
journal = {Nice Journal},
abstract = {This is an abstract. This line should be long enough to test
multilines... and with a french érudit word},
comments = {A comment},
keywords = {keyword1, keyword2},
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/website.bib 0000664 0000000 0000000 00000000157 14731101544 0024420 0 ustar 00root root 0000000 0000000 @misc{feder2006,
title = {BibTeX},
author = {Alexander Feder},
link = {http://bibtex.org},
year = {2006}
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/wrong.bib 0000664 0000000 0000000 00000000115 14731101544 0024104 0 ustar 00root root 0000000 0000000
@wrong{foo,
author = {wrong}
}
@article{bar,
author = {correct}
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/xref_entries.bib 0000664 0000000 0000000 00000002711 14731101544 0025451 0 ustar 00root root 0000000 0000000 % From biber test data : t/tdata/crossrefs.bib
% Kept initial comment but not for our purpose
% Testing mincrossrefs. xr1 and xr2 xrefs should trigger inclusion of xrm and also
% the xreffields in both of them
@INBOOK{xr1,
AUTHOR = {Zoe Zentrum},
TITLE = {Moods Mildly Modified},
ORIGDATE = {1921},
XREF = {xrm}
}
@INBOOK{xr2,
AUTHOR = {Ian Instant},
TITLE = {Migraines Multiplying Madly},
ORIGDATE = {1926},
XREF = {xrm}
}
@BOOK{xrm,
EDITOR = {Peter Prendergast},
TITLE = {Calligraphy, Calisthenics, Culture},
PUBLISHER = {Mainstream},
YEAR = {1970}
}
% Testing explicit cite of xref parent. Should trigger inclusion of child xref field
@INBOOK{xr3,
AUTHOR = {Norman Normal},
TITLE = {Russian Regalia Revisited},
ORIGDATE = {1923},
XREF = {xrt}
}
@BOOK{xrt,
EDITOR = {Lucy Lunders},
TITLE = {Kings, Cork and Calculation},
PUBLISHER = {Middling},
YEAR = {1977}
}
% Testing mincrossrefs not reached. xr4 is cited, xr5 isn't, therefore mincrossrefs (2) for
% xrn not reached
@INBOOK{xr4,
AUTHOR = {Megan Mistrel},
TITLE = {Lumbering Lunatics},
ORIGDATE = {1933},
XREF = {xrn}
}
@INBOOK{xr5,
AUTHOR = {Kenneth Kunrath},
TITLE = {Dreadful Dreary Days},
ORIGDATE = {1900},
XREF = {xrn}
}
@BOOK{xrn,
EDITOR = {Victor Vivacious},
TITLE = {Examples of Excellent Exaggerations},
PUBLISHER = {Oxford},
YEAR = {1935}
}
python-bibtexparser-1.4.3/bibtexparser/tests/data/xref_missing_entries.bib 0000664 0000000 0000000 00000000367 14731101544 0027207 0 ustar 00root root 0000000 0000000 % From biber test data : t/tdata/crossrefs.bib
% Kept initial comment but not for our purpose
% Testing missing xref
@INBOOK{mxr,
AUTHOR = {Megan Mistrel},
TITLE = {Lumbering Lunatics},
ORIGDATE = {1933},
XREF = {missing1}
}
python-bibtexparser-1.4.3/bibtexparser/tests/test_bibdatabase.py 0000664 0000000 0000000 00000006745 14731101544 0025232 0 ustar 00root root 0000000 0000000 import unittest
from bibtexparser.bibdatabase import (BibDatabase, BibDataString,
BibDataStringExpression)
class TestBibDatabase(unittest.TestCase):
    """Check that BibDatabase accessor methods mirror its attributes."""

    entries = [{'ENTRYTYPE': 'book',
                'year': '1987',
                'edition': '2',
                'publisher': 'Wiley Edition',
                'ID': 'Bird1987',
                'volume': '1',
                'title': 'Dynamics of Polymeric Liquid',
                'author': 'Bird, R.B. and Armstrong, R.C. and Hassager, O.'
                }]

    def test_entries_list_method(self):
        """get_entry_list() must return the same list as ``entries``."""
        database = BibDatabase()
        database.entries = self.entries
        self.assertEqual(database.get_entry_list(), database.entries)

    def test_entries_dict_prop(self):
        """The ``entries_dict`` property must match get_entry_dict()."""
        database = BibDatabase()
        database.entries = self.entries
        self.assertEqual(database.get_entry_dict(), database.entries_dict)
class TestBibDataString(unittest.TestCase):
    """Tests for BibDataString: a lazy reference to an @string definition."""

    def setUp(self):
        self.bd = BibDatabase()

    def test_name_is_lower(self):
        # String names are case-insensitive in BibTeX; the name is
        # stored lowercased.
        bds = BibDataString(self.bd, 'nAmE')
        self.assertTrue(bds.name.islower())

    def test_raises_KeyError(self):
        # Resolving a string that was never defined must raise KeyError.
        bds = BibDataString(self.bd, 'name')
        with self.assertRaises(KeyError):
            bds.get_value()

    def test_get_value(self):
        """get_value() resolves the name against the database."""
        bds = BibDataString(self.bd, 'name')
        self.bd.strings['name'] = 'value'
        self.assertEqual(bds.get_value(), 'value')

    def test_expand_string(self):
        # expand_string leaves a plain str untouched but expands a
        # BibDataString to its value.
        bds = BibDataString(self.bd, 'name')
        self.bd.strings['name'] = 'value'
        self.assertEqual(BibDataString.expand_string('name'), 'name')
        self.assertEqual(BibDataString.expand_string(bds), 'value')

    def test_get_value_string_is_defined_by_expression(self):
        # A string may itself be defined by an expression; resolution
        # must recurse through it.
        self.bd.strings['name'] = 'string'
        exp = BibDataStringExpression(['this is a ',
                                       BibDataString(self.bd, 'name')])
        self.bd.strings['exp'] = exp
        bds = BibDataString(self.bd, 'exp')
        self.assertEqual(bds.get_value(), 'this is a string')

    def test_strings_are_equal_iif_name_is_equal(self):
        # Equality is based on the referenced name only, not on the
        # expanded value: 'a' and 'b' both expand to 'foo' but differ.
        # (Fixed: the original repeated the same assertNotEqual twice.)
        self.bd.strings['a'] = 'foo'
        self.bd.strings['b'] = 'foo'
        a1 = BibDataString(self.bd, 'a')
        a2 = BibDataString(self.bd, 'a')
        b = BibDataString(self.bd, 'b')
        self.assertEqual(a1, a2)
        self.assertNotEqual(a1, b)      # same value, different name
        self.assertNotEqual(a1, "foo")  # never equal to a plain string
class TestBibDataStringExpression(unittest.TestCase):
    """Tests for BibDataStringExpression (concatenations of strings)."""

    def setUp(self):
        self.bd = BibDatabase()
        self.bd.strings['name'] = 'value'
        self.bds = BibDataString(self.bd, 'name')

    def test_get_value(self):
        # Each part is expanded and the results are concatenated.
        expression = BibDataStringExpression(
            ["The string has value: ", self.bds, '.'])
        self.assertEqual('The string has value: value.',
                         expression.get_value())

    def test_raises_KeyError(self):
        # A single undefined part makes the whole expression unresolvable.
        undefined = BibDataString(self.bd, 'unknown')
        expression = BibDataStringExpression([undefined, self.bds, 'text'])
        with self.assertRaises(KeyError):
            expression.get_value()

    def test_equations_are_equal_iif_same(self):
        # Expressions compare part by part, never by expanded value.
        a1 = BibDataString(self.bd, 'a')
        a2 = BibDataString(self.bd, 'a')
        expression = BibDataStringExpression([a1, self.bds, 'text'])
        self.assertEqual(BibDataStringExpression([a2, self.bds, 'text']),
                         expression)
        self.assertNotEqual(BibDataStringExpression(['foo', self.bds, 'text']),
                            expression)
        self.assertNotEqual('foovaluetext', expression)
# Allow running this test module directly (``python test_bibdatabase.py``).
if __name__ == '__main__':
    unittest.main()
python-bibtexparser-1.4.3/bibtexparser/tests/test_bibtex_strings.py 0000664 0000000 0000000 00000011303 14731101544 0026021 0 ustar 00root root 0000000 0000000 import io
import unittest
import codecs
import bibtexparser
from bibtexparser.bibdatabase import (BibDatabase, BibDataString,
BibDataStringExpression)
from bibtexparser.bparser import BibTexParser
from bibtexparser.bwriter import BibTexWriter
from collections import OrderedDict
class TestStringParse(unittest.TestCase):
    """Parsing of @string definitions from BibTeX input."""

    @staticmethod
    def _load(bibtex_str):
        """Parse ``bibtex_str`` with a parser that loads no common strings."""
        return bibtexparser.loads(bibtex_str,
                                  BibTexParser(common_strings=False))

    def test_single_string_parse_count(self):
        database = self._load('@string{name1 = "value1"}\n\n')
        self.assertEqual(1, len(database.strings))

    def test_multiple_string_parse_count(self):
        database = self._load(
            '@string{name1 = "value1"}\n\n@string{name2 = "value2"}\n\n')
        self.assertEqual(2, len(database.strings))

    def test_single_string_parse(self):
        database = self._load('@string{name1 = "value1"}\n\n')
        self.assertEqual({'name1': 'value1'}, database.strings)

    def test_multiple_string_parse(self):
        # Definition order must be preserved.
        database = self._load(
            '@string{name1 = "value1"}\n\n@string{name2 = "value2"}\n\n')
        expected = OrderedDict([('name1', 'value1'), ('name2', 'value2')])
        self.assertEqual(expected, database.strings)

    def test_string_braces(self):
        # Braces inside a string value must be kept verbatim.
        with codecs.open('bibtexparser/tests/data/string.bib', 'r', 'utf-8') as bibfile:
            bib = BibTexParser(bibfile.read())
        expected = [{'author': 'Sang Kil Cha and Maverick Woo and David Brumley',
                     'ID': 'cha:oakland15',
                     'year': '2015',
                     'booktitle': 'Proceedings of the {IEEE} Symposium on Security and Privacy',
                     'title': '{Program-Adaptive Mutational Fuzzing}',
                     'ENTRYTYPE': 'inproceedings',
                     'pages': '725--741'
                     }]
        self.assertEqual(expected, bib.get_entry_list())

    def test_string_parse_accept_chars(self):
        # String names may contain characters such as '-' and ':'.
        database = self._load(
            '@string{pub-ieee-std = {IEEE}}\n\n'
            '@string{pub-ieee-std:adr = {New York, NY, USA}}')
        self.assertEqual(2, len(database.strings))
        expected = OrderedDict([('pub-ieee-std', 'IEEE'),
                                ('pub-ieee-std:adr', 'New York, NY, USA')])
        self.assertEqual(expected, database.strings)
class TestStringWrite(unittest.TestCase):
    """Serialization of @string definitions back to BibTeX."""

    def test_single_string_write(self):
        database = BibDatabase()
        database.strings['name1'] = 'value1'
        self.assertEqual('@string{name1 = {value1}}\n\n',
                         bibtexparser.dumps(database))

    def test_multiple_string_write(self):
        database = BibDatabase()
        # Insertion order must be preserved in the output.
        database.strings['name1'] = 'value1'
        database.strings['name2'] = 'value2'
        self.assertEqual(
            '@string{name1 = {value1}}\n\n@string{name2 = {value2}}\n\n',
            bibtexparser.dumps(database))

    def test_ignore_common_strings(self):
        # The pre-defined common strings are not written out by default.
        database = BibDatabase()
        database.load_common_strings()
        self.assertEqual('', bibtexparser.dumps(database))

    def test_ignore_common_strings_only_if_not_overloaded(self):
        # A redefined common string must still be written.
        database = BibDatabase()
        database.load_common_strings()
        database.strings['jan'] = 'Janvier'
        self.assertEqual('@string{jan = {Janvier}}\n\n',
                         bibtexparser.dumps(database))

    def test_write_common_strings(self):
        # With write_common_strings=True every common string is dumped.
        database = BibDatabase()
        database.load_common_strings()
        writer = BibTexWriter(write_common_strings=True)
        result = bibtexparser.dumps(database, writer=writer)
        with io.open('bibtexparser/tests/data/common_strings.bib') as f:
            self.assertEqual(f.read(), result)

    def test_write_dependent_strings(self):
        # Strings defined via expressions are written as concatenations
        # using the '#' operator.
        database = BibDatabase()
        database.strings['title'] = 'Mr'
        expr = BibDataStringExpression(
            [BibDataString(database, 'title'), 'Smith'])
        database.strings['name'] = expr
        self.assertEqual(
            '@string{title = {Mr}}\n\n@string{name = title # {Smith}}\n\n',
            bibtexparser.dumps(database))
python-bibtexparser-1.4.3/bibtexparser/tests/test_bibtexexpression.py 0000664 0000000 0000000 00000007547 14731101544 0026407 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
from bibtexparser.bibtexexpression import BibtexExpression
class TestBibtexExpression(unittest.TestCase):
    """Unit tests for the low-level pyparsing grammar in BibtexExpression.

    The individual sub-expressions (``entry``, ``string_def``,
    ``preamble_decl``, ``main_expression``) are driven directly rather
    than going through BibTexParser.  Left byte-identical: the
    ``addParseAction`` tests mutate the shared grammar object, so the
    exact statement order matters.
    """

    def setUp(self):
        self.expr = BibtexExpression()

    def test_minimal(self):
        # A bare entry yields named results: EntryType, Key, Fields.
        result = self.expr.entry.parseString('@journal{key, name = 123 }')
        self.assertEqual(result.get('EntryType'), 'journal')
        self.assertEqual(result.get('Key'), 'key')
        self.assertEqual(result.get('Fields'), {'name': '123'})

    def test_capital_type(self):
        # Entry-type case is preserved at this level (normalized later).
        result = self.expr.entry.parseString('@JOURNAL{key, name = 123 }')
        self.assertEqual(result.get('EntryType'), 'JOURNAL')

    def test_capital_key(self):
        # Citation-key case is preserved at this level.
        result = self.expr.entry.parseString('@journal{KEY, name = 123 }')
        self.assertEqual(result.get('Key'), 'KEY')

    def test_braced(self):
        # Brace-delimited values lose their outer braces.
        result = self.expr.entry.parseString('@journal{key, name = {abc} }')
        self.assertEqual(result.get('Fields'), {'name': 'abc'})

    def test_braced_with_new_line(self):
        # Newlines inside braces are kept verbatim.
        result = self.expr.entry.parseString(
            '@journal{key, name = {abc\ndef} }')
        self.assertEqual(result.get('Fields'), {'name': 'abc\ndef'})

    def test_braced_unicode(self):
        result = self.expr.entry.parseString(
            '@journal{key, name = {àbcđéf} }')
        self.assertEqual(result.get('Fields'), {'name': 'àbcđéf'})

    def test_quoted(self):
        # Quote-delimited values lose their outer quotes.
        result = self.expr.entry.parseString('@journal{key, name = "abc" }')
        self.assertEqual(result.get('Fields'), {'name': 'abc'})

    def test_quoted_with_new_line(self):
        result = self.expr.entry.parseString(
            '@journal{key, name = "abc\ndef" }')
        self.assertEqual(result.get('Fields'), {'name': 'abc\ndef'})

    def test_quoted_with_unicode(self):
        result = self.expr.entry.parseString(
            '@journal{key, name = "àbcđéf" }')
        self.assertEqual(result.get('Fields'), {'name': 'àbcđéf'})

    def test_entry_declaration_after_space(self):
        # Leading whitespace before '@' is accepted.
        self.expr.entry.parseString(' @journal{key, name = {abcd}}')

    def test_entry_declaration_no_key(self):
        # An entry without a citation key must not parse.
        with self.assertRaises(self.expr.ParseException):
            self.expr.entry.parseString('@misc{name = {abcd}}')

    def test_entry_declaration_no_key_new_line(self):
        with self.assertRaises(self.expr.ParseException):
            self.expr.entry.parseString('@misc{\n name = {abcd}}')

    def test_entry_declaration_no_key_comma(self):
        with self.assertRaises(self.expr.ParseException):
            self.expr.entry.parseString('@misc{, \nname = {abcd}}')

    def test_entry_declaration_no_key_keyvalue_without_space(self):
        with self.assertRaises(self.expr.ParseException):
            self.expr.entry.parseString('@misc{\nname=aaa}')

    def test_entry_declaration_key_with_whitespace(self):
        # Whitespace is not allowed inside a citation key.
        with self.assertRaises(self.expr.ParseException):
            self.expr.entry.parseString('@misc{ xx yy, \n name = aaa}')

    def test_string_declaration_after_space(self):
        self.expr.string_def.parseString(' @string{ name = {abcd}}')

    def test_preamble_declaration_after_space(self):
        self.expr.preamble_decl.parseString(' @preamble{ "blah blah " }')

    def test_declaration_after_space(self):
        # A parse action registered on ``entry`` fires for entries found
        # through the top-level ``main_expression`` as well.
        keys = []
        self.expr.entry.addParseAction(
            lambda s, l, t: keys.append(t.get('Key'))
        )
        self.expr.main_expression.parseString(' @journal{key, name = {abcd}}')
        self.assertEqual(keys, ['key'])

    def test_declaration_after_space_and_comment(self):
        # Implicit (non-@) comment lines before an entry are skipped.
        keys = []
        self.expr.entry.addParseAction(
            lambda s, l, t: keys.append(t.get('Key'))
        )
        self.expr.main_expression.parseString(
            '% Implicit comment\n @article{key, name={abcd}}'
        )
        self.assertEqual(keys, ['key'])
python-bibtexparser-1.4.3/bibtexparser/tests/test_bibtexparser.py 0000664 0000000 0000000 00000011043 14731101544 0025466 0 ustar 00root root 0000000 0000000 import unittest
import bibtexparser
from bibtexparser.bparser import BibTexParser
from tempfile import TemporaryFile
class TestBibtexParserParserMethods(unittest.TestCase):
    """End-to-end parsing through the various entry points of the API."""

    input_file_path = 'bibtexparser/tests/data/book.bib'
    input_bom_file_path = 'bibtexparser/tests/data/book_bom.bib'
    entries_expected = [{'ENTRYTYPE': 'book',
                         'year': '1987',
                         'edition': '2',
                         'publisher': 'Wiley Edition',
                         'ID': 'Bird1987',
                         'volume': '1',
                         'title': 'Dynamics of Polymeric Liquid',
                         'author': 'Bird, R.B. and Armstrong, R.C. and Hassager, O.',
                         }]

    def _check(self, bibtex_database):
        """Assert that ``bibtex_database`` holds exactly the expected entries."""
        self.assertEqual(bibtex_database.entries, self.entries_expected)

    def test_parse_immediately(self):
        # Passing the string to the constructor parses right away.
        with open(self.input_file_path) as handle:
            content = handle.read()
        self._check(BibTexParser(content))

    def test_parse_str(self):
        with open(self.input_file_path) as handle:
            content = handle.read()
        self._check(BibTexParser().parse(content))

    def test_parse_bom_str(self):
        # A leading byte-order mark in decoded text must be ignored.
        with open(self.input_bom_file_path, encoding="utf-8") as handle:
            content = handle.read()
        self._check(BibTexParser().parse(content))

    def test_parse_empty(self):
        # Empty input (string or file) yields a database with no entries.
        parser = BibTexParser()
        self.assertEqual(parser.parse('').entries, [])
        with open('bibtexparser/tests/data/empty.bib') as handle:
            self.assertEqual(parser.parse_file(handle).entries, [])

    def test_parse_bom_bytes(self):
        # Raw bytes carrying a BOM are accepted as well.
        with open(self.input_bom_file_path, 'rb') as handle:
            content = handle.read()
        self._check(BibTexParser().parse(content))

    def test_parse_file(self):
        with open(self.input_file_path) as handle:
            self._check(BibTexParser().parse_file(handle))

    def test_parse_str_module(self):
        # Module-level ``loads`` entry point.
        with open(self.input_file_path) as handle:
            content = handle.read()
        self._check(bibtexparser.loads(content))

    def test_parse_file_module(self):
        # Module-level ``load`` entry point.
        with open(self.input_file_path) as handle:
            self._check(bibtexparser.load(handle))
class TestBibtexparserWriteMethods(unittest.TestCase):
    """Round-trip checks: load book.bib, then compare dumps()/dump() output."""

    input_file_path = 'bibtexparser/tests/data/book.bib'
    expected = \
"""@book{Bird1987,
 author = {Bird, R.B. and Armstrong, R.C. and Hassager, O.},
 edition = {2},
 publisher = {Wiley Edition},
 title = {Dynamics of Polymeric Liquid},
 volume = {1},
 year = {1987}
}
"""

    def _load_database(self):
        # Helper: parse the reference file into a BibDatabase.
        with open(self.input_file_path) as handle:
            return bibtexparser.load(handle)

    def test_write_str(self):
        """dumps() serializes the database to the expected string."""
        serialized = bibtexparser.dumps(self._load_database())
        self.assertEqual(serialized, self.expected)

    def test_write_file(self):
        """dump() writes the same serialization to a file object."""
        database = self._load_database()
        with TemporaryFile(mode='w+') as out_file:
            bibtexparser.dump(database, out_file)
            out_file.seek(0)
            self.assertEqual(out_file.read(), self.expected)
class TestBibtexparserFieldNames(unittest.TestCase):
    """Field names containing a dot (e.g. Dublin Core 'dc.date') survive parsing."""

    input_file_path = 'bibtexparser/tests/data/fieldname.bib'
    entries_expected = [{'ENTRYTYPE': 'book',
                         'ID': 'Bird1987',
                         'dc.date': '2004-01'
                         }]

    def test_parse_immediately(self):
        """The dotted field name is kept verbatim in the parsed entry."""
        with open(self.input_file_path) as handle:
            database = BibTexParser(handle.read())
        self.assertEqual(database.entries, self.entries_expected)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
python-bibtexparser-1.4.3/bibtexparser/tests/test_bibtexwriter.py 0000664 0000000 0000000 00000042276 14731101544 0025522 0 ustar 00root root 0000000 0000000 # coding: utf-8
import tempfile
import unittest
import bibtexparser
from bibtexparser.bwriter import BibTexWriter, SortingStrategy
from bibtexparser.bibdatabase import BibDatabase
class TestBibTexWriter(unittest.TestCase):
    """Exercise BibTexWriter output options.

    Covers: content selection (entries/comments), custom indent,
    field-name alignment (boolean and integer forms), entry separator,
    field display order with its sorting strategies, and multiline
    value alignment alone and combined with indent/alignment.
    """

    def test_content_entries_only(self):
        """writer.contents = ['entries'] suppresses @comment blocks."""
        with open('bibtexparser/tests/data/multiple_entries_and_comments.bib') as bibtex_file:
            bib_database = bibtexparser.load(bibtex_file)
        writer = BibTexWriter()
        writer.contents = ['entries']
        result = bibtexparser.dumps(bib_database, writer)
        expected = \
"""@book{Toto3000,
 author = {Toto, A and Titi, B},
 title = {A title}
}

@article{Wigner1938,
 author = {Wigner, E.},
 doi = {10.1039/TF9383400029},
 issn = {0014-7672},
 journal = {Trans. Faraday Soc.},
 owner = {fr},
 pages = {29--41},
 publisher = {The Royal Society of Chemistry},
 title = {The transition state method},
 volume = {34},
 year = {1938}
}

@book{Yablon2005,
 author = {Yablon, A.D.},
 publisher = {Springer},
 title = {Optical fiber fusion slicing},
 year = {2005}
}
"""
        self.assertEqual(result, expected)

    def test_content_comment_only(self):
        """writer.contents = ['comments'] emits only the @comment blocks."""
        with open('bibtexparser/tests/data/multiple_entries_and_comments.bib') as bibtex_file:
            bib_database = bibtexparser.load(bibtex_file)
        writer = BibTexWriter()
        writer.contents = ['comments']
        result = bibtexparser.dumps(bib_database, writer)
        expected = \
"""@comment{}

@comment{A comment}
"""
        self.assertEqual(result, expected)

    def test_indent(self):
        """A custom indent string prefixes every field line."""
        bib_database = BibDatabase()
        bib_database.entries = [{'ID': 'abc123',
                                 'ENTRYTYPE': 'book',
                                 'author': 'test'}]
        writer = BibTexWriter()
        writer.indent = '  '
        result = bibtexparser.dumps(bib_database, writer)
        expected = \
"""@book{abc123,
  author = {test}
}
"""
        self.assertEqual(result, expected)

    def test_align_bool(self):
        """align_values=True pads field names to the longest name per entry."""
        bib_database = BibDatabase()
        bib_database.entries = [{'ID': 'abc123',
                                 'ENTRYTYPE': 'book',
                                 'author': 'test',
                                 'thisisaverylongkey': 'longvalue'}]
        writer = BibTexWriter()
        writer.align_values = True
        result = bibtexparser.dumps(bib_database, writer)
        expected = \
"""@book{abc123,
 author             = {test},
 thisisaverylongkey = {longvalue}
}
"""
        self.assertEqual(result, expected)

        # A long entry ID must not influence the field-name alignment.
        bib_database = BibDatabase()
        bib_database.entries = [{'ID': 'veryveryverylongID',
                                 'ENTRYTYPE': 'book',
                                 'a': 'test',
                                 'bb': 'longvalue'}]
        writer = BibTexWriter()
        writer.align_values = True
        result = bibtexparser.dumps(bib_database, writer)
        expected = \
"""@book{veryveryverylongID,
 a  = {test},
 bb = {longvalue}
}
"""
        self.assertEqual(result, expected)

        # When dumping several entries, alignment is computed per entry.
        with open('bibtexparser/tests/data/multiple_entries_and_comments.bib') as bibtex_file:
            bib_database = bibtexparser.load(bibtex_file)
        writer = BibTexWriter()
        writer.contents = ['entries']
        writer.align_values = True
        result = bibtexparser.dumps(bib_database, writer)
        expected = \
"""@book{Toto3000,
 author = {Toto, A and Titi, B},
 title  = {A title}
}

@article{Wigner1938,
 author    = {Wigner, E.},
 doi       = {10.1039/TF9383400029},
 issn      = {0014-7672},
 journal   = {Trans. Faraday Soc.},
 owner     = {fr},
 pages     = {29--41},
 publisher = {The Royal Society of Chemistry},
 title     = {The transition state method},
 volume    = {34},
 year      = {1938}
}

@book{Yablon2005,
 author    = {Yablon, A.D.},
 publisher = {Springer},
 title     = {Optical fiber fusion slicing},
 year      = {2005}
}
"""
        self.assertEqual(result, expected)

    def test_align_int(self):
        """align_values=<int> pads names to a fixed width; out-of-range values degrade gracefully."""
        bib_database = BibDatabase()
        bib_database.entries = [{'ID': 'abc123',
                                 'ENTRYTYPE': 'book',
                                 'author': 'test',
                                 'thisisaverylongkey': 'longvalue'}]
        # Negative value should have no effect
        writer = BibTexWriter()
        writer.align_values = -20
        result = bibtexparser.dumps(bib_database, writer)
        expected = \
"""@book{abc123,
 author = {test},
 thisisaverylongkey = {longvalue}
}
"""
        self.assertEqual(result, expected)

        # Value smaller than longest field name should only impact the "short" field names
        writer = BibTexWriter()
        writer.align_values = 10
        result = bibtexparser.dumps(bib_database, writer)
        expected = \
"""@book{abc123,
 author     = {test},
 thisisaverylongkey = {longvalue}
}
"""
        self.assertEqual(result, expected)

        with open('bibtexparser/tests/data/multiple_entries_and_comments.bib') as bibtex_file:
            bib_database = bibtexparser.load(bibtex_file)
        writer = BibTexWriter()
        writer.contents = ['entries']
        writer.align_values = 15
        result = bibtexparser.dumps(bib_database, writer)
        expected = \
"""@book{Toto3000,
 author          = {Toto, A and Titi, B},
 title           = {A title}
}

@article{Wigner1938,
 author          = {Wigner, E.},
 doi             = {10.1039/TF9383400029},
 issn            = {0014-7672},
 journal         = {Trans. Faraday Soc.},
 owner           = {fr},
 pages           = {29--41},
 publisher       = {The Royal Society of Chemistry},
 title           = {The transition state method},
 volume          = {34},
 year            = {1938}
}

@book{Yablon2005,
 author          = {Yablon, A.D.},
 publisher       = {Springer},
 title           = {Optical fiber fusion slicing},
 year            = {2005}
}
"""
        self.assertEqual(result, expected)

    def test_entry_separator(self):
        """An empty entry_separator removes the blank line after an entry."""
        bib_database = BibDatabase()
        bib_database.entries = [{'ID': 'abc123',
                                 'ENTRYTYPE': 'book',
                                 'author': 'test'}]
        writer = BibTexWriter()
        writer.entry_separator = ''
        result = bibtexparser.dumps(bib_database, writer)
        expected = \
"""@book{abc123,
 author = {test}
}
"""
        self.assertEqual(result, expected)

    def test_display_order(self):
        """display_order lists fields first; remaining fields follow alphabetically."""
        with open('bibtexparser/tests/data/multiple_entries_and_comments.bib') as bibtex_file:
            bib_database = bibtexparser.load(bibtex_file)
        writer = BibTexWriter()
        writer.contents = ['entries']
        writer.display_order = ['year', 'publisher', 'title']
        result = bibtexparser.dumps(bib_database, writer)
        expected = \
"""@book{Toto3000,
 title = {A title},
 author = {Toto, A and Titi, B}
}

@article{Wigner1938,
 year = {1938},
 publisher = {The Royal Society of Chemistry},
 title = {The transition state method},
 author = {Wigner, E.},
 doi = {10.1039/TF9383400029},
 issn = {0014-7672},
 journal = {Trans. Faraday Soc.},
 owner = {fr},
 pages = {29--41},
 volume = {34}
}

@book{Yablon2005,
 year = {2005},
 publisher = {Springer},
 title = {Optical fiber fusion slicing},
 author = {Yablon, A.D.}
}
"""
        self.assertEqual(result, expected)

    def test_align_multiline_values(self):
        """Continuation lines of multiline values line up under the value start."""
        with open('bibtexparser/tests/data/article_multilines.bib') as bibtex_file:
            bib_database = bibtexparser.load(bibtex_file)
        writer = BibTexWriter()
        writer.align_multiline_values = True
        writer.display_order = ["author", "title", "year", "journal", "abstract", "comments", "keyword"]
        result = bibtexparser.dumps(bib_database, writer)
        expected = \
"""@article{Cesar2013,
 author = {Jean César},
 title = {A mutline line title is very amazing. It should be
          long enough to test multilines... with two lines or should we
          even test three lines... What an amazing title.},
 year = {2013},
 journal = {Nice Journal},
 abstract = {This is an abstract. This line should be long enough to test
             multilines... and with a french érudit word},
 comments = {A comment},
 keyword = {keyword1, keyword2,
            multiline-keyword1, multiline-keyword2}
}
"""
        self.assertEqual(result, expected)

    def test_align_multiline_values_with_align(self):
        """Multiline alignment combined with field-name alignment."""
        with open('bibtexparser/tests/data/article_multilines.bib') as bibtex_file:
            bib_database = bibtexparser.load(bibtex_file)
        writer = BibTexWriter()
        writer.align_multiline_values = True
        writer.align_values = True
        writer.display_order = ["author", "title", "year", "journal", "abstract", "comments", "keyword"]
        result = bibtexparser.dumps(bib_database, writer)
        expected = \
"""@article{Cesar2013,
 author   = {Jean César},
 title    = {A mutline line title is very amazing. It should be
             long enough to test multilines... with two lines or should we
             even test three lines... What an amazing title.},
 year     = {2013},
 journal  = {Nice Journal},
 abstract = {This is an abstract. This line should be long enough to test
             multilines... and with a french érudit word},
 comments = {A comment},
 keyword  = {keyword1, keyword2,
             multiline-keyword1, multiline-keyword2}
}
"""
        self.assertEqual(result, expected)

    def test_display_order_sorting(self):
        """display_order_sorting controls ordering of fields NOT in display_order."""
        bib_database = BibDatabase()
        bib_database.entries = [{'ID': 'abc123',
                                 'ENTRYTYPE': 'book',
                                 'b': 'test2',
                                 'a': 'test1',
                                 'd': 'test4',
                                 'c': 'test3',
                                 'e': 'test5'}]
        # Only 'a' is not ordered. As it's only one element, strategy should not matter.
        for strategy in SortingStrategy:
            writer = BibTexWriter()
            writer.display_order = ['b', 'c', 'd', 'e', 'a']
            writer.display_order_sorting = strategy
            result = bibtexparser.dumps(bib_database, writer)
            expected = \
"""@book{abc123,
 b = {test2},
 c = {test3},
 d = {test4},
 e = {test5},
 a = {test1}
}
"""
            self.assertEqual(result, expected)

        # Test ALPHABETICAL_ASC strategy
        writer = BibTexWriter()
        writer.display_order = ['c']
        writer.display_order_sorting = SortingStrategy.ALPHABETICAL_ASC
        result = bibtexparser.dumps(bib_database, writer)
        expected = \
"""@book{abc123,
 c = {test3},
 a = {test1},
 b = {test2},
 d = {test4},
 e = {test5}
}
"""
        self.assertEqual(result, expected)

        # Test ALPHABETICAL_DESC strategy
        writer = BibTexWriter()
        writer.display_order = ['c']
        writer.display_order_sorting = SortingStrategy.ALPHABETICAL_DESC
        result = bibtexparser.dumps(bib_database, writer)
        expected = \
"""@book{abc123,
 c = {test3},
 e = {test5},
 d = {test4},
 b = {test2},
 a = {test1}
}
"""
        self.assertEqual(result, expected)

        # Test PRESERVE strategy
        writer = BibTexWriter()
        writer.display_order = ['c']
        writer.display_order_sorting = SortingStrategy.PRESERVE
        result = bibtexparser.dumps(bib_database, writer)
        expected = \
"""@book{abc123,
 c = {test3},
 b = {test2},
 a = {test1},
 d = {test4},
 e = {test5}
}
"""
        self.assertEqual(result, expected)

    def test_align_multiline_values_with_indent(self):
        """Multiline alignment also accounts for a custom indent width."""
        with open('bibtexparser/tests/data/article_multilines.bib') as bibtex_file:
            bib_database = bibtexparser.load(bibtex_file)
        writer = BibTexWriter()
        writer.align_multiline_values = True
        writer.indent = ' ' * 3
        writer.display_order = ["author", "title", "year", "journal", "abstract", "comments", "keyword"]
        result = bibtexparser.dumps(bib_database, writer)
        expected = \
"""@article{Cesar2013,
   author = {Jean César},
   title = {A mutline line title is very amazing. It should be
            long enough to test multilines... with two lines or should we
            even test three lines... What an amazing title.},
   year = {2013},
   journal = {Nice Journal},
   abstract = {This is an abstract. This line should be long enough to test
               multilines... and with a french érudit word},
   comments = {A comment},
   keyword = {keyword1, keyword2,
              multiline-keyword1, multiline-keyword2}
}
"""
        self.assertEqual(result, expected)

    def test_align_multiline_values_with_align_with_indent(self):
        """Multiline alignment with both custom indent and field-name alignment."""
        with open('bibtexparser/tests/data/article_multilines.bib') as bibtex_file:
            bib_database = bibtexparser.load(bibtex_file)
        writer = BibTexWriter()
        writer.align_multiline_values = True
        writer.indent = ' ' * 3
        writer.align_values = True
        writer.display_order = ["author", "title", "year", "journal", "abstract", "comments", "keyword"]
        result = bibtexparser.dumps(bib_database, writer)
        expected = \
"""@article{Cesar2013,
   author   = {Jean César},
   title    = {A mutline line title is very amazing. It should be
               long enough to test multilines... with two lines or should we
               even test three lines... What an amazing title.},
   year     = {2013},
   journal  = {Nice Journal},
   abstract = {This is an abstract. This line should be long enough to test
               multilines... and with a french érudit word},
   comments = {A comment},
   keyword  = {keyword1, keyword2,
               multiline-keyword1, multiline-keyword2}
}
"""
        self.assertEqual(result, expected)
class TestEntrySorting(unittest.TestCase):
    """Check BibTexWriter.order_entries_by: default ID sort, disabled sort,
    single- and multi-key sorts, and behavior when a sort field is missing."""

    # Deliberately NOT in alphabetical order so sorting is observable.
    bib_database = BibDatabase()
    bib_database.entries = [{'ID': 'b',
                             'ENTRYTYPE': 'article'},
                            {'ID': 'c',
                             'ENTRYTYPE': 'book'},
                            {'ID': 'a',
                             'ENTRYTYPE': 'book'}]

    def test_sort_default(self):
        """The default writer sorts entries by ID."""
        result = bibtexparser.dumps(self.bib_database)
        expected = "@book{a\n}\n\n@article{b\n}\n\n@book{c\n}\n"
        self.assertEqual(result, expected)

    def test_sort_none(self):
        """order_entries_by=None preserves the input order."""
        writer = BibTexWriter()
        writer.order_entries_by = None
        result = bibtexparser.dumps(self.bib_database, writer)
        expected = "@article{b\n}\n\n@book{c\n}\n\n@book{a\n}\n"
        self.assertEqual(result, expected)

    def test_sort_id(self):
        """Explicit sort on the ID field."""
        writer = BibTexWriter()
        writer.order_entries_by = ('ID', )
        result = bibtexparser.dumps(self.bib_database, writer)
        expected = "@book{a\n}\n\n@article{b\n}\n\n@book{c\n}\n"
        self.assertEqual(result, expected)

    def test_sort_type(self):
        """Sort on ENTRYTYPE only; ties keep input order."""
        writer = BibTexWriter()
        writer.order_entries_by = ('ENTRYTYPE', )
        result = bibtexparser.dumps(self.bib_database, writer)
        expected = "@article{b\n}\n\n@book{c\n}\n\n@book{a\n}\n"
        self.assertEqual(result, expected)

    def test_sort_type_id(self):
        """Sort on ENTRYTYPE, then ID as tie-breaker."""
        writer = BibTexWriter()
        writer.order_entries_by = ('ENTRYTYPE', 'ID')
        result = bibtexparser.dumps(self.bib_database, writer)
        expected = "@article{b\n}\n\n@book{a\n}\n\n@book{c\n}\n"
        self.assertEqual(result, expected)

    def test_sort_missing_field(self):
        """Entries lacking the sort field sort before those that have it."""
        bib_database = BibDatabase()
        bib_database.entries = [{'ID': 'b',
                                 'ENTRYTYPE': 'article',
                                 'year': '2000'},
                                {'ID': 'c',
                                 'ENTRYTYPE': 'book',
                                 'year': '2010'},
                                {'ID': 'a',
                                 'ENTRYTYPE': 'book'}]
        writer = BibTexWriter()
        writer.order_entries_by = ('year', )
        result = bibtexparser.dumps(bib_database, writer)
        expected = "@book{a\n}\n\n@article{b,\n year = {2000}\n}\n\n@book{c,\n year = {2010}\n}\n"
        self.assertEqual(result, expected)

    def test_unicode_problems(self):
        """Dumping an entry with unicode and LaTeX escapes must not raise."""
        # See #51
        bibtex = """
        @article{Mesa-Gresa2013,
            abstract = {During a 4-week period half the mice (n = 16) were exposed to EE and the other half (n = 16) remained in a standard environment (SE). Aggr. Behav. 9999:XX-XX, 2013. © 2013 Wiley Periodicals, Inc.},
            author = {Mesa-Gresa, Patricia and P\'{e}rez-Martinez, Asunci\'{o}n and Redolat, Rosa},
            doi = {10.1002/ab.21481},
            file = {:Users/jscholz/Documents/mendeley/Mesa-Gresa, P\'{e}rez-Martinez, Redolat - 2013 - Environmental Enrichment Improves Novel Object Recognition and Enhances Agonistic Behavior.pdf:pdf},
            issn = {1098-2337},
            journal = {Aggressive behavior},
            month = "apr",
            number = {April},
            pages = {269--279},
            pmid = {23588702},
            title = {{Environmental Enrichment Improves Novel Object Recognition and Enhances Agonistic Behavior in Male Mice.}},
            url = {http://www.ncbi.nlm.nih.gov/pubmed/23588702},
            volume = {39},
            year = {2013}
        }
        """
        bibdb = bibtexparser.loads(bibtex)
        with tempfile.TemporaryFile(mode='w+', encoding="utf-8") as bibtex_file:
            bibtexparser.dump(bibdb, bibtex_file)
            # No exception should be raised
python-bibtexparser-1.4.3/bibtexparser/tests/test_bparser.py 0000664 0000000 0000000 00000075742 14731101544 0024452 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import unittest
import codecs
import warnings
import bibtexparser
from bibtexparser.bparser import BibTexParser
from bibtexparser.bibdatabase import (COMMON_STRINGS, BibDataStringExpression)
from bibtexparser.customization import *
from bibtexparser import customization
def customizations_unicode(record):
    """Apply every field-specific customization, then convert to unicode.

    :param record: a record
    :returns: -- customized record
    """
    # NOTE(review): `type` here is assumed to be the customization helper
    # brought in by the star import above (shadowing the builtin) — confirm.
    pipeline = (type, author, editor, journal, keyword, link,
                page_double_hyphen, doi, convert_to_unicode)
    for customize in pipeline:
        record = customize(record)
    return record
def customizations_latex(record):
    """Homogenize to LaTeX encoding, then apply field-specific customizations.

    :param record: a record
    :returns: -- customized record
    """
    # NOTE(review): `type` here is assumed to be the customization helper
    # brought in by the star import above (shadowing the builtin) — confirm.
    pipeline = (homogenize_latex_encoding, type, author, editor, journal,
                keyword, link, page_double_hyphen, doi)
    for customize in pipeline:
        record = customize(record)
    return record
class TestBibtexParserList(unittest.TestCase):
def test_empty_string(self):
    """An empty source yields empty entries, comments, preambles and strings."""
    parsed = BibTexParser("", common_strings=False)
    self.assertEqual(parsed.entries, [])
    self.assertEqual(parsed.comments, [])
    self.assertEqual(parsed.preambles, [])
    self.assertEqual(parsed.strings, {})
###########
# ARTICLE
###########
# test also that list and dict are equivalent
def test_article(self):
    """Parse a single article; list and dict views must agree."""
    with codecs.open('bibtexparser/tests/data/article.bib', 'r', 'utf-8') as bibfile:
        bib = BibTexParser(bibfile.read())
    res_list = bib.get_entry_list()
    res_dict = bib.get_entry_dict()
    expected_list = [{'keyword': 'keyword1, keyword2',
                      'ENTRYTYPE': 'article',
                      'abstract': 'This is an abstract. This line should be long enough to test\nmultilines... and with a french érudit word',
                      'year': '2013',
                      'journal': 'Nice Journal',
                      'ID': 'Cesar2013',
                      'pages': '12-23',
                      'title': 'An amazing title',
                      'comments': 'A comment',
                      'author': 'Jean César',
                      'volume': '12',
                      'month': 'jan'
                      }]
    expected_dict = {'Cesar2013': {'keyword': 'keyword1, keyword2',
                                   'ENTRYTYPE': 'article',
                                   'abstract': 'This is an abstract. This line should be long enough to test\nmultilines... and with a french érudit word',
                                   'year': '2013',
                                   'journal': 'Nice Journal',
                                   'ID': 'Cesar2013',
                                   'pages': '12-23',
                                   'title': 'An amazing title',
                                   'comments': 'A comment',
                                   'author': 'Jean César',
                                   'volume': '12',
                                   'month': 'jan'
                                   }}
    self.assertEqual(res_list, expected_list)
    self.assertEqual(res_dict, expected_dict)
def test_article_annotation(self):
    """Annotation fields ('author+an') are parsed like any other field."""
    with codecs.open('bibtexparser/tests/data/article_with_annotation.bib', 'r', 'utf-8') as bibfile:
        bib = BibTexParser(bibfile.read())
    res_list = bib.get_entry_list()
    res_dict = bib.get_entry_dict()
    expected_list = [{'keyword': 'keyword1, keyword2',
                      'ENTRYTYPE': 'article',
                      'abstract': 'This is an abstract. This line should be long enough to test\nmultilines... and with a french érudit word',
                      'year': '2013',
                      'journal': 'Nice Journal',
                      'ID': 'Cesar2013',
                      'pages': '12-23',
                      'title': 'An amazing title',
                      'comments': 'A comment',
                      'author': 'Jean César',
                      'author+an': '1=highlight',
                      'volume': '12',
                      'month': 'jan'
                      }]
    expected_dict = {'Cesar2013': {'keyword': 'keyword1, keyword2',
                                   'ENTRYTYPE': 'article',
                                   'abstract': 'This is an abstract. This line should be long enough to test\nmultilines... and with a french érudit word',
                                   'year': '2013',
                                   'journal': 'Nice Journal',
                                   'ID': 'Cesar2013',
                                   'pages': '12-23',
                                   'title': 'An amazing title',
                                   'comments': 'A comment',
                                   'author': 'Jean César',
                                   'author+an': '1=highlight',
                                   'volume': '12',
                                   'month': 'jan'
                                   }}
    self.assertEqual(res_list, expected_list)
    self.assertEqual(res_dict, expected_dict)
def test_article_start_bom(self):
    """A file starting with a BOM parses to the same article."""
    with codecs.open('bibtexparser/tests/data/article_start_with_bom.bib', 'r', 'utf-8') as bibfile:
        bib = BibTexParser(bibfile.read())
    res = bib.get_entry_list()
    expected = [{'abstract': 'This is an abstract. This line should be long enough to test\nmultilines... and with a french érudit word',
                 'ENTRYTYPE': 'article',
                 'pages': '12-23',
                 'volume': '12',
                 'ID': 'Cesar2013',
                 'year': '2013',
                 'author': 'Jean César',
                 'journal': 'Nice Journal',
                 'comments': 'A comment',
                 'month': 'jan',
                 'keyword': 'keyword1, keyword2',
                 'title': 'An amazing title'
                 }]
    self.assertEqual(res, expected)
def test_article_cust_unicode(self):
    """The unicode customization splits authors/keywords and normalizes fields."""
    with codecs.open('bibtexparser/tests/data/article.bib', 'r', 'utf-8') as bibfile:
        bib = BibTexParser(bibfile.read(), customization=customizations_unicode)
    res = bib.get_entry_list()
    expected = [{'abstract': 'This is an abstract. This line should be long enough to test\nmultilines... and with a french érudit word',
                 'ENTRYTYPE': 'article',
                 'pages': '12--23',
                 'volume': '12',
                 'ID': 'Cesar2013',
                 'year': '2013',
                 'author': ['César, Jean'],
                 'journal': {'ID': 'NiceJournal', 'name': 'Nice Journal'},
                 'comments': 'A comment',
                 'month': 'jan',
                 'keyword': ['keyword1', 'keyword2'],
                 'title': 'An amazing title'
                 }]
    self.assertEqual(res, expected)
def test_article_cust_latex(self):
    """The latex customization converts accented characters to LaTeX escapes."""
    with codecs.open('bibtexparser/tests/data/article.bib', 'r', 'utf-8') as bibfile:
        bib = BibTexParser(bibfile.read(), customization=customizations_latex)
    res = bib.get_entry_list()
    expected = [{'abstract': 'This is an abstract. This line should be long enough to test\nmultilines... and with a french {\\\'e}rudit word',
                 'ENTRYTYPE': 'article',
                 'pages': '12--23',
                 'volume': '12',
                 'ID': 'Cesar2013',
                 'year': '2013',
                 'author': ['C{\\\'e}sar, Jean'],
                 'journal': {'ID': 'NiceJournal', 'name': 'Nice Journal'},
                 'comments': 'A comment',
                 'month': 'jan',
                 'keyword': ['keyword1', 'keyword2'],
                 'title': '{A}n amazing title'
                 }]
    self.assertEqual(res, expected)
def test_article_cust_order(self):
    """The order in which these customizations are applied must not matter."""

    def apply_then_author_last(record):
        # page_double_hyphen / homogenize first, author last
        record = customization.page_double_hyphen(record)
        record = customization.homogenize_latex_encoding(record)
        record = customization.author(record)
        return record

    def apply_author_first(record):
        # author first, then page_double_hyphen / homogenize
        record = customization.author(record)
        record = customization.page_double_hyphen(record)
        record = customization.homogenize_latex_encoding(record)
        return record

    with open('bibtexparser/tests/data/multiple_entries.bib', 'r') as bibfile:
        first = BibTexParser(bibfile.read(), customization=apply_then_author_last)
    with open('bibtexparser/tests/data/multiple_entries.bib', 'r') as bibfile:
        second = BibTexParser(bibfile.read(), customization=apply_author_first)
    self.assertEqual(first.get_entry_list(), second.get_entry_list())
def test_article_missing_coma(self):
    """Entries with a missing comma after a field are still recovered."""
    with open('bibtexparser/tests/data/article_missing_coma.bib', 'r') as bibfile:
        bib = BibTexParser(bibfile.read())
    res = bib.get_entry_list()
    expected = [{'ENTRYTYPE': 'article',
                 'journal': 'Nice Journal',
                 'volume': '12',
                 'ID': 'Cesar2013',
                 'year': '2013',
                 'author': 'Jean Cesar',
                 'comments': 'A comment',
                 'keyword': 'keyword1, keyword2',
                 'title': 'An amazing title'
                 },
                {'ENTRYTYPE': 'article',
                 'journal': 'Nice Journal',
                 'volume': '12',
                 'ID': 'Baltazar2013',
                 'year': '2013',
                 'author': 'Jean Baltazar',
                 'comments': 'A comment',
                 'keyword': 'keyword1, keyword2',
                 'title': 'An amazing title'
                 },
                {'ENTRYTYPE': 'article',
                 'journal': 'Nice Journal',
                 'volume': '12',
                 'ID': 'Aimar2013',
                 'year': '2013',
                 'author': 'Jean Aimar',
                 'comments': 'A comment',
                 'keyword': 'keyword1, keyword2',
                 'title': 'An amazing title',
                 'month': 'january'
                 },
                {'ENTRYTYPE': 'article',
                 'journal': 'Nice Journal',
                 'volume': '12',
                 'ID': 'Doute2013',
                 'year': '2013',
                 'author': 'Jean Doute',
                 'comments': 'A comment',
                 'keyword': 'keyword1, keyword2',
                 'title': 'An amazing title'
                 }]
    self.assertEqual(res, expected)
def test_oneline(self):
    """Entries written on a single physical line parse correctly."""
    with open('bibtexparser/tests/data/article_oneline.bib', 'r') as bibfile:
        bib = BibTexParser(bibfile.read())
    res = bib.get_entry_list()
    expected = [{'ENTRYTYPE': 'article',
                 'journal': 'Nice Journal',
                 'volume': '12',
                 'ID': 'Cesar2013',
                 'year': '2013',
                 'author': 'Jean Cesar',
                 'comments': 'A comment',
                 'keyword': 'keyword1, keyword2',
                 'title': 'An amazing title'
                 },
                {'ENTRYTYPE': 'article',
                 'journal': 'Nice Journal',
                 'volume': '12',
                 'ID': 'Baltazar2013',
                 'year': '2013',
                 'author': 'Jean Baltazar',
                 'comments': 'A comment',
                 'keyword': 'keyword1, keyword2',
                 'title': 'An amazing title'
                 }]
    self.assertEqual(res, expected)
def test_article_start_with_whitespace(self):
    """Leading whitespace before an entry does not prevent parsing."""
    with open('bibtexparser/tests/data/article_start_with_whitespace.bib', 'r') as bibfile:
        parsed = BibTexParser(bibfile.read())
    entry_count = len(parsed.get_entry_list())
    self.assertEqual(entry_count, 2)
def test_article_comma_first(self):
    """Comma-first field formatting is supported."""
    with open('bibtexparser/tests/data/article_comma_first.bib', 'r') as bibfile:
        bib = BibTexParser(bibfile.read())
    res = bib.get_entry_list()
    expected = [{'ENTRYTYPE': 'article',
                 'journal': 'Nice Journal',
                 'volume': '12',
                 'ID': 'Cesar2013',
                 'year': '2013',
                 'author': 'Jean Cesar',
                 'comments': 'A comment',
                 'keyword': 'keyword1, keyword2',
                 'title': 'An amazing title'
                 },
                {'ENTRYTYPE': 'article',
                 'journal': 'Nice Journal',
                 'volume': '12',
                 'ID': 'Baltazar2013',
                 'year': '2013',
                 'author': 'Jean Baltazar',
                 'comments': 'A comment',
                 'keyword': 'keyword1, keyword2',
                 'title': 'An amazing title'
                 }]
    self.assertEqual(res, expected)
def test_article_no_braces(self):
    """Field values given without braces are parsed correctly."""
    with open('bibtexparser/tests/data/article_no_braces.bib', 'r',
              encoding="utf-8") as bibfile:
        bib = BibTexParser(bibfile.read())
    res = bib.get_entry_list()
    expected = [{'ENTRYTYPE': 'article',
                 'journal': 'Nice Journal',
                 'volume': '12',
                 'pages': '12-23',
                 'ID': 'Cesar2013',
                 'year': '2013',
                 'month': 'jan',
                 'author': 'Jean C{\\\'e}sar{\\\"u}',
                 'comments': 'A comment',
                 'keyword': 'keyword1, keyword2',
                 'title': 'An amazing title',
                 'abstract': "This is an abstract. This line should be long enough to test\nmultilines... and with a french érudit word",
                 },
                ]
    self.assertEqual(res, expected)
def test_article_special_characters(self):
    """LaTeX escape sequences in values are preserved verbatim."""
    with open('bibtexparser/tests/data/article_with_special_characters.bib', 'r',
              encoding="utf-8") as bibfile:
        bib = BibTexParser(bibfile.read())
    res = bib.get_entry_list()
    expected = [{'ENTRYTYPE': 'article',
                 'journal': 'Nice Journal',
                 'volume': '12',
                 'pages': '12-23',
                 'ID': 'Cesar2013',
                 'year': '2013',
                 'month': 'jan',
                 'author': 'Jean C{\\\'e}sar{\\\"u}',
                 'comments': 'A comment',
                 'keyword': 'keyword1, keyword2',
                 'title': 'An amazing title',
                 'abstract': "This is an abstract. This line should be long enough to test\nmultilines... and with a french érudit word",
                 },
                ]
    self.assertEqual(res, expected)
def test_article_protection_braces(self):
    """Protection braces around values are kept in the parsed result."""
    with open('bibtexparser/tests/data/article_with_protection_braces.bib', 'r',
              encoding="utf-8") as bibfile:
        bib = BibTexParser(bibfile.read())
    res = bib.get_entry_list()
    expected = [{'ENTRYTYPE': 'article',
                 'journal': '{Nice Journal}',
                 'volume': '12',
                 'pages': '12-23',
                 'ID': 'Cesar2013',
                 'year': '2013',
                 'month': 'jan',
                 'author': 'Jean César',
                 'comments': 'A comment',
                 'keyword': 'keyword1, keyword2',
                 'title': '{An amazing title}',
                 'abstract': "This is an abstract. This line should be long enough to test\nmultilines... and with a french érudit word",
                 },
                ]
    self.assertEqual(res, expected)
###########
# BOOK
###########
def test_book(self):
    """Parse a plain book entry without customization."""
    with open('bibtexparser/tests/data/book.bib', 'r') as bibfile:
        bib = BibTexParser(bibfile.read())
    res = bib.get_entry_list()
    expected = [{'ENTRYTYPE': 'book',
                 'year': '1987',
                 'edition': '2',
                 'publisher': 'Wiley Edition',
                 'ID': 'Bird1987',
                 'volume': '1',
                 'title': 'Dynamics of Polymeric Liquid',
                 'author': 'Bird, R.B. and Armstrong, R.C. and Hassager, O.'
                 }]
    self.assertEqual(res, expected)
def test_book_cust_unicode(self):
    """A capitalized 'AND' between authors still splits the author list."""
    with open('bibtexparser/tests/data/book_capital_AND.bib', 'r') as bibfile:
        bib = BibTexParser(bibfile.read(), customization=customizations_unicode)
    res = bib.get_entry_list()
    expected = [{'ENTRYTYPE': 'book',
                 'year': '1987',
                 'edition': '2',
                 'publisher': 'Wiley Edition',
                 'ID': 'Bird1987',
                 'volume': '1',
                 'title': 'Dynamics of Polymeric Liquid',
                 'author': ['Bird, R.B.', 'Armstrong, R.C.', 'Hassager, O.']
                 }]
    self.assertEqual(res, expected)
def test_book_cust_latex(self):
    """The latex customization protects title capitalization with braces."""
    with open('bibtexparser/tests/data/book.bib', 'r') as bibfile:
        bib = BibTexParser(bibfile.read(), customization=customizations_latex)
    res = bib.get_entry_list()
    expected = [{'ENTRYTYPE': 'book',
                 'year': '1987',
                 'edition': '2',
                 'publisher': 'Wiley Edition',
                 'ID': 'Bird1987',
                 'volume': '1',
                 'title': '{D}ynamics of {P}olymeric {L}iquid',
                 'author': ['Bird, R.B.', 'Armstrong, R.C.', 'Hassager, O.']
                 }]
    self.assertEqual(res, expected)
def test_traps(self):
    """Tricky inputs (braces in titles, newlines in authors, '~') parse intact."""
    with codecs.open('bibtexparser/tests/data/traps.bib', 'r', 'utf-8') as bibfile:
        bib = BibTexParser(bibfile.read())
    res = bib.get_entry_list()
    expected = [{'keywords': 'keyword1, keyword2',
                 'ENTRYTYPE': 'article',
                 'abstract': 'This is an abstract. This line should be long enough to test\nmultilines... and with a french érudit word',
                 'year': '2013',
                 'journal': 'Nice Journal',
                 'ID': 'Laide2013',
                 'pages': '12-23',
                 'title': '{An} amazing {title}',
                 'comments': 'A comment',
                 'author': 'Jean Laid{\\\'e},\nBen Loaeb',
                 'volume': 'n.s.~2',
                 'month': 'jan'
                 }]
    self.assertEqual(res, expected)
def test_features(self):
    """Parse the basic feature-showcase file."""
    with open('bibtexparser/tests/data/features.bib', 'r') as bibfile:
        bib = BibTexParser(bibfile.read())
    res = bib.get_entry_list()
    expected = [{'ENTRYTYPE': 'inproceedings',
                 'year': '2014',
                 'title': 'Cool Stuff',
                 'author': 'John',
                 'ID': 'mykey',
                 'booktitle': 'My International Conference',
                 }]
    self.assertEqual(res, expected)
def test_features2(self):
    """Parse the extended feature-showcase file (notes, page ranges)."""
    with open('bibtexparser/tests/data/features2.bib', 'r') as bibfile:
        bib = BibTexParser(bibfile.read())
    res = bib.get_entry_list()
    expected = [{'ENTRYTYPE': 'inproceedings',
                 'year': '2014',
                 'title': 'Cool Stuff',
                 'author': 'John Doe',
                 'ID': 'mykey',
                 'booktitle': 'My International Conference',
                 'note': 'Email: John.Doe@example.com',
                 'pages': '1--10',
                 }]
    self.assertEqual(res, expected)
def test_nonstandard_ignored(self):
    """Non-standard entry types are dropped by default."""
    with open('bibtexparser/tests/data/wrong.bib', 'r') as bibfile:
        bib = BibTexParser(bibfile.read())
    res = bib.get_entry_list()
    expected = [{'author': 'correct',
                 'ID': 'bar',
                 'ENTRYTYPE': 'article'}]
    self.assertEqual(res, expected)
def test_nonstandard_not_ignored(self):
    """With ignore_nonstandard_types=False the extra entry is kept too."""
    with open('bibtexparser/tests/data/wrong.bib', 'r') as bibfile:
        parsed = BibTexParser(bibfile.read(), ignore_nonstandard_types=False)
    self.assertEqual(len(parsed.get_entry_list()), 2)
def test_encoding(self):
    """Accented characters in fields survive a UTF-8 round trip."""
    with codecs.open('bibtexparser/tests/data/encoding.bib', 'r', 'utf-8') as bibfile:
        bib = BibTexParser(bibfile.read())
    res = bib.get_entry_list()
    expected = [{'keywords': 'keyword1, keyword2',
                 'ENTRYTYPE': 'article',
                 'abstract': 'This is an abstract. This line should be long enough to test\nmultilines... and with a french érudit word',
                 'year': '2013',
                 'journal': 'Elémentaire',
                 'ID': 'Cesar_2013',
                 'pages': '12-23',
                 'title': 'An amazing title: à',
                 'comments': 'A comment',
                 'author': 'Jean César',
                 'volume': '12',
                 'month': 'jan'
                 }]
    self.assertEqual(res, expected)
def test_encoding_with_homogenize(self):
    """homogenize_latex_encoding converts accented characters to escapes."""
    with codecs.open('bibtexparser/tests/data/encoding.bib', 'r', 'utf-8') as bibfile:
        bib = BibTexParser(bibfile.read(), customization=homogenize_latex_encoding)
    res = bib.get_entry_list()
    expected = [{'keywords': 'keyword1, keyword2',
                 'ENTRYTYPE': 'article',
                 'abstract': 'This is an abstract. This line should be long enough to test\nmultilines... and with a french {\\\'e}rudit word',
                 'year': '2013',
                 'journal': 'El{\\\'e}mentaire',
                 'ID': 'Cesar_2013',
                 'pages': '12-23',
                 'title': '{A}n amazing title: {\\`a}',
                 'comments': 'A comment',
                 'author': 'Jean C{\\\'e}sar',
                 'volume': '12',
                 'month': 'jan'
                 }]
    self.assertEqual(res, expected)
def test_field_name_with_dash_underscore(self):
    """Field names containing underscores or dashes are accepted."""
    with open('bibtexparser/tests/data/article_field_name_with_underscore.bib', 'r',
              encoding="utf-8") as bibfile:
        bib = BibTexParser(bibfile.read())
    res = bib.get_entry_list()
    expected = [{
        'keyword': 'keyword1, keyword2',
        'ENTRYTYPE': 'article',
        'year': '2013',
        'journal': 'Nice Journal',
        'ID': 'Cesar2013',
        'pages': '12-23',
        'title': 'An amazing title',
        'comments': 'A comment',
        'author': 'Jean César',
        'volume': '12',
        # Non-standard field names are preserved as-is.
        'strange_field_name': 'val',
        'strange-field-name2': 'val2',
    }]
    self.assertEqual(res, expected)
def test_string_definitions(self):
    """@string macros from the file are merged with the builtin ones."""
    with open('bibtexparser/tests/data/article_with_strings.bib', 'r',
              encoding="utf-8") as bibfile:
        bib = BibTexParser(bibfile.read(), common_strings=True)
    # The parser's string table must hold the common (month) macros
    # plus the three macros declared in the file itself.
    expected = dict(COMMON_STRINGS)
    expected['nice_journal'] = 'Nice Journal'
    expected['jean'] = 'Jean'
    expected['cesar'] = "César"
    self.assertEqual(dict(bib.strings), expected)
def test_string_is_interpolated(self):
    """With interpolate_strings=True macros are replaced by their values."""
    with open('bibtexparser/tests/data/article_with_strings.bib', 'r',
              encoding="utf-8") as bibfile:
        bib = BibTexParser(bibfile.read(), common_strings=True,
                           interpolate_strings=True)
    res = bib.get_entry_list()
    # 'month', 'journal' and 'author' use @string macros in the file;
    # here they appear fully expanded as plain strings.
    expected = [{
        'keyword': 'keyword1, keyword2',
        'ENTRYTYPE': 'article',
        'year': '2013',
        'month': 'January',
        'journal': 'Nice Journal',
        'ID': 'Cesar2013',
        'pages': '12-23',
        'title': 'An amazing title',
        'comments': 'A comment',
        'author': 'Jean César',
        'volume': '12',
    }]
    self.assertEqual(res, expected)
def test_string_is_not_interpolated(self):
    """Without interpolation, macro fields stay as expression objects."""
    with open('bibtexparser/tests/data/article_with_strings.bib', 'r',
              encoding="utf-8") as bibfile:
        bib = BibTexParser(bibfile.read(), common_strings=True,
                           interpolate_strings=False)
    entry = bib.get_entry_list()[0]
    # (field, number of expression parts, fully interpolated value)
    cases = [
        ('month', 1, 'January'),
        ('author', 3, 'Jean César'),
        ('journal', 1, 'Nice Journal'),
    ]
    for field, nparts, value in cases:
        self.assertIsInstance(entry[field], BibDataStringExpression)
        self.assertEqual(len(entry[field].expr), nparts)
        # get_value() still resolves the macros on demand.
        self.assertEqual(entry[field].get_value(), value)
def test_comments_spaces_and_declarations(self):
    """Comments, blank lines and @preamble can be mixed with entries."""
    with codecs.open(
            'bibtexparser/tests/data/comments_spaces_and_declarations.bib',
            'r', 'utf-8') as bibfile:
        bib = BibTexParser(bibfile.read())
    res_dict = bib.get_entry_dict()
    expected_dict = {'Cesar2013': {
        'keyword': 'keyword1, keyword2',
        'ENTRYTYPE': 'article',
        'abstract': 'This is an abstract. This line should be long enough to test\nmultilines... and with a french érudit word',
        'year': '2013',
        'journal': 'Nice Journal',
        'ID': 'Cesar2013',
        'pages': '12-23',
        'title': 'A great title',
        'comments': 'A comment',
        'author': 'Jean César',
        'volume': '12',
        'month': 'jan'
    }}
    self.assertEqual(res_dict, expected_dict)
    # The @preamble declaration must be collected as well.
    self.assertEqual(bib.preambles, ["Blah blah"])
def test_does_not_fail_on_non_bibtex_with_partial(self):
    """A malformed pseudo-entry must not raise, even with partial=False.

    The unparsable @misc block is salvaged verbatim as a comment and
    produces no entries, preambles or strings.
    """
    bibraw = '''@misc{this looks,
like = a = bibtex file but
, is not a real one!
'''
    parser = BibTexParser(common_strings=False)
    bib = parser.parse(bibraw, partial=False)
    self.assertEqual(bib.entries, [])
    self.assertEqual(bib.preambles, [])
    self.assertEqual(bib.strings, {})
    self.assertEqual(bib.comments, [
        '@misc{this looks,\n'
        ' like = a = bibtex file but\n'
        ' , is not a real one!'])
def test_no_citekey_parsed_as_comment(self):
    """An entry lacking a citation key is demoted to a comment."""
    bib = BibTexParser('@BOOK{, title = "bla"}', common_strings=False)
    # Nothing must be parsed as a real database element...
    for attribute, empty in (('entries', []), ('preambles', []), ('strings', {})):
        self.assertEqual(getattr(bib, attribute), empty)
    # ...the raw text is kept as a comment instead.
    self.assertEqual(bib.comments, ['@BOOK{, title = "bla"}'])
def test_parsing_just_once_not_raising_warnings_with_default_settings(self):
    """A single load() on a fresh parser must not emit any warning."""
    parser = BibTexParser()
    with open('bibtexparser/tests/data/article_comma_normal_single.bib',
              'r', encoding='utf-8') as bibfile:
        with warnings.catch_warnings(record=True) as warning:
            warnings.simplefilter("always")
            bibtexparser.load(bibfile, parser)
            # unittest assertion instead of a bare ``assert``: it is not
            # stripped under ``python -O`` and reports a useful message.
            self.assertEqual(len(warning), 0)
def test_parsing_twice_raise_warnings_with_default_settings(self):
    """Re-using one parser for a second load() must emit a warning."""
    parser = BibTexParser()
    with open('bibtexparser/tests/data/article_comma_normal_single.bib',
              'r', encoding='utf-8') as bibfile_first, \
            open('bibtexparser/tests/data/article_comma_normal_multiple.bib', 'r', encoding='utf-8') \
            as bibfile_second:
        with warnings.catch_warnings(record=True) as warning:
            warnings.simplefilter("always")
            bibtexparser.load(bibfile_first, parser)
            bibtexparser.load(bibfile_second, parser)
            # unittest assertion instead of a bare ``assert``: it is not
            # stripped under ``python -O`` and reports a useful message.
            self.assertNotEqual(len(warning), 0)
def test_parsing_three_times_raise_warnings_only_once_with_default_settings(self):
    """The multiple-parse warning is emitted once, on the second load only."""
    parser = BibTexParser()
    with open('bibtexparser/tests/data/article_comma_normal_single.bib',
              'r', encoding='utf-8') as bibfile_first, \
            open('bibtexparser/tests/data/article_comma_normal_multiple.bib', 'r', encoding='utf-8') \
            as bibfile_second, \
            open('bibtexparser/tests/data/article.bib', 'r', encoding='utf-8') as bibfile_third:
        with warnings.catch_warnings(record=True) as warning:
            warnings.simplefilter("always")
            bibtexparser.load(bibfile_first, parser)
            bibtexparser.load(bibfile_second, parser)
            bibtexparser.load(bibfile_third, parser)
            # unittest assertion instead of a bare ``assert``: it is not
            # stripped under ``python -O`` and reports a useful message.
            self.assertEqual(len(warning), 1)
def test_parsing_three_times_not_raising_a_warning_if_expect_multiple_parse_is_true(self):
    """Opting in via ``expect_multiple_parse`` silences the re-use warning."""
    parser = BibTexParser()
    parser.expect_multiple_parse = True
    with open('bibtexparser/tests/data/article_comma_normal_single.bib',
              'r', encoding='utf-8') as bibfile_first, \
            open('bibtexparser/tests/data/article_comma_normal_multiple.bib', 'r', encoding='utf-8') \
            as bibfile_second, \
            open('bibtexparser/tests/data/article.bib', 'r', encoding='utf-8') as bibfile_third:
        with warnings.catch_warnings(record=True) as warning:
            warnings.simplefilter("always")
            bibtexparser.load(bibfile_first, parser)
            bibtexparser.load(bibfile_second, parser)
            bibtexparser.load(bibfile_third, parser)
            # unittest assertion instead of a bare ``assert``: it is not
            # stripped under ``python -O`` and reports a useful message.
            self.assertEqual(len(warning), 0)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
python-bibtexparser-1.4.3/bibtexparser/tests/test_bwriter.py 0000664 0000000 0000000 00000007765 14731101544 0024472 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Francois Boulogne
# License:
from __future__ import unicode_literals
import unittest
import os
import io
from bibtexparser.bparser import BibTexParser
from bibtexparser.bwriter import BibTexWriter, to_bibtex
from bibtexparser.customization import author
def _data_path(filename):
    """Return the path of *filename* inside the test-data directory."""
    data_dir = 'bibtexparser/tests/data'
    return os.path.join(data_dir, filename)
class TestBibtexWriterList(unittest.TestCase):
    """Round-trip tests: parse a .bib file, write it back, compare output.

    All data files are read with an explicit UTF-8 encoding; several of
    them contain non-ASCII characters, and relying on the locale's
    default encoding made these tests platform-dependent.
    """

    @staticmethod
    def _read(filename):
        """Read a UTF-8 test-data file and return its content."""
        with io.open(_data_path(filename), 'r', encoding='utf-8') as bibfile:
            return bibfile.read()

    def test_article(self):
        """Default writer output for a single article."""
        bib = BibTexParser(self._read('article.bib'))
        expected = self._read('article_output.bib')
        result = to_bibtex(bib)
        self.maxDiff = None
        self.assertEqual(expected, result)

    def test_article_with_annotation(self):
        """Annotated fields (field+annotation) survive the round trip."""
        bib = BibTexParser(self._read('article_with_annotation.bib'))
        expected = self._read('article_with_annotation_output.bib')
        result = to_bibtex(bib)
        self.maxDiff = None
        self.assertEqual(expected, result)

    def test_book(self):
        """Default writer output for a book entry."""
        bib = BibTexParser(self._read('book.bib'))
        expected = self._read('book_output.bib')
        result = to_bibtex(bib)
        self.maxDiff = None
        self.assertEqual(expected, result)

    def test_comma_first(self):
        """comma_first=True places the separating comma at line start."""
        bib = BibTexParser(self._read('book.bib'))
        expected = self._read('book_comma_first.bib')
        writer = BibTexWriter()
        writer.indent = ' '
        writer.comma_first = True
        result = writer.write(bib)
        self.maxDiff = None
        self.assertEqual(expected, result)

    def test_multiple(self):
        """Several entries are written in the expected order and format."""
        bib = BibTexParser(self._read('multiple_entries.bib'))
        expected = self._read('multiple_entries_output.bib')
        result = to_bibtex(bib)
        self.maxDiff = None
        self.assertEqual(expected, result)

    def test_exception_typeerror(self):
        """Customized (non-string) field values make to_bibtex raise."""
        bib = BibTexParser(self._read('article.bib'), customization=author)
        self.assertRaises(TypeError, to_bibtex, bib)

    def test_with_strings(self):
        """Uninterpolated @string expressions are written back as macros."""
        bib = BibTexParser(self._read('article_with_strings.bib'),
                           common_strings=True, interpolate_strings=False)
        expected = self._read('article_with_strings_output.bib')
        result = to_bibtex(bib)
        self.maxDiff = None
        self.assertEqual(expected, result)

    def test_trailing_comma(self):
        """add_trailing_comma appends a comma after the last field."""
        bib = BibTexParser(self._read('article.bib'))
        expected = self._read('article_trailing_comma_output.bib')
        writer = BibTexWriter()
        writer.add_trailing_comma = True
        result = writer.write(bib)
        self.maxDiff = None
        self.assertEqual(expected, result)

    def test_comma_first_and_trailing_comma(self):
        """comma_first and add_trailing_comma can be combined."""
        bib = BibTexParser(self._read('article.bib'))
        expected = self._read('article_comma_first_and_trailing_comma_output.bib')
        writer = BibTexWriter()
        writer.add_trailing_comma = True
        writer.comma_first = True
        result = writer.write(bib)
        self.maxDiff = None
        self.assertEqual(expected, result)
python-bibtexparser-1.4.3/bibtexparser/tests/test_comments.py 0000664 0000000 0000000 00000015154 14731101544 0024630 0 ustar 00root root 0000000 0000000 import unittest
from bibtexparser.bparser import BibTexParser
from bibtexparser.bwriter import to_bibtex
""" The code is supposed to treat comments the following way:
Each @Comment opens a comment that ends when something
that is not a comment is encountered. More precisely
this means a line starting with an @. Lines that are not
parsed as anything else are also considered comments.
If the comment starts and ends with braces, they are removed.
Current issues:
- a comment followed by a line starting with @smthing
that is not a valid bibtex element are parsed separately,
that is as two comments.
- braces are either ignored or removed which is not easily
predictable.
"""
class TestParseComment(unittest.TestCase):
    """Parsing of @comment blocks and free-standing comment text."""

    def test_comment_count(self):
        """features.bib contains exactly three comments."""
        with open('bibtexparser/tests/data/features.bib') as bibfile:
            bib = BibTexParser(bibfile.read())
        self.assertEqual(len(bib.comments), 3)

    def test_comment_list(self):
        """Comments are collected in file order, braces stripped."""
        with open('bibtexparser/tests/data/features.bib') as bibfile:
            bib = BibTexParser(bibfile.read())
        expected = ["ignore this line!",
                    "ignore this line too!",
                    "and ignore this line too!"]
        self.assertEqual(bib.comments, expected)

    def test_multiline_comments(self):
        """Multi-line comments keep their internal newlines."""
        with open('bibtexparser/tests/data/multiline_comments.bib') as bibfile:
            bib = BibTexParser(bibfile.read())
        expected = [
            """Lorem ipsum dolor sit amet,
consectetur adipisicing elit""",
            """
Sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.
Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.
Excepteur sint occaecat cupidatat non proident.
,
""",
            """
Sunt in culpa qui officia deserunt mollit anim id est laborum.
""",
            ""
        ]
        self.maxDiff = None
        self.assertEqual(bib.comments, expected)

    def test_multiple_entries(self):
        """Comments interleaved with entries are still collected."""
        with open('bibtexparser/tests/data/multiple_entries_and_comments.bib') as bibfile:
            bparser = BibTexParser()
            bib = bparser.parse_file(bibfile)
        expected = ["",
                    "A comment"]
        self.assertEqual(bib.comments, expected)

    def test_comments_percentage(self):
        """Lines starting with '%' inside entries do not break parsing."""
        with open('bibtexparser/tests/data/comments_percentage.bib', 'r') as bibfile:
            bib = BibTexParser(bibfile.read())
        res = bib.get_entry_list()
        expected = [{'ENTRYTYPE': 'article',
                     'journal': 'Nice Journal',
                     'volume': '12',
                     'ID': 'Cesar2013',
                     'year': '2013',
                     'author': 'Jean Cesar',
                     'comments': 'A comment',
                     'keyword': 'keyword1, keyword2',
                     'title': 'An amazing title'
                     },
                    {'ENTRYTYPE': 'article',
                     'journal': 'Nice Journal',
                     'volume': '12',
                     'ID': 'Baltazar2013',
                     'year': '2013',
                     'author': 'Jean Baltazar',
                     'comments': 'A comment',
                     'keyword': 'keyword1, keyword2',
                     'title': 'An amazing title'
                     }]
        self.assertEqual(res, expected)

    def test_comments_percentage_nocoma(self):
        """Same as above, with no comma after the last field."""
        with open('bibtexparser/tests/data/comments_percentage_nolastcoma.bib', 'r') as bibfile:
            bib = BibTexParser(bibfile.read())
        res = bib.get_entry_list()
        expected = [{'ENTRYTYPE': 'article',
                     'journal': 'Nice Journal',
                     'volume': '12',
                     'ID': 'Cesar2013',
                     'year': '2013',
                     'author': 'Jean Cesar',
                     'comments': 'A comment',
                     'keyword': 'keyword1, keyword2',
                     'title': 'An amazing title'
                     },
                    {'ENTRYTYPE': 'article',
                     'journal': 'Nice Journal',
                     'volume': '12',
                     'ID': 'Baltazar2013',
                     'year': '2013',
                     'author': 'Jean Baltazar',
                     'comments': 'A comment',
                     'keyword': 'keyword1, keyword2',
                     'title': 'An amazing title'
                     }]
        self.assertEqual(res, expected)

    def test_no_newline(self):
        """A bare line with no trailing newline parses as a comment."""
        comments = """This is a comment."""
        expected = ["This is a comment."]
        bib = BibTexParser(comments)
        self.assertEqual(bib.comments, expected)

    def test_43(self):
        """Regression for #43: comment text after a @STRING declaration."""
        comment = "@STRING{foo = \"bar\"}\n" \
                  "This is a comment\n" \
                  "This is a second comment."
        expected = "This is a comment\nThis is a second comment."
        bib = BibTexParser(comment, common_strings=False)
        self.assertEqual(bib.comments, [expected])
        self.assertEqual(bib.strings, {'foo': 'bar'})

    def test_43_bis(self):
        """Regression for #43: 'STRING{...}' without '@' is a comment."""
        comment = "@STRING{foo = \"bar\"}\n" \
                  "This is a comment\n" \
                  "STRING{Baz = \"This should be interpreted as comment.\"}"
        expected = "This is a comment\n" \
                   "STRING{Baz = \"This should be interpreted as comment.\"}"
        bib = BibTexParser(comment, common_strings=False)
        self.assertEqual(bib.comments, [expected])
        self.assertEqual(bib.strings, {'foo': 'bar'})
class TestWriteComment(unittest.TestCase):
    """Writer round-trips for databases that contain comments."""

    def test_comment_write(self):
        """Parsed comments are serialised back in the expected form."""
        with open('bibtexparser/tests/data/comments_only.bib') as infile:
            bib = BibTexParser(infile.read())
        with open('bibtexparser/tests/data/comments_only_output.bib') as reference:
            expected = reference.read()
        self.assertEqual(to_bibtex(bib), expected)

    def test_multiline_comment_write(self):
        """A multiline-comment file round-trips to itself."""
        with open('bibtexparser/tests/data/multiline_comments.bib') as infile:
            expected = infile.read()
        self.assertEqual(to_bibtex(BibTexParser(expected)), expected)

    def test_multiple_entries(self):
        """Entries interleaved with comments serialise as expected."""
        with open('bibtexparser/tests/data/multiple_entries_and_comments.bib') as infile:
            bib = BibTexParser(infile.read())
        with open('bibtexparser/tests/data/multiple_entries_and_comments_output.bib') as reference:
            expected = reference.read()
        self.assertEqual(to_bibtex(bib), expected)
python-bibtexparser-1.4.3/bibtexparser/tests/test_crossref_resolving.py 0000664 0000000 0000000 00000031740 14731101544 0026720 0 ustar 00root root 0000000 0000000 import unittest
from bibtexparser.bibdatabase import BibDatabase
from bibtexparser.bparser import BibTexParser
class TestCrossRef(unittest.TestCase):
    """Resolution of BibTeX ``crossref`` fields at parse time.

    With ``add_missing_from_crossref=True`` the parser copies fields
    from the referenced entry into the referencing one and records the
    inherited field names in the ``_FROM_CROSSREF`` list.
    """

    def test_crossref(self):
        """Fields are inherited from the cross-referenced entries."""
        self.maxDiff = None
        input_file_path = 'bibtexparser/tests/data/crossref_entries.bib'
        entries_expected = {'cr1': {'ENTRYTYPE': 'inbook',
                                    'ID': 'cr1',
                                    '_FROM_CROSSREF': ['editor', 'publisher', 'year'],
                                    'archiveprefix': 'SomEPrFiX',
                                    'author': 'Graham Gullam',
                                    'crossref': 'cr_m',
                                    'editor': 'Edgar Erbriss',
                                    'origdate': '1955',
                                    'primaryclass': 'SOMECLASS',
                                    'publisher': 'Grimble',
                                    'title': 'Great and Good Graphs',
                                    'year': '1974'},
                            'cr2': {'ENTRYTYPE': 'inbook',
                                    'ID': 'cr2',
                                    '_FROM_CROSSREF': ['editor', 'publisher', 'year'],
                                    'author': 'Frederick Fumble',
                                    'crossref': 'cr_m',
                                    'editor': 'Edgar Erbriss',
                                    'institution': 'Institution',
                                    'origdate': '1943',
                                    'publisher': 'Grimble',
                                    'school': 'School',
                                    'title': 'Fabulous Fourier Forms',
                                    'year': '1974'},
                            'cr3': {'ENTRYTYPE': 'inbook',
                                    'ID': 'cr3',
                                    '_FROM_CROSSREF': ['editor', 'publisher', 'year'],
                                    'archiveprefix': 'SomEPrFiX',
                                    'author': 'Arthur Aptitude',
                                    'crossref': 'crt',
                                    'editor': 'Mark Monkley',
                                    'eprinttype': 'sometype',
                                    'origdate': '1934',
                                    'publisher': 'Rancour',
                                    'title': 'Arrangements of All Articles',
                                    'year': '1996'},
                            'cr4': {'ENTRYTYPE': 'inbook',
                                    'ID': 'cr4',
                                    '_FROM_CROSSREF': ['editor', 'publisher', 'year'],
                                    'author': 'Morris Mumble',
                                    'crossref': 'crn',
                                    'editor': 'Jeremy Jermain',
                                    'origdate': '1911',
                                    'publisher': 'Pillsbury',
                                    'title': 'Enterprising Entities',
                                    'year': '1945'},
                            'cr5': {'ENTRYTYPE': 'inbook',
                                    'ID': 'cr5',
                                    '_FROM_CROSSREF': ['editor', 'publisher', 'year'],
                                    'author': 'Oliver Ordinary',
                                    'crossref': 'crn',
                                    'editor': 'Jeremy Jermain',
                                    'origdate': '1919',
                                    'publisher': 'Pillsbury',
                                    'title': 'Questionable Quidities',
                                    'year': '1945'},
                            'cr6': {'ENTRYTYPE': 'inproceedings',
                                    'ID': 'cr6',
                                    '_FROM_CROSSREF': ['address',
                                                       'editor',
                                                       'eventdate',
                                                       'eventtitle',
                                                       'publisher',
                                                       'venue'],
                                    'address': 'Address',
                                    'author': 'Author, Firstname',
                                    'booktitle': 'Manual booktitle',
                                    'crossref': 'cr6i',
                                    'editor': 'Editor',
                                    'eventdate': '2009-08-21/2009-08-24',
                                    'eventtitle': 'Title of the event',
                                    'pages': '123--',
                                    'publisher': 'Publisher of proceeding',
                                    'title': 'Title of inproceeding',
                                    'venue': 'Location of event',
                                    'year': '2009'},
                            'cr6i': {'ENTRYTYPE': 'proceedings',
                                     'ID': 'cr6i',
                                     'address': 'Address',
                                     'author': 'Spurious Author',
                                     'editor': 'Editor',
                                     'eventdate': '2009-08-21/2009-08-24',
                                     'eventtitle': 'Title of the event',
                                     'publisher': 'Publisher of proceeding',
                                     'title': 'Title of proceeding',
                                     'venue': 'Location of event',
                                     'year': '2009'},
                            'cr7': {'ENTRYTYPE': 'inbook',
                                    'ID': 'cr7',
                                    '_FROM_CROSSREF': ['publisher', 'subtitle', 'titleaddon', 'verba'],
                                    'author': 'Author, Firstname',
                                    'crossref': 'cr7i',
                                    'pages': '123--126',
                                    'publisher': 'Publisher of proceeding',
                                    'subtitle': 'Book Subtitle',
                                    'title': 'Title of Book bit',
                                    'titleaddon': 'Book Titleaddon',
                                    'verba': 'String',
                                    'year': '2010'},
                            'cr7i': {'ENTRYTYPE': 'book',
                                     'ID': 'cr7i',
                                     'author': 'Brian Bookauthor',
                                     'publisher': 'Publisher of proceeding',
                                     'subtitle': 'Book Subtitle',
                                     'title': 'Book Title',
                                     'titleaddon': 'Book Titleaddon',
                                     'verba': 'String',
                                     'year': '2009'},
                            'cr8': {'ENTRYTYPE': 'incollection',
                                    'ID': 'cr8',
                                    '_FROM_CROSSREF': ['editor', 'publisher', 'subtitle', 'titleaddon'],
                                    'author': 'Smith, Firstname',
                                    'crossref': 'cr8i',
                                    'editor': 'Brian Editor',
                                    'pages': '1--12',
                                    'publisher': 'Publisher of Collection',
                                    'subtitle': 'Book Subtitle',
                                    'title': 'Title of Collection bit',
                                    'titleaddon': 'Book Titleaddon',
                                    'year': '2010'},
                            'cr8i': {'ENTRYTYPE': 'collection',
                                     'ID': 'cr8i',
                                     'editor': 'Brian Editor',
                                     'publisher': 'Publisher of Collection',
                                     'subtitle': 'Book Subtitle',
                                     'title': 'Book Title',
                                     'titleaddon': 'Book Titleaddon',
                                     'year': '2009'},
                            'cr_m': {'ENTRYTYPE': 'book',
                                     'ID': 'cr_m',
                                     'editor': 'Edgar Erbriss',
                                     'publisher': 'Grimble',
                                     'title': 'Graphs of the Continent',
                                     'year': '1974'},
                            'crn': {'ENTRYTYPE': 'book',
                                    'ID': 'crn',
                                    'editor': 'Jeremy Jermain',
                                    'publisher': 'Pillsbury',
                                    'title': 'Vanquished, Victor, Vandal',
                                    'year': '1945'},
                            'crt': {'ENTRYTYPE': 'book',
                                    'ID': 'crt',
                                    'editor': 'Mark Monkley',
                                    'publisher': 'Rancour',
                                    'title': 'Beasts of the Burbling Burns',
                                    'year': '1996'}}
        parser = BibTexParser(add_missing_from_crossref=True, ignore_nonstandard_types=False)
        with open(input_file_path) as bibtex_file:
            bibtex_database = parser.parse_file(bibtex_file)
        self.assertDictEqual(bibtex_database.entries_dict, entries_expected)

    def test_crossref_cascading(self):
        """Chained crossrefs (r1 -> r2 -> r3 -> r4) resolve transitively."""
        input_file_path = 'bibtexparser/tests/data/crossref_cascading.bib'
        entries_expected = {'r1': {'ENTRYTYPE': 'book',
                                   'ID': 'r1',
                                   '_FROM_CROSSREF': [],
                                   'crossref': 'r2',
                                   'date': '1911'},
                            'r2': {'ENTRYTYPE': 'book',
                                   'ID': 'r2',
                                   '_FROM_CROSSREF': [],
                                   'crossref': 'r3',
                                   'date': '1911'},
                            'r3': {'ENTRYTYPE': 'book',
                                   'ID': 'r3',
                                   '_FROM_CROSSREF': [],
                                   'crossref': 'r4',
                                   'date': '1911'},
                            'r4': {'ENTRYTYPE': 'book',
                                   'ID': 'r4',
                                   'date': '1911'}}
        parser = BibTexParser(add_missing_from_crossref=True)
        with open(input_file_path) as bibtex_file:
            bibtex_database = parser.parse_file(bibtex_file)
        self.assertDictEqual(bibtex_database.entries_dict, entries_expected)

    def test_crossref_cascading_cycle(self):
        """A crossref cycle is reported as an error and left unresolved."""
        input_file_path = 'bibtexparser/tests/data/crossref_cascading_cycle.bib'
        entries_expected = {'circ1': {'ENTRYTYPE': 'book',
                                      'ID': 'circ1',
                                      '_FROM_CROSSREF': [],
                                      'crossref': 'circ2',
                                      'date': '1911'},
                            'circ2': {'ENTRYTYPE': 'book',
                                      'ID': 'circ2',
                                      '_FROM_CROSSREF': [],
                                      'crossref': 'circ1',
                                      'date': '1911'}}
        parser = BibTexParser(add_missing_from_crossref=True)
        with self.assertLogs('bibtexparser.bibdatabase', level='ERROR') as cm:
            with open(input_file_path) as bibtex_file:
                bibtex_database = parser.parse_file(bibtex_file)
        self.assertIn("ERROR:bibtexparser.bibdatabase:Circular crossref dependency: circ1->circ2->circ1.", cm.output)
        self.assertDictEqual(bibtex_database.entries_dict, entries_expected)

    def test_crossref_missing_entries(self):
        """A dangling crossref logs an error and is kept in ``_crossref``."""
        input_file_path = 'bibtexparser/tests/data/crossref_missing_entries.bib'
        entries_expected = {'mcr': {'ENTRYTYPE': 'inbook',
                                    'ID': 'mcr',
                                    '_crossref': 'missing1',
                                    'author': 'Megan Mistrel',
                                    'crossref': 'missing1',
                                    'origdate': '1933',
                                    'title': 'Lumbering Lunatics'}}
        parser = BibTexParser(add_missing_from_crossref=True)
        with self.assertLogs('bibtexparser.bibdatabase', level='ERROR') as cm:
            with open(input_file_path) as bibtex_file:
                bibtex_database = parser.parse_file(bibtex_file)
        self.assertIn("ERROR:bibtexparser.bibdatabase:Crossref reference missing1 for mcr is missing.", cm.output)
        self.assertDictEqual(bibtex_database.entries_dict, entries_expected)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
python-bibtexparser-1.4.3/bibtexparser/tests/test_customization.py 0000664 0000000 0000000 00000012730 14731101544 0025710 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
from bibtexparser.customization import getnames, convert_to_unicode, homogenize_latex_encoding, page_double_hyphen, keyword, add_plaintext_fields
class TestBibtexParserMethod(unittest.TestCase):
    """Unit tests for the record-customization helper functions."""

    ###########
    # getnames
    ###########
    def test_getnames(self):
        """Names are normalised to 'von Last, First' order.

        Brace groups are treated as atomic; unbalanced braces are left
        as-is (last case) rather than raising.
        """
        names = ['Foo Bar',
                 'Foo B. Bar',
                 'F. B. Bar',
                 'F.B. Bar',
                 'F. Bar',
                 'Jean de Savigny',
                 'Jean la Tour',
                 'Jean le Tour',
                 'Mike ben Akar',
                 'A. {Delgado de Molina}',
                 r'M. Vign{\'e}',
                 'Tom {de Geus}',
                 'Tom {de \{Geus}',
                 'Tom \{de Geus\}',
                 'Tom de {G\{eus}',
                 'Foo B{\'a}r',
                 r'{G{\'{e}}rard} {Ben Arous}',
                 'Incorrect {{name}',
                 #'Jean de la Tour',
                 #'Johannes Diderik van der Waals',
                 ]
        result = getnames(names)
        expected = ['Bar, Foo',
                    'Bar, Foo B.',
                    'Bar, F. B.',
                    'Bar, F. B.',
                    'Bar, F.',
                    'de Savigny, Jean',
                    'la Tour, Jean',
                    'le Tour, Jean',
                    'ben Akar, Mike',
                    '{Delgado de Molina}, A.',
                    r'Vign{\'e}, M.',
                    '{de Geus}, Tom',
                    '{de \{Geus}, Tom',
                    'Geus\}, Tom \{de',
                    'de {G\{eus}, Tom',
                    'B{\'a}r, Foo',
                    r'{Ben Arous}, {G{\'{e}}rard}',
                    'Incorrect {{name}',
                    #'de la Tour, Jean',
                    #'van der Waals, Johannes Diderik',
                    ]
        self.assertEqual(result, expected)

    ###########
    # page_double_hyphen
    ###########
    def test_page_double_hyphen_alreadyOK(self):
        """A range already using '--' is left unchanged."""
        record = {'pages': '12--24'}
        result = page_double_hyphen(record)
        expected = record
        self.assertEqual(result, expected)

    def test_page_double_hyphen_simple(self):
        """A single '-' separator is doubled."""
        record = {'pages': '12-24'}
        result = page_double_hyphen(record)
        expected = {'pages': '12--24'}
        self.assertEqual(result, expected)

    def test_page_double_hyphen_space(self):
        """Spaces around the hyphen are removed."""
        record = {'pages': '12 - 24'}
        result = page_double_hyphen(record)
        expected = {'pages': '12--24'}
        self.assertEqual(result, expected)

    def test_page_double_hyphen_nothing(self):
        """Without any hyphen the value is untouched."""
        record = {'pages': '12 24'}
        result = page_double_hyphen(record)
        expected = {'pages': '12 24'}
        self.assertEqual(result, expected)

    ###########
    # convert to unicode
    ###########
    def test_convert_to_unicode(self):
        """LaTeX accent commands are converted to unicode characters."""
        record = {'toto': r'{\`a} \`{a}'}
        result = convert_to_unicode(record)
        expected = {'toto': r'à à'}
        self.assertEqual(result, expected)
        record = {'toto': r'{\"u} \"{u}'}
        result = convert_to_unicode(record)
        expected = {'toto': r'ü ü'}
        self.assertEqual(result, expected)
        # From issue 121
        record = {'title': r'{Two Gedenk\"uberlieferung der Angelsachsen}'}
        result = convert_to_unicode(record)
        expected = {'title': r'Two Gedenküberlieferung der Angelsachsen'}
        self.assertEqual(result, expected)
        # From issue 161
        record = {'title': r"p\^{a}t\'{e}"}
        result = convert_to_unicode(record)
        expected = {'title': r"pâté"}
        self.assertEqual(result, expected)
        record = {'title': r"\^{i}le"}
        result = convert_to_unicode(record)
        expected = {'title': r"île"}
        self.assertEqual(result, expected)
        record = {'title': r"\texttimes{}{\texttimes}\texttimes"}
        result = convert_to_unicode(record)
        expected = {'title': r"×××"}
        self.assertEqual(result, expected)

    ###########
    # homogenize
    ###########
    def test_homogenize(self):
        """Unicode and bare accents are normalised to braced LaTeX form."""
        record = {'toto': r'à {\`a} \`{a}'}
        result = homogenize_latex_encoding(record)
        expected = {'toto': r'{\`a} {\`a} {\`a}'}
        self.assertEqual(result, expected)

    ###########
    # add_plaintext_fields
    ###########
    def test_add_plaintext_fields(self):
        """A brace-free 'plain_' copy is added for strings, lists and dicts."""
        record = {
            'title': r'On-line {Recognition} of {Handwritten} {Mathematical} {Symbols}',
            'foobar': ['{FFT} {Foobar}', '{foobar}'],
            'foobar2': {'item1': '{FFT} {Foobar}', 'item2': '{foobar}'}
        }
        result = add_plaintext_fields(record)
        expected = {
            'title': 'On-line {Recognition} of {Handwritten} {Mathematical} {Symbols}',
            'plain_title': 'On-line Recognition of Handwritten Mathematical Symbols',
            'foobar': ['{FFT} {Foobar}', '{foobar}'],
            'plain_foobar': ['FFT Foobar', 'foobar'],
            'foobar2': {'item1': '{FFT} {Foobar}', 'item2': '{foobar}'},
            'plain_foobar2': {'item1': 'FFT Foobar', 'item2': 'foobar'}
        }
        self.assertEqual(result, expected)

    ###########
    # keywords
    ###########
    def test_keywords(self):
        """The keyword field is split on ',' and ';', whitespace stripped."""
        record = {'keyword': "a b, a b , a b;a b ; a b, a b\n"}
        result = keyword(record)
        expected = {'keyword': ['a b'] * 6}
        self.assertEqual(result, expected)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
python-bibtexparser-1.4.3/bibtexparser/tests/test_homogenise_fields.py 0000664 0000000 0000000 00000004514 14731101544 0026464 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import io
import unittest
from bibtexparser.bparser import BibTexParser
class TestHomogenizeFields(unittest.TestCase):
    """Behaviour of the ``homogenize_fields`` parser option.

    When enabled, alias field names (e.g. ``link``) are renamed to their
    canonical form (e.g. ``url``).
    """

    def test_homogenize_default(self):
        """Homogenization is off by default: ``link`` is kept as-is."""
        # encoding='utf-8' keeps the test locale-independent, matching
        # test_homogenizes_fields below.
        with open('bibtexparser/tests/data/website.bib', 'r', encoding='utf-8') as bibfile:
            bib = BibTexParser(bibfile.read())
        entries = bib.get_entry_list()
        self.assertNotIn('url', entries[0])
        self.assertIn('link', entries[0])

    def test_homogenize_on(self):
        """With homogenize_fields=True, ``link`` is renamed to ``url``."""
        with open('bibtexparser/tests/data/website.bib', 'r', encoding='utf-8') as bibfile:
            bib = BibTexParser(bibfile.read(), homogenize_fields=True)
        entries = bib.get_entry_list()
        self.assertIn('url', entries[0])
        self.assertNotIn('link', entries[0])

    def test_homogenize_off(self):
        """Explicitly disabling homogenization behaves like the default."""
        with open('bibtexparser/tests/data/website.bib', 'r', encoding='utf-8') as bibfile:
            bib = BibTexParser(bibfile.read(), homogenize_fields=False)
        entries = bib.get_entry_list()
        self.assertNotIn('url', entries[0])
        self.assertIn('link', entries[0])

    def test_homogenizes_fields(self):
        """All alias fields of an entry are renamed to canonical names."""
        self.maxDiff = None
        with io.open('bibtexparser/tests/data/article_homogenize.bib',
                     'r', encoding='utf-8') as bibfile:
            bib = BibTexParser(bibfile.read(), homogenize_fields=True)
        expected_dict = {
            'Cesar2013': {
                'keyword': 'keyword1, keyword2',
                'ENTRYTYPE': 'article',
                'abstract': 'This is an abstract. This line should be '
                            'long enough to test\nmultilines... and with '
                            'a french érudit word',
                'year': '2013',
                'journal': 'Nice Journal',
                'ID': 'Cesar2013',
                'pages': '12-23',
                'title': 'An amazing title',
                'comments': 'A comment',
                'author': 'Jean César',
                'volume': '12',
                'month': 'jan',
                'url': "http://my.link/to-content",
                'subject': "Some topic of interest",
                'editor': "Edith Or",
            }
        }
        self.assertEqual(bib.get_entry_dict(), expected_dict)
python-bibtexparser-1.4.3/bibtexparser/tests/test_latexenc.py 0000664 0000000 0000000 00000006705 14731101544 0024610 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# -*- coding: utf-8 -*-
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see
#
# Author: Francois Boulogne , 2012
from __future__ import unicode_literals
import unittest
from bibtexparser.latexenc import (string_to_latex, latex_to_unicode,
protect_uppercase)
class TestLatexConverter(unittest.TestCase):
    """string_to_latex: unicode characters become LaTeX escape sequences."""

    def test_accent(self):
        """Accented letters are rewritten as brace-protected accents."""
        string = 'à é è ö'
        result = string_to_latex(string)
        expected = "{\`a} {\\\'e} {\`e} {\\\"o}"
        self.assertEqual(result, expected)

    def test_special_caracter(self):
        """The cedilla uses the LaTeX 'c' accent command."""
        string = 'ç'
        result = string_to_latex(string)
        expected = '{\c c}'
        self.assertEqual(result, expected)
class TestUppercaseProtection(unittest.TestCase):
    """protect_uppercase wraps bare capital letters in braces."""

    def test_uppercase(self):
        """Every unprotected capital letter gets its own brace group."""
        self.assertEqual(protect_uppercase('An upPer Case A'),
                         '{A}n up{P}er {C}ase {A}')

    def test_lowercase(self):
        """Lowercase input is returned unchanged."""
        self.assertEqual(protect_uppercase('a'), 'a')

    def test_alreadyprotected(self):
        """Capitals already inside braces are not wrapped again."""
        protected = '{A}, m{A}gnificient, it is a {A}...'
        self.assertEqual(protect_uppercase(protected), protected)

    def test_traps(self):
        """Unbalanced opening braces disable protection entirely."""
        unbalanced = '{A, m{Agnificient, it is a {A'
        self.assertEqual(protect_uppercase(unbalanced), unbalanced)

    def test_traps2(self):
        """Unbalanced closing braces disable protection entirely."""
        unbalanced = 'A}, mA}gnificient, it is a A}'
        self.assertEqual(protect_uppercase(unbalanced), unbalanced)
class TestUnicodeConversion(unittest.TestCase):
    """latex_to_unicode: LaTeX escape sequences become unicode characters."""

    def test_accents(self):
        """Braced accent commands map back to accented letters."""
        string = "{\`a} {\\\'e} {\`e} {\\\"o}"
        result = latex_to_unicode(string)
        expected = 'à é è ö'
        self.assertEqual(result, expected)

    def test_ignores_trailing_modifier(self):
        """An accent modifier with nothing to combine with is dropped."""
        string = "a\\\'"
        result = latex_to_unicode(string)
        expected = 'a'
        self.assertEqual(result, expected)

    def test_special_caracter(self):
        """The cedilla accent command converts back to the cedilla letter."""
        string = '{\c c}'
        result = latex_to_unicode(string)
        expected = 'ç'
        self.assertEqual(result, expected)

    def test_does_not_modify_existing_combining(self):
        """Pre-composed char + combining mark is only NFC-normalized."""
        string = b'ph\xc6\xa1\xcc\x89'.decode('utf8')
        result = latex_to_unicode(string)
        expected = 'phở'  # normalized
        self.assertEqual(result, expected)

    def test_does_not_modify_two_existing_combining(self):
        """Two stacked combining marks are normalized, not altered."""
        string = b'pho\xcc\x9b\xcc\x89'.decode('utf8')
        result = latex_to_unicode(string)
        expected = 'phở'  # normalized
        self.assertEqual(result, expected)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
python-bibtexparser-1.4.3/bibtexparser/tests/test_preambles.py 0000664 0000000 0000000 00000003165 14731101544 0024754 0 ustar 00root root 0000000 0000000 import unittest
import bibtexparser
from bibtexparser.bibdatabase import BibDatabase
from collections import OrderedDict
class TestPreambleParse(unittest.TestCase):
    """Parsing of @preamble declarations."""

    def test_single_preamble_parse_count(self):
        """One @preamble yields one collected preamble."""
        db = bibtexparser.loads('@preamble{" a "}\n\n')
        self.assertEqual(len(db.preambles), 1)

    def test_multiple_preamble_parse_count(self):
        """Each @preamble declaration is collected separately."""
        db = bibtexparser.loads('@preamble{" a "}\n\n@preamble{"b"}\n\n')
        self.assertEqual(len(db.preambles), 2)

    def test_single_preamble_parse(self):
        """The quoted content is stored with surrounding quotes removed."""
        db = bibtexparser.loads('@preamble{" a "}\n\n')
        self.assertEqual(db.preambles, [' a '])

    def test_multiple_preamble_parse(self):
        """Multiple preambles are stored in file order."""
        db = bibtexparser.loads('@preamble{" a "}\n\n@preamble{"b"}\n\n')
        self.assertEqual(db.preambles, [' a ', 'b'])
class TestPreambleWrite(unittest.TestCase):
    """Serialising database preambles back to BibTeX."""

    def test_single_preamble_write(self):
        """One preamble serialises to a quoted @preamble block."""
        db = BibDatabase()
        db.preambles = [' a ']
        self.assertEqual(bibtexparser.dumps(db), '@preamble{" a "}\n\n')

    def test_multiple_string_write(self):
        """Each preamble gets its own @preamble block, in order."""
        db = BibDatabase()
        db.preambles = [' a ', 'b']
        self.assertEqual(bibtexparser.dumps(db),
                         '@preamble{" a "}\n\n@preamble{"b"}\n\n')
python-bibtexparser-1.4.3/bibtexparser/tests/test_splitname.py 0000664 0000000 0000000 00000052715 14731101544 0025003 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from bibtexparser.customization import InvalidName, splitname
class TestSplitnameMethod(unittest.TestCase):
    """Exercises customization.splitname() in strict and lenient modes."""

    def test_splitname_basic(self):
        """Basic tests of customization.splitname() """
        # Names with no printable parts produce an empty dict.
        for blank, label in (("", "empty"), (" ", "space-only"), (" \t~~", "whitespace")):
            self.assertEqual(splitname(blank), {},
                             msg="Invalid output for %s name" % label)

        # Strict mode rejects malformed names outright.
        malformed = [
            "BB,", "BB, ", "BB, ~\t", ", ~\t",            # trailing comma (4 cases)
            "AA, BB, CC, DD",                              # too many sections
            "AA {BB CC", "AA {{{BB CC", "AA {{{BB} CC}",   # unterminated opening brace (x3)
            "AA BB CC}", "AA BB CC}}}", "{AA {BB CC}}}",   # unmatched closing brace (x3)
        ]
        for name in malformed:
            with self.assertRaises(InvalidName):
                splitname(name, strict_mode=True)

        # Lenient mode: trailing commas are forgiven.
        expected = {'first': [], 'von': [], 'last': ["BB"], 'jr': []}
        for name in ("BB,", "BB, ", "BB, ~\t "):
            self.assertEqual(splitname(name, strict_mode=False), expected,
                             msg="Invalid output for trailing comma with strict mode off")
        self.assertEqual(splitname(", ~\t", strict_mode=False), {},
                         msg="Invalid output for trailing comma with strict mode off")

        # Lenient mode: extra sections are folded into first/jr.
        self.assertEqual(splitname("AA, BB, CC, DD", strict_mode=False),
                         {'first': ["CC", "DD"], 'von': [], 'last': ["AA"], 'jr': ["BB"]},
                         msg="Invalid output for too many sections with strict mode off")

        # Lenient mode: unterminated opening braces get auto-closed.
        for name, last in (("AA {BB CC", ["{BB CC}"]),
                           ("AA {{{BB CC", ["{{{BB CC}}}"]),
                           ("AA {{{BB} CC}", ["{{{BB} CC}}"])):
            self.assertEqual(splitname(name, strict_mode=False),
                             {'first': ["AA"], 'von': [], 'last': last, 'jr': []},
                             msg="Invalid output for unterminated opening brace with strict mode off")

        # Lenient mode: unmatched closing braces get auto-opened.
        for name, first, last in (("AA BB CC}", ["AA", "BB"], ["{CC}"]),
                                  ("AA BB CC}}}", ["AA", "BB"], ["{{{CC}}}"]),
                                  ("{AA {BB CC}}}", [], ["{{AA {BB CC}}}"])):
            self.assertEqual(splitname(name, strict_mode=False),
                             {'first': first, 'von': [], 'last': last, 'jr': []},
                             msg="Invalid output for unmatched closing brace with strict mode off")

        # Commas nested inside braces do not split sections.
        self.assertEqual(splitname("CC, dd, {AA, BB}"),
                         {'first': ["{AA, BB}"], 'von': [], 'last': ["CC"], 'jr': ["dd"]},
                         msg="Invalid output for braced commas")

    def test_splitname_cases(self):
        """Test customization.splitname() vs output from BibTeX """
        for name, expected in splitname_test_cases:
            self.assertEqual(splitname(name), expected,
                             msg="Input name: {0}".format(name))
# Fixture data for TestSplitnameMethod.test_splitname_cases: pairs of
# (input author string, expected name-part dict).  Each expected dict mirrors
# how BibTeX itself splits the same input into first/von/last/jr parts.
splitname_test_cases = (
    (r'Per Brinch Hansen',
     {'first': ['Per', 'Brinch'], 'von': [], 'last': ['Hansen'], 'jr': []}),
    (r'Brinch Hansen, Per',
     {'first': ['Per'], 'von': [], 'last': ['Brinch', 'Hansen'], 'jr': []}),
    (r'Brinch Hansen,, Per',
     {'first': ['Per'], 'von': [], 'last': ['Brinch', 'Hansen'], 'jr': []}),
    (r"Charles Louis Xavier Joseph de la Vall{\'e}e Poussin",
     {'first': ['Charles', 'Louis', 'Xavier', 'Joseph'], 'von': ['de', 'la'],
      'last': [r'Vall{\'e}e', 'Poussin'], 'jr': []}),
    (r'D[onald] E. Knuth',
     {'first': ['D[onald]', 'E.'], 'von': [], 'last': ['Knuth'], 'jr': []}),
    (r'A. {Delgado de Molina}',
     {'first': ['A.'], 'von': [], 'last': ['{Delgado de Molina}'], 'jr': []}),
    (r"M. Vign{\'e}",
     {'first': ['M.'], 'von': [], 'last': [r"Vign{\'e}"], 'jr': []}),
    ###############################################################################
    #
    # Test cases from
    # http://maverick.inria.fr/~Xavier.Decoret/resources/xdkbibtex/bibtex_summary.html
    #
    ###############################################################################
    (r'AA BB',
     {'first': ['AA'], 'von': [], 'last': ['BB'], 'jr': []}),
    (r'AA',
     {'first': [], 'von': [], 'last': ['AA'], 'jr': []}),
    (r'AA bb',
     {'first': ['AA'], 'von': [], 'last': ['bb'], 'jr': []}),
    (r'aa',
     {'first': [], 'von': [], 'last': ['aa'], 'jr': []}),
    (r'AA bb CC',
     {'first': ['AA'], 'von': ['bb'], 'last': ['CC'], 'jr': []}),
    (r'AA bb CC dd EE',
     {'first': ['AA'], 'von': ['bb', 'CC', 'dd'], 'last': ['EE'], 'jr': []}),
    (r'AA 1B cc dd',
     {'first': ['AA', '1B'], 'von': ['cc'], 'last': ['dd'], 'jr': []}),
    (r'AA 1b cc dd',
     {'first': ['AA'], 'von': ['1b', 'cc'], 'last': ['dd'], 'jr': []}),
    (r'AA {b}B cc dd',
     {'first': ['AA', '{b}B'], 'von': ['cc'], 'last': ['dd'], 'jr': []}),
    (r'AA {b}b cc dd',
     {'first': ['AA'], 'von': ['{b}b', 'cc'], 'last': ['dd'], 'jr': []}),
    (r'AA {B}b cc dd',
     {'first': ['AA'], 'von': ['{B}b', 'cc'], 'last': ['dd'], 'jr': []}),
    (r'AA {B}B cc dd',
     {'first': ['AA', '{B}B'], 'von': ['cc'], 'last': ['dd'], 'jr': []}),
    (r'AA \BB{b} cc dd',
     {'first': ['AA', r'\BB{b}'], 'von': ['cc'], 'last': ['dd'], 'jr': []}),
    (r'AA \bb{b} cc dd',
     {'first': ['AA'], 'von': [r'\bb{b}', 'cc'], 'last': ['dd'], 'jr': []}),
    (r'AA {bb} cc DD',
     {'first': ['AA', '{bb}'], 'von': ['cc'], 'last': ['DD'], 'jr': []}),
    (r'AA bb {cc} DD',
     {'first': ['AA'], 'von': ['bb'], 'last': ['{cc}', 'DD'], 'jr': []}),
    (r'AA {bb} CC',
     {'first': ['AA', '{bb}'], 'von': [], 'last': ['CC'], 'jr': []}),
    (r'bb CC, AA',
     {'first': ['AA'], 'von': ['bb'], 'last': ['CC'], 'jr': []}),
    (r'bb CC, aa',
     {'first': ['aa'], 'von': ['bb'], 'last': ['CC'], 'jr': []}),
    (r'bb CC dd EE, AA',
     {'first': ['AA'], 'von': ['bb', 'CC', 'dd'], 'last': ['EE'], 'jr': []}),
    (r'bb, AA',
     {'first': ['AA'], 'von': [], 'last': ['bb'], 'jr': []}),
    (r'bb CC,XX, AA',
     {'first': ['AA'], 'von': ['bb'], 'last': ['CC'], 'jr': ['XX']}),
    (r'bb CC,xx, AA',
     {'first': ['AA'], 'von': ['bb'], 'last': ['CC'], 'jr': ['xx']}),
    (r'BB,, AA',
     {'first': ['AA'], 'von': [], 'last': ['BB'], 'jr': []}),
    (r"Paul \'Emile Victor",
     {'first': ['Paul', r"\'Emile"], 'von': [], 'last': ['Victor'], 'jr': []}),
    (r"Paul {\'E}mile Victor",
     {'first': ['Paul', r"{\'E}mile"], 'von': [], 'last': ['Victor'], 'jr': []}),
    (r"Paul \'emile Victor",
     {'first': ['Paul'], 'von': [r"\'emile"], 'last': ['Victor'], 'jr': []}),
    (r"Paul {\'e}mile Victor",
     {'first': ['Paul'], 'von': [r"{\'e}mile"], 'last': ['Victor'], 'jr': []}),
    (r"Victor, Paul \'Emile",
     {'first': ['Paul', r"\'Emile"], 'von': [], 'last': ['Victor'], 'jr': []}),
    (r"Victor, Paul {\'E}mile",
     {'first': ['Paul', r"{\'E}mile"], 'von': [], 'last': ['Victor'], 'jr': []}),
    (r"Victor, Paul \'emile",
     {'first': ['Paul', r"\'emile"], 'von': [], 'last': ['Victor'], 'jr': []}),
    (r"Victor, Paul {\'e}mile",
     {'first': ['Paul', r"{\'e}mile"], 'von': [], 'last': ['Victor'], 'jr': []}),
    (r'Dominique Galouzeau de Villepin',
     {'first': ['Dominique', 'Galouzeau'], 'von': ['de'], 'last': ['Villepin'], 'jr': []}),
    (r'Dominique {G}alouzeau de Villepin',
     {'first': ['Dominique'], 'von': ['{G}alouzeau', 'de'],
      'last': ['Villepin'], 'jr': []}),
    (r'Galouzeau de Villepin, Dominique',
     {'first': ['Dominique'], 'von': ['Galouzeau', 'de'],
      'last': ['Villepin'], 'jr': []}),
    ###############################################################################
    #
    # Test cases from pybtex
    # See file /pybtex/tests/parse_name_test.py in the pybtex source.
    #
    ###############################################################################
    (r'A. E. Siegman',
     {'first': ['A.', 'E.'], 'von': [], 'last': ['Siegman'], 'jr': []}),
    (r'A. G. W. Cameron',
     {'first': ['A.', 'G.', 'W.'], 'von': [], 'last': ['Cameron'], 'jr': []}),
    (r'A. Hoenig',
     {'first': ['A.'], 'von': [], 'last': ['Hoenig'], 'jr': []}),
    (r'A. J. Van Haagen',
     {'first': ['A.', 'J.', 'Van'], 'von': [], 'last': ['Haagen'], 'jr': []}),
    (r'A. S. Berdnikov',
     {'first': ['A.', 'S.'], 'von': [], 'last': ['Berdnikov'], 'jr': []}),
    (r'A. Trevorrow',
     {'first': ['A.'], 'von': [], 'last': ['Trevorrow'], 'jr': []}),
    (r'Adam H. Lewenberg',
     {'first': ['Adam', 'H.'], 'von': [], 'last': ['Lewenberg'], 'jr': []}),
    (r'Addison-Wesley Publishing Company',
     {'first': ['Addison-Wesley', 'Publishing'], 'von': [],
      'last': ['Company'], 'jr': []}),
    (r'Advogato (Raph Levien)',
     {'first': ['Advogato', '(Raph'], 'von': [], 'last': ['Levien)'], 'jr': []}),
    (r'Andrea de Leeuw van Weenen',
     {'first': ['Andrea'], 'von': ['de', 'Leeuw', 'van'], 'last': ['Weenen'], 'jr': []}),
    (r'Andreas Geyer-Schulz',
     {'first': ['Andreas'], 'von': [], 'last': ['Geyer-Schulz'], 'jr': []}),
    (r'Andr{\'e} Heck',
     {'first': [r'Andr{\'e}'], 'von': [], 'last': ['Heck'], 'jr': []}),
    (r'Anne Br{\"u}ggemann-Klein',
     {'first': ['Anne'], 'von': [], 'last': [r'Br{\"u}ggemann-Klein'], 'jr': []}),
    (r'Anonymous',
     {'first': [], 'von': [], 'last': ['Anonymous'], 'jr': []}),
    (r'B. Beeton',
     {'first': ['B.'], 'von': [], 'last': ['Beeton'], 'jr': []}),
    (r'B. Hamilton Kelly',
     {'first': ['B.', 'Hamilton'], 'von': [], 'last': ['Kelly'], 'jr': []}),
    (r'B. V. Venkata Krishna Sastry',
     {'first': ['B.', 'V.', 'Venkata', 'Krishna'], 'von': [],
      'last': ['Sastry'], 'jr': []}),
    (r'Benedict L{\o}fstedt',
     {'first': ['Benedict'], 'von': [], 'last': [r'L{\o}fstedt'], 'jr': []}),
    (r'Bogus{\l}aw Jackowski',
     {'first': ['Bogus{\l}aw'], 'von': [], 'last': ['Jackowski'], 'jr': []}),
    (r'Christina A. L.\ Thiele',
     {'first': ['Christina', 'A.', 'L.\\'], 'von': [],
      'last': ['Thiele'], 'jr': []}),
    (r"D. Men'shikov",
     {'first': ['D.'], 'von': [], 'last': ["Men'shikov"], 'jr': []}),
    (r'Darko \v{Z}ubrini{\'c}',
     {'first': ['Darko'], 'von': [], 'last': [r'\v{Z}ubrini{\'c}'], 'jr': []}),
    (r'Dunja Mladeni{\'c}',
     {'first': ['Dunja'], 'von': [], 'last': [r'Mladeni{\'c}'], 'jr': []}),
    (r'Edwin V. {Bell, II}',
     {'first': ['Edwin', 'V.'], 'von': [], 'last': ['{Bell, II}'], 'jr': []}),
    (r'Frank G. {Bennett, Jr.}',
     {'first': ['Frank', 'G.'], 'von': [], 'last': ['{Bennett, Jr.}'], 'jr': []}),
    (r'Fr{\'e}d{\'e}ric Boulanger',
     {'first': [r'Fr{\'e}d{\'e}ric'], 'von': [], 'last': ['Boulanger'], 'jr': []}),
    (r'Ford, Jr., Henry',
     {'first': ['Henry'], 'von': [], 'last': ['Ford'], 'jr': ['Jr.']}),
    (r'mr Ford, Jr., Henry',
     {'first': ['Henry'], 'von': ['mr'], 'last': ['Ford'], 'jr': ['Jr.']}),
    (r'Fukui Rei',
     {'first': ['Fukui'], 'von': [], 'last': ['Rei'], 'jr': []}),
    (r'G. Gr{\"a}tzer',
     {'first': ['G.'], 'von': [], 'last': [r'Gr{\"a}tzer'], 'jr': []}),
    (r'George Gr{\"a}tzer',
     {'first': ['George'], 'von': [], 'last': [r'Gr{\"a}tzer'], 'jr': []}),
    (r'Georgia K. M. Tobin',
     {'first': ['Georgia', 'K.', 'M.'], 'von': [], 'last': ['Tobin'], 'jr': []}),
    (r'Gilbert van den Dobbelsteen',
     {'first': ['Gilbert'], 'von': ['van', 'den'], 'last': ['Dobbelsteen'], 'jr': []}),
    (r'Gy{\"o}ngyi Bujdos{\'o}',
     {'first': [r'Gy{\"o}ngyi'], 'von': [], 'last': [r'Bujdos{\'o}'], 'jr': []}),
    (r'Helmut J{\"u}rgensen',
     {'first': ['Helmut'], 'von': [], 'last': [r'J{\"u}rgensen'], 'jr': []}),
    (r'Herbert Vo{\ss}',
     {'first': ['Herbert'], 'von': [], 'last': ['Vo{\ss}'], 'jr': []}),
    (r"H{\'a}n Th{\^e}\llap{\raise 0.5ex\hbox{\'{\relax}}} Th{\'a}nh",
     {'first': [r'H{\'a}n', r"Th{\^e}\llap{\raise 0.5ex\hbox{\'{\relax}}}"],
      'von': [], 'last': [r"Th{\'a}nh"], 'jr': []}),
    (r"H{\`a}n Th\^e\llap{\raise0.5ex\hbox{\'{\relax}}} Th{\`a}nh",
     {'first': [r'H{\`a}n', r"Th\^e\llap{\raise0.5ex\hbox{\'{\relax}}}"],
      'von': [], 'last': [r"Th{\`a}nh"], 'jr': []}),
    (r'J. Vesel{\'y}',
     {'first': ['J.'], 'von': [], 'last': [r'Vesel{\'y}'], 'jr': []}),
    (r'Javier Rodr\'{\i}guez Laguna',
     {'first': ['Javier', r'Rodr\'{\i}guez'], 'von': [], 'last': ['Laguna'], 'jr': []}),
    (r'Ji\v{r}\'{\i} Vesel{\'y}',
     {'first': [r'Ji\v{r}\'{\i}'], 'von': [], 'last': [r'Vesel{\'y}'], 'jr': []}),
    (r'Ji\v{r}\'{\i} Zlatu{\v{s}}ka',
     {'first': [r'Ji\v{r}\'{\i}'], 'von': [], 'last': [r'Zlatu{\v{s}}ka'], 'jr': []}),
    (r'Ji\v{r}{\'\i} Vesel{\'y}',
     {'first': [r'Ji\v{r}{\'\i}'], 'von': [], 'last': [r'Vesel{\'y}'], 'jr': []}),
    (r'Ji\v{r}{\'{\i}}Zlatu{\v{s}}ka',
     {'first': [], 'von': [], 'last': [r'Ji\v{r}{\'{\i}}Zlatu{\v{s}}ka'], 'jr': []}),
    (r'Jim Hef{}feron',
     {'first': ['Jim'], 'von': [], 'last': ['Hef{}feron'], 'jr': []}),
    (r'J{\"o}rg Knappen',
     {'first': [r'J{\"o}rg'], 'von': [], 'last': ['Knappen'], 'jr': []}),
    (r'J{\"o}rgen L. Pind',
     {'first': [r'J{\"o}rgen', 'L.'], 'von': [], 'last': ['Pind'], 'jr': []}),
    (r'J{\'e}r\^ome Laurens',
     {'first': [r'J{\'e}r\^ome'], 'von': [], 'last': ['Laurens'], 'jr': []}),
    (r'J{{\"o}}rg Knappen',
     {'first': [r'J{{\"o}}rg'], 'von': [], 'last': ['Knappen'], 'jr': []}),
    (r'K. Anil Kumar',
     {'first': ['K.', 'Anil'], 'von': [], 'last': ['Kumar'], 'jr': []}),
    (r'Karel Hor{\'a}k',
     {'first': ['Karel'], 'von': [], 'last': [r'Hor{\'a}k'], 'jr': []}),
    (r'Karel P\'{\i}{\v{s}}ka',
     {'first': ['Karel'], 'von': [], 'last': [r'P\'{\i}{\v{s}}ka'], 'jr': []}),
    (r'Karel P{\'\i}{\v{s}}ka',
     {'first': ['Karel'], 'von': [], 'last': [r'P{\'\i}{\v{s}}ka'], 'jr': []}),
    (r'Karel Skoup\'{y}',
     {'first': ['Karel'], 'von': [], 'last': [r'Skoup\'{y}'], 'jr': []}),
    (r'Karel Skoup{\'y}',
     {'first': ['Karel'], 'von': [], 'last': [r'Skoup{\'y}'], 'jr': []}),
    (r'Kent McPherson',
     {'first': ['Kent'], 'von': [], 'last': ['McPherson'], 'jr': []}),
    (r'Klaus H{\"o}ppner',
     {'first': ['Klaus'], 'von': [], 'last': [r'H{\"o}ppner'], 'jr': []}),
    (r'Lars Hellstr{\"o}m',
     {'first': ['Lars'], 'von': [], 'last': [r'Hellstr{\"o}m'], 'jr': []}),
    (r'Laura Elizabeth Jackson',
     {'first': ['Laura', 'Elizabeth'], 'von': [], 'last': ['Jackson'], 'jr': []}),
    (r'M. D{\'{\i}}az',
     {'first': ['M.'], 'von': [], 'last': [r'D{\'{\i}}az'], 'jr': []}),
    (r'M/iche/al /O Searc/oid',
     {'first': [r'M/iche/al', r'/O'], 'von': [], 'last': [r'Searc/oid'], 'jr': []}),
    (r'Marek Ry{\'c}ko',
     {'first': ['Marek'], 'von': [], 'last': [r'Ry{\'c}ko'], 'jr': []}),
    (r'Marina Yu. Nikulina',
     {'first': ['Marina', 'Yu.'], 'von': [], 'last': ['Nikulina'], 'jr': []}),
    (r'Max D{\'{\i}}az',
     {'first': ['Max'], 'von': [], 'last': [r'D{\'{\i}}az'], 'jr': []}),
    (r'Merry Obrecht Sawdey',
     {'first': ['Merry', 'Obrecht'], 'von': [], 'last': ['Sawdey'], 'jr': []}),
    (r'Miroslava Mis{\'a}kov{\'a}',
     {'first': ['Miroslava'], 'von': [], 'last': [r'Mis{\'a}kov{\'a}'], 'jr': []}),
    (r'N. A. F. M. Poppelier',
     {'first': ['N.', 'A.', 'F.', 'M.'], 'von': [], 'last': ['Poppelier'], 'jr': []}),
    (r'Nico A. F. M. Poppelier',
     {'first': ['Nico', 'A.', 'F.', 'M.'], 'von': [], 'last': ['Poppelier'], 'jr': []}),
    (r'Onofrio de Bari',
     {'first': ['Onofrio'], 'von': ['de'], 'last': ['Bari'], 'jr': []}),
    (r'Pablo Rosell-Gonz{\'a}lez',
     {'first': ['Pablo'], 'von': [], 'last': [r'Rosell-Gonz{\'a}lez'], 'jr': []}),
    (r'Paco La Bruna',
     {'first': ['Paco', 'La'], 'von': [], 'last': ['Bruna'], 'jr': []}),
    (r'Paul Franchi-Zannettacci',
     {'first': ['Paul'], 'von': [], 'last': ['Franchi-Zannettacci'], 'jr': []}),
    (r'Pavel \v{S}eve\v{c}ek',
     {'first': ['Pavel'], 'von': [], 'last': [r'\v{S}eve\v{c}ek'], 'jr': []}),
    (r'Petr Ol{\v{s}}ak',
     {'first': ['Petr'], 'von': [], 'last': [r'Ol{\v{s}}ak'], 'jr': []}),
    (r'Petr Ol{\v{s}}{\'a}k',
     {'first': ['Petr'], 'von': [], 'last': [r'Ol{\v{s}}{\'a}k'], 'jr': []}),
    (r'Primo\v{z} Peterlin',
     {'first': [r'Primo\v{z}'], 'von': [], 'last': ['Peterlin'], 'jr': []}),
    (r'Prof. Alban Grimm',
     {'first': ['Prof.', 'Alban'], 'von': [], 'last': ['Grimm'], 'jr': []}),
    (r'P{\'e}ter Husz{\'a}r',
     {'first': [r'P{\'e}ter'], 'von': [], 'last': [r'Husz{\'a}r'], 'jr': []}),
    (r'P{\'e}ter Szab{\'o}',
     {'first': [r'P{\'e}ter'], 'von': [], 'last': [r'Szab{\'o}'], 'jr': []}),
    (r'Rafa{\l}\.Zbikowski',
     {'first': [], 'von': [], 'last': [r'Rafa{\l}\.Zbikowski'], 'jr': []}),
    (r'Rainer Sch{\"o}pf',
     {'first': ['Rainer'], 'von': [], 'last': [r'Sch{\"o}pf'], 'jr': []}),
    (r'T. L. (Frank) Pappas',
     {'first': ['T.', 'L.', '(Frank)'], 'von': [], 'last': ['Pappas'], 'jr': []}),
    (r'TUG 2004 conference',
     {'first': ['TUG', '2004'], 'von': [], 'last': ['conference'], 'jr': []}),
    (r'TUG {\sltt DVI} Driver Standards Committee',
     {'first': ['TUG', '{\sltt DVI}', 'Driver', 'Standards'], 'von': [],
      'last': ['Committee'], 'jr': []}),
    (r'TUG {\sltt xDVIx} Driver Standards Committee',
     {'first': ['TUG'], 'von': ['{\sltt xDVIx}'],
      'last': ['Driver', 'Standards', 'Committee'], 'jr': []}),
    (r'University of M{\"u}nster',
     {'first': ['University'], 'von': ['of'], 'last': [r'M{\"u}nster'], 'jr': []}),
    (r'Walter van der Laan',
     {'first': ['Walter'], 'von': ['van', 'der'], 'last': ['Laan'], 'jr': []}),
    (r'Wendy G. McKay',
     {'first': ['Wendy', 'G.'], 'von': [], 'last': ['McKay'], 'jr': []}),
    (r'Wendy McKay',
     {'first': ['Wendy'], 'von': [], 'last': ['McKay'], 'jr': []}),
    (r'W{\l}odek Bzyl',
     {'first': [r'W{\l}odek'], 'von': [], 'last': ['Bzyl'], 'jr': []}),
    (r'\LaTeX Project Team',
     {'first': [r'\LaTeX', 'Project'], 'von': [], 'last': ['Team'], 'jr': []}),
    (r'\rlap{Lutz Birkhahn}',
     {'first': [], 'von': [], 'last': [r'\rlap{Lutz Birkhahn}'], 'jr': []}),
    (r'{Jim Hef{}feron}',
     {'first': [], 'von': [], 'last': ['{Jim Hef{}feron}'], 'jr': []}),
    (r'{Kristoffer H\o{}gsbro Rose}',
     {'first': [], 'von': [], 'last': ['{Kristoffer H\o{}gsbro Rose}'], 'jr': []}),
    (r'{TUG} {Working} {Group} on a {\TeX} {Directory} {Structure}',
     {'first': ['{TUG}', '{Working}', '{Group}'], 'von': ['on', 'a'],
      'last': [r'{\TeX}', '{Directory}', '{Structure}'], 'jr': []}),
    (r'{The \TUB{} Team}',
     {'first': [], 'von': [], 'last': [r'{The \TUB{} Team}'], 'jr': []}),
    (r'{\LaTeX} project team',
     {'first': [r'{\LaTeX}'], 'von': ['project'], 'last': ['team'], 'jr': []}),
    (r'{\NTG{} \TeX{} future working group}',
     {'first': [], 'von': [], 'last': [r'{\NTG{} \TeX{} future working group}'], 'jr': []}),
    (r'{{\LaTeX\,3} Project Team}',
     {'first': [], 'von': [], 'last': [r'{{\LaTeX\,3} Project Team}'], 'jr': []}),
    (r'Johansen Kyle, Derik Mamania M.',
     {'first': ['Derik', 'Mamania', 'M.'], 'von': [], 'last': ['Johansen', 'Kyle'], 'jr': []}),
    (r"Johannes Adam Ferdinand Alois Josef Maria Marko d'Aviano Pius von und zu Liechtenstein",
     {'first': ['Johannes', 'Adam', 'Ferdinand', 'Alois', 'Josef', 'Maria', 'Marko'],
      'von': ["d'Aviano", 'Pius', 'von', 'und', 'zu'], 'last': ['Liechtenstein'], 'jr': []}),
    # NOTE(review): this expected split looks like it documents current parser
    # behavior on an escaped tilde rather than the ideal split — confirm.
    (r"Brand\~{a}o, F",
     {'first': ['F'], 'von': [], 'last': ['Brand\\', '{a}o'], 'jr': []}),
)
# Allow running this test module directly rather than through a test runner.
if __name__ == '__main__':
    unittest.main()
python-bibtexparser-1.4.3/docs/ 0000775 0000000 0000000 00000000000 14731101544 0016460 5 ustar 00root root 0000000 0000000 python-bibtexparser-1.4.3/docs/Makefile 0000664 0000000 0000000 00000012735 14731101544 0020130 0 ustar 00root root 0000000 0000000 # Makefile for Sphinx documentation
# (continuation of the standard sphinx-quickstart Makefile header)
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = build

# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source

.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext

help:
	@echo "Please use \`make ' where is one of"
	@echo " html to make standalone HTML files"
	@echo " dirhtml to make HTML files named index.html in directories"
	@echo " singlehtml to make a single large HTML file"
	@echo " pickle to make pickle files"
	@echo " json to make JSON files"
	@echo " htmlhelp to make HTML files and a HTML help project"
	@echo " qthelp to make HTML files and a qthelp project"
	@echo " devhelp to make HTML files and a Devhelp project"
	@echo " epub to make an epub"
	@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo " latexpdf to make LaTeX files and run them through pdflatex"
	@echo " text to make text files"
	@echo " man to make manual pages"
	@echo " texinfo to make Texinfo files"
	@echo " info to make Texinfo files and run them through makeinfo"
	@echo " gettext to make PO message catalogs"
	@echo " changes to make an overview of all changed/added/deprecated items"
	@echo " linkcheck to check all external links for integrity"
	@echo " doctest to run all doctests embedded in the documentation (if enabled)"

# Remove all build products.
clean:
	-rm -rf $(BUILDDIR)/*

# Each builder target below invokes sphinx-build with the matching builder
# name; output goes to $(BUILDDIR)/<builder>.
html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

singlehtml:
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."

pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	".hhp project file in $(BUILDDIR)/htmlhelp."

qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/BibtexParser.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/BibtexParser.qhc"

devhelp:
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/BibtexParser"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/BibtexParser"
	@echo "# devhelp"

epub:
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."

latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make' in that directory to run these through (pdf)latex" \
	"(use \`make latexpdf' here to do that automatically)."

# latex followed by a pdflatex pass inside the generated build directory.
latexpdf:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

text:
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."

man:
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."

texinfo:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo
	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
	@echo "Run \`make' in that directory to run these through makeinfo" \
	"(use \`make info' here to do that automatically)."

# texinfo followed by a makeinfo pass inside the generated build directory.
info:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo "Running Texinfo files through makeinfo..."
	make -C $(BUILDDIR)/texinfo info
	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."

# Uses I18NSPHINXOPTS (no shared doctrees; see note at the top).
gettext:
	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
	@echo
	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."

changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."

linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	"or in $(BUILDDIR)/linkcheck/output.txt."

doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	"results in $(BUILDDIR)/doctest/output.txt."
python-bibtexparser-1.4.3/docs/source/ 0000775 0000000 0000000 00000000000 14731101544 0017760 5 ustar 00root root 0000000 0000000 python-bibtexparser-1.4.3/docs/source/bibtex_conv.rst 0000664 0000000 0000000 00000002404 14731101544 0023014 0 ustar 00root root 0000000 0000000 ===============================================
Bibtex tips, conventions and unrelated projects
===============================================
This page presents various resources about bibtex in general.
Format
======
http://maverick.inria.fr/~Xavier.Decoret/resources/xdkbibtex/bibtex_summary.html
* Comments
* Variable
* @preamble
* Name convention
Upper case letters in titles
----------------------------
To prevent BibTeX from lower-casing a letter or word in a title, wrap it in curly braces, like {this}.
General references
------------------
* http://tug.ctan.org/tex-archive/info/bibtex/tamethebeast/ttb_en.pdf
* http://ctan.mirrors.hoobly.com/macros/latex/contrib/biblatex/doc/biblatex.pdf
IEEE citation reference
-----------------------
* https://origin.www.ieee.org/documents/ieeecitationref.pdf
Common Errors in Bibliographies, by John Owens
----------------------------------------------
* http://www.ece.ucdavis.edu/~jowens/biberrors.html
Common abbreviations for journals
---------------------------------
* Jabref list http://jabref.sourceforge.net/resources.php#downloadlists
Projects
========
Here are some interesting projects using bibtex but not necessarily this parser.
Display your bibliography in html pages
---------------------------------------
* http://www.monperrus.net/martin/bibtexbrowser/
python-bibtexparser-1.4.3/docs/source/bibtexparser.rst 0000664 0000000 0000000 00000002524 14731101544 0023207 0 ustar 00root root 0000000 0000000 .. _bibtexparser_api:
.. contents::
bibtexparser: API
=================
:mod:`bibtexparser` --- Parsing and writing BibTeX files
--------------------------------------------------------
.. automodule:: bibtexparser
:members: load, loads, dumps, dump
:mod:`bibtexparser.bibdatabase` --- The bibliographic database object
---------------------------------------------------------------------
.. autoclass:: bibtexparser.bibdatabase.BibDatabase
:members: entries, entries_dict, comments, strings, preambles
:mod:`bibtexparser.bparser` --- Tune the default parser
--------------------------------------------------------
.. automodule:: bibtexparser.bparser
:members:
:mod:`bibtexparser.customization` --- Functions to customize records
--------------------------------------------------------------------
.. automodule:: bibtexparser.customization
:members:
Exception classes
^^^^^^^^^^^^^^^^^
.. autoclass:: bibtexparser.customization.InvalidName
:mod:`bibtexparser.bwriter` --- Tune the default writer
-------------------------------------------------------
.. autoclass:: bibtexparser.bwriter.BibTexWriter
:members:
:mod:`bibtexparser.bibtexexpression` --- Parser's core relying on pyparsing
---------------------------------------------------------------------------
.. automodule:: bibtexparser.bibtexexpression
:members:
python-bibtexparser-1.4.3/docs/source/conf.py 0000664 0000000 0000000 00000017506 14731101544 0021270 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# BibtexParser documentation build configuration file, created by
# sphinx-quickstart on Thu Aug 1 13:30:23 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'BibtexParser'
copyright = '2013-2016, F. Boulogne and other contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# Derive the displayed version strings from the installed package, falling
# back to a placeholder when bibtexparser is not importable at build time.
try:
    import bibtexparser as bp
except ImportError:
    version = release = 'latest'
else:
    # The short X.Y version and the full release (incl. alpha/beta/rc tags)
    # are identical for this project.
    version = release = bp.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# " v documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'BibtexParserdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
    # All LaTeX options are left at their Sphinx defaults (commented out).
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'BibtexParser.tex', 'BibtexParser Documentation',
     'F. Boulogne', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    # Section 1: user commands.
    ('index', 'bibtexparser', 'BibtexParser Documentation',
     ['F. Boulogne'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    # The description mirrors the package summary declared in setup.py
    # instead of the sphinx-quickstart placeholder text.
    ('index', 'BibtexParser', 'BibtexParser Documentation',
     'F. Boulogne', 'BibtexParser', 'Bibtex parser for python 3',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
python-bibtexparser-1.4.3/docs/source/demo.py 0000664 0000000 0000000 00000002403 14731101544 0021255 0 ustar 00root root 0000000 0000000 import logging
import logging.config
import io
logger = logging.getLogger(__name__)
# Route every log record (root logger, DEBUG and up) to a StreamHandler
# with a detailed formatter so the parser internals can be inspected.
_LOGGING_CONFIG = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'standard': {
            'format': '%(asctime)s [%(levelname)s] %(name)s %(funcName)s:%(lineno)d: %(message)s'
        },
    },
    'handlers': {
        'default': {
            'level': 'DEBUG',
            'formatter': 'standard',
            'class': 'logging.StreamHandler',
        },
    },
    'loggers': {
        # '' is the root logger: everything propagates up to it.
        '': {
            'handlers': ['default'],
            'level': 'DEBUG',
            'formatter': 'standard',
            'propagate': True,
        },
    },
}
logging.config.dictConfig(_LOGGING_CONFIG)
# Demo entry point: parse a small inline BibTeX source with the debug
# logging configured above, then print the parsed entries.
if __name__ == '__main__':
    bibtex_source = """@ARTICLE{Cesar2013,
    author = {Jean César},
    title = {An amazing title},
    year = {2013},
    month = {1},
    volume = {12},
    pages = {12--23},
    journal = {Nice Journal},
    abstract = {This is an abstract. This line should be long enough to test
    multilines...},
    comments = {A comment},
    keywords = {keyword1, keyword2},
    }
    """
    from bibtexparser.bparser import BibTexParser
    # Wrap the source in StringIO to mimic reading from a real file object.
    with io.StringIO(bibtex_source) as f:
        bp = BibTexParser(f.read())
    print(bp.get_entry_list())
python-bibtexparser-1.4.3/docs/source/index.rst 0000664 0000000 0000000 00000002453 14731101544 0021625 0 ustar 00root root 0000000 0000000 .. BibtexParser documentation master file, created by
sphinx-quickstart on Thu Aug 1 13:30:23 2013.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to BibtexParser's documentation!
========================================
:Author: François Boulogne, Olivier Mangin, Lucas Verney, and other contributors.
:Source Code: `github.com project <https://github.com/sciunto-org/python-bibtexparser>`_
:Bugs: `github.com <https://github.com/sciunto-org/python-bibtexparser/issues>`_
:Generated: |today|
:License: LGPL v3 or BSD
:Version: |release|
BibtexParser is a python library to parse bibtex files. The code relies on `pyparsing <https://github.com/pyparsing/pyparsing>`_ and is tested with unittests.
BibtexParser is used in more than 1300 open-source `repositories `_.
Contents:
.. toctree::
:maxdepth: 2
install.rst
tutorial.rst
bibtexparser.rst
logging.rst
bibtex_conv.rst
Other projects
==============
* http://pybtex.sourceforge.net/
* http://pybliographer.org/
* https://github.com/matthew-brett/babybib
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
python-bibtexparser-1.4.3/docs/source/install.rst 0000664 0000000 0000000 00000003435 14731101544 0022165 0 ustar 00root root 0000000 0000000 ========================
How to install and test?
========================
How to install?
===============
Requirements
------------
* python **2.7** or python **3.3** or newer
* pyparsing **2.0.3** or newer
Package manager (recommended for those OS users)
------------------------------------------------
* `Archlinux `_
* `Debian `_
pip (recommended to other users)
---------------------------------
To install with pip:
.. code-block:: sh
pip install bibtexparser
Manual installation (recommended for packagers)
-----------------------------------------------
Download the archive on `Pypi <https://pypi.org/project/bibtexparser/>`_.
.. code-block:: sh
python setup.py install
How to run the test suite?
==========================
This paragraph briefly describes how to run the test suite.
This is useful for contributors, for packagers but also for users who wants to check their environment.
Virtualenv
----------
You can make a virtualenv. I like `pew `_ for that because the API is easier.
The first time, you need to make a virtualenv
.. code-block:: sh
pew mkproject bibtexparser
pip install -r requirements.txt
python setup.py install
nosetests
If you already have a virtualenv, you can use workon
.. code-block:: sh
pew workon bibtexparser
Tox
---
The advantage of `Tox `_ is that you can build and test the code against several versions of python.
Of course, you need tox to be installed on your system.
The configuration file is tox.ini, in the root of the project. There, you can change the python versions.
.. code-block:: sh
tox # and nothing more :)
python-bibtexparser-1.4.3/docs/source/logging.rst 0000664 0000000 0000000 00000004740 14731101544 0022145 0 ustar 00root root 0000000 0000000 How to report a bug?
====================
Bugs can be reported on github or via private communications.
Steps
-----
1. Make a minimal code, which reproduces the problem.
2. Provide the code, the bibtex (if necessary), the output.
3. For a parsing error, provide the expected output.
4. For a crash, set the logger to the debug level (see below).
If you want to provide a patch (that's wonderful! thank you), please, take few minutes to write a unit test that fails without your contribution.
Logging module to understand failures
-------------------------------------
Syntax of bibtex files is simple but there are many possible variations. This library probably fails for some of them.
Bibtexparser includes a large quantity of debug messages which helps to understand why and where the parser fails.
The example below can be used to print these messages in the console.
.. code-block:: python
import logging
import logging.config
logger = logging.getLogger(__name__)
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s %(funcName)s:%(lineno)d: %(message)s'
},
},
'handlers': {
'default': {
'level':'DEBUG',
'formatter': 'standard',
'class':'logging.StreamHandler',
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': 'DEBUG',
'formatter': 'standard',
'propagate': True
}
}
})
if __name__ == '__main__':
bibtex = """@ARTICLE{Cesar2013,
author = {Jean César},
title = {An amazing title},
year = {2013},
month = jan,
volume = {12},
pages = {12--23},
journal = {Nice Journal},
abstract = {This is an abstract. This line should be long enough to test
multilines...},
comments = {A comment},
keywords = {keyword1, keyword2},
}
"""
with open('/tmp/bibtex.bib', 'w') as bibfile:
bibfile.write(bibtex)
from bibtexparser.bparser import BibTexParser
with open('/tmp/bibtex.bib', 'r') as bibfile:
bp = BibTexParser(bibfile.read())
print(bp.get_entry_list())
I recommend you to use this output if you would like to report a bug.
python-bibtexparser-1.4.3/docs/source/tutorial.rst 0000664 0000000 0000000 00000030034 14731101544 0022355 0 ustar 00root root 0000000 0000000 ========
Tutorial
========
Step 0: Vocabulary
==================
* An **entry** designates for example `@book{...}`, `@article{...}`, etc.
* A **comment** is written as `@comment{...}`.
* A **preamble** is a `@preamble{...}` block.
* A **string** is `@string{...}`.
In an entry, you can find
* an **entry type** like `article`, `book`, etc.
* **entry keys** or **keys** such as `author`, `title`, `year`...
* and also **records**, which designates the values of those keys.
Step 1: Prepare a BibTeX file
=============================
First, we prepare a BibTeX sample file. This is just for the purpose of illustration:
.. code-block:: python
bibtex = """@ARTICLE{Cesar2013,
author = {Jean César},
title = {An amazing title},
year = {2013},
volume = {12},
pages = {12--23},
journal = {Nice Journal},
abstract = {This is an abstract. This line should be long enough to test
multilines...},
comments = {A comment},
keywords = {keyword1, keyword2}
}
"""
with open('bibtex.bib', 'w') as bibfile:
bibfile.write(bibtex)
Step 2: Parse it!
=================
Simplest call
-------------
OK. Everything is in place. Let's parse the BibTeX file.
.. code-block:: python
import bibtexparser
with open('bibtex.bib') as bibtex_file:
bib_database = bibtexparser.load(bibtex_file)
print(bib_database.entries)
It prints a list of dictionaries for reference entries, for example books, articles:
.. code-block:: python
[{'journal': 'Nice Journal',
'comments': 'A comment',
'pages': '12--23',
'abstract': 'This is an abstract. This line should be long enough to test\nmultilines...',
'title': 'An amazing title',
'year': '2013',
'volume': '12',
'ID': 'Cesar2013',
'author': 'Jean César',
'keyword': 'keyword1, keyword2',
'ENTRYTYPE': 'article'}]
Note that, by convention, uppercase keys (ID, ENTRYTYPE) are data generated by the parser, while lowercase keys come from the original bibtex file.
You can also print comments, preambles and string:
.. code-block:: python
print(bib_database.comments)
print(bib_database.preambles)
print(bib_database.strings)
.. note::
If your bibtex contains months defined as strings such as :code:`month = jan`, you will need to parse it with the :code:`common_strings` option:
:code:`bib_database = bibtexparser.bparser.BibTexParser(common_strings=True).parse_file(bibtex_file)`. (More in `Using bibtex strings`_.)
Parse a string
--------------
If for some reason, you prefer to parse a string, that's also possible:
.. code-block:: python
import bibtexparser
with open('bibtex.bib') as bibtex_file:
bibtex_str = bibtex_file.read()
bib_database = bibtexparser.loads(bibtex_str)
Tune parser's options
---------------------
In the previous snippet, several default options are used.
You can tweak them as you wish.
.. code-block:: python
import bibtexparser
from bibtexparser.bparser import BibTexParser
parser = BibTexParser(common_strings=False)
parser.ignore_nonstandard_types = False
parser.homogenise_fields = False
bib_database = bibtexparser.loads(bibtex_str, parser)
.. note::
The :code:`common_strings` option needs to be set when the parser object is created and has no effect if changed afterwards.
Step 3: Export
==============
Once you worked on your parsed database, you may want to export the result. This library provides some functions to help on that. However, you can write your own functions if you have specific requirements.
Create a BibTeX file or string
--------------------------------
The bibliographic data can be converted back into a string :
.. code-block:: python
import bibtexparser
bibtex_str = bibtexparser.dumps(bib_database)
or a BibTeX file like this:
.. code-block:: python
import bibtexparser
with open('bibtex.bib', 'w') as bibtex_file:
bibtexparser.dump(bibtex_database, bibtex_file)
Call the writer
---------------
In the first section we prepared a BibTeX sample file, we can prepare the same file using pure python and the ``BibTexWriter`` class.
.. code-block:: python
from bibtexparser.bwriter import BibTexWriter
from bibtexparser.bibdatabase import BibDatabase
db = BibDatabase()
db.entries = [
{'journal': 'Nice Journal',
'comments': 'A comment',
'pages': '12--23',
'month': 'jan',
'abstract': 'This is an abstract. This line should be long enough to test\nmultilines...',
'title': 'An amazing title',
'year': '2013',
'volume': '12',
'ID': 'Cesar2013',
'author': 'Jean César',
'keyword': 'keyword1, keyword2',
'ENTRYTYPE': 'article'}]
writer = BibTexWriter()
with open('bibtex.bib', 'w') as bibfile:
bibfile.write(writer.write(db))
This code generates the following file:
.. code-block:: latex
@article{Cesar2013,
abstract = {This is an abstract. This line should be long enough to test
multilines...},
author = {Jean César},
comments = {A comment},
journal = {Nice Journal},
keyword = {keyword1, keyword2},
month = {jan},
pages = {12--23},
title = {An amazing title},
volume = {12},
year = {2013}
}
The writer also has several flags that can be enabled to customize the output file.
For example we can use ``indent`` and ``comma_first`` to customize the previous entry, first the code:
.. code-block:: python
from bibtexparser.bwriter import BibTexWriter
from bibtexparser.bibdatabase import BibDatabase
db = BibDatabase()
db.entries = [
{'journal': 'Nice Journal',
'comments': 'A comment',
'pages': '12--23',
'month': 'jan',
'abstract': 'This is an abstract. This line should be long enough to test\nmultilines...',
'title': 'An amazing title',
'year': '2013',
'volume': '12',
'ID': 'Cesar2013',
'author': 'Jean César',
'keyword': 'keyword1, keyword2',
'ENTRYTYPE': 'article'}]
writer = BibTexWriter()
writer.indent = ' ' # indent entries with 4 spaces instead of one
writer.comma_first = True # place the comma at the beginning of the line
with open('bibtex.bib', 'w') as bibfile:
bibfile.write(writer.write(db))
This code results in the following, customized, file:
.. code-block:: latex
@article{Cesar2013
, abstract = {This is an abstract. This line should be long enough to test
multilines...}
, author = {Jean César}
, comments = {A comment}
, journal = {Nice Journal}
, keyword = {keyword1, keyword2}
, month = {jan}
, pages = {12--23}
, title = {An amazing title}
, volume = {12}
, year = {2013}
}
Flags to the writer object can modify not only how an entry is printed but how several BibTeX entries are sorted and separated.
See the :ref:`API ` for the full list of flags.
Step 4: Add salt and pepper
===========================
In this section, we discuss about some customizations and details.
Customizations
--------------
By default, the parser does not alter the content of each field and keeps it as a simple string. There are many cases
where this is not desired. For example, instead of a string with a multiple of authors, it could be parsed as a list.
To modify field values during parsing, a callback function can be supplied to the parser which can be used to modify
BibTeX entries. The library includes several functions which may be used. Alternatively, you can read them to create
your own functions.
.. code-block:: python
import bibtexparser
from bibtexparser.bparser import BibTexParser
from bibtexparser.customization import *
# Let's define a function to customize our entries.
# It takes a record and return this record.
def customizations(record):
"""Use some functions delivered by the library
:param record: a record
:returns: -- customized record
"""
record = convert_to_unicode(record)
record = author(record)
record = editor(record)
record = journal(record)
record = keyword(record)
record = link(record)
record = page_double_hyphen(record)
record = doi(record)
return record
with open('bibtex.bib') as bibtex_file:
parser = BibTexParser()
parser.customization = customizations
bib_database = bibtexparser.load(bibtex_file, parser=parser)
print(bib_database.entries)
If you think that you have a customization which could be useful to others, please share with us!
Accents and weird characters
----------------------------
Your bibtex may contain accents and specific characters.
They are sometimes coded like this ``\'{e}`` but this is not the correct way, ``{\'e}`` is preferred. Moreover, you may want to manipulate ``é``. There are different situations:
* Case 1: you plan to use this library to work with latex and you assume that the original bibtex is clean. You have nothing to do.
* Case 2: you plan to use this library to work with latex but your bibtex is not really clean.
.. code-block:: python
import bibtexparser
from bibtexparser.bparser import BibTexParser
from bibtexparser.customization import homogenize_latex_encoding
with open('bibtex.bib') as bibtex_file:
parser = BibTexParser()
parser.customization = homogenize_latex_encoding
bib_database = bibtexparser.load(bibtex_file, parser=parser)
print(bib_database.entries)
* Case 3: you plan to use this library to work with something different and your bibtex is not really clean.
Then, you probably want to use unicode.
.. code-block:: python
import bibtexparser
from bibtexparser.bparser import BibTexParser
from bibtexparser.customization import convert_to_unicode
with open('bibtex.bib') as bibtex_file:
parser = BibTexParser()
parser.customization = convert_to_unicode
bib_database = bibtexparser.load(bibtex_file, parser=parser)
print(bib_database.entries)
.. Note::
If you want to mix different customization functions, you can write your own function.
Using bibtex strings
--------------------
.. Warning:: support for bibtex strings representation is still an experimental feature; the way strings are represented is likely to change in future releases.
Bibtex strings and string expressions are expanded by default into the value they represent.
This behavior is controlled by the ``interpolate_strings`` argument of the BibTexParser. It defaults to ``True`` but can be set to ``False``, in which case bibtex strings and string expressions from input files are represented with the :class:`bibdatabase.BibDataString` and :class:`bibdatabase.BibDataStringExpression` from the :mod:`bibdatabase` module. Both classes retain the intrinsic structure of the string or expression so that they can be written to a new file, the same way. Each instance provides a :func:`get_value` method to interpolate the string or expression and the module also provides an :func:`bibdatabase.as_text` helper to expand a string or an expression when needed.
Using the code would yield the following output.
.. code-block:: python
from bibtexparser.bparser import BibTexParser
from bibtexparser.bibdatabase import as_text
bibtex = """@STRING{ jean = "Jean"}
@ARTICLE{Cesar2013,
author = jean # { César},
title = {An amazing title},
year = {2013},
month = jan,
volume = {12},
pages = {12--23},
journal = {Nice Journal},
}
"""
bp = BibTexParser(interpolate_strings=False)
bib_database = bp.parse(bibtex)
bib_database.entries[0]
as_text(bib_database.entries[0]['author'])
.. code-block:: python
{'ENTRYTYPE': 'article',
'ID': 'Cesar2013',
'author': BibDataStringExpression([BibDataString('jean'), ' César']),
'journal': 'Nice Journal',
'month': BibDataStringExpression([BibDataString('jan')]),
'pages': '12--23',
'title': 'An amazing title',
}
'Jean César'
python-bibtexparser-1.4.3/requirements.txt 0000664 0000000 0000000 00000000021 14731101544 0021005 0 ustar 00root root 0000000 0000000 pyparsing>=2.0.3
python-bibtexparser-1.4.3/setup.py 0000664 0000000 0000000 00000001621 14731101544 0017242 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
try:
from setuptools import setup
except ImportError as ex:
print('[python-bibtexparser] setuptools not found. Falling back to distutils.core')
from distutils.core import setup
# Extract __version__ from the package source without importing it (an
# import could fail before dependencies are installed). Reads a line of
# the form: __version__ = "x.y.z" and strips the surrounding quotes.
version = None
with open('bibtexparser/__init__.py', encoding='utf-8') as fh:
    for line in fh:
        if line.startswith('__version__'):
            version = line.strip().split()[-1][1:-1]
            break
if version is None:
    # Fail loudly here instead of with a confusing NameError at setup() time.
    raise RuntimeError('Unable to find __version__ in bibtexparser/__init__.py')
def load_readme():
    """Return the contents of README.rst for use as the long description."""
    # Explicit encoding: the README contains non-ASCII author names and must
    # not depend on the platform default encoding (e.g. cp1252 on Windows).
    with open("README.rst", encoding="utf-8") as f:
        return f.read()
# Package metadata; version is extracted above from bibtexparser/__init__.py
# and the long description is taken verbatim from README.rst.
setup(
    name='bibtexparser',
    version=version,
    url="https://github.com/sciunto-org/python-bibtexparser",
    author="Francois Boulogne and other contributors",
    license="LGPLv3 or BSD",
    author_email="code@mweiss.ch",
    description="Bibtex parser for python 3",
    long_description_content_type="text/x-rst",
    long_description=load_readme(),
    packages=['bibtexparser'],
    install_requires=['pyparsing>=2.0.3'],
)
python-bibtexparser-1.4.3/tox.ini 0000664 0000000 0000000 00000000126 14731101544 0017042 0 ustar 00root root 0000000 0000000 [tox]
envlist = py27,py35
[testenv]
deps = nose
pyparsing
commands = nosetests