author    | AidenRushbrooke <72034940+AidenRushbrooke@users.noreply.github.com> | 2021-12-13 01:09:03 +0000
committer | AidenRushbrooke <72034940+AidenRushbrooke@users.noreply.github.com> | 2021-12-13 01:09:03 +0000
commit    | b5f34c03cfec59625135deb725788392433c2102 (patch)
tree      | cb81798b608bc68dc54cc2824e9d871bae849f82 /report
parent    | f9d9cd9931c82f0d61ddee529bdeb57705b0f84d (diff)
download  | esotericFORTRAN-b5f34c03cfec59625135deb725788392433c2102.tar.gz, esotericFORTRAN-b5f34c03cfec59625135deb725788392433c2102.zip
Added to chapter 3 of the report
Diffstat (limited to 'report')
-rw-r--r-- | report/References.bib              | 135
-rw-r--r-- | report/esoteric_project_report.pdf | bin 300200 -> 301062 bytes
-rw-r--r-- | report/esoteric_project_report.tex | 21
3 files changed, 21 insertions, 135 deletions
diff --git a/report/References.bib b/report/References.bib
index a6c8964..e8ac243 100644
--- a/report/References.bib
+++ b/report/References.bib
@@ -1,132 +1,4 @@
-[128 deleted lines elided: unused BibTeX entries VandenBergen:1999, BVs, realTimeColDet, analyticalColResponse, Taylor:2007, Soos:2012, Morton:1966, Blender, Lapeer:2001 and Sorbe:1983]
 @book{davie1982recursive,
  title={Recursive descent compiling},
  author={Davie, JT and Morrison, Ronald},
@@ -134,6 +6,13 @@ year = {1983}
  publisher={John Wiley \& Sons, Inc.}
 }
+@misc{GCCParser,
+  title={GCC New C Parser},
+  author={Myers, Joseph},
+  year={2008},
+  url={https://gcc.gnu.org/wiki/New_C_Parser}
+}
+
\ No newline at end of file
diff --git a/report/esoteric_project_report.pdf b/report/esoteric_project_report.pdf
index b9d9acb..b212a3f 100644
Binary files differ
diff --git a/report/esoteric_project_report.tex b/report/esoteric_project_report.tex
index be12b46..4ab77e5 100644
--- a/report/esoteric_project_report.tex
+++ b/report/esoteric_project_report.tex
@@ -114,13 +114,20 @@ Esolangs are programming languages designed to be jokes or proof of concept lang
 \section{Example - Shakespearian Programming Language}
 The Shakespeare Programming Language is an esoteric language parodying extracts from Romeo and Juliet. Here, code is written so that it reads like a script, such as \texttt{[Enter Juliet]}, where \texttt{Juliet} may be a variable name. Other examples include using \texttt{Acts} or \texttt{Scenes} as GOTO statements, where the interpreter can jump to a certain point in the code and read from there. Otherwise, these are ignored, much like the title or character descriptions. Other examples include LOLCODE, Hodor and Whitespace. This serves the purpose of writing code in a different manner to usual, sometimes for humour. The aim is to replace typical language features so that the code can still be read by a compiler or interpreter but looks and reads very differently to the user.
-\chapter{The Structure of our Programming Language}
-\section{The Lexar (Lexical Analysis)}
-In the field of linguistics or computer programming language design, lexical analysis is the process of converting a sequence of characters (such as a computer program) into a sequence of tokens.
-This process is also known as tokenisation, particularly in the field of natural language processing (NLP).
-When designing a programming language, the task of lexical analysis or tokenisation is performed by a lexer.
-The actual matching of tokens is often performed using regular expressions, defined in the design stage in order to reliably match all tokens in the language.
-
+\chapter{Compiler Design and Methodology}
+This chapter will discuss how compilers work and the design decisions behind them. The goal of a compiler is to convert source code into a form which the CPU can execute. As mentioned in chapter 2, a compiler converts the entire source code in one go and then executes it, unlike an interpreter, which executes one line at a time. However, before the code can be executed, a number of steps have to be taken.
+\section{The Lexer (Lexical Analysis)}
+The first step for a compiler is to convert the initial source code into groups of characters which the computer can understand, generally known as tokens. This process is also known as tokenisation, particularly in the field of natural language processing (NLP). When designing a programming language, the task of lexical analysis or tokenisation is performed by a lexer, or scanner. The lexer looks through each character and creates equivalent tokens based on the character values. For example, the character '+' may be converted into the token "plus", which in a programming language would likely represent addition. Some tokens may also be multiple characters long, such as strings, identifiers and keywords like "if" or "int". This stage can also detect some syntax errors, by reporting any unrecognised characters. The end result of lexical analysis is a series of tokens representing the original source code.
+\section{The Parser}
+The second stage of the compiler is to work out how the tokens link together to form a program. The parser takes the list of tokens created by the lexer and compares it against the language grammar. This allows it to understand how the tokens are linked, and it can then use this information to create an abstract syntax tree (AST), which represents the code in a logical fashion. The parser can also detect any syntax errors in the initial source code while comparing it against the language grammar.
+
+The specific parser we have chosen is a recursive descent parser, a top-down parser where each rule in the grammar maps to a specific function in the parser implementation. A recursive descent parser works by starting at the first rule in the grammar, then working downward, finding each statement or expression. We chose this design as it is both simple to implement and gives fine control over parser behaviour and error reporting, compared to parsers created by a parser generator from a grammar table. Many compiler implementations also use recursive descent parsers, including GCC \cite{GCCParser} and Clang.
+\section{Optimization}
+This step involves making a variety of optimizations to the program to speed up execution. This may involve simplifying loops, or pre-calculating complicated expressions which contain only constants. However, this step is not vital to the function of an effective compiler, and some compilers skip optimization entirely.
+\section{Code Generation}
+At this stage, the code is converted into a form which can be executed by the CPU, such as machine code. However, due to the complexity of converting directly to machine code, an alternative is common: the compiler emits an intermediate form, known as bytecode, designed to run on a virtual machine. Java is a well-known example of this method, where Java programs are compiled to run on the Java Virtual Machine.
+
+Another method is to compile into a different, existing programming language; a compiler which does this is known as a source-to-source compiler. Here, instead of writing machine code or bytecode, the compiler creates a string of valid source code for the target language, based on the initial source code given to the main compiler. This can then be executed using pre-existing compilers for the target language. One common use of source-to-source compilers is for web browsers, where many languages compile to JavaScript.
 \chapter{Grammar}
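
The new chapter walks through the classic compiler pipeline, and each stage can be made concrete with a short sketch. The examples below are written in Python purely for illustration; the token names, the toy expression grammar, the tuple-shaped AST, and the tiny instruction set are all assumptions invented here, not taken from the esotericFORTRAN sources. First, the lexing stage described in \section{The Lexer}: characters are matched against patterns and emitted as tokens, with unrecognised characters reported as syntax errors.

```python
import re

# Token patterns, tried in order. This token set is invented for the
# sketch and is not the project's actual token definitions.
TOKEN_SPEC = [
    ("NUMBER", r"\d+"),           # integer literals
    ("IDENT",  r"[A-Za-z_]\w*"),  # identifiers and keywords
    ("PLUS",   r"\+"),            # '+' becomes the token PLUS
    ("STAR",   r"\*"),
    ("LPAREN", r"\("),
    ("RPAREN", r"\)"),
    ("SKIP",   r"\s+"),           # whitespace is discarded
]
KEYWORDS = {"if", "int"}          # multi-character keywords

def tokenise(source):
    tokens, pos = [], 0
    while pos < len(source):
        for name, pattern in TOKEN_SPEC:
            match = re.match(pattern, source[pos:])
            if match:
                text = match.group()
                if name == "IDENT" and text in KEYWORDS:
                    name = text.upper()        # e.g. "if" -> IF
                if name != "SKIP":
                    tokens.append((name, text))
                pos += len(text)
                break
        else:
            # Unrecognised character: one kind of syntax error the
            # lexer itself can report.
            raise SyntaxError(f"unrecognised character {source[pos]!r}")
    return tokens

print(tokenise("1 + 2 * 3"))
# [('NUMBER', '1'), ('PLUS', '+'), ('NUMBER', '2'), ('STAR', '*'), ('NUMBER', '3')]
```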
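
Next, the recursive descent design from \section{The Parser}: one function per grammar rule, working top-down, with direct control over error reporting. The three-rule expression grammar here is a toy stand-in for the real language grammar, and the tuples it builds are a stand-in AST.

```python
class Parser:
    def __init__(self, tokens):
        self.tokens = tokens
        self.pos = 0

    def peek(self):
        return self.tokens[self.pos][0] if self.pos < len(self.tokens) else "EOF"

    def eat(self, kind):
        if self.peek() != kind:
            # Tokens do not match the grammar: a syntax error, reported
            # with full control over the message.
            raise SyntaxError(f"expected {kind}, found {self.peek()}")
        tok = self.tokens[self.pos]
        self.pos += 1
        return tok

    # expression -> term (PLUS term)*
    def expression(self):
        node = self.term()
        while self.peek() == "PLUS":
            self.eat("PLUS")
            node = ("add", node, self.term())   # AST node as a tuple
        return node

    # term -> factor (STAR factor)*
    def term(self):
        node = self.factor()
        while self.peek() == "STAR":
            self.eat("STAR")
            node = ("mul", node, self.factor())
        return node

    # factor -> NUMBER | LPAREN expression RPAREN
    def factor(self):
        if self.peek() == "NUMBER":
            return ("num", int(self.eat("NUMBER")[1]))
        self.eat("LPAREN")
        node = self.expression()
        self.eat("RPAREN")
        return node

# Token stream in the same shape the lexer sketch produces.
tokens = [("NUMBER", "1"), ("PLUS", "+"), ("NUMBER", "2"),
          ("STAR", "*"), ("NUMBER", "3")]
print(Parser(tokens).expression())
# ('add', ('num', 1), ('mul', ('num', 2), ('num', 3)))
```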
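
Constant folding is one example of the pre-calculation mentioned in \section{Optimization}: sub-trees of the (assumed) tuple AST that contain only constants are evaluated once at compile time rather than at run time.

```python
# Evaluation rules for the two operators used in the parser sketch.
OPS = {"add": lambda a, b: a + b, "mul": lambda a, b: a * b}

def fold(node):
    kind = node[0]
    if kind == "num":
        return node
    left, right = fold(node[1]), fold(node[2])
    if left[0] == "num" and right[0] == "num":
        # Both operands are constants: pre-calculate the result now.
        return ("num", OPS[kind](left[1], right[1]))
    return (kind, left, right)

ast = ("add", ("num", 1), ("mul", ("num", 2), ("num", 3)))
print(fold(ast))  # ('num', 7)
```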
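
For the bytecode route described in \section{Code Generation}, here is a sketch of a back-end that emits instructions for a small stack-based virtual machine, the same idea Java uses with the JVM; the three-instruction set is invented for the example.

```python
def compile_expr(node, code):
    # Post-order walk: operands are pushed before their operator runs.
    if node[0] == "num":
        code.append(("PUSH", node[1]))
    else:
        compile_expr(node[1], code)
        compile_expr(node[2], code)
        code.append(("ADD",) if node[0] == "add" else ("MUL",))
    return code

def run(code):
    # The "created virtual machine": a loop over bytecode with a stack.
    stack = []
    for instr in code:
        if instr[0] == "PUSH":
            stack.append(instr[1])
        else:
            b, a = stack.pop(), stack.pop()
            stack.append(a + b if instr[0] == "ADD" else a * b)
    return stack.pop()

ast = ("add", ("num", 1), ("mul", ("num", 2), ("num", 3)))
bytecode = compile_expr(ast, [])
print(bytecode)      # [('PUSH', 1), ('PUSH', 2), ('PUSH', 3), ('MUL',), ('ADD',)]
print(run(bytecode)) # 7
```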
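
Finally, the source-to-source approach: instead of machine code or bytecode, the compiler builds a string of valid source code in an existing target language and leaves execution to that language's own compiler. Fortran is used as the target below, as the repository name suggests, but the emitted program shape is purely illustrative.

```python
def emit_expr(node):
    # Render an AST node as a target-language expression string.
    if node[0] == "num":
        return str(node[1])
    op = "+" if node[0] == "add" else "*"
    return f"({emit_expr(node[1])} {op} {emit_expr(node[2])})"

def transpile(ast):
    # Wrap the expression in a minimal valid Fortran program; a
    # pre-existing Fortran compiler can then build and run it.
    return (
        "program generated\n"
        f"  print *, {emit_expr(ast)}\n"
        "end program generated\n"
    )

print(transpile(("add", ("num", 1), ("mul", ("num", 2), ("num", 3)))))
# program generated
#   print *, (1 + (2 * 3))
# end program generated
```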