Webpages.iust.ac.ir



ADDIN CSL_CITATION { "citationItems" : [ { "id" : "ITEM-1", "itemData" : { "author" : [ { "dropping-particle" : "", "family" : "Goodfellow", "given" : "Ian", "non-dropping-particle" : "", "parse-names" : false, "suffix" : "" }, { "dropping-particle" : "", "family" : "Bengio", "given" : "Yoshua", "non-dropping-particle" : "", "parse-names" : false, "suffix" : "" }, { "dropping-particle" : "", "family" : "Courville", "given" : "Aaron", "non-dropping-particle" : "", "parse-names" : false, "suffix" : "" } ], "id" : "ITEM-1", "issued" : { "date-parts" : [ [ "2016" ] ] }, "note" : "\\url{}", "publisher" : "MIT Press", "title" : "Deep learning", "type" : "book" }, "uris" : [ "" ] } ], "mendeley" : { "formattedCitation" : "(Goodfellow et al. 2016)", "plainTextFormattedCitation" : "(Goodfellow et al. 2016)", "previouslyFormattedCitation" : "(Goodfellow et al. 2016)" }, "properties" : { }, "schema" : "" }(Goodfellow et al. 2016)ADDIN CSL_CITATION { "citationItems" : [ { "id" : "ITEM-1", "itemData" : { "author" : [ { "dropping-particle" : "Le", "family" : "Ilya Sutskever, Oriol Vinyals", "given" : "Quoc V.", "non-dropping-particle" : "", "parse-names" : false, "suffix" : "" }, { "dropping-particle" : "", "family" : "Sutskever", "given" : "Ilya", "non-dropping-particle" : "", "parse-names" : false, "suffix" : "" }, { "dropping-particle" : "", "family" : "Vinyals", "given" : "Oriol", "non-dropping-particle" : "", "parse-names" : false, "suffix" : "" }, { "dropping-particle" : "V.", "family" : "Le", "given" : "Quoc", "non-dropping-particle" : "", "parse-names" : false, "suffix" : "" } ], "container-title" : "Nips", "id" : "ITEM-1", "issued" : { "date-parts" : [ [ "2014" ] ] }, "page" : "1-9", "title" : "Sequence to sequence learning with neural networks", "type" : "article-journal" }, "uris" : [ "" ] } ], "mendeley" : { "formattedCitation" : "(Ilya Sutskever, Oriol Vinyals et al. 2014)", "plainTextFormattedCitation" : "(Ilya Sutskever, Oriol Vinyals et al. 
2014)", "previouslyFormattedCitation" : "(Ilya Sutskever, Oriol Vinyals et al. 2014)" }, "properties" : { }, "schema" : "" }(Ilya Sutskever, Oriol Vinyals et al. 2014)ADDIN CSL_CITATION { "citationItems" : [ { "id" : "ITEM-1", "itemData" : { "ISBN" : "0131873210", "author" : [ { "dropping-particle" : "", "family" : "Jurafsky", "given" : "Daniel", "non-dropping-particle" : "", "parse-names" : false, "suffix" : "" }, { "dropping-particle" : "", "family" : "Martin", "given" : "James H", "non-dropping-particle" : "", "parse-names" : false, "suffix" : "" } ], "id" : "ITEM-1", "issued" : { "date-parts" : [ [ "2009" ] ] }, "publisher" : "Prentice-Hall, Inc.", "publisher-place" : "Upper Saddle River, NJ, USA", "title" : "Speech and Language Processing (2Nd Edition)", "type" : "book" }, "uris" : [ "" ] } ], "mendeley" : { "formattedCitation" : "(Jurafsky & Martin 2009)", "plainTextFormattedCitation" : "(Jurafsky & Martin 2009)", "previouslyFormattedCitation" : "(Jurafsky & Martin 2009)" }, "properties" : { }, "schema" : "" }(Jurafsky & Martin 2009)ADDIN CSL_CITATION { "citationItems" : [ { "id" : "ITEM-1", "itemData" : { "abstract" : "\u5173\u4e8e\u795e\u7ecf\u673a\u5668\u7ffb\u8bd1\u7684\u4e00\u7bc7\u535a\u58eb\u8bba\u6587", "author" : [ { "dropping-particle" : "", "family" : "Luong", "given" : "Minh Thang", "non-dropping-particle" : "", "parse-names" : false, "suffix" : "" } ], "id" : "ITEM-1", "issue" : "December", "issued" : { "date-parts" : [ [ "2016" ] ] }, "publisher" : "Stanford university", "title" : "Neural Machine Translation", "type" : "thesis" }, "uris" : [ "" ] } ], "mendeley" : { "formattedCitation" : "(Luong 2016)", "plainTextFormattedCitation" : "(Luong 2016)", "previouslyFormattedCitation" : "(Luong 2016)" }, "properties" : { }, "schema" : "" }(Luong 2016)ADDIN CSL_CITATION { "citationItems" : [ { "id" : "ITEM-1", "itemData" : { "DOI" : "10.1146/annurev.neuro.26.041002.131047", "ISBN" : "9781937284978", "ISSN" : "0147-006X", "PMID" : "14527267", "abstract" 
: "We introduce a class of probabilistic continuous translation models called Recurrent Continuous Translation Models that are purely based on continuous representations for words, phrases and sentences and do not rely on alignments or phrasal translation units. The models have a generation and a conditioning aspect. The generation of the translation is modelled with a target Recurrent Language Model, whereas the conditioning on the source sentence is modelled with a Convolutional Sentence Model. Through various experiments, we show first that our models obtain a perplexity with respect to gold translations that is > 43% lower than that of state-of-the-art alignment-based translation models. Secondly, we show that they are remarkably sensitive to the word order, syntax, and mean- ing of the source sentence despite lacking alignments. Finally we show that they match a state-of-the-art system when rescoring n-best lists of translations.", "author" : [ { "dropping-particle" : "", "family" : "Kalchbrenner", "given" : "Nal", "non-dropping-particle" : "", "parse-names" : false, "suffix" : "" }, { "dropping-particle" : "", "family" : "Blunsom", "given" : "Phil", "non-dropping-particle" : "", "parse-names" : false, "suffix" : "" } ], "container-title" : "Emnlp", "id" : "ITEM-1", "issue" : "October", "issued" : { "date-parts" : [ [ "2013" ] ] }, "page" : "1700-1709", "title" : "Recurrent continuous translation models", "type" : "article-journal" }, "uris" : [ "" ] } ], "mendeley" : { "formattedCitation" : "(Kalchbrenner & Blunsom 2013)", "plainTextFormattedCitation" : "(Kalchbrenner & Blunsom 2013)", "previouslyFormattedCitation" : "(Kalchbrenner & Blunsom 2013)" }, "properties" : { }, "schema" : "" }(Kalchbrenner & Blunsom 2013)ADDIN CSL_CITATION { "citationItems" : [ { "id" : "ITEM-1", "itemData" : { "DOI" : "10.3115/v1/D14-1179", "ISBN" : "9781937284961", "ISSN" : "09205691", "PMID" : "2079951", "abstract" : "In this paper, we propose a novel neural network model called 
RNN Encoder-Decoder that consists of two recurrent neural networks (RNN). One RNN encodes a sequence of symbols into a fixed-length vector representation, and the other decodes the representation into another sequence of symbols. The encoder and decoder of the proposed model are jointly trained to maximize the conditional probability of a target sequence given a source sequence. The performance of a statistical machine translation system is empirically found to improve by using the conditional probabilities of phrase pairs computed by the RNN Encoder-Decoder as an additional feature in the existing log-linear model. Qualitatively, we show that the proposed model learns a semantically and syntactically meaningful representation of linguistic phrases.", "author" : [ { "dropping-particle" : "", "family" : "Cho", "given" : "Kyunghyun", "non-dropping-particle" : "", "parse-names" : false, "suffix" : "" }, { "dropping-particle" : "", "family" : "Merrienboer", "given" : "Bart", "non-dropping-particle" : "van", "parse-names" : false, "suffix" : "" }, { "dropping-particle" : "", "family" : "Gulcehre", "given" : "Caglar", "non-dropping-particle" : "", "parse-names" : false, "suffix" : "" }, { "dropping-particle" : "", "family" : "Bahdanau", "given" : "Dzmitry", "non-dropping-particle" : "", "parse-names" : false, "suffix" : "" }, { "dropping-particle" : "", "family" : "Bougares", "given" : "Fethi", "non-dropping-particle" : "", "parse-names" : false, "suffix" : "" }, { "dropping-particle" : "", "family" : "Schwenk", "given" : "Holger", "non-dropping-particle" : "", "parse-names" : false, "suffix" : "" }, { "dropping-particle" : "", "family" : "Bengio", "given" : "Yoshua", "non-dropping-particle" : "", "parse-names" : false, "suffix" : "" } ], "id" : "ITEM-1", "issued" : { "date-parts" : [ [ "2014" ] ] }, "title" : "Learning Phrase Representations using RNN Encoder-Decoder for Statistical Machine Translation", "type" : "article-journal" }, "uris" : [ "" ] } ], "mendeley" : { 
"formattedCitation" : "(Cho et al. 2014)", "plainTextFormattedCitation" : "(Cho et al. 2014)", "previouslyFormattedCitation" : "(Cho et al. 2014)" }, "properties" : { }, "schema" : "" }(Cho et al. 2014)ADDIN CSL_CITATION { "citationItems" : [ { "id" : "ITEM-1", "itemData" : { "URL" : "", "accessed" : { "date-parts" : [ [ "2017", "11", "13" ] ] }, "author" : [ { "dropping-particle" : "", "family" : "Keras-Team", "given" : "", "non-dropping-particle" : "", "parse-names" : false, "suffix" : "" } ], "container-title" : "Keras", "id" : "ITEM-1", "issued" : { "date-parts" : [ [ "2017" ] ] }, "title" : "Sequence to sequence example in Keras (character-level)", "type" : "webpage" }, "uris" : [ "" ] } ], "mendeley" : { "formattedCitation" : "(Keras-Team 2017)", "plainTextFormattedCitation" : "(Keras-Team 2017)", "previouslyFormattedCitation" : "(Keras-Team 2017)" }, "properties" : { }, "schema" : "" }(Keras-Team 2017)ADDIN CSL_CITATION { "citationItems" : [ { "id" : "ITEM-1", "itemData" : { "author" : [ { "dropping-particle" : "", "family" : "Luong", "given" : "Minh-Thang", "non-dropping-particle" : "", "parse-names" : false, "suffix" : "" }, { "dropping-particle" : "", "family" : "Brevdo", "given" : "Eugene", "non-dropping-particle" : "", "parse-names" : false, "suffix" : "" }, { "dropping-particle" : "", "family" : "Zhao", "given" : "Rui", "non-dropping-particle" : "", "parse-names" : false, "suffix" : "" } ], "container-title" : "", "id" : "ITEM-1", "issued" : { "date-parts" : [ [ "2017" ] ] }, "title" : "Neural Machine Translation (seq2seq) Tutorial", "type" : "article-journal" }, "uris" : [ "" ] } ], "mendeley" : { "formattedCitation" : "(Luong et al. 2017)", "plainTextFormattedCitation" : "(Luong et al. 2017)", "previouslyFormattedCitation" : "(Luong et al. 2017)" }, "properties" : { }, "schema" : "" }(Luong et al. 
2017)ADDIN CSL_CITATION { "citationItems" : [ { "id" : "ITEM-1", "itemData" : { "URL" : "", "accessed" : { "date-parts" : [ [ "2017", "11", "13" ] ] }, "author" : [ { "dropping-particle" : "", "family" : "Pecina", "given" : "Pavel", "non-dropping-particle" : "", "parse-names" : false, "suffix" : "" } ], "id" : "ITEM-1", "issued" : { "date-parts" : [ [ "2014" ] ] }, "title" : "ACL 2014 Ninth Workshop on Statistical Machine Translation", "type" : "webpage" }, "uris" : [ "" ] } ], "mendeley" : { "formattedCitation" : "(Pecina 2014)", "plainTextFormattedCitation" : "(Pecina 2014)", "previouslyFormattedCitation" : "(Pecina 2014)" }, "properties" : { }, "schema" : "" }(Pecina 2014)ADDIN CSL_CITATION { "citationItems" : [ { "id" : "ITEM-1", "itemData" : { "URL" : "", "accessed" : { "date-parts" : [ [ "2017", "11", "13" ] ] }, "author" : [ { "dropping-particle" : "", "family" : "Kelly", "given" : "Charles", "non-dropping-particle" : "", "parse-names" : false, "suffix" : "" } ], "id" : "ITEM-1", "issued" : { "date-parts" : [ [ "2017" ] ] }, "title" : "Tab-delimited Bilingual Sentence Pairs from the Tatoeba Project (Good for Anki and Similar Flashcard Applications)", "type" : "webpage" }, "uris" : [ "" ] } ], "mendeley" : { "formattedCitation" : "(Kelly 2017)", "plainTextFormattedCitation" : "(Kelly 2017)", "previouslyFormattedCitation" : "(Kelly 2017)" }, "properties" : { }, "schema" : "" }(Kelly 2017)ADDIN CSL_CITATION { "citationItems" : [ { "id" : "ITEM-1", "itemData" : { "DOI" : "10.1146/annurev.neuro.26.041002.131047", "ISBN" : "0147-006X (Print)", "ISSN" : "0147-006X", "PMID" : "14527267", "abstract" : "Neural machine translation is a recently proposed approach to machine translation. Unlike the traditional statistical machine translation, the neural machine translation aims at building a single neural network that can be jointly tuned to maximize the translation performance. 
The models proposed recently for neural machine translation often belong to a family of encoder-decoders and consists of an encoder that encodes a source sentence into a fixed-length vector from which a decoder generates a translation. In this paper, we conjecture that the use of a fixed-length vector is a bottleneck in improving the performance of this basic encoder-decoder architecture, and propose to extend this by allowing a model to automatically (soft-)search for parts of a source sentence that are relevant to predicting a target word, without having to form these parts as a hard segment explicitly. With this new approach, we achieve a translation performance comparable to the existing state-of-the-art phrase-based system on the task of English-to-French translation. Furthermore, qualitative analysis reveals that the (soft-)alignments found by the model agree well with our intuition.", "author" : [ { "dropping-particle" : "", "family" : "Bahdanau", "given" : "Dzmitry", "non-dropping-particle" : "", "parse-names" : false, "suffix" : "" }, { "dropping-particle" : "", "family" : "Cho", "given" : "Kyunghyun", "non-dropping-particle" : "", "parse-names" : false, "suffix" : "" }, { "dropping-particle" : "", "family" : "Bengio", "given" : "Yoshua", "non-dropping-particle" : "", "parse-names" : false, "suffix" : "" } ], "id" : "ITEM-1", "issued" : { "date-parts" : [ [ "2014" ] ] }, "page" : "1-15", "title" : "Neural machine translation by jointly learning to align and translate", "type" : "article-journal" }, "uris" : [ "" ] } ], "mendeley" : { "formattedCitation" : "(Bahdanau et al. 2014)", "plainTextFormattedCitation" : "(Bahdanau et al. 2014)" }, "properties" : { }, "schema" : "" }(Bahdanau et al. 2014)ADDIN Mendeley Bibliography CSL_BIBLIOGRAPHY Bahdanau, D., Cho, K. & Bengio, Y., 2014. Neural machine translation by jointly learning to align and translate. , pp.1–15. Available at: , K. et al., 2014. 
Learning Phrase Representations using RNN Encoder-Decoder for Statistical Machine Translation. Available at: [URL missing]. Goodfellow, I., Bengio, Y. & Courville, A., 2016. Deep learning, MIT Press. Available at: [URL missing]. Sutskever, I., Vinyals, O. & Le, Q.V., 2014. Sequence to sequence learning with neural networks. Nips, pp.1–9. Jurafsky, D. & Martin, J.H., 2009. Speech and Language Processing (2nd Edition), Upper Saddle River, NJ, USA: Prentice-Hall, Inc. Kalchbrenner, N. & Blunsom, P., 2013. Recurrent continuous translation models. Emnlp, (October), pp.1700–1709. Kelly, C., 2017. Tab-delimited Bilingual Sentence Pairs from the Tatoeba Project (Good for Anki and Similar Flashcard Applications). Available at: [URL missing] [Accessed November 13, 2017]. Keras-Team, 2017. Sequence to sequence example in Keras (character-level). Keras. Available at: [URL missing] [Accessed November 13, 2017]. Luong, M.-T., Brevdo, E. & Zhao, R., 2017. Neural Machine Translation (seq2seq) Tutorial. Luong, M.T., 2016. Neural Machine Translation. Stanford university. Available at: [URL missing]. Pecina, P., 2014. ACL 2014 Ninth Workshop on Statistical Machine Translation. Available at: [URL missing] [Accessed November 13, 2017].
................

In order to avoid copyright disputes, this page is only a partial summary.

Google Online Preview   Download