My reading list for ML4SE
@article{Alon19,
author = {Alon, Uri and Zilberstein, Meital and Levy, Omer and Yahav, Eran},
title = {Code2Vec: Learning Distributed Representations of Code},
journal = {Proc. ACM Program. Lang.},
issue_date = {January 2019},
volume = {3},
number = {POPL},
month = jan,
year = {2019},
issn = {2475-1421},
pages = {40:1--40:29},
articleno = {40},
numpages = {29},
url = {http://doi.acm.org/10.1145/3290353},
doi = {10.1145/3290353},
acmid = {3290353},
publisher = {ACM},
address = {New York, NY, USA},
keywords = {Big Code, Distributed Representations, Machine Learning},
Notes = {
Code2vec can learn the "meaning" of a method. It uses an AST-path representation: it builds an
AST per method and enumerates all paths between pairs of terminal symbols. Each step of a path
is marked "up" when it moves towards an ancestor node in the tree and "down" when it moves
towards a descendant. Per method, a bag of paths (only the top-X most frequent paths) is trained
to predict the method name; attention is applied to identify the most important paths during
training.
Code/data here: https://github.com/tech-srl/code2vec
}
}
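A minimal sketch of the AST-path idea described above (my own illustration, not the authors' implementation); the toy AST, node labels and path notation are invented:

# Sketch of leaf-to-leaf AST path extraction in the code2vec spirit; toy AST only.
from itertools import combinations

# A node is (label, [children]); this tiny tree stands for something like "f(x) { return x + 1; }".
AST = ("MethodDecl", [
    ("Param", [("x", [])]),
    ("Return", [("Plus", [("x", []), ("1", [])])]),
])

def leaves(node, path=()):
    label, children = node
    if not children:
        yield path + (label,)
    for child in children:
        yield from leaves(child, path + (label,))

def path_context(p1, p2):
    i = 0                                              # length of the common root prefix
    while i < min(len(p1), len(p2)) - 1 and p1[i] == p2[i]:
        i += 1
    ups = [f"{n} up" for n in reversed(p1[i:-1])]      # climb from leaf1 to just below the LCA
    downs = [f"{n} down" for n in p2[i:-1]]            # descend towards leaf2
    return p1[-1], " ".join(ups + [p1[i - 1]] + downs), p2[-1]

for p1, p2 in combinations(list(leaves(AST)), 2):
    print(path_context(p1, p2))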
@article{Yin18,
author = {{Yin}, P. and {Neubig}, G. and {Allamanis}, M. and {Brockschmidt}, M. and {Gaunt}, A.~L.},
title = "{Learning to Represent Edits}",
journal = {ArXiv e-prints},
archivePrefix = "arXiv",
eprint = {1810.13337},
year = 2018,
month = oct,
Notes = {
The task: given a dataset of edits (from, to), learn a representation that encodes the "meaning"
of an edit, so that it can be used to predict edits on other code snippets. They use two
representations of the from-to data: token embeddings and a graph with embedded edges that
represents the edit. They train two autoencoders, a seq2seq one (with a bottleneck) and a Graph
NN (need to read up on this). They do a manual evaluation (the OpenReview discussion of this
paper is rather negative on this aspect: https://openreview.net/forum?id=BJl6AjC5F7). The dataset
they use is a set of automated refactorings, whose representations are grouped close together by
t-SNE (in the same clusters). The prediction part is rather weak.
One could try to build an alternative to Gumtree or an auto-refactoring tool based on this.
Code/data: https://github.com/Microsoft/msrc-dpu-learning-to-represent-edits
}
}
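A tiny illustration (mine, not the paper's pipeline) of turning a (from, to) pair into an edit sequence that a seq2seq encoder could consume; difflib stands in for the alignment step and the tokens are invented:

# Toy edit encoding for a (before, after) token pair; sketch only.
import difflib

def edit_sequence(before_tokens, after_tokens):
    ops = []
    sm = difflib.SequenceMatcher(a=before_tokens, b=after_tokens)
    for tag, i1, i2, j1, j2 in sm.get_opcodes():
        if tag == "equal":
            ops += [("KEEP", t) for t in before_tokens[i1:i2]]
        else:
            ops += [("DEL", t) for t in before_tokens[i1:i2]]
            ops += [("ADD", t) for t in after_tokens[j1:j2]]
    return ops

before = "if ( x == null ) return ;".split()
after  = "if ( x != null ) return x ;".split()
print(edit_sequence(before, after))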
@article{Allam17,
title={Learning to represent programs with graphs},
author={Allamanis, Miltiadis and Brockschmidt, Marc and Khademi, Mahmoud},
journal={arXiv preprint arXiv:1711.00740},
year={2017},
Note = {
Most current work treats source code as token sequences, but this loses source code semantics.
To capture semantics, the authors propose the use of Graph NNs. A node in such a graph carries
an embedding of its name plus a state vector. Nodes are AST elements: syntax nodes
(non-terminals) and tokens (terminals). A list of directed edges denotes the relationships
between nodes. Edges have various types, for example read-from, write-to or computed-from,
depending on the semantics of the links between the nodes.
During training, a user-specified function simulates the transfer of "messages" among the graph
nodes (e.g. to simulate data or control flow). Messages are aggregated per node using a gated
recurrent unit, and the aggregation updates the node's state vector. The initial state is set to
an average embedding of the node's name and type tokens.
The resulting network is trained on ASTs converted to this graph structure. The trained network
can learn to identify issues involving complex data flow (incorrect variable use) and is good at
predicting names as well.
Code: https://github.com/Microsoft/gated-graph-neural-network-samples
}
}
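A single gated message-passing step, sketched with PyTorch to illustrate the GRU-based aggregation described above; the toy graph, edge types and dimensions are invented and this is not the paper's implementation:

# One gated graph NN propagation step (sketch only).
import torch
import torch.nn as nn

num_nodes, dim, num_edge_types = 4, 8, 2
state = torch.randn(num_nodes, dim)              # initial node states (would be name+type embeddings)
# edges[t] is a list of (source, target) pairs for edge type t (e.g. Child, LastWrite).
edges = {0: [(0, 1), (1, 2)], 1: [(2, 3), (3, 0)]}

msg_fn = nn.ModuleList([nn.Linear(dim, dim) for _ in range(num_edge_types)])
gru = nn.GRUCell(dim, dim)

messages = torch.zeros(num_nodes, dim)
for t, pairs in edges.items():
    for src, dst in pairs:
        messages[dst] += msg_fn[t](state[src])   # type-specific message, summed per target node
state = gru(messages, state)                     # GRU update of every node's state
print(state.shape)                               # torch.Size([4, 8])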
@article{Allam18,
author = {Allamanis, Miltiadis and Barr, Earl T. and Devanbu, Premkumar and Sutton, Charles},
title = {A Survey of Machine Learning for Big Code and Naturalness},
journal = {ACM Comput. Surv.},
issue_date = {September 2018},
volume = {51},
number = {4},
month = jul,
year = {2018},
issn = {0360-0300},
pages = {81:1--81:37},
articleno = {81},
numpages = {37},
url = {http://doi.acm.org/10.1145/3212695},
doi = {10.1145/3212695},
acmid = {3212695},
Note = {
The authors survey related work up to early 2018 in the area of ML4Code. They classify existing
work into three broad categories, namely: i) code-generating models (modelling the statistical
properties of how code is written), ii) representational models (taking abstracted forms of
code, i.e. embeddings, as input, so that higher-level properties such as data flow can be
captured) and iii) pattern-mining models (inferring latent properties of the code). The authors
also summarise the applications of ML4Code in tasks such as recommendation, inferring coding
conventions (function names, indentation etc.), defect prediction, code clone detection, code
search engines, requirements traceability and program synthesis. They conclude by identifying
important challenges, such as data sparsity, the absence of both good benchmarks and good
metrics tuned for code, and future opportunities in debugging, traceability, and code completion
and synthesis.
}
}
@article{ratner2017snorkel,
title={Snorkel: Rapid training data creation with weak supervision},
author={Ratner, Alexander and Bach, Stephen H and Ehrenberg, Henry and Fries, Jason and Wu, Sen and R{\'e}, Christopher},
journal={Proceedings of the VLDB Endowment},
volume={11},
number={3},
pages={269--282},
year={2017},
publisher={VLDB Endowment},
Note = {
Snorkel combines various input signals for unlabeled data in order to learn a classification function.
The output is a set of probabilistic labels for each data point and a generative model that creates them.
Snorkel expects the user to write several approximate labeling functions (like regular expressions) or
to choose from its function templates.
It then synthesizes, for each data point, a probability vector that denotes the probability of each
label. The output can be fed as (probabilistic) labels to an ML model that is trained to predict them
from the corresponding inputs.
}
}
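A toy version of the idea (not Snorkel's actual API): several noisy labeling functions vote on unlabeled points and are combined into per-point label probabilities, here with simple normalized vote counts rather than Snorkel's generative model; the labeling functions and texts are made up:

# Toy weak supervision: combine noisy labeling functions into probabilistic labels (sketch).
import re
from collections import Counter

ABSTAIN, SPAM, HAM = None, 1, 0

def lf_contains_link(text):   return SPAM if "http" in text else ABSTAIN
def lf_shouting(text):        return SPAM if re.search(r"[A-Z]{5,}", text) else ABSTAIN
def lf_short_reply(text):     return HAM if len(text.split()) < 4 else ABSTAIN

labeling_functions = [lf_contains_link, lf_shouting, lf_short_reply]

def probabilistic_label(text):
    votes = Counter(lf(text) for lf in labeling_functions)
    votes.pop(ABSTAIN, None)
    total = sum(votes.values())
    if total == 0:
        return {SPAM: 0.5, HAM: 0.5}             # every labeling function abstained
    return {label: count / total for label, count in votes.items()}

print(probabilistic_label("CLICK here http://spam.example"))
print(probabilistic_label("thanks, looks good"))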
@article{ratner2018training,
title={Training complex models with multi-task weak supervision},
author={Ratner, Alexander and Hancock, Braden and Dunnmon, Jared and Sala, Frederic and Pandey, Shreyash and R{\'e}, Christopher},
journal={arXiv preprint arXiv:1810.02840},
year={2018},
Note = {
The authors consider the task of labelling data for a graph of related tasks using weak labeling
functions. The key idea is that, by examining the disagreement rates of the weak labels, one can
estimate their overall accuracies. The relationships between the tasks also provide further
evidence about (dis)agreements between the labels. An example problem is identifying whether a
name denotes a person or an organization; if it denotes a person, we further determine whether
the person is a doctor or a lawyer (using external databases).
}
}
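A small illustration (my own, with fabricated votes) of the core observation: pairwise agreement rates among weak labelers can be computed directly from unlabeled data, and these are the statistics from which accuracies are estimated:

# Pairwise agreement rates between weak labelers on unlabeled data (illustration only).
import numpy as np

# Rows = data points, columns = weak labelers; the 0/1 votes are made up.
votes = np.array([
    [1, 1, 0],
    [0, 0, 0],
    [1, 1, 1],
    [1, 0, 1],
    [0, 0, 1],
])

n_labelers = votes.shape[1]
agreement = np.zeros((n_labelers, n_labelers))
for i in range(n_labelers):
    for j in range(n_labelers):
        agreement[i, j] = np.mean(votes[:, i] == votes[:, j])

print(agreement)   # off-diagonal entries are the observed (dis)agreement rates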
@inproceedings{DeFree18,
author = {DeFreez, Daniel and Thakur, Aditya V. and Rubio-Gonz\'{a}lez, Cindy},
title = {Path-based Function Embedding and Its Application to Error-handling Specification Mining},
booktitle = {Proceedings of the 2018 26th ACM Joint Meeting on European Software Engineering Conference and Symposium on the Foundations of Software Engineering},
series = {ESEC/FSE 2018},
year = {2018},
isbn = {978-1-4503-5573-5},
location = {Lake Buena Vista, FL, USA},
pages = {423--433},
numpages = {11},
url = {http://doi.acm.org/10.1145/3236024.3236059},
doi = {10.1145/3236024.3236059},
acmid = {3236059},
publisher = {ACM},
address = {New York, NY, USA},
keywords = {error handling, program analysis, program comprehension, program embeddings, specification mining},
Note = {
They try to identify functions that are semantically similar, i.e. that serve the same purpose in
code (e.g. functions that get the screen brightness across 2-3 different graphics drivers). The
core idea is that functions are similar if the contexts they are called in are similar. They
represent code as one CFG per function (a CFG is represented as an adjacency matrix). On the CFG,
they perform random walks and keep the labels of all intermediate functions and operations (e.g.
loads/stores of data). Then they train a word2vec model on the walks, which embeds the function
representations. They evaluate this on a large set of hand-annotated, functionally equivalent
functions.
Code/data: https://github.com/defreez-ucd/func2vec-fse2018-artifact
}
}
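A sketch (mine, not the artifact) of the random-walk step: walk a small labeled CFG and emit label sequences that would then serve as word2vec training sentences; the graph and labels are invented:

# Random walks over a toy labeled control-flow graph (sketch only).
import random

# adjacency: node -> list of successors; each node name doubles as its label (call or operation).
cfg = {
    "entry":               ["call:ioctl"],
    "call:ioctl":          ["store:err", "call:get_brightness"],
    "store:err":           ["ret"],
    "call:get_brightness": ["ret"],
    "ret":                 [],
}

def random_walk(graph, start, max_len=10):
    walk, node = [start], start
    while graph[node] and len(walk) < max_len:
        node = random.choice(graph[node])
        walk.append(node)
    return walk

random.seed(0)
sentences = [random_walk(cfg, "entry") for _ in range(5)]
for s in sentences:
    print(" ".join(s))
# gensim's Word2Vec(sentences, ...) could then embed these labels.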
@inproceedings{Zhang19,
author = {Zhang, Jian and Wang, Xu and Zhang, Hongyu and Sun, Hailong and Wang, Kaixuan and Liu, Xudong},
title = {A Novel Neural Source Code Representation based on Abstract Syntax Tree},
booktitle = {Proceedings of the 41st International Conference on Software Engineering},
series = {ICSE '19},
year = {2019},
Note = {
The authors try out a new representation of source code. They take an AST, extract all *statements*
(including method declarations as statements), and restrict the scope of each statement to block
boundaries. Those per-scope statement trees are fed into word2vec to create an embedding
dictionary, which is then used to encode all statements. To represent sequences of statement/block
embeddings the authors use a GRU; a final vector representation is constructed through a pooling
layer. The paper does not detail how the training happens, i.e. what the NN is trying to learn.
Code: https://github.com/zhangj1994/astnn
}
}
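A rough sketch of the sequence part as I read it: pretrained statement embeddings go through a bidirectional GRU and max pooling to give one vector per code fragment. PyTorch, invented dimensions, random vectors standing in for the word2vec embeddings; not the authors' code:

# Statement embeddings -> GRU -> max pooling, ASTNN-style (sketch only).
import torch
import torch.nn as nn

embed_dim, hidden_dim, num_statements = 16, 32, 6

# Stand-in for word2vec-initialized statement vectors of one code fragment.
statements = torch.randn(1, num_statements, embed_dim)     # (batch, seq, embed)

gru = nn.GRU(embed_dim, hidden_dim, batch_first=True, bidirectional=True)
outputs, _ = gru(statements)                                # (batch, seq, 2*hidden)
code_vector, _ = outputs.max(dim=1)                         # max pooling over statements
print(code_vector.shape)                                    # torch.Size([1, 64])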
@inproceedings{Liu19,
author = {Liu, Kui and Kim, Dongsun and Bissyand{\'e}, Tegawend{\'e} F. and Kim, Taeyoung and Kim, Kisub and Koyuncu, Anil and Kim, Suntae and Le Traon, Yves},
title = {Learning to Spot and Refactor Inconsistent Method Names},
booktitle = {Proceedings of the 41st International Conference on Software Engineering},
series = {ICSE '19},
year = {2019},
location = {Montreal, Quebec, Canada},
pages = {1--12},
numpages = {12},
url = {https://doi.org/10.1109/ICSE.2019.00019},
doi = {10.1109/ICSE.2019.00019},
acmid = {3339507},
publisher = {IEEE Press},
address = {Piscataway, NJ, USA},
Note = {
Most developers use libraries, but when searching for API methods they only search by name, so
inconsistent method names hurt the developer experience and perhaps final product quality. The
authors obtained commit data from GitHub by searching for "inconsistent" and identified several
thousand such commits. The key idea is that similar method bodies should have similar method
names. So they build 2 autoencoders to i) learn method names, and ii) learn method bodies. For
the former they used Doc2Vec. For the latter, they serialized ASTs depth-first (i.e. per
statement), substituted local names, embedded each statement vector with Word2Vec and applied
several 2D convolutional layers to the statement vectors before feeding the autoencoder (no
explanation of why such a complex architecture is needed).
They calculate similarity by taking the top-n most similar (under 4 different similarity
strategies) method vectors for each method vector, finding the corresponding method names and
intersecting them with the current function name. They find that their work improves over
similar work by Allamanis et al., which used n-grams (F1 ~70%, so not that great). They have
also sent PRs to 100 projects, some of which were accepted. Of the ones that were not, several
would have broken users, so it did not make sense to accept them.
}
}
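A toy rendering (my own) of the consistency check: find the nearest method-body vectors, collect their names, and flag a method if its name shares no sub-token with them. The names and "body embeddings" below are fabricated; the paper's pipeline is far more elaborate:

# Toy name-consistency check via nearest-neighbour body vectors (sketch only).
import re
import numpy as np

names = ["readFile", "loadFile", "writeLog", "openFile"]
bodies = np.array([            # pretend these are learned method-body embeddings
    [0.90, 0.10, 0.00],
    [0.80, 0.20, 0.10],
    [0.10, 0.90, 0.20],
    [0.85, 0.15, 0.05],
])

def sub_tokens(name):
    return {t.lower() for t in re.findall(r"[A-Z]?[a-z]+", name)}

def cosine(a, b):
    return float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b)))

def inconsistent(idx, top_n=2):
    sims = sorted(((cosine(bodies[idx], bodies[j]), j) for j in range(len(names)) if j != idx),
                  reverse=True)
    neighbour_names = [names[j] for _, j in sims[:top_n]]
    # a name is suspicious if it shares no sub-token with any near neighbour's name
    return not any(sub_tokens(names[idx]) & sub_tokens(n) for n in neighbour_names)

for i, name in enumerate(names):
    print(name, "->", "inconsistent" if inconsistent(i) else "ok")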
@inproceedings{Tufan19,
author = {Tufano, Michele and Pantiuchina, Jevgenija and Watson, Cody and Bavota, Gabriele and Poshyvanyk, Denys},
title = {On Learning Meaningful Code Changes via Neural Machine Translation},
booktitle = {Proceedings of the 41st International Conference on Software Engineering},
series = {ICSE '19},
year = {2019},
location = {Montreal, Quebec, Canada},
pages = {25--36},
numpages = {12},
url = {https://doi.org/10.1109/ICSE.2019.00021},
doi = {10.1109/ICSE.2019.00021},
acmid = {3339509},
publisher = {IEEE Press},
address = {Piscataway, NJ, USA},
keywords = {empirical study, neural-machine translation},
Note = {
Given the abundance of source code diffs, is it possible to predict what
developers will be doing next? The authors claim that little qualitative
analysis has been done to deeply investigate the meaningfulness of the output
produced by deep learning-based approaches. In the paper, they mine accepted
pull requests and try to reconstruct the patches by feeding GumTree diff logs
(at the method level) of Java files touched by the PR into an encoder/decoder
(with attention) network. What is interesting about this paper is the way they
abstracted names for variables etc: they replaced concrete names with labels
like METHOD\_0 and they kept a map linking METHOD\_0 to the actual name.
Interestingly, they allow those IDs to be reused across different method pairs
they feed to ML: this way their vocabulary is significantly reduced (but at
what cost? the authors don't quantify this). Of the patches their model finds,
31% are perfect matches with the human ones. The authors also describe a
taxonomy of the kinds of changes their model learns: "The NMT model was able to
learn and automatically apply a wide variety of code changes, mostly related
to refactoring and bug-fixing activities."
}
}
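A small sketch (mine) of the identifier-abstraction idea: replace concrete names with indexed placeholders and keep a map back to the originals so abstracted patches can be concretized again. The tokenizer, keyword list and example snippet are invented for illustration:

# Identifier abstraction with a reversible map (illustration only).
import re

KEYWORDS = frozenset({"if", "return", "null", "int", "void"})

def abstract_identifiers(code):
    mapping, out = {}, []
    for token in re.findall(r"[A-Za-z_]\w*|==|!=|\S", code):
        if token.isidentifier() and token not in KEYWORDS:
            mapping.setdefault(token, f"VAR_{len(mapping)}")
            out.append(mapping[token])
        else:
            out.append(token)
    return " ".join(out), mapping

code = "if (count == null) return defaultCount;"
abstracted, mapping = abstract_identifiers(code)
print(abstracted)   # if ( VAR_0 == null ) return VAR_1 ;
print(mapping)      # {'count': 'VAR_0', 'defaultCount': 'VAR_1'}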
@inproceedings{Hellen19,
author = {Hellendoorn, Vincent J. and Proksch, Sebastian and Gall, Harald C. and Bacchelli, Alberto},
title = {When Code Completion Fails: A Case Study on Real-world Completions},
booktitle = {Proceedings of the 41st International Conference on Software Engineering},
series = {ICSE '19},
year = {2019},
location = {Montreal, Quebec, Canada},
pages = {960--970},
numpages = {11},
url = {https://doi.org/10.1109/ICSE.2019.00101},
doi = {10.1109/ICSE.2019.00101},
acmid = {3339625},
publisher = {IEEE Press},
address = {Piscataway, NJ, USA},
keywords = {benchmarks, code completion, language models},
Notes = {
The authors study the limitations of real-world code completion. To do so, they evaluate 4 code
completion models on both synthetic benchmarks and a benchmark extracted from real-world
completions. They find that current code completion tools and datasets leave much to be desired:
completion context is very important, yet synthetic benchmarks do not account for it at all;
deep learning models have a vocabulary problem; and difficult/rare completions are the ones that
save the most time, but also the ones the models cannot really predict.
More benchmark data is needed (Watchdog?).
}
}
@inproceedings{vasic2018neural,
title={Neural Program Repair by Jointly Learning to Localize and Repair},
author={Marko Vasic and Aditya Kanade and Petros Maniatis and David Bieber and Rishabh Singh},
booktitle={International Conference on Learning Representations},
year={2019},
url={https://openreview.net/forum?id=ByloJ20qtm},
Note = {
This paper provides an approach to jointly localize and repair VarMisuse bugs, i.e. points in a
program where the wrong variable from the context has been used. The proposed work provides an
end-to-end training pipeline for jointly localizing and repairing, as opposed to the independent
predictions in existing work. Not extremely interesting.
}
}
@incollection{Chen19,
title = {Tree-to-tree Neural Networks for Program Translation},
author = {Chen, Xinyun and Liu, Chang and Song, Dawn},
booktitle = {Advances in Neural Information Processing Systems 31},
editor = {S. Bengio and H. Wallach and H. Larochelle and K. Grauman and N. Cesa-Bianchi and R. Garnett},
pages = {2547--2557},
year = {2018},
publisher = {Curran Associates, Inc.},
url = {http://papers.nips.cc/paper/7521-tree-to-tree-neural-networks-for-program-translation.pdf},
Note = {
Building a seq2seq-based program translator is hard because programs keep repeating the same
AST structures (e.g., x==1). However, we can exploit these structures to build a tree2tree
translator, using attention to locate and reuse those repeating blocks. To do so, the AST is
converted into a binary tree (using the left-child right-sibling encoding) and an embedding is
trained (from one-hot inputs) by recursively aggregating all subtrees.
The decoder takes the top-level LSTM state and expands each node denoting a branch by applying
an attention mechanism over the encoder LSTM to identify the correct sub-branch to decode.
https://openreview.net/forum?id=rkxY-sl0W
}
}
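A small sketch (mine) of the left-child right-sibling conversion mentioned above, turning an arbitrary-arity AST into a binary tree; the toy AST is invented:

# Left-child right-sibling encoding: n-ary tree -> binary tree (illustrative sketch).
def to_lcrs(node, siblings=()):
    """node = (label, [children]); returns (label, left, right), where
    left = first-child subtree and right = next-sibling subtree."""
    label, children = node
    left = to_lcrs(children[0], tuple(children[1:])) if children else None
    right = to_lcrs(siblings[0], tuple(siblings[1:])) if siblings else None
    return (label, left, right)

ast = ("If", [("Cond", [("x", []), ("1", [])]), ("Then", [("return", [])])])
print(to_lcrs(ast))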
@article{Pradel2018,
author = {Pradel, Michael and Sen, Koushik},
title = {DeepBugs: A Learning Approach to Name-based Bug Detection},
journal = {Proc. ACM Program. Lang.},
issue_date = {November 2018},
volume = {2},
number = {OOPSLA},
month = oct,
year = {2018},
issn = {2475-1421},
pages = {147:1--147:25},
articleno = {147},
numpages = {25},
url = {http://doi.acm.org/10.1145/3276517},
doi = {10.1145/3276517},
acmid = {3276517},
publisher = {ACM},
address = {New York, NY, USA},
keywords = {Bug detection, JavaScript, Machine learning, Name-based program analysis, Natural language},
Note = {
The authors consider the problem of learning to find bugs. To do so, they create a process that
injects bugs of a particular kind into existing code and train a bug-specific learner on those
auto-generated samples. The code (both buggy and fixed) is converted into a somewhat ad hoc
embedding, based on heuristics over the parsed source tree. They train 2 bug detectors (swapped
function arguments and wrong binary operator) and find 100+ bugs in real-world code.
code: https://github.com/michaelpradel/DeepBugs
}
}
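A minimal sketch (mine) of the training-data trick: take correct calls and generate "buggy" variants by swapping the first two arguments, yielding labeled correct/buggy pairs. It uses Python's ast module on a toy snippet purely to show the idea; DeepBugs itself targets JavaScript:

# Generate swapped-argument negatives from correct calls (sketch; requires Python 3.9+ for ast.unparse).
import ast

source = "copy_file(src_path, dst_path)\nwrite_log(message, logger)\n"

examples = []
for node in ast.walk(ast.parse(source)):
    if isinstance(node, ast.Call) and len(node.args) >= 2:
        correct = ast.unparse(node)
        node.args[0], node.args[1] = node.args[1], node.args[0]   # inject the bug
        buggy = ast.unparse(node)
        examples.append((correct, 0))    # label 0 = correct
        examples.append((buggy, 1))      # label 1 = buggy (swapped arguments)

for code, label in examples:
    print(label, code)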
@inproceedings{Dash18,
author = {Dash, Santanu Kumar and Allamanis, Miltiadis and Barr, Earl T.},
title = {RefiNym: Using Names to Refine Types},
booktitle = {Proceedings of the 2018 26th ACM Joint Meeting on European Software Engineering Conference and Symposium on the Foundations of Software Engineering},
series = {ESEC/FSE 2018},
year = {2018},
isbn = {978-1-4503-5573-5},
location = {Lake Buena Vista, FL, USA},
pages = {107--117},
numpages = {11},
url = {http://doi.acm.org/10.1145/3236024.3236042},
doi = {10.1145/3236024.3236042},
acmid = {3236042},
publisher = {ACM},
address = {New York, NY, USA},
keywords = {Information-theoretic Clustering, Type Refinement},
Note = {
The paper considers the problem of "stringly-typed" programs and proposes a way
of automatically extracting type names appropriate for those types. To do so,
the authors cluster over the names of source code elements (using a clustering technique
called "information-theoretic clustering" that uses information theory metrics
to calculate almost optimal clusterings without providing the number of clusters),
exploiting "name-flow graphs", or data flow dependencies between src code elements.
The technique generates a name-flow graph and then clusters it by solving an
optimization problem in order to globally minimize the variation of information (VI)
between the clusters.
code: https://github.com/askdash/refinym
}
}
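The objective mentioned above is variation of information between clusterings; a compact version of the formula (my own helper on toy labels, not the RefiNym code) is below:

# Variation of information VI(A, B) = H(A|B) + H(B|A) between two clusterings (helper sketch).
from collections import Counter
from math import log

def variation_of_information(labels_a, labels_b):
    n = len(labels_a)
    pa, pb = Counter(labels_a), Counter(labels_b)
    joint = Counter(zip(labels_a, labels_b))
    vi = 0.0
    for (a, b), n_ab in joint.items():
        p_ab = n_ab / n
        p_a, p_b = pa[a] / n, pb[b] / n
        # summing -p(a,b) * [log p(b|a) + log p(a|b)] over cells gives H(B|A) + H(A|B)
        vi -= p_ab * (log(p_ab / p_a) + log(p_ab / p_b))
    return vi

# Identical clusterings give VI = 0; disagreements increase it.
print(variation_of_information([0, 0, 1, 1], [0, 0, 1, 1]))   # 0.0
print(variation_of_information([0, 0, 1, 1], [0, 1, 0, 1]))   # > 0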
@inproceedings{LeCla19,
author = {LeClair, Alexander and Jiang, Siyuan and McMillan, Collin},
title = {A Neural Model for Generating Natural Language Summaries of Program Subroutines},
booktitle = {Proceedings of the 41st International Conference on Software Engineering},
series = {ICSE '19},
year = {2019},
location = {Montreal, Quebec, Canada},
pages = {795--806},
numpages = {12},
url = {https://doi.org/10.1109/ICSE.2019.00087},
doi = {10.1109/ICSE.2019.00087},
acmid = {3339605},
publisher = {IEEE Press},
address = {Piscataway, NJ, USA},
keywords = {automatic documentation generation, code comment generation, source code summarization},
Note = {
The paper considers the problem of producing short, natural-language summaries directly from
source code. The model takes 2 inputs, the NLP-processed source code (just the names of
everything) and the AST, and tries to predict the first sentence of the Javadoc comment.
The AST is flattened and serialized in a format called structure-based traversal (SBT).
The model is a twin encoder/decoder (one for the NLP input and one for the AST), both with attention.
}
}
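A rough sketch (mine) of flattening an AST with an SBT-style traversal so a sequence encoder can consume it; bracket tokens preserve the tree shape, and the toy AST is invented:

# Structure-based-traversal-style flattening of a toy AST into a token sequence (sketch only).
def sbt(node):
    label, children = node
    if not children:
        return ["(", label, ")", label]
    seq = ["(", label]
    for child in children:
        seq += sbt(child)
    return seq + [")", label]

ast = ("MethodDecl", [("Name:getValue", []), ("Return", [("Field:value", [])])])
print(" ".join(sbt(ast)))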
@article{Nye19,
title={Learning to infer program sketches},
author={Nye, Maxwell and Hewitt, Luke and Tenenbaum, Joshua and Solar-Lezama, Armando},
journal={arXiv preprint arXiv:1902.06349},
year={2019},
Note = {
The authors consider the problem of producing simple programs from example inputs and outputs,
within a time budget T. They solve it by combining a rule-based program synthesizer with a
neural seq2seq sketch generator. The neural part generates sketches that the program synthesizer
evaluates for suitability; the result of the evaluation is fed back as a training signal.
The neural model consists of two parts, the sketch generator and the sketch recognizer.
The generator is a standard seq2seq model that takes examples (natural language or data examples)
and outputs sketches (in a Lisp-like DSL). The recognizer samples results from the generator and
feeds them into the program synthesis module; the latter's feedback trains the recognizer.
}
}
@inproceedings{Al-Rfou:2019:DLG:3308558.3313668,
author = {Al-Rfou, Rami and Perozzi, Bryan and Zelle, Dustin},
title = {DDGK: Learning Graph Representations for Deep Divergence Graph Kernels},
booktitle = {The World Wide Web Conference},
series = {WWW '19},
year = {2019},
isbn = {978-1-4503-6674-8},
location = {San Francisco, CA, USA},
pages = {37--48},
numpages = {12},
url = {http://doi.acm.org/10.1145/3308558.3313668},
doi = {10.1145/3308558.3313668},
acmid = {3313668},
publisher = {ACM},
address = {New York, NY, USA},
keywords = {Graph Kernels, Graph Neural Networks, Representation Learning, Similarity and Search},
}
@inproceedings{Agrawal:2019:LEP:3308558.3313695,
author = {Agrawal, Rakshit and de Alfaro, Luca},
title = {Learning Edge Properties in Graphs from Path Aggregations},
booktitle = {The World Wide Web Conference},
series = {WWW '19},
year = {2019},
isbn = {978-1-4503-6674-8},
location = {San Francisco, CA, USA},
pages = {15--25},
numpages = {11},
url = {http://doi.acm.org/10.1145/3308558.3313695},
doi = {10.1145/3308558.3313695},
acmid = {3313695},
publisher = {ACM},
address = {New York, NY, USA},
keywords = {Edge Learning, Neural Networks, Path Aggregation},
}
@inproceedings{Cui:2019:DWO:3308558.3313444,
author = {Cui, Zeyu and Li, Zekun and Wu, Shu and Zhang, Xiao-Yu and Wang, Liang},
title = {Dressing As a Whole: Outfit Compatibility Learning Based on Node-wise Graph Neural Networks},
booktitle = {The World Wide Web Conference},
series = {WWW '19},
year = {2019},
isbn = {978-1-4503-6674-8},
location = {San Francisco, CA, USA},
pages = {307--317},
numpages = {11},
url = {http://doi.acm.org/10.1145/3308558.3313444},
doi = {10.1145/3308558.3313444},
acmid = {3313444},
publisher = {ACM},
address = {New York, NY, USA},
keywords = {Compatibility learning, graph neural networks, multi-modal},
}
@inproceedings{Dai:2019:ATM:3308558.3313445,
author = {Dai, Quanyu and Shen, Xiao and Zhang, Liang and Li, Qiang and Wang, Dan},
title = {Adversarial Training Methods for Network Embedding},
booktitle = {The World Wide Web Conference},
series = {WWW '19},
year = {2019},
isbn = {978-1-4503-6674-8},
location = {San Francisco, CA, USA},
pages = {329--339},
numpages = {11},
url = {http://doi.acm.org/10.1145/3308558.3313445},
doi = {10.1145/3308558.3313445},
acmid = {3313445},
publisher = {ACM},
address = {New York, NY, USA},
keywords = {Adversarial Training, Network Embedding, Robustness},
}
@inproceedings{Fan:2019:GNN:3308558.3313488,
author = {Fan, Wenqi and Ma, Yao and Li, Qing and He, Yuan and Zhao, Eric and Tang, Jiliang and Yin, Dawei},
title = {Graph Neural Networks for Social Recommendation},
booktitle = {The World Wide Web Conference},
series = {WWW '19},
year = {2019},
isbn = {978-1-4503-6674-8},
location = {San Francisco, CA, USA},
pages = {417--426},
numpages = {10},
url = {http://doi.acm.org/10.1145/3308558.3313488},
doi = {10.1145/3308558.3313488},
acmid = {3313488},
publisher = {ACM},
address = {New York, NY, USA},
keywords = {Graph Neural Networks, Neural Networks, Recommender Systems, Social Network, Social Recommendation},
}
@inproceedings{Epasto:2019:SEE:3308558.3313660,
author = {Epasto, Alessandro and Perozzi, Bryan},
title = {Is a Single Embedding Enough? Learning Node Representations That Capture Multiple Social Contexts},
booktitle = {The World Wide Web Conference},
series = {WWW '19},
year = {2019},
isbn = {978-1-4503-6674-8},
location = {San Francisco, CA, USA},
pages = {394--404},
numpages = {11},
url = {http://doi.acm.org/10.1145/3308558.3313660},
doi = {10.1145/3308558.3313660},
acmid = {3313660},
publisher = {ACM},
address = {New York, NY, USA},
keywords = {graph embeddings, polysemous representations, representation learning},
}
@inproceedings{Yang:2019:PDE:3308558.3313456,
author = {Yang, Carl and Hoang, Do Huy and Mikolov, Tomas and Han, Jiawei},
title = {Place Deduplication with Embeddings},
booktitle = {The World Wide Web Conference},
series = {WWW '19},
year = {2019},
isbn = {978-1-4503-6674-8},
location = {San Francisco, CA, USA},
pages = {3420--3426},
numpages = {7},
url = {http://doi.acm.org/10.1145/3308558.3313456},
doi = {10.1145/3308558.3313456},
acmid = {3313456},
publisher = {ACM},
address = {New York, NY, USA},
keywords = {feature generation, metric learning, place deduplication},
}
@inproceedings{Wang:2019:HGA:3308558.3313562,
author = {Wang, Xiao and Ji, Houye and Shi, Chuan and Wang, Bai and Ye, Yanfang and Cui, Peng and Yu, Philip S},
title = {Heterogeneous Graph Attention Network},
booktitle = {The World Wide Web Conference},
series = {WWW '19},
year = {2019},
isbn = {978-1-4503-6674-8},
location = {San Francisco, CA, USA},
pages = {2022--2032},
numpages = {11},
url = {http://doi.acm.org/10.1145/3308558.3313562},
doi = {10.1145/3308558.3313562},
acmid = {3313562},
publisher = {ACM},
address = {New York, NY, USA},
keywords = {Graph Analysis, Neural Network, Social Network},
}
@inproceedings{Jia:2019:CCD:3308558.3313564,
author = {Jia, Yuting and Zhang, Qinqin and Zhang, Weinan and Wang, Xinbing},
title = {CommunityGAN: Community Detection with Generative Adversarial Nets},
booktitle = {The World Wide Web Conference},
series = {WWW '19},
year = {2019},
isbn = {978-1-4503-6674-8},
location = {San Francisco, CA, USA},
pages = {784--794},
numpages = {11},
url = {http://doi.acm.org/10.1145/3308558.3313564},
doi = {10.1145/3308558.3313564},
acmid = {3313564},
publisher = {ACM},
address = {New York, NY, USA},
keywords = {Community Detection, Generative Adversarial Nets, Graph Representation Learning},
}