@misc{Andrew-2004-list-access,
  author = {Kevin Andrew and David F. Gleich},
  howpublished = {Advanced Algorithms, Harvey Mudd College, Final Project},
  title = {{MTF}, {BIT}, and {COMB}: A Guide to Deterministic and Randomized Online Algorithms for the List Access Problem},
  year = {2004},
  abstract = {In this survey, we discuss two randomized online algorithms for the list access problem. First, we review competitive analysis and show that the MTF algorithm is 2-competitive using a potential function. Then, we introduce randomized competitive analysis and the associated adversary models. We show that the randomized BIT algorithm is 7/4-competitive using a potential function argument. We then introduce the pairwise property and the TIMESTAMP algorithm to show that the COMB algorithm, a COMBination of the BIT and TIMESTAMP algorithms, is 8/5-competitive. COMB is the best known randomized algorithm for the list access problem.},
  file = {:Andrew 2004 - List Access.pdf},
  keywords = {self},
  owner = {David F. Gleich},
  timestamp = {2011.07.02}
}
@misc{gleich2003-clinic,
  title = {Three Methods for Improving Relevance in Web Search},
  author = {Erin Bodine and David F. Gleich and Cathy Kurata and Jordan Kwan and Lesley Ward and Daniel Fain},
  howpublished = {Clinic Report, Harvey Mudd College},
  month = may,
  note = {May 9, 2003. 102 pages. Includes fully documented program code on accompanying CD.},
  year = {2003},
  abstract = {The 2002--2003 Overture clinic project evaluated and implemented three different methods for improving relevance ordering in web search. The three methods were bottom up micro information unit (MIU) analysis, top down MIU analysis, and proximity scoring. We ran these three methods on the top 200 web pages returned for each of 58 queries by an already existing algorithmic search engine. We used two metrics, precision and relevance ordering, to evaluate the results. Precision deals with how relevant the web page is for a given query, while relevance ordering is how well-ordered the returned results are. We evaluated the precision of each method and of the algorithmic search engine by hand. For relevance ordering, we recruited other humans to compare pages and used their decisions to generate an ideal ranking for each query. The results of each of our methods and of the algorithmic search engine are then compared to this ideal ranking vector using Kendall's Tau. Our bottom up MIU analysis method achieved the highest precision score of 0.78 out of 1.00. In addition, bottom up MIU analysis received the second highest correlation coefficient (or relevance ordering score) of 0.107 while the algorithmic search engine received the highest correlation coefficient of 0.121. Interestingly, our proximity scoring method received high relevance ordering scores when the algorithmic search engine received low relevance ordering scores.},
  file = {:bodine2003 - clinic, improved relevance.pdf},
  key = {BGK+2003},
  keywords = {self, web search},
  owner = {David Gleich},
  timestamp = {2006.10.13}
}
@article{Constantine-preprint-active,
  author = {Paul G. Constantine and David F. Gleich},
  title = {Computing Active Subspaces},
  journal = {arXiv},
  year = {2014},
  eprint = {1408.0545},
  archiveprefix = {arXiv},
  primaryclass = {math.NA},
  arxiv = {http://arxiv.org/abs/1408.0545},
  owner = {dgleich},
  timestamp = {2014.08.19},
  url = {http://arxiv.org/abs/1408.0545}
}
@misc{Gleich-2005-directed-spectral,
  title = {Hierarchical Directed Spectral Graph Partitioning},
  author = {David F. Gleich},
  howpublished = {Information Networks, Stanford University, Final Project, 2005},
  year = {2006},
  internal-note = {NOTE(review): year (2006) disagrees with key and howpublished (2005) -- verify which is correct},
  abstract = {In this report, we examine the generalization of the Laplacian of a graph due to Fan Chung. We show that Fan Chung's generalization reduces to examining one particular symmetrization of the adjacency matrix for a directed graph. From this result, the directed Cheeger bounds trivially follow. Additionally, we implement and examine the benefits of directed hierarchical spectral clustering empirically on a dataset from Wikipedia. Finally, we examine a set of competing heuristic methods on the same dataset.},
  file = {:Gleich 2005 - hierarchical directed spectral.pdf},
  keywords = {self},
  owner = {David F. Gleich},
  timestamp = {2011.07.02},
  url = {http://www.cs.purdue.edu/homes/dgleich/publications/Gleich%202005%20-%20hierarchical%20directed%20spectral.pdf}
}
@misc{Gleich-2005-finite-calculus,
  author       = {David F. Gleich},
  title        = {Finite Calculus: A tutorial for solving nasty sums},
  howpublished = {Combinatorics, Final Paper, Stanford University, 2004.},
  year         = {2005},
  file         = {:Gleich 2005 - finite calculus.pdf},
  keywords     = {self},
  owner        = {David F. Gleich},
  timestamp    = {2011.07.02}
}
@misc{Gleich-2003-computer-chess,
  title = {Machine Learning in Computer Chess: Genetic Programming and {KRK}},
  author = {David F. Gleich},
  howpublished = {Independent Study Report, Harvey Mudd College},
  year = {2003},
  abstract = {In this paper, I describe genetic programming as a machine learning paradigm and evaluate its results in attempting to learn basic chess rules. Genetic programming exploits a simulation of Darwinian evolution to construct programs. When applied to the King-Rook-King (KRK) chess endgame problem, genetic programming shows promising results in spite of a lack of significant chess knowledge.},
  file = {:Gleich 2003 - Machine Learning in Computer Chess.pdf},
  keywords = {self},
  owner = {David F. Gleich},
  timestamp = {2011.07.02}
}
@inproceedings{gleich2007-pagerank-deriv,
  title = {Three results on the {PageRank} vector: eigenstructure, sensitivity, and the derivative},
  author = {David F. Gleich and Peter Glynn and Gene H. Golub and Chen Greif},
  booktitle = {Web Information Retrieval and Linear Algebra Algorithms},
  year = {2007},
  editor = {Andreas Frommer and Michael W. Mahoney and Daniel B. Szyld},
  number = {07071},
  publisher = {Internationales Begegnungs- und Forschungszentrum fuer Informatik (IBFI), Schloss Dagstuhl, Germany},
  series = {Dagstuhl Seminar Proceedings},
  abstract = {The three results on the PageRank vector are preliminary but shed light on the eigenstructure of a PageRank modified Markov chain and what happens when changing the teleportation parameter in the PageRank model. Computations with the derivative of the PageRank vector with respect to the teleportation parameter show predictive ability and identify an interesting set of pages from Wikipedia.},
  file = {:gleich 2007 - pagerank derivative and sensitivity.pdf},
  issn = {1862-4405},
  key = {GGGG2007},
  keywords = {self, PageRank, PageRank derivative, PageRank sensitivity, PageRank eigenstructure},
  optaddress = {Dagstuhl, Germany},
  optannote = {Keywords: PageRank, PageRank derivative, PageRank sensitivity, PageRank eigenstructure},
  opturl = {http://drops.dagstuhl.de/opus/volltexte/2007/1061},
  owner = {David Gleich},
  timestamp = {2008.03.04},
  url = {http://drops.dagstuhl.de/opus/volltexte/2007/1061}
}
@misc{Gleich-2006-wom,
  title = {The World of Music: User Ratings; Spectral and Spherical Embeddings; Map Projections},
  author = {David F. Gleich and Matthew Rasmussen and Kevin Lang and Leonid Zhukov},
  howpublished = {Online report},
  year = {2006},
  abstract = {In this paper we present an algorithm for layout and visualization of music collections based on similarities between musical artists. The core of the algorithm consists of a non-linear low dimensional embedding of a similarity graph constrained to the surface of a hyper-sphere. This approach effectively uses additional dimensions in the embedding. We derive the algorithm using a simple energy minimization procedure and show the relationships to several well known eigenvector based methods. We also describe a method for constructing a similarity graph from user ratings, as well as procedures for mapping the layout from the hyper-sphere to a 2d display. We demonstrate our techniques on Yahoo! Music user ratings data and a MusicMatch artist similarity graph.},
  file = {:Gleich 2006 - wom.pdf},
  keywords = {self},
  owner = {David F. Gleich},
  timestamp = {2011.07.02},
  url = {http://www.cs.purdue.edu/homes/dgleich/publications/Gleich%202006%20-%20wom.pdf}
}
@techreport{gleich2004-parallel,
  author      = {David F. Gleich and Leonid Zhukov and Pavel Berkhin},
  title       = {Fast Parallel {PageRank}: A Linear System Approach},
  institution = {Yahoo! Research Labs},
  number      = {YRL-2004-038},
  year        = {2004},
  abstract    = {In this paper we investigate the convergence of iterative stationary and Krylov subspace methods for the PageRank linear system, including the convergence dependency on teleportation. We demonstrate that linear system iterations converge faster than the simple power method and are less sensitive to the changes in teleportation. In order to perform this study we developed a framework for parallel PageRank computing. We describe the details of the parallel implementation and provide experimental results obtained on a 70-node Beowulf cluster.},
  file        = {:gleich2004-parallel.pdf},
  key         = {GZB2006},
  keywords    = {self, pagerank},
  mysoftware  = {https://github.com/dgleich/ppagerank},
  owner       = {David Gleich},
  timestamp   = {2006.11.29},
  url         = {http://www.cs.purdue.edu/homes/dgleich/publications/gleich2004-parallel.pdf}
}
@article{Stinson-preprint-zonotopes,
  title = {A randomized algorithm for enumerating zonotope vertices},
  author = {Kerrek Stinson and David F. Gleich and Paul G. Constantine},
  journal = {arXiv},
  year = {2016},
  eprint = {1602.06620},
  archiveprefix = {arXiv},
  primaryclass = {math.NA},
  owner = {dgleich},
  timestamp = {2016.02.22},
  url = {http://arxiv.org/abs/1602.06620}
}
@misc{Zhukov-2004-soft-clustering,
  title = {Topic Identification in Soft Clustering using {PCA} and {ICA}},
  author = {Leonid Zhukov and David F. Gleich},
  howpublished = {Online report, Yahoo! research labs},
  year = {2004},
  abstract = {Many applications can benefit from soft clustering, where each datum is assigned to multiple clusters with membership weights that sum to one. In this paper we present a comparison of principal component analysis (PCA) and independent component analysis (ICA) when used for soft clustering. We provide a short mathematical background for these methods and demonstrate their application to a sponsored links search listings dataset. We present examples of the soft clusters generated by both methods and compare the results.},
  file = {:Zhukov 2004 - soft clustering pca and ica.pdf},
  keywords = {self},
  owner = {David F. Gleich},
  timestamp = {2011.07.02}
}