@comment{M.A. thesis; metadata re-mapped from a repository auto-export that had
  degree/school/date in the wrong fields. Key kept as THESIS for existing \cite commands.}
@mastersthesis{THESIS,
      recid    = {12081},
      author   = {Huang, Violet},
      title    = {Unveiling Gender Bias in Large Language Models},
      school   = {University of Chicago},
      type     = {{M.A.} thesis},
      year     = {2024},
      month    = jun,
      abstract = {This paper investigates gender bias in Large Language Model (LLM)-generated teacher evaluations in a higher education setting, focusing on evaluations produced by GPT-4 across six academic subjects. By applying a comprehensive analytical framework that includes Odds Ratio (OR) analysis, Word Embedding Association Test (WEAT), sentiment analysis, and contextual analysis, this paper identified patterns of gender-associated language reflecting societal stereotypes. Specifically, words related to approachability and support were used more frequently for female instructors, while words related to entertainment were predominantly used for male instructors, aligning with the concepts of communal and agentic behaviors. The study also found moderate to strong associations between male salient adjectives and male names, though career and family words did not distinctly capture gender biases. These findings align with prior research on societal norms and stereotypes, reinforcing the notion that LLM-generated text reflects existing biases.},
      url      = {http://knowledge.uchicago.edu/record/12081},
      doi      = {10.6082/uchicago.12081},
}