@phdthesis{digilib72679,
  author        = {Unaenah, Nina},
  title         = {Analisis Jawaban {ChatGPT} dalam Menyelesaikan Soal Fisika Seleksi Masuk Perguruan Tinggi},
  school        = {UIN Sunan Kalijaga Yogyakarta},
  year          = {2025},
  month         = aug,
  note          = {Supervisor: Rachmad Resmiyanto, S.Si., M.Sc.},
  internal-note = {Student ID (NIM): 18106090016},
  keywords      = {Kecerdasan Buatan ChatGPT, Fisika, Pendidikan, Pemecahan Masalah, Evaluasi Akurasi},
  url           = {https://digilib.uin-suka.ac.id/id/eprint/72679/},
  abstract      = {Students often encounter considerable difficulties in solving physics problems featured in university entrance examinations. ChatGPT, as an adaptive technology in the academic sphere, offers potential support in this regard. However, not all responses generated by ChatGPT are consistently accurate or aligned with the contextual demands of the questions posed. The present study aims to: 1) analyze the accuracy of ChatGPT's responses in solving physics questions from the Computer-Based Written Examination (Ujian Tulis Berbasis Komputer (UTBK)), and 2) assess the quality of ChatGPT's solutions to these UTBK physics problems. The version of ChatGPT employed in this study is ChatGPT-4o. A qualitative content analysis method was adopted. ChatGPT's responses were collected in the form of image documentation and subsequently compared with solutions provided in Wangsit Pawang Soal Sulit HOTS SBMPTN Saintek 2023 and Kompeten Komposisi dan Bank Soal Paten SNBT dan Seleksi Mandiri PTN Saintek 2023. A total of 30 physics problems were selected: 15 from the 2019 UTBK and 15 from the 2021 UTBK. For responses deemed inaccurate, the corresponding questions were modified to address the specific misconceptions or conceptual errors exhibited by ChatGPT, without altering the substantive content, and then resubmitted for a second evaluation. The findings reveal that, out of the 30 questions, ChatGPT provided accurate responses to 23 questions and inaccurate responses to 7. Following modification of the 7 problematic items by adding narrative cues corresponding to ChatGPT's identified misconceptions or conceptual misunderstandings, ChatGPT achieved accurate solutions on 4 of these questions, while 3 remained inaccurate. In terms of quality, ChatGPT initially answered 22 questions with a score of 100\%, while the remaining 8 questions exhibited varying quality: 1 at 75\%, 2 at 50\%, 2 at 25\%, and 3 at 0\%. After the modification process, there was a notable improvement, with 4 questions rising to 100\% quality, 2 questions maintaining the same quality level of 50\%, and 1 question showing only a modest improvement to 25\%.},
}