{"id":921,"date":"2023-12-10T23:04:12","date_gmt":"2023-12-10T23:04:12","guid":{"rendered":"https:\/\/zli.one\/?p=921"},"modified":"2023-12-10T23:04:59","modified_gmt":"2023-12-10T23:04:59","slug":"advanced-prompting","status":"publish","type":"post","link":"https:\/\/zli.one\/?p=921","title":{"rendered":"Advanced Prompting"},"content":{"rendered":"\n<div class=\"wp-block-cover alignfull is-light\" style=\"min-height:464px;aspect-ratio:unset;\"><span aria-hidden=\"true\" class=\"wp-block-cover__background has-background-dim-100 has-background-dim\" style=\"background-color:#ffffff\"><\/span><div class=\"wp-block-cover__inner-container is-layout-flow wp-block-cover-is-layout-flow\">\n<div class=\"wp-block-media-text alignwide is-stacked-on-mobile is-vertically-aligned-center is-image-fill\" style=\"grid-template-columns:56% auto\"><figure class=\"wp-block-media-text__media\" style=\"background-image:url(https:\/\/zli.one\/wp-content\/uploads\/2023\/06\/istockphoto-491732089-170667a.jpg);background-position:58% 56%\"><img loading=\"lazy\" decoding=\"async\" width=\"343\" height=\"499\" src=\"https:\/\/zli.one\/wp-content\/uploads\/2023\/06\/istockphoto-491732089-170667a.jpg\" alt=\"\" class=\"wp-image-859 size-full\" srcset=\"https:\/\/zli.one\/wp-content\/uploads\/2023\/06\/istockphoto-491732089-170667a.jpg 343w, https:\/\/zli.one\/wp-content\/uploads\/2023\/06\/istockphoto-491732089-170667a-206x300.jpg 206w\" sizes=\"auto, (max-width: 343px) 100vw, 343px\" \/><\/figure><div class=\"wp-block-media-text__content\">\n<h2 class=\"wp-block-heading has-text-color\" style=\"color:#000000;font-size:32px\"><strong>Advanced Prompting<\/strong><\/h2>\n\n\n\n<p class=\"has-text-color\" style=\"color:#000000;font-size:17px\">This paper investigates the impact of five basic yet versatile prompting strategies\u2014zero-shot, few-shot, chain of thought, and one advanced method, Tree of Thought (ToT)\u2014on two key datasets: Game 24 and Crossword. 
Our research reveals that while the state-of-the-art ToT strategy is effective on its original LLM, it does not adapt well to smaller models like Vicuna-7b. A detailed examination of ToT&#8217;s performance on these benchmark datasets uncovers two major limitations, which we address with our novel method. Our approach not only successfully resolves issues where ToT typically struggles, but it also operates up to 5 times faster. Additionally, we enhance the Vicuna-7b model&#8217;s capabilities by integrating an image-to-text conversion tool, adding multi-modal functionality to the model.<\/p>\n\n\n\n<div class=\"wp-block-buttons is-layout-flex wp-block-buttons-is-layout-flex\">\n<div class=\"wp-block-button is-style-fill\"><a class=\"wp-block-button__link wp-element-button\" href=\"https:\/\/zli.one\/wp-content\/uploads\/2023\/12\/TOT.pdf\" target=\"_blank\" rel=\"noreferrer noopener\">Paper<\/a><\/div>\n\n\n\n<div class=\"wp-block-button is-style-fill\"><a class=\"wp-block-button__link wp-element-button\" 
href=\"https:\/\/github.com\/milesway\/scientific_llm\">GitHub<\/a><\/div>\n<\/div>\n<\/div><\/div>\n<\/div><\/div>\n","protected":false},"excerpt":{"rendered":"","protected":false},"author":1,"featured_media":922,"comment_status":"closed","ping_status":"open","sticky":false,"template":"","format":"standard","meta":{"footnotes":""},"categories":[10],"tags":[],"class_list":["post-921","post","type-post","status-publish","format-standard","has-post-thumbnail","hentry","category-projects"],"amp_enabled":true,"_links":{"self":[{"href":"https:\/\/zli.one\/index.php?rest_route=\/wp\/v2\/posts\/921","targetHints":{"allow":["GET"]}}],"collection":[{"href":"https:\/\/zli.one\/index.php?rest_route=\/wp\/v2\/posts"}],"about":[{"href":"https:\/\/zli.one\/index.php?rest_route=\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"https:\/\/zli.one\/index.php?rest_route=\/wp\/v2\/users\/1"}],"replies":[{"embeddable":true,"href":"https:\/\/zli.one\/index.php?rest_route=%2Fwp%2Fv2%2Fcomments&post=921"}],"version-history":[{"count":2,"href":"https:\/\/zli.one\/index.php?rest_route=\/wp\/v2\/posts\/921\/revisions"}],"predecessor-version":[{"id":926,"href":"https:\/\/zli.one\/index.php?rest_route=\/wp\/v2\/posts\/921\/revisions\/926"}],"wp:featuredmedia":[{"embeddable":true,"href":"https:\/\/zli.one\/index.php?rest_route=\/wp\/v2\/media\/922"}],"wp:attachment":[{"href":"https:\/\/zli.one\/index.php?rest_route=%2Fwp%2Fv2%2Fmedia&parent=921"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"https:\/\/zli.one\/index.php?rest_route=%2Fwp%2Fv2%2Fcategories&post=921"},{"taxonomy":"post_tag","embeddable":true,"href":"https:\/\/zli.one\/index.php?rest_route=%2Fwp%2Fv2%2Ftags&post=921"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}