{"id":1076,"date":"2025-03-15T09:24:13","date_gmt":"2025-03-15T00:24:13","guid":{"rendered":"https:\/\/kmlab.nagaokaut.ac.jp\/?p=1076"},"modified":"2025-04-19T09:34:21","modified_gmt":"2025-04-19T00:34:21","slug":"our-paper-published-in-neurocomputing","status":"publish","type":"post","link":"https:\/\/kmlab.nagaokaut.ac.jp\/index.php\/2025\/03\/15\/our-paper-published-in-neurocomputing\/","title":{"rendered":"Our paper published in Neurocomputing"},"content":{"rendered":"<p class=\"\" data-start=\"82\" data-end=\"448\">Our latest work has been published in <em data-start=\"202\" data-end=\"218\">Neurocomputing<\/em> (Volume 637, 7 July 2025).<\/p>\n<h5 data-start=\"82\" data-end=\"448\"><strong data-start=\"251\" data-end=\"261\">Title:<\/strong> <em data-start=\"262\" data-end=\"373\">DSTSA-GCN: Advancing Skeleton-Based Gesture Recognition with Semantic-Aware Spatio-Temporal Topology Modeling<\/em><br data-start=\"373\" data-end=\"376\" \/><strong data-start=\"458\" data-end=\"470\">Authors:<\/strong> Hu Cui, Renjing Huang, Ruoyu Zhang, Tessai Hayama<\/h5>\n<p data-start=\"82\" data-end=\"448\"><a href=\"https:\/\/doi.org\/10.1016\/j.neucom.2025.130066\">https:\/\/doi.org\/10.1016\/j.neucom.2025.130066<\/a><\/p>\n<p class=\"\" data-start=\"533\" data-end=\"802\">Graph Convolutional Networks (GCNs) have great potential for recognizing human gestures from skeleton data. However, existing methods struggle to capture dynamic and multiscale patterns. Our proposed method, <strong data-start=\"767\" data-end=\"780\">DSTSA-GCN<\/strong>, addresses this with:<\/p>\n<ul data-start=\"804\" data-end=\"1077\">\n<li class=\"\" data-start=\"804\" data-end=\"898\">\n<p class=\"\" data-start=\"806\" data-end=\"898\"><strong data-start=\"809\" data-end=\"826\">GC-GC &amp; GT-GC<\/strong>: New modules for modeling correlations across channels and time frames.<\/p>\n<\/li>\n<li class=\"\" data-start=\"899\" data-end=\"980\">\n<p class=\"\" data-start=\"901\" data-end=\"980\"><strong data-start=\"904\" data-end=\"914\">MS-TCN<\/strong>: A multi-scale convolutional module to handle temporal diversity.<\/p>\n<\/li>\n<li class=\"\" data-start=\"981\" data-end=\"1077\">\n<p class=\"\" data-start=\"983\" data-end=\"1077\"><strong data-start=\"986\" data-end=\"1022\">Semantic-Aware Topology Modeling<\/strong>: Better understanding of gesture structure and motion.<\/p>\n<\/li>\n<\/ul>\n<p class=\"\" data-start=\"1079\" data-end=\"1193\">Our method achieves <strong data-start=\"1102\" data-end=\"1134\">state-of-the-art performance<\/strong> on key benchmarks like SHREC\u201917, DHG-14\/28, and NTU-RGB+D.<\/p>\n<p class=\"\" data-start=\"1195\" data-end=\"1298\"><strong data-start=\"1198\" data-end=\"1217\">Code Available:<\/strong> <a class=\"\" href=\"https:\/\/hucui2022.github.io\/dstsa_gcn\/\" target=\"_new\" rel=\"noopener\" data-start=\"1218\" data-end=\"1298\">https:\/\/hucui2022.github.io\/dstsa_gcn\/<\/a><\/p>\n<p data-start=\"1195\" data-end=\"1298\"><\/p>\n","protected":false},"excerpt":{"rendered":"<p>Our latest work has been published in Neurocomputing (Volume 637, 7 July 2025). Title: DSTSA-GCN: Advancing Sk &hellip; <a href=\"https:\/\/kmlab.nagaokaut.ac.jp\/index.php\/2025\/03\/15\/our-paper-published-in-neurocomputing\/\" class=\"more-link\">Continue reading <span class=\"screen-reader-text\">Our paper published in Neurocomputing<\/span><\/a><\/p>\n","protected":false},"author":1,"featured_media":0,"comment_status":"open","ping_status":"open","sticky":false,"template":"","format":"standard","meta":{"_exactmetrics_skip_tracking":false,"_exactmetrics_sitenote_active":false,"_exactmetrics_sitenote_note":"","_exactmetrics_sitenote_category":0,"_monsterinsights_skip_tracking":false,"_monsterinsights_sitenote_active":false,"_monsterinsights_sitenote_note":"","_monsterinsights_sitenote_category":0,"footnotes":""},"categories":[1],"tags":[],"class_list":["post-1076","post","type-post","status-publish","format-standard","hentry","category-1","without-featured-image"],"_links":{"self":[{"href":"https:\/\/kmlab.nagaokaut.ac.jp\/index.php\/wp-json\/wp\/v2\/posts\/1076","targetHints":{"allow":["GET"]}}],"collection":[{"href":"https:\/\/kmlab.nagaokaut.ac.jp\/index.php\/wp-json\/wp\/v2\/posts"}],"about":[{"href":"https:\/\/kmlab.nagaokaut.ac.jp\/index.php\/wp-json\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"https:\/\/kmlab.nagaokaut.ac.jp\/index.php\/wp-json\/wp\/v2\/users\/1"}],"replies":[{"embeddable":true,"href":"https:\/\/kmlab.nagaokaut.ac.jp\/index.php\/wp-json\/wp\/v2\/comments?post=1076"}],"version-history":[{"count":1,"href":"https:\/\/kmlab.nagaokaut.ac.jp\/index.php\/wp-json\/wp\/v2\/posts\/1076\/revisions"}],"predecessor-version":[{"id":1077,"href":"https:\/\/kmlab.nagaokaut.ac.jp\/index.php\/wp-json\/wp\/v2\/posts\/1076\/revisions\/1077"}],"wp:attachment":[{"href":"https:\/\/kmlab.nagaokaut.ac.jp\/index.php\/wp-json\/wp\/v2\/media?parent=1076"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"https:\/\/kmlab.nagaokaut.ac.jp\/index.php\/wp-json\/wp\/v2\/categories?post=1076"},{"taxonomy":"post_tag","embeddable":true,"href":"https:\/\/kmlab.nagaokaut.ac.jp\/index.php\/wp-json\/wp\/v2\/tags?post=1076"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}