Go Integration Guide
Scrape anti-bot protected sites with Go using only the standard library, with no external dependencies.
Prerequisites: Go 1.18+.
Sync Scraping
Scrape a single URL with a clean, reusable function.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

type ScrapeRequest struct {
	URLs []URLItem `json:"urls"`
	Mode string    `json:"mode"` // "sync" or "async"
}

type URLItem struct {
	URL             string `json:"url"`
	WaitFor         int    `json:"waitFor,omitempty"`         // optional wait before capturing HTML
	WaitForSelector string `json:"waitForSelector,omitempty"` // optional selector to wait for
}

type ScrapeResponse struct {
	HTML  string `json:"html"`
	Title string `json:"title"`
	URL   string `json:"url"`
}

func scrapeSync(apiKey, targetURL string) (*ScrapeResponse, error) {
	body, err := json.Marshal(ScrapeRequest{
		URLs: []URLItem{{URL: targetURL}},
		Mode: "sync",
	})
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequest("POST",
		"https://api.ultrawebscrapingapi.com/v1/scrape",
		bytes.NewReader(body))
	if err != nil {
		return nil, err
	}
	req.Header.Set("X-API-Key", apiKey)
	req.Header.Set("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	data, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("scrape failed (%d): %s", resp.StatusCode, data)
	}
	var result ScrapeResponse
	if err := json.Unmarshal(data, &result); err != nil {
		return nil, err
	}
	return &result, nil
}
func main() {
	result, err := scrapeSync("your_api_key", "https://example.com")
	if err != nil {
		panic(err)
	}
	fmt.Printf("Title: %s\n", result.Title)
	fmt.Printf("HTML: %d chars\n", len(result.HTML))
}
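URLItem also carries waitFor and waitForSelector for pages that render content client-side. A minimal sketch of a request that waits before capture; the exact semantics and units of these fields are assumptions inferred from the field names, so check the API reference:
// Sketch only: waitFor is assumed to be milliseconds, and waitForSelector a
// CSS selector the scraper waits for before returning HTML.
reqBody := ScrapeRequest{
	URLs: []URLItem{{
		URL:             "https://example.com/products",
		WaitFor:         5000,            // assumed: wait up to 5000 ms
		WaitForSelector: "#product-list", // assumed: wait until this selector renders
	}},
	Mode: "sync",
}
_ = reqBody // marshal and POST exactly as in scrapeSync above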
Async Scraping
Submit multiple URLs, poll for completion, then fetch all results.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"time"
)

const (
	apiKey = "your_api_key"
	base   = "https://api.ultrawebscrapingapi.com/v1"
)

// ScrapeRequest and URLItem are the same types defined in the sync example.

type AsyncResponse struct {
	SubscriptionID string `json:"subscriptionId"`
}

type StatusResponse struct {
	Total      int   `json:"total"`
	Completed  int   `json:"completed"`
	Processing int   `json:"processing"`
	Queued     int   `json:"queued"`
	Jobs       []Job `json:"jobs"`
}

type Job struct {
	Index  int    `json:"index"`
	URL    string `json:"url"`
	Status string `json:"status"`
}

type ResultResponse struct {
	URL   string `json:"url"`
	Title string `json:"title"`
	HTML  string `json:"html"`
}
func main() {
	urls := []URLItem{
		{URL: "https://site-a.com/page1"},
		{URL: "https://site-b.com/page2"},
		{URL: "https://site-c.com/page3"},
	}

	// 1. Submit the batch in async mode.
	body, err := json.Marshal(ScrapeRequest{URLs: urls, Mode: "async"})
	if err != nil {
		panic(err)
	}
	req, err := http.NewRequest("POST", base+"/scrape", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("X-API-Key", apiKey)
	req.Header.Set("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	data, _ := io.ReadAll(resp.Body)
	resp.Body.Close()
	var asyncResp AsyncResponse
	if err := json.Unmarshal(data, &asyncResp); err != nil {
		panic(err)
	}
	subID := asyncResp.SubscriptionID
	fmt.Printf("Submitted: %s\n", subID)

	// 2. Poll every 5 seconds until nothing is processing or queued.
	var status StatusResponse
	for {
		pollReq, _ := http.NewRequest("GET",
			fmt.Sprintf("%s/subscription/%s", base, subID), nil)
		pollReq.Header.Set("X-API-Key", apiKey)
		pollResp, err := http.DefaultClient.Do(pollReq)
		if err != nil {
			panic(err)
		}
		pollData, _ := io.ReadAll(pollResp.Body)
		pollResp.Body.Close()
		if err := json.Unmarshal(pollData, &status); err != nil {
			panic(err)
		}
		fmt.Printf("Progress: %d/%d\n", status.Completed, status.Total)
		if status.Processing == 0 && status.Queued == 0 {
			break
		}
		time.Sleep(5 * time.Second)
	}

	// 3. Fetch each completed job's result by index.
	for _, job := range status.Jobs {
		if job.Status != "completed" {
			continue
		}
		resultReq, _ := http.NewRequest("GET",
			fmt.Sprintf("%s/result/%s/%d", base, subID, job.Index), nil)
		resultReq.Header.Set("X-API-Key", apiKey)
		resultResp, err := http.DefaultClient.Do(resultReq)
		if err != nil {
			panic(err)
		}
		resultData, _ := io.ReadAll(resultResp.Body)
		resultResp.Body.Close()
		var result ResultResponse
		if err := json.Unmarshal(resultData, &result); err != nil {
			panic(err)
		}
		fmt.Printf("%s — %d chars\n", result.URL, len(result.HTML))
	}
}
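Each result fetch in step 3 is an independent GET, so completed jobs can also be fetched concurrently. A minimal sketch using goroutines and sync.WaitGroup (add "sync" to the import list); it reuses base, apiKey, subID, status, and ResultResponse from the example above:
// Sketch: fetch completed results concurrently instead of one at a time.
fetchResult := func(index int) (*ResultResponse, error) {
	req, err := http.NewRequest("GET",
		fmt.Sprintf("%s/result/%s/%d", base, subID, index), nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("X-API-Key", apiKey)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	data, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	var r ResultResponse
	if err := json.Unmarshal(data, &r); err != nil {
		return nil, err
	}
	return &r, nil
}

var wg sync.WaitGroup
for _, job := range status.Jobs {
	if job.Status != "completed" {
		continue
	}
	wg.Add(1)
	go func(j Job) {
		defer wg.Done()
		result, err := fetchResult(j.Index)
		if err != nil {
			fmt.Printf("job %d: %v\n", j.Index, err)
			return
		}
		fmt.Printf("%s — %d chars\n", result.URL, len(result.HTML))
	}(j) // pass job as a parameter to avoid loop-variable capture pre-Go 1.22
}
wg.Wait()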
Error Handling & Retry
Production-ready scraping with exponential backoff, rate-limit handling, and per-request timeouts.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"math"
	"net/http"
	"time"
)

// ScrapeRequest, URLItem, and ScrapeResponse are the types from the sync example.

func scrapeWithRetry(apiKey, url string, maxRetries int) (*ScrapeResponse, error) {
	client := &http.Client{Timeout: 120 * time.Second} // cap each attempt at 2 minutes
	for attempt := 0; attempt < maxRetries; attempt++ {
		body, err := json.Marshal(ScrapeRequest{
			URLs: []URLItem{{URL: url}},
			Mode: "sync",
		})
		if err != nil {
			return nil, err
		}
		req, err := http.NewRequest("POST",
			"https://api.ultrawebscrapingapi.com/v1/scrape",
			bytes.NewReader(body))
		if err != nil {
			return nil, err
		}
		req.Header.Set("X-API-Key", apiKey)
		req.Header.Set("Content-Type", "application/json")
		resp, err := client.Do(req)
		if err != nil {
			fmt.Printf("Attempt %d: network error: %v\n", attempt+1, err)
			continue
		}
		data, _ := io.ReadAll(resp.Body)
		resp.Body.Close()
		switch resp.StatusCode {
		case 200:
			var result ScrapeResponse
			if err := json.Unmarshal(data, &result); err != nil {
				return nil, err
			}
			return &result, nil
		case 402:
			// Not retryable: credits must be topped up on your account.
			return nil, fmt.Errorf("insufficient credits")
		case 429:
			// Exponential backoff: 5s, 10s, 20s, ...
			wait := time.Duration(math.Pow(2, float64(attempt))) * 5 * time.Second
			fmt.Printf("Rate limited, waiting %v...\n", wait)
			time.Sleep(wait)
		case 503:
			fmt.Println("No capacity, retrying in 30s...")
			time.Sleep(30 * time.Second)
		default:
			fmt.Printf("Error %d: %s\n", resp.StatusCode, string(data))
		}
	}
	return nil, fmt.Errorf("failed after %d attempts", maxRetries)
}
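A minimal main showing the retry wrapper in use, allowing up to five attempts:
func main() {
	// scrapeWithRetry returns the first successful response, or an error
	// after maxRetries attempts or a non-retryable failure (e.g. 402).
	result, err := scrapeWithRetry("your_api_key", "https://example.com", 5)
	if err != nil {
		panic(err)
	}
	fmt.Printf("Title: %s\n", result.Title)
	fmt.Printf("HTML: %d chars\n", len(result.HTML))
}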