diff --git a/cmd/rekor-server/app/flags.go b/cmd/rekor-server/app/flags.go
index ba6d748ede5cc623de0398cb09be694962310f2c..49d339f3afc8ff3dc1b2bc6f51b334af890553bc 100644
--- a/cmd/rekor-server/app/flags.go
+++ b/cmd/rekor-server/app/flags.go
@@ -72,19 +72,12 @@ func (l *LogRangesFlag) Set(s string) error {
 		}
 		TreeIDs[lr.TreeID] = struct{}{}
 	}
-
-	l.Ranges = sharding.LogRanges{
-		Ranges: inputRanges,
-	}
+	l.Ranges.SetRanges(inputRanges)
 	return nil
 }
 
 func (l *LogRangesFlag) String() string {
-	ranges := []string{}
-	for _, r := range l.Ranges.Ranges {
-		ranges = append(ranges, fmt.Sprintf("%d=%d", r.TreeID, r.TreeLength))
-	}
-	return strings.Join(ranges, ",")
+	return l.Ranges.String()
 }
 
 func (l *LogRangesFlag) Type() string {
diff --git a/cmd/rekor-server/app/flags_test.go b/cmd/rekor-server/app/flags_test.go
index e9028759bf33a95a2e8841d12a7c7863399815a1..49302f7897922a23538a0bb087b13408f7b2bc05 100644
--- a/cmd/rekor-server/app/flags_test.go
+++ b/cmd/rekor-server/app/flags_test.go
@@ -62,12 +62,11 @@ func TestLogRanges_Set(t *testing.T) {
 			if err := l.Set(tt.arg); err != nil {
 				t.Errorf("LogRanges.Set() expected no error, got %v", err)
 			}
-
-			if diff := cmp.Diff(tt.want, l.Ranges.Ranges); diff != "" {
+			if diff := cmp.Diff(tt.want, l.Ranges.GetRanges()); diff != "" {
 				t.Errorf(diff)
 			}
 
-			active := l.Ranges.ActiveIndex()
+			active := l.Ranges.ActiveTreeID()
 			if active != tt.active {
-				t.Errorf("LogRanges.Active() expected %d no error, got %d", tt.active, active)
+				t.Errorf("LogRanges.ActiveTreeID() expected %d, got %d", tt.active, active)
 			}
diff --git a/pkg/api/api.go b/pkg/api/api.go
index 3755f21b930b2f285a95ef4d5da6a85605980e85..84694590caefe41887f0a4c66d8b454c303d23ab 100644
--- a/pkg/api/api.go
+++ b/pkg/api/api.go
@@ -57,7 +57,7 @@ func dial(ctx context.Context, rpcServer string) (*grpc.ClientConn, error) {
 type API struct {
 	logClient    trillian.TrillianLogClient
 	logID        int64
-	logRanges    *sharding.LogRanges
+	logRanges    sharding.LogRanges
 	pubkey       string // PEM encoded public key
 	pubkeyHash   string // SHA256 hash of DER-encoded public key
 	signer       signature.Signer
@@ -88,7 +88,7 @@ func NewAPI(ranges sharding.LogRanges) (*API, error) {
 		tLogID = t.TreeId
 	}
-	// append the active treeID to the API's logRangeMap for lookups
+	// append the active treeID to the API's logRanges for lookups
-	ranges.Ranges = append(ranges.Ranges, sharding.LogRange{TreeID: tLogID})
+	ranges.AppendRange(sharding.LogRange{TreeID: tLogID})
 
 	rekorSigner, err := signer.New(ctx, viper.GetString("rekor_server.signer"))
 	if err != nil {
@@ -142,7 +142,7 @@ func NewAPI(ranges sharding.LogRanges) (*API, error) {
 		// Transparency Log Stuff
 		logClient: logClient,
 		logID:     tLogID,
-		logRanges: &ranges,
+		logRanges: ranges,
 		// Signing/verifying fields
 		pubkey:     string(pubkey),
 		pubkeyHash: hex.EncodeToString(pubkeyHashBytes[:]),
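
A note on the wiring above: NewAPI now registers the active tree through AppendRange instead of appending to the (now unexported) slice, and ActiveTreeID always reads the last range, so the shard appended here is the one new entries land in. A minimal sketch of that invariant, with made-up tree IDs and assuming the github.com/sigstore/rekor module path:

```go
// Sketch only: NewAPI gets the real active tree ID from Trillian;
// 100 and 300 are placeholders.
package main

import (
	"fmt"

	"github.com/sigstore/rekor/pkg/sharding"
)

func main() {
	var ranges sharding.LogRanges
	ranges.SetRanges([]sharding.LogRange{{TreeID: 100, TreeLength: 5}})

	// Mirrors what NewAPI does with the tree it connected to or created:
	// appending makes it the last -- and therefore active -- shard.
	ranges.AppendRange(sharding.LogRange{TreeID: 300})

	fmt.Println(ranges.ActiveTreeID()) // 300
}
```
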
diff --git a/pkg/api/entries.go b/pkg/api/entries.go
index 4be145b8b1fd51875216cb363dd4980dde9e048c..f272677fbc155701269ddcd0dace1795bae74dab 100644
--- a/pkg/api/entries.go
+++ b/pkg/api/entries.go
@@ -64,7 +64,7 @@ func signEntry(ctx context.Context, signer signature.Signer, entry models.LogEnt
 
 // logEntryFromLeaf creates a signed LogEntry struct from trillian structs
 func logEntryFromLeaf(ctx context.Context, signer signature.Signer, tc TrillianClient, leaf *trillian.LogLeaf,
-	signedLogRoot *trillian.SignedLogRoot, proof *trillian.Proof) (models.LogEntry, error) {
+	signedLogRoot *trillian.SignedLogRoot, proof *trillian.Proof, tid int64, ranges sharding.LogRanges) (models.LogEntry, error) {
 
 	root := &ttypes.LogRootV1{}
 	if err := root.UnmarshalBinary(signedLogRoot.LogRoot); err != nil {
@@ -75,9 +75,10 @@ func logEntryFromLeaf(ctx context.Context, signer signature.Signer, tc TrillianC
 		hashes = append(hashes, hex.EncodeToString(hash))
 	}
 
+	virtualIndex := sharding.VirtualLogIndex(leaf.GetLeafIndex(), tid, ranges)
 	logEntryAnon := models.LogEntryAnon{
 		LogID:          swag.String(api.pubkeyHash),
-		LogIndex:       &leaf.LeafIndex,
+		LogIndex:       &virtualIndex,
 		Body:           leaf.LeafValue,
 		IntegratedTime: swag.Int64(leaf.IntegrateTimestamp.AsTime().Unix()),
 	}
@@ -137,7 +138,7 @@ func GetLogEntryByIndexHandler(params entries.GetLogEntryByIndexParams) middlewa
 		return handleRekorAPIError(params, http.StatusNotFound, errors.New("grpc returned 0 leaves with success code"), "")
 	}
 
-	logEntry, err := logEntryFromLeaf(ctx, api.signer, tc, leaf, result.SignedLogRoot, result.Proof)
+	logEntry, err := logEntryFromLeaf(ctx, api.signer, tc, leaf, result.SignedLogRoot, result.Proof, tid, api.logRanges)
 	if err != nil {
 		return handleRekorAPIError(params, http.StatusInternalServerError, err, err.Error())
 	}
@@ -188,9 +189,11 @@ func createLogEntry(params entries.CreateLogEntryParams) (models.LogEntry, middl
 	queuedLeaf := resp.getAddResult.QueuedLeaf.Leaf
 	uuid := hex.EncodeToString(queuedLeaf.GetMerkleLeafHash())
 
+	// The log index should be the virtual log index across all shards
+	virtualIndex := sharding.VirtualLogIndex(queuedLeaf.LeafIndex, api.logRanges.ActiveTreeID(), api.logRanges)
 	logEntryAnon := models.LogEntryAnon{
 		LogID:          swag.String(api.pubkeyHash),
-		LogIndex:       swag.Int64(queuedLeaf.LeafIndex),
+		LogIndex:       swag.Int64(virtualIndex),
 		Body:           queuedLeaf.GetLeafValue(),
 		IntegratedTime: swag.Int64(queuedLeaf.IntegrateTimestamp.AsTime().Unix()),
 	}
@@ -281,7 +284,7 @@ func GetLogEntryByUUIDHandler(params entries.GetLogEntryByUUIDParams) middleware
-		// If EntryID is plain UUID, assume no sharding and use ActiveIndex. The ActiveIndex
-		// will == the tlog_id if a tlog_id is passed in at server startup.
+		// If EntryID is a plain UUID, assume no sharding and use the active tree ID. The
+		// active tree ID will == the tlog_id if a tlog_id is passed in at server startup.
 		if err.Error() == "cannot get treeID from plain UUID" {
-			tid = api.logRanges.ActiveIndex()
+			tid = api.logRanges.ActiveTreeID()
 		} else {
 			return handleRekorAPIError(params, http.StatusBadRequest, err, "")
 		}
@@ -311,7 +314,7 @@ func GetLogEntryByUUIDHandler(params entries.GetLogEntryByUUIDParams) middleware
 		return handleRekorAPIError(params, http.StatusNotFound, errors.New("grpc returned 0 leaves with success code"), "")
 	}
 
-	logEntry, err := logEntryFromLeaf(ctx, api.signer, tc, leaf, result.SignedLogRoot, result.Proof)
+	logEntry, err := logEntryFromLeaf(ctx, api.signer, tc, leaf, result.SignedLogRoot, result.Proof, tid, api.logRanges)
 	if err != nil {
 		return handleRekorAPIError(params, http.StatusInternalServerError, err, "")
 	}
@@ -387,7 +390,7 @@ func SearchLogQueryHandler(params entries.SearchLogQueryParams) middleware.Respo
 
 		for _, leafResp := range searchByHashResults {
 			if leafResp != nil {
-				logEntry, err := logEntryFromLeaf(httpReqCtx, api.signer, tc, leafResp.Leaf, leafResp.SignedLogRoot, leafResp.Proof)
+				logEntry, err := logEntryFromLeaf(httpReqCtx, api.signer, tc, leafResp.Leaf, leafResp.SignedLogRoot, leafResp.Proof, api.logRanges.ActiveTreeID(), api.logRanges)
 				if err != nil {
 					return handleRekorAPIError(params, code, err, err.Error())
 				}
@@ -424,7 +427,7 @@ func SearchLogQueryHandler(params entries.SearchLogQueryParams) middleware.Respo
 
 		for _, result := range leafResults {
 			if result != nil {
-				logEntry, err := logEntryFromLeaf(httpReqCtx, api.signer, tc, result.Leaf, result.SignedLogRoot, result.Proof)
+				logEntry, err := logEntryFromLeaf(httpReqCtx, api.signer, tc, result.Leaf, result.SignedLogRoot, result.Proof, api.logRanges.ActiveTreeID(), api.logRanges)
 				if err != nil {
 					return handleRekorAPIError(params, http.StatusInternalServerError, err, trillianUnexpectedResult)
 				}
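
The handler changes above translate in both directions between the per-shard leaf index Trillian reports and the virtual index exposed to clients. A hedged sketch of the round trip; the shard values are illustrative, and in the real handlers the tid/leaf pair comes from resolving the requested index against api.logRanges:

```go
// Sketch: mapping a client-facing virtual index to a (treeID, leaf index)
// pair and back again.
package main

import (
	"fmt"

	"github.com/sigstore/rekor/pkg/sharding"
)

func main() {
	var ranges sharding.LogRanges
	ranges.SetRanges([]sharding.LogRange{
		{TreeID: 100, TreeLength: 5}, // frozen shard: virtual indexes 0-4
		{TreeID: 300},                // active shard: virtual indexes 5+
	})

	// Virtual index -> shard + local leaf index (lookup direction).
	tid, leafIndex := ranges.ResolveVirtualIndex(7)
	fmt.Println(tid, leafIndex) // 300 2

	// Local leaf index -> virtual index (what logEntryFromLeaf now reports).
	fmt.Println(sharding.VirtualLogIndex(leafIndex, tid, ranges)) // 7
}
```
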
diff --git a/pkg/sharding/log_index.go b/pkg/sharding/log_index.go
new file mode 100644
index 0000000000000000000000000000000000000000..443fda577596556385016a09aa749fd9d35cbfb0
--- /dev/null
+++ b/pkg/sharding/log_index.go
@@ -0,0 +1,33 @@
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sharding
+
+// VirtualLogIndex returns the virtual log index for a leaf in tree tid, offset by the lengths of all preceding shards
+func VirtualLogIndex(leafIndex int64, tid int64, ranges LogRanges) int64 {
+	// if we have no ranges, we have just one log! return the leafIndex as is
+	if ranges.Empty() {
+		return leafIndex
+	}
+
+	var virtualIndex int64
+	for _, r := range ranges.GetRanges() {
+		if r.TreeID == tid {
+			return virtualIndex + leafIndex
+		}
+		virtualIndex += r.TreeLength
+	}
+	// this should never happen
+	return -1
+}
diff --git a/pkg/sharding/log_index_test.go b/pkg/sharding/log_index_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..b99274a6d74cc833da9ae543eeb0c677d5957fd1
--- /dev/null
+++ b/pkg/sharding/log_index_test.go
@@ -0,0 +1,105 @@
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sharding
+
+import (
+	"testing"
+)
+
+func TestVirtualLogIndex(t *testing.T) {
+	tests := []struct {
+		description   string
+		leafIndex     int64
+		tid           int64
+		ranges        LogRanges
+		expectedIndex int64
+	}{
+		{
+			description:   "no ranges",
+			leafIndex:     5,
+			ranges:        LogRanges{},
+			expectedIndex: 5,
+		},
+		// Log 100: 0 1 2 3 4
+		// Log 300: 5 6 7
+		{
+			description: "two shards",
+			leafIndex:   2,
+			tid:         300,
+			ranges: LogRanges{
+				ranges: []LogRange{
+					{
+						TreeID:     100,
+						TreeLength: 5,
+					}, {
+						TreeID: 300,
+					},
+				},
+			},
+			expectedIndex: 7,
+		}, {
+			description: "three shards",
+			leafIndex:   1,
+			tid:         300,
+			ranges: LogRanges{
+				ranges: []LogRange{
+					{
+						TreeID:     100,
+						TreeLength: 5,
+					}, {
+						TreeID:     300,
+						TreeLength: 4,
+					}, {
+						TreeID: 400,
+					},
+				},
+			},
+			expectedIndex: 6,
+		}, {
+			description: "single shard with no length, non-nil ranges",
+			leafIndex:   2,
+			tid:         30,
+			ranges: LogRanges{
+				ranges: []LogRange{
+					{
+						TreeID: 30,
+					},
+				},
+			},
+			expectedIndex: 2,
+		}, {
+			description: "invalid tid passed in",
+			leafIndex:   2,
+			tid:         4,
+			ranges: LogRanges{
+				ranges: []LogRange{
+					{
+						TreeID: 30,
+					},
+				},
+			},
+			expectedIndex: -1,
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.description, func(t *testing.T) {
+			got := VirtualLogIndex(test.leafIndex, test.tid, test.ranges)
+			if got != test.expectedIndex {
+				t.Fatalf("expected %v got %v", test.expectedIndex, got)
+			}
+		})
+	}
+}
diff --git a/pkg/sharding/ranges.go b/pkg/sharding/ranges.go
index 3358855b032fd2e457df30d3230b6055db264c57..b5c083eed4d68ab060a9cdd6be5105a6cfb3a4e4 100644
--- a/pkg/sharding/ranges.go
+++ b/pkg/sharding/ranges.go
@@ -15,8 +15,13 @@
 
 package sharding
 
+import (
+	"fmt"
+	"strings"
+)
+
 type LogRanges struct {
-	Ranges []LogRange
+	ranges []LogRange
 }
 
 type LogRange struct {
@@ -26,7 +31,7 @@ type LogRange struct {
 
 func (l *LogRanges) ResolveVirtualIndex(index int) (int64, int64) {
 	indexLeft := index
-	for _, l := range l.Ranges {
+	for _, l := range l.ranges {
 		if indexLeft < int(l.TreeLength) {
 			return l.TreeID, int64(indexLeft)
 		}
@@ -34,10 +39,48 @@ func (l *LogRanges) ResolveVirtualIndex(index int) (int64, int64) {
 	}
 
 	// Return the last one!
-	return l.Ranges[len(l.Ranges)-1].TreeID, int64(indexLeft)
+	return l.ranges[len(l.ranges)-1].TreeID, int64(indexLeft)
+}
+
+// ActiveTreeID returns the tree ID of the active shard, which is always the last shard in the range
+func (l *LogRanges) ActiveTreeID() int64 {
+	return l.ranges[len(l.ranges)-1].TreeID
+}
+
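+// Empty returns true if no log ranges have been set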
+func (l *LogRanges) Empty() bool {
+	return l.ranges == nil
+}
+
+// TotalLength returns the total length across all shards
+func (l *LogRanges) TotalLength() int64 {
+	var total int64
+	for _, r := range l.ranges {
+		total += r.TreeLength
+	}
+	return total
 }
 
-// ActiveIndex returns the active shard index, always the last shard in the range
-func (l *LogRanges) ActiveIndex() int64 {
-	return l.Ranges[len(l.Ranges)-1].TreeID
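+// SetRanges sets the backing list of log ranges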
+func (l *LogRanges) SetRanges(r []LogRange) {
+	l.ranges = r
+}
+
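+// GetRanges returns the backing list of log ranges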
+func (l *LogRanges) GetRanges() []LogRange {
+	return l.ranges
+}
+
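+// AppendRange adds a LogRange to the end of the list; the last range is treated as the active shard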
+func (l *LogRanges) AppendRange(r LogRange) {
+	l.ranges = append(l.ranges, r)
+}
+
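+// String returns the log ranges as a comma-separated list of TreeID=TreeLength pairs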
+func (l *LogRanges) String() string {
+	ranges := []string{}
+	for _, r := range l.ranges {
+		ranges = append(ranges, fmt.Sprintf("%d=%d", r.TreeID, r.TreeLength))
+	}
+	return strings.Join(ranges, ",")
 }
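
For reference, a quick tour of the new accessors; note that Empty is a nil check, so a LogRanges built from an empty but non-nil slice does not count as empty. A runnable sketch reusing the values from the existing ResolveVirtualIndex test:

```go
// Sketch: exercising the LogRanges accessors added above.
package main

import (
	"fmt"

	"github.com/sigstore/rekor/pkg/sharding"
)

func main() {
	var ranges sharding.LogRanges
	fmt.Println(ranges.Empty()) // true: nothing set yet

	ranges.SetRanges([]sharding.LogRange{
		{TreeID: 1, TreeLength: 17},
		{TreeID: 2, TreeLength: 1},
		{TreeID: 3, TreeLength: 100},
	})

	fmt.Println(ranges.Empty())          // false
	fmt.Println(ranges.TotalLength())    // 118
	fmt.Println(ranges.ActiveTreeID())   // 3
	fmt.Println(ranges.String())         // 1=17,2=1,3=100
	fmt.Println(len(ranges.GetRanges())) // 3
}
```
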
diff --git a/pkg/sharding/ranges_test.go b/pkg/sharding/ranges_test.go
index 2249ea3c2a309daefa992c7794c340bd5802983a..d40b89dd13748f2faa4439f6c61d06bbf4926cc6 100644
--- a/pkg/sharding/ranges_test.go
+++ b/pkg/sharding/ranges_test.go
@@ -19,7 +19,7 @@ import "testing"
 
 func TestLogRanges_ResolveVirtualIndex(t *testing.T) {
 	lrs := LogRanges{
-		Ranges: []LogRange{
+		ranges: []LogRange{
 			{TreeID: 1, TreeLength: 17},
 			{TreeID: 2, TreeLength: 1},
 			{TreeID: 3, TreeLength: 100},
diff --git a/tests/sharding-e2e-test.sh b/tests/sharding-e2e-test.sh
index a2b578e43f98b6c98a05f8f1b303e4da027dbd93..451ab02141cebb5cdc3f1aca4acace76d13097bb 100755
--- a/tests/sharding-e2e-test.sh
+++ b/tests/sharding-e2e-test.sh
@@ -34,6 +34,20 @@ go build -o rekor-cli ./cmd/rekor-cli
 REKOR_CLI=$(pwd)/rekor-cli
 go build -o rekor-server ./cmd/rekor-server
 
+function check_log_index () {
+  logIndex=$1
+  # make sure we can get this log index from rekor
+  $REKOR_CLI get --log-index $logIndex --rekor_server http://localhost:3000
+  # make sure the entry index matches the log index
+  gotIndex=$($REKOR_CLI get --log-index $logIndex --rekor_server http://localhost:3000 --format json | jq -r .LogIndex)
+  if [[ "$gotIndex" == "$logIndex" ]]; then
+    echo "New entry has expected virtual log index $gotIndex"
+  else
+    echo "FAIL: expected virtual log index $logIndex, got $gotIndex"
+    exit 1
+  fi
+}
+
 count=0
 
 echo -n "waiting up to 60 sec for system to start"
@@ -66,7 +80,7 @@ $REKOR_CLI upload --artifact file2 --signature file2.sig --pki-format=x509 --pub
 cd ../..
 
 # Make sure we have three entries in the log
-$REKOR_CLI get --log-index 2 --rekor_server http://localhost:3000
+check_log_index 2
 
 # Now, we want to shard the log.
 # Create a new tree
@@ -143,7 +157,14 @@ fi
 
 # Now, if we run $REKOR_CLI get --log_index 2 again, it should grab the log index
 # from Shard 0
-$REKOR_CLI get --log-index 2 --rekor_server http://localhost:3000
+check_log_index 2
+
+# Add in a new entry to this shard
+pushd tests/sharding-testdata
+$REKOR_CLI upload --artifact file2 --signature file2.sig --pki-format=x509 --public-key=ec_public.pem --rekor_server http://localhost:3000
+popd
+# Pass in the virtual log_index and make sure it resolves
+check_log_index 3
 
 # TODO: Try to get the entry via Entry ID (Tree ID in hex + UUID)
 UUID=$($REKOR_CLI get --log-index 2 --rekor_server http://localhost:3000 --format json | jq -r .UUID)